Commit fabc533
1 Parent(s): 83687e6
Update parquet files (step 15 of 121)
This view is limited to 50 files because it contains too many changes. See raw diff.
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Ciel Gestion Commerciale 16.0 Crack..md +0 -107
- spaces/1gistliPinn/ChatGPT4/Examples/Arbaeen Nawawi In Urdu Pdf Download.md +0 -112
- spaces/1gistliPinn/ChatGPT4/Examples/Csi Safe 2014 Crack 2015 13 ((EXCLUSIVE)).md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/Electronic Communications Systems By Wayne Tomasi Pdf 5th.rar TOP.md +0 -6
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/CarX Street Join Clubs Defeat Bosses and Build Your Dream Car - Free APK Download.md +0 -102
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Drift Hunters MAX for Mac and Enjoy Over 25 Awesome Drift Cars.md +0 -139
- spaces/1phancelerku/anime-remove-background/Enjoy Royal Match on Your Android Device and Unlock Amazing Rewards.md +0 -157
- spaces/AGITM/ToneCorrectionRecognition/app.py +0 -166
- spaces/AHzizi/WaifuVoiceGen/text/symbols.py +0 -39
- spaces/AIConsultant/MusicGen/audiocraft/grids/audiogen/audiogen_pretrained_16khz_eval.py +0 -68
- spaces/AIConsultant/MusicGen/audiocraft/grids/compression/encodec_musicgen_32khz.py +0 -34
- spaces/AIFILMS/StyleGANEX/models/bisenet/README.md +0 -68
- spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/audio/__init__.py +0 -0
- spaces/AIGC-Audio/AudioGPT/NeuralSeq/tasks/tts/synta.py +0 -25
- spaces/AIGC-Audio/Make_An_Audio/ldm/modules/losses_audio/vggishish/train_vggishish.py +0 -199
- spaces/AIWaves/Software_Company/src/agents/Agent/Agent.py +0 -243
- spaces/AIWaves/Software_Company/src/agents/LLM/__init__.py +0 -0
- spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/work_dirs/__init__.py +0 -0
- spaces/AUBADA-ALARABI/AraPoet/app.py +0 -121
- spaces/AchyuthGamer/OpenGPT-Chat-UI/.svelte-kit/types/src/routes/conversation/[id]/$types.d.ts +0 -24
- spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/server/database.ts +0 -23
- spaces/Adapter/T2I-Adapter/ldm/modules/extra_condition/api.py +0 -269
- spaces/AgentVerse/agentVerse/agentverse_command/main_simulation_gui.py +0 -21
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridsizer/GetExpandedChildHeight.js +0 -11
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/overlapsizer/OverlapSizer.js +0 -49
- spaces/AlexWang/lama/saicinpainting/training/losses/segmentation.py +0 -43
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/shap_e.md +0 -190
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/optimization/habana.md +0 -79
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/models/test_unet_2d_blocks.py +0 -337
- spaces/Andy1621/uniformer_image_detection/configs/gn+ws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco.py +0 -17
- spaces/Andy1621/uniformer_image_detection/tools/model_converters/upgrade_model_version.py +0 -209
- spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_d6_r101-d16_512x1024_40k_cityscapes.py +0 -2
- spaces/Andy1621/uniformer_image_segmentation/configs/ocrnet/ocrnet_hr18_512x512_160k_ade20k.py +0 -35
- spaces/AnishKumbhar/ChatBot/text-generation-webui-main/css/chat_style-messenger.css +0 -99
- spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/cnn/bricks/scale.py +0 -21
- spaces/Anonymous-sub/Rerender/ControlNet/ldm/modules/midas/midas/__init__.py +0 -0
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/command/install.py +0 -139
- spaces/Awesimo/jojogan/e4e/models/stylegan2/op/fused_bias_act.cpp +0 -21
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/export/caffe2_inference.py +0 -161
- spaces/Benson/text-generation/Examples/Calle Carx 1.74 5 Mod Apk.md +0 -64
- spaces/Bravefe/Artist_Classification/README.md +0 -12
- spaces/CVPR/LIVE/sample_boundary.h +0 -454
- spaces/CVPR/WALT/mmdet/__init__.py +0 -28
- spaces/CVPR/lama-example/saicinpainting/training/modules/ffc.py +0 -485
- spaces/CVPR/unicl-zero-shot-img-recog/model/image_encoder/swin_transformer.py +0 -636
- spaces/ClassCat/Brain-tumor-3D-segmentation-with-MONAI/README.md +0 -12
- spaces/Cpp4App/Cpp4App/app.py +0 -284
- spaces/Cropinky/esrgan/realesrgan/archs/discriminator_arch.py +0 -67
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/pens/roundingPen.py +0 -112
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-f90e1963.js +0 -0
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Ciel Gestion Commerciale 16.0 Crack..md
DELETED
@@ -1,107 +0,0 @@
-<br />
-<br> - Risks and drawbacks of using a cracked version of Ciel gestion commerciale 16.0 <br> - Benefits and advantages of using a licensed version of Ciel gestion commerciale 16.0 | | H2: What is Ciel gestion commerciale and what is a crack? | - Definition and features of Ciel gestion commerciale 16.0 <br> - Definition and types of cracks <br> - How cracks work and why they are illegal | | H2: Risks and drawbacks of using a cracked version of Ciel gestion commerciale 16.0 | - Security risks: malware, viruses, spyware, ransomware, etc. <br> - Legal risks: fines, lawsuits, penalties, etc. <br> - Functional risks: errors, bugs, crashes, data loss, etc. <br> - Ethical risks: unfair competition, piracy, theft, etc. | | H2: Benefits and advantages of using a licensed version of Ciel gestion commerciale 16.0 | - Security benefits: protection, updates, support, etc. <br> - Legal benefits: compliance, warranty, rights, etc. <br> - Functional benefits: performance, reliability, compatibility, etc. <br> - Ethical benefits: respect, trust, reputation, etc. | | H1: Conclusion | - Summary of the main points <br> - Recommendation to avoid cracks and use licensed software <br> - Call to action to buy Ciel gestion commerciale 16.0 from the official website | Article with HTML formatting: <h1>Ciel gestion commerciale 16.0 crack: What is it and why you should avoid it</h1>
-<p>If you are looking for a software to manage your business activities, you may have heard of Ciel gestion commerciale 16.0. This is a popular software that helps you to create invoices, manage stocks, track payments, generate reports, and more. But you may also have heard of Ciel gestion commerciale 16.0 crack, which is a modified version of the software that bypasses the activation process and allows you to use it for free.</p>
-<h2>Ciel gestion commerciale 16.0 crack.</h2><br /><p><b><b>DOWNLOAD</b> »»» <a href="https://byltly.com/2uKAgf">https://byltly.com/2uKAgf</a></b></p><br /><br />
-<p>In this article, we will explain what Ciel gestion commerciale 16.0 and what a crack are, and why you should avoid using a cracked version of this software. We will also show you the benefits and advantages of using a licensed version of Ciel gestion commerciale 16.0.</p>
-<h2>What is Ciel gestion commerciale and what is a crack?</h2>
-<p>Ciel gestion commerciale 16.0 is a software developed by Ciel, a French company that specializes in accounting and business management software. It is designed for small and medium-sized businesses that need a simple and efficient tool to manage their daily operations.</p>
-<p>Ciel gestion commerciale 16.0 allows you to:</p>
-<ul>
-<li>Create professional invoices with customizable templates</li>
-<li>Manage your stocks and inventory with barcode scanning</li>
-<li>Track your payments and reminders with automatic alerts</li>
-<li>Generate reports and statistics with graphs and charts</li>
-<li>Integrate with other Ciel software such as Ciel Compta or Ciel Paye</li>
-<li>Export your data to Excel or PDF formats</li>
-</ul>
-<p>A crack is a program that modifies another program to remove or disable its security features, such as activation codes or serial numbers. A crack can also be a patch that changes some parts of the original program's code to alter its behavior or functionality.</p>
-<p>A crack is usually created by hackers or crackers who want to use a software without paying for it or without following its terms and conditions. A crack can also be distributed by websites or forums that offer illegal downloads of software.</p>
-<p>Ciel gestion commerciale 16.0 serial key<br />
-Ciel gestion commerciale 16.0 activation code<br />
-Ciel gestion commerciale 16.0 license key<br />
-Ciel gestion commerciale 16.0 patch<br />
-Ciel gestion commerciale 16.0 keygen<br />
-Ciel gestion commerciale 16.0 full version download<br />
-Ciel gestion commerciale 16.0 torrent<br />
-Ciel gestion commerciale 16.0 free download<br />
-Ciel gestion commerciale 16.0 crack download<br />
-Ciel gestion commerciale 16.0 cracked version<br />
-Ciel gestion commerciale 16.0 crack mac<br />
-Ciel gestion commerciale 16.0 crack windows<br />
-Ciel gestion commerciale 16.0 crack francais<br />
-Ciel gestion commerciale 16.0 crack gratuit<br />
-Ciel gestion commerciale 16.0 crack telecharger<br />
-Ciel gestion commerciale 16.0 crack mega<br />
-Ciel gestion commerciale 16.0 crack mediafire<br />
-Ciel gestion commerciale 16.0 crack zippyshare<br />
-Ciel gestion commerciale 16.0 crack rar<br />
-Ciel gestion commerciale 16.0 crack zip<br />
-Ciel gestion commerciale 16.0 crack no survey<br />
-Ciel gestion commerciale 16.0 crack no password<br />
-Ciel gestion commerciale 16.0 crack online<br />
-Ciel gestion commerciale 16.0 crack offline<br />
-Ciel gestion commerciale 16.0 crack generator<br />
-Ciel gestion commerciale 16.0 crack software<br />
-Ciel gestion commerciale 16.0 crack tool<br />
-Ciel gestion commerciale 16.0 crack apk<br />
-Ciel gestion commerciale 16.0 crack ios<br />
-Ciel gestion commerciale 16.0 crack android<br />
-Ciel gestion commerciale 16.0 crack review<br />
-Ciel gestion commerciale 16.0 crack tutorial<br />
-Ciel gestion commerciale 16.0 crack video<br />
-Ciel gestion commerciale 16.0 crack youtube<br />
-Ciel gestion commerciale 16.0 crack reddit<br />
-Ciel gestion commerciale 16.0 crack quora<br />
-Ciel gestion commerciale 16.0 crack forum<br />
-Ciel gestion commerciale 16.0 crack blog<br />
-Ciel gestion commerciale 16.0 crack website<br />
-Ciel gestion commerciale 16.0 crack link<br />
-How to get ciel gestion commerciale 16.0 crack <br />
-How to install ciel gestion commerciale 16.0 crack <br />
-How to use ciel gestion commerciale 16.0 crack <br />
-How to activate ciel gestion commerciale 16.0 crack <br />
-How to update ciel gestion commerciale 16.0 crack <br />
-How to uninstall ciel gestion commerciale 16.0 crack <br />
-How to fix ciel gestion commerciale 16.0 crack <br />
-How to remove ciel gestion commerciale 16.0 crack <br />
-How to download ciel gestion commerciale 16.0 crack <br />
-How to buy ciel gestion commerciale 16.0</p>
-<p>A crack works by replacing or modifying some files or registry entries of the original program to trick it into thinking that it has been activated or registered legally. A crack can also bypass some checks or validations that the original program performs to verify its authenticity or integrity.</p>
-<h2>Risks and drawbacks of using a cracked version of Ciel gestion commerciale 16.0</h2>
-<p>Using a cracked version of Ciel gestion commerciale 16.0 may seem tempting if you want to save money or try the software before buying it. However, you should be aware of the many risks and drawbacks that come with using a cracked version of this software.</p>
-<p>Some of the risks and drawbacks are:</p>
-<ul>
-<li><b>Security risks:</b> A cracked version of Ciel gestion commerciale 16.0 may contain malware, viruses, spyware, ransomware, or other malicious programs that can harm your computer or steal your data. You may also expose yourself to hackers or cybercriminals who can access your system or network through the crack.</li>
-<li><b>Legal risks:</b> A cracked version of Ciel gestion commerciale 16.0 is illegal and violates the intellectual property rights of Ciel and its partners. You may face fines, lawsuits, penalties, or even criminal charges if you are caught using or distributing a cracked version of this software.</li>
-<li><b>Functional risks:</b> A cracked version of Ciel gestion commerciale 16.0 may not work properly or as intended by the developers. You may encounter errors, bugs, crashes, data loss, or compatibility issues with other software or hardware. You may also miss out on important updates or features that are only available for licensed users.</li>
-<li><b>Ethical risks:</b> A cracked version of Ciel gestion commerciale 16.0 is unfair and unethical towards the creators and developers of this software who invested time, money, and effort to produce it. You may also damage your reputation or credibility as a business owner or professional if you use or endorse a cracked version of this software.</li>
-</ul>
-<h2>Benefits and advantages of using a licensed version of Ciel gestion commerciale 16.0</h2>
-<p>The best way to use Ciel gestion commerciale 16.0 is to buy a licensed version from the official website of Ciel or one of its authorized resellers. By doing so, you will enjoy many benefits and advantages that are not available for users of cracked versions.</p>
-<p>Some of the benefits and advantages are:</p>
-<ul>
-<li><b>Security benefits:</b> A licensed version of Ciel gestion commerciale 16.0 is safe and secure from any malware, viruses, spyware, ransomware, or other malicious programs that can harm your computer or steal your data. You will also benefit from the protection, updates, support, and assistance that Ciel provides for its customers.</li>
-<li><b>Legal benefits:</b> A licensed version of Ciel gestion commerciale 16.0 is legal and complies with the intellectual property rights of Ciel and its partners. You will not face any fines, lawsuits, penalties, or criminal charges for using or distributing this software.</li>
-<li><b>Functional benefits:</b>A licensed version of Ciel gestion commerciale 16 . 0 works properly and as intended by the developers. You will not encounter any errors, bugs, crashes, data loss, or compatibility issues with other software or hardware. You will also enjoy all the updates and features that are only available for licensed users.</li>
-<li><b>Ethical benefits:</b>A licensed version of Ciel gestion commerciale 16 . 0 is fair and ethical towards the creators and developers of this software who invested time, money, and effort to produce it. You will also enhance your reputation or credibility as a business owner or professional if you use or recommend this software.</li>
-</ul>
-<h1>Conclusion</h1>
-<p>In conclusion, Ciel gestion commerciale 16. 0 is a great software for managing your business activities, but using a cracked version of it is not worth it. You will expose yourself to many risks and drawbacks that can harm your computer, your data, your business, and your reputation. On the other hand, using a licensed version of Ciel gestion commerciale 16. 0 will bring you many benefits and advantages that will improve your security, your legality, your functionality, and your ethics. Therefore, we strongly recommend you to avoid cracks and use licensed software instead.</p>
-<p>If you want to buy Ciel gestion commerciale 16. 0, you can visit the official website of Ciel at https://www.ciel.com/ or contact one of their authorized resellers near you. You can also request a free trial or a demo to test the software before buying it. <p>opportunity to use Ciel gestion commerciale 16.0, the best software for your business management.</p>
-<h3>FAQs</h3>
-<p>Here are some frequently asked questions about Ciel gestion commerciale 16.0 and cracks:</p>
-<ol>
-<li><b>What is the price of Ciel gestion commerciale 16.0?</b><br>
-The price of Ciel gestion commerciale 16.0 depends on the number of users and the duration of the subscription. You can choose between a monthly or a yearly subscription, and between one or more users. The prices range from 29€ to 99€ per month, or from 299€ to 999€ per year.</li>
-<li><b>How can I activate Ciel gestion commerciale 16.0?</b><br>
-To activate Ciel gestion commerciale 16.0, you need to enter the activation code that you received by email after purchasing the software. You can also activate it online by logging in to your Ciel account and following the instructions.</li>
-<li><b>How can I update Ciel gestion commerciale 16.0?</b><br>
-To update Ciel gestion commerciale 16.0, you need to have an active subscription and an internet connection. You can check for updates from the software itself or from your Ciel account. You will be notified when a new update is available and you can download and install it easily.</li>
-<li><b>How can I contact Ciel for support or assistance?</b><br>
-To contact Ciel for support or assistance, you can use their online chat service, their phone service, their email service, or their online forum. You can find all the contact details and the opening hours on their website at https://www.ciel.com/contactez-nous/.</li>
-<li><b>How can I report a crack or a piracy of Ciel gestion commerciale 16.0?</b><br>
-To report a crack or a piracy of Ciel gestion commerciale 16.0, you can use their online form at https://www.ciel.com/signaler-un-piratage/. You can also contact them by phone or by email and provide them with any evidence or information that you have.</li>
-</ol>
-</p> 0a6ba089eb<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/Arbaeen Nawawi In Urdu Pdf Download.md
DELETED
@@ -1,112 +0,0 @@
-
-<h1>Arbaeen Nawawi In Urdu Pdf Download: A Valuable Resource for Learning Hadith</h1>
-
-<p>Hadith are the sayings and actions of the Prophet Muhammad (peace be upon him) that are recorded by his companions and transmitted to the later generations. Hadith are one of the primary sources of Islamic knowledge and guidance, along with the Quran. However, not all hadith are authentic and reliable, and some of them are fabricated or weak. Therefore, it is essential to learn hadith from trustworthy and qualified scholars who have verified and explained them.</p>
-
-<p>One of the most famous and respected scholars of hadith is Imam Abu Zakariya Yahya bin Sharaf al-Nawawi (1233-1277 CE), who belonged to the Shafi school of thought. He was a prolific writer and a renowned jurist, theologian, historian, and mystic. He authored many books on various Islamic sciences, such as fiqh, tafsir, usul al-fiqh, tasawwuf, and hadith. Among his most popular works are Riyad al-Salihin (The Gardens of the Righteous), Al-Minhaj fi Sharh Sahih Muslim (The Methodology in Explaining Sahih Muslim), and Arbaeen al-Nawawi (The Forty Hadiths of Nawawi).</p>
-<h2>Arbaeen Nawawi In Urdu Pdf Download</h2><br /><p><b><b>DOWNLOAD</b> ☆ <a href="https://imgfil.com/2uxYmE">https://imgfil.com/2uxYmE</a></b></p><br /><br />
-
-<h2>What is Arbaeen al-Nawawi?</h2>
-
-<p>Arbaeen al-Nawawi is a collection of forty hadiths that Imam Nawawi compiled and commented on. He selected these hadiths from various sources, such as Sahih al-Bukhari, Sahih Muslim, Sunan Abu Dawud, Sunan al-Tirmidhi, Sunan al-Nasa'i, Sunan Ibn Majah, Musnad Ahmad, Muwatta Malik, and others. He chose these hadiths because they are comprehensive and fundamental in Islamic teachings and cover various aspects of faith, worship, ethics, manners, and social relations.</p>
-
-<p>Imam Nawawi said in his introduction to the book: "I have chosen these forty hadiths from among the sayings of Allah's Messenger (peace be upon him) that are comprehensive in meaning and convey great benefits. They are sufficient for those who act upon them to attain success in this world and the Hereafter."</p>
-
-<p>Imam Nawawi also explained each hadith in detail, clarifying its meaning, context, chain of narration, authenticity, and implications. He also mentioned the opinions of other scholars and related verses from the Quran. He did this to make the book more useful and beneficial for the readers.</p>
-
-<h3>Why should you download Arbaeen Nawawi in Urdu Pdf?</h3>
-
-<p>If you want to learn hadith from a reliable and authoritative source, you should download Arbaeen Nawawi in Urdu Pdf. This book will help you to:</p>
-
-<ul>
-<li>Understand the core teachings and principles of Islam that are derived from the Quran and the Sunnah.</li>
-<li>Increase your faith and love for Allah and His Messenger (peace be upon him) by learning their words and wisdom.</li>
-<li>Improve your character and behavior by following the moral guidance and ethical standards that are set by the Prophet (peace be upon him).</li>
-<li>Enhance your knowledge and skills by studying the commentary and explanation of Imam Nawawi and other scholars.</li>
-<li>Practice what you learn by applying the hadiths to your daily life and situations.</li>
-</ul>
-
-<p>Downloading Arbaeen Nawawi in Urdu Pdf is easy and convenient. You can access it anytime and anywhere on your device without any hassle. You can also share it with your friends and family who are interested in learning hadith.</p>
-
-<h4>How to download Arbaeen Nawawi in Urdu Pdf?</h4>
-
-<p>To download Arbaeen Nawawi in Urdu Pdf, you just need to follow these simple steps:</p>
-<p></p>
-
-<ol>
-<li>Go to any of the websites that offer Arbaeen Nawawi in Urdu Pdf for free download, such as <a href="https://archive.org/details/toobaa-research-library-ArbaeenNowviUrdu">https://archive.org/details/toobaa-research-library-ArbaeenNowviUrdu</a>, <a href="https://librarypk.com/sharah-arbaeen-e-nawawi-urdu/">https://librarypk.com/sharah-arbaeen-e-nawawi-urdu/</a>, or <a href="https://www.emaanlibrary.com/book/urdu-sharah-arbaeen-e-navavi-imam-nawawi/">https://www.emaanlibrary.com/book/urdu-sharah-arbaeen-e-navavi-imam-nawawi/</a>.</li>
-<li>Find the link or button that says "Download" or "Download Pdf" and click on it.</li>
-<li>Select the folder or location where you want to save the file on your device.</li>
-<li>Open the file with a program or app that can read Pdf files and enjoy reading Arbaeen Nawawi in Urdu.</li>
-</ol>
-
-<h5>Conclusion</h5>
-
-<p>Arbaeen Nawawi in Urdu Pdf is a valuable resource for learning hadith from one of the greatest scholars of Islam. It contains forty hadiths that are comprehensive and fundamental in Islamic teachings. It also provides a detailed commentary and explanation of each hadith by Imam Nawawi and other scholars. It is easy and convenient to download Arbaeen Nawawi in Urdu Pdf from various websites for free. By reading this book, you can increase your faith, knowledge, character, and practice of Islam.</p>
-<h6>What are the main themes and topics of Arbaeen Nawawi in Urdu Pdf?</h6>
-
-<p>Arbaeen Nawawi in Urdu Pdf covers the main themes and topics of Islam that are derived from the Quran and the Sunnah. These include:</p>
-
-<ul>
-<li>The fundamentals of faith, such as the belief in Allah, His angels, His books, His messengers, the Day of Judgment, and the divine decree.</li>
-<li>The pillars of Islam, such as the testimony of faith, the prayer, the zakat, the fasting, and the pilgrimage.</li>
-<li>The virtues and obligations of worship, such as sincerity, intention, remembrance, supplication, gratitude, patience, and repentance.</li>
-<li>The rights and duties of Muslims, such as the rights of Allah, the rights of the Prophet (peace be upon him), the rights of parents, relatives, neighbors, friends, and strangers.</li>
-<li>The moral and ethical values of Islam, such as honesty, justice, mercy, kindness, generosity, modesty, humility, and forgiveness.</li>
-<li>The social and legal aspects of Islam, such as marriage, divorce, inheritance, trade, contracts, testimony, and judicial matters.</li>
-<li>The spiritual and mystical aspects of Islam, such as purification of the heart, love of Allah and His Messenger (peace be upon him), trust in Allah, fear of Allah, hope in Allah, and contentment with Allah.</li>
-</ul>
-
-<p>These themes and topics are explained and illustrated by the hadiths that are collected and commented on by Imam Nawawi in Arbaeen Nawawi in Urdu Pdf.</p>
-
-<h7>How to benefit from Arbaeen Nawawi in Urdu Pdf?</h7>
-
-<p>Arbaeen Nawawi in Urdu Pdf is a book that can benefit anyone who reads it with sincerity and attention. However, to get the most benefit from this book, one should follow some guidelines and tips:</p>
-
-<ul>
-<li>Read the book with an open mind and a humble heart.</li>
-<li>Read the book with a sincere intention to learn and act upon what is taught.</li>
-<li>Read the book with respect and reverence for the words of Allah and His Messenger (peace be upon him).</li>
-<li>Read the book with a critical and analytical mind that seeks to understand and verify what is said.</li>
-<li>Read the book with a practical and realistic approach that applies what is learned to one's life and situations.</li>
-<li>Read the book with a regular and consistent schedule that allows one to review and reflect on what is read.</li>
-<li>Read the book with a supportive and cooperative attitude that shares what is learned with others and seeks their feedback and advice.</li>
-</ul>
-
-<p>With these guidelines and tips, Arbaeen Nawawi in Urdu Pdf can be a source of guidance and inspiration for anyone who wants to learn hadith from one of the greatest scholars of Islam.</p>
-<h8>What are the challenges and opportunities of Arbaeen Nawawi in Urdu Pdf?</h8>
-
-<p>Arbaeen Nawawi in Urdu Pdf is a book that has many challenges and opportunities for the readers and learners of hadith. Some of these are:</p>
-
-<ul>
-<li>The challenge of verifying the authenticity and reliability of the hadiths and their sources, as there are many fabricated and weak hadiths that are attributed to the Prophet (peace be upon him).</li>
-<li>The challenge of understanding the meaning and context of the hadiths and their implications, as there are many linguistic, cultural, historical, and juristic nuances that need to be considered.</li>
-<li>The challenge of applying the hadiths to one's life and situations, as there are many differences and changes that have occurred since the time of the Prophet (peace be upon him) and his companions.</li>
-<li>The opportunity of learning from the best and most authentic sources of Islamic knowledge and guidance, as the hadiths are the second source of Islam after the Quran.</li>
-<li>The opportunity of increasing one's faith and love for Allah and His Messenger (peace be upon him) by learning their words and wisdom.</li>
-<li>The opportunity of improving one's character and behavior by following the moral guidance and ethical standards that are set by the Prophet (peace be upon him).</li>
-<li>The opportunity of enhancing one's knowledge and skills by studying the commentary and explanation of Imam Nawawi and other scholars.</li>
-</ul>
-
-<p>These challenges and opportunities can be overcome and utilized by reading Arbaeen Nawawi in Urdu Pdf with sincerity, attention, respect, critical thinking, practical approach, regularity, consistency, support, cooperation, and feedback.</p>
-
-<h9>Conclusion</h9>
-
-<p>Arbaeen Nawawi in Urdu Pdf is a valuable resource for learning hadith from one of the greatest scholars of Islam. It contains forty hadiths that are comprehensive and fundamental in Islamic teachings. It also provides a detailed commentary and explanation of each hadith by Imam Nawawi and other scholars. It is easy and convenient to download Arbaeen Nawawi in Urdu Pdf from various websites for free. By reading this book, you can increase your faith, knowledge, character, and practice of Islam.</p>
-
-<p>Arbaeen Nawawi in Urdu Pdf covers the main themes and topics of Islam that are derived from the Quran and the Sunnah. These include the fundamentals of faith, the pillars of Islam, the virtues and obligations of worship, the rights and duties of Muslims, the moral and ethical values of Islam, the social and legal aspects of Islam, and the spiritual and mystical aspects of Islam. These themes and topics are explained and illustrated by the hadiths that are collected and commented on by Imam Nawawi in Arbaeen Nawawi in Urdu Pdf.</p>
-
-<p>To benefit from Arbaeen Nawawi in Urdu Pdf, one should follow some guidelines and tips such as reading the book with an open mind and a humble heart; reading the book with a sincere intention to learn and act upon what is taught; reading the book with respect and reverence for the words of Allah and His Messenger (peace be upon him); reading the book with a critical and analytical mind that seeks to understand and verify what is said; reading the book with a practical and realistic approach that applies what is learned to one's life and situations; reading the book with a regular and consistent schedule that allows one to review and reflect on what is read; reading the book with a supportive and cooperative attitude that shares what is learned with others and seeks their feedback and advice.</p>
-
-<p>Arbaeen Nawawi in Urdu Pdf is a book that has many challenges and opportunities for the readers and learners of hadith. These include verifying the authenticity and reliability of the hadiths; understanding their meaning; applying them to one's life; learning from authentic sources; increasing faith; improving character; enhancing knowledge; studying commentary; overcoming difficulties; utilizing resources; seeking guidance; sharing benefits; etc. These challenges can be overcome by reading Arbaeen Nawawi in Urdu Pdf with sincerity, attention, respect, critical thinking, practical approach, regularity, consistency, support, cooperation, feedback.</p>
-<h10>Conclusion</h10>
-
-<p>Arbaeen Nawawi in Urdu Pdf is a book that every Muslim should read and benefit from. It is a collection of forty hadiths that summarize the essence of Islam and its teachings. It is also a commentary and explanation of these hadiths by one of the most eminent scholars of Islam, Imam Nawawi. It is a book that can be downloaded for free from various websites and read on any device. It is a book that can increase one's faith, knowledge, character, and practice of Islam.</p>
-
-<p>Arbaeen Nawawi in Urdu Pdf covers the main themes and topics of Islam, such as the fundamentals of faith, the pillars of Islam, the virtues and obligations of worship, the rights and duties of Muslims, the moral and ethical values of Islam, the social and legal aspects of Islam, and the spiritual and mystical aspects of Islam. It explains and illustrates these themes and topics by the hadiths that are selected and commented on by Imam Nawawi. It also provides the opinions and views of other scholars and related verses from the Quran.</p>
-
-<p>To benefit from Arbaeen Nawawi in Urdu Pdf, one should read it with sincerity, attention, respect, critical thinking, practical approach, regularity, consistency, support, cooperation, feedback. One should also overcome the challenges and utilize the opportunities that this book offers. One should also share what one learns with others and seek their guidance and advice.</p>
-
-<p>Arbaeen Nawawi in Urdu Pdf is a book that can change one's life for the better. It is a book that can guide one to the path of Allah and His Messenger (peace be upon him). It is a book that can make one a better Muslim and a better human being.</p> 3cee63e6c2<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/Csi Safe 2014 Crack 2015 13 ((EXCLUSIVE)).md
DELETED
@@ -1,6 +0,0 @@
-<h2>csi safe 2014 crack 2015 13</h2><br /><p><b><b>Download Zip</b> ⇔ <a href="https://imgfil.com/2uxX5L">https://imgfil.com/2uxX5L</a></b></p><br /><br />
-
-. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 4fefd39f24<br />
-<br />
-<br />
-<p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Electronic Communications Systems By Wayne Tomasi Pdf 5th.rar TOP.md
DELETED
@@ -1,6 +0,0 @@
-<h2>electronic communications systems by wayne tomasi pdf 5th.rar</h2><br /><p><b><b>Download File</b> 🆓 <a href="https://imgfil.com/2uxZZn">https://imgfil.com/2uxZZn</a></b></p><br /><br />
-<br />
-Electronic Communications Systems 5th Edition by Wayne Tomasi ... fundamentals through. tomasi torrent rar systems by wayne tomasi. 4d29de3e1b<br />
-<br />
-<br />
-<p></p>
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/CarX Street Join Clubs Defeat Bosses and Build Your Dream Car - Free APK Download.md
DELETED
@@ -1,102 +0,0 @@
-
-<h1>CarX Street: A Free Racing Game for Android Lovers</h1>
-<p>If you are a fan of racing games, you might have heard of CarX Street, a free racing game from CarX Technology for Android devices. CarX Street is an open beta test game that lets you experience the thrill of being a street racer in a dynamic open world. You can choose from a variety of cars, customize them to your liking, and race against other players or the AI in different modes. In this article, we will tell you more about CarX Street, its features, and how to download and install it on your Android device.</p>
-<h2>Introduction</h2>
-<h3>What is CarX Street?</h3>
-<p>CarX Street is a racing game that was released in 2023 by CarX Technology, the makers of CarX Drift Racing 2. It is an open beta test game, which means that it is still in development and may have some bugs or glitches. However, it also means that you can play it for free and give your feedback to the developers to improve the game.</p>
-<h2>car x street apkvision</h2><br /><p><b><b>Download File</b> ⭐ <a href="https://urlin.us/2uT2kG">https://urlin.us/2uT2kG</a></b></p><br /><br />
-<p>CarX Street is set in Sunset City, a huge open world that you can explore freely. You can drive on highways, city streets, or off-road tracks. You can also join clubs, challenge bosses, and compete with other players online or offline. You can also build your own garage, buy houses for your cars, and collect different cars for each race mode.</p>
-<h3>Why should you play CarX Street?</h3>
-<p>CarX Street is a game that will appeal to anyone who loves racing games. Here are some reasons why you should play CarX Street:</p>
-<ul>
-<li>It is free to play. You don't need to pay anything to download or play the game. You can also earn upgrades and rewards by winning races or completing tasks.</li>
-<li>It has realistic races and drifts. You can choose from different race modes, such as speed races, drift races, or drag races. You can also adjust the difficulty level and the AI behavior to suit your skills.</li>
-<li>It has a detailed car tuning system. You can customize your car's performance and appearance by changing the engine, transmission, body, suspension, tires, and more. You can also swap parts and create unique combinations for each race.</li>
-<li>It has stunning graphics and physics. The game uses the CarX Technology engine, which simulates realistic car behavior and physics. You can also enjoy the high-quality graphics and the dynamic day/night cycle.</li>
-<li>It has a large open world. You can explore Sunset City at your own pace and discover hidden locations, secrets, and easter eggs. You can also interact with other players or NPCs in the world.</li>
-</ul>
-<h2>Features of CarX Street</h2>
-<h3>Career mode</h3>
-<p>In career mode, you can start your journey as a street racer and become the legend of Sunset City. You can join clubs, defeat bosses, and prove your skills to everyone. You can also unlock new cars, parts, houses, and rewards as you progress through the career mode.</p>
-<h3>Improved car tuning</h3>
-<p>In CarX Street, you can tune your car to fit your preferences and needs. You can change the engine, transmission, body, suspension, tires, and more. You can also swap parts and create unique combinations for each race. For example, you can use a V8 engine for speed races or a rotary engine for drift races.</p>
-<h3>Visual car tuning</h3>
-<p>Besides performance tuning, you can also customize your car's appearance by changing the mirrors, headlights, lights, skirt, bumper, rims, and more. You can create a unique look for your car by using different colors, stickers, decals, and accessories.</p>
-<p>car x street racing game download<br />
-car x street mod apk unlimited money<br />
-car x street open world beta<br />
-car x street android gameplay<br />
-car x street apk obb<br />
-car x street drift racing 2<br />
-car x street sunset city<br />
-car x street apk pure<br />
-car x street hack apk<br />
-car x street latest version<br />
-car x street online multiplayer<br />
-car x street best cars<br />
-car x street cheats codes<br />
-car x street free download for pc<br />
-car x street update 2023<br />
-car x street tuning guide<br />
-car x street review<br />
-car x street ios release date<br />
-car x street apk mirror<br />
-car x street system requirements<br />
-car x street tips and tricks<br />
-car x street gameplay trailer<br />
-car x street offline mode<br />
-car x street customisation options<br />
-car x street apk mod menu<br />
-car x street graphics settings<br />
-car x street new features<br />
-car x street apk revdl<br />
-car x street unlimited coins and gems<br />
-car x street how to play<br />
-car x street apk data download<br />
-car x street realistic physics engine<br />
-car x street apk combo<br />
-car x street filehippo.com download link[^3^]<br />
-car x street google play store[^2^]<br />
-carx technologies official site[^6^]<br />
-carx technologies privacy policy[^5^]<br />
-license agreement for carx technologies[^4^]<br />
-carx technologies support email address[^1^]<br />
-how to install carx technologies games on android devices[^1^]</p>
-<h3>Real <h3>Realistic physics and graphics</h3>
-<p>CarX Street uses the CarX Technology engine, which simulates realistic car behavior and physics. You can feel the difference between front-wheel drive, rear-wheel drive, and all-wheel drive cars. You can also experience the effects of traction, torque, and inertia on your car. The game also has stunning graphics and animations that make the races more immersive and exciting. You can see the details of the cars, the environment, and the weather. You can also enjoy the dynamic day/night cycle and the changing lighting and shadows.</p>
-<h3>Dynamic day/night cycle</h3>
-<p>One of the most impressive features of CarX Street is the dynamic day/night cycle. The game has a realistic time system that changes according to your location and timezone. You can see the sun rise and set, the moon phases, and the stars in the sky. The day/night cycle also affects the gameplay and the atmosphere of the races. For example, you can race in different weather conditions, such as sunny, cloudy, rainy, or foggy. You can also encounter different traffic patterns, pedestrians, and events in the city.</p>
-<h2>How to download and install CarX Street</h2>
-<h3>Requirements and compatibility</h3>
-<p>CarX Street is a free racing game for Android devices. However, it is still in beta testing and may not be compatible with all devices or regions. To play CarX Street, you need to have an Android device that meets the following requirements:</p>
-<ul>
-<li>Android version: 5.0 or higher</li>
-<li>RAM: 2 GB or more</li>
-<li>Storage: 1 GB or more</li>
-<li>Internet connection: required for online features</li>
-</ul>
-<p>You can check if your device is compatible with CarX Street by visiting its official website or its Google Play Store page. You can also join the official Discord server or Facebook group to get updates and support from the developers and other players.</p>
-<h3>Steps to download and install CarX Street APK</h3>
-<p>If you want to download and install CarX Street APK on your Android device, you can follow these steps:</p>
-<ol>
-<li>Go to the official website of CarX Street and click on the "Download APK" button.</li>
-<li>Wait for the APK file to download on your device. You may need to allow unknown sources in your settings to install apps from outside the Google Play Store.</li>
-<li>Open the APK file and follow the instructions to install CarX Street on your device.</li>
-<li>Launch CarX Street and enjoy the game.</li>
-</ol>
-<h2>Conclusion</h2>
-<p>CarX Street is a free racing game for Android devices that lets you experience the thrill of being a street racer in a dynamic open world. You can choose from a variety of cars, customize them to your liking, and race against other players or the AI in different modes. You can also explore Sunset City, join clubs, challenge bosses, and collect rewards. CarX Street has realistic physics and graphics, a detailed car tuning system, and a dynamic day/night cycle. If you are a fan of racing games, you should definitely try CarX Street.</p>
-<h2>FAQs</h2>
-<h4>What is CarX Technology?</h4>
-<p>CarX Technology is a company that develops realistic car physics engines for games. They have created several popular racing games, such as CarX Drift Racing 2, CarX Highway Racing, and CarX Rally.</p>
-<h4>How can I get more cars in CarX Street?</h4>
-<p>You can get more cars in CarX Street by winning races, completing tasks, joining clubs, or buying them with in-game currency or real money.</p>
-<h4>How can I play CarX Street online?</h4>
-<p>You can play CarX Street online by connecting to a Wi-Fi or mobile data network. You can then join online races with other players or create your own lobby.</p>
-<h4>How can I give feedback or report bugs in CarX Street?</h4>
-<p>You can give feedback or report bugs in CarX Street by contacting the developers through their official website, Discord server, Facebook group, or email ([email protected]).</p>
-<h4>Is CarX Street available for iOS devices?</h4>
-<p>No, CarX Street is currently only available for Android devices. However, the developers have stated that they are working on an iOS version of the game.</p>
-: https://carx-street.com/ : https://play</p> 197e85843d<br />
-<br />
-<br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Drift Hunters MAX for Mac and Enjoy Over 25 Awesome Drift Cars.md
DELETED
@@ -1,139 +0,0 @@
|
|
1 |
-
|
2 |
-
<table>
|
3 |
-
<tr>
|
4 |
-
<td>
|
5 |
-
<h1>Drift Hunters Download Mac: How to Enjoy the Ultimate Drifting Game on Your Mac</h1>
|
6 |
-
<p>If you are a fan of drifting games, you might have heard of <strong>Drift Hunters</strong>, one of the most popular and realistic drifting games online. But did you know that you can also enjoy this game on your Mac? In this article, we will show you how to download and play Drift Hunters on your Mac, as well as some tips and tricks to master the game.</p>
|
7 |
-
<h2>drift hunters download mac</h2><br /><p><b><b>Download</b> ✪ <a href="https://urlin.us/2uSStl">https://urlin.us/2uSStl</a></b></p><br /><br />
|
8 |
-
</td>
|
9 |
-
</tr>
|
10 |
-
<tr>
|
11 |
-
<td>
|
12 |
-
<h2>What is Drift Hunters?</h2>
|
13 |
-
<p>Drift Hunters is a free-to-play <strong>3D drifting game</strong> with an excellent selection of tracks and cars. You can drift a variety of high-performance tuner cars on different exciting tracks, from racetracks to city streets. The game uses the <strong>UNITY engine</strong>, which means a completely 3D world with realistic physics and a solid frame rate.</p>
|
14 |
-
<p>Drift Hunters also features <strong>detailed car tuning</strong>, which allows you to customize every aspect of your vehicle, from engine and turbo upgrades to brake balance and camber adjustments. You can also change the color and rims of your car to suit your style. You can earn points by drifting and use them to buy new cars or upgrade existing ones.</p>
|
15 |
-
<p>Drift Hunters has <strong>stunning graphics</strong> that make the game look amazing on any device. The game also has different modes, such as full screen, theatre, or regular, to fit your preference. You can also sign in to save your progress and access more features, such as cloud save, VIP club, leaderboard, and Discord server.</p>
|
16 |
-
<p>Drift Hunters is available on <strong>browser, iOS, and Android platforms</strong>. You can play it on any device that supports Unity WebGL technology, which includes most modern browsers. You can also download the game from the App Store or Google Play if you prefer to play it on your mobile device. However, in this article, we will focus on how to play Drift Hunters on your Mac.</p>
|
17 |
-
</td>
|
18 |
-
</tr>
|
19 |
-
<tr>
|
20 |
-
<td>
|
21 |
-
<h2>Why Drift Hunters is the best drifting game for Mac users</h2>
|
22 |
-
<p>There are many drifting games out there, but Drift Hunters stands out as the best one for Mac users. Here are some of the reasons why:</p>
|
23 |
-
<p>drift hunters max download mac<br />
|
24 |
-
drift hunters game download for mac<br />
|
25 |
-
drift hunters mac os x download<br />
|
26 |
-
how to download drift hunters on mac<br />
|
27 |
-
drift hunters free download mac<br />
|
28 |
-
drift hunters online download mac<br />
|
29 |
-
drift hunters pc download mac<br />
|
30 |
-
drift hunters car game download mac<br />
|
31 |
-
drift hunters unblocked download mac<br />
|
32 |
-
drift hunters 2 download mac<br />
|
33 |
-
drift hunters mod apk download mac<br />
|
34 |
-
drift hunters android download mac<br />
|
35 |
-
drift hunters ios download mac<br />
|
36 |
-
drift hunters web game download mac<br />
|
37 |
-
drift hunters linux download mac<br />
|
38 |
-
drift hunters windows download mac<br />
|
39 |
-
drift hunters steam download mac<br />
|
40 |
-
drift hunters app store download mac<br />
|
41 |
-
drift hunters play store download mac<br />
|
42 |
-
drift hunters browser game download mac<br />
|
43 |
-
drift hunters mobile game download mac<br />
|
44 |
-
drift hunters desktop game download mac<br />
|
45 |
-
drift hunters laptop game download mac<br />
|
46 |
-
drift hunters simulator game download mac<br />
|
47 |
-
drift hunters racing game download mac<br />
|
48 |
-
drift hunters drifting game download mac<br />
|
49 |
-
drift hunters driving game download mac<br />
|
50 |
-
drift hunters tuning game download mac<br />
|
51 |
-
drift hunters customization game download mac<br />
|
52 |
-
drift hunters 3d game download mac<br />
|
53 |
-
drift hunters hd game download mac<br />
|
54 |
-
drift hunters realistic game download mac<br />
|
55 |
-
drift hunters physics game download mac<br />
|
56 |
-
drift hunters graphics game download mac<br />
|
57 |
-
drift hunters tracks game download mac<br />
|
58 |
-
drift hunters cars game download mac<br />
|
59 |
-
drift hunters maps game download mac<br />
|
60 |
-
drift hunters locations game download mac<br />
|
61 |
-
drift hunters modes game download mac<br />
|
62 |
-
drift hunters levels game download mac<br />
|
63 |
-
drift hunters challenges game download mac<br />
|
64 |
-
drift hunters missions game download mac<br />
|
65 |
-
drift hunters achievements game download mac<br />
|
66 |
-
drift hunters leaderboards game download mac<br />
|
67 |
-
drift hunters multiplayer game download mac<br />
|
68 |
-
drift hunters offline game download mac<br />
|
69 |
-
drift hunters cheats game download mac<br />
|
70 |
-
drift hunters hacks game download mac<br />
|
71 |
-
drift hunters tips game download mac<br />
|
72 |
-
drift hunters tricks game download mac</p>
|
73 |
-
<ul>
|
74 |
-
<li>Drift Hunters is <strong>compatible with Mac browsers</strong> that support Unity WebGL technology. This means that you don't need to download or install anything to play the game. You just need to visit the official website of Drift Hunters or one of its alternatives, such as Crazy Games or Paco Games, and start playing right away.</li>
|
75 |
-
<li>Drift Hunters offers <strong>smooth and responsive gameplay</strong> on low-spec devices. The game is optimized for performance and runs well on most Macs, even older models. You can also adjust the graphics quality and resolution to suit your device and internet speed.</li>
|
76 |
-
<li>Drift Hunters has a <strong>large and active community</strong> of drift enthusiasts. You can join the VIP club to access exclusive cars and tracks, as well as chat with other players on the Discord server. You can also compete with other players on the leaderboard and see how you rank among the best drifters in the world.</li>
|
77 |
-
</ul>
|
78 |
-
<p>Drift Hunters is a game that will keep you entertained and challenged for hours. Whether you are a beginner or a pro, you will find something to enjoy in this game.</p>
|
79 |
-
</td>
|
80 |
-
</tr>
|
81 |
-
<tr>
|
82 |
-
<td>
|
83 |
-
<h2>How to download and play Drift Hunters on your Mac</h2>
|
84 |
-
<p>Downloading and playing Drift Hunters on your Mac is very easy and simple. Just follow these steps:</p>
|
85 |
-
<ol>
|
86 |
-
<li><strong>Visit the official website of Drift Hunters</strong> or one of its alternatives, such as Crazy Games or Paco Games. You can use any browser that supports Unity WebGL technology, such as Safari, Chrome, or Firefox.</li>
|
87 |
-
<li><strong>Choose your preferred mode</strong>: full screen, theatre, or regular. Full screen mode will fill your entire screen with the game, while theatre mode will leave some space for the browser toolbar and tabs. Regular mode will show the game in a smaller window.</li>
|
88 |
-
<li><strong>Sign in to save your progress and access more features</strong>. You can sign in with your email, Facebook, or Google account. Signing in will allow you to save your cars, tracks, and settings on the cloud, as well as join the VIP club and the Discord server.</li>
|
89 |
-
<li><strong>Select your car, track, and settings</strong>. You can choose from over 25 cars and 10 tracks in the game, each with different characteristics and challenges. You can also customize your car's appearance and performance by tuning it up. You can change the settings for sound, graphics, controls, camera, and units according to your preference.</li>
|
90 |
-
<li><strong>Start drifting and earning points</strong> to unlock more cars and upgrades. You can use the arrow keys or WASD keys to control your car, and the spacebar to use the handbrake. You can also use a controller if you have one connected to your Mac. The game will reward you with points based on how long and how well you drift. You can use these points to buy new cars or upgrade existing ones.</li>
|
91 |
-
</ol>
|
92 |
-
<p>That's it! You are now ready to enjoy Drift Hunters on your Mac.</p>
|
93 |
-
</td>
|
94 |
-
</tr>
|
95 |
-
<tr>
|
96 |
-
<td>
|
97 |
-
<h2>Tips and tricks to master Drift Hunters on your Mac</h2>
|
98 |
-
<p>Drift Hunters is a game that requires skill and practice to master. Here are some tips and tricks that will help you improve your drifting skills on your Mac:</p>
|
99 |
-
<ul>
|
100 |
-
<li><strong>Use acceleration cautiously when approaching corners mid-drift</strong>. If you accelerate too much, you might lose control of your car and spin out. If you accelerate too little, you might lose momentum and end your drift prematurely. Try to find the right balance between speed and stability.</li>
|
101 |
-
<li><strong>Drift from side-to-side on straight roads to keep the drift alive</strong>. If you only drift on corners, you might miss out on some points. To maximize your score, try to drift continuously by switching from left to right on straight roads. This will also help you maintain your speed and prepare for the next corner.</li>
|
102 |
-
<li><strong>Tune-up your vehicles to find the sweet spot for maximum drift</strong>. Different cars have different settings that affect their drifting performance. For example, some cars might need more power or less weight to drift better. You can adjust these settings by tuning up your car in the garage. Experiment with different combinations until you find the one that works best for your car and style.</li>
|
103 |
-
<li><strong>Drive on maps with plenty of space for long, uninterrupted drifting</strong>. Some maps are more suitable for drifting than others. For example, the airport map has a lot of open space and long roads that allow you to drift for a long time without stopping. The mountain map, on the other hand, has many sharp turns and obstacles that might interrupt your drift. Choose the map that matches your skill level and preference.</li>
|
104 |
-
<li><strong>Watch video tutorials and learn from other players on the leaderboard and Discord server</strong>. If you want to learn more about drifting techniques and strategies, you can watch some video tutorials on YouTube or other platforms. You can also check out the leaderboard and see how the top players drift. You can even join the Discord server and chat with other drift fans, ask for tips, or challenge them to a friendly competition.</li>
|
105 |
-
</ul>
|
106 |
-
<p>Drift Hunters is a game that will challenge you to improve your drifting skills and have fun at the same time. With these tips and tricks, you will be able to master the game on your Mac in no time.</p>
|
107 |
-
</td>
|
108 |
-
</tr>
|
109 |
-
<tr>
|
110 |
-
<td>
|
111 |
-
<h2>Conclusion</h2>
|
112 |
-
<p>Drift Hunters is a fun, free-to-play game that will test your drifting skills on your Mac. It has a variety of cars, tracks, and features to suit your preferences and style. It is easy to download and play on your Mac browser with Unity WebGL support. It also has a helpful and friendly community of drift fans that you can join and interact with.</p>
|
113 |
-
<p>If you are looking for a game that will give you the thrill of drifting without leaving your Mac, Drift Hunters is the game for you. Download it today and start drifting like a pro!</p>
|
114 |
-
</td>
|
115 |
-
</tr>
|
116 |
-
<tr>
|
117 |
-
<td>
|
118 |
-
<h2>FAQs</h2>
|
119 |
-
<p>Here are some frequently asked questions about downloading Drift Hunters on Mac:</p>
|
120 |
-
<ol>
|
121 |
-
<li><strong>Is Drift Hunters safe to play on Mac?</strong></li>
|
122 |
-
<p>Yes, Drift Hunters is safe to play on Mac. The game does not require any downloads or installations, so it does not pose any risk to your device or data. The game also does not contain any viruses, malware, or spyware.</p>
|
123 |
-
<li><strong>How much does Drift Hunters cost to play on Mac?</strong></li>
|
124 |
-
<p>Drift Hunters is free to play on Mac. You do not need to pay anything to access the game or its features. However, if you want to support the developers and get some extra benefits, you can join the VIP club for a small fee.</p>
|
125 |
-
<li><strong>What are the minimum requirements to play Drift Hunters on Mac?</strong></li>
|
126 |
-
<p>The minimum requirements to play Drift Hunters on Mac are:</p>
|
127 |
-
<ul>
|
128 |
-
<li>A Mac device with an Intel processor and at least 4 GB of RAM</li>
|
129 |
-
<li>A browser that supports Unity WebGL technology, such as Safari, Chrome, or Firefox</li>
|
130 |
-
<li>A stable internet connection with at least 5 Mbps speed</li>
|
131 |
-
<li>A keyboard or a controller to control your car</li>
|
132 |
-
</ul>
|
133 |
-
<li><strong>Can I play Drift Hunters offline on Mac?</strong></li>
|
134 |
-
<p>No, you cannot play Drift Hunters offline on Mac. The game requires an internet connection to load and run properly. You also need an internet connection to save your progress and access the VIP club and Discord server.</p>
|
135 |
-
<li><strong>Can I play Drift Hunters with friends on Mac?</strong></li>
|
136 |
-
<p>Yes, you can play Drift Hunters with friends on Mac. You can invite your friends to join the VIP club and chat with them on the Discord server. You can also challenge them to a drift competition and see who can score higher on the leaderboard.</p>
|
137 |
-
</ol>
|
138 |
-
<br />
|
139 |
-
<br />
spaces/1phancelerku/anime-remove-background/Enjoy Royal Match on Your Android Device and Unlock Amazing Rewards.md
DELETED
@@ -1,157 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Royal Match: A Fun and Challenging Match-3 Puzzle Game</h1>
|
3 |
-
<h2>Introduction</h2>
|
4 |
-
<p>If you are looking for a new and exciting match-3 puzzle game to play on your mobile device, you might want to check out Royal Match. This game is developed by Dream Games, a Turkish studio that has a lot of experience in creating addictive and engaging puzzle games. In this game, you will help King Robert to rebuild his castle by solving match-3 puzzles, collecting coins, unlocking boosters, and decorating the rooms. You will also compete with millions of players in various events and climb the leaderboards. Royal Match is a game that combines puzzle-solving and castle-building elements, making it a unique and enjoyable experience for all kinds of players.</p>
|
5 |
-
<h2>What is Royal Match?</h2>
|
6 |
-
<h3>What is Royal Match?</h3>
|
7 |
-
<p>Royal Match is a free-to-play match-3 puzzle game that is available for both Android and iOS devices. You can download it from Google Play or the App Store. The game was released in February 2021 and has quickly become one of the top-grossing puzzle games on both platforms. It has also received positive reviews from critics and users alike, praising its graphics, gameplay, features, and content.</p>
|
8 |
-
<h2>royal match uptodown</h2><br /><p><b><b>Download Zip</b> ✺✺✺ <a href="https://jinyurl.com/2uNKrM">https://jinyurl.com/2uNKrM</a></b></p><br /><br />
|
9 |
-
<h3>How to play Royal Match?</h3>
|
10 |
-
<p>The gameplay of Royal Match is simple and intuitive. You just need to swipe your finger on the screen to match three or more tiles of the same color. By doing so, you will clear them from the board and complete the level's objective. Each level has a different objective, such as collecting a certain number of tiles, breaking boxes, clearing grass, or saving the king. You will also encounter various obstacles on the board, such as birds, boxes, potions, cupboards, diamonds, magic hats, coin safes, mysterious mailboxes, and piggy. You will need to clear them or use boosters to overcome them.</p>
|
11 |
-
<p>You have a limited number of moves for each level, so you need to plan your moves carefully and use them wisely. If you run out of moves before completing the objective, you will lose a life and have to try again. You can also buy extra moves with coins if you are close to finishing the level. Coins are the main currency of the game, which you can earn by completing levels, bonus levels, events, or opening chests. You can also buy coins with real money if you want to.</p>
|
12 |
-
<p>By completing levels, you will earn stars, which are needed to perform castle-decorating tasks. You will help King Robert to restore his castle by choosing from different options for each task. For example, you can choose the color of the walls, the type of furniture, or the style of the garden. Each task costs a certain number of stars, which vary depending on the difficulty of the level. You can also undo your choices if you change your mind later.</p>
|
13 |
-
<h2>Features of Royal Match</h2>
|
14 |
-
<h3>Unique match-3 gameplay and fun levels</h3>
|
15 |
-
<p>Royal Match offers a unique match-3 gameplay that is different from other games in the genre. It has fun and challenging levels that will test your skills and strategy. You will encounter various types of tiles, such as diamonds, rockets, TNTs, propellers, light balls, jesters hats, cannons, keys, locks, chests, crowns, hearts, stars, coins, and more. Each tile has a different effect when matched or activated.</p>
|
16 |
-
<p>For example, matching four tiles in a row or column will create a rocket that can clear an entire row or column when matched or tapped. Matching four tiles in a square will create a TNT that can explode and clear a 3x3 area when matched or tapped. Matching five tiles in a row or column will create a propeller that can clear all the tiles of the same color when matched or tapped. Matching five tiles in an L or T shape will create a light ball that can clear all the tiles in a cross shape when matched or tapped. Matching six tiles in a row or column will create a jester hat that can clear all the tiles on the board when matched or tapped.</p>
|
17 |
-
<p>The game has over 3000 levels to play, each with a different layout, objective, and difficulty. You will never get bored with the variety and challenge of the levels. You will also unlock new areas of the castle as you progress, such as the garden, the library, the kitchen, the bedroom, and more. Each area has its own theme and style, which you can customize according to your preference.</p>
|
18 |
-
<p>royal match game download uptodown<br />
|
19 |
-
royal match apk uptodown<br />
|
20 |
-
royal match android uptodown<br />
|
21 |
-
royal match mod apk uptodown<br />
|
22 |
-
royal match latest version uptodown<br />
|
23 |
-
royal match hack uptodown<br />
|
24 |
-
royal match free coins uptodown<br />
|
25 |
-
royal match unlimited lives uptodown<br />
|
26 |
-
royal match puzzle game uptodown<br />
|
27 |
-
royal match online uptodown<br />
|
28 |
-
royal match for pc uptodown<br />
|
29 |
-
royal match cheats uptodown<br />
|
30 |
-
royal match tips and tricks uptodown<br />
|
31 |
-
royal match walkthrough uptodown<br />
|
32 |
-
royal match levels uptodown<br />
|
33 |
-
royal match review uptodown<br />
|
34 |
-
royal match gameplay uptodown<br />
|
35 |
-
royal match update uptodown<br />
|
36 |
-
royal match offline uptodown<br />
|
37 |
-
royal match no ads uptodown<br />
|
38 |
-
royal match similar games uptodown<br />
|
39 |
-
royal match best strategies uptodown<br />
|
40 |
-
royal match challenges uptodown<br />
|
41 |
-
royal match rewards uptodown<br />
|
42 |
-
royal match boosters uptodown<br />
|
43 |
-
royal match characters uptodown<br />
|
44 |
-
royal match castle decoration uptodown<br />
|
45 |
-
royal match fun and addictive uptodown<br />
|
46 |
-
royal match how to play uptodown<br />
|
47 |
-
royal match features uptodown<br />
|
48 |
-
royal match graphics uptodown<br />
|
49 |
-
royal match sound effects uptodown<br />
|
50 |
-
royal match rating uptodown<br />
|
51 |
-
royal match feedback uptodown<br />
|
52 |
-
royal match support uptodown<br />
|
53 |
-
royal match bugs and issues uptodown<br />
|
54 |
-
royal match suggestions and ideas uptodown<br />
|
55 |
-
royal match community uptodown<br />
|
56 |
-
royal match facebook page uptodown<br />
|
57 |
-
royal match instagram account uptodown<br />
|
58 |
-
royal match twitter handle uptodown<br />
|
59 |
-
royal match youtube channel uptodown<br />
|
60 |
-
royal match developer team uptodown<br />
|
61 |
-
royal match dreamscapes studio uptodown<br />
|
62 |
-
royal match contact information uptodown<br />
|
63 |
-
royal match privacy policy uptodown<br />
|
64 |
-
royal match terms of service uptodown</p>
|
65 |
-
<h3>Powerful boosters and special treasures</h3>
|
66 |
-
<p>Royal Match also offers powerful boosters and special treasures that can help you complete the levels faster and easier. You can use these items before or during the level to get an advantage. Some of the boosters and treasures are:</p>
|
67 |
-
<ul>
|
68 |
-
<li><b>Hammer:</b> This booster can break any tile or obstacle on the board. You can use it before or during the level.</li>
|
69 |
-
<li><b>Glove:</b> This booster can swap any two adjacent tiles on the board. You can use it before or during the level.</li>
|
70 |
-
<li><b>Shuffle:</b> This booster can shuffle all the tiles on the board. You can use it before or during the level.</li>
|
71 |
-
<li><b>Rocket:</b> This treasure can clear an entire row or column on the board. You can use it during the level by tapping on it.</li>
|
72 |
-
<li><b>TNT:</b> This treasure can explode and clear a 3x3 area on the board. You can use it during the level by tapping on it.</li>
|
73 |
-
<li><b>Propeller:</b> This treasure can clear all the tiles of the same color on the board. You can use it during the level by tapping on it.</li>
|
74 |
-
<li><b>Light Ball:</b> This treasure can clear all the tiles in a cross shape on the board. You can use it during the level by tapping on it.</li>
|
75 |
-
<li><b>Jester Hat:</b> This treasure can clear all the tiles on the board. You can use it during the level by tapping on it.</li>
|
76 |
-
</ul>
|
77 |
-
<p>You can earn these boosters and treasures by completing levels, bonus levels, events, or opening chests. You can also buy them with coins or real money if you want to.</p>
|
78 |
-
<h3>Castle decoration and restoration</h3>
|
79 |
-
<p>Royal Match is not only a puzzle game, but also a castle-building game. You will help King Robert to restore his castle by completing tasks with stars. You will earn stars by completing levels, bonus levels, events, or opening chests. Each task costs a certain number of stars, which vary depending on the difficulty of the level.</p>
|
80 |
-
<p>You will have different options for each task, such as the color of the walls, the type of furniture, or the style of the garden. You can choose the option that suits your taste and personality. You can also undo your choices if you change your mind later.</p>
|
81 |
-
<p>By decorating and restoring the castle, you will unlock new stories and characters. You will meet King Robert's friends and foes, such as Princess Alice, Prince Arthur, Duke Henry, Lady Violet, and more. You will also discover the secrets and mysteries of the castle, such as the hidden treasure, the ghost, the curse, and more. You will enjoy the fun and humorous dialogues and interactions between the characters.</p>
|
82 |
-
<h3>Events and leaderboards</h3>
|
83 |
-
<p>Royal Match also offers various events and leaderboards that can make the game more exciting and rewarding. You can participate in these events and leaderboards by completing levels, bonus levels, events, or opening chests. Some of the events and leaderboards are:</p>
|
84 |
-
<ul>
|
85 |
-
<li><b>King's Challenge:</b> This event is a special bonus level that appears every few hours. You can play it to earn extra coins, stars, boosters, or treasures. The level is randomly generated and has a different objective and difficulty each time.</li>
|
86 |
-
<li><b>King's Tournament:</b> This event is a weekly competition where you can compete with other players in a series of levels. You can earn points by completing levels, bonus levels, events, or opening chests. The more points you earn, the higher you rank on the leaderboard. You can win amazing prizes based on your rank at the end of the week.</li>
|
87 |
-
<li><b>King's Club:</b> This event is a monthly subscription that gives you access to exclusive benefits and rewards. You can get unlimited lives, extra moves, free boosters, special chests, and more. You can also cancel your subscription at any time.</li>
|
88 |
-
<li><b>King's Guild:</b> This feature allows you to join or create a guild with other players. You can chat with your guild members, share tips and tricks, request or send lives, and more. You can also participate in guild events and challenges to earn guild points and rewards.</li>
|
89 |
-
</ul>
|
90 |
-
<h2>Tips and tricks for Royal Match</h2>
|
91 |
-
<h3>Pay attention to the objective and the hints</h3>
|
92 |
-
<p>One of the most important tips for Royal Match is to pay attention to the objective and the hints of each level. The objective tells you what you need to do to complete the level, such as collecting a certain number of tiles, breaking boxes, clearing grass, or saving the king. The hints show you which tiles or obstacles you need to focus on or clear first. They also show you which boosters or treasures you can use to help you.</p>
|
93 |
-
<p>You can see the objective and the hints at the top of the screen during the level. You can also tap on them to get more information or reminders. By following the objective and the hints, you can save your moves and time and complete the level faster and easier.</p>
|
94 |
-
<h3>Save resources for hard levels and use them wisely</h3>
|
95 |
-
<p>Another tip for Royal Match is to save your resources for hard levels and use them wisely. Your resources include your coins, stars, lives, moves, boosters, and treasures. You can earn these resources by completing levels, bonus levels, events, or opening chests. You can also buy them with real money if you want to.</p>
|
96 |
-
<p>However, you should not spend your resources recklessly or unnecessarily. You should save them for hard levels that are more difficult or require more moves to complete. You should also use them wisely and strategically, such as using boosters or treasures at the right time and place, buying extra moves only when you are close to finishing the level, or choosing the best option for each task.</p>
|
97 |
-
<p>By saving and using your resources wisely, you can avoid getting stuck or frustrated on hard levels and enjoy the game more.</p>
|
98 |
-
<h3>Mix and match boosters to get amazing results</h3>
|
99 |
-
<p>A third tip for Royal Match is to mix and match boosters to get amazing results. Boosters are special tiles that you can create by matching four or more tiles of the same color. They have different effects when matched or tapped, such as clearing rows, columns, areas, or colors. You can also mix and match boosters to create even more powerful effects, such as:</p>
|
100 |
-
<ul>
|
101 |
-
<li><b>Rocket + Rocket:</b> This combination will clear two rows or columns in a cross shape.</li>
|
102 |
-
<li><b>Rocket + TNT:</b> This combination will clear three rows or columns in a cross shape.</li>
|
103 |
-
<li><b>Rocket + Propeller:</b> This combination will clear all the tiles of the same color as the rocket.</li>
|
104 |
-
<li><b>Rocket + Light Ball:</b> This combination will clear four rows or columns in a cross shape.</li>
|
105 |
-
<li><b>Rocket + Jester Hat:</b> This combination will clear all the tiles on the board.</li>
|
106 |
-
<li><b>TNT + TNT:</b> This combination will explode and clear a 5x5 area on the board.</li>
|
107 |
-
<li><b>TNT + Propeller:</b> This combination will explode and clear all the tiles of the same color as the TNT.</li>
|
108 |
-
<li><b>TNT + Light Ball:</b> This combination will explode and clear all the tiles in a cross shape on the board.</li>
|
109 |
-
<li><b>TNT + Jester Hat:</b> This combination will explode and clear all the tiles on the board.</li>
|
110 |
-
<li><b>Propeller + Propeller:</b> This combination will clear two colors of tiles on the board.</li>
|
111 |
-
<li><b>Propeller + Light Ball:</b> This combination will clear three colors of tiles on the board.</li>
|
112 |
-
<li><b>Propeller + Jester Hat:</b> This combination will clear all the tiles on the board.</li>
|
113 |
-
<li><b>Light Ball + Light Ball:</b> This combination will clear four colors of tiles on the board.</li>
|
114 |
-
<li><b>Light Ball + Jester Hat:</b> This combination will clear all the tiles on the board.</li>
|
115 |
-
</ul>
|
116 |
-
<p>You can also mix and match boosters with treasures to get even more amazing results. For example, you can match a rocket with a rocket treasure to clear three rows or columns in a cross shape, or match a propeller with a propeller treasure to clear four colors of tiles on the board. You can experiment with different combinations and see what happens.</p>
|
117 |
-
<p>By mixing and matching boosters and treasures, you can clear more tiles and obstacles, complete the objective faster and easier, and earn more points and rewards.</p>
|
118 |
-
<h3>Clear obstacles and collect coins as soon as possible</h3>
|
119 |
-
<p>A fourth tip for Royal Match is to clear obstacles and collect coins as soon as possible. Obstacles are items that block your way or prevent you from matching tiles. They include birds, boxes, potions, cupboards, diamonds, magic hats, coin safes, mysterious mailboxes, piggy banks, and more. You need to clear them by matching tiles next to them, using boosters or treasures, or completing special tasks. Coins are items that you can collect by matching tiles next to them, using boosters or treasures, or opening chests. They are the main currency of the game, which you can use to buy extra moves, boosters, treasures, or other items.</p>
|
120 |
-
<p>You should try to clear obstacles and collect coins as soon as possible because they can help you in many ways. For example, clearing obstacles can give you more space and options to match tiles, activate boosters or treasures, or complete the objective. Collecting coins can give you more resources to buy extra moves, boosters, treasures, or other items. You can also use coins to decorate and restore the castle.</p>
|
121 |
-
<p>By clearing obstacles and collecting coins as soon as possible, you can make the game easier and more fun.</p>
|
122 |
-
<h3>Join a guild and get extra benefits</h3>
|
123 |
-
<p>A fifth tip for Royal Match is to join a guild and get extra benefits. A guild is a group of players who can chat, share, and cooperate with each other. You can join or create a guild by tapping on the guild icon at the bottom of the screen. You can also invite your friends to join your guild or search for other guilds to join.</p>
|
124 |
-
<p>By joining a guild, you can get extra benefits such as:</p>
|
125 |
-
<ul>
|
126 |
-
<li><b>Lives:</b> You can request or send lives to your guild members. Lives are needed to play levels, and they regenerate over time or can be bought with coins or real money. By requesting or sending lives, you can help yourself or your guild members to play more levels and have more fun.</li>
|
127 |
-
<li><b>Tips and tricks:</b> You can chat with your guild members and share tips and tricks for the game. You can ask for advice, give suggestions, or exchange opinions about the game. By chatting with your guild members, you can learn more about the game and improve your skills and strategy.</li>
|
128 |
-
<li><b>Events and challenges:</b> You can participate in guild events and challenges to earn guild points and rewards. Guild events and challenges are special tasks that you can complete with your guild members, such as collecting a certain number of tiles, clearing a certain number of levels, or reaching a certain score. By participating in guild events and challenges, you can compete with other guilds, earn more coins, stars, boosters, treasures, or other items, and have more fun.</li>
|
129 |
-
</ul>
|
130 |
-
<p>By joining a guild and getting extra benefits, you can make the game more social and rewarding.</p>
|
131 |
-
<h2>Conclusion</h2>
|
132 |
-
<p>Royal Match is a fun and challenging match-3 puzzle game that you can play on your mobile device. You can download it from Google Play or the App Store . In this game, you will help King Robert to rebuild his castle by solving match-3 puzzles, collecting coins, unlocking boosters, and decorating the rooms. You will also compete with millions of players in various events and climb the leaderboards. Royal Match is a game that combines puzzle-solving and castle-building elements, making it a unique and enjoyable experience for all kinds of players.</p>
|
133 |
-
<p>If you are looking for some tips and tricks for Royal Match, you can follow these suggestions:</p>
|
134 |
-
<ul>
|
135 |
-
<li><b>Pay attention to the objective and the hints</b></li>
|
136 |
-
<li><b>Save resources for hard levels and use them wisely</b></li>
|
137 |
-
<li><b>Mix and match boosters to get amazing results</b></li>
|
138 |
-
<li><b>Clear obstacles and collect coins as soon as possible</b></li>
|
139 |
-
<li><b>Join a guild and get extra benefits</b></li>
|
140 |
-
</ul>
|
141 |
-
<p>By following these tips and tricks, you can master the game and have more fun.</p>
|
142 |
-
<h2>FAQs</h2>
|
143 |
-
<p>Here are some frequently asked questions about Royal Match:</p>
|
144 |
-
<ol>
|
145 |
-
<li><b>How do I download Royal Match?</b></li>
|
146 |
-
<p>You can download Royal Match from Google Play or the App Store . The game is free to play but offers in-app purchases.</p>
|
147 |
-
<li><b>How do I update Royal Match?</b></li>
|
148 |
-
<p>You can update Royal Match by going to Google Play or the App Store and tapping on the update button. The game updates regularly with new levels, features, events, and bug fixes.</p>
|
149 |
-
<li><b>How do I contact Royal Match support?</b></li>
|
150 |
-
<p>You can contact Royal Match support by tapping on the settings icon at the top right corner of the screen and then tapping on the support button. You can also email them at [email protected] or visit their website at https://www.dreamgames.com/royalmatch/.</p>
|
151 |
-
<li><b>How do I connect Royal Match to Facebook?</b></li>
|
152 |
-
<p>You can connect Royal Match to Facebook by tapping on the settings icon at the top right corner of the screen and then tapping on the connect button. By connecting to Facebook, you can save your progress across different devices, invite your friends to play, or join a guild.</p>
|
153 |
-
<li><b>How do I reset Royal Match?</b></li>
|
154 |
-
<p>You can reset Royal Match by tapping on the settings icon at the top right corner of the screen and then tapping on the reset button. By resetting the game, you will lose all your progress, coins, stars, boosters, treasures, and other items. You will also disconnect from Facebook and your guild. You should only reset the game if you want to start over from the beginning.</p>
|
155 |
-
<p>I hope you enjoyed this article and found it helpful. If you have any questions or feedback, please leave a comment below. Thank you for reading and happy playing!</p>
|
156 |
-
<br />
|
157 |
-
<br />
spaces/AGITM/ToneCorrectionRecognition/app.py
DELETED
@@ -1,166 +0,0 @@
|
|
1 |
-
import gradio as gr
|
2 |
-
from spleeter.separator import Separator
|
3 |
-
from spleeter.audio.adapter import AudioAdapter
|
4 |
-
import spleeter.utils.logging as logging
|
5 |
-
import parselmouth
|
6 |
-
import numpy as np
|
7 |
-
import matplotlib.pyplot as plt
|
8 |
-
import seaborn as sns
|
9 |
-
import time
|
10 |
-
from matplotlib import rcParams
|
11 |
-
from you_get import common
|
12 |
-
|
13 |
-
|
14 |
-
# Main routine: separate the vocals and plot their pitch
|
15 |
-
def main(audio,bg_time,ed_time):
|
16 |
-
# Separate the vocals from the input audio
|
17 |
-
vocals=spleeter(audio,bg_time,ed_time)
|
18 |
-
# Mark the pitch
|
19 |
-
plt=pitch_mark(vocals)
|
20 |
-
# Return the pitch plot image and the extracted vocals
|
21 |
-
return [plt,vocals]
|
22 |
-
|
23 |
-
# Time-range check
|
24 |
-
def time_check(bg_time,ed_time):
|
25 |
-
# Return True only when both values are integers and ed_time > bg_time
|
26 |
-
if bg_time.isdigit() and ed_time.isdigit():
|
27 |
-
if int(ed_time)>int(bg_time):
|
28 |
-
return True
|
29 |
-
return False
|
30 |
-
|
31 |
-
# Audio separation
|
32 |
-
def spleeter(audio,bg_time,ed_time):
|
33 |
-
# Separate the audio and keep the vocal stem
|
34 |
-
separator = Separator('spleeter:2stems')
|
35 |
-
if time_check(bg_time,ed_time):
|
36 |
-
waveform=AudioAdapter.default().load_tf_waveform(audio,offset=int(bg_time), duration=int(ed_time)-int(bg_time))['waveform']
|
37 |
-
else:
|
38 |
-
waveform=AudioAdapter.default().load_tf_waveform(audio)['waveform']
|
39 |
-
vocals = separator.separate(waveform)['vocals']
|
40 |
-
# Return a tuple of the form (sample_rate, numpy array)
|
41 |
-
return (44100,vocals)
|
42 |
-
|
43 |
-
# Pitch marking
|
44 |
-
# Compute standard note names and frequencies (16.35 Hz is C0; each semitone is a factor of 2**(1/12))
|
45 |
-
def frequency(pitch):
|
46 |
-
return 16.35 * 2 ** (pitch /12 )
|
47 |
-
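# Build (note name, frequency) pairs for the reference lines that lie in or near the observed pitch range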
def generate_array(min_pitch, max_pitch):
|
48 |
-
array = []
|
49 |
-
names = ["C", "C#", "D", "D#",
|
50 |
-
"E", "F", "F#",
|
51 |
-
'G', 'G#', 'A', 'A#', 'B']
|
52 |
-
|
53 |
-
for pitch in range(120):
|
54 |
-
freq = frequency(pitch)
|
55 |
-
name = names[pitch % 12] + str(pitch // 12)
|
56 |
-
if frequency(pitch+1) > min_pitch and frequency(pitch-1) < max_pitch:
|
57 |
-
array.append([name, freq])
|
58 |
-
return array
|
59 |
-
|
60 |
-
def pitch_mark(wav):
|
61 |
-
config = {
|
62 |
-
"font.family":'serif',
|
63 |
-
"font.size": 8
|
64 |
-
}
|
65 |
-
sns.set()
|
66 |
-
rcParams.update(config)
|
67 |
-
|
68 |
-
wav = wav[1][:, 0]
|
69 |
-
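# Wrap the mono vocal track in a Praat Sound object; pitch is then extracted over a 50-3000 Hz search range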
snd = parselmouth.Sound(wav)
|
70 |
-
pitch = snd.to_pitch(pitch_floor=50, pitch_ceiling=3000)
|
71 |
-
plt.figure(figsize=(15,8),dpi=144)
|
72 |
-
|
73 |
-
pitch_values = pitch.selected_array['frequency']
|
74 |
-
# Mask outliers (top/bottom 1%) and unvoiced frames as NaN
|
75 |
-
pitch_values[pitch_values>np.nanpercentile(pitch_values, 99)] = np.nan
|
76 |
-
pitch_values[pitch_values<np.nanpercentile(pitch_values, 1)] = np.nan
|
77 |
-
pitch_values[pitch_values==0] = np.nan
|
78 |
-
|
79 |
-
min_pitch = np.nanmin(pitch_values)
|
80 |
-
max_pitch = np.nanmax(pitch_values)
|
81 |
-
|
82 |
-
# Plot the pitch values as scatter points
|
83 |
-
|
84 |
-
plt.plot(pitch.xs(), pitch_values, 'o', markersize=3, color='w')
|
85 |
-
plt.plot(pitch.xs(), pitch_values, 'o', markersize=1.5)
|
86 |
-
|
87 |
-
# Draw horizontal reference lines for the standard pitches
|
88 |
-
array = generate_array(min_pitch, max_pitch)
|
89 |
-
for name, freq in array:
|
90 |
-
plt.axhline(y=freq, color='blue', ls='-', lw=0.5)
|
91 |
-
plt.text(snd.xmax, freq, name, fontsize=8, ha='right', va='center', alpha=0.6)
|
92 |
-
plt.ylim(min_pitch-20, max_pitch+20)
|
93 |
-
plt.xlabel('Time [s]')
|
94 |
-
plt.xlim([snd.xmin, snd.xmax])
|
95 |
-
# Use a timestamp to build a unique file name
|
96 |
-
timestamp = int(time.time())
|
97 |
-
plt.savefig(str(timestamp)+'.png')
|
98 |
-
return str(timestamp)+'.png'
|
99 |
-
# you-get: download a video from a URL
|
100 |
-
def uget(url):
|
101 |
-
file_name = int(time.time())
|
102 |
-
common.output_filename = file_name
|
103 |
-
common.any_download(url,output_dir='.',merge=True)
|
104 |
-
return str(file_name)+'.mp4'
|
105 |
-
#css
|
106 |
-
css="""
|
107 |
-
#main{
|
108 |
-
background-color: #ffffff;
|
109 |
-
opacity: 0.8;
|
110 |
-
background-image: repeating-linear-gradient(45deg, #edffe1 25%, transparent 25%, transparent 75%, #edffe1 75%, #edffe1), repeating-linear-gradient(45deg, #edffe1 25%, #ffffff 25%, #ffffff 75%, #edffe1 75%, #edffe1);
|
111 |
-
background-position: 0 0, 40px 40px;
|
112 |
-
background-size: 80px 80px
|
113 |
-
}
|
114 |
-
#box{
|
115 |
-
margin-top: 45px;
|
116 |
-
}
|
117 |
-
"""
|
118 |
-
|
119 |
-
#gradio
|
120 |
-
with gr.Blocks(css=css) as app:
|
121 |
-
gr.HTML("""<h1 style="display: inline-block;">音准测试</h1>
|
122 |
-
<h3 style="display: inline-block;margin-left: 10px;">🛠一个音准测量工具</h3>
|
123 |
-
<div style="font-size: 12.5px;border: 1px dotted aqua;border-radius: 12px;padding: 5px;padding-left: 20px;background-color: aliceblue;">
|
124 |
-
<div>
|
125 |
-
<p>📒使用说明</p>
|
126 |
-
<ol style="padding-left: 20px">
|
127 |
-
<li>在下方上传音频/视频文件,或使用在线视频链接,输入需要分析的音频</li>
|
128 |
-
<li>点击“音准测试”按钮,生成音高图,右侧同时输出提取人声后的音频</li>
|
129 |
-
<li>输入开始和结束时间可以截取部分音频分析</li>
|
130 |
-
</ol>
|
131 |
-
</div>
|
132 |
-
<div>
|
133 |
-
<p>📝注意:10s音频分析时间不会超过30s,如卡住不动或出现error请尝试刷新</p>
|
134 |
-
</div>
|
135 |
-
</div>
|
136 |
-
""")
|
137 |
-
with gr.Row():
|
138 |
-
with gr.Column():
|
139 |
-
with gr.Tabs():
|
140 |
-
with gr.Tab("音频文件"):
|
141 |
-
audio = gr.Audio(type="filepath", label="音频文件")
|
142 |
-
btn_a=gr.Button("🎵音准测试")
|
143 |
-
with gr.Tab("视频文件"):
|
144 |
-
video = gr.Video(type="filepath", label="视频文件")
|
145 |
-
btn_b=gr.Button("🎵音准测试")
|
146 |
-
|
147 |
-
with gr.Column():
|
148 |
-
with gr.Box(elem_id="box"):
|
149 |
-
gr.HTML("""<p style="padding:0 px;margin:0 px;margin-top:10 px;font-size: 12.5px;color: gray;">📅开始时间和结束时间,直接输入数字,单位为s,不填默认为全长 <b>建议长度为10秒</b></p>""")
|
150 |
-
with gr.Row():
|
151 |
-
bg_time = gr.Textbox(type="text",label="开始时间")
|
152 |
-
ed_time = gr.Textbox(type="text",label="结束时间")
|
153 |
-
with gr.Box():
|
154 |
-
audio_output = gr.Audio(type="numpy", label="提取结果")
|
155 |
-
output_img=gr.Image(type="filepath", label="音准匹配图")
|
156 |
-
btn_a.click(main, [audio,bg_time,ed_time], [output_img,audio_output])
|
157 |
-
btn_b.click(main, [video,bg_time,ed_time], [output_img,audio_output])
|
158 |
-
gr.HTML("""
|
159 |
-
<div align=center >
|
160 |
-
<p style="font-size: 10px;color:gray">😃Credit:人声提取:<a href="https://github.com/deezer/spleeter">Spleeter</a> 音高标注:<a href="https://github.com/YannickJadoul/Parselmouth">Parselmouth</a>
|
161 |
-
网络视频获取:<a href="https://github.com/soimort/you-get">You-get</a> 方法来源:<a href="https://space.bilibili.com/245645656">码农高天</a>
|
162 |
-
</p>
|
163 |
-
<div align=center><img id="visitor-badge" alt="visitor badge" src="https://visitor-badge.laobi.icu/badge?page_id=AGITM/ToneCorrectionRecognition" /></div>
|
164 |
-
</div>""")
|
165 |
-
|
166 |
-
app.launch()
|
spaces/AHzizi/WaifuVoiceGen/text/symbols.py
DELETED
@@ -1,39 +0,0 @@
|
|
1 |
-
'''
|
2 |
-
Defines the set of symbols used in text input to the model.
|
3 |
-
'''
|
4 |
-
|
5 |
-
'''# japanese_cleaners
|
6 |
-
_pad = '_'
|
7 |
-
_punctuation = ',.!?-'
|
8 |
-
_letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧ↓↑ '
|
9 |
-
'''
|
10 |
-
|
11 |
-
'''# japanese_cleaners2
|
12 |
-
_pad = '_'
|
13 |
-
_punctuation = ',.!?-~…'
|
14 |
-
_letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧʦ↓↑ '
|
15 |
-
'''
|
16 |
-
|
17 |
-
'''# korean_cleaners
|
18 |
-
_pad = '_'
|
19 |
-
_punctuation = ',.!?…~'
|
20 |
-
_letters = 'ㄱㄴㄷㄹㅁㅂㅅㅇㅈㅊㅋㅌㅍㅎㄲㄸㅃㅆㅉㅏㅓㅗㅜㅡㅣㅐㅔ '
|
21 |
-
'''
|
22 |
-
|
23 |
-
'''# chinese_cleaners
|
24 |
-
_pad = '_'
|
25 |
-
_punctuation = ',。!?—…'
|
26 |
-
_letters = 'ㄅㄆㄇㄈㄉㄊㄋㄌㄍㄎㄏㄐㄑㄒㄓㄔㄕㄖㄗㄘㄙㄚㄛㄜㄝㄞㄟㄠㄡㄢㄣㄤㄥㄦㄧㄨㄩˉˊˇˋ˙ '
|
27 |
-
'''
|
28 |
-
|
29 |
-
# zh_ja_mixture_cleaners
|
30 |
-
_pad = '_'
|
31 |
-
_punctuation = ',.!?-~…'
|
32 |
-
_letters = 'AEINOQUabdefghijklmnoprstuvwyzʃʧʦɯɹəɥ⁼ʰ`→↓↑ '
|
33 |
-
|
34 |
-
|
35 |
-
# Export all symbols:
|
36 |
-
symbols = [_pad] + list(_punctuation) + list(_letters)
|
37 |
-
|
38 |
-
# Special symbol ids
|
39 |
-
SPACE_ID = symbols.index(" ")
|
spaces/AIConsultant/MusicGen/audiocraft/grids/audiogen/audiogen_pretrained_16khz_eval.py
DELETED
@@ -1,68 +0,0 @@
|
|
1 |
-
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
2 |
-
# All rights reserved.
|
3 |
-
#
|
4 |
-
# This source code is licensed under the license found in the
|
5 |
-
# LICENSE file in the root directory of this source tree.
|
6 |
-
|
7 |
-
"""
|
8 |
-
Evaluation with objective metrics for the pretrained AudioGen models.
|
9 |
-
This grid takes signature from the training grid and runs evaluation-only stage.
|
10 |
-
|
11 |
-
When running the grid for the first time, please use:
|
12 |
-
REGEN=1 dora grid audiogen.audiogen_pretrained_16khz_eval
|
13 |
-
and re-use the REGEN=1 option when the grid is changed to force regenerating it.
|
14 |
-
|
15 |
-
Note that you need the proper metrics external libraries setup to use all
|
16 |
-
the objective metrics activated in this grid. Refer to the README for more information.
|
17 |
-
"""
|
18 |
-
|
19 |
-
import os
|
20 |
-
|
21 |
-
from ..musicgen._explorers import GenerationEvalExplorer
|
22 |
-
from ...environment import AudioCraftEnvironment
|
23 |
-
from ... import train
|
24 |
-
|
25 |
-
|
26 |
-
def eval(launcher, batch_size: int = 32):
|
27 |
-
opts = {
|
28 |
-
'dset': 'audio/audiocaps_16khz',
|
29 |
-
'solver/audiogen/evaluation': 'objective_eval',
|
30 |
-
'execute_only': 'evaluate',
|
31 |
-
'+dataset.evaluate.batch_size': batch_size,
|
32 |
-
'+metrics.fad.tf.batch_size': 32,
|
33 |
-
}
|
34 |
-
# binary for FAD computation: replace this path with your own path
|
35 |
-
metrics_opts = {
|
36 |
-
'metrics.fad.tf.bin': '/data/home/jadecopet/local/usr/opt/google-research'
|
37 |
-
}
|
38 |
-
opt1 = {'generate.lm.use_sampling': True, 'generate.lm.top_k': 250, 'generate.lm.top_p': 0.}
|
39 |
-
opt2 = {'transformer_lm.two_step_cfg': True}
|
40 |
-
|
41 |
-
sub = launcher.bind(opts)
|
42 |
-
sub.bind_(metrics_opts)
|
43 |
-
|
44 |
-
# base objective metrics
|
45 |
-
sub(opt1, opt2)
|
46 |
-
|
47 |
-
|
48 |
-
@GenerationEvalExplorer
|
49 |
-
def explorer(launcher):
|
50 |
-
partitions = AudioCraftEnvironment.get_slurm_partitions(['team', 'global'])
|
51 |
-
launcher.slurm_(gpus=4, partition=partitions)
|
52 |
-
|
53 |
-
if 'REGEN' not in os.environ:
|
54 |
-
folder = train.main.dora.dir / 'grids' / __name__.split('.', 2)[-1]
|
55 |
-
with launcher.job_array():
|
56 |
-
for sig in folder.iterdir():
|
57 |
-
if not sig.is_symlink():
|
58 |
-
continue
|
59 |
-
xp = train.main.get_xp_from_sig(sig.name)
|
60 |
-
launcher(xp.argv)
|
61 |
-
return
|
62 |
-
|
63 |
-
audiogen_base = launcher.bind(solver="audiogen/audiogen_base_16khz")
|
64 |
-
audiogen_base.bind_({'autocast': False, 'fsdp.use': True})
|
65 |
-
|
66 |
-
audiogen_base_medium = audiogen_base.bind({'continue_from': '//pretrained/facebook/audiogen-medium'})
|
67 |
-
audiogen_base_medium.bind_({'model/lm/model_scale': 'medium'})
|
68 |
-
eval(audiogen_base_medium, batch_size=128)
|
spaces/AIConsultant/MusicGen/audiocraft/grids/compression/encodec_musicgen_32khz.py
DELETED
@@ -1,34 +0,0 @@
|
|
1 |
-
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
2 |
-
# All rights reserved.
|
3 |
-
#
|
4 |
-
# This source code is licensed under the license found in the
|
5 |
-
# LICENSE file in the root directory of this source tree.
|
6 |
-
|
7 |
-
"""
|
8 |
-
Grid search file, simply list all the exp you want in `explorer`.
|
9 |
-
Any new exp added there will be scheduled.
|
10 |
-
You can cancel and experiment by commenting its line.
|
11 |
-
|
12 |
-
This grid shows how to train a MusicGen EnCodec model at 32 kHz.
|
13 |
-
"""
|
14 |
-
|
15 |
-
from ._explorers import CompressionExplorer
|
16 |
-
from ...environment import AudioCraftEnvironment
|
17 |
-
|
18 |
-
|
19 |
-
@CompressionExplorer
|
20 |
-
def explorer(launcher):
|
21 |
-
partitions = AudioCraftEnvironment.get_slurm_partitions(['team', 'global'])
|
22 |
-
launcher.slurm_(gpus=8, partition=partitions)
|
23 |
-
# use configuration for MusicGen's EnCodec model trained on monophonic audio sampled at 32 kHz
|
24 |
-
# MusicGen's EnCodec is trained with a total stride of 640 leading to a frame rate of 50 hz
|
25 |
-
launcher.bind_(solver='compression/encodec_musicgen_32khz')
|
26 |
-
# replace this by the desired music dataset
|
27 |
-
launcher.bind_(dset='internal/music_400k_32khz')
|
28 |
-
# launch xp
|
29 |
-
launcher()
|
30 |
-
launcher({
|
31 |
-
'metrics.visqol.bin': '/data/home/jadecopet/local/usr/opt/visqol',
|
32 |
-
'label': 'visqol',
|
33 |
-
'evaluate.metrics.visqol': True
|
34 |
-
})
|
spaces/AIFILMS/StyleGANEX/models/bisenet/README.md
DELETED
@@ -1,68 +0,0 @@
|
|
1 |
-
# face-parsing.PyTorch
|
2 |
-
|
3 |
-
<p align="center">
|
4 |
-
<a href="https://github.com/zllrunning/face-parsing.PyTorch">
|
5 |
-
<img class="page-image" src="https://github.com/zllrunning/face-parsing.PyTorch/blob/master/6.jpg" >
|
6 |
-
</a>
|
7 |
-
</p>
|
8 |
-
|
9 |
-
### Contents
|
10 |
-
- [Training](#training)
|
11 |
-
- [Demo](#Demo)
|
12 |
-
- [References](#references)
|
13 |
-
|
14 |
-
## Training
|
15 |
-
|
16 |
-
1. Prepare training data:
|
17 |
-
-- download [CelebAMask-HQ dataset](https://github.com/switchablenorms/CelebAMask-HQ)
|
18 |
-
|
19 |
-
-- change file path in the `prepropess_data.py` and run
|
20 |
-
```Shell
|
21 |
-
python prepropess_data.py
|
22 |
-
```
|
23 |
-
|
24 |
-
2. Train the model using CelebAMask-HQ dataset:
|
25 |
-
Just run the train script:
|
26 |
-
```
|
27 |
-
$ CUDA_VISIBLE_DEVICES=0,1 python -m torch.distributed.launch --nproc_per_node=2 train.py
|
28 |
-
```
|
29 |
-
|
30 |
-
If you do not wish to train the model, you can download [our pre-trained model](https://drive.google.com/open?id=154JgKpzCPW82qINcVieuPH3fZ2e0P812) and save it in `res/cp`.
|
31 |
-
|
32 |
-
|
33 |
-
## Demo
|
34 |
-
1. Evaluate the trained model using:
|
35 |
-
```Shell
|
36 |
-
# evaluate using GPU
|
37 |
-
python test.py
|
38 |
-
```
|
39 |
-
|
40 |
-
## Face makeup using parsing maps
|
41 |
-
[**face-makeup.PyTorch**](https://github.com/zllrunning/face-makeup.PyTorch)
|
42 |
-
<table>
|
43 |
-
|
44 |
-
<tr>
|
45 |
-
<th> </th>
|
46 |
-
<th>Hair</th>
|
47 |
-
<th>Lip</th>
|
48 |
-
</tr>
|
49 |
-
|
50 |
-
<!-- Line 1: Original Input -->
|
51 |
-
<tr>
|
52 |
-
<td><em>Original Input</em></td>
|
53 |
-
<td><img src="makeup/116_ori.png" height="256" width="256" alt="Original Input"></td>
|
54 |
-
<td><img src="makeup/116_lip_ori.png" height="256" width="256" alt="Original Input"></td>
|
55 |
-
</tr>
|
56 |
-
|
57 |
-
<!-- Line 3: Color -->
|
58 |
-
<tr>
|
59 |
-
<td>Color</td>
|
60 |
-
<td><img src="makeup/116_1.png" height="256" width="256" alt="Color"></td>
|
61 |
-
<td><img src="makeup/116_3.png" height="256" width="256" alt="Color"></td>
|
62 |
-
</tr>
|
63 |
-
|
64 |
-
</table>
|
65 |
-
|
66 |
-
|
67 |
-
## References
|
68 |
-
- [BiSeNet](https://github.com/CoinCheung/BiSeNet)
|
spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/audio/__init__.py
DELETED
File without changes
|
spaces/AIGC-Audio/AudioGPT/NeuralSeq/tasks/tts/synta.py
DELETED
@@ -1,25 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
import torch
|
3 |
-
import torch.nn.functional as F
|
4 |
-
from torch import nn
|
5 |
-
|
6 |
-
from modules.tts.syntaspeech.syntaspeech import SyntaSpeech
|
7 |
-
from tasks.tts.ps_adv import PortaSpeechAdvTask
|
8 |
-
from utils.hparams import hparams
|
9 |
-
|
10 |
-
|
11 |
-
class SyntaSpeechTask(PortaSpeechAdvTask):
|
12 |
-
def build_tts_model(self):
|
13 |
-
ph_dict_size = len(self.token_encoder)
|
14 |
-
word_dict_size = len(self.word_encoder)
|
15 |
-
self.model = SyntaSpeech(ph_dict_size, word_dict_size, hparams)
|
16 |
-
|
17 |
-
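# Split trainable parameters into groups (all / duration predictor / BERT / remainder) so the parent task can handle them separately, e.g. with different optimizers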
self.gen_params = [p for p in self.model.parameters() if p.requires_grad]
|
18 |
-
self.dp_params = [p for k, p in self.model.named_parameters() if (('dur_predictor' in k) and p.requires_grad)]
|
19 |
-
self.gen_params_except_dp = [p for k, p in self.model.named_parameters() if (('dur_predictor' not in k) and p.requires_grad)]
|
20 |
-
self.bert_params = [p for k, p in self.model.named_parameters() if (('bert' in k) and p.requires_grad)]
|
21 |
-
self.gen_params_except_bert_and_dp = [p for k, p in self.model.named_parameters() if ('dur_predictor' not in k) and ('bert' not in k) and p.requires_grad ]
|
22 |
-
|
23 |
-
self.use_bert = True if len(self.bert_params) > 0 else False
|
24 |
-
|
25 |
-
|
|
spaces/AIGC-Audio/Make_An_Audio/ldm/modules/losses_audio/vggishish/train_vggishish.py
DELETED
@@ -1,199 +0,0 @@
|
|
1 |
-
from loss import WeightedCrossEntropy
|
2 |
-
import random
|
3 |
-
|
4 |
-
import numpy as np
|
5 |
-
import torch
|
6 |
-
import torchvision
|
7 |
-
from omegaconf import OmegaConf
|
8 |
-
from torch.utils.data.dataloader import DataLoader
|
9 |
-
from tqdm import tqdm
|
10 |
-
|
11 |
-
from dataset import VGGSound
|
12 |
-
from transforms import Crop, StandardNormalizeAudio, ToTensor
|
13 |
-
from logger import LoggerWithTBoard
|
14 |
-
from metrics import metrics
|
15 |
-
from model import VGGishish
|
16 |
-
|
17 |
-
if __name__ == "__main__":
|
18 |
-
cfg_cli = OmegaConf.from_cli()
|
19 |
-
cfg_yml = OmegaConf.load(cfg_cli.config)
|
20 |
-
# the latter arguments are prioritized
|
21 |
-
cfg = OmegaConf.merge(cfg_yml, cfg_cli)
|
22 |
-
OmegaConf.set_readonly(cfg, True)
|
23 |
-
print(OmegaConf.to_yaml(cfg))
|
24 |
-
|
25 |
-
logger = LoggerWithTBoard(cfg)
|
26 |
-
|
27 |
-
random.seed(cfg.seed)
|
28 |
-
np.random.seed(cfg.seed)
|
29 |
-
torch.manual_seed(cfg.seed)
|
30 |
-
torch.cuda.manual_seed_all(cfg.seed)
|
31 |
-
# makes iterations faster (in this case 30%) if your inputs are of a fixed size
|
32 |
-
# https://discuss.pytorch.org/t/what-does-torch-backends-cudnn-benchmark-do/5936/3
|
33 |
-
torch.backends.cudnn.benchmark = True
|
34 |
-
|
35 |
-
transforms = [
|
36 |
-
StandardNormalizeAudio(cfg.mels_path),
|
37 |
-
]
|
38 |
-
if cfg.cropped_size not in [None, 'None', 'none']:
|
39 |
-
logger.print_logger.info(f'Using cropping {cfg.cropped_size}')
|
40 |
-
transforms.append(Crop(cfg.cropped_size))
|
41 |
-
transforms.append(ToTensor())
|
42 |
-
transforms = torchvision.transforms.transforms.Compose(transforms)
|
43 |
-
|
44 |
-
datasets = {
|
45 |
-
'train': VGGSound('train', cfg.mels_path, transforms),
|
46 |
-
'valid': VGGSound('valid', cfg.mels_path, transforms),
|
47 |
-
'test': VGGSound('test', cfg.mels_path, transforms),
|
48 |
-
}
|
49 |
-
|
50 |
-
loaders = {
|
51 |
-
'train': DataLoader(datasets['train'], batch_size=cfg.batch_size, shuffle=True, drop_last=True,
|
52 |
-
num_workers=cfg.num_workers, pin_memory=True),
|
53 |
-
'valid': DataLoader(datasets['valid'], batch_size=cfg.batch_size,
|
54 |
-
num_workers=cfg.num_workers, pin_memory=True),
|
55 |
-
'test': DataLoader(datasets['test'], batch_size=cfg.batch_size,
|
56 |
-
num_workers=cfg.num_workers, pin_memory=True),
|
57 |
-
}
|
58 |
-
|
59 |
-
device = torch.device(cfg.device if torch.cuda.is_available() else 'cpu')
|
60 |
-
|
61 |
-
model = VGGishish(cfg.conv_layers, cfg.use_bn, num_classes=len(datasets['train'].target2label))
|
62 |
-
model = model.to(device)
|
63 |
-
param_num = logger.log_param_num(model)
|
64 |
-
|
65 |
-
if cfg.optimizer == 'adam':
|
66 |
-
optimizer = torch.optim.Adam(
|
67 |
-
model.parameters(), lr=cfg.learning_rate, betas=cfg.betas, weight_decay=cfg.weight_decay)
|
68 |
-
elif cfg.optimizer == 'sgd':
|
69 |
-
optimizer = torch.optim.SGD(
|
70 |
-
model.parameters(), lr=cfg.learning_rate, momentum=cfg.momentum, weight_decay=cfg.weight_decay)
|
71 |
-
else:
|
72 |
-
raise NotImplementedError
|
73 |
-
|
74 |
-
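# Optionally weight the loss by inverse class frequency to counteract class imbalance in the training set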
if cfg.cls_weights_in_loss:
|
75 |
-
weights = 1 / datasets['train'].class_counts
|
76 |
-
else:
|
77 |
-
weights = torch.ones(len(datasets['train'].target2label))
|
78 |
-
criterion = WeightedCrossEntropy(weights.to(device))
|
79 |
-
|
80 |
-
# loop over the train and validation multiple times (typical PT boilerplate)
|
81 |
-
no_change_epochs = 0
|
82 |
-
best_valid_loss = float('inf')
|
83 |
-
early_stop_triggered = False
|
84 |
-
|
85 |
-
for epoch in range(cfg.num_epochs):
|
86 |
-
|
87 |
-
for phase in ['train', 'valid']:
|
88 |
-
if phase == 'train':
|
89 |
-
model.train()
|
90 |
-
else:
|
91 |
-
model.eval()
|
92 |
-
|
93 |
-
running_loss = 0
|
94 |
-
preds_from_each_batch = []
|
95 |
-
targets_from_each_batch = []
|
96 |
-
|
97 |
-
prog_bar = tqdm(loaders[phase], f'{phase} ({epoch})', ncols=0)
|
98 |
-
for i, batch in enumerate(prog_bar):
|
99 |
-
inputs = batch['input'].to(device)
|
100 |
-
targets = batch['target'].to(device)
|
101 |
-
|
102 |
-
# zero the parameter gradients
|
103 |
-
optimizer.zero_grad()
|
104 |
-
|
105 |
-
# forward + backward + optimize
|
106 |
-
with torch.set_grad_enabled(phase == 'train'):
|
107 |
-
outputs = model(inputs)
|
108 |
-
loss = criterion(outputs, targets, to_weight=phase == 'train')
|
109 |
-
|
110 |
-
if phase == 'train':
|
111 |
-
loss.backward()
|
112 |
-
optimizer.step()
|
113 |
-
|
114 |
-
# loss
|
115 |
-
running_loss += loss.item()
|
116 |
-
|
117 |
-
# for metrics calculation later on
|
118 |
-
preds_from_each_batch += [outputs.detach().cpu()]
|
119 |
-
targets_from_each_batch += [targets.cpu()]
|
120 |
-
|
121 |
-
# iter logging
|
122 |
-
if i % 50 == 0:
|
123 |
-
logger.log_iter_loss(loss.item(), epoch*len(loaders[phase])+i, phase)
|
124 |
-
# tracks loss in the tqdm progress bar
|
125 |
-
prog_bar.set_postfix(loss=loss.item())
|
126 |
-
|
127 |
-
# logging loss
|
128 |
-
epoch_loss = running_loss / len(loaders[phase])
|
129 |
-
logger.log_epoch_loss(epoch_loss, epoch, phase)
|
130 |
-
|
131 |
-
# logging metrics
|
132 |
-
preds_from_each_batch = torch.cat(preds_from_each_batch)
|
133 |
-
targets_from_each_batch = torch.cat(targets_from_each_batch)
|
134 |
-
metrics_dict = metrics(targets_from_each_batch, preds_from_each_batch)
|
135 |
-
logger.log_epoch_metrics(metrics_dict, epoch, phase)
|
136 |
-
|
137 |
-
# Early stopping
|
138 |
-
if phase == 'valid':
|
139 |
-
if epoch_loss < best_valid_loss:
|
140 |
-
no_change_epochs = 0
|
141 |
-
best_valid_loss = epoch_loss
|
142 |
-
logger.log_best_model(model, epoch_loss, epoch, optimizer, metrics_dict)
|
143 |
-
else:
|
144 |
-
no_change_epochs += 1
|
145 |
-
logger.print_logger.info(
|
146 |
-
f'Valid loss has not changed for {no_change_epochs} epochs (patience: {cfg.patience})'
|
147 |
-
)
|
148 |
-
if no_change_epochs >= cfg.patience:
|
149 |
-
early_stop_triggered = True
|
150 |
-
|
151 |
-
if early_stop_triggered:
|
152 |
-
logger.print_logger.info(f'Training is early stopped @ {epoch}')
|
153 |
-
break
|
154 |
-
|
155 |
-
logger.print_logger.info('Finished Training')
|
156 |
-
|
157 |
-
# loading the best model
|
158 |
-
ckpt = torch.load(logger.best_model_path)
|
159 |
-
model.load_state_dict(ckpt['model'])
|
160 |
-
logger.print_logger.info(f'Loading the best model from {logger.best_model_path}')
|
161 |
-
logger.print_logger.info((f'The model was trained for {ckpt["epoch"]} epochs. Loss: {ckpt["loss"]:.4f}'))
|
162 |
-
|
163 |
-
# Testing the model
|
164 |
-
model.eval()
|
165 |
-
running_loss = 0
|
166 |
-
preds_from_each_batch = []
|
167 |
-
targets_from_each_batch = []
|
168 |
-
|
169 |
-
for i, batch in enumerate(loaders['test']):
|
170 |
-
inputs = batch['input'].to(device)
|
171 |
-
targets = batch['target'].to(device)
|
172 |
-
|
173 |
-
# zero the parameter gradients
|
174 |
-
optimizer.zero_grad()
|
175 |
-
|
176 |
-
# forward + backward + optimize
|
177 |
-
with torch.set_grad_enabled(False):
|
178 |
-
outputs = model(inputs)
|
179 |
-
loss = criterion(outputs, targets, to_weight=False)
|
180 |
-
|
181 |
-
# loss
|
182 |
-
running_loss += loss.item()
|
183 |
-
|
184 |
-
# for metrics calculation later on
|
185 |
-
preds_from_each_batch += [outputs.detach().cpu()]
|
186 |
-
targets_from_each_batch += [targets.cpu()]
|
187 |
-
|
188 |
-
# logging metrics
|
189 |
-
preds_from_each_batch = torch.cat(preds_from_each_batch)
|
190 |
-
targets_from_each_batch = torch.cat(targets_from_each_batch)
|
191 |
-
test_metrics_dict = metrics(targets_from_each_batch, preds_from_each_batch)
|
192 |
-
test_metrics_dict['avg_loss'] = running_loss / len(loaders['test'])
|
193 |
-
test_metrics_dict['param_num'] = param_num
|
194 |
-
# TODO: I have no idea why tboard doesn't keep metrics (hparams) when
|
195 |
-
# I run this experiment from cli: `python train_vggishish.py config=./configs/vggish.yaml`
|
196 |
-
# while when I run it in the vscode debugger the metrics are logged (wtf)
|
197 |
-
logger.log_test_metrics(test_metrics_dict, dict(cfg), ckpt['epoch'])
|
198 |
-
|
199 |
-
logger.print_logger.info('Finished the experiment')
|
|
|
|
|
spaces/AIWaves/Software_Company/src/agents/Agent/Agent.py
DELETED
@@ -1,243 +0,0 @@
|
|
1 |
-
# coding=utf-8
|
2 |
-
# Copyright 2023 The AIWaves Inc. team.
|
3 |
-
|
4 |
-
#
|
5 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
6 |
-
# you may not use this file except in compliance with the License.
|
7 |
-
# You may obtain a copy of the License at
|
8 |
-
#
|
9 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
10 |
-
#
|
11 |
-
# Unless required by applicable law or agreed to in writing, software
|
12 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
13 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
14 |
-
# See the License for the specific language governing permissions and
|
15 |
-
# limitations under the License.
|
16 |
-
"""LLM autonoumous agent"""
|
17 |
-
from LLM.base_LLM import *
|
18 |
-
from Component import *
|
19 |
-
from Action import Action
|
20 |
-
from Prompt import *
|
21 |
-
|
22 |
-
headers = {
|
23 |
-
"Content-Type": "text/event-stream",
|
24 |
-
"Cache-Control": "no-cache",
|
25 |
-
"X-Accel-Buffering": "no",
|
26 |
-
}
|
27 |
-
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
-
class Agent:
|
32 |
-
"""
|
33 |
-
Autonomous agent; initialized from the SOP JSON.
|
34 |
-
"""
|
35 |
-
|
36 |
-
# Agent should have args: agents,states
|
37 |
-
def __init__(self, name, agent_state_roles, **kwargs) -> None:
|
38 |
-
self.state_roles = agent_state_roles
|
39 |
-
self.name = name
|
40 |
-
|
41 |
-
self.style = kwargs["style"]
|
42 |
-
self.LLMs = kwargs["LLMs"]
|
43 |
-
self.LLM = None
|
44 |
-
self.is_user = kwargs["is_user"]
|
45 |
-
self.begins = kwargs["begins"] if "begins" in kwargs else False
|
46 |
-
self.current_role = ""
|
47 |
-
self.long_term_memory = []
|
48 |
-
self.short_term_memory = ""
|
49 |
-
self.current_state = None
|
50 |
-
self.first_speak = True
|
51 |
-
self.environment = None
|
52 |
-
|
53 |
-
|
54 |
-
@classmethod
|
55 |
-
def from_config(cls, config_path):
|
56 |
-
"""
|
57 |
-
Initialize agents based on json file
|
58 |
-
Return:
|
59 |
-
agents(dict) : key:agent_name;value:class(Agent)
|
60 |
-
names_to_roles(dict) : key:state_name value:(dict; (key:agent_name ; value:agent_role))
|
61 |
-
roles_to_names(dict) : key:state_name value:(dict; (key:agent_role ; value:agent_name))
|
62 |
-
"""
|
63 |
-
with open(config_path) as f:
|
64 |
-
config = json.load(f)
|
65 |
-
|
66 |
-
roles_to_names = {}
|
67 |
-
names_to_roles = {}
|
68 |
-
agents = {}
|
69 |
-
user_names = json.loads(os.environ["User_Names"]) if "User_Names" in os.environ else []
|
70 |
-
for agent_name, agent_dict in config["agents"].items():
|
71 |
-
agent_state_roles = {}
|
72 |
-
agent_LLMs = {}
|
73 |
-
agent_begins = {}
|
74 |
-
for state_name, agent_role in agent_dict["roles"].items():
|
75 |
-
|
76 |
-
agent_begins[state_name] = {}
|
77 |
-
|
78 |
-
if state_name not in roles_to_names:
|
79 |
-
roles_to_names[state_name] = {}
|
80 |
-
if state_name not in names_to_roles:
|
81 |
-
names_to_roles[state_name] = {}
|
82 |
-
roles_to_names[state_name][agent_role] = agent_name
|
83 |
-
names_to_roles[state_name][agent_name] = agent_role
|
84 |
-
agent_state_roles[state_name] = agent_role
|
85 |
-
current_state = config["states"][state_name]
|
86 |
-
|
87 |
-
current_state_begin_role = current_state["begin_role"] if "begin_role" in current_state else current_state["roles"][0]
|
88 |
-
agent_begins[state_name]["is_begin"] = current_state_begin_role==agent_role if "begin_role" in current_state else False
|
89 |
-
agent_begins[state_name]["begin_query"] = current_state["begin_query"] if "begin_query" in current_state else " "
|
90 |
-
agent_LLMs[state_name] = init_LLM(f"logs/{agent_name}",**current_state["agent_states"][agent_role])
|
91 |
-
agents[agent_name] = cls(
|
92 |
-
agent_name,
|
93 |
-
agent_state_roles,
|
94 |
-
LLMs=agent_LLMs,
|
95 |
-
is_user=agent_name in user_names,
|
96 |
-
style = agent_dict["style"],
|
97 |
-
begins = agent_begins
|
98 |
-
)
|
99 |
-
assert len(config["agents"].keys()) != 2 or (roles_to_names[config["root"]][config["states"][config["root"]]["begin_role"]] not in user_names and "begin_query" in config["states"][config["root"]]),"In a single-agent scenario, there must be an opening statement and it must be the agent"
|
100 |
-
return agents, roles_to_names, names_to_roles
|
101 |
-
|
102 |
-
def step(self, current_state,input=""):
|
103 |
-
"""
|
104 |
-
return actions by current state and environment
|
105 |
-
Return: action(Action)
|
106 |
-
"""
|
107 |
-
|
108 |
-
current_state.chat_nums +=1
|
109 |
-
state_begin = current_state.is_begin
|
110 |
-
agent_begin = self.begins[current_state.name]["is_begin"]
|
111 |
-
self.begins[current_state.name]["is_begin"] = False
|
112 |
-
current_state.is_begin = False
|
113 |
-
environment = self.environment
|
114 |
-
|
115 |
-
self.current_state = current_state
|
116 |
-
# 先根据当前环境更新信息
|
117 |
-
# First update the information according to the current environment
|
118 |
-
|
119 |
-
response = " "
|
120 |
-
res_dict = {}
|
121 |
-
|
122 |
-
if self.is_user:
|
123 |
-
response = f"{self.name}:{input}"
|
124 |
-
else:
|
125 |
-
if len(environment.shared_memory["long_term_memory"])>0:
|
126 |
-
current_history = self.observe()
|
127 |
-
self.long_term_memory.append(current_history)
|
128 |
-
if agent_begin:
|
129 |
-
response = (char for char in self.begins[current_state.name]["begin_query"])
|
130 |
-
else:
|
131 |
-
response,res_dict = self.act()
|
132 |
-
|
133 |
-
|
134 |
-
action_dict = {
|
135 |
-
"response": response,
|
136 |
-
"res_dict": res_dict,
|
137 |
-
"role": self.state_roles[current_state.name],
|
138 |
-
"name": self.name,
|
139 |
-
"state_begin" : state_begin,
|
140 |
-
"agent_begin" : agent_begin,
|
141 |
-
"is_user" : self.is_user
|
142 |
-
}
|
143 |
-
return Action(**action_dict)
|
144 |
-
|
145 |
-
def act(self):
|
146 |
-
"""
|
147 |
-
return actions by the current state
|
148 |
-
"""
|
149 |
-
current_state = self.current_state
|
150 |
-
chat_history = self.long_term_memory
|
151 |
-
current_LLM = self.LLMs[current_state.name]
|
152 |
-
|
153 |
-
system_prompt, last_prompt, res_dict = self.compile()
|
154 |
-
|
155 |
-
|
156 |
-
|
157 |
-
response = current_LLM.get_response(
|
158 |
-
chat_history, system_prompt, last_prompt, stream=True
|
159 |
-
)
|
160 |
-
return response,res_dict
|
161 |
-
|
162 |
-
def update_memory(self, memory):
|
163 |
-
self.long_term_memory.append(
|
164 |
-
{"role": "assistant", "content": memory.content}
|
165 |
-
)
|
166 |
-
|
167 |
-
MAX_CHAT_HISTORY = eval(os.environ["MAX_CHAT_HISTORY"])
|
168 |
-
environment = self.environment
|
169 |
-
current_chat_history_idx = environment.current_chat_history_idx if environment.environment_type == "competive" else 0
|
170 |
-
|
171 |
-
current_long_term_memory = environment.shared_memory["long_term_memory"][current_chat_history_idx:]
|
172 |
-
last_conversation_idx = environment._get_agent_last_conversation_idx(self,current_long_term_memory)
|
173 |
-
if len(current_long_term_memory)-last_conversation_idx >= MAX_CHAT_HISTORY:
|
174 |
-
current_state = self.current_state
|
175 |
-
current_role = self.state_roles[current_state.name]
|
176 |
-
current_component_dict = current_state.components[current_role]
|
177 |
-
|
178 |
-
# get chat history from new conversation
|
179 |
-
conversations = environment._get_agent_new_memory(self,current_long_term_memory)
|
180 |
-
|
181 |
-
# get summary
|
182 |
-
summary_prompt = (
|
183 |
-
current_state.summary_prompt[current_role]
|
184 |
-
if current_state.summary_prompt
|
185 |
-
else f"""your name is {self.name},your role is{current_component_dict["style"].role},your task is {current_component_dict["task"].task}.\n"""
|
186 |
-
)
|
187 |
-
summary_prompt =eval(Agent_summary_system_prompt)
|
188 |
-
summary = self.LLMs[current_state.name].get_response(None, summary_prompt,stream = False)
|
189 |
-
self.short_term_memory = summary
|
190 |
-
|
191 |
-
|
192 |
-
def compile(self):
|
193 |
-
"""
|
194 |
-
get prompt from state depend on your role
|
195 |
-
Return:
|
196 |
-
system_prompt:system_prompt for agents's LLM
|
197 |
-
last_prompt:last_prompt for agents's LLM
|
198 |
-
res_dict(dict): Other return from tool component.For example: search engine results
|
199 |
-
"""
|
200 |
-
current_state = self.current_state
|
201 |
-
self.current_roles = self.state_roles[current_state.name]
|
202 |
-
current_state_name = current_state.name
|
203 |
-
self.LLM = self.LLMs[current_state_name]
|
204 |
-
components = current_state.components[self.state_roles[current_state_name]]
|
205 |
-
|
206 |
-
system_prompt = self.current_state.environment_prompt
|
207 |
-
last_prompt = ""
|
208 |
-
|
209 |
-
res_dict = {}
|
210 |
-
for component in components.values():
|
211 |
-
if isinstance(component, (OutputComponent, LastComponent)):
|
212 |
-
last_prompt = last_prompt + "\n" + component.get_prompt(self)
|
213 |
-
elif isinstance(component, PromptComponent):
|
214 |
-
system_prompt = (
|
215 |
-
system_prompt + "\n" + component.get_prompt(self)
|
216 |
-
)
|
217 |
-
elif isinstance(component, ToolComponent):
|
218 |
-
response = component.func(self)
|
219 |
-
if "prompt" in response and response["prompt"]:
|
220 |
-
last_prompt = last_prompt + "\n" + response["prompt"]
|
221 |
-
res_dict.update(response)
|
222 |
-
|
223 |
-
name = self.name
|
224 |
-
query = self.environment.shared_memory["long_term_memory"][-1]
|
225 |
-
last_prompt = eval(Agent_last_prompt)
|
226 |
-
system_prompt = eval(Agent_system_prompt)
|
227 |
-
return system_prompt, last_prompt, res_dict
|
228 |
-
|
229 |
-
|
230 |
-
def observe(self):
|
231 |
-
"""
|
232 |
-
Update one's own memory according to the current environment, including: updating short-term memory; updating long-term memory
|
233 |
-
"""
|
234 |
-
return self.environment._observe(self)
|
235 |
-
|
236 |
-
|
237 |
-
def generate_sop(self):
|
238 |
-
pass
|
239 |
-
|
240 |
-
def reflection(self):
|
241 |
-
pass
|
242 |
-
|
243 |
-
|
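For orientation, a minimal usage sketch of the deleted class above: the config path and the environment variables are illustrative assumptions, not files or settings defined by this Space, and `Agent` must be importable from the module shown here.

```python
# Hypothetical sketch: "sop_config.json" is an SOP config in the format
# Agent.from_config expects; MAX_CHAT_HISTORY / User_Names are optional env vars.
import os

os.environ.setdefault("MAX_CHAT_HISTORY", "10")

agents, roles_to_names, names_to_roles = Agent.from_config("sop_config.json")
for name, agent in agents.items():
    # each agent carries one role (and one LLM) per SOP state
    print(name, agent.state_roles)
```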
spaces/AIWaves/Software_Company/src/agents/LLM/__init__.py
DELETED
File without changes
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/work_dirs/__init__.py
DELETED
File without changes
spaces/AUBADA-ALARABI/AraPoet/app.py
DELETED
@@ -1,121 +0,0 @@
# coding=utf8

import json
import torch
import gradio as gr
import pyarabic.araby as araby
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, AutoConfig

feature_names = [
    "Title",
    "Meter",
    "Theme",
    "Name",
    "Era",
    "Country",
    "Type"
]

with open("./poet_names.json", 'r', encoding="utf-8") as fin:
    poet_names = json.load(fin)

def normalize_text(text):
    text = araby.strip_tatweel(text)
    return text

def generate_poem(country, era, meter, theme, lang_type, poet, num_lines, num_poems, title):

    num_poems = int(num_poems)
    prompt = title
    prompt = normalize_text(prompt)

    features = [prompt, meter, theme, poet, era, country, lang_type]

    prompt = ""
    for name, feat in zip(feature_names, features):
        prompt += f"{name}: {feat}; "
    prompt += f"Length: {num_lines}; Poem:"

    num_beams = 5
    top_k = 50
    top_p = 0.9
    r_penalty = 5.

    input_ids = torch.tensor(tokenizer.encode(prompt)).unsqueeze(0)
    print(f"> Running: {prompt} | {num_poems} Poems")
    outputs = model.generate(input_ids=input_ids,
                             min_length=32,
                             max_length=256,
                             do_sample=True,
                             top_k=top_k,
                             top_p=top_p,
                             repetition_penalty=r_penalty,
                             num_beams=num_beams,
                             num_return_sequences=num_poems,
                             early_stopping=True
    )

    poems = []
    print(f"> # of Outputs: {len(outputs)}")
    for output in outputs:
        raw = tokenizer.decode(output)
        raw = raw.replace("<pad>", "").replace("</s>", "")
        print("="*100)
        print(raw)
        print("="*100)
        poems += ['\n'.join(raw.split("<s>"))]

    return "\n\n".join(poems)

meters = ['البسيط', 'التفعيله', 'الحداء', 'الخفيف', 'الدوبيت', 'الرجز', 'الرمل', 'السريع', 'السلسلة', 'الصخري', 'الطويل', 'الكامل', 'الكان كان', 'اللويحاني', 'المتدارك', 'المتقارب', 'المجتث', 'المديد', 'المسحوب', 'المضارع', 'المقتضب', 'المنسرح', 'المواليا', 'الموشح', 'الهجيني', 'الهزج', 'الوافر', 'بحر أحذ الكامل', 'بحر أحذ المديد', 'بحر أحذ الوافر', 'بحر البسيط', 'بحر التفعيله', 'بحر الخبب', 'بحر الخفيف', 'بحر الدوبيت', 'بحر الرجز', 'بحر الرمل', 'بحر السريع', 'بحر السلسلة', 'بحر الطويل', 'بحر القوما', 'بحر الكامل', 'بحر الكامل المقطوع', 'بحر المتدارك', 'بحر المتدارك المنهوك', 'بحر المتقارب', 'بحر المجتث', 'بحر المديد', 'بحر المضارع', 'بحر المقتضب', 'بحر المنسرح', 'بحر المواليا', 'بحر الهزج', 'بحر الوافر', 'بحر تفعيلة الرجز', 'بحر تفعيلة الرمل', 'بحر تفعيلة الكامل', 'بحر تفعيلة المتقارب', 'بحر مجزوء البسيط', 'بحر مجزوء الخفيف', 'بحر مجزوء الدوبيت', 'بحر مجزوء الرجز', 'بحر مجزوء الرمل', 'بحر مجزوء الرمل ', 'بحر مجزوء السريع', 'بحر مجزوء الطويل', 'بحر مجزوء الكامل', 'بحر مجزوء المتدارك', 'بحر مجزوء المتقارب', 'بحر مجزوء المجتث', 'بحر مجزوء المديد', 'بحر مجزوء المنسرح', 'بحر مجزوء المواليا', 'بحر مجزوء الهزج', 'بحر مجزوء الوافر', 'بحر مجزوء موشح', 'بحر مخلع البسيط', 'بحر مخلع الرجز', 'بحر مخلع الرمل', 'بحر مخلع السريع', 'بحر مخلع الكامل', 'بحر مخلع موشح', 'بحر مربع البسيط', 'بحر مربع الرجز', 'بحر مشطور الرجز', 'بحر مشطور السريع', 'بحر مشطور الطويل', 'بحر منهوك البسيط', 'بحر منهوك الرجز', 'بحر منهوك الكامل', 'بحر منهوك المنسرح', 'بحر موشح', 'بسيط', 'زجل', 'شعر التفعيلة', 'شعر حر', 'عامي', 'عدة أبحر', 'عموديه', 'مجزوء الخفيف', 'نثريه', 'None']
themes = ['قصيدة اعتذار', 'قصيدة الاناشيد', 'قصيدة المعلقات', 'قصيدة حزينه', 'قصيدة دينية', 'قصيدة ذم', 'قصيدة رثاء', 'قصيدة رومنسيه', 'قصيدة سياسية', 'قصيدة شوق', 'قصيدة عامه', 'قصيدة عتاب', 'قصيدة غزل', 'قصيدة فراق', 'قصيدة قصيره', 'قصيدة مدح', 'قصيدة هجاء', 'قصيدة وطنيه', 'None']
language_types = ['شعبي', 'عامي', 'فصحى', 'فصيح', '-', 'None']
poet_era = ['العصر الأموي', 'العصر الأندلسي', 'العصر الأيوبي', 'العصر الإسلامي', 'العصر الجاهلي', 'العصر الحديث', 'العصر العباسي', 'العصر العثماني', 'العصر الفاطمي', 'العصر المملوكي', 'المخضرمين', 'المغرب والأندلس', 'عصر بين الدولتين', 'قبل الإسلام', 'None']
countries = ['الأردن', 'الإمارات', 'البحرين', 'الجزائر', 'السعودية', 'السنغال', 'السودان', 'الصومال', 'العراق', 'الكويت', 'المغرب', 'اليمن', 'تونس', 'سوريا', 'سورية', 'عمان', 'فلسطين', 'قطر', 'لبنان', 'ليبيا', 'مصر', 'موريتانيا', 'None']

tokenizer: AutoTokenizer = AutoTokenizer.from_pretrained("bkhmsi/arapoet-mt5", use_auth_token="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")
model: AutoModelForSeq2SeqLM = AutoModelForSeq2SeqLM.from_pretrained("bkhmsi/arapoet-mt5", use_auth_token="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")
model.eval()

title = ""
with gr.Blocks(title=title) as demo:
    inputs = []

    gr.Markdown(
        """
        # AraPoet: Controlled Arabic Poetry Generation

        The model hosted here is a finetuned version of [mT5-large](https://huggingface.co/google/mt5-large) (∼ 1.2B parameters) on the largest repository of Arabic poems, the [ashaar](https://huggingface.co/datasets/arbml/ashaar) dataset.
        The model can be conditioned on a set of attributes to control the style of the generated poem.
        Namely: the poet name, country, era, meter, theme, language type, title and the length of the poem.
        You can start by clicking on one of the examples below or try your own input.
        """
    )

    with gr.Row():
        inputs += [gr.Dropdown(countries, label="Country", value="مصر")]
        inputs += [gr.Dropdown(poet_era, label="Era", value="العصر الحديث")]
    with gr.Row():
        inputs += [gr.Dropdown(meters, label="Meter", value="بحر السريع")]
        inputs += [gr.Dropdown(themes, label="Theme", value="قصيدة رومنسيه")]
    with gr.Row():
        inputs += [gr.Dropdown(language_types, label="Language Type", value="فصحى")]
        inputs += [gr.Dropdown(poet_names, label="Poet", value="أحمد شوقي")]
    with gr.Row():
        inputs += [gr.Slider(2, 20, value=6, step=1, label="Number of Lines")]
        inputs += [gr.Slider(1, 4, value=1, step=1, label="Number of Samples")]
    with gr.Row():
        inputs += [gr.Textbox(label="Title", value="إثن عنان القلب واسلم به")]

    btn = gr.Button("Generate")
    examples = gr.Examples(examples="./examples", inputs=inputs)
    btn.click(generate_poem, inputs, gr.TextArea(label="Generation"))


    gr.Markdown(
        """
        Checkout our [AraPoet Preprint](https://github.com/BKHMSI/BKHMSI.github.io/blob/master/archive/resources/AraPoet.pdf) for more details about the model.
        """
    )

demo.launch()
spaces/AchyuthGamer/OpenGPT-Chat-UI/.svelte-kit/types/src/routes/conversation/[id]/$types.d.ts
DELETED
@@ -1,24 +0,0 @@
import type * as Kit from '@sveltejs/kit';

type Expand<T> = T extends infer O ? { [K in keyof O]: O[K] } : never;
type RouteParams = { id: string }
type RouteId = '/conversation/[id]';
type MaybeWithVoid<T> = {} extends T ? T | void : T;
export type RequiredKeys<T> = { [K in keyof T]-?: {} extends { [P in K]: T[K] } ? never : K; }[keyof T];
type OutputDataShape<T> = MaybeWithVoid<Omit<App.PageData, RequiredKeys<T>> & Partial<Pick<App.PageData, keyof T & keyof App.PageData>> & Record<string, any>>
type EnsureDefined<T> = T extends null | undefined ? {} : T;
type OptionalUnion<U extends Record<string, any>, A extends keyof U = U extends U ? keyof U : never> = U extends unknown ? { [P in Exclude<A, keyof U>]?: never } & U : never;
export type Snapshot<T = any> = Kit.Snapshot<T>;
type PageServerParentData = EnsureDefined<import('../../$types.js').LayoutServerData>;
type PageParentData = EnsureDefined<import('../../$types.js').LayoutData>;

export type EntryGenerator = () => Promise<Array<RouteParams>> | Array<RouteParams>;
export type PageServerLoad<OutputData extends OutputDataShape<PageServerParentData> = OutputDataShape<PageServerParentData>> = Kit.ServerLoad<RouteParams, PageServerParentData, OutputData, RouteId>;
export type PageServerLoadEvent = Parameters<PageServerLoad>[0];
export type ActionData = unknown;
export type PageServerData = Expand<OptionalUnion<EnsureDefined<Kit.AwaitedProperties<Awaited<ReturnType<typeof import('../../../../../../src/routes/conversation/[id]/+page.server.js').load>>>>>>;
export type PageData = Expand<Omit<PageParentData, keyof PageServerData> & EnsureDefined<PageServerData>>;
export type Action<OutputData extends Record<string, any> | void = Record<string, any> | void> = Kit.Action<RouteParams, OutputData, RouteId>
export type Actions<OutputData extends Record<string, any> | void = Record<string, any> | void> = Kit.Actions<RouteParams, OutputData, RouteId>
export type RequestHandler = Kit.RequestHandler<RouteParams, RouteId>;
export type RequestEvent = Kit.RequestEvent<RouteParams, RouteId>;
spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/server/database.ts
DELETED
@@ -1,23 +0,0 @@
let client = undefined;
export const connectPromise = undefined;

const db = undefined;

const conversations = undefined;
const sharedConversations = undefined;
const abortedGenerations = undefined;
const settings = undefined;
const users = undefined;
const webSearches = undefined;
const messageEvents = undefined;

export { client, db };
export const collections = {
	conversations,
	sharedConversations,
	abortedGenerations,
	settings,
	users,
	webSearches,
	messageEvents,
};
spaces/Adapter/T2I-Adapter/ldm/modules/extra_condition/api.py
DELETED
@@ -1,269 +0,0 @@
from enum import Enum, unique

import cv2
import torch
from basicsr.utils import img2tensor
from ldm.util import resize_numpy_image
from PIL import Image
from torch import autocast


@unique
class ExtraCondition(Enum):
    sketch = 0
    keypose = 1
    seg = 2
    depth = 3
    canny = 4
    style = 5
    color = 6
    openpose = 7


def get_cond_model(opt, cond_type: ExtraCondition):
    if cond_type == ExtraCondition.sketch:
        from ldm.modules.extra_condition.model_edge import pidinet
        model = pidinet()
        ckp = torch.load('models/table5_pidinet.pth', map_location='cpu')['state_dict']
        model.load_state_dict({k.replace('module.', ''): v for k, v in ckp.items()}, strict=True)
        model.to(opt.device)
        return model
    elif cond_type == ExtraCondition.seg:
        raise NotImplementedError
    elif cond_type == ExtraCondition.keypose:
        import mmcv
        from mmdet.apis import init_detector
        from mmpose.apis import init_pose_model
        det_config = 'configs/mm/faster_rcnn_r50_fpn_coco.py'
        det_checkpoint = 'models/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth'
        pose_config = 'configs/mm/hrnet_w48_coco_256x192.py'
        pose_checkpoint = 'models/hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth'
        det_config_mmcv = mmcv.Config.fromfile(det_config)
        det_model = init_detector(det_config_mmcv, det_checkpoint, device=opt.device)
        pose_config_mmcv = mmcv.Config.fromfile(pose_config)
        pose_model = init_pose_model(pose_config_mmcv, pose_checkpoint, device=opt.device)
        return {'pose_model': pose_model, 'det_model': det_model}
    elif cond_type == ExtraCondition.depth:
        from ldm.modules.extra_condition.midas.api import MiDaSInference
        model = MiDaSInference(model_type='dpt_hybrid').to(opt.device)
        return model
    elif cond_type == ExtraCondition.canny:
        return None
    elif cond_type == ExtraCondition.style:
        from transformers import CLIPProcessor, CLIPVisionModel
        version = 'openai/clip-vit-large-patch14'
        processor = CLIPProcessor.from_pretrained(version)
        clip_vision_model = CLIPVisionModel.from_pretrained(version).to(opt.device)
        return {'processor': processor, 'clip_vision_model': clip_vision_model}
    elif cond_type == ExtraCondition.color:
        return None
    elif cond_type == ExtraCondition.openpose:
        from ldm.modules.extra_condition.openpose.api import OpenposeInference
        model = OpenposeInference().to(opt.device)
        return model
    else:
        raise NotImplementedError


def get_cond_sketch(opt, cond_image, cond_inp_type, cond_model=None):
    if isinstance(cond_image, str):
        edge = cv2.imread(cond_image)
    else:
        # for gradio input, pay attention, it's rgb numpy
        edge = cv2.cvtColor(cond_image, cv2.COLOR_RGB2BGR)
    edge = resize_numpy_image(edge, max_resolution=opt.max_resolution, resize_short_edge=opt.resize_short_edge)
    opt.H, opt.W = edge.shape[:2]
    if cond_inp_type == 'sketch':
        edge = img2tensor(edge)[0].unsqueeze(0).unsqueeze(0) / 255.
        edge = edge.to(opt.device)
    elif cond_inp_type == 'image':
        edge = img2tensor(edge).unsqueeze(0) / 255.
        edge = cond_model(edge.to(opt.device))[-1]
    else:
        raise NotImplementedError

    # edge = 1-edge # for white background
    edge = edge > 0.5
    edge = edge.float()

    return edge


def get_cond_seg(opt, cond_image, cond_inp_type='image', cond_model=None):
    if isinstance(cond_image, str):
        seg = cv2.imread(cond_image)
    else:
        seg = cv2.cvtColor(cond_image, cv2.COLOR_RGB2BGR)
    seg = resize_numpy_image(seg, max_resolution=opt.max_resolution, resize_short_edge=opt.resize_short_edge)
    opt.H, opt.W = seg.shape[:2]
    if cond_inp_type == 'seg':
        seg = img2tensor(seg).unsqueeze(0) / 255.
        seg = seg.to(opt.device)
    else:
        raise NotImplementedError

    return seg


def get_cond_keypose(opt, cond_image, cond_inp_type='image', cond_model=None):
    if isinstance(cond_image, str):
        pose = cv2.imread(cond_image)
    else:
        pose = cv2.cvtColor(cond_image, cv2.COLOR_RGB2BGR)
    pose = resize_numpy_image(pose, max_resolution=opt.max_resolution, resize_short_edge=opt.resize_short_edge)
    opt.H, opt.W = pose.shape[:2]
    if cond_inp_type == 'keypose':
        pose = img2tensor(pose).unsqueeze(0) / 255.
        pose = pose.to(opt.device)
    elif cond_inp_type == 'image':
        from ldm.modules.extra_condition.utils import imshow_keypoints
        from mmdet.apis import inference_detector
        from mmpose.apis import (inference_top_down_pose_model, process_mmdet_results)

        # mmpose seems not compatible with autocast fp16
        with autocast("cuda", dtype=torch.float32):
            mmdet_results = inference_detector(cond_model['det_model'], pose)
            # keep the person class bounding boxes.
            person_results = process_mmdet_results(mmdet_results, 1)

            # optional
            return_heatmap = False
            dataset = cond_model['pose_model'].cfg.data['test']['type']

            # e.g. use ('backbone', ) to return backbone feature
            output_layer_names = None
            pose_results, returned_outputs = inference_top_down_pose_model(
                cond_model['pose_model'],
                pose,
                person_results,
                bbox_thr=0.2,
                format='xyxy',
                dataset=dataset,
                dataset_info=None,
                return_heatmap=return_heatmap,
                outputs=output_layer_names)

        # show the results
        pose = imshow_keypoints(pose, pose_results, radius=2, thickness=2)
        pose = img2tensor(pose).unsqueeze(0) / 255.
        pose = pose.to(opt.device)
    else:
        raise NotImplementedError

    return pose


def get_cond_depth(opt, cond_image, cond_inp_type='image', cond_model=None):
    if isinstance(cond_image, str):
        depth = cv2.imread(cond_image)
    else:
        depth = cv2.cvtColor(cond_image, cv2.COLOR_RGB2BGR)
    depth = resize_numpy_image(depth, max_resolution=opt.max_resolution, resize_short_edge=opt.resize_short_edge)
    opt.H, opt.W = depth.shape[:2]
    if cond_inp_type == 'depth':
        depth = img2tensor(depth).unsqueeze(0) / 255.
        depth = depth.to(opt.device)
    elif cond_inp_type == 'image':
        depth = img2tensor(depth).unsqueeze(0) / 127.5 - 1.0
        depth = cond_model(depth.to(opt.device)).repeat(1, 3, 1, 1)
        depth -= torch.min(depth)
        depth /= torch.max(depth)
    else:
        raise NotImplementedError

    return depth


def get_cond_canny(opt, cond_image, cond_inp_type='image', cond_model=None):
    if isinstance(cond_image, str):
        canny = cv2.imread(cond_image)
    else:
        canny = cv2.cvtColor(cond_image, cv2.COLOR_RGB2BGR)
    canny = resize_numpy_image(canny, max_resolution=opt.max_resolution, resize_short_edge=opt.resize_short_edge)
    opt.H, opt.W = canny.shape[:2]
    if cond_inp_type == 'canny':
        canny = img2tensor(canny)[0:1].unsqueeze(0) / 255.
        canny = canny.to(opt.device)
    elif cond_inp_type == 'image':
        canny = cv2.Canny(canny, 100, 200)[..., None]
        canny = img2tensor(canny).unsqueeze(0) / 255.
        canny = canny.to(opt.device)
    else:
        raise NotImplementedError

    return canny


def get_cond_style(opt, cond_image, cond_inp_type='image', cond_model=None):
    assert cond_inp_type == 'image'
    if isinstance(cond_image, str):
        style = Image.open(cond_image)
    else:
        # numpy image to PIL image
        style = Image.fromarray(cond_image)

    style_for_clip = cond_model['processor'](images=style, return_tensors="pt")['pixel_values']
    style_feat = cond_model['clip_vision_model'](style_for_clip.to(opt.device))['last_hidden_state']

    return style_feat


def get_cond_color(opt, cond_image, cond_inp_type='image', cond_model=None):
    if isinstance(cond_image, str):
        color = cv2.imread(cond_image)
    else:
        color = cv2.cvtColor(cond_image, cv2.COLOR_RGB2BGR)
    color = resize_numpy_image(color, max_resolution=opt.max_resolution, resize_short_edge=opt.resize_short_edge)
    opt.H, opt.W = color.shape[:2]
    if cond_inp_type == 'image':
        color = cv2.resize(color, (opt.W//64, opt.H//64), interpolation=cv2.INTER_CUBIC)
        color = cv2.resize(color, (opt.W, opt.H), interpolation=cv2.INTER_NEAREST)
    color = img2tensor(color).unsqueeze(0) / 255.
    color = color.to(opt.device)
    return color


def get_cond_openpose(opt, cond_image, cond_inp_type='image', cond_model=None):
    if isinstance(cond_image, str):
        openpose_keypose = cv2.imread(cond_image)
    else:
        openpose_keypose = cv2.cvtColor(cond_image, cv2.COLOR_RGB2BGR)
    openpose_keypose = resize_numpy_image(
        openpose_keypose, max_resolution=opt.max_resolution, resize_short_edge=opt.resize_short_edge)
    opt.H, opt.W = openpose_keypose.shape[:2]
    if cond_inp_type == 'openpose':
        openpose_keypose = img2tensor(openpose_keypose).unsqueeze(0) / 255.
        openpose_keypose = openpose_keypose.to(opt.device)
    elif cond_inp_type == 'image':
        with autocast('cuda', dtype=torch.float32):
            openpose_keypose = cond_model(openpose_keypose)
        openpose_keypose = img2tensor(openpose_keypose).unsqueeze(0) / 255.
        openpose_keypose = openpose_keypose.to(opt.device)

    else:
        raise NotImplementedError

    return openpose_keypose


def get_adapter_feature(inputs, adapters):
    ret_feat_map = None
    ret_feat_seq = None
    if not isinstance(inputs, list):
        inputs = [inputs]
        adapters = [adapters]

    for input, adapter in zip(inputs, adapters):
        cur_feature = adapter['model'](input)
        if isinstance(cur_feature, list):
            if ret_feat_map is None:
                ret_feat_map = list(map(lambda x: x * adapter['cond_weight'], cur_feature))
            else:
                ret_feat_map = list(map(lambda x, y: x + y * adapter['cond_weight'], ret_feat_map, cur_feature))
        else:
            if ret_feat_seq is None:
                ret_feat_seq = cur_feature
            else:
                ret_feat_seq = torch.cat([ret_feat_seq, cur_feature], dim=1)

    return ret_feat_map, ret_feat_seq
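For orientation, a minimal sketch of how the condition helpers in the deleted module above are typically driven. The `opt` namespace, the image path, and the commented adapter dict are illustrative assumptions, not values defined by this module:

```python
# Hypothetical driver: `opt` mimics the argparse namespace these helpers expect.
from types import SimpleNamespace

opt = SimpleNamespace(device="cpu", max_resolution=512 * 512, resize_short_edge=None)

# Extract a canny condition map from an RGB image on disk (placeholder path).
canny_cond = get_cond_canny(opt, "input.png", cond_inp_type="image")  # (1, 1, H, W) in [0, 1]

# With a loaded adapter, the condition map would then be turned into features:
# adapter = {"model": loaded_adapter_module, "cond_weight": 1.0}
# feat_map, feat_seq = get_adapter_feature(canny_cond, adapter)
```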
spaces/AgentVerse/agentVerse/agentverse_command/main_simulation_gui.py
DELETED
@@ -1,21 +0,0 @@
import os
from agentverse.gui import GUI
from argparse import ArgumentParser

parser = ArgumentParser()
parser.add_argument("--task", type=str, default="simulation/nlp_classroom_9players")
parser.add_argument(
    "--tasks_dir",
    type=str,
    default=os.path.join(os.path.dirname(__file__), "..", "agentverse", "tasks"),
)
args = parser.parse_args()


def cli_main():
    ui = GUI(args.task, args.tasks_dir)
    ui.launch()


if __name__ == "__main__":
    cli_main()
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridsizer/GetExpandedChildHeight.js
DELETED
@@ -1,11 +0,0 @@
var GetExpandedChildHeight = function (child, rowHeight) {
    var childHeight;
    var childConfig = child.rexSizer;
    if (childConfig.expand) {
        var padding = childConfig.padding;
        childHeight = rowHeight - padding.top - padding.bottom;
    }
    return childHeight;
}

export default GetExpandedChildHeight;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/overlapsizer/OverlapSizer.js
DELETED
@@ -1,49 +0,0 @@
import BaseSizer from '../basesizer/BaseSizer.js';
import Methods from './Methods.js';
import Clear from '../../../plugins/utils/object/Clear.js';
import IndexOf from '../../../plugins/utils/object/IndexOf.js';

const IsPlainObject = Phaser.Utils.Objects.IsPlainObject;
const GetValue = Phaser.Utils.Objects.GetValue;

class OverlapSizer extends BaseSizer {
    constructor(scene, x, y, minWidth, minHeight, config) {
        if (IsPlainObject(x)) {
            config = x;
            x = GetValue(config, 'x', 0);
            y = GetValue(config, 'y', 0);
            minWidth = GetValue(config, 'width', undefined);
            minHeight = GetValue(config, 'height', undefined);
        } else if (IsPlainObject(minWidth)) {
            config = minWidth;
            minWidth = GetValue(config, 'width', undefined);
            minHeight = GetValue(config, 'height', undefined);
        }

        super(scene, x, y, minWidth, minHeight, config);

        this.type = 'rexOverlapSizer';
        this.sizerChildren = {};

        this.addChildrenMap('items', this.sizerChildren);
    }

    childToKey(gameObject) {
        if (typeof (gameObject) === 'string') {
            var key = gameObject;
            if (this.sizerChildren.hasOwnPropery(key)) {
                return key;
            }
        } else {
            return IndexOf(this.sizerChildren, gameObject);
        }
        return null;
    }
}

Object.assign(
    OverlapSizer.prototype,
    Methods
);

export default OverlapSizer;
spaces/AlexWang/lama/saicinpainting/training/losses/segmentation.py
DELETED
@@ -1,43 +0,0 @@
import torch
import torch.nn as nn
import torch.nn.functional as F

from .constants import weights as constant_weights


class CrossEntropy2d(nn.Module):
    def __init__(self, reduction="mean", ignore_label=255, weights=None, *args, **kwargs):
        """
        weight (Tensor, optional): a manual rescaling weight given to each class.
        If given, has to be a Tensor of size "nclasses"
        """
        super(CrossEntropy2d, self).__init__()
        self.reduction = reduction
        self.ignore_label = ignore_label
        self.weights = weights
        if self.weights is not None:
            device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
            self.weights = torch.FloatTensor(constant_weights[weights]).to(device)

    def forward(self, predict, target):
        """
        Args:
        predict:(n, c, h, w)
        target:(n, 1, h, w)
        """
        target = target.long()
        assert not target.requires_grad
        assert predict.dim() == 4, "{0}".format(predict.size())
        assert target.dim() == 4, "{0}".format(target.size())
        assert predict.size(0) == target.size(0), "{0} vs {1} ".format(predict.size(0), target.size(0))
        assert target.size(1) == 1, "{0}".format(target.size(1))
        assert predict.size(2) == target.size(2), "{0} vs {1} ".format(predict.size(2), target.size(2))
        assert predict.size(3) == target.size(3), "{0} vs {1} ".format(predict.size(3), target.size(3))
        target = target.squeeze(1)
        n, c, h, w = predict.size()
        target_mask = (target >= 0) * (target != self.ignore_label)
        target = target[target_mask]
        predict = predict.transpose(1, 2).transpose(2, 3).contiguous()
        predict = predict[target_mask.view(n, h, w, 1).repeat(1, 1, 1, c)].view(-1, c)
        loss = F.cross_entropy(predict, target, weight=self.weights, reduction=self.reduction)
        return loss
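For reference, a minimal sketch of the masked 2D cross-entropy idea implemented by the deleted module above, exercised on dummy tensors. The shapes and the ignore label 255 are chosen here purely for illustration:

```python
import torch
import torch.nn.functional as F

# Dummy shapes: 2 images, 5 classes, 8x8 logits.
predict = torch.randn(2, 5, 8, 8)            # (n, c, h, w) raw logits
target = torch.randint(0, 5, (2, 1, 8, 8))   # (n, 1, h, w) class indices
target[0, 0, 0, 0] = 255                     # mark one pixel as "ignore"

# Same masking logic as the forward pass above, written inline.
t = target.long().squeeze(1)
mask = (t >= 0) & (t != 255)
logits = predict.permute(0, 2, 3, 1)[mask]   # (num_valid_pixels, c)
loss = F.cross_entropy(logits, t[mask])
print(loss.item())
```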
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/shap_e.md
DELETED
@@ -1,190 +0,0 @@
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Shap-E

The Shap-E model was proposed in [Shap-E: Generating Conditional 3D Implicit Functions](https://huggingface.co/papers/2305.02463) by Alex Nichol and Heewon Jun from [OpenAI](https://github.com/openai).

The abstract from the paper is:

*We present Shap-E, a conditional generative model for 3D assets. Unlike recent work on 3D generative models which produce a single output representation, Shap-E directly generates the parameters of implicit functions that can be rendered as both textured meshes and neural radiance fields. We train Shap-E in two stages: first, we train an encoder that deterministically maps 3D assets into the parameters of an implicit function; second, we train a conditional diffusion model on outputs of the encoder. When trained on a large dataset of paired 3D and text data, our resulting models are capable of generating complex and diverse 3D assets in a matter of seconds. When compared to Point-E, an explicit generative model over point clouds, Shap-E converges faster and reaches comparable or better sample quality despite modeling a higher-dimensional, multi-representation output space.*

The original codebase can be found at [openai/shap-e](https://github.com/openai/shap-e).

<Tip>

Make sure to check out the Schedulers [guide](/using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](/using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines.

</Tip>

## Usage Examples

In the following, we will walk you through some examples of how to use Shap-E pipelines to create 3D objects in gif format.

### Text-to-3D image generation

We can use [`ShapEPipeline`] to create 3D object based on a text prompt. In this example, we will make a birthday cupcake for :firecracker: diffusers library's 1 year birthday. The workflow to use the Shap-E text-to-image pipeline is same as how you would use other text-to-image pipelines in diffusers.

```python
import torch

from diffusers import DiffusionPipeline

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

repo = "openai/shap-e"
pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
pipe = pipe.to(device)

guidance_scale = 15.0
prompt = ["A firecracker", "A birthday cupcake"]

images = pipe(
    prompt,
    guidance_scale=guidance_scale,
    num_inference_steps=64,
    frame_size=256,
).images
```

The output of [`ShapEPipeline`] is a list of lists of images frames. Each list of frames can be used to create a 3D object. Let's use the `export_to_gif` utility function in diffusers to make a 3D cupcake!

```python
from diffusers.utils import export_to_gif

export_to_gif(images[0], "firecracker_3d.gif")
export_to_gif(images[1], "cake_3d.gif")
```




### Image-to-Image generation

You can use [`ShapEImg2ImgPipeline`] along with other text-to-image pipelines in diffusers and turn your 2D generation into 3D.

In this example, We will first genrate a cheeseburger with a simple prompt "A cheeseburger, white background"

```python
from diffusers import DiffusionPipeline
import torch

pipe_prior = DiffusionPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16)
pipe_prior.to("cuda")

t2i_pipe = DiffusionPipeline.from_pretrained("kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16)
t2i_pipe.to("cuda")

prompt = "A cheeseburger, white background"

image_embeds, negative_image_embeds = pipe_prior(prompt, guidance_scale=1.0).to_tuple()
image = t2i_pipe(
    prompt,
    image_embeds=image_embeds,
    negative_image_embeds=negative_image_embeds,
).images[0]

image.save("burger.png")
```



we will then use the Shap-E image-to-image pipeline to turn it into a 3D cheeseburger :)

```python
from PIL import Image
from diffusers.utils import export_to_gif

repo = "openai/shap-e-img2img"
pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
pipe = pipe.to("cuda")

guidance_scale = 3.0
image = Image.open("burger.png").resize((256, 256))

images = pipe(
    image,
    guidance_scale=guidance_scale,
    num_inference_steps=64,
    frame_size=256,
).images

gif_path = export_to_gif(images[0], "burger_3d.gif")
```


### Generate mesh

For both [`ShapEPipeline`] and [`ShapEImg2ImgPipeline`], you can generate mesh output by passing `output_type` as `mesh` to the pipeline, and then use the [`ShapEPipeline.export_to_ply`] utility function to save the output as a `ply` file. We also provide a [`ShapEPipeline.export_to_obj`] function that you can use to save mesh outputs as `obj` files.

```python
import torch

from diffusers import DiffusionPipeline
from diffusers.utils import export_to_ply

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

repo = "openai/shap-e"
pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16, variant="fp16")
pipe = pipe.to(device)

guidance_scale = 15.0
prompt = "A birthday cupcake"

images = pipe(prompt, guidance_scale=guidance_scale, num_inference_steps=64, frame_size=256, output_type="mesh").images

ply_path = export_to_ply(images[0], "3d_cake.ply")
print(f"saved to folder: {ply_path}")
```

Huggingface Datasets supports mesh visualization for mesh files in `glb` format. Below we will show you how to convert your mesh file into `glb` format so that you can use the Dataset viewer to render 3D objects.

We need to install `trimesh` library.

```
pip install trimesh
```

To convert the mesh file into `glb` format,

```python
import trimesh

mesh = trimesh.load("3d_cake.ply")
mesh.export("3d_cake.glb", file_type="glb")
```

By default, the mesh output of Shap-E is from the bottom viewpoint; you can change the default viewpoint by applying a rotation transformation

```python
import trimesh
import numpy as np

mesh = trimesh.load("3d_cake.ply")
rot = trimesh.transformations.rotation_matrix(-np.pi / 2, [1, 0, 0])
mesh = mesh.apply_transform(rot)
mesh.export("3d_cake.glb", file_type="glb")
```

Now you can upload your mesh file to your dataset and visualize it! Here is the link to the 3D cake we just generated
https://huggingface.co/datasets/hf-internal-testing/diffusers-images/blob/main/shap_e/3d_cake.glb

## ShapEPipeline
[[autodoc]] ShapEPipeline
	- all
	- __call__

## ShapEImg2ImgPipeline
[[autodoc]] ShapEImg2ImgPipeline
	- all
	- __call__

## ShapEPipelineOutput
[[autodoc]] pipelines.shap_e.pipeline_shap_e.ShapEPipelineOutput
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/optimization/habana.md
DELETED
@@ -1,79 +0,0 @@
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# How to use Stable Diffusion on Habana Gaudi

🤗 Diffusers is compatible with Habana Gaudi through 🤗 [Optimum Habana](https://huggingface.co/docs/optimum/habana/usage_guides/stable_diffusion).

## Requirements

- Optimum Habana 1.6 or later, [here](https://huggingface.co/docs/optimum/habana/installation) is how to install it.
- SynapseAI 1.10.


## Inference Pipeline

To generate images with Stable Diffusion 1 and 2 on Gaudi, you need to instantiate two instances:
- A pipeline with [`GaudiStableDiffusionPipeline`](https://huggingface.co/docs/optimum/habana/package_reference/stable_diffusion_pipeline). This pipeline supports *text-to-image generation*.
- A scheduler with [`GaudiDDIMScheduler`](https://huggingface.co/docs/optimum/habana/package_reference/stable_diffusion_pipeline#optimum.habana.diffusers.GaudiDDIMScheduler). This scheduler has been optimized for Habana Gaudi.

When initializing the pipeline, you have to specify `use_habana=True` to deploy it on HPUs.
Furthermore, in order to get the fastest possible generations you should enable **HPU graphs** with `use_hpu_graphs=True`.
Finally, you will need to specify a [Gaudi configuration](https://huggingface.co/docs/optimum/habana/package_reference/gaudi_config) which can be downloaded from the [Hugging Face Hub](https://huggingface.co/Habana).

```python
from optimum.habana import GaudiConfig
from optimum.habana.diffusers import GaudiDDIMScheduler, GaudiStableDiffusionPipeline

model_name = "stabilityai/stable-diffusion-2-base"
scheduler = GaudiDDIMScheduler.from_pretrained(model_name, subfolder="scheduler")
pipeline = GaudiStableDiffusionPipeline.from_pretrained(
    model_name,
    scheduler=scheduler,
    use_habana=True,
    use_hpu_graphs=True,
    gaudi_config="Habana/stable-diffusion-2",
)
```

You can then call the pipeline to generate images by batches from one or several prompts:
```python
outputs = pipeline(
    prompt=[
        "High quality photo of an astronaut riding a horse in space",
        "Face of a yellow cat, high resolution, sitting on a park bench",
    ],
    num_images_per_prompt=10,
    batch_size=4,
)
```

For more information, check out Optimum Habana's [documentation](https://huggingface.co/docs/optimum/habana/usage_guides/stable_diffusion) and the [example](https://github.com/huggingface/optimum-habana/tree/main/examples/stable-diffusion) provided in the official Github repository.


## Benchmark

Here are the latencies for Habana first-generation Gaudi and Gaudi2 with the [Habana/stable-diffusion](https://huggingface.co/Habana/stable-diffusion) and [Habana/stable-diffusion-2](https://huggingface.co/Habana/stable-diffusion-2) Gaudi configurations (mixed precision bf16/fp32):

- [Stable Diffusion v1.5](https://huggingface.co/runwayml/stable-diffusion-v1-5) (512x512 resolution):

|                        | Latency (batch size = 1) | Throughput (batch size = 8) |
| ---------------------- |:------------------------:|:---------------------------:|
| first-generation Gaudi | 3.80s                    | 0.308 images/s              |
| Gaudi2                 | 1.33s                    | 1.081 images/s              |

- [Stable Diffusion v2.1](https://huggingface.co/stabilityai/stable-diffusion-2-1) (768x768 resolution):

|                        | Latency (batch size = 1) | Throughput                      |
| ---------------------- |:------------------------:|:-------------------------------:|
| first-generation Gaudi | 10.2s                    | 0.108 images/s (batch size = 4) |
| Gaudi2                 | 3.17s                    | 0.379 images/s (batch size = 8) |
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/models/test_unet_2d_blocks.py
DELETED
@@ -1,337 +0,0 @@
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest

from diffusers.models.unet_2d_blocks import *  # noqa F403
from diffusers.utils import torch_device

from .test_unet_blocks_common import UNetBlockTesterMixin


class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
        super().test_output(expected_slice)


class ResnetDownsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetDownsampleBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
        super().test_output(expected_slice)


class AttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
        super().test_output(expected_slice)


class CrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
        super().test_output(expected_slice)


class SimpleCrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
        super().test_output(expected_slice)


class SkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
        super().test_output(expected_slice)


class AttnSkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
        super().test_output(expected_slice)


class DownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
        super().test_output(expected_slice)


class AttnDownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
        super().test_output(expected_slice)


class UNetMidBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2D  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "temb_channels": 128,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
        super().test_output(expected_slice)


class UNetMidBlock2DCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DCrossAttn  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
        super().test_output(expected_slice)


class UNetMidBlock2DSimpleCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DSimpleCrossAttn  # noqa F405
    block_type = "mid"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
        super().test_output(expected_slice)


class UpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
        super().test_output(expected_slice)


class ResnetUpsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetUpsampleBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
        super().test_output(expected_slice)


class CrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict
|
239 |
-
def test_output(self):
|
240 |
-
expected_slice = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
|
241 |
-
super().test_output(expected_slice)
|
242 |
-
|
243 |
-
|
244 |
-
class SimpleCrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
|
245 |
-
block_class = SimpleCrossAttnUpBlock2D # noqa F405
|
246 |
-
block_type = "up"
|
247 |
-
|
248 |
-
@property
|
249 |
-
def dummy_input(self):
|
250 |
-
return super().get_dummy_input(include_res_hidden_states_tuple=True, include_encoder_hidden_states=True)
|
251 |
-
|
252 |
-
def prepare_init_args_and_inputs_for_common(self):
|
253 |
-
init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
|
254 |
-
init_dict["cross_attention_dim"] = 32
|
255 |
-
return init_dict, inputs_dict
|
256 |
-
|
257 |
-
def test_output(self):
|
258 |
-
expected_slice = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
|
259 |
-
super().test_output(expected_slice)
|
260 |
-
|
261 |
-
|
262 |
-
class AttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
|
263 |
-
block_class = AttnUpBlock2D # noqa F405
|
264 |
-
block_type = "up"
|
265 |
-
|
266 |
-
@property
|
267 |
-
def dummy_input(self):
|
268 |
-
return super().get_dummy_input(include_res_hidden_states_tuple=True)
|
269 |
-
|
270 |
-
@unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
|
271 |
-
def test_output(self):
|
272 |
-
expected_slice = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
|
273 |
-
super().test_output(expected_slice)
|
274 |
-
|
275 |
-
|
276 |
-
class SkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
|
277 |
-
block_class = SkipUpBlock2D # noqa F405
|
278 |
-
block_type = "up"
|
279 |
-
|
280 |
-
@property
|
281 |
-
def dummy_input(self):
|
282 |
-
return super().get_dummy_input(include_res_hidden_states_tuple=True)
|
283 |
-
|
284 |
-
def test_output(self):
|
285 |
-
expected_slice = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
|
286 |
-
super().test_output(expected_slice)
|
287 |
-
|
288 |
-
|
289 |
-
class AttnSkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
|
290 |
-
block_class = AttnSkipUpBlock2D # noqa F405
|
291 |
-
block_type = "up"
|
292 |
-
|
293 |
-
@property
|
294 |
-
def dummy_input(self):
|
295 |
-
return super().get_dummy_input(include_res_hidden_states_tuple=True)
|
296 |
-
|
297 |
-
def test_output(self):
|
298 |
-
expected_slice = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
|
299 |
-
super().test_output(expected_slice)
|
300 |
-
|
301 |
-
|
302 |
-
class UpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
|
303 |
-
block_class = UpDecoderBlock2D # noqa F405
|
304 |
-
block_type = "up"
|
305 |
-
|
306 |
-
@property
|
307 |
-
def dummy_input(self):
|
308 |
-
return super().get_dummy_input(include_temb=False)
|
309 |
-
|
310 |
-
def prepare_init_args_and_inputs_for_common(self):
|
311 |
-
init_dict = {"in_channels": 32, "out_channels": 32}
|
312 |
-
|
313 |
-
inputs_dict = self.dummy_input
|
314 |
-
return init_dict, inputs_dict
|
315 |
-
|
316 |
-
def test_output(self):
|
317 |
-
expected_slice = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
|
318 |
-
super().test_output(expected_slice)
|
319 |
-
|
320 |
-
|
321 |
-
class AttnUpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
|
322 |
-
block_class = AttnUpDecoderBlock2D # noqa F405
|
323 |
-
block_type = "up"
|
324 |
-
|
325 |
-
@property
|
326 |
-
def dummy_input(self):
|
327 |
-
return super().get_dummy_input(include_temb=False)
|
328 |
-
|
329 |
-
def prepare_init_args_and_inputs_for_common(self):
|
330 |
-
init_dict = {"in_channels": 32, "out_channels": 32}
|
331 |
-
|
332 |
-
inputs_dict = self.dummy_input
|
333 |
-
return init_dict, inputs_dict
|
334 |
-
|
335 |
-
def test_output(self):
|
336 |
-
expected_slice = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
|
337 |
-
super().test_output(expected_slice)
|
spaces/Andy1621/uniformer_image_detection/configs/gn+ws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco.py
DELETED
@@ -1,17 +0,0 @@
-_base_ = './mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py'
-# model settings
-conv_cfg = dict(type='ConvWS')
-norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
-model = dict(
-    pretrained='open-mmlab://jhu/resnext50_32x4d_gn_ws',
-    backbone=dict(
-        type='ResNeXt',
-        depth=50,
-        groups=32,
-        base_width=4,
-        num_stages=4,
-        out_indices=(0, 1, 2, 3),
-        frozen_stages=1,
-        style='pytorch',
-        conv_cfg=conv_cfg,
-        norm_cfg=norm_cfg))
spaces/Andy1621/uniformer_image_detection/tools/model_converters/upgrade_model_version.py
DELETED
@@ -1,209 +0,0 @@
|
|
1 |
-
import argparse
|
2 |
-
import re
|
3 |
-
import tempfile
|
4 |
-
from collections import OrderedDict
|
5 |
-
|
6 |
-
import torch
|
7 |
-
from mmcv import Config
|
8 |
-
|
9 |
-
|
10 |
-
def is_head(key):
|
11 |
-
valid_head_list = [
|
12 |
-
'bbox_head', 'mask_head', 'semantic_head', 'grid_head', 'mask_iou_head'
|
13 |
-
]
|
14 |
-
|
15 |
-
return any(key.startswith(h) for h in valid_head_list)
|
16 |
-
|
17 |
-
|
18 |
-
def parse_config(config_strings):
|
19 |
-
temp_file = tempfile.NamedTemporaryFile()
|
20 |
-
config_path = f'{temp_file.name}.py'
|
21 |
-
with open(config_path, 'w') as f:
|
22 |
-
f.write(config_strings)
|
23 |
-
|
24 |
-
config = Config.fromfile(config_path)
|
25 |
-
is_two_stage = True
|
26 |
-
is_ssd = False
|
27 |
-
is_retina = False
|
28 |
-
reg_cls_agnostic = False
|
29 |
-
if 'rpn_head' not in config.model:
|
30 |
-
is_two_stage = False
|
31 |
-
# check whether it is SSD
|
32 |
-
if config.model.bbox_head.type == 'SSDHead':
|
33 |
-
is_ssd = True
|
34 |
-
elif config.model.bbox_head.type == 'RetinaHead':
|
35 |
-
is_retina = True
|
36 |
-
elif isinstance(config.model['bbox_head'], list):
|
37 |
-
reg_cls_agnostic = True
|
38 |
-
elif 'reg_class_agnostic' in config.model.bbox_head:
|
39 |
-
reg_cls_agnostic = config.model.bbox_head \
|
40 |
-
.reg_class_agnostic
|
41 |
-
temp_file.close()
|
42 |
-
return is_two_stage, is_ssd, is_retina, reg_cls_agnostic
|
43 |
-
|
44 |
-
|
45 |
-
def reorder_cls_channel(val, num_classes=81):
|
46 |
-
# bias
|
47 |
-
if val.dim() == 1:
|
48 |
-
new_val = torch.cat((val[1:], val[:1]), dim=0)
|
49 |
-
# weight
|
50 |
-
else:
|
51 |
-
out_channels, in_channels = val.shape[:2]
|
52 |
-
# conv_cls for softmax output
|
53 |
-
if out_channels != num_classes and out_channels % num_classes == 0:
|
54 |
-
new_val = val.reshape(-1, num_classes, in_channels, *val.shape[2:])
|
55 |
-
new_val = torch.cat((new_val[:, 1:], new_val[:, :1]), dim=1)
|
56 |
-
new_val = new_val.reshape(val.size())
|
57 |
-
# fc_cls
|
58 |
-
elif out_channels == num_classes:
|
59 |
-
new_val = torch.cat((val[1:], val[:1]), dim=0)
|
60 |
-
# agnostic | retina_cls | rpn_cls
|
61 |
-
else:
|
62 |
-
new_val = val
|
63 |
-
|
64 |
-
return new_val
|
65 |
-
|
66 |
-
|
67 |
-
def truncate_cls_channel(val, num_classes=81):
|
68 |
-
|
69 |
-
# bias
|
70 |
-
if val.dim() == 1:
|
71 |
-
if val.size(0) % num_classes == 0:
|
72 |
-
new_val = val[:num_classes - 1]
|
73 |
-
else:
|
74 |
-
new_val = val
|
75 |
-
# weight
|
76 |
-
else:
|
77 |
-
out_channels, in_channels = val.shape[:2]
|
78 |
-
# conv_logits
|
79 |
-
if out_channels % num_classes == 0:
|
80 |
-
new_val = val.reshape(num_classes, in_channels, *val.shape[2:])[1:]
|
81 |
-
new_val = new_val.reshape(-1, *val.shape[1:])
|
82 |
-
# agnostic
|
83 |
-
else:
|
84 |
-
new_val = val
|
85 |
-
|
86 |
-
return new_val
|
87 |
-
|
88 |
-
|
89 |
-
def truncate_reg_channel(val, num_classes=81):
|
90 |
-
# bias
|
91 |
-
if val.dim() == 1:
|
92 |
-
# fc_reg | rpn_reg
|
93 |
-
if val.size(0) % num_classes == 0:
|
94 |
-
new_val = val.reshape(num_classes, -1)[:num_classes - 1]
|
95 |
-
new_val = new_val.reshape(-1)
|
96 |
-
# agnostic
|
97 |
-
else:
|
98 |
-
new_val = val
|
99 |
-
# weight
|
100 |
-
else:
|
101 |
-
out_channels, in_channels = val.shape[:2]
|
102 |
-
# fc_reg | rpn_reg
|
103 |
-
if out_channels % num_classes == 0:
|
104 |
-
new_val = val.reshape(num_classes, -1, in_channels,
|
105 |
-
*val.shape[2:])[1:]
|
106 |
-
new_val = new_val.reshape(-1, *val.shape[1:])
|
107 |
-
# agnostic
|
108 |
-
else:
|
109 |
-
new_val = val
|
110 |
-
|
111 |
-
return new_val
|
112 |
-
|
113 |
-
|
114 |
-
def convert(in_file, out_file, num_classes):
|
115 |
-
"""Convert keys in checkpoints.
|
116 |
-
|
117 |
-
There can be some breaking changes during the development of mmdetection,
|
118 |
-
and this tool is used for upgrading checkpoints trained with old versions
|
119 |
-
to the latest one.
|
120 |
-
"""
|
121 |
-
checkpoint = torch.load(in_file)
|
122 |
-
in_state_dict = checkpoint.pop('state_dict')
|
123 |
-
out_state_dict = OrderedDict()
|
124 |
-
meta_info = checkpoint['meta']
|
125 |
-
is_two_stage, is_ssd, is_retina, reg_cls_agnostic = parse_config(
|
126 |
-
'#' + meta_info['config'])
|
127 |
-
if meta_info['mmdet_version'] <= '0.5.3' and is_retina:
|
128 |
-
upgrade_retina = True
|
129 |
-
else:
|
130 |
-
upgrade_retina = False
|
131 |
-
|
132 |
-
# MMDetection v2.5.0 unifies the class order in RPN
|
133 |
-
# if the model is trained in version<v2.5.0
|
134 |
-
# The RPN model should be upgraded to be used in version>=2.5.0
|
135 |
-
if meta_info['mmdet_version'] < '2.5.0':
|
136 |
-
upgrade_rpn = True
|
137 |
-
else:
|
138 |
-
upgrade_rpn = False
|
139 |
-
|
140 |
-
for key, val in in_state_dict.items():
|
141 |
-
new_key = key
|
142 |
-
new_val = val
|
143 |
-
if is_two_stage and is_head(key):
|
144 |
-
new_key = 'roi_head.{}'.format(key)
|
145 |
-
|
146 |
-
# classification
|
147 |
-
if upgrade_rpn:
|
148 |
-
m = re.search(
|
149 |
-
r'(conv_cls|retina_cls|rpn_cls|fc_cls|fcos_cls|'
|
150 |
-
r'fovea_cls).(weight|bias)', new_key)
|
151 |
-
else:
|
152 |
-
m = re.search(
|
153 |
-
r'(conv_cls|retina_cls|fc_cls|fcos_cls|'
|
154 |
-
r'fovea_cls).(weight|bias)', new_key)
|
155 |
-
if m is not None:
|
156 |
-
print(f'reorder cls channels of {new_key}')
|
157 |
-
new_val = reorder_cls_channel(val, num_classes)
|
158 |
-
|
159 |
-
# regression
|
160 |
-
if upgrade_rpn:
|
161 |
-
m = re.search(r'(fc_reg).(weight|bias)', new_key)
|
162 |
-
else:
|
163 |
-
m = re.search(r'(fc_reg|rpn_reg).(weight|bias)', new_key)
|
164 |
-
if m is not None and not reg_cls_agnostic:
|
165 |
-
print(f'truncate regression channels of {new_key}')
|
166 |
-
new_val = truncate_reg_channel(val, num_classes)
|
167 |
-
|
168 |
-
# mask head
|
169 |
-
m = re.search(r'(conv_logits).(weight|bias)', new_key)
|
170 |
-
if m is not None:
|
171 |
-
print(f'truncate mask prediction channels of {new_key}')
|
172 |
-
new_val = truncate_cls_channel(val, num_classes)
|
173 |
-
|
174 |
-
m = re.search(r'(cls_convs|reg_convs).\d.(weight|bias)', key)
|
175 |
-
# Legacy issues in RetinaNet since V1.x
|
176 |
-
# Use ConvModule instead of nn.Conv2d in RetinaNet
|
177 |
-
# cls_convs.0.weight -> cls_convs.0.conv.weight
|
178 |
-
if m is not None and upgrade_retina:
|
179 |
-
param = m.groups()[1]
|
180 |
-
new_key = key.replace(param, f'conv.{param}')
|
181 |
-
out_state_dict[new_key] = val
|
182 |
-
print(f'rename the name of {key} to {new_key}')
|
183 |
-
continue
|
184 |
-
|
185 |
-
m = re.search(r'(cls_convs).\d.(weight|bias)', key)
|
186 |
-
if m is not None and is_ssd:
|
187 |
-
print(f'reorder cls channels of {new_key}')
|
188 |
-
new_val = reorder_cls_channel(val, num_classes)
|
189 |
-
|
190 |
-
out_state_dict[new_key] = new_val
|
191 |
-
checkpoint['state_dict'] = out_state_dict
|
192 |
-
torch.save(checkpoint, out_file)
|
193 |
-
|
194 |
-
|
195 |
-
def main():
|
196 |
-
parser = argparse.ArgumentParser(description='Upgrade model version')
|
197 |
-
parser.add_argument('in_file', help='input checkpoint file')
|
198 |
-
parser.add_argument('out_file', help='output checkpoint file')
|
199 |
-
parser.add_argument(
|
200 |
-
'--num-classes',
|
201 |
-
type=int,
|
202 |
-
default=81,
|
203 |
-
help='number of classes of the original model')
|
204 |
-
args = parser.parse_args()
|
205 |
-
convert(args.in_file, args.out_file, args.num_classes)
|
206 |
-
|
207 |
-
|
208 |
-
if __name__ == '__main__':
|
209 |
-
main()
|
spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_d6_r101-d16_512x1024_40k_cityscapes.py
DELETED
@@ -1,2 +0,0 @@
-_base_ = './fcn_d6_r50-d16_512x1024_40k_cityscapes.py'
-model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
spaces/Andy1621/uniformer_image_segmentation/configs/ocrnet/ocrnet_hr18_512x512_160k_ade20k.py
DELETED
@@ -1,35 +0,0 @@
-_base_ = [
-    '../_base_/models/ocrnet_hr18.py', '../_base_/datasets/ade20k.py',
-    '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
-]
-norm_cfg = dict(type='SyncBN', requires_grad=True)
-model = dict(decode_head=[
-    dict(
-        type='FCNHead',
-        in_channels=[18, 36, 72, 144],
-        channels=sum([18, 36, 72, 144]),
-        in_index=(0, 1, 2, 3),
-        input_transform='resize_concat',
-        kernel_size=1,
-        num_convs=1,
-        concat_input=False,
-        dropout_ratio=-1,
-        num_classes=150,
-        norm_cfg=norm_cfg,
-        align_corners=False,
-        loss_decode=dict(
-            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
-    dict(
-        type='OCRHead',
-        in_channels=[18, 36, 72, 144],
-        in_index=(0, 1, 2, 3),
-        input_transform='resize_concat',
-        channels=512,
-        ocr_channels=256,
-        dropout_ratio=-1,
-        num_classes=150,
-        norm_cfg=norm_cfg,
-        align_corners=False,
-        loss_decode=dict(
-            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
-])
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/css/chat_style-messenger.css
DELETED
@@ -1,99 +0,0 @@
-.message {
-  padding-bottom: 25px;
-  font-size: 15px;
-  font-family: 'Noto Sans', Helvetica, Arial, sans-serif;
-  line-height: 1.428571429;
-}
-
-.circle-you {
-  width: 50px;
-  height: 50px;
-  background-color: rgb(238, 78, 59);
-  border-radius: 50%;
-}
-
-.circle-bot {
-  width: 50px;
-  height: 50px;
-  background-color: rgb(59, 78, 244);
-  border-radius: 50%;
-  float: left;
-  margin-right: 10px;
-  margin-top: 5px;
-}
-
-.circle-bot img,
-.circle-you img {
-  border-radius: 50%;
-  width: 100%;
-  height: 100%;
-  object-fit: cover;
-}
-
-.circle-you {
-  margin-top: 5px;
-  float: right;
-}
-
-.circle-bot + .text, .circle-you + .text {
-  border-radius: 18px;
-  padding: 8px 12px;
-}
-
-.circle-bot + .text {
-  background-color: #E4E6EB;
-  float: left;
-}
-
-.circle-you + .text {
-  float: right;
-  background-color: rgb(0, 132, 255);
-  margin-right: 10px;
-}
-
-.circle-you + .text div, .circle-you + .text *, .dark .circle-you + .text div, .dark .circle-you + .text * {
-  color: #FFF !important;
-}
-
-.circle-you + .text .username {
-  text-align: right;
-}
-
-.dark .circle-bot + .text div, .dark .circle-bot + .text * {
-  color: #000;
-}
-
-.text {
-  max-width: 80%;
-}
-
-.text p {
-  margin-top: 5px;
-}
-
-.username {
-  font-weight: bold;
-}
-
-.message-body {
-}
-
-.message-body img {
-  max-width: 300px;
-  max-height: 300px;
-  border-radius: 20px;
-}
-
-.message-body p {
-  margin-bottom: 0 !important;
-  font-size: 15px !important;
-  line-height: 1.428571429 !important;
-}
-
-.dark .message-body p em {
-  color: rgb(138, 138, 138) !important;
-}
-
-.message-body p em {
-  color: rgb(110, 110, 110) !important;
-}
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/cnn/bricks/scale.py
DELETED
@@ -1,21 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import torch
-import torch.nn as nn
-
-
-class Scale(nn.Module):
-    """A learnable scale parameter.
-
-    This layer scales the input by a learnable factor. It multiplies a
-    learnable scale parameter of shape (1,) with input of any shape.
-
-    Args:
-        scale (float): Initial value of scale factor. Default: 1.0
-    """
-
-    def __init__(self, scale=1.0):
-        super(Scale, self).__init__()
-        self.scale = nn.Parameter(torch.tensor(scale, dtype=torch.float))
-
-    def forward(self, x):
-        return x * self.scale
spaces/Anonymous-sub/Rerender/ControlNet/ldm/modules/midas/midas/__init__.py
DELETED
File without changes
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/command/install.py
DELETED
@@ -1,139 +0,0 @@
|
|
1 |
-
from distutils.errors import DistutilsArgError
|
2 |
-
import inspect
|
3 |
-
import glob
|
4 |
-
import warnings
|
5 |
-
import platform
|
6 |
-
import distutils.command.install as orig
|
7 |
-
|
8 |
-
import setuptools
|
9 |
-
|
10 |
-
# Prior to numpy 1.9, NumPy relies on the '_install' name, so provide it for
|
11 |
-
# now. See https://github.com/pypa/setuptools/issues/199/
|
12 |
-
_install = orig.install
|
13 |
-
|
14 |
-
|
15 |
-
class install(orig.install):
|
16 |
-
"""Use easy_install to install the package, w/dependencies"""
|
17 |
-
|
18 |
-
user_options = orig.install.user_options + [
|
19 |
-
('old-and-unmanageable', None, "Try not to use this!"),
|
20 |
-
('single-version-externally-managed', None,
|
21 |
-
"used by system package builders to create 'flat' eggs"),
|
22 |
-
]
|
23 |
-
boolean_options = orig.install.boolean_options + [
|
24 |
-
'old-and-unmanageable', 'single-version-externally-managed',
|
25 |
-
]
|
26 |
-
new_commands = [
|
27 |
-
('install_egg_info', lambda self: True),
|
28 |
-
('install_scripts', lambda self: True),
|
29 |
-
]
|
30 |
-
_nc = dict(new_commands)
|
31 |
-
|
32 |
-
def initialize_options(self):
|
33 |
-
|
34 |
-
warnings.warn(
|
35 |
-
"setup.py install is deprecated. "
|
36 |
-
"Use build and pip and other standards-based tools.",
|
37 |
-
setuptools.SetuptoolsDeprecationWarning,
|
38 |
-
)
|
39 |
-
|
40 |
-
orig.install.initialize_options(self)
|
41 |
-
self.old_and_unmanageable = None
|
42 |
-
self.single_version_externally_managed = None
|
43 |
-
|
44 |
-
def finalize_options(self):
|
45 |
-
orig.install.finalize_options(self)
|
46 |
-
if self.root:
|
47 |
-
self.single_version_externally_managed = True
|
48 |
-
elif self.single_version_externally_managed:
|
49 |
-
if not self.root and not self.record:
|
50 |
-
raise DistutilsArgError(
|
51 |
-
"You must specify --record or --root when building system"
|
52 |
-
" packages"
|
53 |
-
)
|
54 |
-
|
55 |
-
def handle_extra_path(self):
|
56 |
-
if self.root or self.single_version_externally_managed:
|
57 |
-
# explicit backward-compatibility mode, allow extra_path to work
|
58 |
-
return orig.install.handle_extra_path(self)
|
59 |
-
|
60 |
-
# Ignore extra_path when installing an egg (or being run by another
|
61 |
-
# command without --root or --single-version-externally-managed
|
62 |
-
self.path_file = None
|
63 |
-
self.extra_dirs = ''
|
64 |
-
|
65 |
-
def run(self):
|
66 |
-
# Explicit request for old-style install? Just do it
|
67 |
-
if self.old_and_unmanageable or self.single_version_externally_managed:
|
68 |
-
return orig.install.run(self)
|
69 |
-
|
70 |
-
if not self._called_from_setup(inspect.currentframe()):
|
71 |
-
# Run in backward-compatibility mode to support bdist_* commands.
|
72 |
-
orig.install.run(self)
|
73 |
-
else:
|
74 |
-
self.do_egg_install()
|
75 |
-
|
76 |
-
@staticmethod
|
77 |
-
def _called_from_setup(run_frame):
|
78 |
-
"""
|
79 |
-
Attempt to detect whether run() was called from setup() or by another
|
80 |
-
command. If called by setup(), the parent caller will be the
|
81 |
-
'run_command' method in 'distutils.dist', and *its* caller will be
|
82 |
-
the 'run_commands' method. If called any other way, the
|
83 |
-
immediate caller *might* be 'run_command', but it won't have been
|
84 |
-
called by 'run_commands'. Return True in that case or if a call stack
|
85 |
-
is unavailable. Return False otherwise.
|
86 |
-
"""
|
87 |
-
if run_frame is None:
|
88 |
-
msg = "Call stack not available. bdist_* commands may fail."
|
89 |
-
warnings.warn(msg)
|
90 |
-
if platform.python_implementation() == 'IronPython':
|
91 |
-
msg = "For best results, pass -X:Frames to enable call stack."
|
92 |
-
warnings.warn(msg)
|
93 |
-
return True
|
94 |
-
|
95 |
-
frames = inspect.getouterframes(run_frame)
|
96 |
-
for frame in frames[2:4]:
|
97 |
-
caller, = frame[:1]
|
98 |
-
info = inspect.getframeinfo(caller)
|
99 |
-
caller_module = caller.f_globals.get('__name__', '')
|
100 |
-
|
101 |
-
if caller_module == "setuptools.dist" and info.function == "run_command":
|
102 |
-
# Starting from v61.0.0 setuptools overwrites dist.run_command
|
103 |
-
continue
|
104 |
-
|
105 |
-
return (
|
106 |
-
caller_module == 'distutils.dist'
|
107 |
-
and info.function == 'run_commands'
|
108 |
-
)
|
109 |
-
|
110 |
-
def do_egg_install(self):
|
111 |
-
|
112 |
-
easy_install = self.distribution.get_command_class('easy_install')
|
113 |
-
|
114 |
-
cmd = easy_install(
|
115 |
-
self.distribution, args="x", root=self.root, record=self.record,
|
116 |
-
)
|
117 |
-
cmd.ensure_finalized() # finalize before bdist_egg munges install cmd
|
118 |
-
cmd.always_copy_from = '.' # make sure local-dir eggs get installed
|
119 |
-
|
120 |
-
# pick up setup-dir .egg files only: no .egg-info
|
121 |
-
cmd.package_index.scan(glob.glob('*.egg'))
|
122 |
-
|
123 |
-
self.run_command('bdist_egg')
|
124 |
-
args = [self.distribution.get_command_obj('bdist_egg').egg_output]
|
125 |
-
|
126 |
-
if setuptools.bootstrap_install_from:
|
127 |
-
# Bootstrap self-installation of setuptools
|
128 |
-
args.insert(0, setuptools.bootstrap_install_from)
|
129 |
-
|
130 |
-
cmd.args = args
|
131 |
-
cmd.run(show_deprecation=False)
|
132 |
-
setuptools.bootstrap_install_from = None
|
133 |
-
|
134 |
-
|
135 |
-
# XXX Python 3.1 doesn't see _nc if this is inside the class
|
136 |
-
install.sub_commands = (
|
137 |
-
[cmd for cmd in orig.install.sub_commands if cmd[0] not in install._nc] +
|
138 |
-
install.new_commands
|
139 |
-
)
|
spaces/Awesimo/jojogan/e4e/models/stylegan2/op/fused_bias_act.cpp
DELETED
@@ -1,21 +0,0 @@
-#include <torch/extension.h>
-
-
-torch::Tensor fused_bias_act_op(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer,
-                                int act, int grad, float alpha, float scale);
-
-#define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor")
-#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
-#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)
-
-torch::Tensor fused_bias_act(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer,
-                             int act, int grad, float alpha, float scale) {
-    CHECK_CUDA(input);
-    CHECK_CUDA(bias);
-
-    return fused_bias_act_op(input, bias, refer, act, grad, alpha, scale);
-}
-
-PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
-    m.def("fused_bias_act", &fused_bias_act, "fused bias act (CUDA)");
-}
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/export/caffe2_inference.py
DELETED
@@ -1,161 +0,0 @@
|
|
1 |
-
# Copyright (c) Facebook, Inc. and its affiliates.
|
2 |
-
|
3 |
-
import logging
|
4 |
-
import numpy as np
|
5 |
-
from itertools import count
|
6 |
-
import torch
|
7 |
-
from caffe2.proto import caffe2_pb2
|
8 |
-
from caffe2.python import core
|
9 |
-
|
10 |
-
from .caffe2_modeling import META_ARCH_CAFFE2_EXPORT_TYPE_MAP, convert_batched_inputs_to_c2_format
|
11 |
-
from .shared import ScopedWS, get_pb_arg_vali, get_pb_arg_vals, infer_device_type
|
12 |
-
|
13 |
-
logger = logging.getLogger(__name__)
|
14 |
-
|
15 |
-
|
16 |
-
# ===== ref: mobile-vision predictor's 'Caffe2Wrapper' class ======
|
17 |
-
class ProtobufModel(torch.nn.Module):
|
18 |
-
"""
|
19 |
-
Wrapper of a caffe2's protobuf model.
|
20 |
-
It works just like nn.Module, but running caffe2 under the hood.
|
21 |
-
Input/Output are tuple[tensor] that match the caffe2 net's external_input/output.
|
22 |
-
"""
|
23 |
-
|
24 |
-
_ids = count(0)
|
25 |
-
|
26 |
-
def __init__(self, predict_net, init_net):
|
27 |
-
logger.info(f"Initializing ProtobufModel for: {predict_net.name} ...")
|
28 |
-
super().__init__()
|
29 |
-
assert isinstance(predict_net, caffe2_pb2.NetDef)
|
30 |
-
assert isinstance(init_net, caffe2_pb2.NetDef)
|
31 |
-
# create unique temporary workspace for each instance
|
32 |
-
self.ws_name = "__tmp_ProtobufModel_{}__".format(next(self._ids))
|
33 |
-
self.net = core.Net(predict_net)
|
34 |
-
|
35 |
-
logger.info("Running init_net once to fill the parameters ...")
|
36 |
-
with ScopedWS(self.ws_name, is_reset=True, is_cleanup=False) as ws:
|
37 |
-
ws.RunNetOnce(init_net)
|
38 |
-
uninitialized_external_input = []
|
39 |
-
for blob in self.net.Proto().external_input:
|
40 |
-
if blob not in ws.Blobs():
|
41 |
-
uninitialized_external_input.append(blob)
|
42 |
-
ws.CreateBlob(blob)
|
43 |
-
ws.CreateNet(self.net)
|
44 |
-
|
45 |
-
self._error_msgs = set()
|
46 |
-
self._input_blobs = uninitialized_external_input
|
47 |
-
|
48 |
-
def _infer_output_devices(self, inputs):
|
49 |
-
"""
|
50 |
-
Returns:
|
51 |
-
list[str]: list of device for each external output
|
52 |
-
"""
|
53 |
-
|
54 |
-
def _get_device_type(torch_tensor):
|
55 |
-
assert torch_tensor.device.type in ["cpu", "cuda"]
|
56 |
-
assert torch_tensor.device.index == 0
|
57 |
-
return torch_tensor.device.type
|
58 |
-
|
59 |
-
predict_net = self.net.Proto()
|
60 |
-
input_device_types = {
|
61 |
-
(name, 0): _get_device_type(tensor) for name, tensor in zip(self._input_blobs, inputs)
|
62 |
-
}
|
63 |
-
device_type_map = infer_device_type(
|
64 |
-
predict_net, known_status=input_device_types, device_name_style="pytorch"
|
65 |
-
)
|
66 |
-
ssa, versions = core.get_ssa(predict_net)
|
67 |
-
versioned_outputs = [(name, versions[name]) for name in predict_net.external_output]
|
68 |
-
output_devices = [device_type_map[outp] for outp in versioned_outputs]
|
69 |
-
return output_devices
|
70 |
-
|
71 |
-
def forward(self, inputs):
|
72 |
-
"""
|
73 |
-
Args:
|
74 |
-
inputs (tuple[torch.Tensor])
|
75 |
-
|
76 |
-
Returns:
|
77 |
-
tuple[torch.Tensor]
|
78 |
-
"""
|
79 |
-
assert len(inputs) == len(self._input_blobs), (
|
80 |
-
f"Length of inputs ({len(inputs)}) "
|
81 |
-
f"doesn't match the required input blobs: {self._input_blobs}"
|
82 |
-
)
|
83 |
-
|
84 |
-
with ScopedWS(self.ws_name, is_reset=False, is_cleanup=False) as ws:
|
85 |
-
for b, tensor in zip(self._input_blobs, inputs):
|
86 |
-
ws.FeedBlob(b, tensor)
|
87 |
-
|
88 |
-
try:
|
89 |
-
ws.RunNet(self.net.Proto().name)
|
90 |
-
except RuntimeError as e:
|
91 |
-
if not str(e) in self._error_msgs:
|
92 |
-
self._error_msgs.add(str(e))
|
93 |
-
logger.warning("Encountered new RuntimeError: \n{}".format(str(e)))
|
94 |
-
logger.warning("Catch the error and use partial results.")
|
95 |
-
|
96 |
-
c2_outputs = [ws.FetchBlob(b) for b in self.net.Proto().external_output]
|
97 |
-
# Remove outputs of current run, this is necessary in order to
|
98 |
-
# prevent fetching the result from previous run if the model fails
|
99 |
-
# in the middle.
|
100 |
-
for b in self.net.Proto().external_output:
|
101 |
-
# Needs to create uninitialized blob to make the net runable.
|
102 |
-
# This is "equivalent" to: ws.RemoveBlob(b) then ws.CreateBlob(b),
|
103 |
-
# but there'no such API.
|
104 |
-
ws.FeedBlob(b, f"{b}, a C++ native class of type nullptr (uninitialized).")
|
105 |
-
|
106 |
-
# Cast output to torch.Tensor on the desired device
|
107 |
-
output_devices = (
|
108 |
-
self._infer_output_devices(inputs)
|
109 |
-
if any(t.device.type != "cpu" for t in inputs)
|
110 |
-
else ["cpu" for _ in self.net.Proto().external_output]
|
111 |
-
)
|
112 |
-
|
113 |
-
outputs = []
|
114 |
-
for name, c2_output, device in zip(
|
115 |
-
self.net.Proto().external_output, c2_outputs, output_devices
|
116 |
-
):
|
117 |
-
if not isinstance(c2_output, np.ndarray):
|
118 |
-
raise RuntimeError(
|
119 |
-
"Invalid output for blob {}, received: {}".format(name, c2_output)
|
120 |
-
)
|
121 |
-
outputs.append(torch.tensor(c2_output).to(device=device))
|
122 |
-
return tuple(outputs)
|
123 |
-
|
124 |
-
|
125 |
-
class ProtobufDetectionModel(torch.nn.Module):
|
126 |
-
"""
|
127 |
-
A class works just like a pytorch meta arch in terms of inference, but running
|
128 |
-
caffe2 model under the hood.
|
129 |
-
"""
|
130 |
-
|
131 |
-
def __init__(self, predict_net, init_net, *, convert_outputs=None):
|
132 |
-
"""
|
133 |
-
Args:
|
134 |
-
predict_net, init_net (core.Net): caffe2 nets
|
135 |
-
convert_outptus (callable): a function that converts caffe2
|
136 |
-
outputs to the same format of the original pytorch model.
|
137 |
-
By default, use the one defined in the caffe2 meta_arch.
|
138 |
-
"""
|
139 |
-
super().__init__()
|
140 |
-
self.protobuf_model = ProtobufModel(predict_net, init_net)
|
141 |
-
self.size_divisibility = get_pb_arg_vali(predict_net, "size_divisibility", 0)
|
142 |
-
self.device = get_pb_arg_vals(predict_net, "device", b"cpu").decode("ascii")
|
143 |
-
|
144 |
-
if convert_outputs is None:
|
145 |
-
meta_arch = get_pb_arg_vals(predict_net, "meta_architecture", b"GeneralizedRCNN")
|
146 |
-
meta_arch = META_ARCH_CAFFE2_EXPORT_TYPE_MAP[meta_arch.decode("ascii")]
|
147 |
-
self._convert_outputs = meta_arch.get_outputs_converter(predict_net, init_net)
|
148 |
-
else:
|
149 |
-
self._convert_outputs = convert_outputs
|
150 |
-
|
151 |
-
def _convert_inputs(self, batched_inputs):
|
152 |
-
# currently all models convert inputs in the same way
|
153 |
-
return convert_batched_inputs_to_c2_format(
|
154 |
-
batched_inputs, self.size_divisibility, self.device
|
155 |
-
)
|
156 |
-
|
157 |
-
def forward(self, batched_inputs):
|
158 |
-
c2_inputs = self._convert_inputs(batched_inputs)
|
159 |
-
c2_results = self.protobuf_model(c2_inputs)
|
160 |
-
c2_results = dict(zip(self.protobuf_model.net.Proto().external_output, c2_results))
|
161 |
-
return self._convert_outputs(batched_inputs, c2_inputs, c2_results)
|
spaces/Benson/text-generation/Examples/Calle Carx 1.74 5 Mod Apk.md
DELETED
@@ -1,64 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>CarX Street 1.74.5 Mod APK: Un juego de carreras gratis y divertido para Android</h1>
|
3 |
-
<p>Si usted es un fan de los juegos de carreras de coches, es posible que desee echar un vistazo a CarX Street, un juego de carreras gratis de CarX Technology para dispositivos Android. CarX Street es un juego de carreras realista e inmersivo que te permite personalizar tus coches, elegir tus pistas y competir con otros jugadores en línea o fuera de línea. En este artículo, le diremos qué es CarX Street, cómo descargar e instalar CarX Street 1.74.5 Mod APK, y cuáles son los beneficios de usar esta versión modificada del juego. </p>
|
4 |
-
<h2>¿Qué es CarX Street? </h2>
|
5 |
-
<p>CarX Street es un juego de carreras que simula la cultura de las carreras callejeras, donde puedes correr con diferentes tipos de autos, desde autos clásicos hasta autos deportivos modernos, en varias pistas urbanas, desde carreteras hasta zonas industriales. También puede personalizar sus coches con diferentes partes, colores, pegatinas y calcomanías, para que se vean únicos y se adapten a su estilo. También puede actualizar sus coches con diferentes motores, transmisiones, suspensiones, frenos, neumáticos y más, para mejorar su rendimiento y manejo. </p>
|
6 |
-
<h2>calle carx 1.74 5 mod apk</h2><br /><p><b><b>Download Zip</b> ✵✵✵ <a href="https://bltlly.com/2v6L8s">https://bltlly.com/2v6L8s</a></b></p><br /><br />
|
7 |
-
<h3>Características de CarX Street</h3>
|
8 |
-
<p>CarX Street tiene muchas características que lo convierten en un juego de carreras divertido y emocionante para usuarios de Android. Estos son algunos de ellos:</p>
|
9 |
-
<h4>Física y gráficos realistas</h4>
|
10 |
-
<p>CarX Street utiliza el CarX Physics Engine, una tecnología patentada que simula el comportamiento realista de los automóviles en diferentes superficies y condiciones. Puede sentir la diferencia entre conducir sobre asfalto, grava, arena, nieve o hielo, así como los efectos de la gravedad, la inercia, la fricción y la aerodinámica. También puede ver los efectos de daño realistas en sus coches, como arañazos, abolladuras, ventanas rotas o humo. </p>
|
11 |
-
|
12 |
-
<h4>Coches y pistas personalizables</h4>
|
13 |
-
<p>CarX Street le permite personalizar sus coches con más de 1000 piezas y accesorios, como parachoques, spoilers, campanas, ruedas, escapes, luces, espejos y más. También puede cambiar el color de sus coches con más de 100 opciones de pintura, o añadir pegatinas y calcomanías para hacerlos más personales. También puede crear sus propias pistas personalizadas con la función Editor de pistas, donde puede elegir la ubicación, longitud, ancho, curvatura, elevación, tipo de superficie y obstáculos de su pista. </p>
|
14 |
-
<h4>Modos online y offline</h4>
|
15 |
-
<p>CarX Street te permite jugar online o offline dependiendo de tu preferencia. Puedes jugar online con otros jugadores de todo el mundo en diferentes modos, como Carrera rápida, Time Attack, Drift Race o Torneo. También puedes chatear con otros jugadores, unirte a clubes o crear tu propio club. Puedes jugar sin conexión con oponentes de IA en diferentes modos, como Carrera, Free Ride o Test Drive. También puede jugar sin conexión con sus amigos en el mismo dispositivo con el modo de pantalla dividida. </p>
|
16 |
-
<h3>¿Cómo descargar e instalar CarX Street 1.74.5 Mod APK? </h3>
|
17 |
-
<p>Si desea descargar e instalar CarX Street 1.74.5 Mod APK, debe seguir estos pasos:</p>
|
18 |
-
<h4>Requisitos y permisos</h4>
|
19 |
-
<p>Antes de descargar e instalar CarX Street 1.74.5 Mod APK, debe asegurarse de que su dispositivo cumple con los siguientes requisitos y permisos:</p>
|
20 |
-
<ul>
|
21 |
-
<li>Tu dispositivo debe tener Android 6.0 o superior. </li>
|
22 |
-
<li> Su dispositivo debe tener al menos 2 GB de RAM y 1 GB de espacio de almacenamiento libre. </li>
|
23 |
-
<li> Es necesario habilitar la instalación de aplicaciones de fuentes desconocidas en la configuración del dispositivo. </li>
|
24 |
-
<li>Necesitas permitir que la aplicación acceda al almacenamiento, ubicación, cámara, micrófono y red de tu dispositivo. </li>
|
25 |
-
</ul>
|
26 |
-
<h4>Pasos para descargar e instalar</h4>
|
27 |
-
<p>Después de haber comprobado los requisitos y permisos, puede seguir estos pasos para descargar e instalar CarX Street 1.74.5 Mod APK:</p>
|
28 |
-
<ol>
|
29 |
-
|
30 |
-
<li>Localice el archivo descargado en el administrador de archivos de su dispositivo y toque en él para iniciar el proceso de instalación. </li>
|
31 |
-
<li>Siga las instrucciones en la pantalla y espere a que se complete la instalación. </li>
|
32 |
-
<li>Iniciar la aplicación y disfrutar del juego. </li>
|
33 |
-
</ol>
|
34 |
-
<h3>¿Cuáles son los beneficios de CarX Street 1.74.5 Mod APK? </h3>
|
35 |
-
<p>CarX Street 1.74.5 Mod APK es una versión modificada del juego original que ofrece algunos beneficios adicionales para los jugadores. Estos son algunos de ellos:</p>
|
36 |
-
<p></p>
|
37 |
-
<h4>Dinero y oro ilimitados</h4>
|
38 |
-
<p>Con CarX Street 1.74.5 Mod APK, puede obtener dinero y oro ilimitados en el juego, que se puede utilizar para comprar y actualizar sus coches, piezas y pistas. También puede desbloquear todos los coches y pistas en el juego sin gastar dinero real. </p>
|
39 |
-
<h4>Coches y pistas desbloqueados</h4>
|
40 |
-
<p>Con CarX Street 1.74.5 Mod APK, se puede acceder a todos los coches y pistas en el juego sin tener que completar ninguna misión o logros. Puedes elegir entre más de 50 coches y más de 20 pistas en el juego, cada una con sus propias características y desafíos. </p>
|
41 |
-
<h4>No se necesitan anuncios ni root</h4>
|
42 |
-
<p>Con CarX Street 1.74.5 Mod APK, puede disfrutar del juego sin ningún molesto anuncios o ventanas emergentes que podrían interrumpir su juego o consumir sus datos. Tampoco necesitas rootear tu dispositivo para usar esta versión modificada del juego, lo que podría comprometer la seguridad o garantía de tu dispositivo. </p>
|
43 |
-
<h2>Conclusión</h2>
|
44 |
-
<p>CarX Street es un juego de carreras gratuito y divertido para dispositivos Android que ofrece física y gráficos realistas, coches y pistas personalizables, modos en línea y fuera de línea, y más. Si desea mejorar su experiencia de juego con dinero ilimitado y oro, coches desbloqueados y pistas, sin anuncios y sin raíz necesaria, puede descargar e instalar CarX Street 1.74.5 Mod APK siguiendo los pasos que hemos proporcionado en este artículo. </p>
|
45 |
-
<h2>Preguntas frecuentes</h2>
|
46 |
-
<p>Aquí hay algunas preguntas frecuentes sobre CarX Street 1.74.5 Mod APK:</p>
|
47 |
-
<ol>
|
48 |
-
|
49 |
-
<p>Sí, CarX Street 1.74.5 Mod APK es seguro de usar siempre y cuando lo descargue de una fuente de confianza, como <a href="">APKPure</a> o <a href="">APKDone</a>. Sin embargo, siempre debe tener cuidado al instalar aplicaciones de fuentes desconocidas, ya que podrían contener malware o virus que podrían dañar su dispositivo o datos. </p>
|
50 |
-
<li><b>¿Se me prohibirá el uso de CarX Street 1.74.5 Mod APK? </b></li>
|
51 |
-
<p>No, no se le prohibió el uso de CarX Street 1.74.5 Mod APK, ya que esta versión modificada del juego no interfiere con los servidores del juego o características en línea. Sin embargo, siempre debes respetar las reglas y políticas del juego, y evitar usar trucos o hacks que puedan darte una ventaja injusta sobre otros jugadores. </p>
|
52 |
-
<li><b>¿Puedo actualizar CarX Street 1.74.5 Mod APK? </b></li>
|
53 |
-
<p>Sí, puede actualizar CarX Street 1.74.5 Mod APK cada vez que hay una nueva versión disponible desde la misma fuente que lo descargó desde. Sin embargo, siempre debes hacer una copia de seguridad de los datos del juego antes de actualizarlo, ya que algunas actualizaciones pueden sobrescribir o eliminar las características modificadas o el progreso. </p>
|
54 |
-
<li><b>¿Puedo jugar CarX Street 1.74.5 Mod APK en el PC? </b></li>
|
55 |
-
<p>Sí, puede jugar CarX Street 1.74.5 Mod APK en el PC mediante el uso de un emulador de Android, como <a href="">BlueStacks</a> o <a href="">NoxPlayer</a>. Un emulador de Android es un software que le permite ejecutar aplicaciones y juegos de Android en su PC. Solo tiene que descargar e instalar el emulador en su PC, y luego descargar e instalar CarX Street 1.74.5 Mod APK en el emulador. </p>
|
56 |
-
<li><b> ¿Cuáles son algunas alternativas a CarX Street 1.74.5 Mod APK? </b></li>
|
57 |
-
<p>Si usted está buscando algunas alternativas a CarX Street 1.74.5 Mod APK, es posible que desee probar estos otros juegos de carreras para Android:</p>
|
58 |
-
<ul>
|
59 |
-
<li><a href="">Asphalt 9: Legends</a>: Un juego de carreras de ritmo rápido y lleno de acción que cuenta con más de 60 coches y más de 80 pistas de todo el mundo. </li>
|
60 |
-
|
61 |
-
<li><a href="">Need for Speed: No Limits</a>: Un emocionante juego de carreras de adrenalina que cuenta con más de 100 coches y más de 1000 carreras en diferentes modos y eventos. </li>
|
62 |
-
</ul></p> 64aa2da5cf<br />
|
63 |
-
<br />
|
64 |
-
<br />
|
spaces/Bravefe/Artist_Classification/README.md
DELETED
@@ -1,12 +0,0 @@
----
-title: Artist Classification
-emoji: 🎨
-colorFrom: purple
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.42.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/CVPR/LIVE/sample_boundary.h
DELETED
@@ -1,454 +0,0 @@
|
|
1 |
-
#pragma once
|
2 |
-
|
3 |
-
#include "diffvg.h"
|
4 |
-
#include "shape.h"
|
5 |
-
#include "scene.h"
|
6 |
-
#include "vector.h"
|
7 |
-
#include "cdf.h"
|
8 |
-
|
9 |
-
struct PathBoundaryData {
|
10 |
-
int base_point_id;
|
11 |
-
int point_id;
|
12 |
-
float t;
|
13 |
-
};
|
14 |
-
|
15 |
-
struct BoundaryData {
|
16 |
-
PathBoundaryData path;
|
17 |
-
bool is_stroke;
|
18 |
-
};
|
19 |
-
|
20 |
-
DEVICE
|
21 |
-
Vector2f sample_boundary(const Circle &circle,
|
22 |
-
float t,
|
23 |
-
Vector2f &normal,
|
24 |
-
float &pdf,
|
25 |
-
BoundaryData &,
|
26 |
-
float stroke_perturb_direction,
|
27 |
-
float stroke_radius) {
|
28 |
-
// Parametric form of a circle (t in [0, 1)):
|
29 |
-
// x = center.x + r * cos(2pi * t)
|
30 |
-
// y = center.y + r * sin(2pi * t)
|
31 |
-
auto offset = Vector2f{
|
32 |
-
circle.radius * cos(2 * float(M_PI) * t),
|
33 |
-
circle.radius * sin(2 * float(M_PI) * t)
|
34 |
-
};
|
35 |
-
normal = normalize(offset);
|
36 |
-
pdf /= (2 * float(M_PI) * circle.radius);
|
37 |
-
auto ret = circle.center + offset;
|
38 |
-
if (stroke_perturb_direction != 0.f) {
|
39 |
-
ret += stroke_perturb_direction * stroke_radius * normal;
|
40 |
-
if (stroke_perturb_direction < 0) {
|
41 |
-
// normal should point towards the perturb direction
|
42 |
-
normal = -normal;
|
43 |
-
}
|
44 |
-
}
|
45 |
-
return ret;
|
46 |
-
}
|
47 |
-
|
48 |
-
DEVICE
|
49 |
-
Vector2f sample_boundary(const Ellipse &ellipse,
|
50 |
-
float t,
|
51 |
-
Vector2f &normal,
|
52 |
-
float &pdf,
|
53 |
-
BoundaryData &,
|
54 |
-
float stroke_perturb_direction,
|
55 |
-
float stroke_radius) {
|
56 |
-
// Parametric form of a ellipse (t in [0, 1)):
|
57 |
-
// x = center.x + r.x * cos(2pi * t)
|
58 |
-
// y = center.y + r.y * sin(2pi * t)
|
59 |
-
const auto &r = ellipse.radius;
|
60 |
-
auto offset = Vector2f{
|
61 |
-
r.x * cos(2 * float(M_PI) * t),
|
62 |
-
r.y * sin(2 * float(M_PI) * t)
|
63 |
-
};
|
64 |
-
auto dxdt = -r.x * sin(2 * float(M_PI) * t) * 2 * float(M_PI);
|
65 |
-
auto dydt = r.y * cos(2 * float(M_PI) * t) * 2 * float(M_PI);
|
66 |
-
// tangent is normalize(dxdt, dydt)
|
67 |
-
normal = normalize(Vector2f{dydt, -dxdt});
|
68 |
-
pdf /= sqrt(square(dxdt) + square(dydt));
|
69 |
-
auto ret = ellipse.center + offset;
|
70 |
-
if (stroke_perturb_direction != 0.f) {
|
71 |
-
ret += stroke_perturb_direction * stroke_radius * normal;
|
72 |
-
if (stroke_perturb_direction < 0) {
|
73 |
-
// normal should point towards the perturb direction
|
74 |
-
normal = -normal;
|
75 |
-
}
|
76 |
-
}
|
77 |
-
return ret;
|
78 |
-
}
|
79 |
-
|
80 |
-
DEVICE
|
81 |
-
Vector2f sample_boundary(const Path &path,
|
82 |
-
const float *path_length_cdf,
|
83 |
-
const float *path_length_pmf,
|
84 |
-
const int *point_id_map,
|
85 |
-
float path_length,
|
86 |
-
float t,
|
87 |
-
Vector2f &normal,
|
88 |
-
float &pdf,
|
89 |
-
BoundaryData &data,
|
90 |
-
float stroke_perturb_direction,
|
91 |
-
float stroke_radius) {
|
92 |
-
if (stroke_perturb_direction != 0.f && !path.is_closed) {
|
93 |
-
// We need to samples the "caps" of the path
|
94 |
-
// length of a cap is pi * abs(stroke_perturb_direction)
|
95 |
-
// there are two caps
|
96 |
-
auto cap_length = 0.f;
|
97 |
-
if (path.thickness != nullptr) {
|
98 |
-
auto r0 = path.thickness[0];
|
99 |
-
auto r1 = path.thickness[path.num_points - 1];
|
100 |
-
cap_length = float(M_PI) * (r0 + r1);
|
101 |
-
} else {
|
102 |
-
cap_length = 2 * float(M_PI) * stroke_radius;
|
103 |
-
}
|
104 |
-
auto cap_prob = cap_length / (cap_length + path_length);
|
105 |
-
if (t < cap_prob) {
|
106 |
-
t = t / cap_prob;
|
107 |
-
pdf *= cap_prob;
|
108 |
-
auto r0 = stroke_radius;
|
109 |
-
auto r1 = stroke_radius;
|
110 |
-
if (path.thickness != nullptr) {
|
111 |
-
r0 = path.thickness[0];
|
112 |
-
r1 = path.thickness[path.num_points - 1];
|
113 |
-
}
|
114 |
-
// HACK: in theory we want to compute the tangent and
|
115 |
-
// sample the hemi-circle, but here we just sample the
|
116 |
-
// full circle since it's less typing
|
117 |
-
if (stroke_perturb_direction < 0) {
|
118 |
-
// Sample the cap at the beginning
|
119 |
-
auto p0 = Vector2f{path.points[0], path.points[1]};
|
120 |
-
auto offset = Vector2f{
|
121 |
-
r0 * cos(2 * float(M_PI) * t),
|
122 |
-
r0 * sin(2 * float(M_PI) * t)
|
123 |
-
};
|
124 |
-
normal = normalize(offset);
|
125 |
-
pdf /= (2 * float(M_PI) * r0);
|
126 |
-
data.path.base_point_id = 0;
|
127 |
-
data.path.point_id = 0;
|
128 |
-
data.path.t = 0;
|
129 |
-
return p0 + offset;
|
130 |
-
} else {
|
131 |
-
// Sample the cap at the end
|
132 |
-
                auto p0 = Vector2f{path.points[2 * (path.num_points - 1)],
                                   path.points[2 * (path.num_points - 1) + 1]};
                auto offset = Vector2f{
                    r1 * cos(2 * float(M_PI) * t),
                    r1 * sin(2 * float(M_PI) * t)
                };
                normal = normalize(offset);
                pdf /= (2 * float(M_PI) * r1);
                data.path.base_point_id = path.num_base_points - 1;
                data.path.point_id = path.num_points - 2 -
                    path.num_control_points[data.path.base_point_id];
                data.path.t = 1;
                return p0 + offset;
            }
        } else {
            t = (t - cap_prob) / (1 - cap_prob);
            pdf *= (1 - cap_prob);
        }
    }
    // Binary search on path_length_cdf
    auto sample_id = sample(path_length_cdf,
                            path.num_base_points,
                            t,
                            &t);
    assert(sample_id >= 0 && sample_id < path.num_base_points);
    auto point_id = point_id_map[sample_id];
    if (path.num_control_points[sample_id] == 0) {
        // Straight line
        auto i0 = point_id;
        auto i1 = (i0 + 1) % path.num_points;
        assert(i0 < path.num_points);
        auto p0 = Vector2f{path.points[2 * i0], path.points[2 * i0 + 1]};
        auto p1 = Vector2f{path.points[2 * i1], path.points[2 * i1 + 1]};
        data.path.base_point_id = sample_id;
        data.path.point_id = point_id;
        data.path.t = t;
        if (t < -1e-3f || t > 1+1e-3f) {
            // return invalid sample
            pdf = 0;
            return Vector2f{0, 0};
        }
        auto tangent = (p1 - p0);
        auto tan_len = length(tangent);
        if (tan_len == 0) {
            // return invalid sample
            pdf = 0;
            return Vector2f{0, 0};
        }
        normal = Vector2f{-tangent.y, tangent.x} / tan_len;
        // length of tangent is the Jacobian of the sampling transformation
        pdf *= path_length_pmf[sample_id] / tan_len;
        auto ret = p0 + t * (p1 - p0);
        if (stroke_perturb_direction != 0.f) {
            auto r0 = stroke_radius;
            auto r1 = stroke_radius;
            if (path.thickness != nullptr) {
                r0 = path.thickness[i0];
                r1 = path.thickness[i1];
            }
            auto r = r0 + t * (r1 - r0);
            ret += stroke_perturb_direction * r * normal;
            if (stroke_perturb_direction < 0) {
                // normal should point towards the perturb direction
                normal = -normal;
            }
        }
        return ret;
    } else if (path.num_control_points[sample_id] == 1) {
        // Quadratic Bezier curve
        auto i0 = point_id;
        auto i1 = i0 + 1;
        auto i2 = (i0 + 2) % path.num_points;
        auto p0 = Vector2f{path.points[2 * i0], path.points[2 * i0 + 1]};
        auto p1 = Vector2f{path.points[2 * i1], path.points[2 * i1 + 1]};
        auto p2 = Vector2f{path.points[2 * i2], path.points[2 * i2 + 1]};
        auto eval = [&](float t) -> Vector2f {
            auto tt = 1 - t;
            return (tt*tt)*p0 + (2*tt*t)*p1 + (t*t)*p2;
        };
        data.path.base_point_id = sample_id;
        data.path.point_id = point_id;
        data.path.t = t;
        if (t < -1e-3f || t > 1+1e-3f) {
            // return invalid sample
            pdf = 0;
            return Vector2f{0, 0};
        }
        auto tangent = 2 * (1 - t) * (p1 - p0) + 2 * t * (p2 - p1);
        auto tan_len = length(tangent);
        if (tan_len == 0) {
            // return invalid sample
            pdf = 0;
            return Vector2f{0, 0};
        }
        normal = Vector2f{-tangent.y, tangent.x} / tan_len;
        // length of tangent is the Jacobian of the sampling transformation
        pdf *= path_length_pmf[sample_id] / tan_len;
        auto ret = eval(t);
        if (stroke_perturb_direction != 0.f) {
            auto r0 = stroke_radius;
            auto r1 = stroke_radius;
            auto r2 = stroke_radius;
            if (path.thickness != nullptr) {
                r0 = path.thickness[i0];
                r1 = path.thickness[i1];
                r2 = path.thickness[i2];
            }
            auto tt = 1 - t;
            auto r = (tt*tt)*r0 + (2*tt*t)*r1 + (t*t)*r2;
            ret += stroke_perturb_direction * r * normal;
            if (stroke_perturb_direction < 0) {
                // normal should point towards the perturb direction
                normal = -normal;
            }
        }
        return ret;
    } else if (path.num_control_points[sample_id] == 2) {
        // Cubic Bezier curve
        auto i0 = point_id;
        auto i1 = point_id + 1;
        auto i2 = point_id + 2;
        auto i3 = (point_id + 3) % path.num_points;
        assert(i0 >= 0 && i2 < path.num_points);
        auto p0 = Vector2f{path.points[2 * i0], path.points[2 * i0 + 1]};
        auto p1 = Vector2f{path.points[2 * i1], path.points[2 * i1 + 1]};
        auto p2 = Vector2f{path.points[2 * i2], path.points[2 * i2 + 1]};
        auto p3 = Vector2f{path.points[2 * i3], path.points[2 * i3 + 1]};
        auto eval = [&](float t) -> Vector2f {
            auto tt = 1 - t;
            return (tt*tt*tt)*p0 + (3*tt*tt*t)*p1 + (3*tt*t*t)*p2 + (t*t*t)*p3;
        };
        data.path.base_point_id = sample_id;
        data.path.point_id = point_id;
        data.path.t = t;
        if (t < -1e-3f || t > 1+1e-3f) {
            // return invalid sample
            pdf = 0;
            return Vector2f{0, 0};
        }
        auto tangent = 3 * square(1 - t) * (p1 - p0) + 6 * (1 - t) * t * (p2 - p1) + 3 * t * t * (p3 - p2);
        auto tan_len = length(tangent);
        if (tan_len == 0) {
            // return invalid sample
            pdf = 0;
            return Vector2f{0, 0};
        }
        normal = Vector2f{-tangent.y, tangent.x} / tan_len;
        // length of tangent is the Jacobian of the sampling transformation
        pdf *= path_length_pmf[sample_id] / tan_len;
        auto ret = eval(t);
        if (stroke_perturb_direction != 0.f) {
            auto r0 = stroke_radius;
            auto r1 = stroke_radius;
            auto r2 = stroke_radius;
            auto r3 = stroke_radius;
            if (path.thickness != nullptr) {
                r0 = path.thickness[i0];
                r1 = path.thickness[i1];
                r2 = path.thickness[i2];
                r3 = path.thickness[i3];
            }
            auto tt = 1 - t;
            auto r = (tt*tt*tt)*r0 + (3*tt*tt*t)*r1 + (3*tt*t*t)*r2 + (t*t*t)*r3;
            ret += stroke_perturb_direction * r * normal;
            if (stroke_perturb_direction < 0) {
                // normal should point towards the perturb direction
                normal = -normal;
            }
        }
        return ret;
    } else {
        assert(false);
    }
    assert(false);
    return Vector2f{0, 0};
}

DEVICE
Vector2f sample_boundary(const Rect &rect,
                         float t, Vector2f &normal,
                         float &pdf,
                         BoundaryData &,
                         float stroke_perturb_direction,
                         float stroke_radius) {
    // Roll a dice to decide whether to sample width or height
    auto w = rect.p_max.x - rect.p_min.x;
    auto h = rect.p_max.y - rect.p_min.y;
    pdf /= (2 * (w + h));
    if (t <= w / (w + h)) {
        // Sample width
        // reuse t for the next dice
        t *= (w + h) / w;
        // Roll a dice to decide whether to sample upper width or lower width
        if (t < 0.5f) {
            // Sample upper width
            normal = Vector2f{0, -1};
            auto ret = rect.p_min + 2 * t * Vector2f{rect.p_max.x - rect.p_min.x, 0.f};
            if (stroke_perturb_direction != 0.f) {
                ret += stroke_perturb_direction * stroke_radius * normal;
                if (stroke_perturb_direction < 0) {
                    // normal should point towards the perturb direction
                    normal = -normal;
                }
            }
            return ret;
        } else {
            // Sample lower width
            normal = Vector2f{0, 1};
            auto ret = Vector2f{rect.p_min.x, rect.p_max.y} +
                2 * (t - 0.5f) * Vector2f{rect.p_max.x - rect.p_min.x, 0.f};
            if (stroke_perturb_direction != 0.f) {
                ret += stroke_perturb_direction * stroke_radius * normal;
                if (stroke_perturb_direction < 0) {
                    // normal should point towards the perturb direction
                    normal = -normal;
                }
            }
            return ret;
        }
    } else {
        // Sample height
        // reuse t for the next dice
        assert(h > 0);
        t = (t - w / (w + h)) * (w + h) / h;
        // Roll a dice to decide whether to sample left height or right height
        if (t < 0.5f) {
            // Sample left height
            normal = Vector2f{-1, 0};
            auto ret = rect.p_min + 2 * t * Vector2f{0.f, rect.p_max.y - rect.p_min.y};
            if (stroke_perturb_direction != 0.f) {
                ret += stroke_perturb_direction * stroke_radius * normal;
                if (stroke_perturb_direction < 0) {
                    // normal should point towards the perturb direction
                    normal = -normal;
                }
            }
            return ret;
        } else {
            // Sample right height
            normal = Vector2f{1, 0};
            auto ret = Vector2f{rect.p_max.x, rect.p_min.y} +
                2 * (t - 0.5f) * Vector2f{0.f, rect.p_max.y - rect.p_min.y};
            if (stroke_perturb_direction != 0.f) {
                ret += stroke_perturb_direction * stroke_radius * normal;
                if (stroke_perturb_direction < 0) {
                    // normal should point towards the perturb direction
                    normal = -normal;
                }
            }
            return ret;
        }
    }
}

DEVICE
Vector2f sample_boundary(const SceneData &scene,
                         int shape_group_id,
                         int shape_id,
                         float t,
                         Vector2f &normal,
                         float &pdf,
                         BoundaryData &data) {
    const ShapeGroup &shape_group = scene.shape_groups[shape_group_id];
    const Shape &shape = scene.shapes[shape_id];
    pdf = 1;
    // Choose which one to sample: stroke discontinuities or fill discontinuities.
    // TODO: we don't need to sample fill discontinuities when stroke alpha is 1 and both
    // fill and stroke color exists
    auto stroke_perturb = false;
    if (shape_group.fill_color != nullptr && shape_group.stroke_color != nullptr) {
        if (t < 0.5f) {
            stroke_perturb = false;
            t = 2 * t;
            pdf = 0.5f;
        } else {
            stroke_perturb = true;
            t = 2 * (t - 0.5f);
            pdf = 0.5f;
        }
    } else if (shape_group.stroke_color != nullptr) {
        stroke_perturb = true;
    }
    data.is_stroke = stroke_perturb;
    auto stroke_perturb_direction = 0.f;
    if (stroke_perturb) {
        if (t < 0.5f) {
            stroke_perturb_direction = -1.f;
            t = 2 * t;
            pdf *= 0.5f;
        } else {
            stroke_perturb_direction = 1.f;
            t = 2 * (t - 0.5f);
            pdf *= 0.5f;
        }
    }
    switch (shape.type) {
        case ShapeType::Circle:
            return sample_boundary(
                *(const Circle *)shape.ptr, t, normal, pdf, data, stroke_perturb_direction, shape.stroke_width);
        case ShapeType::Ellipse:
            return sample_boundary(
                *(const Ellipse *)shape.ptr, t, normal, pdf, data, stroke_perturb_direction, shape.stroke_width);
        case ShapeType::Path:
            return sample_boundary(
                *(const Path *)shape.ptr,
                scene.path_length_cdf[shape_id],
                scene.path_length_pmf[shape_id],
                scene.path_point_id_map[shape_id],
                scene.shapes_length[shape_id],
                t,
                normal,
                pdf,
                data,
                stroke_perturb_direction,
                shape.stroke_width);
        case ShapeType::Rect:
            return sample_boundary(
                *(const Rect *)shape.ptr, t, normal, pdf, data, stroke_perturb_direction, shape.stroke_width);
    }
    assert(false);
    return Vector2f{};
}

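For illustration only, here is a small standalone Python sketch of the same rectangle-perimeter sampling scheme used by the Rect overload above: one uniform variate t first picks an edge in proportion to its length and is then reused to pick a position along that edge, and the pdf is the reciprocal of the total perimeter. The function name sample_rect_boundary and the plain-tuple types are assumptions made for this sketch; they are not part of the original source.

    import random

    def sample_rect_boundary(p_min, p_max, t):
        """Sample a point uniformly on the boundary of an axis-aligned rectangle.

        p_min, p_max: (x, y) corner tuples; t: uniform variate in [0, 1).
        Returns (point, outward_normal, pdf), mirroring the branch structure above.
        """
        w = p_max[0] - p_min[0]
        h = p_max[1] - p_min[1]
        pdf = 1.0 / (2 * (w + h))            # uniform density over the whole perimeter
        if t <= w / (w + h):                 # one of the two horizontal edges
            t = t * (w + h) / w              # reuse t for the next decision
            if t < 0.5:                      # top edge
                point = (p_min[0] + 2 * t * w, p_min[1])
                normal = (0.0, -1.0)
            else:                            # bottom edge
                point = (p_min[0] + 2 * (t - 0.5) * w, p_max[1])
                normal = (0.0, 1.0)
        else:                                # one of the two vertical edges
            t = (t - w / (w + h)) * (w + h) / h
            if t < 0.5:                      # left edge
                point = (p_min[0], p_min[1] + 2 * t * h)
                normal = (-1.0, 0.0)
            else:                            # right edge
                point = (p_max[0], p_min[1] + 2 * (t - 0.5) * h)
                normal = (1.0, 0.0)
        return point, normal, pdf

    # Example: one boundary sample of a 2x1 rectangle.
    print(sample_rect_boundary((0.0, 0.0), (2.0, 1.0), random.random()))
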
spaces/CVPR/WALT/mmdet/__init__.py
DELETED
@@ -1,28 +0,0 @@
import mmcv

from .version import __version__, short_version


def digit_version(version_str):
    digit_version = []
    for x in version_str.split('.'):
        if x.isdigit():
            digit_version.append(int(x))
        elif x.find('rc') != -1:
            patch_version = x.split('rc')
            digit_version.append(int(patch_version[0]) - 1)
            digit_version.append(int(patch_version[1]))
    return digit_version


mmcv_minimum_version = '1.2.4'
mmcv_maximum_version = '1.4.0'
mmcv_version = digit_version(mmcv.__version__)


assert (mmcv_version >= digit_version(mmcv_minimum_version)
        and mmcv_version <= digit_version(mmcv_maximum_version)), \
    f'MMCV=={mmcv.__version__} is used but incompatible. ' \
    f'Please install mmcv>={mmcv_minimum_version}, <={mmcv_maximum_version}.'

__all__ = ['__version__', 'short_version']
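
As a quick illustration of the version guard above (a sketch assuming digit_version as defined in this module is in scope; these calls are not part of the repository): digit_version turns a version string into a list of integers so that ordinary list comparison orders releases correctly, and an 'rc' component expands to an extra pair that sorts below the final release.

    assert digit_version('1.3.0') == [1, 3, 0]
    assert digit_version('1.3.0rc1') == [1, 3, -1, 1]            # '0rc1' expands to (0 - 1, 1)
    assert digit_version('1.3.0rc1') < digit_version('1.3.0')    # pre-release sorts below the release
    assert digit_version('1.2.4') <= digit_version('1.3.0') <= digit_version('1.4.0')
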
spaces/CVPR/lama-example/saicinpainting/training/modules/ffc.py
DELETED
@@ -1,485 +0,0 @@
|
|
1 |
-
# Fast Fourier Convolution NeurIPS 2020
|
2 |
-
# original implementation https://github.com/pkumivision/FFC/blob/main/model_zoo/ffc.py
|
3 |
-
# paper https://proceedings.neurips.cc/paper/2020/file/2fd5d41ec6cfab47e32164d5624269b1-Paper.pdf
|
4 |
-
|
5 |
-
import numpy as np
|
6 |
-
import torch
|
7 |
-
import torch.nn as nn
|
8 |
-
import torch.nn.functional as F
|
9 |
-
|
10 |
-
from saicinpainting.training.modules.base import get_activation, BaseDiscriminator
|
11 |
-
from saicinpainting.training.modules.spatial_transform import LearnableSpatialTransformWrapper
|
12 |
-
from saicinpainting.training.modules.squeeze_excitation import SELayer
|
13 |
-
from saicinpainting.utils import get_shape
|
14 |
-
|
15 |
-
|
16 |
-
class FFCSE_block(nn.Module):
|
17 |
-
|
18 |
-
def __init__(self, channels, ratio_g):
|
19 |
-
super(FFCSE_block, self).__init__()
|
20 |
-
in_cg = int(channels * ratio_g)
|
21 |
-
in_cl = channels - in_cg
|
22 |
-
r = 16
|
23 |
-
|
24 |
-
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
|
25 |
-
self.conv1 = nn.Conv2d(channels, channels // r,
|
26 |
-
kernel_size=1, bias=True)
|
27 |
-
self.relu1 = nn.ReLU(inplace=True)
|
28 |
-
self.conv_a2l = None if in_cl == 0 else nn.Conv2d(
|
29 |
-
channels // r, in_cl, kernel_size=1, bias=True)
|
30 |
-
self.conv_a2g = None if in_cg == 0 else nn.Conv2d(
|
31 |
-
channels // r, in_cg, kernel_size=1, bias=True)
|
32 |
-
self.sigmoid = nn.Sigmoid()
|
33 |
-
|
34 |
-
def forward(self, x):
|
35 |
-
x = x if type(x) is tuple else (x, 0)
|
36 |
-
id_l, id_g = x
|
37 |
-
|
38 |
-
x = id_l if type(id_g) is int else torch.cat([id_l, id_g], dim=1)
|
39 |
-
x = self.avgpool(x)
|
40 |
-
x = self.relu1(self.conv1(x))
|
41 |
-
|
42 |
-
x_l = 0 if self.conv_a2l is None else id_l * \
|
43 |
-
self.sigmoid(self.conv_a2l(x))
|
44 |
-
x_g = 0 if self.conv_a2g is None else id_g * \
|
45 |
-
self.sigmoid(self.conv_a2g(x))
|
46 |
-
return x_l, x_g
|
47 |
-
|
48 |
-
|
49 |
-
class FourierUnit(nn.Module):
|
50 |
-
|
51 |
-
def __init__(self, in_channels, out_channels, groups=1, spatial_scale_factor=None, spatial_scale_mode='bilinear',
|
52 |
-
spectral_pos_encoding=False, use_se=False, se_kwargs=None, ffc3d=False, fft_norm='ortho'):
|
53 |
-
# bn_layer not used
|
54 |
-
super(FourierUnit, self).__init__()
|
55 |
-
self.groups = groups
|
56 |
-
|
57 |
-
self.conv_layer = torch.nn.Conv2d(in_channels=in_channels * 2 + (2 if spectral_pos_encoding else 0),
|
58 |
-
out_channels=out_channels * 2,
|
59 |
-
kernel_size=1, stride=1, padding=0, groups=self.groups, bias=False)
|
60 |
-
self.bn = torch.nn.BatchNorm2d(out_channels * 2)
|
61 |
-
self.relu = torch.nn.ReLU(inplace=True)
|
62 |
-
|
63 |
-
# squeeze and excitation block
|
64 |
-
self.use_se = use_se
|
65 |
-
if use_se:
|
66 |
-
if se_kwargs is None:
|
67 |
-
se_kwargs = {}
|
68 |
-
self.se = SELayer(self.conv_layer.in_channels, **se_kwargs)
|
69 |
-
|
70 |
-
self.spatial_scale_factor = spatial_scale_factor
|
71 |
-
self.spatial_scale_mode = spatial_scale_mode
|
72 |
-
self.spectral_pos_encoding = spectral_pos_encoding
|
73 |
-
self.ffc3d = ffc3d
|
74 |
-
self.fft_norm = fft_norm
|
75 |
-
|
76 |
-
def forward(self, x):
|
77 |
-
batch = x.shape[0]
|
78 |
-
|
79 |
-
if self.spatial_scale_factor is not None:
|
80 |
-
orig_size = x.shape[-2:]
|
81 |
-
x = F.interpolate(x, scale_factor=self.spatial_scale_factor, mode=self.spatial_scale_mode, align_corners=False)
|
82 |
-
|
83 |
-
r_size = x.size()
|
84 |
-
# (batch, c, h, w/2+1, 2)
|
85 |
-
fft_dim = (-3, -2, -1) if self.ffc3d else (-2, -1)
|
86 |
-
ffted = torch.fft.rfftn(x, dim=fft_dim, norm=self.fft_norm)
|
87 |
-
ffted = torch.stack((ffted.real, ffted.imag), dim=-1)
|
88 |
-
ffted = ffted.permute(0, 1, 4, 2, 3).contiguous() # (batch, c, 2, h, w/2+1)
|
89 |
-
ffted = ffted.view((batch, -1,) + ffted.size()[3:])
|
90 |
-
|
91 |
-
if self.spectral_pos_encoding:
|
92 |
-
height, width = ffted.shape[-2:]
|
93 |
-
coords_vert = torch.linspace(0, 1, height)[None, None, :, None].expand(batch, 1, height, width).to(ffted)
|
94 |
-
coords_hor = torch.linspace(0, 1, width)[None, None, None, :].expand(batch, 1, height, width).to(ffted)
|
95 |
-
ffted = torch.cat((coords_vert, coords_hor, ffted), dim=1)
|
96 |
-
|
97 |
-
if self.use_se:
|
98 |
-
ffted = self.se(ffted)
|
99 |
-
|
100 |
-
ffted = self.conv_layer(ffted) # (batch, c*2, h, w/2+1)
|
101 |
-
ffted = self.relu(self.bn(ffted))
|
102 |
-
|
103 |
-
ffted = ffted.view((batch, -1, 2,) + ffted.size()[2:]).permute(
|
104 |
-
0, 1, 3, 4, 2).contiguous() # (batch,c, t, h, w/2+1, 2)
|
105 |
-
ffted = torch.complex(ffted[..., 0], ffted[..., 1])
|
106 |
-
|
107 |
-
ifft_shape_slice = x.shape[-3:] if self.ffc3d else x.shape[-2:]
|
108 |
-
output = torch.fft.irfftn(ffted, s=ifft_shape_slice, dim=fft_dim, norm=self.fft_norm)
|
109 |
-
|
110 |
-
if self.spatial_scale_factor is not None:
|
111 |
-
output = F.interpolate(output, size=orig_size, mode=self.spatial_scale_mode, align_corners=False)
|
112 |
-
|
113 |
-
return output
|
114 |
-
|
115 |
-
|
116 |
-
class SeparableFourierUnit(nn.Module):
|
117 |
-
|
118 |
-
def __init__(self, in_channels, out_channels, groups=1, kernel_size=3):
|
119 |
-
# bn_layer not used
|
120 |
-
super(SeparableFourierUnit, self).__init__()
|
121 |
-
self.groups = groups
|
122 |
-
row_out_channels = out_channels // 2
|
123 |
-
col_out_channels = out_channels - row_out_channels
|
124 |
-
self.row_conv = torch.nn.Conv2d(in_channels=in_channels * 2,
|
125 |
-
out_channels=row_out_channels * 2,
|
126 |
-
kernel_size=(kernel_size, 1), # kernel size is always like this, but the data will be transposed
|
127 |
-
stride=1, padding=(kernel_size // 2, 0),
|
128 |
-
padding_mode='reflect',
|
129 |
-
groups=self.groups, bias=False)
|
130 |
-
self.col_conv = torch.nn.Conv2d(in_channels=in_channels * 2,
|
131 |
-
out_channels=col_out_channels * 2,
|
132 |
-
kernel_size=(kernel_size, 1), # kernel size is always like this, but the data will be transposed
|
133 |
-
stride=1, padding=(kernel_size // 2, 0),
|
134 |
-
padding_mode='reflect',
|
135 |
-
groups=self.groups, bias=False)
|
136 |
-
self.row_bn = torch.nn.BatchNorm2d(row_out_channels * 2)
|
137 |
-
self.col_bn = torch.nn.BatchNorm2d(col_out_channels * 2)
|
138 |
-
self.relu = torch.nn.ReLU(inplace=True)
|
139 |
-
|
140 |
-
def process_branch(self, x, conv, bn):
|
141 |
-
batch = x.shape[0]
|
142 |
-
|
143 |
-
r_size = x.size()
|
144 |
-
# (batch, c, h, w/2+1, 2)
|
145 |
-
ffted = torch.fft.rfft(x, norm="ortho")
|
146 |
-
ffted = torch.stack((ffted.real, ffted.imag), dim=-1)
|
147 |
-
ffted = ffted.permute(0, 1, 4, 2, 3).contiguous() # (batch, c, 2, h, w/2+1)
|
148 |
-
ffted = ffted.view((batch, -1,) + ffted.size()[3:])
|
149 |
-
|
150 |
-
ffted = self.relu(bn(conv(ffted)))
|
151 |
-
|
152 |
-
ffted = ffted.view((batch, -1, 2,) + ffted.size()[2:]).permute(
|
153 |
-
0, 1, 3, 4, 2).contiguous() # (batch,c, t, h, w/2+1, 2)
|
154 |
-
ffted = torch.complex(ffted[..., 0], ffted[..., 1])
|
155 |
-
|
156 |
-
output = torch.fft.irfft(ffted, s=x.shape[-1:], norm="ortho")
|
157 |
-
return output
|
158 |
-
|
159 |
-
|
160 |
-
def forward(self, x):
|
161 |
-
rowwise = self.process_branch(x, self.row_conv, self.row_bn)
|
162 |
-
colwise = self.process_branch(x.permute(0, 1, 3, 2), self.col_conv, self.col_bn).permute(0, 1, 3, 2)
|
163 |
-
out = torch.cat((rowwise, colwise), dim=1)
|
164 |
-
return out
|
165 |
-
|
166 |
-
|
167 |
-
class SpectralTransform(nn.Module):
|
168 |
-
|
169 |
-
def __init__(self, in_channels, out_channels, stride=1, groups=1, enable_lfu=True, separable_fu=False, **fu_kwargs):
|
170 |
-
# bn_layer not used
|
171 |
-
super(SpectralTransform, self).__init__()
|
172 |
-
self.enable_lfu = enable_lfu
|
173 |
-
if stride == 2:
|
174 |
-
self.downsample = nn.AvgPool2d(kernel_size=(2, 2), stride=2)
|
175 |
-
else:
|
176 |
-
self.downsample = nn.Identity()
|
177 |
-
|
178 |
-
self.stride = stride
|
179 |
-
self.conv1 = nn.Sequential(
|
180 |
-
nn.Conv2d(in_channels, out_channels //
|
181 |
-
2, kernel_size=1, groups=groups, bias=False),
|
182 |
-
nn.BatchNorm2d(out_channels // 2),
|
183 |
-
nn.ReLU(inplace=True)
|
184 |
-
)
|
185 |
-
fu_class = SeparableFourierUnit if separable_fu else FourierUnit
|
186 |
-
self.fu = fu_class(
|
187 |
-
out_channels // 2, out_channels // 2, groups, **fu_kwargs)
|
188 |
-
if self.enable_lfu:
|
189 |
-
self.lfu = fu_class(
|
190 |
-
out_channels // 2, out_channels // 2, groups)
|
191 |
-
self.conv2 = torch.nn.Conv2d(
|
192 |
-
out_channels // 2, out_channels, kernel_size=1, groups=groups, bias=False)
|
193 |
-
|
194 |
-
def forward(self, x):
|
195 |
-
|
196 |
-
x = self.downsample(x)
|
197 |
-
x = self.conv1(x)
|
198 |
-
output = self.fu(x)
|
199 |
-
|
200 |
-
if self.enable_lfu:
|
201 |
-
n, c, h, w = x.shape
|
202 |
-
split_no = 2
|
203 |
-
split_s = h // split_no
|
204 |
-
xs = torch.cat(torch.split(
|
205 |
-
x[:, :c // 4], split_s, dim=-2), dim=1).contiguous()
|
206 |
-
xs = torch.cat(torch.split(xs, split_s, dim=-1),
|
207 |
-
dim=1).contiguous()
|
208 |
-
xs = self.lfu(xs)
|
209 |
-
xs = xs.repeat(1, 1, split_no, split_no).contiguous()
|
210 |
-
else:
|
211 |
-
xs = 0
|
212 |
-
|
213 |
-
output = self.conv2(x + output + xs)
|
214 |
-
|
215 |
-
return output
|
216 |
-
|
217 |
-
|
218 |
-
class FFC(nn.Module):
|
219 |
-
|
220 |
-
def __init__(self, in_channels, out_channels, kernel_size,
|
221 |
-
ratio_gin, ratio_gout, stride=1, padding=0,
|
222 |
-
dilation=1, groups=1, bias=False, enable_lfu=True,
|
223 |
-
padding_type='reflect', gated=False, **spectral_kwargs):
|
224 |
-
super(FFC, self).__init__()
|
225 |
-
|
226 |
-
assert stride == 1 or stride == 2, "Stride should be 1 or 2."
|
227 |
-
self.stride = stride
|
228 |
-
|
229 |
-
in_cg = int(in_channels * ratio_gin)
|
230 |
-
in_cl = in_channels - in_cg
|
231 |
-
out_cg = int(out_channels * ratio_gout)
|
232 |
-
out_cl = out_channels - out_cg
|
233 |
-
#groups_g = 1 if groups == 1 else int(groups * ratio_gout)
|
234 |
-
#groups_l = 1 if groups == 1 else groups - groups_g
|
235 |
-
|
236 |
-
self.ratio_gin = ratio_gin
|
237 |
-
self.ratio_gout = ratio_gout
|
238 |
-
self.global_in_num = in_cg
|
239 |
-
|
240 |
-
module = nn.Identity if in_cl == 0 or out_cl == 0 else nn.Conv2d
|
241 |
-
self.convl2l = module(in_cl, out_cl, kernel_size,
|
242 |
-
stride, padding, dilation, groups, bias, padding_mode=padding_type)
|
243 |
-
module = nn.Identity if in_cl == 0 or out_cg == 0 else nn.Conv2d
|
244 |
-
self.convl2g = module(in_cl, out_cg, kernel_size,
|
245 |
-
stride, padding, dilation, groups, bias, padding_mode=padding_type)
|
246 |
-
module = nn.Identity if in_cg == 0 or out_cl == 0 else nn.Conv2d
|
247 |
-
self.convg2l = module(in_cg, out_cl, kernel_size,
|
248 |
-
stride, padding, dilation, groups, bias, padding_mode=padding_type)
|
249 |
-
module = nn.Identity if in_cg == 0 or out_cg == 0 else SpectralTransform
|
250 |
-
self.convg2g = module(
|
251 |
-
in_cg, out_cg, stride, 1 if groups == 1 else groups // 2, enable_lfu, **spectral_kwargs)
|
252 |
-
|
253 |
-
self.gated = gated
|
254 |
-
module = nn.Identity if in_cg == 0 or out_cl == 0 or not self.gated else nn.Conv2d
|
255 |
-
self.gate = module(in_channels, 2, 1)
|
256 |
-
|
257 |
-
def forward(self, x):
|
258 |
-
x_l, x_g = x if type(x) is tuple else (x, 0)
|
259 |
-
out_xl, out_xg = 0, 0
|
260 |
-
|
261 |
-
if self.gated:
|
262 |
-
total_input_parts = [x_l]
|
263 |
-
if torch.is_tensor(x_g):
|
264 |
-
total_input_parts.append(x_g)
|
265 |
-
total_input = torch.cat(total_input_parts, dim=1)
|
266 |
-
|
267 |
-
gates = torch.sigmoid(self.gate(total_input))
|
268 |
-
g2l_gate, l2g_gate = gates.chunk(2, dim=1)
|
269 |
-
else:
|
270 |
-
g2l_gate, l2g_gate = 1, 1
|
271 |
-
|
272 |
-
if self.ratio_gout != 1:
|
273 |
-
out_xl = self.convl2l(x_l) + self.convg2l(x_g) * g2l_gate
|
274 |
-
if self.ratio_gout != 0:
|
275 |
-
out_xg = self.convl2g(x_l) * l2g_gate + self.convg2g(x_g)
|
276 |
-
|
277 |
-
return out_xl, out_xg
|
278 |
-
|
279 |
-
|
280 |
-
class FFC_BN_ACT(nn.Module):
|
281 |
-
|
282 |
-
def __init__(self, in_channels, out_channels,
|
283 |
-
kernel_size, ratio_gin, ratio_gout,
|
284 |
-
stride=1, padding=0, dilation=1, groups=1, bias=False,
|
285 |
-
norm_layer=nn.BatchNorm2d, activation_layer=nn.Identity,
|
286 |
-
padding_type='reflect',
|
287 |
-
enable_lfu=True, **kwargs):
|
288 |
-
super(FFC_BN_ACT, self).__init__()
|
289 |
-
self.ffc = FFC(in_channels, out_channels, kernel_size,
|
290 |
-
ratio_gin, ratio_gout, stride, padding, dilation,
|
291 |
-
groups, bias, enable_lfu, padding_type=padding_type, **kwargs)
|
292 |
-
lnorm = nn.Identity if ratio_gout == 1 else norm_layer
|
293 |
-
gnorm = nn.Identity if ratio_gout == 0 else norm_layer
|
294 |
-
global_channels = int(out_channels * ratio_gout)
|
295 |
-
self.bn_l = lnorm(out_channels - global_channels)
|
296 |
-
self.bn_g = gnorm(global_channels)
|
297 |
-
|
298 |
-
lact = nn.Identity if ratio_gout == 1 else activation_layer
|
299 |
-
gact = nn.Identity if ratio_gout == 0 else activation_layer
|
300 |
-
self.act_l = lact(inplace=True)
|
301 |
-
self.act_g = gact(inplace=True)
|
302 |
-
|
303 |
-
def forward(self, x):
|
304 |
-
x_l, x_g = self.ffc(x)
|
305 |
-
x_l = self.act_l(self.bn_l(x_l))
|
306 |
-
x_g = self.act_g(self.bn_g(x_g))
|
307 |
-
return x_l, x_g
|
308 |
-
|
309 |
-
|
310 |
-
class FFCResnetBlock(nn.Module):
|
311 |
-
def __init__(self, dim, padding_type, norm_layer, activation_layer=nn.ReLU, dilation=1,
|
312 |
-
spatial_transform_kwargs=None, inline=False, **conv_kwargs):
|
313 |
-
super().__init__()
|
314 |
-
self.conv1 = FFC_BN_ACT(dim, dim, kernel_size=3, padding=dilation, dilation=dilation,
|
315 |
-
norm_layer=norm_layer,
|
316 |
-
activation_layer=activation_layer,
|
317 |
-
padding_type=padding_type,
|
318 |
-
**conv_kwargs)
|
319 |
-
self.conv2 = FFC_BN_ACT(dim, dim, kernel_size=3, padding=dilation, dilation=dilation,
|
320 |
-
norm_layer=norm_layer,
|
321 |
-
activation_layer=activation_layer,
|
322 |
-
padding_type=padding_type,
|
323 |
-
**conv_kwargs)
|
324 |
-
if spatial_transform_kwargs is not None:
|
325 |
-
self.conv1 = LearnableSpatialTransformWrapper(self.conv1, **spatial_transform_kwargs)
|
326 |
-
self.conv2 = LearnableSpatialTransformWrapper(self.conv2, **spatial_transform_kwargs)
|
327 |
-
self.inline = inline
|
328 |
-
|
329 |
-
def forward(self, x):
|
330 |
-
if self.inline:
|
331 |
-
x_l, x_g = x[:, :-self.conv1.ffc.global_in_num], x[:, -self.conv1.ffc.global_in_num:]
|
332 |
-
else:
|
333 |
-
x_l, x_g = x if type(x) is tuple else (x, 0)
|
334 |
-
|
335 |
-
id_l, id_g = x_l, x_g
|
336 |
-
|
337 |
-
x_l, x_g = self.conv1((x_l, x_g))
|
338 |
-
x_l, x_g = self.conv2((x_l, x_g))
|
339 |
-
|
340 |
-
x_l, x_g = id_l + x_l, id_g + x_g
|
341 |
-
out = x_l, x_g
|
342 |
-
if self.inline:
|
343 |
-
out = torch.cat(out, dim=1)
|
344 |
-
return out
|
345 |
-
|
346 |
-
|
347 |
-
class ConcatTupleLayer(nn.Module):
|
348 |
-
def forward(self, x):
|
349 |
-
assert isinstance(x, tuple)
|
350 |
-
x_l, x_g = x
|
351 |
-
assert torch.is_tensor(x_l) or torch.is_tensor(x_g)
|
352 |
-
if not torch.is_tensor(x_g):
|
353 |
-
return x_l
|
354 |
-
return torch.cat(x, dim=1)
|
355 |
-
|
356 |
-
|
357 |
-
class FFCResNetGenerator(nn.Module):
|
358 |
-
def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=3, n_blocks=9, norm_layer=nn.BatchNorm2d,
|
359 |
-
padding_type='reflect', activation_layer=nn.ReLU,
|
360 |
-
up_norm_layer=nn.BatchNorm2d, up_activation=nn.ReLU(True),
|
361 |
-
init_conv_kwargs={}, downsample_conv_kwargs={}, resnet_conv_kwargs={},
|
362 |
-
spatial_transform_layers=None, spatial_transform_kwargs={},
|
363 |
-
add_out_act=True, max_features=1024, out_ffc=False, out_ffc_kwargs={}):
|
364 |
-
assert (n_blocks >= 0)
|
365 |
-
super().__init__()
|
366 |
-
|
367 |
-
model = [nn.ReflectionPad2d(3),
|
368 |
-
FFC_BN_ACT(input_nc, ngf, kernel_size=7, padding=0, norm_layer=norm_layer,
|
369 |
-
activation_layer=activation_layer, **init_conv_kwargs)]
|
370 |
-
|
371 |
-
### downsample
|
372 |
-
for i in range(n_downsampling):
|
373 |
-
mult = 2 ** i
|
374 |
-
if i == n_downsampling - 1:
|
375 |
-
cur_conv_kwargs = dict(downsample_conv_kwargs)
|
376 |
-
cur_conv_kwargs['ratio_gout'] = resnet_conv_kwargs.get('ratio_gin', 0)
|
377 |
-
else:
|
378 |
-
cur_conv_kwargs = downsample_conv_kwargs
|
379 |
-
model += [FFC_BN_ACT(min(max_features, ngf * mult),
|
380 |
-
min(max_features, ngf * mult * 2),
|
381 |
-
kernel_size=3, stride=2, padding=1,
|
382 |
-
norm_layer=norm_layer,
|
383 |
-
activation_layer=activation_layer,
|
384 |
-
**cur_conv_kwargs)]
|
385 |
-
|
386 |
-
mult = 2 ** n_downsampling
|
387 |
-
feats_num_bottleneck = min(max_features, ngf * mult)
|
388 |
-
|
389 |
-
### resnet blocks
|
390 |
-
for i in range(n_blocks):
|
391 |
-
cur_resblock = FFCResnetBlock(feats_num_bottleneck, padding_type=padding_type, activation_layer=activation_layer,
|
392 |
-
norm_layer=norm_layer, **resnet_conv_kwargs)
|
393 |
-
if spatial_transform_layers is not None and i in spatial_transform_layers:
|
394 |
-
cur_resblock = LearnableSpatialTransformWrapper(cur_resblock, **spatial_transform_kwargs)
|
395 |
-
model += [cur_resblock]
|
396 |
-
|
397 |
-
model += [ConcatTupleLayer()]
|
398 |
-
|
399 |
-
### upsample
|
400 |
-
for i in range(n_downsampling):
|
401 |
-
mult = 2 ** (n_downsampling - i)
|
402 |
-
model += [nn.ConvTranspose2d(min(max_features, ngf * mult),
|
403 |
-
min(max_features, int(ngf * mult / 2)),
|
404 |
-
kernel_size=3, stride=2, padding=1, output_padding=1),
|
405 |
-
up_norm_layer(min(max_features, int(ngf * mult / 2))),
|
406 |
-
up_activation]
|
407 |
-
|
408 |
-
if out_ffc:
|
409 |
-
model += [FFCResnetBlock(ngf, padding_type=padding_type, activation_layer=activation_layer,
|
410 |
-
norm_layer=norm_layer, inline=True, **out_ffc_kwargs)]
|
411 |
-
|
412 |
-
model += [nn.ReflectionPad2d(3),
|
413 |
-
nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
|
414 |
-
if add_out_act:
|
415 |
-
model.append(get_activation('tanh' if add_out_act is True else add_out_act))
|
416 |
-
self.model = nn.Sequential(*model)
|
417 |
-
|
418 |
-
def forward(self, input):
|
419 |
-
return self.model(input)
|
420 |
-
|
421 |
-
|
422 |
-
class FFCNLayerDiscriminator(BaseDiscriminator):
|
423 |
-
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, max_features=512,
|
424 |
-
init_conv_kwargs={}, conv_kwargs={}):
|
425 |
-
super().__init__()
|
426 |
-
self.n_layers = n_layers
|
427 |
-
|
428 |
-
def _act_ctor(inplace=True):
|
429 |
-
return nn.LeakyReLU(negative_slope=0.2, inplace=inplace)
|
430 |
-
|
431 |
-
kw = 3
|
432 |
-
padw = int(np.ceil((kw-1.0)/2))
|
433 |
-
sequence = [[FFC_BN_ACT(input_nc, ndf, kernel_size=kw, padding=padw, norm_layer=norm_layer,
|
434 |
-
activation_layer=_act_ctor, **init_conv_kwargs)]]
|
435 |
-
|
436 |
-
nf = ndf
|
437 |
-
for n in range(1, n_layers):
|
438 |
-
nf_prev = nf
|
439 |
-
nf = min(nf * 2, max_features)
|
440 |
-
|
441 |
-
cur_model = [
|
442 |
-
FFC_BN_ACT(nf_prev, nf,
|
443 |
-
kernel_size=kw, stride=2, padding=padw,
|
444 |
-
norm_layer=norm_layer,
|
445 |
-
activation_layer=_act_ctor,
|
446 |
-
**conv_kwargs)
|
447 |
-
]
|
448 |
-
sequence.append(cur_model)
|
449 |
-
|
450 |
-
nf_prev = nf
|
451 |
-
nf = min(nf * 2, 512)
|
452 |
-
|
453 |
-
cur_model = [
|
454 |
-
FFC_BN_ACT(nf_prev, nf,
|
455 |
-
kernel_size=kw, stride=1, padding=padw,
|
456 |
-
norm_layer=norm_layer,
|
457 |
-
activation_layer=lambda *args, **kwargs: nn.LeakyReLU(*args, negative_slope=0.2, **kwargs),
|
458 |
-
**conv_kwargs),
|
459 |
-
ConcatTupleLayer()
|
460 |
-
]
|
461 |
-
sequence.append(cur_model)
|
462 |
-
|
463 |
-
sequence += [[nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw)]]
|
464 |
-
|
465 |
-
for n in range(len(sequence)):
|
466 |
-
setattr(self, 'model'+str(n), nn.Sequential(*sequence[n]))
|
467 |
-
|
468 |
-
def get_all_activations(self, x):
|
469 |
-
res = [x]
|
470 |
-
for n in range(self.n_layers + 2):
|
471 |
-
model = getattr(self, 'model' + str(n))
|
472 |
-
res.append(model(res[-1]))
|
473 |
-
return res[1:]
|
474 |
-
|
475 |
-
def forward(self, x):
|
476 |
-
act = self.get_all_activations(x)
|
477 |
-
feats = []
|
478 |
-
for out in act[:-1]:
|
479 |
-
if isinstance(out, tuple):
|
480 |
-
if torch.is_tensor(out[1]):
|
481 |
-
out = torch.cat(out, dim=1)
|
482 |
-
else:
|
483 |
-
out = out[0]
|
484 |
-
feats.append(out)
|
485 |
-
return act[-1], feats
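
The FourierUnit in the deleted ffc.py above boils down to a simple recipe: a real FFT over the spatial dimensions, a 1x1 convolution applied to the stacked real and imaginary channels, and an inverse real FFT back to the spatial domain. Below is a minimal standalone sketch of that recipe; it assumes only torch, the class name TinyFourierUnit is invented for illustration, and it concatenates the real/imaginary blocks rather than interleaving them per channel as the original does, so it is not a drop-in replacement.

    import torch
    import torch.nn as nn

    class TinyFourierUnit(nn.Module):
        """Sketch of the FFC spectral path: rFFT -> 1x1 conv on real/imag channels -> irFFT."""
        def __init__(self, channels):
            super().__init__()
            # Real and imaginary parts are concatenated along the channel axis,
            # so the 1x1 conv mixes 2 * channels inputs into 2 * channels outputs.
            self.conv = nn.Conv2d(channels * 2, channels * 2, kernel_size=1, bias=False)

        def forward(self, x):
            b, c, h, w = x.shape
            spec = torch.fft.rfftn(x, dim=(-2, -1), norm='ortho')      # (b, c, h, w//2+1), complex
            spec = torch.cat((spec.real, spec.imag), dim=1)            # (b, 2c, h, w//2+1), real
            spec = self.conv(spec)                                     # global mixing in the spectrum
            real, imag = spec.chunk(2, dim=1)
            spec = torch.complex(real, imag)
            return torch.fft.irfftn(spec, s=(h, w), dim=(-2, -1), norm='ortho')

    # Example: the output keeps the input's spatial shape.
    y = TinyFourierUnit(8)(torch.randn(2, 8, 16, 16))
    print(y.shape)  # torch.Size([2, 8, 16, 16])
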
spaces/CVPR/unicl-zero-shot-img-recog/model/image_encoder/swin_transformer.py
DELETED
@@ -1,636 +0,0 @@
|
|
1 |
-
# --------------------------------------------------------
|
2 |
-
# Swin Transformer
|
3 |
-
# Copyright (c) 2021 Microsoft
|
4 |
-
# Licensed under The MIT License [see LICENSE for details]
|
5 |
-
# Written by Ze Liu
|
6 |
-
# --------------------------------------------------------
|
7 |
-
import numpy as np
|
8 |
-
import torch
|
9 |
-
import torch.nn as nn
|
10 |
-
import torch.nn.functional as F
|
11 |
-
import torch.utils.checkpoint as checkpoint
|
12 |
-
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
|
13 |
-
|
14 |
-
|
15 |
-
class Mlp(nn.Module):
|
16 |
-
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
|
17 |
-
super().__init__()
|
18 |
-
out_features = out_features or in_features
|
19 |
-
hidden_features = hidden_features or in_features
|
20 |
-
self.fc1 = nn.Linear(in_features, hidden_features)
|
21 |
-
self.act = act_layer()
|
22 |
-
self.fc2 = nn.Linear(hidden_features, out_features)
|
23 |
-
self.drop = nn.Dropout(drop)
|
24 |
-
|
25 |
-
def forward(self, x):
|
26 |
-
x = self.fc1(x)
|
27 |
-
x = self.act(x)
|
28 |
-
x = self.drop(x)
|
29 |
-
x = self.fc2(x)
|
30 |
-
x = self.drop(x)
|
31 |
-
return x
|
32 |
-
|
33 |
-
|
34 |
-
def window_partition(x, window_size):
|
35 |
-
"""
|
36 |
-
Args:
|
37 |
-
x: (B, H, W, C)
|
38 |
-
window_size (int): window size
|
39 |
-
|
40 |
-
Returns:
|
41 |
-
windows: (num_windows*B, window_size, window_size, C)
|
42 |
-
"""
|
43 |
-
B, H, W, C = x.shape
|
44 |
-
x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
|
45 |
-
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
|
46 |
-
return windows
|
47 |
-
|
48 |
-
|
49 |
-
def window_reverse(windows, window_size, H, W):
|
50 |
-
"""
|
51 |
-
Args:
|
52 |
-
windows: (num_windows*B, window_size, window_size, C)
|
53 |
-
window_size (int): Window size
|
54 |
-
H (int): Height of image
|
55 |
-
W (int): Width of image
|
56 |
-
|
57 |
-
Returns:
|
58 |
-
x: (B, H, W, C)
|
59 |
-
"""
|
60 |
-
B = int(windows.shape[0] / (H * W / window_size / window_size))
|
61 |
-
x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
|
62 |
-
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
|
63 |
-
return x
|
64 |
-
|
65 |
-
|
66 |
-
class WindowAttention(nn.Module):
|
67 |
-
r""" Window based multi-head self attention (W-MSA) module with relative position bias.
|
68 |
-
It supports both of shifted and non-shifted window.
|
69 |
-
|
70 |
-
Args:
|
71 |
-
dim (int): Number of input channels.
|
72 |
-
window_size (tuple[int]): The height and width of the window.
|
73 |
-
num_heads (int): Number of attention heads.
|
74 |
-
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
|
75 |
-
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
|
76 |
-
attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
|
77 |
-
proj_drop (float, optional): Dropout ratio of output. Default: 0.0
|
78 |
-
"""
|
79 |
-
|
80 |
-
def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
|
81 |
-
|
82 |
-
super().__init__()
|
83 |
-
self.dim = dim
|
84 |
-
self.window_size = window_size # Wh, Ww
|
85 |
-
self.num_heads = num_heads
|
86 |
-
head_dim = dim // num_heads
|
87 |
-
self.scale = qk_scale or head_dim ** -0.5
|
88 |
-
|
89 |
-
# define a parameter table of relative position bias
|
90 |
-
self.relative_position_bias_table = nn.Parameter(
|
91 |
-
torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH
|
92 |
-
|
93 |
-
# get pair-wise relative position index for each token inside the window
|
94 |
-
coords_h = torch.arange(self.window_size[0])
|
95 |
-
coords_w = torch.arange(self.window_size[1])
|
96 |
-
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
|
97 |
-
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
|
98 |
-
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
|
99 |
-
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
|
100 |
-
relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
|
101 |
-
relative_coords[:, :, 1] += self.window_size[1] - 1
|
102 |
-
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
|
103 |
-
relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
|
104 |
-
self.register_buffer("relative_position_index", relative_position_index)
|
105 |
-
|
106 |
-
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
|
107 |
-
self.attn_drop = nn.Dropout(attn_drop)
|
108 |
-
self.proj = nn.Linear(dim, dim)
|
109 |
-
self.proj_drop = nn.Dropout(proj_drop)
|
110 |
-
|
111 |
-
trunc_normal_(self.relative_position_bias_table, std=.02)
|
112 |
-
self.softmax = nn.Softmax(dim=-1)
|
113 |
-
|
114 |
-
def forward(self, x, mask=None):
|
115 |
-
"""
|
116 |
-
Args:
|
117 |
-
x: input features with shape of (num_windows*B, N, C)
|
118 |
-
mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
|
119 |
-
"""
|
120 |
-
B_, N, C = x.shape
|
121 |
-
qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
|
122 |
-
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
|
123 |
-
|
124 |
-
q = q * self.scale
|
125 |
-
attn = (q @ k.transpose(-2, -1))
|
126 |
-
|
127 |
-
relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
|
128 |
-
self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH
|
129 |
-
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
|
130 |
-
attn = attn + relative_position_bias.unsqueeze(0)
|
131 |
-
|
132 |
-
if mask is not None:
|
133 |
-
nW = mask.shape[0]
|
134 |
-
attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
|
135 |
-
attn = attn.view(-1, self.num_heads, N, N)
|
136 |
-
attn = self.softmax(attn)
|
137 |
-
else:
|
138 |
-
attn = self.softmax(attn)
|
139 |
-
|
140 |
-
attn = self.attn_drop(attn)
|
141 |
-
|
142 |
-
x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
|
143 |
-
x = self.proj(x)
|
144 |
-
x = self.proj_drop(x)
|
145 |
-
return x
|
146 |
-
|
147 |
-
def extra_repr(self) -> str:
|
148 |
-
return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}'
|
149 |
-
|
150 |
-
def flops(self, N):
|
151 |
-
# calculate flops for 1 window with token length of N
|
152 |
-
flops = 0
|
153 |
-
# qkv = self.qkv(x)
|
154 |
-
flops += N * self.dim * 3 * self.dim
|
155 |
-
# attn = (q @ k.transpose(-2, -1))
|
156 |
-
flops += self.num_heads * N * (self.dim // self.num_heads) * N
|
157 |
-
# x = (attn @ v)
|
158 |
-
flops += self.num_heads * N * N * (self.dim // self.num_heads)
|
159 |
-
# x = self.proj(x)
|
160 |
-
flops += N * self.dim * self.dim
|
161 |
-
return flops
|
162 |
-
|
163 |
-
|
164 |
-
class SwinTransformerBlock(nn.Module):
|
165 |
-
r""" Swin Transformer Block.
|
166 |
-
|
167 |
-
Args:
|
168 |
-
dim (int): Number of input channels.
|
169 |
-
input_resolution (tuple[int]): Input resulotion.
|
170 |
-
num_heads (int): Number of attention heads.
|
171 |
-
window_size (int): Window size.
|
172 |
-
shift_size (int): Shift size for SW-MSA.
|
173 |
-
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
|
174 |
-
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
|
175 |
-
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
|
176 |
-
drop (float, optional): Dropout rate. Default: 0.0
|
177 |
-
attn_drop (float, optional): Attention dropout rate. Default: 0.0
|
178 |
-
drop_path (float, optional): Stochastic depth rate. Default: 0.0
|
179 |
-
act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
|
180 |
-
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
|
181 |
-
"""
|
182 |
-
|
183 |
-
def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
|
184 |
-
mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
|
185 |
-
act_layer=nn.GELU, norm_layer=nn.LayerNorm):
|
186 |
-
super().__init__()
|
187 |
-
self.dim = dim
|
188 |
-
self.input_resolution = input_resolution
|
189 |
-
self.num_heads = num_heads
|
190 |
-
self.window_size = window_size
|
191 |
-
self.shift_size = shift_size
|
192 |
-
self.mlp_ratio = mlp_ratio
|
193 |
-
if min(self.input_resolution) <= self.window_size:
|
194 |
-
# if window size is larger than input resolution, we don't partition windows
|
195 |
-
self.shift_size = 0
|
196 |
-
self.window_size = min(self.input_resolution)
|
197 |
-
assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size"
|
198 |
-
|
199 |
-
self.norm1 = norm_layer(dim)
|
200 |
-
self.attn = WindowAttention(
|
201 |
-
dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
|
202 |
-
qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
|
203 |
-
|
204 |
-
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
|
205 |
-
self.norm2 = norm_layer(dim)
|
206 |
-
mlp_hidden_dim = int(dim * mlp_ratio)
|
207 |
-
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
|
208 |
-
|
209 |
-
if self.shift_size > 0:
|
210 |
-
# calculate attention mask for SW-MSA
|
211 |
-
H, W = self.input_resolution
|
212 |
-
img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1
|
213 |
-
h_slices = (slice(0, -self.window_size),
|
214 |
-
slice(-self.window_size, -self.shift_size),
|
215 |
-
slice(-self.shift_size, None))
|
216 |
-
w_slices = (slice(0, -self.window_size),
|
217 |
-
slice(-self.window_size, -self.shift_size),
|
218 |
-
slice(-self.shift_size, None))
|
219 |
-
cnt = 0
|
220 |
-
for h in h_slices:
|
221 |
-
for w in w_slices:
|
222 |
-
img_mask[:, h, w, :] = cnt
|
223 |
-
cnt += 1
|
224 |
-
|
225 |
-
mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1
|
226 |
-
mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
|
227 |
-
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
|
228 |
-
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
|
229 |
-
else:
|
230 |
-
attn_mask = None
|
231 |
-
|
232 |
-
self.register_buffer("attn_mask", attn_mask)
|
233 |
-
|
234 |
-
def forward(self, x, Ph, Pw, attn_mask):
|
235 |
-
# H, W = self.input_resolution
|
236 |
-
B, L, C = x.shape
|
237 |
-
# assert L == H * W, "input feature has wrong size"
|
238 |
-
|
239 |
-
shortcut = x
|
240 |
-
x = self.norm1(x)
|
241 |
-
x = x.view(B, Ph, Pw, C)
|
242 |
-
|
243 |
-
# pad feature maps to multiples of window size
|
244 |
-
pad_l = pad_t = 0
|
245 |
-
pad_r = (self.window_size - Pw % self.window_size) % self.window_size
|
246 |
-
pad_b = (self.window_size - Ph % self.window_size) % self.window_size
|
247 |
-
x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))
|
248 |
-
_, Hp, Wp, _ = x.shape
|
249 |
-
|
250 |
-
# cyclic shift
|
251 |
-
if self.shift_size > 0:
|
252 |
-
shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
|
253 |
-
attn_mask = attn_mask
|
254 |
-
else:
|
255 |
-
shifted_x = x
|
256 |
-
attn_mask = None
|
257 |
-
|
258 |
-
# partition windows
|
259 |
-
x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C
|
260 |
-
x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C
|
261 |
-
|
262 |
-
# W-MSA/SW-MSA
|
263 |
-
attn_windows = self.attn(x_windows, mask=attn_mask) # nW*B, window_size*window_size, C
|
264 |
-
|
265 |
-
# merge windows
|
266 |
-
attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
|
267 |
-
shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp) # B H' W' C
|
268 |
-
|
269 |
-
# reverse cyclic shift
|
270 |
-
if self.shift_size > 0:
|
271 |
-
x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
|
272 |
-
else:
|
273 |
-
x = shifted_x
|
274 |
-
|
275 |
-
if pad_r > 0 or pad_b > 0:
|
276 |
-
x = x[:, :Ph, :Pw, :].contiguous()
|
277 |
-
|
278 |
-
x = x.view(B, Ph * Pw, C)
|
279 |
-
|
280 |
-
# FFN
|
281 |
-
x = shortcut + self.drop_path(x)
|
282 |
-
x = x + self.drop_path(self.mlp(self.norm2(x)))
|
283 |
-
|
284 |
-
return x
|
285 |
-
|
286 |
-
def extra_repr(self) -> str:
|
287 |
-
return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
|
288 |
-
f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"
|
289 |
-
|
290 |
-
def flops(self):
|
291 |
-
flops = 0
|
292 |
-
H, W = self.input_resolution
|
293 |
-
# norm1
|
294 |
-
flops += self.dim * H * W
|
295 |
-
# W-MSA/SW-MSA
|
296 |
-
nW = H * W / self.window_size / self.window_size
|
297 |
-
flops += nW * self.attn.flops(self.window_size * self.window_size)
|
298 |
-
# mlp
|
299 |
-
flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio
|
300 |
-
# norm2
|
301 |
-
flops += self.dim * H * W
|
302 |
-
return flops
|
303 |
-
|
304 |
-
|
305 |
-
class PatchMerging(nn.Module):
|
306 |
-
r""" Patch Merging Layer.
|
307 |
-
|
308 |
-
Args:
|
309 |
-
input_resolution (tuple[int]): Resolution of input feature.
|
310 |
-
dim (int): Number of input channels.
|
311 |
-
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
|
312 |
-
"""
|
313 |
-
|
314 |
-
def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
|
315 |
-
super().__init__()
|
316 |
-
self.input_resolution = input_resolution
|
317 |
-
self.dim = dim
|
318 |
-
self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
|
319 |
-
self.norm = norm_layer(4 * dim)
|
320 |
-
|
321 |
-
def forward(self, x, Ph, Pw):
|
322 |
-
"""
|
323 |
-
x: B, H*W, C
|
324 |
-
"""
|
325 |
-
B, L, C = x.shape
|
326 |
-
# assert L == H * W, "input feature has wrong size"
|
327 |
-
# assert Ph % 2 == 0 and Pw % 2 == 0, f"x size ({Ph}*{Pw}) are not even."
|
328 |
-
|
329 |
-
x = x.view(B, Ph, Pw, C)
|
330 |
-
|
331 |
-
# padding
|
332 |
-
pad_input = (Ph % 2 == 1) or (Pw % 2 == 1)
|
333 |
-
if pad_input:
|
334 |
-
x = F.pad(x, (0, 0, 0, Pw % 2, 0, Ph % 2))
|
335 |
-
|
336 |
-
x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C
|
337 |
-
x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C
|
338 |
-
x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C
|
339 |
-
x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C
|
340 |
-
x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C
|
341 |
-
x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C
|
342 |
-
|
343 |
-
x = self.norm(x)
|
344 |
-
x = self.reduction(x)
|
345 |
-
|
346 |
-
return x
|
347 |
-
|
348 |
-
def extra_repr(self) -> str:
|
349 |
-
return f"input_resolution={self.input_resolution}, dim={self.dim}"
|
350 |
-
|
351 |
-
def flops(self):
|
352 |
-
H, W = self.input_resolution
|
353 |
-
flops = H * W * self.dim
|
354 |
-
flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim
|
355 |
-
return flops
|
356 |
-
|
357 |
-
|
358 |
-
class BasicLayer(nn.Module):
|
359 |
-
""" A basic Swin Transformer layer for one stage.
|
360 |
-
|
361 |
-
Args:
|
362 |
-
dim (int): Number of input channels.
|
363 |
-
input_resolution (tuple[int]): Input resolution.
|
364 |
-
depth (int): Number of blocks.
|
365 |
-
num_heads (int): Number of attention heads.
|
366 |
-
window_size (int): Local window size.
|
367 |
-
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
|
368 |
-
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
|
369 |
-
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
|
370 |
-
drop (float, optional): Dropout rate. Default: 0.0
|
371 |
-
attn_drop (float, optional): Attention dropout rate. Default: 0.0
|
372 |
-
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
|
373 |
-
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
|
374 |
-
downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
|
375 |
-
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
|
376 |
-
"""
|
377 |
-
|
378 |
-
def __init__(self, dim, input_resolution, depth, num_heads, window_size,
|
379 |
-
mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
|
380 |
-
drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False):
|
381 |
-
|
382 |
-
super().__init__()
|
383 |
-
self.dim = dim
|
384 |
-
self.input_resolution = input_resolution
|
385 |
-
self.depth = depth
|
386 |
-
self.use_checkpoint = use_checkpoint
|
387 |
-
self.window_size = window_size
|
388 |
-
self.shift_size = window_size // 2
|
389 |
-
|
390 |
-
# build blocks
|
391 |
-
self.blocks = nn.ModuleList([
|
392 |
-
SwinTransformerBlock(dim=dim, input_resolution=input_resolution,
|
393 |
-
num_heads=num_heads, window_size=window_size,
|
394 |
-
shift_size=0 if (i % 2 == 0) else window_size // 2,
|
395 |
-
mlp_ratio=mlp_ratio,
|
396 |
-
qkv_bias=qkv_bias, qk_scale=qk_scale,
|
397 |
-
drop=drop, attn_drop=attn_drop,
|
398 |
-
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
|
399 |
-
norm_layer=norm_layer)
|
400 |
-
for i in range(depth)])
|
401 |
-
|
402 |
-
# patch merging layer
|
403 |
-
if downsample is not None:
|
404 |
-
self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)
|
405 |
-
else:
|
406 |
-
self.downsample = None
|
407 |
-
|
408 |
-
def forward(self, x, Ph, Pw):
|
409 |
-
|
410 |
-
# calculate attention mask for SW-MSA
|
411 |
-
Hp = int(np.ceil(Ph / self.window_size)) * self.window_size
|
412 |
-
Wp = int(np.ceil(Pw / self.window_size)) * self.window_size
|
413 |
-
img_mask = torch.zeros((1, Hp, Wp, 1), device=x.device) # 1 Hp Wp 1
|
414 |
-
h_slices = (slice(0, -self.window_size),
|
415 |
-
slice(-self.window_size, -self.shift_size),
|
416 |
-
slice(-self.shift_size, None))
|
417 |
-
w_slices = (slice(0, -self.window_size),
|
418 |
-
slice(-self.window_size, -self.shift_size),
|
419 |
-
slice(-self.shift_size, None))
|
420 |
-
cnt = 0
|
421 |
-
for h in h_slices:
|
422 |
-
for w in w_slices:
|
423 |
-
img_mask[:, h, w, :] = cnt
|
424 |
-
cnt += 1
|
425 |
-
|
426 |
-
mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1
|
427 |
-
mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
|
428 |
-
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
|
429 |
-
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
|
430 |
-
|
431 |
-
|
432 |
-
for blk in self.blocks:
|
433 |
-
if self.use_checkpoint:
|
434 |
-
x = checkpoint.checkpoint(blk, x)
|
435 |
-
else:
|
436 |
-
x = blk(x, Ph, Pw, attn_mask)
|
437 |
-
if self.downsample is not None:
|
438 |
-
x = self.downsample(x, Ph, Pw)
|
439 |
-
Ph, Pw = (Ph + 1) // 2, (Pw + 1) // 2
|
440 |
-
return x, Ph, Pw
|
441 |
-
|
442 |
-
    def extra_repr(self) -> str:
        return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"

    def flops(self):
        flops = 0
        for blk in self.blocks:
            flops += blk.flops()
        if self.downsample is not None:
            flops += self.downsample.flops()
        return flops


class PatchEmbed(nn.Module):
    r""" Image to Patch Embedding

    Args:
        img_size (int): Image size. Default: 224.
        patch_size (int): Patch token size. Default: 4.
        in_chans (int): Number of input image channels. Default: 3.
        embed_dim (int): Number of linear projection output channels. Default: 96.
        norm_layer (nn.Module, optional): Normalization layer. Default: None
    """

    def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
        self.img_size = img_size
        self.patch_size = patch_size
        self.patches_resolution = patches_resolution
        self.num_patches = patches_resolution[0] * patches_resolution[1]

        self.in_chans = in_chans
        self.embed_dim = embed_dim

        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
        if norm_layer is not None:
            self.norm = norm_layer(embed_dim)
        else:
            self.norm = None

    def forward(self, x):
        B, C, H, W = x.shape
        # FIXME look at relaxing size constraints
        # assert H == self.img_size[0] and W == self.img_size[1], \
        #     f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
        x = self.proj(x)
        Ph, Pw = x.shape[2:]
        x = x.flatten(2).transpose(1, 2)  # B Ph*Pw C
        if self.norm is not None:
            x = self.norm(x)
        return x, Ph, Pw

    def flops(self):
        Ho, Wo = self.patches_resolution
        flops = Ho * Wo * self.embed_dim * self.in_chans * (self.patch_size[0] * self.patch_size[1])
        if self.norm is not None:
            flops += Ho * Wo * self.embed_dim
        return flops


class SwinTransformer(nn.Module):
    r""" Swin Transformer
        A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
          https://arxiv.org/pdf/2103.14030

    Args:
        img_size (int | tuple(int)): Input image size. Default 224
        patch_size (int | tuple(int)): Patch size. Default: 4
        in_chans (int): Number of input image channels. Default: 3
        num_classes (int): Number of classes for classification head. Default: 1000
        embed_dim (int): Patch embedding dimension. Default: 96
        depths (tuple(int)): Depth of each Swin Transformer layer.
        num_heads (tuple(int)): Number of attention heads in different layers.
        window_size (int): Window size. Default: 7
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
        qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None
        drop_rate (float): Dropout rate. Default: 0
        attn_drop_rate (float): Attention dropout rate. Default: 0
        drop_path_rate (float): Stochastic depth rate. Default: 0.1
        norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
        ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
        patch_norm (bool): If True, add normalization after patch embedding. Default: True
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
    """

    def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=1000,
                 embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24],
                 window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None,
                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
                 norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
                 use_checkpoint=False, **kwargs):
        super().__init__()

        self.num_classes = num_classes
        self.num_layers = len(depths)
        self.embed_dim = embed_dim
        self.ape = ape
        self.patch_norm = patch_norm
        self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))
        self.mlp_ratio = mlp_ratio

        # split image into non-overlapping patches
        self.patch_embed = PatchEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,
            norm_layer=norm_layer if self.patch_norm else None)
        num_patches = self.patch_embed.num_patches
        patches_resolution = self.patch_embed.patches_resolution
        self.patches_resolution = patches_resolution

        # absolute position embedding
        if self.ape:
            self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
            trunc_normal_(self.absolute_pos_embed, std=.02)

        self.pos_drop = nn.Dropout(p=drop_rate)

        # stochastic depth
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]  # stochastic depth decay rule

        # build layers
        self.layers = nn.ModuleList()
        for i_layer in range(self.num_layers):
            layer = BasicLayer(dim=int(embed_dim * 2 ** i_layer),
                               input_resolution=(patches_resolution[0] // (2 ** i_layer),
                                                 patches_resolution[1] // (2 ** i_layer)),
                               depth=depths[i_layer],
                               num_heads=num_heads[i_layer],
                               window_size=window_size,
                               mlp_ratio=self.mlp_ratio,
                               qkv_bias=qkv_bias, qk_scale=qk_scale,
                               drop=drop_rate, attn_drop=attn_drop_rate,
                               drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
                               norm_layer=norm_layer,
                               downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
                               use_checkpoint=use_checkpoint)
            self.layers.append(layer)

        self.norm = norm_layer(self.num_features)
        self.avgpool = nn.AdaptiveAvgPool1d(1)
        self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
        self.dim_out = self.num_features

        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    @torch.jit.ignore
    def no_weight_decay(self):
        return {'absolute_pos_embed'}

    @torch.jit.ignore
    def no_weight_decay_keywords(self):
        return {'relative_position_bias_table'}

    def forward_features(self, x, output_map=False):
        x, Ph, Pw = self.patch_embed(x)
        if self.ape:
            x = x + self.absolute_pos_embed
        x = self.pos_drop(x)

        for layer in self.layers:
            x, Ph, Pw = layer(x, Ph, Pw)

        x_map = self.norm(x).transpose(1, 2)  # B C L
        x = self.avgpool(x_map)  # B C 1
        x = torch.flatten(x, 1)

        if output_map:
            return x, x_map, Ph, Pw
        else:
            return x

    def forward(self, x):
        x = self.forward_features(x)
        x = self.head(x)
        return x

    def flops(self):
        flops = 0
        flops += self.patch_embed.flops()
        for i, layer in enumerate(self.layers):
            flops += layer.flops()
        flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers)
        flops += self.num_features * self.num_classes
        return flops
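
For context, a minimal usage sketch of the class above (not part of the deleted file): it assumes the rest of the module, including BasicLayer, PatchMerging and the window-attention blocks defined earlier in the same file, is importable; the module name `swin_transformer` is hypothetical.

import torch
from swin_transformer import SwinTransformer  # hypothetical import path for the file above

# Default Swin-T style configuration, matching the argument defaults in the class.
model = SwinTransformer(img_size=224, patch_size=4, embed_dim=96,
                        depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24],
                        window_size=7, num_classes=1000)
model.eval()

x = torch.randn(1, 3, 224, 224)  # B, C, H, W
with torch.no_grad():
    logits = model(x)  # classification head output, shape (1, 1000)
    feats, fmap, Ph, Pw = model.forward_features(x, output_map=True)  # pooled features plus B C L map

print(logits.shape, feats.shape, fmap.shape, Ph, Pw)
print(model.flops())  # rough FLOPs estimate from the flops() helpers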
spaces/ClassCat/Brain-tumor-3D-segmentation-with-MONAI/README.md
DELETED
@@ -1,12 +0,0 @@
---
title: Brain Tumor 3D Segmentation With MONAI
emoji: 🌖
colorFrom: indigo
colorTo: indigo
sdk: gradio
sdk_version: 3.16.2
app_file: app.py
pinned: true
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Cpp4App/Cpp4App/app.py
DELETED
@@ -1,284 +0,0 @@
import gradio as gr
import cv2
import numpy as np
import shutil
from bs4 import BeautifulSoup
import requests
import pandas as pd

from SEM.run_single_sem import run_single_pp
from CDM.run_single import run_single_img

title = "Cpp4App"
description = "Automated Contextual Privacy Policies Generation for Mobile Apps"


def write_and_read():
    # Write
    with open('myfile.txt', 'w') as f:
        f.write('Hello, World!')

    # Read
    with open('myfile.txt', 'r') as f:
        data = f.read()

    print("this is data: ", data)

    return data

def run_demo(img_root, output_root, segment_root, file):
    print(type(file))

    # file_content = file.read().decode('utf-8')
    run_single_pp(file)

    output_board, output_data, complete_result = run_single_img(img_root, output_root, segment_root)

    return output_board, output_data, complete_result

def inference(img, html):

    write_and_read()

    if img is None or html is None:
        return None, None

    output_root = "./CDM/result_classification"
    segment_root = './SEM/txt'
    img_root = "./CDM/input_examples/1-1-write.jpg"
    pp_root = "1.txt"

    # output_root = ""
    # segment_root = ""
    # img_root = "demo_img.jpg"

    img_array = np.array(img)

    cv2.imwrite(img_root, img_array)

    # replace example string with real example
    # if html == 'html content 1':
    #     with open("examples/6.txt", "r") as f:
    #         html = f.read()
    # elif html == 'html content 2':
    #     with open("examples/11.txt", "r") as f:
    #         html = f.read()

    # print("string: ", html)
    # with open(pp_root, 'w', encoding='utf-8') as file:  # Open the destination file in text mode
    #     file.write(html)  # Write the HTML content to the destination file

    try:
        response = requests.get(html)
        response.raise_for_status()  # Will raise an exception if the status is an error
        input_text = response.text
    except requests.HTTPError:
        input_text = ""
    # print("input_text: ", input_text)
    with open(pp_root, 'w', encoding='utf-8') as file:
        file.write(input_text)

    soup = BeautifulSoup(open(pp_root, encoding='utf-8'), features="html.parser")
    # print("pp_root soup: ", soup.contents)

    output_board, output_data, complete_result = run_demo(img_root, output_root, segment_root, pp_root)

    # print(output_data)

    return output_board, output_data, complete_result

# inputs = [
#     gr.inputs.Image(type="pil", label="Image Upload"),
#     # gr.inputs.File(label="HTML File Upload"),
#     gr.inputs.Textbox(label="Text Input")
#     # gr.inputs.Textbox(lines=True, label="HTML Text")
# ]
# output = [
#     gr.outputs.Image(type="pil", label="Result Image"),
#     gr.outputs.Dataframe(type="pandas", label="Result Excel")
# ]

# gr.Interface(
#     inference,
#     # inputs,
#     # output,
#     inputs=[image_input_row, textbox_input_row],
#     outputs=[image_output_row, dataframe_output_row],
#     title=title,
#     description=description,
#     # examples=[['examples/6-8.jpg', 'examples/6.txt'], ['examples/11-9.jpg', 'examples/11.html']],
#     # examples=[['examples/6-8.jpg', example_file_content_1], ['examples/11-9.jpg', example_file_content_2]],
#     examples=[['examples/6-8.jpg', 'html content 1'], ['examples/11-9.jpg', 'html content 2']],
#     enable_queue=True,
#     capture_session=True,
#     layout='vertical'
# ).launch(debug=False)

# def example_inference():
#     image_input_bgr = cv2.imread('examples/6-8.jpg')
#     image_input = cv2.cvtColor(image_input_bgr, cv2.COLOR_BGR2RGB)
#     # text_input = 'html content 1'  # example string
#     text_input = 'https://www.whatsapp.com/legal/privacy-policy'
#
#     out_result, out_segment = inference(image_input, text_input)
#
#     return image_input, text_input, out_result, out_segment

def example_inference_1():
    image_input_bgr = cv2.imread("examples/6-8.jpg")
    image_input = cv2.cvtColor(image_input_bgr, cv2.COLOR_BGR2RGB)
    text_input = 'https://www.whatsapp.com/legal/privacy-policy'
    out_result, out_segment, complete_result = inference(image_input, text_input)
    return image_input, text_input, out_result, out_segment, complete_result

def example_inference_2():
    image_input_bgr = cv2.imread("examples/11-9.jpg")
    image_input = cv2.cvtColor(image_input_bgr, cv2.COLOR_BGR2RGB)
    text_input = 'https://values.snap.com/privacy/privacy-policy'
    out_result, out_segment, complete_result = inference(image_input, text_input)
    return image_input, text_input, out_result, out_segment, complete_result

def example_inference_3():
    image_input_bgr = cv2.imread("examples/1-1.jpg")
    image_input = cv2.cvtColor(image_input_bgr, cv2.COLOR_BGR2RGB)
    text_input = 'https://mcdonalds.com.au/privacy-policy'
    out_result, out_segment, complete_result = inference(image_input, text_input)
    return image_input, text_input, out_result, out_segment, complete_result

def new_example_inference_1():
    image_input_bgr = cv2.imread("examples/6-8.jpg")
    image_input = cv2.cvtColor(image_input_bgr, cv2.COLOR_BGR2RGB)
    text_input = 'https://www.whatsapp.com/legal/privacy-policy'

    out_result_bgr = cv2.imread("results/result_1.png")
    out_result = cv2.cvtColor(out_result_bgr, cv2.COLOR_BGR2RGB)

    out_segment = pd.read_excel("results/result_1_S.xlsx")
    complete_result = pd.read_excel("results/result_1_C.xlsx")

    return image_input, text_input, out_result, out_segment, complete_result

def new_example_inference_2():
    image_input_bgr = cv2.imread("examples/11-9.jpg")
    image_input = cv2.cvtColor(image_input_bgr, cv2.COLOR_BGR2RGB)
    text_input = 'https://values.snap.com/privacy/privacy-policy'

    out_result_bgr = cv2.imread("results/result_2.png")
    out_result = cv2.cvtColor(out_result_bgr, cv2.COLOR_BGR2RGB)

    out_segment = pd.read_excel("results/result_2_S.xlsx")
    complete_result = pd.read_excel("results/result_2_C.xlsx")

    return image_input, text_input, out_result, out_segment, complete_result

def new_example_inference_3():
    image_input_bgr = cv2.imread("examples/1-1.jpg")
    image_input = cv2.cvtColor(image_input_bgr, cv2.COLOR_BGR2RGB)
    text_input = 'https://mcdonalds.com.au/privacy-policy'

    out_result_bgr = cv2.imread("results/result_3.png")
    out_result = cv2.cvtColor(out_result_bgr, cv2.COLOR_BGR2RGB)

    out_segment = pd.read_excel("results/result_3_S.xlsx")
    complete_result = pd.read_excel("results/result_3_C.xlsx")

    return image_input, text_input, out_result, out_segment, complete_result

# def toggle_dataframe_callback():
#     complete_result_dataframe.visible = not complete_result_dataframe.visible

demo = gr.Blocks()
with demo:
    gr.Markdown("# Cpp4App\n\n**Automated Contextual Privacy Policies Generation for Mobile Apps**"
                "\n\nThere are two inputs to generate CPP for a mobile app: app's privacy policy URL link and a GUI screenshot")

    with gr.Row():
        example_image_1 = gr.Image(value="examples/6-8.jpg", label="Example 1")
        example_image_2 = gr.Image(value="examples/11-9.jpg", label="Example 2")
        example_image_3 = gr.Image(value="examples/1-1.jpg", label="Example 3")
        with gr.Column():
            gr.Markdown("**You can try with three examples we provided:**"
                        "\n\n- WhatsApp"
                        "\n\n- Snap"
                        "\n\n- Mcdonald's"
                        "\n\n**You can also try with your own example:**"
                        "\n\nUpload the screenshot and privacy policy URL link, then click 'submit' button"
                        # "\n\n"
                        # "\n\nThe three provided examples are pre-run, while your own screenshot needs to run for approximately one minute."
                        )

    with gr.Row():
        example_button_1 = gr.Button("Run with Example 1")
        example_button_2 = gr.Button("Run with Example 2")
        example_button_3 = gr.Button("Run with Example 3")
        with gr.Column():
            clear_button = gr.Button("Clear")
            submit_button = gr.Button("Submit")

    with gr.Row():
        text_input = gr.inputs.Textbox(label="URL Input for the Privacy Policy of the App")

    with gr.Column():
        image_input = gr.inputs.Image(type="pil", label="Screenshot Upload")
        result_image = gr.outputs.Image(type="pil", label="Result Screenshot")

    with gr.Row():
        result_dataframe = gr.outputs.Dataframe(type="pandas", label="Result Excel (Summarized)")

    # with gr.Row():
    #     # Create a button to control the display of complete_result_dataframe
    #     toggle_dataframe_button = gr.Button("Show Complete Result Excel")

    with gr.Row():
        complete_result_dataframe = gr.outputs.Dataframe(type="pandas", label="Result Excel (Complete)")

    # with gr.Row():
    #     example_button_1 = gr.Button("Run with Example 1")
    #     example_button_2 = gr.Button("Run with Example 2")
    #     example_button_3 = gr.Button("Run with Example 3")
    #     with gr.Column():
    #         clear_button = gr.Button("Clear")
    #         submit_button = gr.Button("Submit")
    #
    # with gr.Row():
    #     example_image_1 = gr.Image(value="examples/6-8.jpg", label="Example 1")
    #     example_image_2 = gr.Image(value="examples/11-9.jpg", label="Example 2")
    #     example_image_3 = gr.Image(value="examples/1-1.jpg", label="Example 3")
    #     with gr.Column():
    #         gr.Markdown("**You can try with three examples we provided:**"
    #                     "\n\n- WhatsApp"
    #                     "\n\n- Snap"
    #                     "\n\n- Mcdonald's"
    #                     "\n\n**You can also try with your own example:**"
    #                     "\n\nUpload the screenshot and privacy policy URL link, then click 'submit' button")

    submit_button.click(inference, inputs=[image_input, text_input], outputs=[result_image, result_dataframe, complete_result_dataframe])
    clear_button.click(lambda: [None, None, None, None, None, None], inputs=[], outputs=[image_input, text_input, result_image, result_dataframe, complete_result_dataframe])
    # example_button.click(example_inference, inputs=[], outputs=[image_input, text_input, result_image, result_dataframe])
    example_button_1.click(new_example_inference_1,
                           inputs=[],
                           outputs=[image_input, text_input, result_image, result_dataframe, complete_result_dataframe])
    example_button_2.click(new_example_inference_2,
                           inputs=[],
                           outputs=[image_input, text_input, result_image, result_dataframe, complete_result_dataframe])
    example_button_3.click(new_example_inference_3,
                           inputs=[],
                           outputs=[image_input, text_input, result_image, result_dataframe, complete_result_dataframe])

    # # Create a unique CSS ID for the dataframe output
    # dataframe_id = id(complete_result_dataframe)
    #
    # # Define CSS styles for hiding/showing the dataframe
    # hide_style = f"#{dataframe_id} {{ display: none; }}"
    # show_style = f"#{dataframe_id} {{ display: block; }}"
    #
    #
    # def toggle_dataframe_callback():
    #     if toggle_dataframe_button.label == "Show Complete Result Excel":
    #         toggle_dataframe_button.label = "Hide Complete Result Excel"
    #         gr.Html(style=show_style).show()
    #     else:
    #         toggle_dataframe_button.label = "Show Complete Result Excel"
    #         gr.Html(style=hide_style).show()

demo.launch()
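
As an illustrative sketch only (not part of the deleted app): the two-stage pipeline above can also be driven without the Gradio UI by calling `inference` directly. The example screenshot path and policy URL are taken from the examples bundled with this Space; the exact type of the returned annotated image depends on `run_single_img`, which is not shown here.

from PIL import Image

screenshot = Image.open("examples/6-8.jpg")  # one of the bundled example screenshots
policy_url = "https://www.whatsapp.com/legal/privacy-policy"

# Same call the Submit button makes: fetch the policy HTML, save the screenshot,
# run SEM (policy segmentation) and CDM (GUI widget detection), then return the
# annotated screenshot plus the summarized and complete result tables.
result_image, summary_df, complete_df = inference(screenshot, policy_url)
print(summary_df)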
spaces/Cropinky/esrgan/realesrgan/archs/discriminator_arch.py
DELETED
@@ -1,67 +0,0 @@
from basicsr.utils.registry import ARCH_REGISTRY
from torch import nn as nn
from torch.nn import functional as F
from torch.nn.utils import spectral_norm


@ARCH_REGISTRY.register()
class UNetDiscriminatorSN(nn.Module):
    """Defines a U-Net discriminator with spectral normalization (SN)

    It is used in Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data.

    Arg:
        num_in_ch (int): Channel number of inputs. Default: 3.
        num_feat (int): Channel number of base intermediate features. Default: 64.
        skip_connection (bool): Whether to use skip connections between U-Net. Default: True.
    """

    def __init__(self, num_in_ch, num_feat=64, skip_connection=True):
        super(UNetDiscriminatorSN, self).__init__()
        self.skip_connection = skip_connection
        norm = spectral_norm
        # the first convolution
        self.conv0 = nn.Conv2d(num_in_ch, num_feat, kernel_size=3, stride=1, padding=1)
        # downsample
        self.conv1 = norm(nn.Conv2d(num_feat, num_feat * 2, 4, 2, 1, bias=False))
        self.conv2 = norm(nn.Conv2d(num_feat * 2, num_feat * 4, 4, 2, 1, bias=False))
        self.conv3 = norm(nn.Conv2d(num_feat * 4, num_feat * 8, 4, 2, 1, bias=False))
        # upsample
        self.conv4 = norm(nn.Conv2d(num_feat * 8, num_feat * 4, 3, 1, 1, bias=False))
        self.conv5 = norm(nn.Conv2d(num_feat * 4, num_feat * 2, 3, 1, 1, bias=False))
        self.conv6 = norm(nn.Conv2d(num_feat * 2, num_feat, 3, 1, 1, bias=False))
        # extra convolutions
        self.conv7 = norm(nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=False))
        self.conv8 = norm(nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=False))
        self.conv9 = nn.Conv2d(num_feat, 1, 3, 1, 1)

    def forward(self, x):
        # downsample
        x0 = F.leaky_relu(self.conv0(x), negative_slope=0.2, inplace=True)
        x1 = F.leaky_relu(self.conv1(x0), negative_slope=0.2, inplace=True)
        x2 = F.leaky_relu(self.conv2(x1), negative_slope=0.2, inplace=True)
        x3 = F.leaky_relu(self.conv3(x2), negative_slope=0.2, inplace=True)

        # upsample
        x3 = F.interpolate(x3, scale_factor=2, mode='bilinear', align_corners=False)
        x4 = F.leaky_relu(self.conv4(x3), negative_slope=0.2, inplace=True)

        if self.skip_connection:
            x4 = x4 + x2
        x4 = F.interpolate(x4, scale_factor=2, mode='bilinear', align_corners=False)
        x5 = F.leaky_relu(self.conv5(x4), negative_slope=0.2, inplace=True)

        if self.skip_connection:
            x5 = x5 + x1
        x5 = F.interpolate(x5, scale_factor=2, mode='bilinear', align_corners=False)
        x6 = F.leaky_relu(self.conv6(x5), negative_slope=0.2, inplace=True)

        if self.skip_connection:
            x6 = x6 + x0

        # extra convolutions
        out = F.leaky_relu(self.conv7(x6), negative_slope=0.2, inplace=True)
        out = F.leaky_relu(self.conv8(out), negative_slope=0.2, inplace=True)
        out = self.conv9(out)

        return out
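
A quick sketch of how this discriminator is typically used (not from the deleted file, and it assumes basicsr is installed so the ARCH_REGISTRY import resolves): the network returns a one-channel realness map at the same spatial resolution as its input, since every downsampling step is undone by a matching upsampling step.

import torch

disc = UNetDiscriminatorSN(num_in_ch=3, num_feat=64, skip_connection=True)
fake_batch = torch.randn(2, 3, 128, 128)  # e.g. generator output patches
score_map = disc(fake_batch)
print(score_map.shape)  # torch.Size([2, 1, 128, 128]): per-pixel realness scores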
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/pens/roundingPen.py
DELETED
@@ -1,112 +0,0 @@
from fontTools.misc.roundTools import otRound
from fontTools.misc.transform import Transform
from fontTools.pens.filterPen import FilterPen, FilterPointPen


__all__ = ["RoundingPen", "RoundingPointPen"]


class RoundingPen(FilterPen):
    """
    Filter pen that rounds point coordinates and component XY offsets to integer.

    >>> from fontTools.pens.recordingPen import RecordingPen
    >>> recpen = RecordingPen()
    >>> roundpen = RoundingPen(recpen)
    >>> roundpen.moveTo((0.4, 0.6))
    >>> roundpen.lineTo((1.6, 2.5))
    >>> roundpen.qCurveTo((2.4, 4.6), (3.3, 5.7), (4.9, 6.1))
    >>> roundpen.curveTo((6.4, 8.6), (7.3, 9.7), (8.9, 10.1))
    >>> roundpen.addComponent("a", (1.5, 0, 0, 1.5, 10.5, -10.5))
    >>> recpen.value == [
    ...     ('moveTo', ((0, 1),)),
    ...     ('lineTo', ((2, 3),)),
    ...     ('qCurveTo', ((2, 5), (3, 6), (5, 6))),
    ...     ('curveTo', ((6, 9), (7, 10), (9, 10))),
    ...     ('addComponent', ('a', (1.5, 0, 0, 1.5, 11, -10))),
    ... ]
    True
    """

    def __init__(self, outPen, roundFunc=otRound):
        super().__init__(outPen)
        self.roundFunc = roundFunc

    def moveTo(self, pt):
        self._outPen.moveTo((self.roundFunc(pt[0]), self.roundFunc(pt[1])))

    def lineTo(self, pt):
        self._outPen.lineTo((self.roundFunc(pt[0]), self.roundFunc(pt[1])))

    def curveTo(self, *points):
        self._outPen.curveTo(
            *((self.roundFunc(x), self.roundFunc(y)) for x, y in points)
        )

    def qCurveTo(self, *points):
        self._outPen.qCurveTo(
            *((self.roundFunc(x), self.roundFunc(y)) for x, y in points)
        )

    def addComponent(self, glyphName, transformation):
        self._outPen.addComponent(
            glyphName,
            Transform(
                *transformation[:4],
                self.roundFunc(transformation[4]),
                self.roundFunc(transformation[5]),
            ),
        )


class RoundingPointPen(FilterPointPen):
    """
    Filter point pen that rounds point coordinates and component XY offsets to integer.

    >>> from fontTools.pens.recordingPen import RecordingPointPen
    >>> recpen = RecordingPointPen()
    >>> roundpen = RoundingPointPen(recpen)
    >>> roundpen.beginPath()
    >>> roundpen.addPoint((0.4, 0.6), 'line')
    >>> roundpen.addPoint((1.6, 2.5), 'line')
    >>> roundpen.addPoint((2.4, 4.6))
    >>> roundpen.addPoint((3.3, 5.7))
    >>> roundpen.addPoint((4.9, 6.1), 'qcurve')
    >>> roundpen.endPath()
    >>> roundpen.addComponent("a", (1.5, 0, 0, 1.5, 10.5, -10.5))
    >>> recpen.value == [
    ...     ('beginPath', (), {}),
    ...     ('addPoint', ((0, 1), 'line', False, None), {}),
    ...     ('addPoint', ((2, 3), 'line', False, None), {}),
    ...     ('addPoint', ((2, 5), None, False, None), {}),
    ...     ('addPoint', ((3, 6), None, False, None), {}),
    ...     ('addPoint', ((5, 6), 'qcurve', False, None), {}),
    ...     ('endPath', (), {}),
    ...     ('addComponent', ('a', (1.5, 0, 0, 1.5, 11, -10)), {}),
    ... ]
    True
    """

    def __init__(self, outPen, roundFunc=otRound):
        super().__init__(outPen)
        self.roundFunc = roundFunc

    def addPoint(self, pt, segmentType=None, smooth=False, name=None, **kwargs):
        self._outPen.addPoint(
            (self.roundFunc(pt[0]), self.roundFunc(pt[1])),
            segmentType=segmentType,
            smooth=smooth,
            name=name,
            **kwargs,
        )

    def addComponent(self, baseGlyphName, transformation, **kwargs):
        self._outPen.addComponent(
            baseGlyphName,
            Transform(
                *transformation[:4],
                self.roundFunc(transformation[4]),
                self.roundFunc(transformation[5]),
            ),
            **kwargs,
        )
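
Supplementary to the doctests above, a small sketch (not part of the module): the `roundFunc` parameter accepts any callable, so the default `otRound` can be swapped for something like `math.floor` when truncation toward negative infinity is wanted.

import math
from fontTools.pens.recordingPen import RecordingPen
from fontTools.pens.roundingPen import RoundingPen

rec = RecordingPen()
pen = RoundingPen(rec, roundFunc=math.floor)  # floor instead of the default otRound
pen.moveTo((0.9, 2.1))
pen.lineTo((3.5, -1.2))
print(rec.value)  # [('moveTo', ((0, 2),)), ('lineTo', ((3, -2),))]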
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-f90e1963.js
DELETED
The diff for this file is too large to render.
See raw diff