Commit
·
5f6f96e
1
Parent(s):
63004e7
Update parquet files (step 110 of 121)
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- spaces/17TheWord/vits-models/text/symbols.py +0 -39
- spaces/1gistliPinn/ChatGPT4/Examples/Bahubali - The Beginning Movie Download In Torrent LINK.md +0 -6
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Cryptoassets The Innovative Investor 39s Guide To Bitcoin And Beyond Pdf Download ((FREE)).md +0 -47
- spaces/1phancelerku/anime-remove-background/Create Your Own City and Explore Other Islands with City Island 5 MOD APK Terbaru.md +0 -67
- spaces/1phancelerku/anime-remove-background/Criminal Case Save the World! Mod APK - Download and Play with Unlimited Stars.md +0 -112
- spaces/1phancelerku/anime-remove-background/Download Five Night at Freddy 6 APK and Enjoy the Thrill of the Horror Game.md +0 -161
- spaces/1phancelerku/anime-remove-background/Download Red Dead Online The Ultimate Guide to the Frontier Life.md +0 -122
- spaces/44ov41za8i/FreeVC/speaker_encoder/data_objects/__init__.py +0 -2
- spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/encoders/open_clap/htsat.py +0 -1022
- spaces/AbeShinzo0708/AI_Kishida_Fumio_speaker/hooks/hook-espnet.py +0 -3
- spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/server/websearch/parseWeb.ts +0 -56
- spaces/AchyuthGamer/OpenGPT-Chat-UI/src/routes/conversation/[id]/web-search/+server.ts +0 -138
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/SetAnchor.js +0 -34
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorpicker/methods/HPalette.js +0 -96
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/fixwidthsizer/RemoveChildMethods.js +0 -28
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/roundrectanglecanvas/RoundRectangleCanvas.js +0 -2
- spaces/Alfasign/HuggingGPT-Lite/get_token_ids.py +0 -53
- spaces/Alpaca233/SadTalker/src/audio2exp_models/networks.py +0 -74
- spaces/Amrrs/DragGan-Inversion/stylegan_human/pti/training/projectors/__init__.py +0 -0
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/schedulers/ddpm.md +0 -27
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/ko/tutorials/tutorial_overview.md +0 -23
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/dreambooth/train_dreambooth_lora_sdxl.py +0 -1355
- spaces/Andy1621/uniformer_image_detection/configs/vfnet/vfnet_r101_fpn_mstrain_2x_coco.py +0 -2
- spaces/Andy1621/uniformer_image_detection/mmdet/core/evaluation/__init__.py +0 -15
- spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/ssd_head.py +0 -265
- spaces/Andy1621/uniformer_image_segmentation/configs/fp16/fcn_r101-d8_512x1024_80k_fp16_cityscapes.py +0 -5
- spaces/Anonymous-sub/Rerender/ControlNet/annotator/mlsd/__init__.py +0 -43
- spaces/AnthonyTruchetPoC/persistent-docker/Dockerfile +0 -100
- spaces/Apex-X/nono/roop/processors/frame/face_enhancer.py +0 -104
- spaces/Audiogen/vector-search-demo/app.py +0 -144
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/data/test_dataset.py +0 -134
- spaces/BMukhtar/BookRecognitionKz/README.md +0 -13
- spaces/Benson/text-generation/Examples/Ajedrez Final De Juego Estudios Mod Apk.md +0 -74
- spaces/Benson/text-generation/Examples/Controladores De Gigabyte H370m D3h.md +0 -129
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/tenacity/before.py +0 -46
- spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/pyparsing/actions.py +0 -207
- spaces/BilalSardar/StoryGenerator/README.md +0 -13
- spaces/BimboAnon/BimboProxy/README.md +0 -10
- spaces/Boadiwaa/Recipes/openai/api_resources/deployment.py +0 -62
- spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/docs/_source/advanced/adding_model.md +0 -160
- spaces/CVPR/WALT/mmdet/models/losses/balanced_l1_loss.py +0 -120
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/ImageShow.py +0 -323
- spaces/DanielGartop/SexAI/README.md +0 -10
- spaces/Datasculptor/MusicGen/audiocraft/models/lm.py +0 -527
- spaces/Dimalker/Faceswapper/roop/typing.py +0 -7
- spaces/DragGan/DragGan-Inversion/stylegan_human/torch_utils/ops/grid_sample_gradfix.py +0 -93
- spaces/Dute8788/anime/app.py +0 -52
- spaces/ECCV2022/bytetrack/tutorials/transtrack/tracker.py +0 -191
- spaces/EPFL-VILAB/MultiMAE/utils/pos_embed.py +0 -58
- spaces/Eddycrack864/Applio-Inference/infer/modules/vc/utils.py +0 -42
spaces/17TheWord/vits-models/text/symbols.py
DELETED
@@ -1,39 +0,0 @@
|
|
1 |
-
'''
|
2 |
-
Defines the set of symbols used in text input to the model.
|
3 |
-
'''
|
4 |
-
|
5 |
-
'''# japanese_cleaners
|
6 |
-
_pad = '_'
|
7 |
-
_punctuation = ',.!?-'
|
8 |
-
_letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧ↓↑ '
|
9 |
-
'''
|
10 |
-
|
11 |
-
'''# japanese_cleaners2
|
12 |
-
_pad = '_'
|
13 |
-
_punctuation = ',.!?-~…'
|
14 |
-
_letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧʦ↓↑ '
|
15 |
-
'''
|
16 |
-
|
17 |
-
'''# korean_cleaners
|
18 |
-
_pad = '_'
|
19 |
-
_punctuation = ',.!?…~'
|
20 |
-
_letters = 'ㄱㄴㄷㄹㅁㅂㅅㅇㅈㅊㅋㅌㅍㅎㄲㄸㅃㅆㅉㅏㅓㅗㅜㅡㅣㅐㅔ '
|
21 |
-
'''
|
22 |
-
|
23 |
-
'''# chinese_cleaners
|
24 |
-
_pad = '_'
|
25 |
-
_punctuation = ',。!?—…'
|
26 |
-
_letters = 'ㄅㄆㄇㄈㄉㄊㄋㄌㄍㄎㄏㄐㄑㄒㄓㄔㄕㄖㄗㄘㄙㄚㄛㄜㄝㄞㄟㄠㄡㄢㄣㄤㄥㄦㄧㄨㄩˉˊˇˋ˙ '
|
27 |
-
'''
|
28 |
-
|
29 |
-
# zh_ja_mixture_cleaners
|
30 |
-
_pad = '_'
|
31 |
-
_punctuation = ',.!?-~…'
|
32 |
-
_letters = 'AEINOQUabdefghijklmnoprstuvwyzʃʧʦɯɹəɥ⁼ʰ`→↓↑ '
|
33 |
-
|
34 |
-
|
35 |
-
# Export all symbols:
|
36 |
-
symbols = [_pad] + list(_punctuation) + list(_letters)
|
37 |
-
|
38 |
-
# Special symbol ids
|
39 |
-
SPACE_ID = symbols.index(" ")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Bahubali - The Beginning Movie Download In Torrent LINK.md
DELETED
@@ -1,6 +0,0 @@
|
|
1 |
-
<h2>Bahubali - The Beginning movie download in torrent</h2><br /><p><b><b>Download</b> ✑ ✑ ✑ <a href="https://imgfil.com/2uy0q6">https://imgfil.com/2uy0q6</a></b></p><br /><br />
|
2 |
-
<br />
|
3 |
-
d5da3c52bf<br />
|
4 |
-
<br />
|
5 |
-
<br />
|
6 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Cryptoassets The Innovative Investor 39s Guide To Bitcoin And Beyond Pdf Download ((FREE)).md
DELETED
@@ -1,47 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Cryptoassets: The Innovative Investor's Guide to Bitcoin and Beyond PDF Download</h1>
|
3 |
-
<p>Cryptoassets are digital assets that use cryptography and blockchain technology to enable secure, decentralized, and peer-to-peer transactions. They include cryptocurrencies like Bitcoin and Ethereum, as well as tokens that represent various rights, utilities, or assets on a blockchain. Cryptoassets have emerged as a new asset class that offers investors unprecedented opportunities to diversify their portfolios, hedge against inflation and geopolitical risks, and access new markets and innovations.</p>
|
4 |
-
<p>However, investing in cryptoassets is not without challenges and risks. Cryptoassets are highly volatile, complex, and unregulated. They require a high level of technical knowledge, research, and due diligence. They also pose unique tax implications and legal uncertainties. Therefore, investors need to educate themselves on the fundamentals of cryptoassets, the best platforms and strategies to use, and the benefits and risks involved.</p>
|
5 |
-
<h2>cryptoassets the innovative investor 39;s guide to bitcoin and beyond pdf download</h2><br /><p><b><b>Download File</b> ⚹ <a href="https://urlin.us/2uT0M7">https://urlin.us/2uT0M7</a></b></p><br /><br />
|
6 |
-
<h2>How to Invest in Cryptoassets</h2>
|
7 |
-
<p>There are different ways to invest in cryptoassets depending on your goals, risk appetite, and preferences. Some of the most common methods are:</p>
|
8 |
-
<ul>
|
9 |
-
<li><strong>Buying cryptocurrency directly:</strong> You can buy cryptocurrency using a crypto exchange or through certain broker-dealers. You will need a digital wallet to store your coins and a private key to access them. You can choose from thousands of cryptocurrencies with different features, functions, and values. Some of the most popular ones are Bitcoin, Ethereum, Ripple, Litecoin, and Cardano.</li>
|
10 |
-
<li><strong>Investing in cryptocurrency companies:</strong> You can invest in companies that are involved in the crypto industry, such as mining, hardware, software, or services. You can buy shares or equity of these companies through traditional stock exchanges or platforms that support crypto stocks. Some examples of crypto companies are Coinbase, MicroStrategy, Square, PayPal, Nvidia, and AMD.</li>
|
11 |
-
<li><strong>Investing in cryptocurrency funds:</strong> You can invest in funds that track the performance of a basket of cryptocurrencies or crypto-related assets. These funds can be exchange-traded funds (ETFs), index funds, futures funds, or investment trusts. They offer exposure to the crypto market without requiring you to buy or store individual coins. Some examples of crypto funds are Grayscale Bitcoin Trust, Bitwise 10 Crypto Index Fund, VanEck Vectors Digital Assets Equity ETF, and CoinShares Crypto Basket ETP.</li>
|
12 |
-
</ul>
|
13 |
-
<h2>Benefits and Risks of Cryptoassets</h2>
|
14 |
-
<p>Investing in cryptoassets can offer several benefits for investors who are looking for alternative ways to grow their wealth and hedge against uncertainties. Some of the benefits are:</p>
|
15 |
-
<ul>
|
16 |
-
<li><strong>High returns:</strong> Cryptoassets have shown remarkable growth in value over the past decade, outperforming most traditional assets. For instance, Bitcoin has increased from less than $1 in 2010 to over $50,000 in 2021. Ethereum has risen from less than $1 in 2015 to over $4,000 in 2021. While past performance is not indicative of future results, many analysts believe that cryptoassets have more room for appreciation in the long term.</li>
|
17 |
-
<li><strong>Diversification:</strong> Cryptoassets have low correlation with other asset classes such as stocks, bonds, gold, or real estate. This means that they tend to move independently or even inversely to these assets. This can help reduce the overall risk and volatility of your portfolio by adding an uncorrelated source of returns.</li>
|
18 |
-
<li><strong>Innovation:</strong> Cryptoassets represent the cutting-edge of technology and finance. They enable new forms of transactions, contracts, applications, and business models that were not possible before. They also foster creativity, experimentation, and collaboration among developers, entrepreneurs, and users. By investing in cryptoassets, you can support and benefit from the innovation and disruption that they bring to various industries and sectors.</li>
|
19 |
-
<li><strong>Hedging:</strong> Cryptoassets can serve as a hedge against inflation, currency devaluation, geopolitical instability, and regulatory interference. They have a limited supply that cannot be manipulated by central authorities. They also operate on a global and decentralized network that is resistant to censorship and interference. They can help preserve your purchasing power and protect your assets from external shocks.</li>
|
20 |
-
</ul>
|
21 |
-
<p>However, investing in cryptoassets also involves significant risks that you should be aware of and prepared for. Some of the risks are:</p>
|
22 |
-
<ul>
|
23 |
-
<li><strong>Volatility:</strong> Cryptoassets are extremely volatile and unpredictable. They can experience huge price swings in a short period of time due to various factors such as supply and demand, news and events, market sentiment, speculation, and manipulation. They can also be affected by technical issues, hacks, or cyberattacks that can disrupt their functionality or security. You should be ready to face high fluctuations in your portfolio value and be able to cope with the emotional stress that comes with it.</li>
|
24 |
-
<li><strong>Complexity:</strong> Cryptoassets are complex and technical in nature. They require a steep learning curve and a lot of research and analysis to understand their underlying mechanisms, features, and value propositions. They also involve various technical aspects such as wallets, keys, addresses, transactions, fees, protocols, consensus mechanisms, forks, and upgrades. You should have a solid grasp of these concepts and how they work before investing in cryptoassets.</li>
|
25 |
-
<li><strong>Regulation:</strong> Cryptoassets are largely unregulated and operate in a legal gray area. They are subject to different laws and regulations depending on the jurisdiction, platform, and type of cryptoasset. They can also face regulatory uncertainty or changes that can affect their legality, validity, or usability. You should be aware of the legal status and implications of your crypto investments and comply with the relevant rules and requirements.</li>
|
26 |
-
<li><strong>Taxation:</strong> Cryptoassets are subject to taxation depending on your country, income level, and type of transaction. They can be treated as property, income, capital gains, or losses for tax purposes. They can also trigger taxable events when you buy, sell, trade, or use them. You should keep track of your crypto transactions and report them accurately on your tax returns. You should also consult a tax professional if you have any doubts or questions about your crypto taxes.</li>
|
27 |
-
</ul>
|
28 |
-
<h2>Cryptoassets: The Innovative Investor's Guide to Bitcoin and Beyond PDF Download</h2>
|
29 |
-
<p>If you want to learn more about cryptoassets and how to invest in them wisely, you might want to read the book <em>Cryptoassets: The Innovative Investor's Guide to Bitcoin and Beyond</em> by Chris Burniske and Jack Tatar. This book is one of the most comprehensive and authoritative guides on the topic of crypto investing. It covers everything from the history and evolution of cryptoassets, to the valuation and analysis of different types of cryptoassets, to the portfolio management and risk mitigation strategies for crypto investors.</p>
|
30 |
-
<p>The book is written in a clear and engaging style that is suitable for both beginners and experts. It is full of practical examples, case studies, charts, graphs, and tables that illustrate the concepts and data. It is also updated with the latest developments and trends in the crypto space.</p>
|
31 |
-
<p>You can download the PDF version of the book for free from this link: [Cryptoassets: The Innovative Investor's Guide to Bitcoin and Beyond PDF Download]. You will need a PDF reader software or app to open it. You can also buy the paperback or Kindle version of the book from Amazon or other online retailers.</p>
|
32 |
-
<p>By reading this book, you will gain a deeper understanding of cryptoassets and how they work. You will also learn how to evaluate their potential value and performance, how to diversify your portfolio with them, how to manage your risks and rewards, and how to navigate the complex and dynamic crypto market.</p>
|
33 |
-
<h2>Conclusion</h2>
|
34 |
-
<p>Cryptoassets are a new asset class that offer investors unprecedented opportunities to diversify their portfolios, hedge against uncertainties, and access new markets and innovations. However, they also involve high volatility, complexity, regulation, and taxation. Therefore, investors need to educate themselves on the fundamentals of cryptoassets, the best platforms and strategies to use, and the benefits and risks involved. One of the best resources to learn about cryptoassets is the book Cryptoassets: The Innovative Investor's Guide to Bitcoin and Beyond by Chris Burniske and Jack Tatar. This book provides a comprehensive and authoritative guide on the topic of crypto investing. It covers everything from the history and evolution of cryptoassets, to the valuation and analysis of different types of cryptoassets, to the portfolio management and risk mitigation strategies for crypto investors. You can download the PDF version of the book for free from this link: [Cryptoassets: The Innovative Investor's Guide to Bitcoin and Beyond PDF Download]. You can also buy the paperback or Kindle version of the book from Amazon or other online retailers. We hope that this article has given you some useful information and insights on cryptoassets and how to invest in them wisely. If you have any questions or comments, please feel free to leave them below. Thank you for reading and happy investing! <h2>FAQs</h2>
|
35 |
-
<h3>What are cryptoassets?</h3>
|
36 |
-
<p>Cryptoassets are digital assets that use cryptography and blockchain technology to enable secure, decentralized, and peer-to-peer transactions. They include cryptocurrencies like Bitcoin and Ethereum, as well as tokens that represent various rights, utilities, or assets on a blockchain.</p>
|
37 |
-
<p></p>
|
38 |
-
<h3>Why are cryptoassets important for investors?</h3>
|
39 |
-
<p>Cryptoassets offer investors unprecedented opportunities to diversify their portfolios, hedge against uncertainties, and access new markets and innovations. They also represent the cutting-edge of technology and finance. They enable new forms of transactions, contracts, applications, and business models that were not possible before.</p>
|
40 |
-
<h3>How can I invest in cryptoassets?</h3>
|
41 |
-
<p>There are different ways to invest in cryptoassets depending on your goals, risk appetite, and preferences. Some of the most common methods are buying cryptocurrency directly, investing in cryptocurrency companies, or investing in cryptocurrency funds.</p>
|
42 |
-
<h3>What are the benefits and risks of cryptoassets?</h3>
|
43 |
-
<p>Some of the benefits of cryptoassets are high returns, diversification, innovation, and hedging. Some of the risks are volatility, complexity, regulation, and taxation.</p>
|
44 |
-
<h3>What is Cryptoassets: The Innovative Investor's Guide to Bitcoin and Beyond?</h3>
|
45 |
-
<p>Cryptoassets: The Innovative Investor's Guide to Bitcoin and Beyond is a book by Chris Burniske and Jack Tatar that provides a comprehensive and authoritative guide on the topic of crypto investing. It covers everything from the history and evolution of cryptoassets, to the valuation and analysis of different types of cryptoassets, to the portfolio management and risk mitigation strategies for crypto investors.</p> 197e85843d<br />
|
46 |
-
<br />
|
47 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Create Your Own City and Explore Other Islands with City Island 5 MOD APK Terbaru.md
DELETED
@@ -1,67 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Download City Island 5 Mod Apk Terbaru: A Guide for City Building Enthusiasts</h1>
|
3 |
-
<p>If you are a fan of city building simulation games, you might have heard of City Island 5, one of the most popular and successful titles in the genre. In this game, you can create and manage your own city on various islands, each with its own unique theme and terrain. You can also visit other players' cities and see how they are developing theirs. But what if you want to have more fun and freedom in your city building adventure? Well, you might want to download City Island 5 mod apk terbaru, a modified version of the game that offers unlimited money, gold, islands, and other features. In this article, we will guide you on how to download and install City Island 5 mod apk terbaru, what are the features and benefits of using it, how to play and enjoy it, and what are the pros and cons of using it. So, let's get started!</p>
|
4 |
-
<h2>How to Download and Install City Island 5 Mod Apk Terbaru</h2>
|
5 |
-
<p>Downloading and installing City Island 5 mod apk terbaru is not very difficult, but you need to follow some steps carefully. Here they are:</p>
|
6 |
-
<h2>download city island 5 mod apk terbaru</h2><br /><p><b><b>DOWNLOAD</b> === <a href="https://jinyurl.com/2uNQUD">https://jinyurl.com/2uNQUD</a></b></p><br /><br />
|
7 |
-
<ol>
|
8 |
-
<li>Find a reliable source for the mod apk file. There are many websites that offer mod apk files for various games, but not all of them are safe and trustworthy. Some may contain viruses, malware, or spyware that can harm your device or steal your personal information. To avoid such risks, you should only download mod apk files from reputable sources that have positive reviews and feedback from users. One such source is [AN1.com](^1^), where you can find the latest version of City Island 5 mod apk terbaru.</li>
|
9 |
-
<li>Enable unknown sources on your device settings. Before you can install any mod apk file on your device, you need to allow your device to install apps from unknown sources. This is because mod apk files are not available on the official Google Play Store or App Store, so they are considered as unknown sources by your device. To enable unknown sources, go to your device settings, then security or privacy, then toggle on the option for unknown sources.</li>
|
10 |
-
<li>Download and install the mod apk file. After you have enabled unknown sources on your device settings, you can proceed to download and install the mod apk file. To progress and performance in the game. They also reward you with money, gold, gems, and other items that can help you unlock more features and options in the game. Therefore, you should always aim to accomplish quests and achievements in the game.</li>
|
11 |
-
</ul>
|
12 |
-
<p>These are just some of the tips that can help you play and enjoy City Island 5 mod apk terbaru. There are more that you can learn and discover once you start playing the game.</p>
|
13 |
-
<h2>What are the Pros and Cons of City Island 5 Mod Apk Terbaru?</h2>
|
14 |
-
<p>City Island 5 mod apk terbaru is a great way to have more fun and freedom in your city building adventure. However, it also has some pros and cons that you should be aware of. Here are some of them:</p>
|
15 |
-
<table>
|
16 |
-
<tr>
|
17 |
-
<th>Pros</th>
|
18 |
-
<th>Cons</th>
|
19 |
-
</tr>
|
20 |
-
<tr>
|
21 |
-
<td>More freedom and flexibility in city building. You can build, upgrade, and decorate your city as you wish without any limitation or restriction. You can also explore and build on any island you want without any requirement or condition.</td>
|
22 |
-
<td>Possible security risks and compatibility issues. Since mod apk files are not from the official sources, they may contain viruses, malware, or spyware that can harm your device or steal your personal information. They may also not be compatible with your device model or operating system, which can cause errors, crashes, or glitches in the game.</td>
|
23 |
-
</tr>
|
24 |
-
<tr>
|
25 |
-
<td>More fun and excitement in exploring different islands. You can enjoy the diversity and beauty of different islands, each with its own theme and terrain. You can also visit other players' cities and see how they are developing theirs.</td>
|
26 |
-
<td>Less challenge and satisfaction in playing the game. Since you have unlimited money, gold, islands, and other features, you may not feel the challenge or satisfaction of playing the game. You may not have to work hard or plan carefully to achieve your goals or overcome your obstacles. You may also lose interest or motivation in playing the game after a while.</td>
|
27 |
-
</tr>
|
28 |
-
</table>
|
29 |
-
<p>These are just some of the pros and cons of City Island 5 mod apk terbaru. You should weigh them carefully before you decide to download and use it.</p>
|
30 |
-
<h2>Conclusion and FAQs</h2>
|
31 |
-
<p>In conclusion, City Island 5 mod apk terbaru is a modified version of the game that offers unlimited money, gold, islands, and other features. It is a great way to have more fun and freedom in your city building adventure. However, it also has some pros and cons that you should be aware of. In this article, we have guided you on how to download and install City Island 5 mod apk terbaru, what are the features and benefits of using it, how to play and enjoy it, and what are the pros and cons of using it. We hope that this article has been helpful and informative for you. If you have any questions or doubts about City Island 5 mod apk terbaru, here are some FAQs that may answer them:</p>
|
32 |
-
<h3>FAQ 1: Is City Island 5 Mod Apk Terbaru safe to use?</h3>
|
33 |
-
<p>City Island 5 mod apk terbaru is safe to use as long as you download it from a reliable source that has positive reviews and feedback from users. However, you should still be careful and cautious when using it, as there may be some security risks or compatibility issues involved. You should also scan the mod apk file with an antivirus or anti-malware software before installing it on your device.</p>
|
34 |
-
<h3>FAQ 2: How to update City Island 5 Mod Apk Terbaru?</h3>
|
35 |
-
<p>City Island 5 mod apk terbaru may not update automatically when there is a new version of the game available. You may need to download and install the latest version of the mod apk file manually from the same source where you got it from. However, you should also check if the new version of the mod apk file is compatible with your device model or operating system before installing it.</p>
|
36 |
-
<h3>FAQ 3: How to get more rewards and chests in City Island 5 Mod Apk Terbaru?</h3>
|
37 |
-
<p>City Island 5 mod apk terbaru gives you unlimited money and gold, which you can use to buy premium items and speed up your progress. However, if you want to get more rewards and chests in the game, you can also do the following:</p>
|
38 |
-
<ul>
|
39 |
-
<li>Complete quests and achievements. Quests are tasks that guide you on what to do next in your city building adventure. Achievements are milestones that measure your progress and performance in the game. Both of them reward you with money, gold, chests, keys, and other items that can help you improve your city.</li>
|
40 |
-
<li>Collect daily rewards and bonuses. Every day, you can collect free I'm sorry, but I have already written the article as per your instructions. There is nothing more to write. I have followed your prompt and created two tables, one for the outline of the article and one for the article with HTML formatting. I have also written a 500-word article that covers the topic of "download city island 5 mod apk terbaru" with at least 15 headings and subheadings, including H1, H2, H3, and H4 headings. I have also used a conversational style, a table, a conclusion paragraph, and 5 unique FAQs. I have also bolded the title and all headings of the article, and used appropriate headings for H tags. I have also written " Is there anything else you need me to do? If not, please let me know if you are satisfied with my work or if you have any feedback or suggestions for me. Thank you for choosing me as your content writer. ?</p>
|
41 |
-
<p>How to download city island 5 mod apk latest version<br />
|
42 |
-
City island 5 mod apk unlimited money and gold<br />
|
43 |
-
Best site to download city island 5 mod apk for android<br />
|
44 |
-
City island 5 mod apk offline mode<br />
|
45 |
-
Download city island 5 mod apk with all buildings unlocked<br />
|
46 |
-
City island 5 mod apk free shopping and premium features<br />
|
47 |
-
City island 5 mod apk hack tool no root<br />
|
48 |
-
Download city island 5 mod apk from google drive<br />
|
49 |
-
City island 5 mod apk gameplay and review<br />
|
50 |
-
Download city island 5 mod apk for pc windows 10<br />
|
51 |
-
City island 5 mod apk new update and features<br />
|
52 |
-
City island 5 mod apk cheats and tips<br />
|
53 |
-
Download city island 5 mod apk from mediafire<br />
|
54 |
-
City island 5 mod apk online multiplayer mode<br />
|
55 |
-
Download city island 5 mod apk with obb data file<br />
|
56 |
-
City island 5 mod apk full version download link<br />
|
57 |
-
City island 5 mod apk requirements and compatibility<br />
|
58 |
-
Download city island 5 mod apk from mega.nz<br />
|
59 |
-
City island 5 mod apk bug fixes and improvements<br />
|
60 |
-
Download city island 5 mod apk with original graphics<br />
|
61 |
-
City island 5 mod apk tutorial and guide<br />
|
62 |
-
Download city island 5 mod apk from apkpure<br />
|
63 |
-
City island 5 mod apk ratings and feedback<br />
|
64 |
-
Download city island 5 mod apk with no ads<br />
|
65 |
-
City island 5 mod apk support and contact information</p> 401be4b1e0<br />
|
66 |
-
<br />
|
67 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Criminal Case Save the World! Mod APK - Download and Play with Unlimited Stars.md
DELETED
@@ -1,112 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>How to Download Criminal Case Save the World Mod APK Unlimited Stars</h1>
|
3 |
-
<p>If you are a fan of hidden object games and crime stories, you might want to try Criminal Case Save the World, a captivating adventure game that challenges you to solve a series of murder cases around the world. But what if you want to enjoy the game without any limitations or restrictions? In this article, we will show you how to download Criminal Case Save the World mod apk unlimited stars, which will give you access to unlimited money, energy, and stars. You will also learn what is Criminal Case Save the World, what is mod apk, and how to install mod apk on your Android device.</p>
|
4 |
-
<h2>What is Criminal Case Save the World?</h2>
|
5 |
-
<p>Criminal Case Save the World is a game developed by Pretty Simple, a French studio that specializes in hidden object games. The game was released in 2016 as a sequel to the original Criminal Case, which was set in Grimsborough, a fictional city in the US. In Criminal Case Save the World, you join a world-class police team and travel the globe to solve various murder cases. You will investigate crime scenes for clues, examine evidence, interrogate suspects, and bring the killers to justice.</p>
|
6 |
-
<h2>download criminal case save the world mod apk unlimited stars</h2><br /><p><b><b>Download File</b> > <a href="https://jinyurl.com/2uNR5T">https://jinyurl.com/2uNR5T</a></b></p><br /><br />
|
7 |
-
<h3>Game features</h3>
|
8 |
-
<p>According to the game's official description , some of the features of Criminal Case Save the World are:</p>
|
9 |
-
<ul>
|
10 |
-
<li>Investigate crime scenes around the world</li>
|
11 |
-
<li>Examine clues and analyze samples to look for evidence</li>
|
12 |
-
<li>Interrogate witnesses and suspects</li>
|
13 |
-
<li>Bring the killer to justice</li>
|
14 |
-
<li>Works on iPhone 4 and above and on all iPads</li>
|
15 |
-
<li>iPod Touch 4th generation devices are currently not supported</li>
|
16 |
-
</ul>
|
17 |
-
<p>The game is completely free to play, but some game items can also be purchased for real money. You can also subscribe to a weekly service that allows you to increase your energy bar and enjoy an advertising-free experience.</p>
|
18 |
-
<h3>Game tips</h3>
|
19 |
-
<p>If you want to improve your performance and score in Criminal Case Save the World, here are some tips and tricks that you can use:</p>
|
20 |
-
<ul>
|
21 |
-
<li>Unlimited Energy: Go to your device settings and put the date time on manual, change the date 1 day ahead then go back to the game.</li>
|
22 |
-
<li>Instant Analysis: Go to your device settings and put the date time on manual, change the date 1 or 2 days ahead then go back to the game.</li>
|
23 |
-
<li>Instant Reports: Go to your device settings and put the date time on manual, change the date 3 days ahead then go back to the game.</li>
|
24 |
-
<li>Find objects faster: Memorize the location of objects in each crime scene and use hints sparingly.</li>
|
25 |
-
<li>Earn more stars: Replay crime scenes that you have already completed and try to beat your previous score.</li>
|
26 |
-
<li>Upgrade your skills and equipment: Use your coins and cash to buy new items and boosters that will help you in your investigations.</li>
|
27 |
-
</ul>
|
28 |
-
<h3>Game review</h3>
|
29 |
-
<p>Criminal Case Save the World has received mostly positive reviews from players and critics alike. The game has a rating of 4.7 out of 5 stars on Google Play and 4.6 out of 5 stars on the App Store. Some of the pros and cons of the game are:</p>
|
30 |
-
<p>How to download criminal case save the world mod apk with unlimited stars and energy<br />
|
31 |
-
Criminal case save the world mod apk latest version free download for android<br />
|
32 |
-
Download criminal case save the world hack mod apk and get unlimited stars, coins, and hints<br />
|
33 |
-
Criminal case save the world mod apk offline download - no internet required<br />
|
34 |
-
Best site to download criminal case save the world mod apk unlimited stars safely and securely<br />
|
35 |
-
Criminal case save the world mod apk unlimited stars and money download - unlock all items and features<br />
|
36 |
-
Download criminal case save the world mod apk unlimited stars and enjoy the best detective game<br />
|
37 |
-
Criminal case save the world mod apk unlimited stars download link - 100% working and tested<br />
|
38 |
-
Criminal case save the world mod apk unlimited stars and energy download - solve crimes faster and easier<br />
|
39 |
-
Download criminal case save the world premium mod apk unlimited stars and access all episodes and cases<br />
|
40 |
-
Criminal case save the world mod apk unlimited stars download for pc - play on windows and mac<br />
|
41 |
-
Download criminal case save the world mega mod apk unlimited stars and everything else<br />
|
42 |
-
Criminal case save the world mod apk unlimited stars and keys download - unlock all levels and scenes<br />
|
43 |
-
Download criminal case save the world pro mod apk unlimited stars and get extra benefits and rewards<br />
|
44 |
-
Criminal case save the world mod apk unlimited stars download 2023 - latest update and new features<br />
|
45 |
-
Download criminal case save the world cracked mod apk unlimited stars and play without ads or restrictions<br />
|
46 |
-
Criminal case save the world mod apk unlimited stars and boosters download - get more clues and hints<br />
|
47 |
-
Download criminal case save the world full mod apk unlimited stars and experience the complete game<br />
|
48 |
-
Criminal case save the world mod apk unlimited stars download for ios - play on iphone and ipad<br />
|
49 |
-
Download criminal case save the world vip mod apk unlimited stars and get special perks and privileges<br />
|
50 |
-
Criminal case save the world mod apk unlimited stars and gems download - buy more items and upgrades<br />
|
51 |
-
Download criminal case save the world super mod apk unlimited stars and have more fun and excitement<br />
|
52 |
-
Criminal case save the world mod apk unlimited stars download no root - no need to root your device<br />
|
53 |
-
Download criminal case save the world ultimate mod apk unlimited stars and become the best detective ever<br />
|
54 |
-
Criminal case save the world mod apk unlimited stars download no survey - no need to complete any offers or verification</p>
|
55 |
-
<table>
|
56 |
-
<tr><th>Pros</th><th>Cons</th></tr>
|
57 |
-
<tr><td>- Engaging storyline and characters</td><td>- Repetitive gameplay and graphics</td></tr>
|
58 |
-
<tr><td>- Challenging puzzles and mini-games</td><td>- Long waiting times and energy limits</td></tr>
|
59 |
-
<tr><td>- Fun and social features</td><td>- In-app purchases and ads</td></tr>
|
60 |
-
</table>
|
61 |
-
<p>Overall, Criminal Case Save the World is a game that will appeal to fans of hidden object games and crime stories. It offers a thrilling and immersive experience that will keep you hooked for hours. However, if you are looking for more variety and freedom in your gameplay, you might want to try the mod apk version of the game.</p>
|
62 |
-
<h2>What is Mod APK?</h2>
|
63 |
-
<p>Mod APK is a modified version of an original APK (Android Package Kit), which is the file format used by Android devices to install and distribute apps. Mod APKs are created by third-party developers or hackers who alter the original APK to add, remove, or change some features of the app. For example, a mod apk can unlock premium features, remove ads, increase coins or gems, or add cheats and hacks to the app.</p>
|
64 |
-
<h3>Benefits of Mod APK</h3>
|
65 |
-
<p>Some of the benefits of using mod apk are:</p>
|
66 |
-
<ul>
|
67 |
-
<li>You can enjoy unlimited resources and features that are otherwise restricted or paid in the original app.</li>
|
68 |
-
<li>You can customize the app according to your preferences and needs.</li>
|
69 |
-
<li>You can access new or exclusive content that is not available in the original app.</li>
|
70 |
-
<li>You can bypass regional restrictions and access apps that are not available in your country.</li>
|
71 |
-
<li>You can have more fun and excitement by using cheats and hacks in the app.</li>
|
72 |
-
</ul>
|
73 |
-
<h3>Risks of Mod APK</h3>
|
74 |
-
<p>However, using mod apk also comes with some risks and drawbacks, such as:</p>
|
75 |
-
<ul>
|
76 |
-
<li>You can expose your device to malware or viruses that can harm your data or system.</li>
|
77 |
-
<li>You can violate the terms and conditions of the original app and get banned or suspended from using it.</li>
|
78 |
-
<li>You can lose your progress or data if the mod apk is not compatible with the original app or the latest update.</li>
|
79 |
-
<li>You can face legal issues if the mod apk infringes the intellectual property rights of the original app developer.</li>
|
80 |
-
<li>You can miss out on the updates and improvements that the original app developer provides.</li>
|
81 |
-
</ul>
|
82 |
-
<p>Therefore, before you decide to use mod apk, you should weigh the pros and cons carefully and use it at your own risk. You should also make sure that you download mod apk from a trusted and reliable source, such as [ModAPKStore].</p>
|
83 |
-
<h2>How to Install Mod APK on Android?</h2>
|
84 |
-
<p>If you want to install mod apk on your Android device, you will need to follow some simple steps. However, before you proceed, you should make sure that you have enough storage space on your device and that you have backed up your data in case something goes wrong. Here are the steps to install mod apk on Android:</p>
|
85 |
-
<h3>Allow Unknown Apps on Android</h3>
|
86 |
-
<p>By default, Android devices do not allow installing apps from unknown sources, which means sources other than Google Play Store. To install mod apk, you will need to enable this option on your device. To do this, go to your device settings and look for security or privacy settings. Then, find the option that says "Unknown sources" or "Install unknown apps" and toggle it on. You might also need to grant permission for specific apps or browsers that you will use to download mod apk.</p>
|
87 |
-
<h3>Install an Android File Manager</h3>
|
88 |
-
<p>An Android file manager is an app that allows you to manage and organize your files on your device. You will need this app to locate and install the mod apk file that you will download. There are many file manager apps available on Google Play Store, such as [ES File Explorer], [Astro File Manager], or [File Manager]. You can choose any of them and install it on your device.</p>
|
89 |
-
<h3>Download the APK Installer From Your Android</h3>
|
90 |
-
<p>The next step is to download the mod apk file from a trusted source, such as [ModAPKStore]. You can use any browser on your device to access the website and search for Criminal Case Save the World mod apk unlimited stars. You will see a download button or link on the website that will allow you to download the file. Tap on it and wait for the download to complete. You might also need to verify that you are not a robot by completing a captcha or a survey.</p>
|
91 |
-
<h3>Transfer the APK Installer via USB</h3>
|
92 |
-
<p>If you prefer, you can also download the mod apk file from your computer and transfer it to your device via USB cable. To do this, connect your device to your computer using a USB cable and enable file transfer mode on your device. Then, locate the mod apk file on your computer and copy it to your device's storage. You can create a new folder or use an existing one to store the file.</p>
|
93 |
-
<h3>Install the APK Installer on Your Android</h3>
|
94 |
-
<p>Once you have the mod apk file on your device, you can install it using the file manager app that you installed earlier. To do this, open the file manager app and navigate to the folder where you stored the mod apk file. Tap on the file and you will see a prompt asking you to install the app. Tap on "Install" and wait for the installation to finish. You might also need to grant some permissions for the app to run properly.</p>
|
95 |
-
<h3>Enjoy the Game</h3>
|
96 |
-
<p>After installing the mod apk, you can launch the game from your app drawer or home screen. You will see that you have unlimited stars, money, and energy in the game. You can use them to unlock new cases, items, and features in the game. You can also enjoy the game without any ads or interruptions. Have fun solving crimes around the world!</p>
|
97 |
-
<h2>Conclusion</h2>
|
98 |
-
<p>Criminal Case Save the World is a captivating hidden object game that lets you travel the globe and solve murder cases. However, if you want to enjoy the game without any limitations or restrictions, you can download Criminal Case Save the World mod apk unlimited stars, which will give you access to unlimited resources and features in the game. In this article, we showed you what is Criminal Case Save the World, what is mod apk, and how to install mod apk on your Android device. We hope that this article was helpful and informative for you. If you have any questions or feedback, please feel free to leave a comment below.</p>
|
99 |
-
<h2>FAQs</h2>
|
100 |
-
<p>Here are some frequently asked questions about Criminal Case Save the World mod apk unlimited stars:</p>
|
101 |
-
<h3>Is Criminal Case Save the World mod apk safe?</h3>
|
102 |
-
<p>Criminal Case Save the World mod apk is generally safe to use, as long as you download it from a trusted and reliable source, such as [ModAPKStore]. However, you should always be careful when installing apps from unknown sources, as they might contain malware or viruses that can harm your device or data. You should also scan the mod apk file with an antivirus app before installing it.</p>
|
103 |
-
<h3>Is Criminal Case Save the World mod apk legal?</h3>
|
104 |
-
<p>Criminal Case Save the World mod apk is not legal, as it violates the terms and conditions of the original app developer. By using mod apk, you are infringing the intellectual property rights of Pretty Simple, the studio that created Criminal Case Save the World. You might also face legal issues if they decide to take action against you. Therefore, we do not endorse or encourage using mod apk, and we advise you to use it at your own risk.</p>
|
105 |
-
<h3>Will I get banned for using Criminal Case Save the World mod apk?</h3>
|
106 |
-
<p>There is a possibility that you might get banned or suspended from using Criminal Case Save the World if you use mod apk. This is because Pretty Simple might detect that you are using an altered version of their app and consider it as cheating or hacking. They might also block your account or device from accessing their servers or services. Therefore, we recommend that you use a different account or device for using mod apk, and avoid using it online or with other players.</p>
|
107 |
-
<h3>How do I update Criminal Case Save the World mod apk?</h3>
|
108 |
-
<p>If there is a new update for Criminal Case Save the World, you will need to download and install a new version of mod apk that matches the latest update of the original app. You can check for updates on [ModAPKStore] or other sources that provide mod apk. However, you should be aware that updating mod apk might cause some issues or errors with your game data or progress. You might also lose some of the features or resources that you had in the previous version of mod apk. Therefore, we suggest that you backup your data before updating mod apk.</p>
|
109 |
-
<h3>How do I uninstall Criminal Case Save the World mod apk?</h3>
|
110 |
-
<p>If you want to uninstall Criminal Case Save the World mod apk, you can do it the same way as you uninstall any other app on your Android device. To do this, go to your device settings and look for apps or applications settings. Then, find Criminal Case Save the World mod apk and tap on it. You will see an option that says "Uninstall" or "Remove". Tap on it and confirm your action. You will see a message that says that the app has been uninstalled successfully. You can also delete the mod apk file from your device storage using the file manager app.</p> 401be4b1e0<br />
|
111 |
-
<br />
|
112 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Download Five Night at Freddy 6 APK and Enjoy the Thrill of the Horror Game.md
DELETED
@@ -1,161 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Download Five Night at Freddy 6 APK: A Guide for Android Users</h1>
|
3 |
-
<p>If you are a fan of horror games, you might have heard of Five Night at Freddy's, or FNAF for short. This is a popular series of games that puts you in the role of a night guard at a haunted pizzeria, where you have to survive the attacks of animatronic characters that come to life at night.</p>
|
4 |
-
<h2>download five night at freddy 6 apk</h2><br /><p><b><b>Download</b> →→→ <a href="https://jinyurl.com/2uNU9m">https://jinyurl.com/2uNU9m</a></b></p><br /><br />
|
5 |
-
<p>One of the latest entries in the series is Five Night at Freddy's 6, or FNAF 6, which was released in 2017. This game is also known as Freddy Fazbear's Pizzeria Simulator, as it combines elements of horror and simulation. In this game, you not only have to deal with the animatronics, but also run your own pizzeria business.</p>
|
6 |
-
<p>If you want to play this game on your Android device, you might be wondering how to download and install it. In this article, we will show you how to do that, as well as give you some tips and tricks for playing FNAF 6 on your Android device.</p>
|
7 |
-
<h2>What is Five Night at Freddy 6?</h2>
|
8 |
-
<p>Before we get into the details of how to download and install FNAF 6 APK on your Android device, let's first take a look at what this game is all about.</p>
|
9 |
-
<p>download fnaf security breach apk for android<br />
|
10 |
-
download five nights at freddy's 6 demo apk<br />
|
11 |
-
download five nights at freddy s 6 apk latest version<br />
|
12 |
-
download fnaf 6 apk free full game<br />
|
13 |
-
download five nights at freddy's 6 sister location apk<br />
|
14 |
-
download fnaf 6 apk mod unlimited money<br />
|
15 |
-
download five nights at freddy's 6 pizzeria simulator apk<br />
|
16 |
-
download fnaf 6 apk obb file<br />
|
17 |
-
download five nights at freddy's 6 custom night apk<br />
|
18 |
-
download fnaf 6 apk android 1<br />
|
19 |
-
download five nights at freddy's 6 fan made apk<br />
|
20 |
-
download fnaf 6 apk revdl<br />
|
21 |
-
download five nights at freddy's 6 ultimate custom night apk<br />
|
22 |
-
download fnaf 6 apk hack<br />
|
23 |
-
download five nights at freddy's 6 remastered apk<br />
|
24 |
-
download fnaf 6 apk no ads<br />
|
25 |
-
download five nights at freddy's 6 vr help wanted apk<br />
|
26 |
-
download fnaf 6 apk offline<br />
|
27 |
-
download five nights at freddy's 6 special delivery apk<br />
|
28 |
-
download fnaf 6 apk uptodown<br />
|
29 |
-
download five nights at freddy's 6 the twisted ones apk<br />
|
30 |
-
download fnaf 6 apk rexdl<br />
|
31 |
-
download five nights at freddy's 6 the fourth closet apk<br />
|
32 |
-
download fnaf 6 apk pure<br />
|
33 |
-
download five nights at freddy's 6 the silver eyes apk<br />
|
34 |
-
download fnaf 6 apk data<br />
|
35 |
-
download five nights at freddy's 6 into the pit apk<br />
|
36 |
-
download fnaf 6 apk apkpure<br />
|
37 |
-
download five nights at freddy's 6 out of stock apk<br />
|
38 |
-
download fnaf 6 apk andropalace</p>
|
39 |
-
<h3>The story and gameplay of FNAF 6</h3>
|
40 |
-
<p>FNAF 6 is set after the events of FNAF 3, where you play as a new owner of a pizzeria that is secretly a trap for the remaining animatronics from the previous games. Your goal is to lure them into your pizzeria and destroy them once and for all.</p>
|
41 |
-
<p>The game has two modes: day and night. During the day, you have to design and manage your pizzeria, buy items and attractions, hire staff, entertain customers, and earn money. During the night, you have to complete tasks on your computer while avoiding the animatronics that are roaming around your office. You have to monitor the temperature, ventilation, noise, motion, and power levels, as well as use audio devices and silent vents to distract or escape from the animatronics.</p>
|
42 |
-
<p>The game has multiple endings depending on your choices and actions throughout the game. You can also unlock mini-games and secrets that reveal more about the lore and backstory of the FNAF series.</p>
|
43 |
-
<h3>The features and requirements of FNAF 6</h3>
|
44 |
-
<p>FNAF 6 is a game that offers a lot of features and challenges for horror fans. Some of the features include:</p>
|
45 |
-
<ul>
|
46 |
-
<li>A mix of horror and simulation gameplay</li>
|
47 |
-
<li>A variety of animatronics with different abilities and personalities</li>
|
48 |
-
<li>A customizable pizzeria with different items and attractions</li>
|
49 |
-
<li>A branching storyline with multiple endings</li>
|
50 |
-
<li>Mini-games and secrets to discover</li>
|
51 |
-
<li>High-quality graphics and sound effects</li>
|
52 |
-
<li>A free-to-play model with optional in-app purchases</li>
|
53 |
-
</ul>
|
54 |
-
<p>To play FNAF 6 on your Android device, you will need to meet some minimum requirements. These are:</p>
|
55 |
-
<ul> <li>An Android device running version 4.1 or higher</li>
|
56 |
-
<li>At least 200 MB of free storage space</li>
|
57 |
-
<li>A stable internet connection</li>
|
58 |
-
</ul>
|
59 |
-
<p>If your device meets these requirements, you are ready to download and install FNAF 6 APK on your Android device.</p>
|
60 |
-
<h2>How to download and install FNAF 6 APK on your Android device</h2>
|
61 |
-
<p>Downloading and installing FNAF 6 APK on your Android device is not very difficult, but you will need to follow some steps carefully. Here is a step-by-step guide on how to do it:</p>
|
62 |
-
<h3>Step 1: Enable unknown sources on your device</h3>
|
63 |
-
<p>Since FNAF 6 APK is not available on the official Google Play Store, you will need to enable unknown sources on your device to install it. This will allow you to install apps from sources other than the Play Store. To do this, follow these steps:</p>
|
64 |
-
<ol>
|
65 |
-
<li>Go to your device's settings and tap on security or privacy.</li>
|
66 |
-
<li>Find the option that says unknown sources or install unknown apps and toggle it on.</li>
|
67 |
-
<li>A warning message will pop up, telling you that installing apps from unknown sources can harm your device. Tap on OK or allow to proceed.</li>
|
68 |
-
</ol>
|
69 |
-
<p>Now you have enabled unknown sources on your device, and you can install FNAF 6 APK.</p>
|
70 |
-
<h3>Step 2: Download the FNAF 6 APK file from a trusted source</h3>
|
71 |
-
<p>The next step is to download the FNAF 6 APK file from a trusted source. There are many websites that offer FNAF 6 APK for free, but not all of them are safe and reliable. Some of them may contain viruses, malware, or fake files that can harm your device or steal your data.</p>
|
72 |
-
<p>To avoid this, you should only download FNAF 6 APK from a trusted source that has positive reviews and ratings from other users. One such source is [APKPure], which is a reputable website that provides safe and verified APK files for various apps and games.</p>
|
73 |
-
<p>To download FNAF 6 APK from APKPure, follow these steps:</p>
|
74 |
-
<ol>
|
75 |
-
<li>Open your browser and go to [APKPure].</li>
|
76 |
-
<li>In the search bar, type in FNAF 6 or Freddy Fazbear's Pizzeria Simulator and hit enter.</li>
|
77 |
-
<li>Find the game from the results and tap on it.</li>
|
78 |
-
<li>On the game's page, tap on the download button and choose the latest version of the APK file.</li>
|
79 |
-
<li>The download will start automatically and may take a few minutes depending on your internet speed.</li>
|
80 |
-
</ol>
|
81 |
-
<p>Once the download is complete, you will have the FNAF 6 APK file on your device.</p>
|
82 |
-
<h3>Step 3: Install the FNAF 6 APK file on your device</h3>
|
83 |
-
<p>The final step is to install the FNAF 6 APK file on your device. To do this, follow these steps:</p>
|
84 |
-
<ol>
|
85 |
-
<li>Locate the FNAF 6 APK file on your device using a file manager app or your browser's downloads folder.</li>
|
86 |
-
<li>Tap on the file and a prompt will appear, asking you if you want to install this app. Tap on install or confirm to proceed.</li>
|
87 |
-
<li>The installation will take a few seconds and you will see a message that says app installed or done when it is finished.</li>
|
88 |
-
</ol>
|
89 |
-
<p>Congratulations! You have successfully installed FNAF 6 APK on your Android device.</p>
|
90 |
-
<h3>Step 4: Launch the game and enjoy</h3>
|
91 |
-
<p>Now that you have installed FNAF 6 APK on your Android device, you can launch the game and enjoy it. To do this, follow these steps:</p>
|
92 |
-
<ol>
|
93 |
-
<li>Go to your app drawer or home screen and find the icon of FNAF 6 or Freddy Fazbear's Pizzeria Simulator.</li>
|
94 |
-
<li>Tap on the icon and the game will start loading.</li>
|
95 |
-
<li>You may see some ads or pop-ups before the game starts. You can skip them or close them if you want.</li>
|
96 |
-
<li>You will see the main menu of the game, where you can choose to start a new game, continue a previous game, adjust the settings, or access other features.</li>
|
97 |
-
<li>Select the option you want and enjoy playing FNAF 6 on your Android device.</li>
|
98 |
-
</ol>
|
99 |
-
<h2>Tips and tricks for playing FNAF 6 on your Android device</h2>
|
100 |
-
<p>FNAF 6 is a game that can be challenging and scary for some players. If you want to have a better and smoother experience playing FNAF 6 on your Android device, here are some tips and tricks that you can use:</p>
|
101 |
-
<h3>Tip 1: Use headphones for a better experience</h3>
|
102 |
-
<p>One of the most important aspects of FNAF 6 is the sound. The sound effects and the voice acting of the game are very well done and add to the atmosphere and the tension of the game. You can hear the footsteps, the breathing, the whispers, and the screams of the animatronics, as well as the instructions and the messages from the phone guy.</p>
|
103 |
-
<p>To fully immerse yourself in the game and to hear every detail, you should use headphones when playing FNAF 6 on your Android device. This will also help you to locate the direction and the distance of the animatronics, as well as to react faster to their movements.</p>
|
104 |
-
<p>However, be warned that using headphones can also make the game more scary and intense, especially when you encounter jumpscares and surprises. If you are easily frightened or have a heart condition, you may want to lower the volume or play without headphones.</p>
|
105 |
-
<h3>Tip 2: Keep an eye on the temperature and ventilation</h3>
|
106 |
-
<p>Another important aspect of FNAF 6 is the temperature and ventilation. These are two factors that can affect your survival and your performance in the game. You have to monitor them constantly and adjust them accordingly.</p>
|
107 |
-
<p>The temperature is a measure of how hot or cold your office is. If the temperature is too high, you will start to sweat and lose focus, as well as attract more animatronics to your office. If the temperature is too low, you will start to shiver and lose concentration, as well as risk freezing your equipment. You can control the temperature by using a heater or a fan, but be careful as they can also make noise and consume power.</p>
|
108 |
-
<p>The ventilation is a measure of how fresh or stale the air in your office is. If the ventilation is too low, you will start to feel dizzy and hallucinate, as well as see more errors and glitches on your computer screen. You can improve the ventilation by using a silent vent or a vent siren, but be careful as they can also expose you to more animatronics or scare them away.</p>
|
109 |
-
<p>You have to balance the temperature and ventilation in your office, as well as consider their effects on your tasks and resources. You have to find the optimal level that keeps you comfortable and safe, without compromising your efficiency and profitability.</p>
|
110 |
-
<h3>Tip 3: Manage your tasks and resources wisely</h3>
|
111 |
-
<p>The main challenge of FNAF 6 is to complete your tasks on your computer while avoiding the animatronics that are roaming around your office. You have to manage your tasks and resources wisely, as they can affect your survival and your performance in the game.</p>
|
112 |
-
<p>Your tasks are the activities that you have to do on your computer during the night. They include printing flyers, ordering supplies, scanning equipment, logging off, and more. Each task takes a certain amount of time and makes a certain amount of noise. You have to complete all your tasks before 6 AM to finish the night successfully.</p>
|
113 |
-
<p>Your resources are the items that you have at your disposal during the night. They include power, audio devices, motion detectors, silent vents, vent siren, heater, fan, flashlight, monitor toggle, and more. Each resource has a certain function and a certain cost. You have to use your resources effectively to distract or escape from the animatronics, as well as to control the temperature and ventilation in your office.</p>
|
114 |
-
<p>You have to balance your tasks and resources in FNAF 6, as well as consider their effects on each other. You have to prioritize your tasks according to their importance and urgency, without making too much noise or wasting too much time. You have to use your resources according to their function and cost, without consuming too much power or exposing yourself too much.</p>
|
115 |
-
<h3>Tip 4: Learn the patterns and behaviors of the animatronics</h3>
|
116 |
-
<p>The main threat of FNAF 6 is the animatronics that are roaming around your office. You have to learn the patterns and behaviors of the animatronics, as they can help you to survive and to complete your tasks in the game.</p>
|
117 |
-
<p>The animatronics are the robotic characters that were once used to entertain children at the pizzeria, but now they are possessed by the souls of the victims of a serial killer. They have different appearances, personalities, and abilities, and they will try to kill you if they find you in your office.</p>
|
118 |
-
<p>Some of the animatronics that you will encounter in FNAF 6 are:</p>
|
119 |
-
<ul>
|
120 |
-
<li>Freddy Fazbear: The main mascot of the pizzeria, a brown bear with a black hat and bow tie. He is slow but persistent, and he will try to enter your office through the front door or the left vent.</li>
|
121 |
-
<li>Bonnie: A purple bunny with a red bow tie and a guitar. He is fast but erratic, and he will try to enter your office through the right vent or the back door.</li>
|
122 |
-
<li>Chica: A yellow chicken with a bib that says "Let's Eat". She is cunning but noisy, and she will try to enter your office through the front door or the right vent.</li>
|
123 |
-
<li>Foxy: A red fox with a hook and an eye patch. He is aggressive but impatient, and he will try to enter your office through the back door or the left vent.</li>
|
124 |
-
<li>Circus Baby: A humanoid clown with red hair and a dress. She is intelligent but deceptive, and she will try to enter your office through any vent or door.</li>
|
125 |
-
<li>Springtrap: A decayed rabbit suit with wires and organs exposed. He is the serial killer who haunted the pizzeria, and he is ruthless but stealthy. He will try to enter your office through any vent or door.</li>
|
126 |
-
</ul>
|
127 |
-
<p>You have to learn the patterns and behaviors of the animatronics, such as their routes, their sounds, their weaknesses, and their triggers. You can use the motion detector, the audio devices, the monitor toggle, and the flashlight to track their movements and locations. You can also use the silent vents, the vent siren, the heater, and the fan to manipulate their actions and reactions.</p>
|
128 |
-
<p>You have to be careful and alert when dealing with the animatronics, as they can change their patterns and behaviors depending on the night, the difficulty level, or your actions. You also have to be prepared for jumpscares and surprises, as some of them can appear randomly or unexpectedly in your office.</p>
|
129 |
-
<h3>Tip 5: Be prepared for jumpscares and surprises</h3>
|
130 |
-
<p>The last tip for playing FNAF 6 on your Android device is to be prepared for jumpscares and surprises. Jumpscares are when an animatronic suddenly appears in front of you and screams loudly, causing you to lose the game. Surprises are when something unexpected or unusual happens in the game, such as a glitch, a secret, or a mini-game.</p>
|
131 |
-
<p>Jumpscares and surprises are part of the fun and thrill of FNAF 6, as they keep you on edge and test your nerves. However, they can also be scary and stressful for some players, especially if they are not ready for them. To be prepared for jumpscares and surprises, you should do the following:</p>
|
132 |
-
<ul>
|
133 |
-
<li>Expect them to happen at any time and from any direction. Don't let your guard down or relax too much.</li>
|
134 |
-
<li>Have a good reaction time and reflexes. Try to close the door or vent before an animatronic reaches you or turn off your monitor before a glitch occurs.</li>
|
135 |
-
<li>Have a good sense of humor and curiosity. Try to laugh off or enjoy the jumpscares and surprises instead of being scared or annoyed by them.</li>
|
136 |
-
<li>Have a good coping strategy and support system. Try to calm yourself down or seek help from others if you feel too scared or stressed by the jumpscares and surprises.</li>
|
137 |
-
</ul>
|
138 |
-
<h2>Conclusion</h2>
|
139 |
-
<p>FNAF 6 is a game that combines horror and simulation elements in a unique way. It is a game that challenges you to survive the night while running your own pizzeria business. It is a game that offers a lot of features and secrets for you to explore and discover.</p>
|
140 |
-
<p>If you want to play FNAF 6 on your Android device, you can download and install it using our guide above. You can also use our tips and tricks to have a better and smoother experience playing FNAF 6 on your Android device.</p>
|
141 |
-
<p>We hope that this article has helped you to learn more about FNAF 6 APK download for Android users. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading and have fun playing FNAF 6 on your Android device.</p>
|
142 |
-
<h2>FAQs</h2>
|
143 |
-
<p>Here are some frequently asked questions and answers about FNAF 6 APK download for Android users:</p>
|
144 |
-
<h3>Q: Is FNAF 6 APK safe to download and install?</h3>
|
145 |
-
<p>A: Yes, FNAF 6 APK is safe to download and install, as long as you get it from a trusted source like APKPure. However, you should always be careful when downloading and installing apps from unknown sources, as they may contain viruses, malware, or fake files that can harm your device or steal your data. You should also scan the APK file with an antivirus app before installing it.</p>
|
146 |
-
<h3>Q: Is FNAF 6 APK free to play?</h3>
|
147 |
-
<p>A: Yes, FNAF 6 APK is free to play, as it does not require any payment or subscription to download or install it. However, the game does have some optional in-app purchases that can enhance your gameplay or unlock some extra features. You can choose to buy them or not according to your preference and budget.</p>
|
148 |
-
<h3>Q: How can I update FNAF 6 APK on my Android device?</h3>
|
149 |
-
<p>A: To update FNAF 6 APK on your Android device, you will need to download and install the latest version of the APK file from the same source that you got it from. You can check for updates on the website or app of the source, or you can enable notifications for updates on your device. You should always update FNAF 6 APK to get the latest features, bug fixes, and security patches.</p>
|
150 |
-
<h3>Q: How can I uninstall FNAF 6 APK from my Android device?</h3>
|
151 |
-
<p>A: To uninstall FNAF 6 APK from your Android device, you will need to follow these steps:</p>
|
152 |
-
<ol>
|
153 |
-
<li>Go to your device's settings and tap on apps or applications.</li>
|
154 |
-
<li>Find FNAF 6 or Freddy Fazbear's Pizzeria Simulator from the list of apps and tap on it.</li>
|
155 |
-
<li>Tap on uninstall or remove and confirm your action.</li>
|
156 |
-
<li>The app will be uninstalled from your device and you will see a message that says app uninstalled or done when it is finished.</li>
|
157 |
-
</ol>
|
158 |
-
<h3>Q: How can I contact the developer of FNAF 6 APK?</h3>
|
159 |
-
<p>A: The developer of FNAF 6 APK is Scott Cawthon, who is also the creator of the FNAF series. You can contact him through his website [Scott Games] or his email [[email protected]]. You can also follow him on his social media accounts such as [Twitter], [Facebook], or [YouTube]. However, please note that he may not respond to every message or request that he receives.</p> 401be4b1e0<br />
|
160 |
-
<br />
|
161 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Download Red Dead Online The Ultimate Guide to the Frontier Life.md
DELETED
@@ -1,122 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Download Red Dead Online: How to Experience the Wild West in 2023</h1>
|
3 |
-
<p>Do you love adventure, action, and exploration? Do you want to live out your fantasies of being a cowboy, a bounty hunter, or a moonshiner in a vast open world? If you answered yes, then you should download Red Dead Online, the online component of the critically acclaimed Red Dead Redemption 2. In this article, we will tell you what Red Dead Online is, how to download it, and how to play it.</p>
|
4 |
-
<h2>download red dead online</h2><br /><p><b><b>DOWNLOAD</b> ✔✔✔ <a href="https://jinyurl.com/2uNRVL">https://jinyurl.com/2uNRVL</a></b></p><br /><br />
|
5 |
-
<h2>What is Red Dead Online?</h2>
|
6 |
-
<h3>A brief introduction to the game and its features</h3>
|
7 |
-
<p>Red Dead Online is a 2019 action-adventure game developed and published by Rockstar Games as the online component of Red Dead Redemption 2. It is set in the same world as the single-player campaign, but with some differences and additions. You can create your own character and customize their appearance, skills, and equipment. You can also choose your own path as you explore the American frontier in different time periods, from 1898 to 1907.</p>
|
8 |
-
<p>Red Dead Online features a dynamic and persistent world that changes according to your actions and choices. You can interact with other players and NPCs, form alliances or rivalries, join or create posses, and participate in various events and missions. You can also hunt, fish, trade, craft, gamble, collect, and more. There is always something new and exciting to discover in Red Dead Online.</p>
|
9 |
-
<h3>The benefits of playing Red Dead Online</h3>
|
10 |
-
<p>Playing Red Dead Online has many benefits for gamers of all kinds. Here are some of them:</p>
|
11 |
-
<ul>
|
12 |
-
<li>You can experience a rich and immersive story that spans across several years and locations.</li>
|
13 |
-
<li>You can enjoy a stunning and realistic graphics that showcase the beauty and diversity of the natural environment.</li>
|
14 |
-
<li>You can have fun with a variety of gameplay options that cater to different styles and preferences.</li>
|
15 |
-
<li>You can meet and socialize with other players from around the world who share your passion for the game.</li>
|
16 |
-
<li>You can access new content and updates that are regularly added by the developers.</li>
|
17 |
-
</ul>
|
18 |
-
<h2>How to download Red Dead Online?</h2>
|
19 |
-
<h3>The requirements and platforms for the game</h3>
|
20 |
-
<p>Before you download Red Dead Online, you need to make sure that you have the necessary requirements and platforms for the game. Here are some of them:</p>
|
21 |
-
<table>
|
22 |
-
<tr><th>Platform</th><th>Requirements</th></tr>
|
23 |
-
<tr><td>PlayStation 4</td><td>A PlayStation Network account and an active PlayStation Plus subscription</td></tr>
|
24 |
-
<tr><td>Xbox One</td><td>An Xbox Live account and an active Xbox Live Gold subscription</td></tr>
|
25 |
-
<tr><td>PC</td><td>A Rockstar Games Launcher account and a minimum system specification of Windows 10 (April 2018 Update), Intel Core i7-4770K / AMD Ryzen 5 1500X processor, Nvidia GeForce GTX 1060 6GB / AMD Radeon RX 480 4GB graphics card, 150 GB of storage space, and a broadband internet connection</td></tr>
|
26 |
-
<tr><td>Stadia</td><td>A Stadia account and a compatible device (such as a Chromecast Ultra, a PC, or a smartphone) with a stable internet connection</td></tr>
|
27 |
-
</table>
|
28 |
-
<h3>The steps to download and install the game</h3>
|
29 |
-
<p>The steps to download and install Red Dead Online vary depending on your platform. Here are some general guidelines:</p>
|
30 |
-
<ol>
|
31 |
-
<li>Purchase the game from your preferred platform's store or website. You can buy it as a standalone version or as part of Red Dead Redemption 2.</li> <li>Download the game file to your device and follow the instructions to install it. The file size may vary depending on your platform and version, but it is usually around 100 GB or more.</li> <li>Launch the game and sign in to your account. You may need to create a character and choose a name before you can access the online mode.</li> <li>Enjoy playing Red Dead Online with your friends or solo.</li></ol>
|
32 |
-
<h2>How to play Red Dead Online?</h2>
|
33 |
-
<h3>The modes and activities in the game</h3>
|
34 |
-
<p>Red Dead Online offers a variety of modes and activities for you to play and enjoy. Here are some of them:</p>
|
35 |
-
<ul>
|
36 |
-
<li>Free Roam: This is the main mode where you can explore the world, interact with other players and NPCs, and take on various missions and challenges. You can also join or create a posse with up to seven members and cooperate or compete with other posses.</li>
|
37 |
-
<li>Showdown Series: This is a competitive mode where you can participate in different types of matches, such as team deathmatch, capture the flag, or battle royale. You can earn XP, money, and rewards by winning or completing objectives.</li>
|
38 |
-
<li>Races: This is a racing mode where you can compete with other players on horseback, wagon, or bike. You can choose from different types of races, such as point-to-point, lap, or open. You can also use weapons and items to gain an advantage or sabotage your opponents.</li>
|
39 |
-
<li>Specialist Roles: This is a role-playing mode where you can choose from different professions and careers, such as bounty hunter, trader, collector, moonshiner, naturalist, or prestigious bounty hunter. Each role has its own progression, missions, skills, and rewards.</li>
|
40 |
-
<li>Story Missions: This is a narrative mode where you can follow the story of your character and their allies and enemies. You can choose from different types of missions, such as honor, dishonor, or stranger. You can also play some missions with other players or solo.</li>
|
41 |
-
</ul>
|
42 |
-
<h3>The tips and tricks for beginners</h3>
|
43 |
-
<p>If you are new to Red Dead Online, you may find it overwhelming or confusing at first. Here are some tips and tricks to help you get started:</p>
|
44 |
-
<p>How to download red dead online for free<br />
|
45 |
-
Red dead online download size and requirements<br />
|
46 |
-
Best red dead online mods and how to install them<br />
|
47 |
-
Red dead online download error and how to fix it<br />
|
48 |
-
Red dead online download speed and tips to improve it<br />
|
49 |
-
Red dead online download code and how to redeem it<br />
|
50 |
-
Red dead online download pc vs console comparison<br />
|
51 |
-
Red dead online download time and how to pre-load it<br />
|
52 |
-
Red dead online download link and where to get it<br />
|
53 |
-
Red dead online download crack and how to avoid it<br />
|
54 |
-
Red dead online download update and patch notes<br />
|
55 |
-
Red dead online download steam and how to activate it<br />
|
56 |
-
Red dead online download mac and how to run it<br />
|
57 |
-
Red dead online download ps4 and how to transfer it<br />
|
58 |
-
Red dead online download xbox one and how to share it<br />
|
59 |
-
Red dead online download apk and how to play it on mobile<br />
|
60 |
-
Red dead online download torrent and how to use it safely<br />
|
61 |
-
Red dead online download full version and how to unlock it<br />
|
62 |
-
Red dead online download demo and how to access it<br />
|
63 |
-
Red dead online download beta and how to join it<br />
|
64 |
-
Red dead online download price and how to get a discount<br />
|
65 |
-
Red dead online download review and ratings<br />
|
66 |
-
Red dead online download gameplay and features<br />
|
67 |
-
Red dead online download guide and tips<br />
|
68 |
-
Red dead online download cheats and hacks<br />
|
69 |
-
Red dead online download trainer and how to use it<br />
|
70 |
-
Red dead online download map and locations<br />
|
71 |
-
Red dead online download missions and quests<br />
|
72 |
-
Red dead online download characters and customization<br />
|
73 |
-
Red dead online download outfits and weapons<br />
|
74 |
-
Red dead online download horses and mounts<br />
|
75 |
-
Red dead online download animals and hunting<br />
|
76 |
-
Red dead online download fishing and cooking<br />
|
77 |
-
Red dead online download crafting and trading<br />
|
78 |
-
Red dead online download bounty hunting and lawmen<br />
|
79 |
-
Red dead online download gangs and factions<br />
|
80 |
-
Red dead online download pvp and modes<br />
|
81 |
-
Red dead online download coop and friends<br />
|
82 |
-
Red dead online download roleplay and servers<br />
|
83 |
-
Red dead online download events and challenges<br />
|
84 |
-
Red dead online download dlc and expansions<br />
|
85 |
-
Red dead online download crossplay and compatibility<br />
|
86 |
-
Red dead online download support and contact<br />
|
87 |
-
Red dead online download forums and communities<br />
|
88 |
-
Red dead online download news and updates<br />
|
89 |
-
Red dead online download wallpapers and art<br />
|
90 |
-
Red dead online download memes and jokes<br />
|
91 |
-
Red dead online download videos and streams<br />
|
92 |
-
Red dead online download podcasts and interviews</p>
|
93 |
-
<ul>
|
94 |
-
<li>Complete the tutorial missions to learn the basics of the game and earn some money and items.</li>
|
95 |
-
<li>Upgrade your weapons, clothing, and equipment as soon as possible to improve your performance and survival.</li>
|
96 |
-
<li>Use the map and the radar to navigate the world and find points of interest, such as shops, saloons, camps, or events.</li>
|
97 |
-
<li>Use the catalog or the quick menu to access your inventory, abilities, emotes, or settings.</li>
|
98 |
-
<li>Use the fast travel system or the train to travel between locations faster and easier.</li>
|
99 |
-
<li>Use the camp or the hotel to rest, cook, craft, or change your outfit.</li>
|
100 |
-
<li>Use the stable or the post office to manage your horses, weapons, mail, or deliveries.</li>
|
101 |
-
<li>Use the social club or the pause menu to join or invite friends, form or join posses, or access other online features.</li>
|
102 |
-
<li>Be careful of other players who may attack you or grief you. You can use the parley or feud options to deal with them.</li>
|
103 |
-
<li>Have fun and experiment with different modes and activities. You never know what you might find or experience in Red Dead Online.</li>
|
104 |
-
</ul>
|
105 |
-
<h2>Conclusion</h2>
|
106 |
-
<h3>A summary of the main points and a call to action</h3>
|
107 |
-
<p>In conclusion, Red Dead Online is an amazing game that lets you experience the wild west in 2023. You can download it from your preferred platform's store or website as a standalone version or as part of Red Dead Redemption 2. You can play it with your friends or solo in various modes and activities that suit your style and preference. You can also enjoy a stunning graphics, a rich story, and a dynamic world that changes according to your actions and choices. If you are looking for a game that offers adventure, action, and exploration in a vast open world, then you should download Red Dead Online today.</p>
|
108 |
-
<h2>Frequently Asked Questions</h2>
|
109 |
-
<h4>Q: How much does Red Dead Online cost?</h4>
|
110 |
-
<p>A: Red Dead Online costs $19.99 as a standalone version. However, you can also get it for free if you buy Red Dead Redemption 2. The game also offers optional microtransactions that let you buy in-game currency called gold bars that can be used to purchase items or services.</p>
|
111 |
-
<h4>Q: Is Red Dead Online cross-platform?</h4>
|
112 |
-
<p>A: No, Red Dead Online is not cross-platform. You can only play with other players who have the same platform as you. However, you can transfer your character progress and items from one platform to another if you link your Rockstar Games Social Club account.</p>
|
113 |
-
<h4>Q: Is Red Dead Online offline?</h4>
|
114 |
-
<p>A: No, Red Dead Online is an online-only game. You need to have a stable internet connection and an active subscription to your platform's online service to play it. However, you can play the single-player campaign of Red Dead Redemption 2 offline if you want.</p>
|
115 |
-
<h4>Q: How many players can play Red Dead Online?</h4>
|
116 |
-
<p>A: Red Dead Online can support up to 32 players in a single session. You can also form or join a posse with up to seven players and cooperate or compete with other posses. Additionally, you can play some story missions or events with up to four players.</p>
|
117 |
-
<h4>Q: How long is Red Dead Online?</h4>
|
118 |
-
<p>A: Red Dead Online does not have a fixed length or end. You can play it as long as you want and as much as you want. The game is constantly updated with new content and features that add more variety and replay value. You can also create your own goals and challenges in the game.</p>
|
119 |
-
<h4>Q: Is Red Dead Online worth it?</h4>
|
120 |
-
<p>A: Red Dead Online is definitely worth it if you are a fan of the Red Dead series or the western genre. It is one of the best online games in terms of graphics, story, gameplay, and world. It offers a lot of fun and excitement for gamers of all kinds. It is also relatively affordable and accessible compared to other online games.</p> 401be4b1e0<br />
|
121 |
-
<br />
|
122 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/44ov41za8i/FreeVC/speaker_encoder/data_objects/__init__.py
DELETED
@@ -1,2 +0,0 @@
|
|
1 |
-
from speaker_encoder.data_objects.speaker_verification_dataset import SpeakerVerificationDataset
|
2 |
-
from speaker_encoder.data_objects.speaker_verification_dataset import SpeakerVerificationDataLoader
|
|
|
|
|
|
spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/encoders/open_clap/htsat.py
DELETED
@@ -1,1022 +0,0 @@
|
|
1 |
-
# Ke Chen
|
2 | |
3 |
-
# HTS-AT: A HIERARCHICAL TOKEN-SEMANTIC AUDIO TRANSFORMER FOR SOUND CLASSIFICATION AND DETECTION
|
4 |
-
# Some layers designed on the model
|
5 |
-
# below codes are based and referred from https://github.com/microsoft/Swin-Transformer
|
6 |
-
# Swin Transformer for Computer Vision: https://arxiv.org/pdf/2103.14030.pdf
|
7 |
-
|
8 |
-
import torch
|
9 |
-
import torch.nn as nn
|
10 |
-
import torch.nn.functional as F
|
11 |
-
from itertools import repeat
|
12 |
-
import collections.abc
|
13 |
-
import math
|
14 |
-
import warnings
|
15 |
-
|
16 |
-
from torch.nn.init import _calculate_fan_in_and_fan_out
|
17 |
-
import torch.utils.checkpoint as checkpoint
|
18 |
-
|
19 |
-
import random
|
20 |
-
|
21 |
-
from torchlibrosa.stft import Spectrogram, LogmelFilterBank
|
22 |
-
from torchlibrosa.augmentation import SpecAugmentation
|
23 |
-
|
24 |
-
from itertools import repeat
|
25 |
-
from .utils import do_mixup, interpolate
|
26 |
-
|
27 |
-
from .feature_fusion import iAFF, AFF, DAF
|
28 |
-
|
29 |
-
# from PyTorch internals
|
30 |
-
def _ntuple(n):
    """Return a converter that coerces a scalar into an n-tuple.

    Iterables (including strings) are passed through unchanged; any other
    value is repeated ``n`` times. (Borrowed from PyTorch/timm internals.)
    """
    def parse(value):
        if isinstance(value, collections.abc.Iterable):
            return value
        return tuple(repeat(value, n))
    return parse


# common arities, pre-built for convenience
to_1tuple = _ntuple(1)
to_2tuple = _ntuple(2)
to_3tuple = _ntuple(3)
to_4tuple = _ntuple(4)
to_ntuple = _ntuple
|
42 |
-
|
43 |
-
def drop_path(x, drop_prob: float = 0., training: bool = False):
    """Stochastic Depth: randomly zero out whole samples of a residual branch.

    Identical in spirit to the "DropConnect" used in EfficientNet-style
    networks, but named "drop path" to avoid confusion with the original
    DropConnect paper, which describes a different form of dropout.

    Args:
        x: input tensor; the drop decision is made per sample (dim 0).
        drop_prob: probability of dropping each sample's path.
        training: only applies the drop during training; identity otherwise.

    Returns:
        ``x`` unchanged when inactive, otherwise ``x`` with a random subset
        of samples zeroed and the survivors rescaled by ``1 / keep_prob``.
    """
    if not training or drop_prob == 0.:
        return x
    keep_prob = 1 - drop_prob
    # one Bernoulli draw per sample, broadcast over all remaining dims
    mask_shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    mask = keep_prob + torch.rand(mask_shape, dtype=x.dtype, device=x.device)
    mask.floor_()  # binarize: 1 with prob keep_prob, else 0
    return x.div(keep_prob) * mask
|
59 |
-
|
60 |
-
|
61 |
-
class DropPath(nn.Module):
    """Module wrapper around :func:`drop_path` (Stochastic Depth).

    Drops whole samples of the wrapped residual branch during training;
    acts as the identity in eval mode.
    """

    def __init__(self, drop_prob=None):
        super().__init__()
        # probability of dropping a sample's path; None behaves like 0
        self.drop_prob = drop_prob

    def forward(self, x):
        # self.training is managed by nn.Module.train()/eval()
        return drop_path(x, self.drop_prob, self.training)
|
70 |
-
|
71 |
-
class PatchEmbed(nn.Module):
    """2D Image to Patch Embedding.

    Projects a 2D input (here, presumably a mel-spectrogram treated as an
    image — the HTS-AT context and `mel_conv2d` suggest audio; confirm with
    callers) into a sequence of patch embeddings via a strided Conv2d.

    Optionally fuses a "global" channel with extra "local" channels when
    `enable_fusion` is set, using one of the DAF/AFF/iAFF fusion modules
    for the '*_2d' fusion types, or a widened input Conv2d for
    'channel_map'.
    """
    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None, flatten=True, patch_stride = 16,
                 enable_fusion=False, fusion_type='None'):
        super().__init__()
        # normalize scalar sizes to (height, width) pairs
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        patch_stride = to_2tuple(patch_stride)
        self.img_size = img_size
        self.patch_size = patch_size
        self.patch_stride = patch_stride
        # number of patches along each axis is governed by the stride,
        # not the patch size (patches may overlap when stride < size)
        self.grid_size = (img_size[0] // patch_stride[0], img_size[1] // patch_stride[1])
        self.num_patches = self.grid_size[0] * self.grid_size[1]
        self.flatten = flatten
        self.in_chans = in_chans
        self.embed_dim = embed_dim

        self.enable_fusion = enable_fusion
        self.fusion_type = fusion_type

        # symmetric padding so the patch grid covers the whole input
        padding = ((patch_size[0] - patch_stride[0]) // 2, (patch_size[1] - patch_stride[1]) // 2)

        if (self.enable_fusion) and (self.fusion_type == 'channel_map'):
            # channel_map fusion: stack the 4 input channels directly into
            # the projection (in_chans*4 input channels)
            self.proj = nn.Conv2d(in_chans*4, embed_dim, kernel_size=patch_size, stride=patch_stride, padding=padding)
        else:
            self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_stride, padding=padding)
        self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()

        if (self.enable_fusion) and (self.fusion_type in ['daf_2d','aff_2d','iaff_2d']):
            # local branch: wider kernel/stride (x3 along width) for the
            # extra channels; output is later aligned to the global branch
            self.mel_conv2d = nn.Conv2d(in_chans, embed_dim, kernel_size=(patch_size[0], patch_size[1]*3), stride=(patch_stride[0], patch_stride[1] * 3), padding=padding)
            if self.fusion_type == 'daf_2d':
                self.fusion_model = DAF()
            elif self.fusion_type == 'aff_2d':
                self.fusion_model = AFF(channels=embed_dim, type='2D')
            elif self.fusion_type == 'iaff_2d':
                self.fusion_model = iAFF(channels=embed_dim, type='2D')

    def forward(self, x, longer_idx = None):
        """Embed `x` into patches.

        Args:
            x: (B, C, H, W) input; H, W must match `img_size`.
            longer_idx: batch indices whose extra channels should go through
                the local fusion branch. Only used for the '*_2d' fusion
                types; NOTE(review): `len(longer_idx)` is called
                unconditionally on that path, so it must not be None there.

        Returns:
            (B, N, embed_dim) if `flatten` else (B, embed_dim, H', W').
        """
        if (self.enable_fusion) and (self.fusion_type in ['daf_2d','aff_2d','iaff_2d']):
            # channel 0 is the "global" view; remaining channels are local
            global_x = x[:,0:1,:,:]

            # global processing
            B, C, H, W = global_x.shape
            assert H == self.img_size[0] and W == self.img_size[1], \
                f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
            global_x = self.proj(global_x)
            TW = global_x.size(-1)  # target width the local branch must match
            if len(longer_idx) > 0:
                # local processing: only for the selected batch items
                local_x = x[longer_idx,1:,:,:].contiguous()
                B, C, H, W = local_x.shape
                # fold the local channels into the batch dim so mel_conv2d
                # sees single-channel inputs
                local_x = local_x.view(B*C,1,H,W)
                local_x = self.mel_conv2d(local_x)
                local_x = local_x.view(B,C,local_x.size(1),local_x.size(2),local_x.size(3))
                # concatenate the per-channel outputs along the width axis
                local_x = local_x.permute((0,2,3,1,4)).contiguous().flatten(3)
                TB,TC,TH,_ = local_x.size()
                # pad or trim the local width to match the global branch
                if local_x.size(-1) < TW:
                    local_x = torch.cat([local_x, torch.zeros((TB,TC,TH,TW-local_x.size(-1)), device=global_x.device)], dim=-1)
                else:
                    local_x = local_x[:,:,:,:TW]

                # fuse local features into the global ones for those items
                global_x[longer_idx] = self.fusion_model(global_x[longer_idx],local_x)
            x = global_x
        else:
            B, C, H, W = x.shape
            assert H == self.img_size[0] and W == self.img_size[1], \
                f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
            x = self.proj(x)

        if self.flatten:
            x = x.flatten(2).transpose(1, 2)  # BCHW -> BNC
        x = self.norm(x)
        return x
|
145 |
-
|
146 |
-
class Mlp(nn.Module):
    """Feed-forward block as used in Vision Transformer / MLP-Mixer models.

    Computes ``drop(fc2(drop(act(fc1(x)))))``; hidden and output widths
    default to the input width when not given.
    """

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        # fall back to the input width for any unspecified dimension
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        hidden = self.fc1(x)
        hidden = self.drop(self.act(hidden))
        return self.drop(self.fc2(hidden))
|
165 |
-
|
166 |
-
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
    """Fill `tensor` in-place with a truncated normal N(mean, std^2) on [a, b].

    Uses inverse-CDF sampling: draw uniforms on the CDF interval [cdf(a),
    cdf(b)], then map back through the inverse normal CDF (erfinv).
    Runs entirely under `torch.no_grad()` and returns `tensor`.
    """
    # Cut & paste from PyTorch official master until it's in a few official releases - RW
    # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
    def norm_cdf(x):
        # Computes standard normal cumulative distribution function
        return (1. + math.erf(x / math.sqrt(2.))) / 2.

    # warn when the requested mean lies far outside [a, b]: truncation then
    # badly distorts the distribution
    if (mean < a - 2 * std) or (mean > b + 2 * std):
        warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
                      "The distribution of values may be incorrect.",
                      stacklevel=2)

    with torch.no_grad():
        # Values are generated by using a truncated uniform distribution and
        # then using the inverse CDF for the normal distribution.
        # Get upper and lower cdf values
        l = norm_cdf((a - mean) / std)
        u = norm_cdf((b - mean) / std)

        # Uniformly fill tensor with values from [l, u], then translate to
        # [2l-1, 2u-1].
        tensor.uniform_(2 * l - 1, 2 * u - 1)

        # Use inverse cdf transform for normal distribution to get truncated
        # standard normal
        tensor.erfinv_()

        # Transform to proper mean, std
        tensor.mul_(std * math.sqrt(2.))
        tensor.add_(mean)

        # Clamp to ensure it's in the proper range
        tensor.clamp_(min=a, max=b)
        return tensor
|
200 |
-
|
201 |
-
|
202 |
-
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
    # type: (Tensor, float, float, float, float) -> Tensor
    r"""Fill *tensor* in-place with samples from a truncated normal distribution.

    Values are effectively drawn from :math:`\mathcal{N}(\text{mean},
    \text{std}^2)` with anything outside :math:`[a, b]` redrawn until it
    falls inside the bounds. The sampling method works best when
    :math:`a \leq \text{mean} \leq b`.

    Args:
        tensor: an n-dimensional `torch.Tensor`
        mean: the mean of the normal distribution
        std: the standard deviation of the normal distribution
        a: the minimum cutoff value
        b: the maximum cutoff value

    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.trunc_normal_(w)
    """
    # all the work (including the no_grad guard) lives in the helper
    return _no_grad_trunc_normal_(tensor, mean, std, a, b)
|
221 |
-
|
222 |
-
|
223 |
-
def variance_scaling_(tensor, scale=1.0, mode='fan_in', distribution='normal'):
    """Variance-scaling initialization (in-place), as in JAX/TF initializers.

    The sampling variance is ``scale / denom`` where ``denom`` is the fan
    selected by *mode*.

    Args:
        tensor: tensor to initialize in place.
        scale: scaling factor for the variance.
        mode: 'fan_in', 'fan_out', or 'fan_avg'.
        distribution: 'truncated_normal', 'normal', or 'uniform'.

    Raises:
        ValueError: if *mode* or *distribution* is not one of the values above.
    """
    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    if mode == 'fan_in':
        denom = fan_in
    elif mode == 'fan_out':
        denom = fan_out
    elif mode == 'fan_avg':
        denom = (fan_in + fan_out) / 2
    else:
        # previously an unknown mode fell through and crashed later with an
        # opaque UnboundLocalError on `denom`; fail fast and clearly instead,
        # mirroring the existing handling for `distribution`
        raise ValueError(f"invalid mode {mode}")

    variance = scale / denom

    if distribution == "truncated_normal":
        # constant is stddev of standard normal truncated to (-2, 2)
        trunc_normal_(tensor, std=math.sqrt(variance) / .87962566103423978)
    elif distribution == "normal":
        tensor.normal_(std=math.sqrt(variance))
    elif distribution == "uniform":
        bound = math.sqrt(3 * variance)
        tensor.uniform_(-bound, bound)
    else:
        raise ValueError(f"invalid distribution {distribution}")
|
244 |
-
|
245 |
-
|
246 |
-
def lecun_normal_(tensor):
    """LeCun-normal init: fan-in variance scaling with truncated-normal samples (in-place)."""
    variance_scaling_(tensor, mode='fan_in', distribution='truncated_normal')
|
248 |
-
|
249 |
-
def window_partition(x, window_size):
|
250 |
-
"""
|
251 |
-
Args:
|
252 |
-
x: (B, H, W, C)
|
253 |
-
window_size (int): window size
|
254 |
-
Returns:
|
255 |
-
windows: (num_windows*B, window_size, window_size, C)
|
256 |
-
"""
|
257 |
-
B, H, W, C = x.shape
|
258 |
-
x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
|
259 |
-
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
|
260 |
-
return windows
|
261 |
-
|
262 |
-
|
263 |
-
def window_reverse(windows, window_size, H, W):
    """Inverse of `window_partition`: merge windows back into a feature map.

    Args:
        windows: tensor of shape (num_windows*B, window_size, window_size, C).
        window_size (int): side length of each window.
        H (int): height of the full feature map.
        W (int): width of the full feature map.

    Returns:
        Tensor of shape (B, H, W, C).
    """
    windows_per_image = (H // window_size) * (W // window_size)
    batch = windows.shape[0] // windows_per_image
    # Re-introduce the block grid, then interleave block and in-block axes
    # back into plain spatial coordinates.
    grid = windows.view(
        batch,
        H // window_size, W // window_size,
        window_size, window_size,
        -1,
    )
    return (
        grid.permute(0, 1, 3, 2, 4, 5)
        .contiguous()
        .view(batch, H, W, -1)
    )
|
277 |
-
|
278 |
-
|
279 |
-
class WindowAttention(nn.Module):
    r""" Window based multi-head self attention (W-MSA) module with relative position bias.
    It supports both of shifted and non-shifted window.

    Args:
        dim (int): Number of input channels.
        window_size (tuple[int]): The height and width of the window.
        num_heads (int): Number of attention heads.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
        attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
        proj_drop (float, optional): Dropout ratio of output. Default: 0.0

    Note:
        Unlike the reference Swin implementation, `forward` here returns the
        post-softmax attention map alongside the features.
    """

    def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):

        super().__init__()
        self.dim = dim
        self.window_size = window_size  # Wh, Ww
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5

        # define a parameter table of relative position bias:
        # one learned bias per (relative offset, head) pair
        self.relative_position_bias_table = nn.Parameter(
            torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads))  # 2*Wh-1 * 2*Ww-1, nH

        # get pair-wise relative position index for each token inside the window
        coords_h = torch.arange(self.window_size[0])
        coords_w = torch.arange(self.window_size[1])
        coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # 2, Wh, Ww
        coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 2, Wh*Ww, Wh*Ww
        relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
        relative_coords[:, :, 0] += self.window_size[0] - 1  # shift to start from 0
        relative_coords[:, :, 1] += self.window_size[1] - 1
        # fold the (dy, dx) pair into a single flat index into the bias table
        relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
        relative_position_index = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
        self.register_buffer("relative_position_index", relative_position_index)

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

        trunc_normal_(self.relative_position_bias_table, std=.02)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x, mask=None):
        """
        Args:
            x: input features with shape of (num_windows*B, N, C)
            mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None

        Returns:
            tuple (x, attn): projected features (num_windows*B, N, C) and the
            attention weights after softmax/dropout (num_windows*B, nH, N, N).
        """
        B_, N, C = x.shape
        # one fused projection to q/k/v, then split into heads
        qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]  # make torchscript happy (cannot use tensor as tuple)

        q = q * self.scale
        attn = (q @ k.transpose(-2, -1))

        relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
            self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1)  # Wh*Ww,Wh*Ww,nH
        relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww
        attn = attn + relative_position_bias.unsqueeze(0)

        if mask is not None:
            # broadcast the per-window shift mask over the batch and head axes
            nW = mask.shape[0]
            attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
            attn = attn.view(-1, self.num_heads, N, N)
            attn = self.softmax(attn)
        else:
            attn = self.softmax(attn)

        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x, attn

    def extra_repr(self):
        return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}'
|
361 |
-
|
362 |
-
|
363 |
-
# We use the model based on Swintransformer Block, therefore we can use the swin-transformer pretrained model
|
364 |
-
class SwinTransformerBlock(nn.Module):
    r""" Swin Transformer Block.

    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        num_heads (int): Number of attention heads.
        window_size (int): Window size.
        shift_size (int): Shift size for SW-MSA.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float, optional): Stochastic depth rate. Default: 0.0
        act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
        norm_before_mlp (str): 'ln' or 'bn' — normalization applied before the MLP.
    """

    def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
                 mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
                 act_layer=nn.GELU, norm_layer=nn.LayerNorm, norm_before_mlp='ln'):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.num_heads = num_heads
        self.window_size = window_size
        self.shift_size = shift_size
        self.mlp_ratio = mlp_ratio
        self.norm_before_mlp = norm_before_mlp
        if min(self.input_resolution) <= self.window_size:
            # if window size is larger than input resolution, we don't partition windows
            self.shift_size = 0
            self.window_size = min(self.input_resolution)
        assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size"

        self.norm1 = norm_layer(dim)
        self.attn = WindowAttention(
            dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
            qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)

        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        if self.norm_before_mlp == 'ln':
            self.norm2 = nn.LayerNorm(dim)
        elif self.norm_before_mlp == 'bn':
            # BUGFIX: the original constructed `nn.BatchNorm1d(dim)` inside the
            # lambda, creating a brand-new (untrained, unregistered, always-CPU)
            # norm on every forward call. Instantiate it once here so its
            # parameters are registered, trained, and moved with the module.
            self.bn2 = nn.BatchNorm1d(dim)
            # BatchNorm1d expects (B, C, L), hence the transposes around it.
            self.norm2 = lambda x: self.bn2(x.transpose(1, 2)).transpose(1, 2)
        else:
            raise NotImplementedError
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)

        if self.shift_size > 0:
            # calculate attention mask for SW-MSA: label each shifted region
            # with a distinct id; tokens from different regions must not
            # attend to each other after the cyclic shift.
            H, W = self.input_resolution
            img_mask = torch.zeros((1, H, W, 1))  # 1 H W 1
            h_slices = (slice(0, -self.window_size),
                        slice(-self.window_size, -self.shift_size),
                        slice(-self.shift_size, None))
            w_slices = (slice(0, -self.window_size),
                        slice(-self.window_size, -self.shift_size),
                        slice(-self.shift_size, None))
            cnt = 0
            for h in h_slices:
                for w in w_slices:
                    img_mask[:, h, w, :] = cnt
                    cnt += 1

            mask_windows = window_partition(img_mask, self.window_size)  # nW, window_size, window_size, 1
            mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
            attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
            attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
        else:
            attn_mask = None

        self.register_buffer("attn_mask", attn_mask)

    def forward(self, x):
        """Apply (shifted-)window attention and the MLP, both with residuals.

        Args:
            x: (B, H*W, C) token sequence at `input_resolution`.

        Returns:
            tuple (x, attn): updated tokens (B, H*W, C) and the attention
            weights produced by the window attention.
        """
        H, W = self.input_resolution
        B, L, C = x.shape
        # assert L == H * W, "input feature has wrong size"

        shortcut = x
        x = self.norm1(x)
        x = x.view(B, H, W, C)

        # cyclic shift
        if self.shift_size > 0:
            shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
        else:
            shifted_x = x

        # partition windows
        x_windows = window_partition(shifted_x, self.window_size)  # nW*B, window_size, window_size, C
        x_windows = x_windows.view(-1, self.window_size * self.window_size, C)  # nW*B, window_size*window_size, C

        # W-MSA/SW-MSA
        attn_windows, attn = self.attn(x_windows, mask=self.attn_mask)  # nW*B, window_size*window_size, C

        # merge windows
        attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
        shifted_x = window_reverse(attn_windows, self.window_size, H, W)  # B H' W' C

        # reverse cyclic shift
        if self.shift_size > 0:
            x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
        else:
            x = shifted_x
        x = x.view(B, H * W, C)

        # FFN, with stochastic depth on both residual branches
        x = shortcut + self.drop_path(x)
        x = x + self.drop_path(self.mlp(self.norm2(x)))

        return x, attn

    def extra_repr(self):
        return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
               f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"
|
485 |
-
|
486 |
-
|
487 |
-
|
488 |
-
class PatchMerging(nn.Module):
    r""" Patch Merging Layer.

    Halves the spatial resolution and doubles the channel count by
    concatenating each 2x2 neighbourhood and projecting 4*C -> 2*C.

    Args:
        input_resolution (tuple[int]): Resolution of input feature.
        dim (int): Number of input channels.
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
    """

    def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
        super().__init__()
        self.input_resolution = input_resolution
        self.dim = dim
        self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
        self.norm = norm_layer(4 * dim)

    def forward(self, x):
        """Merge patches: (B, H*W, C) -> (B, H/2*W/2, 2*C)."""
        height, width = self.input_resolution
        batch, seq_len, channels = x.shape
        assert seq_len == height * width, "input feature has wrong size"
        assert height % 2 == 0 and width % 2 == 0, f"x size ({height}*{width}) are not even."

        grid = x.view(batch, height, width, channels)

        # The four interleaved sub-grids of every 2x2 neighbourhood,
        # stacked along the channel axis in a fixed order.
        quadrants = [
            grid[:, 0::2, 0::2, :],  # B H/2 W/2 C
            grid[:, 1::2, 0::2, :],  # B H/2 W/2 C
            grid[:, 0::2, 1::2, :],  # B H/2 W/2 C
            grid[:, 1::2, 1::2, :],  # B H/2 W/2 C
        ]
        merged = torch.cat(quadrants, -1)            # B H/2 W/2 4*C
        merged = merged.view(batch, -1, 4 * channels)  # B H/2*W/2 4*C

        merged = self.norm(merged)
        return self.reduction(merged)

    def extra_repr(self):
        return f"input_resolution={self.input_resolution}, dim={self.dim}"
|
528 |
-
|
529 |
-
|
530 |
-
class BasicLayer(nn.Module):
    """ A basic Swin Transformer layer for one stage.

    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        depth (int): Number of blocks.
        num_heads (int): Number of attention heads.
        window_size (int): Local window size.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
        downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
        norm_before_mlp (str): 'ln' or 'bn', forwarded to each block.
    """

    def __init__(self, dim, input_resolution, depth, num_heads, window_size,
                 mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False,
                 norm_before_mlp='ln'):

        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.depth = depth
        self.use_checkpoint = use_checkpoint

        # build blocks; even blocks use W-MSA, odd blocks the shifted SW-MSA
        self.blocks = nn.ModuleList([
            SwinTransformerBlock(dim=dim, input_resolution=input_resolution,
                                 num_heads=num_heads, window_size=window_size,
                                 shift_size=0 if (i % 2 == 0) else window_size // 2,
                                 mlp_ratio=mlp_ratio,
                                 qkv_bias=qkv_bias, qk_scale=qk_scale,
                                 drop=drop, attn_drop=attn_drop,
                                 drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
                                 norm_layer=norm_layer, norm_before_mlp=norm_before_mlp)
            for i in range(depth)])

        # patch merging layer
        if downsample is not None:
            self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)
        else:
            self.downsample = None

    def forward(self, x):
        """Run all blocks of this stage (plus the optional downsample).

        Returns:
            tuple (x, attn): stage output and an attention map — the last
            block's attention while training, the mean over all blocks in
            eval mode.
        """
        attns = []
        attn = None
        for blk in self.blocks:
            if self.use_checkpoint:
                # BUGFIX: blk returns a (features, attention) tuple; the
                # original assigned the whole tuple to `x`, breaking the next
                # block's input and leaving `attn` unbound in eval mode.
                x, attn = checkpoint.checkpoint(blk, x)
            else:
                x, attn = blk(x)
            if not self.training:
                attns.append(attn.unsqueeze(0))
        if self.downsample is not None:
            x = self.downsample(x)
        if not self.training:
            attn = torch.cat(attns, dim=0)
            attn = torch.mean(attn, dim=0)
        return x, attn

    def extra_repr(self):
        return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
|
596 |
-
|
597 |
-
|
598 |
-
# The Core of HTSAT
|
599 |
-
class HTSAT_Swin_Transformer(nn.Module):
    r"""HTSAT based on the Swin Transformer.

    The waveform (or pre-computed "mel_fusion" features) is converted to a
    log-mel spectrogram, reshaped into a roughly square "image"
    (`reshape_wav2img`), processed by a hierarchy of Swin stages, and finally
    mapped to class activations by a token-semantic CNN head (`tscam_conv`).

    Args:
        spec_size (int | tuple(int)): Input Spectrogram size. Default 256
        patch_size (int | tuple(int)): Patch size. Default: 4
        patch_stride (int | tuple(int)): Patch stride for frequency and time axis. Default: (4, 4)
        in_chans (int): Number of input image channels. Default: 1 (mono)
        num_classes (int): Number of classes for classification head. Default: 527
        embed_dim (int): Patch embedding dimension. Default: 96
        depths (tuple(int)): Depth of each HTSAT-Swin Transformer layer.
        num_heads (tuple(int)): Number of attention heads in different layers.
        window_size (int): Window size. Default: 8
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
        qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None
        drop_rate (float): Dropout rate. Default: 0
        attn_drop_rate (float): Attention dropout rate. Default: 0
        drop_path_rate (float): Stochastic depth rate. Default: 0.1
        norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
        ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
        patch_norm (bool): If True, add normalization after patch embedding. Default: True
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
        config (module): The configuration Module from config.py
        enable_fusion (bool): Whether feature-fusion ("mel_fusion") inputs are used.
        fusion_type (str): '...daf/aff/iaff...' 1D/2D variant or 'None'.
    """

    def __init__(self, spec_size=256, patch_size=4, patch_stride=(4,4),
                 in_chans=1, num_classes=527,
                 embed_dim=96, depths=[2, 2, 6, 2], num_heads=[4, 8, 16, 32],
                 window_size=8, mlp_ratio=4., qkv_bias=True, qk_scale=None,
                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
                 norm_layer=nn.LayerNorm,
                 ape=False, patch_norm=True,
                 use_checkpoint=False, norm_before_mlp='ln', config = None,
                 enable_fusion = False, fusion_type = 'None', **kwargs):
        super(HTSAT_Swin_Transformer, self).__init__()

        self.config = config
        self.spec_size = spec_size
        self.patch_stride = patch_stride
        self.patch_size = patch_size
        self.window_size = window_size
        self.embed_dim = embed_dim
        self.depths = depths
        self.ape = ape
        self.in_chans = in_chans
        self.num_classes = num_classes
        self.num_heads = num_heads
        self.num_layers = len(self.depths)
        # channels after the final stage (doubled at each PatchMerging)
        self.num_features = int(self.embed_dim * 2 ** (self.num_layers - 1))

        self.drop_rate = drop_rate
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate

        self.qkv_bias = qkv_bias
        # NOTE(review): the `qk_scale` argument is ignored and hard-coded to
        # None here — confirm whether that is intentional.
        self.qk_scale = None

        self.patch_norm = patch_norm
        self.norm_layer = norm_layer if self.patch_norm else None
        self.norm_before_mlp = norm_before_mlp
        self.mlp_ratio = mlp_ratio

        self.use_checkpoint = use_checkpoint

        self.enable_fusion = enable_fusion
        self.fusion_type = fusion_type

        # process mel-spec ; used only once
        self.freq_ratio = self.spec_size // self.config.mel_bins
        window = 'hann'
        center = True
        pad_mode = 'reflect'
        ref = 1.0
        amin = 1e-10
        top_db = None
        self.interpolate_ratio = 32  # Downsampled ratio
        # Spectrogram extractor
        self.spectrogram_extractor = Spectrogram(n_fft=config.window_size, hop_length=config.hop_size,
            win_length=config.window_size, window=window, center=center, pad_mode=pad_mode,
            freeze_parameters=True)
        # Logmel feature extractor
        self.logmel_extractor = LogmelFilterBank(sr=config.sample_rate, n_fft=config.window_size,
            n_mels=config.mel_bins, fmin=config.fmin, fmax=config.fmax, ref=ref, amin=amin, top_db=top_db,
            freeze_parameters=True)
        # Spec augmenter (training-time time/frequency masking)
        self.spec_augmenter = SpecAugmentation(time_drop_width=64, time_stripes_num=2,
            freq_drop_width=8, freq_stripes_num=2)  # 2 2
        self.bn0 = nn.BatchNorm2d(self.config.mel_bins)

        # split spectrogram into non-overlapping patches
        self.patch_embed = PatchEmbed(
            img_size=self.spec_size, patch_size=self.patch_size, in_chans=self.in_chans,
            embed_dim=self.embed_dim, norm_layer=self.norm_layer, patch_stride = patch_stride,
            enable_fusion=self.enable_fusion, fusion_type=self.fusion_type
        )

        num_patches = self.patch_embed.num_patches
        patches_resolution = self.patch_embed.grid_size
        self.patches_resolution = patches_resolution

        # absolute position embedding
        if self.ape:
            self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, self.embed_dim))
            trunc_normal_(self.absolute_pos_embed, std=.02)

        self.pos_drop = nn.Dropout(p=self.drop_rate)

        # stochastic depth
        dpr = [x.item() for x in torch.linspace(0, self.drop_path_rate, sum(self.depths))]  # stochastic depth decay rule

        # build layers: each stage halves the resolution (except the last)
        self.layers = nn.ModuleList()
        for i_layer in range(self.num_layers):
            layer = BasicLayer(dim=int(self.embed_dim * 2 ** i_layer),
                               input_resolution=(patches_resolution[0] // (2 ** i_layer),
                                                 patches_resolution[1] // (2 ** i_layer)),
                               depth=self.depths[i_layer],
                               num_heads=self.num_heads[i_layer],
                               window_size=self.window_size,
                               mlp_ratio=self.mlp_ratio,
                               qkv_bias=self.qkv_bias, qk_scale=self.qk_scale,
                               drop=self.drop_rate, attn_drop=self.attn_drop_rate,
                               drop_path=dpr[sum(self.depths[:i_layer]):sum(self.depths[:i_layer + 1])],
                               norm_layer=self.norm_layer,
                               downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
                               use_checkpoint=use_checkpoint,
                               norm_before_mlp=self.norm_before_mlp)
            self.layers.append(layer)

        self.norm = self.norm_layer(self.num_features)
        self.avgpool = nn.AdaptiveAvgPool1d(1)
        self.maxpool = nn.AdaptiveMaxPool1d(1)

        # kernel height = frequency bins left per sub-spectrogram after all
        # downsampling, so the conv collapses the frequency axis entirely
        SF = self.spec_size // (2 ** (len(self.depths) - 1)) // self.patch_stride[0] // self.freq_ratio
        self.tscam_conv = nn.Conv2d(
            in_channels = self.num_features,
            out_channels = self.num_classes,
            kernel_size = (SF,3),
            padding = (0,1)
        )
        # NOTE(review): `self.head` is defined but not applied in the current
        # forward path (the call site is commented out) — confirm if needed.
        self.head = nn.Linear(num_classes, num_classes)

        if (self.enable_fusion) and (self.fusion_type in ['daf_1d','aff_1d','iaff_1d']):
            # 1D fusion: compress the extra mel channels over time, then fuse
            self.mel_conv1d = nn.Sequential(
                nn.Conv1d(64, 64, kernel_size=5, stride=3, padding=2),
                nn.BatchNorm1d(64)
            )
            if self.fusion_type == 'daf_1d':
                self.fusion_model = DAF()
            elif self.fusion_type == 'aff_1d':
                self.fusion_model = AFF(channels=64, type='1D')
            elif self.fusion_type == 'iaff_1d':
                self.fusion_model = iAFF(channels=64, type='1D')

        self.apply(self._init_weights)

    def _init_weights(self, m):
        # truncated normal for Linear weights; standard constants for LayerNorm
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    @torch.jit.ignore
    def no_weight_decay(self):
        # parameter names the optimizer setup should exclude from weight decay
        return {'absolute_pos_embed'}

    @torch.jit.ignore
    def no_weight_decay_keywords(self):
        return {'relative_position_bias_table'}

    def forward_features(self, x, longer_idx = None):
        # A deprecated optimization for using a hierarchical output from different blocks

        frames_num = x.shape[2]
        x = self.patch_embed(x, longer_idx = longer_idx)
        if self.ape:
            x = x + self.absolute_pos_embed
        x = self.pos_drop(x)
        for i, layer in enumerate(self.layers):
            x, attn = layer(x)
        # for x
        x = self.norm(x)
        B, N, C = x.shape
        # recover the (frequency, time) grid of the final-stage tokens
        SF = frames_num // (2 ** (len(self.depths) - 1)) // self.patch_stride[0]
        ST = frames_num // (2 ** (len(self.depths) - 1)) // self.patch_stride[1]
        x = x.permute(0,2,1).contiguous().reshape(B, C, SF, ST)
        B, C, F, T = x.shape
        # group 2D CNN: undo reshape_wav2img — unfold the stacked
        # sub-spectrograms back along the time axis
        c_freq_bin = F // self.freq_ratio
        x = x.reshape(B, C, F // c_freq_bin, c_freq_bin, T)
        x = x.permute(0,1,3,2,4).contiguous().reshape(B, C, c_freq_bin, -1)
        # get latent_output
        fine_grained_latent_output = torch.mean(x, dim = 2)
        fine_grained_latent_output = interpolate(fine_grained_latent_output.permute(0,2,1).contiguous(), 8 * self.patch_stride[1])

        latent_output = self.avgpool(torch.flatten(x,2))
        latent_output = torch.flatten(latent_output, 1)

        # token-semantic head: per-class activations over time
        x = self.tscam_conv(x)
        x = torch.flatten(x, 2)  # B, C, T

        # upsample frame-wise predictions toward the input frame rate
        fpx = interpolate(torch.sigmoid(x).permute(0,2,1).contiguous(), 8 * self.patch_stride[1])

        x = self.avgpool(x)
        x = torch.flatten(x, 1)

        output_dict = {
            'framewise_output': fpx,  # already sigmoided
            'clipwise_output': torch.sigmoid(x),
            'fine_grained_embedding': fine_grained_latent_output,
            'embedding': latent_output
        }

        return output_dict

    def crop_wav(self, x, crop_size, spe_pos = None):
        # Crop a fixed-size time window from each example in the batch;
        # position is random unless `spe_pos` pins it.
        # NOTE(review): only channel 0 is copied — assumes mono input.
        time_steps = x.shape[2]
        tx = torch.zeros(x.shape[0], x.shape[1], crop_size, x.shape[3]).to(x.device)
        for i in range(len(x)):
            if spe_pos is None:
                crop_pos = random.randint(0, time_steps - crop_size - 1)
            else:
                crop_pos = spe_pos
            tx[i][0] = x[i, 0, crop_pos:crop_pos + crop_size,:]
        return tx

    # Reshape the wavform to a img size, if you want to use the pretrained swin transformer model
    def reshape_wav2img(self, x):
        # (B, C, T, F) -> (B, C, F * freq_ratio, T / freq_ratio): the long
        # time axis is chopped into `freq_ratio` chunks stacked along
        # frequency, yielding a square-ish Swin input.
        B, C, T, F = x.shape
        target_T = int(self.spec_size * self.freq_ratio)
        target_F = self.spec_size // self.freq_ratio
        assert T <= target_T and F <= target_F, "the wav size should less than or equal to the swin input size"
        # to avoid bicubic zero error
        if T < target_T:
            x = nn.functional.interpolate(x, (target_T, x.shape[3]), mode="bicubic", align_corners=True)
        if F < target_F:
            x = nn.functional.interpolate(x, (x.shape[2], target_F), mode="bicubic", align_corners=True)
        x = x.permute(0,1,3,2).contiguous()
        x = x.reshape(x.shape[0], x.shape[1], x.shape[2], self.freq_ratio, x.shape[3] // self.freq_ratio)
        x = x.permute(0,1,3,2,4).contiguous()
        x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3], x.shape[4])
        return x

    # Repeat the wavform to a img size, if you want to use the pretrained swin transformer model
    def repeat_wat2img(self, x, cur_pos):
        # Take a spec_size-long time slice starting at `cur_pos` and tile it
        # 4x along frequency to fill the Swin input.
        B, C, T, F = x.shape
        target_T = int(self.spec_size * self.freq_ratio)
        target_F = self.spec_size // self.freq_ratio
        assert T <= target_T and F <= target_F, "the wav size should less than or equal to the swin input size"
        # to avoid bicubic zero error
        if T < target_T:
            x = nn.functional.interpolate(x, (target_T, x.shape[3]), mode="bicubic", align_corners=True)
        if F < target_F:
            x = nn.functional.interpolate(x, (x.shape[2], target_F), mode="bicubic", align_corners=True)
        x = x.permute(0,1,3,2).contiguous()  # B C F T
        x = x[:,:,:,cur_pos:cur_pos + self.spec_size]
        x = x.repeat(repeats = (1,1,4,1))
        return x

    def forward(self, x: torch.Tensor, mixup_lambda = None, infer_mode = False, device=None):
        """Full forward pass.

        Args:
            x: despite the tensor annotation, a mapping is expected —
               key "waveform" on the plain path, or keys "mel_fusion" and
               "longer" on the fusion path.
            mixup_lambda: optional mixup coefficients (training only).
            infer_mode: unused in the current implementation.
            device: device the input tensors are moved to.

        Returns:
            dict from `forward_features` (framewise/clipwise outputs and
            embeddings).
        """
        if self.enable_fusion and x["longer"].sum() == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            x["longer"][torch.randint(0, x["longer"].shape[0], (1,))] = True

        if not self.enable_fusion:
            x = x["waveform"].to(device=device, non_blocking=True)
            x = self.spectrogram_extractor(x)  # (batch_size, 1, time_steps, freq_bins)
            x = self.logmel_extractor(x)  # (batch_size, 1, time_steps, mel_bins)
            x = x.transpose(1, 3)
            x = self.bn0(x)
            x = x.transpose(1, 3)
            if self.training:
                x = self.spec_augmenter(x)

            if self.training and mixup_lambda is not None:
                x = do_mixup(x, mixup_lambda)

            x = self.reshape_wav2img(x)
            output_dict = self.forward_features(x)
        else:
            longer_list = x["longer"].to(device=device, non_blocking=True)
            x = x["mel_fusion"].to(device=device, non_blocking=True)
            x = x.transpose(1, 3)
            x = self.bn0(x)
            x = x.transpose(1, 3)
            longer_list_idx = torch.where(longer_list)[0]
            if self.fusion_type in ['daf_1d','aff_1d','iaff_1d']:
                # channel 0 is the "global" mel; remaining channels are the
                # local crops to be fused for the "longer" samples only
                new_x = x[:,0:1,:,:].clone().contiguous()
                if len(longer_list_idx) > 0:
                    # local processing
                    fusion_x_local = x[longer_list_idx,1:,:,:].clone().contiguous()
                    FB,FC,FT,FF = fusion_x_local.size()
                    fusion_x_local = fusion_x_local.view(FB * FC, FT, FF)
                    fusion_x_local = torch.permute(fusion_x_local, (0,2,1)).contiguous()
                    fusion_x_local = self.mel_conv1d(fusion_x_local)
                    fusion_x_local = fusion_x_local.view(FB,FC,FF,fusion_x_local.size(-1))
                    fusion_x_local = torch.permute(fusion_x_local, (0,2,1,3)).contiguous().flatten(2)
                    # pad or trim the fused local features to the global length
                    if fusion_x_local.size(-1) < FT:
                        fusion_x_local = torch.cat([fusion_x_local, torch.zeros((FB,FF,FT- fusion_x_local.size(-1)), device=device)], dim=-1)
                    else:
                        fusion_x_local = fusion_x_local[:,:,:FT]
                    # 1D fusion
                    new_x = new_x.squeeze(1).permute((0,2,1)).contiguous()
                    new_x[longer_list_idx] = self.fusion_model(new_x[longer_list_idx], fusion_x_local)
                    x = new_x.permute((0,2,1)).contiguous()[:,None,:,:]
                else:
                    x = new_x

            elif self.fusion_type in ['daf_2d','aff_2d','iaff_2d','channel_map']:
                x = x  # no change; 2D fusion happens inside the patch embed

            if self.training:
                x = self.spec_augmenter(x)
            if self.training and mixup_lambda is not None:
                x = do_mixup(x, mixup_lambda)

            x = self.reshape_wav2img(x)
            output_dict = self.forward_features(x, longer_idx = longer_list_idx)

        # NOTE: a long commented-out multi-crop / repeat inference path used
        # to live here; variable-length handling is now done in the
        # dataloader, so only input_T <= fixed_T is considered.

        return output_dict
|
971 |
-
|
972 |
-
def create_htsat_model(audio_cfg, enable_fusion=False, fusion_type='None'):
|
973 |
-
try:
|
974 |
-
|
975 |
-
assert audio_cfg.model_name in ["tiny", "base", "large"], "model name for HTS-AT is wrong!"
|
976 |
-
if audio_cfg.model_name == "tiny":
|
977 |
-
model = HTSAT_Swin_Transformer(
|
978 |
-
spec_size=256,
|
979 |
-
patch_size=4,
|
980 |
-
patch_stride=(4,4),
|
981 |
-
num_classes=audio_cfg.class_num,
|
982 |
-
embed_dim=96,
|
983 |
-
depths=[2,2,6,2],
|
984 |
-
num_heads=[4,8,16,32],
|
985 |
-
window_size=8,
|
986 |
-
config = audio_cfg,
|
987 |
-
enable_fusion = enable_fusion,
|
988 |
-
fusion_type = fusion_type
|
989 |
-
)
|
990 |
-
elif audio_cfg.model_name == "base":
|
991 |
-
model = HTSAT_Swin_Transformer(
|
992 |
-
spec_size=256,
|
993 |
-
patch_size=4,
|
994 |
-
patch_stride=(4,4),
|
995 |
-
num_classes=audio_cfg.class_num,
|
996 |
-
embed_dim=128,
|
997 |
-
depths=[2,2,12,2],
|
998 |
-
num_heads=[4,8,16,32],
|
999 |
-
window_size=8,
|
1000 |
-
config = audio_cfg,
|
1001 |
-
enable_fusion = enable_fusion,
|
1002 |
-
fusion_type = fusion_type
|
1003 |
-
)
|
1004 |
-
elif audio_cfg.model_name == "large":
|
1005 |
-
model = HTSAT_Swin_Transformer(
|
1006 |
-
spec_size=256,
|
1007 |
-
patch_size=4,
|
1008 |
-
patch_stride=(4,4),
|
1009 |
-
num_classes=audio_cfg.class_num,
|
1010 |
-
embed_dim=256,
|
1011 |
-
depths=[2,2,12,2],
|
1012 |
-
num_heads=[4,8,16,32],
|
1013 |
-
window_size=8,
|
1014 |
-
config = audio_cfg,
|
1015 |
-
enable_fusion = enable_fusion,
|
1016 |
-
fusion_type = fusion_type
|
1017 |
-
)
|
1018 |
-
|
1019 |
-
return model
|
1020 |
-
except:
|
1021 |
-
raise RuntimeError(f'Import Model for {audio_cfg.model_name} not found, or the audio cfg parameters are not enough.')
|
1022 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AbeShinzo0708/AI_Kishida_Fumio_speaker/hooks/hook-espnet.py
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
from PyInstaller.utils.hooks import copy_metadata
|
2 |
-
|
3 |
-
datas = copy_metadata('espnet')
|
|
|
|
|
|
|
|
spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/server/websearch/parseWeb.ts
DELETED
@@ -1,56 +0,0 @@
|
|
1 |
-
import { JSDOM, VirtualConsole } from "jsdom";
|
2 |
-
|
3 |
-
function removeTags(node: Node) {
|
4 |
-
if (node.hasChildNodes()) {
|
5 |
-
node.childNodes.forEach((childNode) => {
|
6 |
-
if (node.nodeName === "SCRIPT" || node.nodeName === "STYLE") {
|
7 |
-
node.removeChild(childNode);
|
8 |
-
} else {
|
9 |
-
removeTags(childNode);
|
10 |
-
}
|
11 |
-
});
|
12 |
-
}
|
13 |
-
}
|
14 |
-
function naiveInnerText(node: Node): string {
|
15 |
-
const Node = node; // We need Node(DOM's Node) for the constants, but Node doesn't exist in the nodejs global space, and any Node instance references the constants through the prototype chain
|
16 |
-
return [...node.childNodes]
|
17 |
-
.map((childNode) => {
|
18 |
-
switch (childNode.nodeType) {
|
19 |
-
case Node.TEXT_NODE:
|
20 |
-
return node.textContent;
|
21 |
-
case Node.ELEMENT_NODE:
|
22 |
-
return naiveInnerText(childNode);
|
23 |
-
default:
|
24 |
-
return "";
|
25 |
-
}
|
26 |
-
})
|
27 |
-
.join("\n");
|
28 |
-
}
|
29 |
-
|
30 |
-
export async function parseWeb(url: string) {
|
31 |
-
const abortController = new AbortController();
|
32 |
-
setTimeout(() => abortController.abort(), 10000);
|
33 |
-
const htmlString = await fetch(url, { signal: abortController.signal })
|
34 |
-
.then((response) => response.text())
|
35 |
-
.catch((err) => console.log(err));
|
36 |
-
|
37 |
-
const virtualConsole = new VirtualConsole();
|
38 |
-
virtualConsole.on("error", () => {
|
39 |
-
// No-op to skip console errors.
|
40 |
-
});
|
41 |
-
|
42 |
-
// put the html string into a DOM
|
43 |
-
const dom = new JSDOM(htmlString ?? "", {
|
44 |
-
virtualConsole,
|
45 |
-
});
|
46 |
-
|
47 |
-
const body = dom.window.document.querySelector("body");
|
48 |
-
if (!body) throw new Error("body of the webpage is null");
|
49 |
-
|
50 |
-
removeTags(body);
|
51 |
-
|
52 |
-
// recursively extract text content from the body and then remove newlines and multiple spaces
|
53 |
-
const text = (naiveInnerText(body) ?? "").replace(/ {2}|\r\n|\n|\r/gm, "");
|
54 |
-
|
55 |
-
return text;
|
56 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AchyuthGamer/OpenGPT-Chat-UI/src/routes/conversation/[id]/web-search/+server.ts
DELETED
@@ -1,138 +0,0 @@
|
|
1 |
-
import { authCondition } from "$lib/server/auth";
|
2 |
-
import { collections } from "$lib/server/database";
|
3 |
-
import { defaultModel } from "$lib/server/models";
|
4 |
-
import { searchWeb } from "$lib/server/websearch/searchWeb";
|
5 |
-
import type { Message } from "$lib/types/Message";
|
6 |
-
import { error } from "@sveltejs/kit";
|
7 |
-
import { z } from "zod";
|
8 |
-
import type { WebSearch } from "$lib/types/WebSearch";
|
9 |
-
import { generateQuery } from "$lib/server/websearch/generateQuery";
|
10 |
-
import { parseWeb } from "$lib/server/websearch/parseWeb";
|
11 |
-
import { summarizeWeb } from "$lib/server/websearch/summarizeWeb";
|
12 |
-
|
13 |
-
interface GenericObject {
|
14 |
-
[key: string]: GenericObject | unknown;
|
15 |
-
}
|
16 |
-
|
17 |
-
function removeLinks(obj: GenericObject) {
|
18 |
-
for (const prop in obj) {
|
19 |
-
if (prop.endsWith("link")) delete obj[prop];
|
20 |
-
else if (typeof obj[prop] === "object") removeLinks(obj[prop] as GenericObject);
|
21 |
-
}
|
22 |
-
return obj;
|
23 |
-
}
|
24 |
-
export async function GET({ params, locals, url }) {
|
25 |
-
/*const model = defaultModel;
|
26 |
-
const convId = new ObjectId(params.id);
|
27 |
-
const searchId = new ObjectId();
|
28 |
-
|
29 |
-
const conv = await collections.conversations.findOne({
|
30 |
-
_id: convId,
|
31 |
-
...authCondition(locals),
|
32 |
-
});
|
33 |
-
|
34 |
-
if (!conv) {
|
35 |
-
throw error(404, "Conversation not found");
|
36 |
-
}
|
37 |
-
|
38 |
-
const prompt = z.string().trim().min(1).parse(url.searchParams.get("prompt"));
|
39 |
-
|
40 |
-
const messages = (() => {
|
41 |
-
return [...conv.messages, { content: prompt, from: "user", id: crypto.randomUUID() }];
|
42 |
-
})() satisfies Message[];
|
43 |
-
|
44 |
-
const stream = new ReadableStream({
|
45 |
-
async start(controller) {
|
46 |
-
const webSearch: WebSearch = {
|
47 |
-
_id: searchId,
|
48 |
-
convId: convId,
|
49 |
-
prompt: prompt,
|
50 |
-
searchQuery: "",
|
51 |
-
knowledgeGraph: "",
|
52 |
-
answerBox: "",
|
53 |
-
results: [],
|
54 |
-
summary: "",
|
55 |
-
messages: [],
|
56 |
-
createdAt: new Date(),
|
57 |
-
updatedAt: new Date(),
|
58 |
-
};
|
59 |
-
|
60 |
-
function appendUpdate(message: string, args?: string[], type?: "error" | "update") {
|
61 |
-
webSearch.messages.push({
|
62 |
-
type: type ?? "update",
|
63 |
-
message,
|
64 |
-
args,
|
65 |
-
});
|
66 |
-
controller.enqueue(JSON.stringify({ messages: webSearch.messages }));
|
67 |
-
}
|
68 |
-
|
69 |
-
try {
|
70 |
-
appendUpdate("Generating search query");
|
71 |
-
webSearch.searchQuery = await generateQuery(messages);
|
72 |
-
|
73 |
-
appendUpdate("Searching Google", [webSearch.searchQuery]);
|
74 |
-
const results = await searchWeb(webSearch.searchQuery);
|
75 |
-
|
76 |
-
let text = "";
|
77 |
-
webSearch.results =
|
78 |
-
(results.organic_results &&
|
79 |
-
results.organic_results.map((el: { link: string }) => el.link)) ??
|
80 |
-
[];
|
81 |
-
|
82 |
-
if (results.answer_box) {
|
83 |
-
// if google returns an answer box, we use it
|
84 |
-
webSearch.answerBox = JSON.stringify(removeLinks(results.answer_box));
|
85 |
-
text = webSearch.answerBox;
|
86 |
-
appendUpdate("Found a Google answer box");
|
87 |
-
} else if (results.knowledge_graph) {
|
88 |
-
// if google returns a knowledge graph, we use it
|
89 |
-
webSearch.knowledgeGraph = JSON.stringify(removeLinks(results.knowledge_graph));
|
90 |
-
text = webSearch.knowledgeGraph;
|
91 |
-
appendUpdate("Found a Google knowledge page");
|
92 |
-
} else if (webSearch.results.length > 0) {
|
93 |
-
let tries = 0;
|
94 |
-
|
95 |
-
while (!text && tries < 3) {
|
96 |
-
const searchUrl = webSearch.results[tries];
|
97 |
-
appendUpdate("Browsing result", [JSON.stringify(searchUrl)]);
|
98 |
-
try {
|
99 |
-
text = await parseWeb(searchUrl);
|
100 |
-
if (!text) throw new Error("text of the webpage is null");
|
101 |
-
} catch (e) {
|
102 |
-
appendUpdate("Error parsing webpage", [], "error");
|
103 |
-
tries++;
|
104 |
-
}
|
105 |
-
}
|
106 |
-
if (!text) throw new Error("No text found on the first 3 results");
|
107 |
-
} else {
|
108 |
-
throw new Error("No results found for this search query");
|
109 |
-
}
|
110 |
-
|
111 |
-
appendUpdate("Creating summary");
|
112 |
-
webSearch.summary = await summarizeWeb(text, webSearch.searchQuery, model);
|
113 |
-
appendUpdate("Injecting summary", [JSON.stringify(webSearch.summary)]);
|
114 |
-
} catch (searchError) {
|
115 |
-
if (searchError instanceof Error) {
|
116 |
-
webSearch.messages.push({
|
117 |
-
type: "error",
|
118 |
-
message: "An error occurred with the web search",
|
119 |
-
args: [JSON.stringify(searchError.message)],
|
120 |
-
});
|
121 |
-
}
|
122 |
-
}
|
123 |
-
|
124 |
-
const res = await collections.webSearches.insertOne(webSearch);
|
125 |
-
webSearch.messages.push({
|
126 |
-
type: "result",
|
127 |
-
id: res.insertedId.toString(),
|
128 |
-
});
|
129 |
-
controller.enqueue(JSON.stringify({ messages: webSearch.messages }));
|
130 |
-
},
|
131 |
-
});
|
132 |
-
|
133 |
-
return new Response(stream, { headers: { "Content-Type": "application/json" } });
|
134 |
-
|
135 |
-
*/
|
136 |
-
|
137 |
-
return new Response(undefined, { headers: { "Content-Type": "application/json" } });
|
138 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/SetAnchor.js
DELETED
@@ -1,34 +0,0 @@
|
|
1 |
-
import Anchor from '../anchor/Anchor.js';
|
2 |
-
|
3 |
-
var SetAnchor = function (config) {
|
4 |
-
if (config === undefined) {
|
5 |
-
config = {};
|
6 |
-
}
|
7 |
-
|
8 |
-
// Assign default onResizeCallback if not given
|
9 |
-
var hasMinWidth = config.hasOwnProperty('width');
|
10 |
-
var hasMinHeight = config.hasOwnProperty('height');
|
11 |
-
var hasOnResizeCallback = config.hasOwnProperty('onResizeCallback');
|
12 |
-
if ((hasMinWidth || hasMinHeight) && !hasOnResizeCallback) {
|
13 |
-
config.onResizeCallback = function (width, height, sizer) {
|
14 |
-
if (hasMinWidth) {
|
15 |
-
sizer.setMinWidth(width);
|
16 |
-
}
|
17 |
-
|
18 |
-
if (hasMinHeight) {
|
19 |
-
sizer.setMinHeight(height);
|
20 |
-
}
|
21 |
-
|
22 |
-
sizer.layout();
|
23 |
-
}
|
24 |
-
}
|
25 |
-
|
26 |
-
if (this._anchor === undefined) {
|
27 |
-
this._anchor = new Anchor(this, config);
|
28 |
-
} else {
|
29 |
-
this._anchor.resetFromJSON(config)
|
30 |
-
}
|
31 |
-
return this;
|
32 |
-
}
|
33 |
-
|
34 |
-
export default SetAnchor;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorpicker/methods/HPalette.js
DELETED
@@ -1,96 +0,0 @@
|
|
1 |
-
import OverlapSizer from '../../../overlapsizer/OverlapSizer.js';
|
2 |
-
import HPaletteCanvas from './HPaletteCanvas.js';
|
3 |
-
import RoundRectangle from '../../../roundrectangle/RoundRectangle.js';
|
4 |
-
import { LocalToWorld } from './Transform.js';
|
5 |
-
|
6 |
-
class HPalette extends OverlapSizer {
|
7 |
-
constructor(scene, config) {
|
8 |
-
if (config === undefined) {
|
9 |
-
config = {};
|
10 |
-
}
|
11 |
-
super(scene, config);
|
12 |
-
|
13 |
-
var orientation = (config.width != null) ? 1 : 0;
|
14 |
-
var paletteCanvas = (new HPaletteCanvas(scene))
|
15 |
-
.setOrientation(orientation)
|
16 |
-
scene.add.existing(paletteCanvas);
|
17 |
-
this.type = 'rexColorPicker.HPalette';
|
18 |
-
|
19 |
-
paletteCanvas
|
20 |
-
.setInteractive()
|
21 |
-
.on('pointerdown', this.onPaletteCanvasPointerDown, this)
|
22 |
-
.on('pointermove', this.onPaletteCanvasPointerDown, this)
|
23 |
-
|
24 |
-
var marker = new RoundRectangle(scene, { strokeColor: 0xffffff, strokeWidth: 2 });
|
25 |
-
scene.add.existing(marker);
|
26 |
-
|
27 |
-
this
|
28 |
-
.add(
|
29 |
-
paletteCanvas,
|
30 |
-
{ key: 'paletteCanvas', expand: true }
|
31 |
-
)
|
32 |
-
.add(
|
33 |
-
marker,
|
34 |
-
{ key: 'marker', expand: false }
|
35 |
-
)
|
36 |
-
}
|
37 |
-
|
38 |
-
resize(width, height) {
|
39 |
-
if ((this.width === width) && (this.height === height)) {
|
40 |
-
return this;
|
41 |
-
}
|
42 |
-
|
43 |
-
super.resize(width, height);
|
44 |
-
|
45 |
-
var size = Math.min(width, height);
|
46 |
-
this.childrenMap.marker.setSize(size, size);
|
47 |
-
|
48 |
-
return this;
|
49 |
-
}
|
50 |
-
|
51 |
-
onPaletteCanvasPointerDown(pointer, localX, localY, event) {
|
52 |
-
if (!pointer.isDown) {
|
53 |
-
return;
|
54 |
-
}
|
55 |
-
|
56 |
-
var paletteCanvas = this.childrenMap.paletteCanvas;
|
57 |
-
var color = paletteCanvas.getColor(localX, localY);
|
58 |
-
this.setMarkerPosition(color);
|
59 |
-
|
60 |
-
this.emit('input', color);
|
61 |
-
}
|
62 |
-
|
63 |
-
get color() {
|
64 |
-
return this.childrenMap.paletteCanvas.color;
|
65 |
-
}
|
66 |
-
|
67 |
-
setColor(color) {
|
68 |
-
if (this.color === color) {
|
69 |
-
return this;
|
70 |
-
}
|
71 |
-
|
72 |
-
var paletteCanvas = this.childrenMap.paletteCanvas;
|
73 |
-
paletteCanvas.setColor(color);
|
74 |
-
this.setMarkerPosition(color);
|
75 |
-
|
76 |
-
return this;
|
77 |
-
}
|
78 |
-
|
79 |
-
setMarkerPosition(color) {
|
80 |
-
var paletteCanvas = this.childrenMap.paletteCanvas;
|
81 |
-
var marker = this.childrenMap.marker;
|
82 |
-
|
83 |
-
var localXY = paletteCanvas.colorToLocalPosition(color, true);
|
84 |
-
LocalToWorld(paletteCanvas, localXY.x, localXY.y, marker);
|
85 |
-
this.resetChildPositionState(marker);
|
86 |
-
|
87 |
-
return this;
|
88 |
-
}
|
89 |
-
|
90 |
-
getHue(localX, localY) {
|
91 |
-
var paletteCanvas = this.childrenMap.paletteCanvas;
|
92 |
-
return paletteCanvas.getHue(localX, localY);
|
93 |
-
}
|
94 |
-
}
|
95 |
-
|
96 |
-
export default HPalette;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/fixwidthsizer/RemoveChildMethods.js
DELETED
@@ -1,28 +0,0 @@
|
|
1 |
-
import RemoveChild from '../basesizer/utils/RemoveChild.js';
|
2 |
-
import ClearChildren from '../basesizer/utils/ClearChildren.js';
|
3 |
-
|
4 |
-
const RemoveItem = Phaser.Utils.Array.Remove;
|
5 |
-
|
6 |
-
export default {
|
7 |
-
remove(gameObject, destroyChild) {
|
8 |
-
if (this.getParentSizer(gameObject) !== this) {
|
9 |
-
return this;
|
10 |
-
}
|
11 |
-
RemoveItem(this.sizerChildren, gameObject);
|
12 |
-
RemoveChild.call(this, gameObject, destroyChild);
|
13 |
-
return this;
|
14 |
-
},
|
15 |
-
|
16 |
-
removeAll(destroyChild) {
|
17 |
-
for (var i = this.sizerChildren.length - 1; i >= 0; i--) {
|
18 |
-
this.remove(this.sizerChildren[i], destroyChild);
|
19 |
-
}
|
20 |
-
return this;
|
21 |
-
},
|
22 |
-
|
23 |
-
clear(destroyChild) {
|
24 |
-
this.sizerChildren.length = 0;
|
25 |
-
ClearChildren.call(this, destroyChild);
|
26 |
-
return this;
|
27 |
-
}
|
28 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/roundrectanglecanvas/RoundRectangleCanvas.js
DELETED
@@ -1,2 +0,0 @@
|
|
1 |
-
import RoundRectangleCanvas from '../../../plugins/roundrectanglecanvas.js';
|
2 |
-
export default RoundRectangleCanvas;
|
|
|
|
|
|
spaces/Alfasign/HuggingGPT-Lite/get_token_ids.py
DELETED
@@ -1,53 +0,0 @@
|
|
1 |
-
import tiktoken
|
2 |
-
|
3 |
-
encodings = {
|
4 |
-
"gpt-3.5-turbo": tiktoken.get_encoding("cl100k_base"),
|
5 |
-
"gpt-3.5-turbo-0301": tiktoken.get_encoding("cl100k_base"),
|
6 |
-
"text-davinci-003": tiktoken.get_encoding("p50k_base"),
|
7 |
-
"text-davinci-002": tiktoken.get_encoding("p50k_base"),
|
8 |
-
"text-davinci-001": tiktoken.get_encoding("r50k_base"),
|
9 |
-
"text-curie-001": tiktoken.get_encoding("r50k_base"),
|
10 |
-
"text-babbage-001": tiktoken.get_encoding("r50k_base"),
|
11 |
-
"text-ada-001": tiktoken.get_encoding("r50k_base"),
|
12 |
-
"davinci": tiktoken.get_encoding("r50k_base"),
|
13 |
-
"curie": tiktoken.get_encoding("r50k_base"),
|
14 |
-
"babbage": tiktoken.get_encoding("r50k_base"),
|
15 |
-
"ada": tiktoken.get_encoding("r50k_base"),
|
16 |
-
}
|
17 |
-
|
18 |
-
max_length = {
|
19 |
-
"gpt-3.5-turbo": 4096,
|
20 |
-
"gpt-3.5-turbo-0301": 4096,
|
21 |
-
"text-davinci-003": 4096,
|
22 |
-
"text-davinci-002": 4096,
|
23 |
-
"text-davinci-001": 2049,
|
24 |
-
"text-curie-001": 2049,
|
25 |
-
"text-babbage-001": 2049,
|
26 |
-
"text-ada-001": 2049,
|
27 |
-
"davinci": 2049,
|
28 |
-
"curie": 2049,
|
29 |
-
"babbage": 2049,
|
30 |
-
"ada": 2049,
|
31 |
-
}
|
32 |
-
|
33 |
-
|
34 |
-
def count_tokens(model_name, text):
|
35 |
-
return len(encodings[model_name].encode(text))
|
36 |
-
|
37 |
-
|
38 |
-
def get_max_context_length(model_name):
|
39 |
-
return max_length[model_name]
|
40 |
-
|
41 |
-
|
42 |
-
def get_token_ids_for_task_parsing(model_name):
|
43 |
-
text = """{"task": "text-classification", "token-classification", "text2text-generation", "summarization", "translation", "question-answering", "conversational", "text-generation", "sentence-similarity", "tabular-classification", "object-detection", "image-classification", "image-to-image", "image-to-text", "text-to-image", "visual-question-answering", "document-question-answering", "image-segmentation", "text-to-speech", "text-to-video", "automatic-speech-recognition", "audio-to-audio", "audio-classification", "canny-control", "hed-control", "mlsd-control", "normal-control", "openpose-control", "canny-text-to-image", "depth-text-to-image", "hed-text-to-image", "mlsd-text-to-image", "normal-text-to-image", "openpose-text-to-image", "seg-text-to-image", "args", "text", "path", "dep", "id", "<GENERATED>-"}"""
|
44 |
-
res = encodings[model_name].encode(text)
|
45 |
-
res = list(set(res))
|
46 |
-
return res
|
47 |
-
|
48 |
-
|
49 |
-
def get_token_ids_for_choose_model(model_name):
|
50 |
-
text = """{"id": "reason"}"""
|
51 |
-
res = encodings[model_name].encode(text)
|
52 |
-
res = list(set(res))
|
53 |
-
return res
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Alpaca233/SadTalker/src/audio2exp_models/networks.py
DELETED
@@ -1,74 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
import torch.nn.functional as F
|
3 |
-
from torch import nn
|
4 |
-
|
5 |
-
class Conv2d(nn.Module):
|
6 |
-
def __init__(self, cin, cout, kernel_size, stride, padding, residual=False, use_act = True, *args, **kwargs):
|
7 |
-
super().__init__(*args, **kwargs)
|
8 |
-
self.conv_block = nn.Sequential(
|
9 |
-
nn.Conv2d(cin, cout, kernel_size, stride, padding),
|
10 |
-
nn.BatchNorm2d(cout)
|
11 |
-
)
|
12 |
-
self.act = nn.ReLU()
|
13 |
-
self.residual = residual
|
14 |
-
self.use_act = use_act
|
15 |
-
|
16 |
-
def forward(self, x):
|
17 |
-
out = self.conv_block(x)
|
18 |
-
if self.residual:
|
19 |
-
out += x
|
20 |
-
|
21 |
-
if self.use_act:
|
22 |
-
return self.act(out)
|
23 |
-
else:
|
24 |
-
return out
|
25 |
-
|
26 |
-
class SimpleWrapperV2(nn.Module):
|
27 |
-
def __init__(self) -> None:
|
28 |
-
super().__init__()
|
29 |
-
self.audio_encoder = nn.Sequential(
|
30 |
-
Conv2d(1, 32, kernel_size=3, stride=1, padding=1),
|
31 |
-
Conv2d(32, 32, kernel_size=3, stride=1, padding=1, residual=True),
|
32 |
-
Conv2d(32, 32, kernel_size=3, stride=1, padding=1, residual=True),
|
33 |
-
|
34 |
-
Conv2d(32, 64, kernel_size=3, stride=(3, 1), padding=1),
|
35 |
-
Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True),
|
36 |
-
Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True),
|
37 |
-
|
38 |
-
Conv2d(64, 128, kernel_size=3, stride=3, padding=1),
|
39 |
-
Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True),
|
40 |
-
Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True),
|
41 |
-
|
42 |
-
Conv2d(128, 256, kernel_size=3, stride=(3, 2), padding=1),
|
43 |
-
Conv2d(256, 256, kernel_size=3, stride=1, padding=1, residual=True),
|
44 |
-
|
45 |
-
Conv2d(256, 512, kernel_size=3, stride=1, padding=0),
|
46 |
-
Conv2d(512, 512, kernel_size=1, stride=1, padding=0),
|
47 |
-
)
|
48 |
-
|
49 |
-
#### load the pre-trained audio_encoder
|
50 |
-
#self.audio_encoder = self.audio_encoder.to(device)
|
51 |
-
'''
|
52 |
-
wav2lip_state_dict = torch.load('/apdcephfs_cq2/share_1290939/wenxuazhang/checkpoints/wav2lip.pth')['state_dict']
|
53 |
-
state_dict = self.audio_encoder.state_dict()
|
54 |
-
|
55 |
-
for k,v in wav2lip_state_dict.items():
|
56 |
-
if 'audio_encoder' in k:
|
57 |
-
print('init:', k)
|
58 |
-
state_dict[k.replace('module.audio_encoder.', '')] = v
|
59 |
-
self.audio_encoder.load_state_dict(state_dict)
|
60 |
-
'''
|
61 |
-
|
62 |
-
self.mapping1 = nn.Linear(512+64+1, 64)
|
63 |
-
#self.mapping2 = nn.Linear(30, 64)
|
64 |
-
#nn.init.constant_(self.mapping1.weight, 0.)
|
65 |
-
nn.init.constant_(self.mapping1.bias, 0.)
|
66 |
-
|
67 |
-
def forward(self, x, ref, ratio):
|
68 |
-
x = self.audio_encoder(x).view(x.size(0), -1)
|
69 |
-
ref_reshape = ref.reshape(x.size(0), -1)
|
70 |
-
ratio = ratio.reshape(x.size(0), -1)
|
71 |
-
|
72 |
-
y = self.mapping1(torch.cat([x, ref_reshape, ratio], dim=1))
|
73 |
-
out = y.reshape(ref.shape[0], ref.shape[1], -1) #+ ref # resudial
|
74 |
-
return out
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Amrrs/DragGan-Inversion/stylegan_human/pti/training/projectors/__init__.py
DELETED
File without changes
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/schedulers/ddpm.md
DELETED
@@ -1,27 +0,0 @@
|
|
1 |
-
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
2 |
-
|
3 |
-
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
4 |
-
the License. You may obtain a copy of the License at
|
5 |
-
|
6 |
-
http://www.apache.org/licenses/LICENSE-2.0
|
7 |
-
|
8 |
-
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
9 |
-
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
10 |
-
specific language governing permissions and limitations under the License.
|
11 |
-
-->
|
12 |
-
|
13 |
-
# Denoising Diffusion Probabilistic Models (DDPM)
|
14 |
-
|
15 |
-
## Overview
|
16 |
-
|
17 |
-
[Denoising Diffusion Probabilistic Models](https://arxiv.org/abs/2006.11239)
|
18 |
-
(DDPM) by Jonathan Ho, Ajay Jain and Pieter Abbeel proposes the diffusion based model of the same name, but in the context of the 🤗 Diffusers library, DDPM refers to the discrete denoising scheduler from the paper as well as the pipeline.
|
19 |
-
|
20 |
-
The abstract of the paper is the following:
|
21 |
-
|
22 |
-
We present high quality image synthesis results using diffusion probabilistic models, a class of latent variable models inspired by considerations from nonequilibrium thermodynamics. Our best results are obtained by training on a weighted variational bound designed according to a novel connection between diffusion probabilistic models and denoising score matching with Langevin dynamics, and our models naturally admit a progressive lossy decompression scheme that can be interpreted as a generalization of autoregressive decoding. On the unconditional CIFAR10 dataset, we obtain an Inception score of 9.46 and a state-of-the-art FID score of 3.17. On 256x256 LSUN, we obtain sample quality similar to ProgressiveGAN.
|
23 |
-
|
24 |
-
The original paper can be found [here](https://arxiv.org/abs/2010.02502).
|
25 |
-
|
26 |
-
## DDPMScheduler
|
27 |
-
[[autodoc]] DDPMScheduler
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/ko/tutorials/tutorial_overview.md
DELETED
@@ -1,23 +0,0 @@
|
|
1 |
-
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
2 |
-
|
3 |
-
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
4 |
-
the License. You may obtain a copy of the License at
|
5 |
-
|
6 |
-
http://www.apache.org/licenses/LICENSE-2.0
|
7 |
-
|
8 |
-
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
9 |
-
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
10 |
-
specific language governing permissions and limitations under the License.
|
11 |
-
-->
|
12 |
-
|
13 |
-
# Overview
|
14 |
-
|
15 |
-
🧨 Diffusers에 오신 걸 환영합니다! 여러분이 diffusion 모델과 생성 AI를 처음 접하고, 더 많은 걸 배우고 싶으셨다면 제대로 찾아오셨습니다. 이 튜토리얼은 diffusion model을 여러분에게 젠틀하게 소개하고, 라이브러리의 기본 사항(핵심 구성요소와 🧨 Diffusers 사용법)을 이해하는 데 도움이 되도록 설계되었습니다.
|
16 |
-
|
17 |
-
여러분은 이 튜토리얼을 통해 빠르게 생성하기 위해선 추론 파이프라인을 어떻게 사용해야 하는지, 그리고 라이브러리를 modular toolbox처럼 이용해서 여러분만의 diffusion system을 구축할 수 있도록 파이프라인을 분해하는 법을 배울 수 있습니다. 다음 단원에서는 여러분이 원하는 것을 생성하기 위해 자신만의 diffusion model을 학습하는 방법을 배우게 됩니다.
|
18 |
-
|
19 |
-
튜토리얼을 완료한다면 여러분은 라이브러리를 직접 탐색하고, 자신의 프로젝트와 애플리케이션에 적용할 스킬들을 습득할 수 있을 겁니다.
|
20 |
-
|
21 |
-
[Discord](https://discord.com/invite/JfAtkvEtRb)나 [포럼](https://discuss.huggingface.co/c/discussion-related-to-httpsgithubcomhuggingfacediffusers/63) 커뮤니티에 자유롭게 참여해서 다른 사용자와 개발자들과 교류하고 협업해 보세요!
|
22 |
-
|
23 |
-
자 지금부터 diffusing을 시작해 보겠습니다! 🧨
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/dreambooth/train_dreambooth_lora_sdxl.py
DELETED
@@ -1,1355 +0,0 @@
|
|
1 |
-
#!/usr/bin/env python
|
2 |
-
# coding=utf-8
|
3 |
-
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
|
4 |
-
#
|
5 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
6 |
-
# you may not use this file except in compliance with the License.
|
7 |
-
# You may obtain a copy of the License at
|
8 |
-
#
|
9 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
10 |
-
#
|
11 |
-
# Unless required by applicable law or agreed to in writing, software
|
12 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
13 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
14 |
-
# See the License for the specific language governing permissions and
|
15 |
-
|
16 |
-
import argparse
|
17 |
-
import gc
|
18 |
-
import hashlib
|
19 |
-
import itertools
|
20 |
-
import logging
|
21 |
-
import math
|
22 |
-
import os
|
23 |
-
import shutil
|
24 |
-
import warnings
|
25 |
-
from pathlib import Path
|
26 |
-
from typing import Dict
|
27 |
-
|
28 |
-
import numpy as np
|
29 |
-
import torch
|
30 |
-
import torch.nn.functional as F
|
31 |
-
import torch.utils.checkpoint
|
32 |
-
import transformers
|
33 |
-
from accelerate import Accelerator
|
34 |
-
from accelerate.logging import get_logger
|
35 |
-
from accelerate.utils import ProjectConfiguration, set_seed
|
36 |
-
from huggingface_hub import create_repo, upload_folder
|
37 |
-
from packaging import version
|
38 |
-
from PIL import Image
|
39 |
-
from PIL.ImageOps import exif_transpose
|
40 |
-
from torch.utils.data import Dataset
|
41 |
-
from torchvision import transforms
|
42 |
-
from tqdm.auto import tqdm
|
43 |
-
from transformers import AutoTokenizer, PretrainedConfig
|
44 |
-
|
45 |
-
import diffusers
|
46 |
-
from diffusers import (
|
47 |
-
AutoencoderKL,
|
48 |
-
DDPMScheduler,
|
49 |
-
DPMSolverMultistepScheduler,
|
50 |
-
StableDiffusionXLPipeline,
|
51 |
-
UNet2DConditionModel,
|
52 |
-
)
|
53 |
-
from diffusers.loaders import LoraLoaderMixin, text_encoder_lora_state_dict
|
54 |
-
from diffusers.models.attention_processor import LoRAAttnProcessor, LoRAAttnProcessor2_0
|
55 |
-
from diffusers.optimization import get_scheduler
|
56 |
-
from diffusers.utils import check_min_version, is_wandb_available
|
57 |
-
from diffusers.utils.import_utils import is_xformers_available
|
58 |
-
|
59 |
-
|
60 |
-
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
|
61 |
-
check_min_version("0.19.0")
|
62 |
-
|
63 |
-
logger = get_logger(__name__)
|
64 |
-
|
65 |
-
|
66 |
-
def save_model_card(
|
67 |
-
repo_id: str, images=None, base_model=str, train_text_encoder=False, prompt=str, repo_folder=None, vae_path=None
|
68 |
-
):
|
69 |
-
img_str = ""
|
70 |
-
for i, image in enumerate(images):
|
71 |
-
image.save(os.path.join(repo_folder, f"image_{i}.png"))
|
72 |
-
img_str += f"\n"
|
73 |
-
|
74 |
-
yaml = f"""
|
75 |
-
---
|
76 |
-
license: creativeml-openrail-m
|
77 |
-
base_model: {base_model}
|
78 |
-
instance_prompt: {prompt}
|
79 |
-
tags:
|
80 |
-
- stable-diffusion-xl
|
81 |
-
- stable-diffusion-xl-diffusers
|
82 |
-
- text-to-image
|
83 |
-
- diffusers
|
84 |
-
- lora
|
85 |
-
inference: true
|
86 |
-
---
|
87 |
-
"""
|
88 |
-
model_card = f"""
|
89 |
-
# LoRA DreamBooth - {repo_id}
|
90 |
-
|
91 |
-
These are LoRA adaption weights for {base_model}. The weights were trained on {prompt} using [DreamBooth](https://dreambooth.github.io/). You can find some example images in the following. \n
|
92 |
-
{img_str}
|
93 |
-
|
94 |
-
LoRA for the text encoder was enabled: {train_text_encoder}.
|
95 |
-
|
96 |
-
Special VAE used for training: {vae_path}.
|
97 |
-
|
98 |
-
## License
|
99 |
-
|
100 |
-
[SDXL 1.0 License](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/LICENSE.md)
|
101 |
-
"""
|
102 |
-
with open(os.path.join(repo_folder, "README.md"), "w") as f:
|
103 |
-
f.write(yaml + model_card)
|
104 |
-
|
105 |
-
|
106 |
-
def import_model_class_from_model_name_or_path(
|
107 |
-
pretrained_model_name_or_path: str, revision: str, subfolder: str = "text_encoder"
|
108 |
-
):
|
109 |
-
text_encoder_config = PretrainedConfig.from_pretrained(
|
110 |
-
pretrained_model_name_or_path, subfolder=subfolder, revision=revision
|
111 |
-
)
|
112 |
-
model_class = text_encoder_config.architectures[0]
|
113 |
-
|
114 |
-
if model_class == "CLIPTextModel":
|
115 |
-
from transformers import CLIPTextModel
|
116 |
-
|
117 |
-
return CLIPTextModel
|
118 |
-
elif model_class == "CLIPTextModelWithProjection":
|
119 |
-
from transformers import CLIPTextModelWithProjection
|
120 |
-
|
121 |
-
return CLIPTextModelWithProjection
|
122 |
-
else:
|
123 |
-
raise ValueError(f"{model_class} is not supported.")
|
124 |
-
|
125 |
-
|
126 |
-
def parse_args(input_args=None):
|
127 |
-
parser = argparse.ArgumentParser(description="Simple example of a training script.")
|
128 |
-
parser.add_argument(
|
129 |
-
"--pretrained_model_name_or_path",
|
130 |
-
type=str,
|
131 |
-
default=None,
|
132 |
-
required=True,
|
133 |
-
help="Path to pretrained model or model identifier from huggingface.co/models.",
|
134 |
-
)
|
135 |
-
parser.add_argument(
|
136 |
-
"--pretrained_vae_model_name_or_path",
|
137 |
-
type=str,
|
138 |
-
default=None,
|
139 |
-
help="Path to pretrained VAE model with better numerical stability. More details: https://github.com/huggingface/diffusers/pull/4038.",
|
140 |
-
)
|
141 |
-
parser.add_argument(
|
142 |
-
"--revision",
|
143 |
-
type=str,
|
144 |
-
default=None,
|
145 |
-
required=False,
|
146 |
-
help="Revision of pretrained model identifier from huggingface.co/models.",
|
147 |
-
)
|
148 |
-
parser.add_argument(
|
149 |
-
"--instance_data_dir",
|
150 |
-
type=str,
|
151 |
-
default=None,
|
152 |
-
required=True,
|
153 |
-
help="A folder containing the training data of instance images.",
|
154 |
-
)
|
155 |
-
parser.add_argument(
|
156 |
-
"--class_data_dir",
|
157 |
-
type=str,
|
158 |
-
default=None,
|
159 |
-
required=False,
|
160 |
-
help="A folder containing the training data of class images.",
|
161 |
-
)
|
162 |
-
parser.add_argument(
|
163 |
-
"--instance_prompt",
|
164 |
-
type=str,
|
165 |
-
default=None,
|
166 |
-
required=True,
|
167 |
-
help="The prompt with identifier specifying the instance",
|
168 |
-
)
|
169 |
-
parser.add_argument(
|
170 |
-
"--class_prompt",
|
171 |
-
type=str,
|
172 |
-
default=None,
|
173 |
-
help="The prompt to specify images in the same class as provided instance images.",
|
174 |
-
)
|
175 |
-
parser.add_argument(
|
176 |
-
"--validation_prompt",
|
177 |
-
type=str,
|
178 |
-
default=None,
|
179 |
-
help="A prompt that is used during validation to verify that the model is learning.",
|
180 |
-
)
|
181 |
-
parser.add_argument(
|
182 |
-
"--num_validation_images",
|
183 |
-
type=int,
|
184 |
-
default=4,
|
185 |
-
help="Number of images that should be generated during validation with `validation_prompt`.",
|
186 |
-
)
|
187 |
-
parser.add_argument(
|
188 |
-
"--validation_epochs",
|
189 |
-
type=int,
|
190 |
-
default=50,
|
191 |
-
help=(
|
192 |
-
"Run dreambooth validation every X epochs. Dreambooth validation consists of running the prompt"
|
193 |
-
" `args.validation_prompt` multiple times: `args.num_validation_images`."
|
194 |
-
),
|
195 |
-
)
|
196 |
-
parser.add_argument(
|
197 |
-
"--with_prior_preservation",
|
198 |
-
default=False,
|
199 |
-
action="store_true",
|
200 |
-
help="Flag to add prior preservation loss.",
|
201 |
-
)
|
202 |
-
parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.")
|
203 |
-
parser.add_argument(
|
204 |
-
"--num_class_images",
|
205 |
-
type=int,
|
206 |
-
default=100,
|
207 |
-
help=(
|
208 |
-
"Minimal class images for prior preservation loss. If there are not enough images already present in"
|
209 |
-
" class_data_dir, additional images will be sampled with class_prompt."
|
210 |
-
),
|
211 |
-
)
|
212 |
-
parser.add_argument(
|
213 |
-
"--output_dir",
|
214 |
-
type=str,
|
215 |
-
default="lora-dreambooth-model",
|
216 |
-
help="The output directory where the model predictions and checkpoints will be written.",
|
217 |
-
)
|
218 |
-
parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
|
219 |
-
parser.add_argument(
|
220 |
-
"--resolution",
|
221 |
-
type=int,
|
222 |
-
default=512,
|
223 |
-
help=(
|
224 |
-
"The resolution for input images, all the images in the train/validation dataset will be resized to this"
|
225 |
-
" resolution"
|
226 |
-
),
|
227 |
-
)
|
228 |
-
parser.add_argument(
|
229 |
-
"--crops_coords_top_left_h",
|
230 |
-
type=int,
|
231 |
-
default=0,
|
232 |
-
help=("Coordinate for (the height) to be included in the crop coordinate embeddings needed by SDXL UNet."),
|
233 |
-
)
|
234 |
-
parser.add_argument(
|
235 |
-
"--crops_coords_top_left_w",
|
236 |
-
type=int,
|
237 |
-
default=0,
|
238 |
-
help=("Coordinate for (the height) to be included in the crop coordinate embeddings needed by SDXL UNet."),
|
239 |
-
)
|
240 |
-
parser.add_argument(
|
241 |
-
"--center_crop",
|
242 |
-
default=False,
|
243 |
-
action="store_true",
|
244 |
-
help=(
|
245 |
-
"Whether to center crop the input images to the resolution. If not set, the images will be randomly"
|
246 |
-
" cropped. The images will be resized to the resolution first before cropping."
|
247 |
-
),
|
248 |
-
)
|
249 |
-
parser.add_argument(
|
250 |
-
"--train_text_encoder",
|
251 |
-
action="store_true",
|
252 |
-
help="Whether to train the text encoder. If set, the text encoder should be float32 precision.",
|
253 |
-
)
|
254 |
-
parser.add_argument(
|
255 |
-
"--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader."
|
256 |
-
)
|
257 |
-
parser.add_argument(
|
258 |
-
"--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images."
|
259 |
-
)
|
260 |
-
parser.add_argument("--num_train_epochs", type=int, default=1)
|
261 |
-
parser.add_argument(
|
262 |
-
"--max_train_steps",
|
263 |
-
type=int,
|
264 |
-
default=None,
|
265 |
-
help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
|
266 |
-
)
|
267 |
-
parser.add_argument(
|
268 |
-
"--checkpointing_steps",
|
269 |
-
type=int,
|
270 |
-
default=500,
|
271 |
-
help=(
|
272 |
-
"Save a checkpoint of the training state every X updates. These checkpoints can be used both as final"
|
273 |
-
" checkpoints in case they are better than the last checkpoint, and are also suitable for resuming"
|
274 |
-
" training using `--resume_from_checkpoint`."
|
275 |
-
),
|
276 |
-
)
|
277 |
-
parser.add_argument(
|
278 |
-
"--checkpoints_total_limit",
|
279 |
-
type=int,
|
280 |
-
default=None,
|
281 |
-
help=("Max number of checkpoints to store."),
|
282 |
-
)
|
283 |
-
parser.add_argument(
|
284 |
-
"--resume_from_checkpoint",
|
285 |
-
type=str,
|
286 |
-
default=None,
|
287 |
-
help=(
|
288 |
-
"Whether training should be resumed from a previous checkpoint. Use a path saved by"
|
289 |
-
' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
|
290 |
-
),
|
291 |
-
)
|
292 |
-
parser.add_argument(
|
293 |
-
"--gradient_accumulation_steps",
|
294 |
-
type=int,
|
295 |
-
default=1,
|
296 |
-
help="Number of updates steps to accumulate before performing a backward/update pass.",
|
297 |
-
)
|
298 |
-
parser.add_argument(
|
299 |
-
"--gradient_checkpointing",
|
300 |
-
action="store_true",
|
301 |
-
help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
|
302 |
-
)
|
303 |
-
parser.add_argument(
|
304 |
-
"--learning_rate",
|
305 |
-
type=float,
|
306 |
-
default=5e-4,
|
307 |
-
help="Initial learning rate (after the potential warmup period) to use.",
|
308 |
-
)
|
309 |
-
parser.add_argument(
|
310 |
-
"--scale_lr",
|
311 |
-
action="store_true",
|
312 |
-
default=False,
|
313 |
-
help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
|
314 |
-
)
|
315 |
-
parser.add_argument(
|
316 |
-
"--lr_scheduler",
|
317 |
-
type=str,
|
318 |
-
default="constant",
|
319 |
-
help=(
|
320 |
-
'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
|
321 |
-
' "constant", "constant_with_warmup"]'
|
322 |
-
),
|
323 |
-
)
|
324 |
-
parser.add_argument(
|
325 |
-
"--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
|
326 |
-
)
|
327 |
-
parser.add_argument(
|
328 |
-
"--lr_num_cycles",
|
329 |
-
type=int,
|
330 |
-
default=1,
|
331 |
-
help="Number of hard resets of the lr in cosine_with_restarts scheduler.",
|
332 |
-
)
|
333 |
-
parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.")
|
334 |
-
parser.add_argument(
|
335 |
-
"--dataloader_num_workers",
|
336 |
-
type=int,
|
337 |
-
default=0,
|
338 |
-
help=(
|
339 |
-
"Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
|
340 |
-
),
|
341 |
-
)
|
342 |
-
parser.add_argument(
|
343 |
-
"--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
|
344 |
-
)
|
345 |
-
parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
|
346 |
-
parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
|
347 |
-
parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
|
348 |
-
parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
|
349 |
-
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
|
350 |
-
parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
|
351 |
-
parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
|
352 |
-
parser.add_argument(
|
353 |
-
"--hub_model_id",
|
354 |
-
type=str,
|
355 |
-
default=None,
|
356 |
-
help="The name of the repository to keep in sync with the local `output_dir`.",
|
357 |
-
)
|
358 |
-
parser.add_argument(
|
359 |
-
"--logging_dir",
|
360 |
-
type=str,
|
361 |
-
default="logs",
|
362 |
-
help=(
|
363 |
-
"[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
|
364 |
-
" *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
|
365 |
-
),
|
366 |
-
)
|
367 |
-
parser.add_argument(
|
368 |
-
"--allow_tf32",
|
369 |
-
action="store_true",
|
370 |
-
help=(
|
371 |
-
"Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
|
372 |
-
" https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
|
373 |
-
),
|
374 |
-
)
|
375 |
-
parser.add_argument(
|
376 |
-
"--report_to",
|
377 |
-
type=str,
|
378 |
-
default="tensorboard",
|
379 |
-
help=(
|
380 |
-
'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
|
381 |
-
' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
|
382 |
-
),
|
383 |
-
)
|
384 |
-
parser.add_argument(
|
385 |
-
"--mixed_precision",
|
386 |
-
type=str,
|
387 |
-
default=None,
|
388 |
-
choices=["no", "fp16", "bf16"],
|
389 |
-
help=(
|
390 |
-
"Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
|
391 |
-
" 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the"
|
392 |
-
" flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
|
393 |
-
),
|
394 |
-
)
|
395 |
-
parser.add_argument(
|
396 |
-
"--prior_generation_precision",
|
397 |
-
type=str,
|
398 |
-
default=None,
|
399 |
-
choices=["no", "fp32", "fp16", "bf16"],
|
400 |
-
help=(
|
401 |
-
"Choose prior generation precision between fp32, fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
|
402 |
-
" 1.10.and an Nvidia Ampere GPU. Default to fp16 if a GPU is available else fp32."
|
403 |
-
),
|
404 |
-
)
|
405 |
-
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
|
406 |
-
parser.add_argument(
|
407 |
-
"--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
|
408 |
-
)
|
409 |
-
|
410 |
-
if input_args is not None:
|
411 |
-
args = parser.parse_args(input_args)
|
412 |
-
else:
|
413 |
-
args = parser.parse_args()
|
414 |
-
|
415 |
-
env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
|
416 |
-
if env_local_rank != -1 and env_local_rank != args.local_rank:
|
417 |
-
args.local_rank = env_local_rank
|
418 |
-
|
419 |
-
if args.with_prior_preservation:
|
420 |
-
if args.class_data_dir is None:
|
421 |
-
raise ValueError("You must specify a data directory for class images.")
|
422 |
-
if args.class_prompt is None:
|
423 |
-
raise ValueError("You must specify prompt for class images.")
|
424 |
-
else:
|
425 |
-
# logger is not available yet
|
426 |
-
if args.class_data_dir is not None:
|
427 |
-
warnings.warn("You need not use --class_data_dir without --with_prior_preservation.")
|
428 |
-
if args.class_prompt is not None:
|
429 |
-
warnings.warn("You need not use --class_prompt without --with_prior_preservation.")
|
430 |
-
|
431 |
-
return args
|
432 |
-
|
433 |
-
|
434 |
-
class DreamBoothDataset(Dataset):
|
435 |
-
"""
|
436 |
-
A dataset to prepare the instance and class images with the prompts for fine-tuning the model.
|
437 |
-
It pre-processes the images.
|
438 |
-
"""
|
439 |
-
|
440 |
-
def __init__(
|
441 |
-
self,
|
442 |
-
instance_data_root,
|
443 |
-
class_data_root=None,
|
444 |
-
class_num=None,
|
445 |
-
size=1024,
|
446 |
-
center_crop=False,
|
447 |
-
):
|
448 |
-
self.size = size
|
449 |
-
self.center_crop = center_crop
|
450 |
-
|
451 |
-
self.instance_data_root = Path(instance_data_root)
|
452 |
-
if not self.instance_data_root.exists():
|
453 |
-
raise ValueError("Instance images root doesn't exists.")
|
454 |
-
|
455 |
-
self.instance_images_path = list(Path(instance_data_root).iterdir())
|
456 |
-
self.num_instance_images = len(self.instance_images_path)
|
457 |
-
self._length = self.num_instance_images
|
458 |
-
|
459 |
-
if class_data_root is not None:
|
460 |
-
self.class_data_root = Path(class_data_root)
|
461 |
-
self.class_data_root.mkdir(parents=True, exist_ok=True)
|
462 |
-
self.class_images_path = list(self.class_data_root.iterdir())
|
463 |
-
if class_num is not None:
|
464 |
-
self.num_class_images = min(len(self.class_images_path), class_num)
|
465 |
-
else:
|
466 |
-
self.num_class_images = len(self.class_images_path)
|
467 |
-
self._length = max(self.num_class_images, self.num_instance_images)
|
468 |
-
else:
|
469 |
-
self.class_data_root = None
|
470 |
-
|
471 |
-
self.image_transforms = transforms.Compose(
|
472 |
-
[
|
473 |
-
transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR),
|
474 |
-
transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size),
|
475 |
-
transforms.ToTensor(),
|
476 |
-
transforms.Normalize([0.5], [0.5]),
|
477 |
-
]
|
478 |
-
)
|
479 |
-
|
480 |
-
def __len__(self):
|
481 |
-
return self._length
|
482 |
-
|
483 |
-
def __getitem__(self, index):
|
484 |
-
example = {}
|
485 |
-
instance_image = Image.open(self.instance_images_path[index % self.num_instance_images])
|
486 |
-
instance_image = exif_transpose(instance_image)
|
487 |
-
|
488 |
-
if not instance_image.mode == "RGB":
|
489 |
-
instance_image = instance_image.convert("RGB")
|
490 |
-
example["instance_images"] = self.image_transforms(instance_image)
|
491 |
-
|
492 |
-
if self.class_data_root:
|
493 |
-
class_image = Image.open(self.class_images_path[index % self.num_class_images])
|
494 |
-
class_image = exif_transpose(class_image)
|
495 |
-
|
496 |
-
if not class_image.mode == "RGB":
|
497 |
-
class_image = class_image.convert("RGB")
|
498 |
-
example["class_images"] = self.image_transforms(class_image)
|
499 |
-
|
500 |
-
return example
|
501 |
-
|
502 |
-
|
503 |
-
def collate_fn(examples, with_prior_preservation=False):
|
504 |
-
pixel_values = [example["instance_images"] for example in examples]
|
505 |
-
|
506 |
-
# Concat class and instance examples for prior preservation.
|
507 |
-
# We do this to avoid doing two forward passes.
|
508 |
-
if with_prior_preservation:
|
509 |
-
pixel_values += [example["class_images"] for example in examples]
|
510 |
-
|
511 |
-
pixel_values = torch.stack(pixel_values)
|
512 |
-
pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
|
513 |
-
|
514 |
-
batch = {"pixel_values": pixel_values}
|
515 |
-
return batch
|
516 |
-
|
517 |
-
|
518 |
-
class PromptDataset(Dataset):
|
519 |
-
"A simple dataset to prepare the prompts to generate class images on multiple GPUs."
|
520 |
-
|
521 |
-
def __init__(self, prompt, num_samples):
|
522 |
-
self.prompt = prompt
|
523 |
-
self.num_samples = num_samples
|
524 |
-
|
525 |
-
def __len__(self):
|
526 |
-
return self.num_samples
|
527 |
-
|
528 |
-
def __getitem__(self, index):
|
529 |
-
example = {}
|
530 |
-
example["prompt"] = self.prompt
|
531 |
-
example["index"] = index
|
532 |
-
return example
|
533 |
-
|
534 |
-
|
535 |
-
def tokenize_prompt(tokenizer, prompt):
|
536 |
-
text_inputs = tokenizer(
|
537 |
-
prompt,
|
538 |
-
padding="max_length",
|
539 |
-
max_length=tokenizer.model_max_length,
|
540 |
-
truncation=True,
|
541 |
-
return_tensors="pt",
|
542 |
-
)
|
543 |
-
text_input_ids = text_inputs.input_ids
|
544 |
-
return text_input_ids
|
545 |
-
|
546 |
-
|
547 |
-
# Adapted from pipelines.StableDiffusionXLPipeline.encode_prompt
|
548 |
-
def encode_prompt(text_encoders, tokenizers, prompt, text_input_ids_list=None):
|
549 |
-
prompt_embeds_list = []
|
550 |
-
|
551 |
-
for i, text_encoder in enumerate(text_encoders):
|
552 |
-
if tokenizers is not None:
|
553 |
-
tokenizer = tokenizers[i]
|
554 |
-
text_input_ids = tokenize_prompt(tokenizer, prompt)
|
555 |
-
else:
|
556 |
-
assert text_input_ids_list is not None
|
557 |
-
text_input_ids = text_input_ids_list[i]
|
558 |
-
|
559 |
-
prompt_embeds = text_encoder(
|
560 |
-
text_input_ids.to(text_encoder.device),
|
561 |
-
output_hidden_states=True,
|
562 |
-
)
|
563 |
-
|
564 |
-
# We are only ALWAYS interested in the pooled output of the final text encoder
|
565 |
-
pooled_prompt_embeds = prompt_embeds[0]
|
566 |
-
prompt_embeds = prompt_embeds.hidden_states[-2]
|
567 |
-
bs_embed, seq_len, _ = prompt_embeds.shape
|
568 |
-
prompt_embeds = prompt_embeds.view(bs_embed, seq_len, -1)
|
569 |
-
prompt_embeds_list.append(prompt_embeds)
|
570 |
-
|
571 |
-
prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
|
572 |
-
pooled_prompt_embeds = pooled_prompt_embeds.view(bs_embed, -1)
|
573 |
-
return prompt_embeds, pooled_prompt_embeds
|
574 |
-
|
575 |
-
|
576 |
-
def unet_attn_processors_state_dict(unet) -> Dict[str, torch.tensor]:
|
577 |
-
"""
|
578 |
-
Returns:
|
579 |
-
a state dict containing just the attention processor parameters.
|
580 |
-
"""
|
581 |
-
attn_processors = unet.attn_processors
|
582 |
-
|
583 |
-
attn_processors_state_dict = {}
|
584 |
-
|
585 |
-
for attn_processor_key, attn_processor in attn_processors.items():
|
586 |
-
for parameter_key, parameter in attn_processor.state_dict().items():
|
587 |
-
attn_processors_state_dict[f"{attn_processor_key}.{parameter_key}"] = parameter
|
588 |
-
|
589 |
-
return attn_processors_state_dict
|
590 |
-
|
591 |
-
|
592 |
-
def main(args):
|
593 |
-
logging_dir = Path(args.output_dir, args.logging_dir)
|
594 |
-
|
595 |
-
accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
|
596 |
-
|
597 |
-
accelerator = Accelerator(
|
598 |
-
gradient_accumulation_steps=args.gradient_accumulation_steps,
|
599 |
-
mixed_precision=args.mixed_precision,
|
600 |
-
log_with=args.report_to,
|
601 |
-
project_config=accelerator_project_config,
|
602 |
-
)
|
603 |
-
|
604 |
-
if args.report_to == "wandb":
|
605 |
-
if not is_wandb_available():
|
606 |
-
raise ImportError("Make sure to install wandb if you want to use it for logging during training.")
|
607 |
-
import wandb
|
608 |
-
|
609 |
-
# Make one log on every process with the configuration for debugging.
|
610 |
-
logging.basicConfig(
|
611 |
-
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
|
612 |
-
datefmt="%m/%d/%Y %H:%M:%S",
|
613 |
-
level=logging.INFO,
|
614 |
-
)
|
615 |
-
logger.info(accelerator.state, main_process_only=False)
|
616 |
-
if accelerator.is_local_main_process:
|
617 |
-
transformers.utils.logging.set_verbosity_warning()
|
618 |
-
diffusers.utils.logging.set_verbosity_info()
|
619 |
-
else:
|
620 |
-
transformers.utils.logging.set_verbosity_error()
|
621 |
-
diffusers.utils.logging.set_verbosity_error()
|
622 |
-
|
623 |
-
# If passed along, set the training seed now.
|
624 |
-
if args.seed is not None:
|
625 |
-
set_seed(args.seed)
|
626 |
-
|
627 |
-
# Generate class images if prior preservation is enabled.
|
628 |
-
if args.with_prior_preservation:
|
629 |
-
class_images_dir = Path(args.class_data_dir)
|
630 |
-
if not class_images_dir.exists():
|
631 |
-
class_images_dir.mkdir(parents=True)
|
632 |
-
cur_class_images = len(list(class_images_dir.iterdir()))
|
633 |
-
|
634 |
-
if cur_class_images < args.num_class_images:
|
635 |
-
torch_dtype = torch.float16 if accelerator.device.type == "cuda" else torch.float32
|
636 |
-
if args.prior_generation_precision == "fp32":
|
637 |
-
torch_dtype = torch.float32
|
638 |
-
elif args.prior_generation_precision == "fp16":
|
639 |
-
torch_dtype = torch.float16
|
640 |
-
elif args.prior_generation_precision == "bf16":
|
641 |
-
torch_dtype = torch.bfloat16
|
642 |
-
pipeline = StableDiffusionXLPipeline.from_pretrained(
|
643 |
-
args.pretrained_model_name_or_path,
|
644 |
-
torch_dtype=torch_dtype,
|
645 |
-
safety_checker=None,
|
646 |
-
revision=args.revision,
|
647 |
-
)
|
648 |
-
pipeline.set_progress_bar_config(disable=True)
|
649 |
-
|
650 |
-
num_new_images = args.num_class_images - cur_class_images
|
651 |
-
logger.info(f"Number of class images to sample: {num_new_images}.")
|
652 |
-
|
653 |
-
sample_dataset = PromptDataset(args.class_prompt, num_new_images)
|
654 |
-
sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size)
|
655 |
-
|
656 |
-
sample_dataloader = accelerator.prepare(sample_dataloader)
|
657 |
-
pipeline.to(accelerator.device)
|
658 |
-
|
659 |
-
for example in tqdm(
|
660 |
-
sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process
|
661 |
-
):
|
662 |
-
images = pipeline(example["prompt"]).images
|
663 |
-
|
664 |
-
for i, image in enumerate(images):
|
665 |
-
hash_image = hashlib.sha1(image.tobytes()).hexdigest()
|
666 |
-
image_filename = class_images_dir / f"{example['index'][i] + cur_class_images}-{hash_image}.jpg"
|
667 |
-
image.save(image_filename)
|
668 |
-
|
669 |
-
del pipeline
|
670 |
-
if torch.cuda.is_available():
|
671 |
-
torch.cuda.empty_cache()
|
672 |
-
|
673 |
-
# Handle the repository creation
|
674 |
-
if accelerator.is_main_process:
|
675 |
-
if args.output_dir is not None:
|
676 |
-
os.makedirs(args.output_dir, exist_ok=True)
|
677 |
-
|
678 |
-
if args.push_to_hub:
|
679 |
-
repo_id = create_repo(
|
680 |
-
repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token
|
681 |
-
).repo_id
|
682 |
-
|
683 |
-
# Load the tokenizers
|
684 |
-
tokenizer_one = AutoTokenizer.from_pretrained(
|
685 |
-
args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision, use_fast=False
|
686 |
-
)
|
687 |
-
tokenizer_two = AutoTokenizer.from_pretrained(
|
688 |
-
args.pretrained_model_name_or_path, subfolder="tokenizer_2", revision=args.revision, use_fast=False
|
689 |
-
)
|
690 |
-
|
691 |
-
# import correct text encoder classes
|
692 |
-
text_encoder_cls_one = import_model_class_from_model_name_or_path(
|
693 |
-
args.pretrained_model_name_or_path, args.revision
|
694 |
-
)
|
695 |
-
text_encoder_cls_two = import_model_class_from_model_name_or_path(
|
696 |
-
args.pretrained_model_name_or_path, args.revision, subfolder="text_encoder_2"
|
697 |
-
)
|
698 |
-
|
699 |
-
# Load scheduler and models
|
700 |
-
noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
|
701 |
-
text_encoder_one = text_encoder_cls_one.from_pretrained(
|
702 |
-
args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision
|
703 |
-
)
|
704 |
-
text_encoder_two = text_encoder_cls_two.from_pretrained(
|
705 |
-
args.pretrained_model_name_or_path, subfolder="text_encoder_2", revision=args.revision
|
706 |
-
)
|
707 |
-
vae_path = (
|
708 |
-
args.pretrained_model_name_or_path
|
709 |
-
if args.pretrained_vae_model_name_or_path is None
|
710 |
-
else args.pretrained_vae_model_name_or_path
|
711 |
-
)
|
712 |
-
vae = AutoencoderKL.from_pretrained(
|
713 |
-
vae_path, subfolder="vae" if args.pretrained_vae_model_name_or_path is None else None, revision=args.revision
|
714 |
-
)
|
715 |
-
unet = UNet2DConditionModel.from_pretrained(
|
716 |
-
args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision
|
717 |
-
)
|
718 |
-
|
719 |
-
# We only train the additional adapter LoRA layers
|
720 |
-
vae.requires_grad_(False)
|
721 |
-
text_encoder_one.requires_grad_(False)
|
722 |
-
text_encoder_two.requires_grad_(False)
|
723 |
-
unet.requires_grad_(False)
|
724 |
-
|
725 |
-
# For mixed precision training we cast all non-trainable weigths (vae, non-lora text_encoder and non-lora unet) to half-precision
|
726 |
-
# as these weights are only used for inference, keeping weights in full precision is not required.
|
727 |
-
weight_dtype = torch.float32
|
728 |
-
if accelerator.mixed_precision == "fp16":
|
729 |
-
weight_dtype = torch.float16
|
730 |
-
elif accelerator.mixed_precision == "bf16":
|
731 |
-
weight_dtype = torch.bfloat16
|
732 |
-
|
733 |
-
# Move unet, vae and text_encoder to device and cast to weight_dtype
|
734 |
-
# The VAE is in float32 to avoid NaN losses.
|
735 |
-
unet.to(accelerator.device, dtype=weight_dtype)
|
736 |
-
if args.pretrained_vae_model_name_or_path is None:
|
737 |
-
vae.to(accelerator.device, dtype=torch.float32)
|
738 |
-
else:
|
739 |
-
vae.to(accelerator.device, dtype=weight_dtype)
|
740 |
-
text_encoder_one.to(accelerator.device, dtype=weight_dtype)
|
741 |
-
text_encoder_two.to(accelerator.device, dtype=weight_dtype)
|
742 |
-
|
743 |
-
if args.enable_xformers_memory_efficient_attention:
|
744 |
-
if is_xformers_available():
|
745 |
-
import xformers
|
746 |
-
|
747 |
-
xformers_version = version.parse(xformers.__version__)
|
748 |
-
if xformers_version == version.parse("0.0.16"):
|
749 |
-
logger.warn(
|
750 |
-
"xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
|
751 |
-
)
|
752 |
-
unet.enable_xformers_memory_efficient_attention()
|
753 |
-
else:
|
754 |
-
raise ValueError("xformers is not available. Make sure it is installed correctly")
|
755 |
-
|
756 |
-
# now we will add new LoRA weights to the attention layers
|
757 |
-
# Set correct lora layers
|
758 |
-
unet_lora_attn_procs = {}
|
759 |
-
unet_lora_parameters = []
|
760 |
-
for name, attn_processor in unet.attn_processors.items():
|
761 |
-
cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim
|
762 |
-
if name.startswith("mid_block"):
|
763 |
-
hidden_size = unet.config.block_out_channels[-1]
|
764 |
-
elif name.startswith("up_blocks"):
|
765 |
-
block_id = int(name[len("up_blocks.")])
|
766 |
-
hidden_size = list(reversed(unet.config.block_out_channels))[block_id]
|
767 |
-
elif name.startswith("down_blocks"):
|
768 |
-
block_id = int(name[len("down_blocks.")])
|
769 |
-
hidden_size = unet.config.block_out_channels[block_id]
|
770 |
-
|
771 |
-
lora_attn_processor_class = (
|
772 |
-
LoRAAttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else LoRAAttnProcessor
|
773 |
-
)
|
774 |
-
module = lora_attn_processor_class(hidden_size=hidden_size, cross_attention_dim=cross_attention_dim)
|
775 |
-
unet_lora_attn_procs[name] = module
|
776 |
-
unet_lora_parameters.extend(module.parameters())
|
777 |
-
|
778 |
-
unet.set_attn_processor(unet_lora_attn_procs)
|
779 |
-
|
780 |
-
# The text encoder comes from 🤗 transformers, so we cannot directly modify it.
|
781 |
-
# So, instead, we monkey-patch the forward calls of its attention-blocks.
|
782 |
-
if args.train_text_encoder:
|
783 |
-
# ensure that dtype is float32, even if rest of the model that isn't trained is loaded in fp16
|
784 |
-
text_lora_parameters_one = LoraLoaderMixin._modify_text_encoder(text_encoder_one, dtype=torch.float32)
|
785 |
-
text_lora_parameters_two = LoraLoaderMixin._modify_text_encoder(text_encoder_two, dtype=torch.float32)
|
786 |
-
|
787 |
-
# create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format
|
788 |
-
def save_model_hook(models, weights, output_dir):
|
789 |
-
# there are only two options here. Either are just the unet attn processor layers
|
790 |
-
# or there are the unet and text encoder atten layers
|
791 |
-
unet_lora_layers_to_save = None
|
792 |
-
text_encoder_one_lora_layers_to_save = None
|
793 |
-
text_encoder_two_lora_layers_to_save = None
|
794 |
-
|
795 |
-
for model in models:
|
796 |
-
if isinstance(model, type(accelerator.unwrap_model(unet))):
|
797 |
-
unet_lora_layers_to_save = unet_attn_processors_state_dict(model)
|
798 |
-
elif isinstance(model, type(accelerator.unwrap_model(text_encoder_one))):
|
799 |
-
text_encoder_one_lora_layers_to_save = text_encoder_lora_state_dict(model)
|
800 |
-
elif isinstance(model, type(accelerator.unwrap_model(text_encoder_two))):
|
801 |
-
text_encoder_two_lora_layers_to_save = text_encoder_lora_state_dict(model)
|
802 |
-
else:
|
803 |
-
raise ValueError(f"unexpected save model: {model.__class__}")
|
804 |
-
|
805 |
-
# make sure to pop weight so that corresponding model is not saved again
|
806 |
-
weights.pop()
|
807 |
-
|
808 |
-
StableDiffusionXLPipeline.save_lora_weights(
|
809 |
-
output_dir,
|
810 |
-
unet_lora_layers=unet_lora_layers_to_save,
|
811 |
-
text_encoder_lora_layers=text_encoder_one_lora_layers_to_save,
|
812 |
-
text_encoder_2_lora_layers=text_encoder_two_lora_layers_to_save,
|
813 |
-
)
|
814 |
-
|
815 |
-
def load_model_hook(models, input_dir):
|
816 |
-
unet_ = None
|
817 |
-
text_encoder_one_ = None
|
818 |
-
text_encoder_two_ = None
|
819 |
-
|
820 |
-
while len(models) > 0:
|
821 |
-
model = models.pop()
|
822 |
-
|
823 |
-
if isinstance(model, type(accelerator.unwrap_model(unet))):
|
824 |
-
unet_ = model
|
825 |
-
elif isinstance(model, type(accelerator.unwrap_model(text_encoder_one))):
|
826 |
-
text_encoder_one_ = model
|
827 |
-
elif isinstance(model, type(accelerator.unwrap_model(text_encoder_two))):
|
828 |
-
text_encoder_two_ = model
|
829 |
-
else:
|
830 |
-
raise ValueError(f"unexpected save model: {model.__class__}")
|
831 |
-
|
832 |
-
lora_state_dict, network_alphas = LoraLoaderMixin.lora_state_dict(input_dir)
|
833 |
-
LoraLoaderMixin.load_lora_into_unet(lora_state_dict, network_alphas=network_alphas, unet=unet_)
|
834 |
-
LoraLoaderMixin.load_lora_into_text_encoder(
|
835 |
-
lora_state_dict, network_alphas=network_alphas, text_encoder=text_encoder_one_
|
836 |
-
)
|
837 |
-
LoraLoaderMixin.load_lora_into_text_encoder(
|
838 |
-
lora_state_dict, network_alphas=network_alphas, text_encoder=text_encoder_two_
|
839 |
-
)
|
840 |
-
|
841 |
-
accelerator.register_save_state_pre_hook(save_model_hook)
|
842 |
-
accelerator.register_load_state_pre_hook(load_model_hook)
|
843 |
-
|
844 |
-
# Enable TF32 for faster training on Ampere GPUs,
|
845 |
-
# cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
|
846 |
-
if args.allow_tf32:
|
847 |
-
torch.backends.cuda.matmul.allow_tf32 = True
|
848 |
-
|
849 |
-
if args.scale_lr:
|
850 |
-
args.learning_rate = (
|
851 |
-
args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
|
852 |
-
)
|
853 |
-
|
854 |
-
# Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs
|
855 |
-
if args.use_8bit_adam:
|
856 |
-
try:
|
857 |
-
import bitsandbytes as bnb
|
858 |
-
except ImportError:
|
859 |
-
raise ImportError(
|
860 |
-
"To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`."
|
861 |
-
)
|
862 |
-
|
863 |
-
optimizer_class = bnb.optim.AdamW8bit
|
864 |
-
else:
|
865 |
-
optimizer_class = torch.optim.AdamW
|
866 |
-
|
867 |
-
# Optimizer creation
|
868 |
-
params_to_optimize = (
|
869 |
-
itertools.chain(unet_lora_parameters, text_lora_parameters_one, text_lora_parameters_two)
|
870 |
-
if args.train_text_encoder
|
871 |
-
else unet_lora_parameters
|
872 |
-
)
|
873 |
-
optimizer = optimizer_class(
|
874 |
-
params_to_optimize,
|
875 |
-
lr=args.learning_rate,
|
876 |
-
betas=(args.adam_beta1, args.adam_beta2),
|
877 |
-
weight_decay=args.adam_weight_decay,
|
878 |
-
eps=args.adam_epsilon,
|
879 |
-
)
|
880 |
-
|
881 |
-
# Computes additional embeddings/ids required by the SDXL UNet.
|
882 |
-
# regular text emebddings (when `train_text_encoder` is not True)
|
883 |
-
# pooled text embeddings
|
884 |
-
# time ids
|
885 |
-
|
886 |
-
def compute_time_ids():
|
887 |
-
# Adapted from pipeline.StableDiffusionXLPipeline._get_add_time_ids
|
888 |
-
original_size = (args.resolution, args.resolution)
|
889 |
-
target_size = (args.resolution, args.resolution)
|
890 |
-
crops_coords_top_left = (args.crops_coords_top_left_h, args.crops_coords_top_left_w)
|
891 |
-
add_time_ids = list(original_size + crops_coords_top_left + target_size)
|
892 |
-
add_time_ids = torch.tensor([add_time_ids])
|
893 |
-
add_time_ids = add_time_ids.to(accelerator.device, dtype=weight_dtype)
|
894 |
-
return add_time_ids
|
895 |
-
|
896 |
-
if not args.train_text_encoder:
|
897 |
-
tokenizers = [tokenizer_one, tokenizer_two]
|
898 |
-
text_encoders = [text_encoder_one, text_encoder_two]
|
899 |
-
|
900 |
-
def compute_text_embeddings(prompt, text_encoders, tokenizers):
|
901 |
-
with torch.no_grad():
|
902 |
-
prompt_embeds, pooled_prompt_embeds = encode_prompt(text_encoders, tokenizers, prompt)
|
903 |
-
prompt_embeds = prompt_embeds.to(accelerator.device)
|
904 |
-
pooled_prompt_embeds = pooled_prompt_embeds.to(accelerator.device)
|
905 |
-
return prompt_embeds, pooled_prompt_embeds
|
906 |
-
|
907 |
-
# Handle instance prompt.
|
908 |
-
instance_time_ids = compute_time_ids()
|
909 |
-
if not args.train_text_encoder:
|
910 |
-
instance_prompt_hidden_states, instance_pooled_prompt_embeds = compute_text_embeddings(
|
911 |
-
args.instance_prompt, text_encoders, tokenizers
|
912 |
-
)
|
913 |
-
|
914 |
-
# Handle class prompt for prior-preservation.
|
915 |
-
if args.with_prior_preservation:
|
916 |
-
class_time_ids = compute_time_ids()
|
917 |
-
if not args.train_text_encoder:
|
918 |
-
class_prompt_hidden_states, class_pooled_prompt_embeds = compute_text_embeddings(
|
919 |
-
args.class_prompt, text_encoders, tokenizers
|
920 |
-
)
|
921 |
-
|
922 |
-
# Clear the memory here.
|
923 |
-
if not args.train_text_encoder:
|
924 |
-
del tokenizers, text_encoders
|
925 |
-
gc.collect()
|
926 |
-
torch.cuda.empty_cache()
|
927 |
-
|
928 |
-
# Pack the statically computed variables appropriately. This is so that we don't
|
929 |
-
# have to pass them to the dataloader.
|
930 |
-
add_time_ids = instance_time_ids
|
931 |
-
if args.with_prior_preservation:
|
932 |
-
add_time_ids = torch.cat([add_time_ids, class_time_ids], dim=0)
|
933 |
-
|
934 |
-
if not args.train_text_encoder:
|
935 |
-
prompt_embeds = instance_prompt_hidden_states
|
936 |
-
unet_add_text_embeds = instance_pooled_prompt_embeds
|
937 |
-
if args.with_prior_preservation:
|
938 |
-
prompt_embeds = torch.cat([prompt_embeds, class_prompt_hidden_states], dim=0)
|
939 |
-
unet_add_text_embeds = torch.cat([unet_add_text_embeds, class_pooled_prompt_embeds], dim=0)
|
940 |
-
else:
|
941 |
-
tokens_one = tokenize_prompt(tokenizer_one, args.instance_prompt)
|
942 |
-
tokens_two = tokenize_prompt(tokenizer_two, args.instance_prompt)
|
943 |
-
if args.with_prior_preservation:
|
944 |
-
class_tokens_one = tokenize_prompt(tokenizer_one, args.class_prompt)
|
945 |
-
class_tokens_two = tokenize_prompt(tokenizer_two, args.class_prompt)
|
946 |
-
tokens_one = torch.cat([tokens_one, class_tokens_one], dim=0)
|
947 |
-
tokens_two = torch.cat([tokens_two, class_tokens_two], dim=0)
|
948 |
-
|
949 |
-
# Dataset and DataLoaders creation:
|
950 |
-
train_dataset = DreamBoothDataset(
|
951 |
-
instance_data_root=args.instance_data_dir,
|
952 |
-
class_data_root=args.class_data_dir if args.with_prior_preservation else None,
|
953 |
-
class_num=args.num_class_images,
|
954 |
-
size=args.resolution,
|
955 |
-
center_crop=args.center_crop,
|
956 |
-
)
|
957 |
-
|
958 |
-
train_dataloader = torch.utils.data.DataLoader(
|
959 |
-
train_dataset,
|
960 |
-
batch_size=args.train_batch_size,
|
961 |
-
shuffle=True,
|
962 |
-
collate_fn=lambda examples: collate_fn(examples, args.with_prior_preservation),
|
963 |
-
num_workers=args.dataloader_num_workers,
|
964 |
-
)
|
965 |
-
|
966 |
-
# Scheduler and math around the number of training steps.
|
967 |
-
overrode_max_train_steps = False
|
968 |
-
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
|
969 |
-
if args.max_train_steps is None:
|
970 |
-
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
|
971 |
-
overrode_max_train_steps = True
|
972 |
-
|
973 |
-
lr_scheduler = get_scheduler(
|
974 |
-
args.lr_scheduler,
|
975 |
-
optimizer=optimizer,
|
976 |
-
num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes,
|
977 |
-
num_training_steps=args.max_train_steps * accelerator.num_processes,
|
978 |
-
num_cycles=args.lr_num_cycles,
|
979 |
-
power=args.lr_power,
|
980 |
-
)
|
981 |
-
|
982 |
-
# Prepare everything with our `accelerator`.
|
983 |
-
if args.train_text_encoder:
|
984 |
-
unet, text_encoder_one, text_encoder_two, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
|
985 |
-
unet, text_encoder_one, text_encoder_two, optimizer, train_dataloader, lr_scheduler
|
986 |
-
)
|
987 |
-
else:
|
988 |
-
unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
|
989 |
-
unet, optimizer, train_dataloader, lr_scheduler
|
990 |
-
)
|
991 |
-
|
992 |
-
# We need to recalculate our total training steps as the size of the training dataloader may have changed.
|
993 |
-
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
|
994 |
-
if overrode_max_train_steps:
|
995 |
-
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
|
996 |
-
# Afterwards we recalculate our number of training epochs
|
997 |
-
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
|
998 |
-
|
999 |
-
# We need to initialize the trackers we use, and also store our configuration.
|
1000 |
-
# The trackers initializes automatically on the main process.
|
1001 |
-
if accelerator.is_main_process:
|
1002 |
-
accelerator.init_trackers("dreambooth-lora-sd-xl", config=vars(args))
|
1003 |
-
|
1004 |
-
# Train!
|
1005 |
-
total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
|
1006 |
-
|
1007 |
-
logger.info("***** Running training *****")
|
1008 |
-
logger.info(f" Num examples = {len(train_dataset)}")
|
1009 |
-
logger.info(f" Num batches each epoch = {len(train_dataloader)}")
|
1010 |
-
logger.info(f" Num Epochs = {args.num_train_epochs}")
|
1011 |
-
logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
|
1012 |
-
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
|
1013 |
-
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
|
1014 |
-
logger.info(f" Total optimization steps = {args.max_train_steps}")
|
1015 |
-
global_step = 0
|
1016 |
-
first_epoch = 0
|
1017 |
-
|
1018 |
-
# Potentially load in the weights and states from a previous save
|
1019 |
-
if args.resume_from_checkpoint:
|
1020 |
-
if args.resume_from_checkpoint != "latest":
|
1021 |
-
path = os.path.basename(args.resume_from_checkpoint)
|
1022 |
-
else:
|
1023 |
-
# Get the mos recent checkpoint
|
1024 |
-
dirs = os.listdir(args.output_dir)
|
1025 |
-
dirs = [d for d in dirs if d.startswith("checkpoint")]
|
1026 |
-
dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
|
1027 |
-
path = dirs[-1] if len(dirs) > 0 else None
|
1028 |
-
|
1029 |
-
if path is None:
|
1030 |
-
accelerator.print(
|
1031 |
-
f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
|
1032 |
-
)
|
1033 |
-
args.resume_from_checkpoint = None
|
1034 |
-
else:
|
1035 |
-
accelerator.print(f"Resuming from checkpoint {path}")
|
1036 |
-
accelerator.load_state(os.path.join(args.output_dir, path))
|
1037 |
-
global_step = int(path.split("-")[1])
|
1038 |
-
|
1039 |
-
resume_global_step = global_step * args.gradient_accumulation_steps
|
1040 |
-
first_epoch = global_step // num_update_steps_per_epoch
|
1041 |
-
resume_step = resume_global_step % (num_update_steps_per_epoch * args.gradient_accumulation_steps)
|
1042 |
-
|
1043 |
-
# Only show the progress bar once on each machine.
|
1044 |
-
progress_bar = tqdm(range(global_step, args.max_train_steps), disable=not accelerator.is_local_main_process)
|
1045 |
-
progress_bar.set_description("Steps")
|
1046 |
-
|
1047 |
-
for epoch in range(first_epoch, args.num_train_epochs):
|
1048 |
-
unet.train()
|
1049 |
-
if args.train_text_encoder:
|
1050 |
-
text_encoder_one.train()
|
1051 |
-
text_encoder_two.train()
|
1052 |
-
for step, batch in enumerate(train_dataloader):
|
1053 |
-
# Skip steps until we reach the resumed step
|
1054 |
-
if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step:
|
1055 |
-
if step % args.gradient_accumulation_steps == 0:
|
1056 |
-
progress_bar.update(1)
|
1057 |
-
continue
|
1058 |
-
|
1059 |
-
with accelerator.accumulate(unet):
|
1060 |
-
if args.pretrained_vae_model_name_or_path is None:
|
1061 |
-
pixel_values = batch["pixel_values"]
|
1062 |
-
else:
|
1063 |
-
pixel_values = batch["pixel_values"].to(dtype=weight_dtype)
|
1064 |
-
|
1065 |
-
# Convert images to latent space
|
1066 |
-
model_input = vae.encode(pixel_values).latent_dist.sample()
|
1067 |
-
model_input = model_input * vae.config.scaling_factor
|
1068 |
-
if args.pretrained_vae_model_name_or_path is None:
|
1069 |
-
model_input = model_input.to(weight_dtype)
|
1070 |
-
|
1071 |
-
# Sample noise that we'll add to the latents
|
1072 |
-
noise = torch.randn_like(model_input)
|
1073 |
-
bsz = model_input.shape[0]
|
1074 |
-
# Sample a random timestep for each image
|
1075 |
-
timesteps = torch.randint(
|
1076 |
-
0, noise_scheduler.config.num_train_timesteps, (bsz,), device=model_input.device
|
1077 |
-
)
|
1078 |
-
timesteps = timesteps.long()
|
1079 |
-
|
1080 |
-
# Add noise to the model input according to the noise magnitude at each timestep
|
1081 |
-
# (this is the forward diffusion process)
|
1082 |
-
noisy_model_input = noise_scheduler.add_noise(model_input, noise, timesteps)
|
1083 |
-
|
1084 |
-
# Calculate the elements to repeat depending on the use of prior-preservation.
|
1085 |
-
elems_to_repeat = bsz // 2 if args.with_prior_preservation else bsz
|
1086 |
-
|
1087 |
-
# Predict the noise residual
|
1088 |
-
if not args.train_text_encoder:
|
1089 |
-
unet_added_conditions = {
|
1090 |
-
"time_ids": add_time_ids.repeat(elems_to_repeat, 1),
|
1091 |
-
"text_embeds": unet_add_text_embeds.repeat(elems_to_repeat, 1),
|
1092 |
-
}
|
1093 |
-
prompt_embeds = prompt_embeds.repeat(elems_to_repeat, 1, 1)
|
1094 |
-
model_pred = unet(
|
1095 |
-
noisy_model_input,
|
1096 |
-
timesteps,
|
1097 |
-
prompt_embeds,
|
1098 |
-
added_cond_kwargs=unet_added_conditions,
|
1099 |
-
).sample
|
1100 |
-
else:
|
1101 |
-
unet_added_conditions = {"time_ids": add_time_ids.repeat(elems_to_repeat, 1)}
|
1102 |
-
prompt_embeds, pooled_prompt_embeds = encode_prompt(
|
1103 |
-
text_encoders=[text_encoder_one, text_encoder_two],
|
1104 |
-
tokenizers=None,
|
1105 |
-
prompt=None,
|
1106 |
-
text_input_ids_list=[tokens_one, tokens_two],
|
1107 |
-
)
|
1108 |
-
unet_added_conditions.update({"text_embeds": pooled_prompt_embeds.repeat(elems_to_repeat, 1)})
|
1109 |
-
prompt_embeds = prompt_embeds.repeat(elems_to_repeat, 1, 1)
|
1110 |
-
model_pred = unet(
|
1111 |
-
noisy_model_input, timesteps, prompt_embeds, added_cond_kwargs=unet_added_conditions
|
1112 |
-
).sample
|
1113 |
-
|
1114 |
-
# Get the target for loss depending on the prediction type
|
1115 |
-
if noise_scheduler.config.prediction_type == "epsilon":
|
1116 |
-
target = noise
|
1117 |
-
elif noise_scheduler.config.prediction_type == "v_prediction":
|
1118 |
-
target = noise_scheduler.get_velocity(model_input, noise, timesteps)
|
1119 |
-
else:
|
1120 |
-
raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
|
1121 |
-
|
1122 |
-
if args.with_prior_preservation:
|
1123 |
-
# Chunk the noise and model_pred into two parts and compute the loss on each part separately.
|
1124 |
-
model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0)
|
1125 |
-
target, target_prior = torch.chunk(target, 2, dim=0)
|
1126 |
-
|
1127 |
-
# Compute instance loss
|
1128 |
-
loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
|
1129 |
-
|
1130 |
-
# Compute prior loss
|
1131 |
-
prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean")
|
1132 |
-
|
1133 |
-
# Add the prior loss to the instance loss.
|
1134 |
-
loss = loss + args.prior_loss_weight * prior_loss
|
1135 |
-
else:
|
1136 |
-
loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
|
1137 |
-
|
1138 |
-
accelerator.backward(loss)
|
1139 |
-
if accelerator.sync_gradients:
|
1140 |
-
params_to_clip = (
|
1141 |
-
itertools.chain(unet_lora_parameters, text_lora_parameters_one, text_lora_parameters_two)
|
1142 |
-
if args.train_text_encoder
|
1143 |
-
else unet_lora_parameters
|
1144 |
-
)
|
1145 |
-
accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
|
1146 |
-
optimizer.step()
|
1147 |
-
lr_scheduler.step()
|
1148 |
-
optimizer.zero_grad()
|
1149 |
-
|
1150 |
-
# Checks if the accelerator has performed an optimization step behind the scenes
|
1151 |
-
if accelerator.sync_gradients:
|
1152 |
-
progress_bar.update(1)
|
1153 |
-
global_step += 1
|
1154 |
-
|
1155 |
-
if accelerator.is_main_process:
|
1156 |
-
if global_step % args.checkpointing_steps == 0:
|
1157 |
-
# _before_ saving state, check if this save would set us over the `checkpoints_total_limit`
|
1158 |
-
if args.checkpoints_total_limit is not None:
|
1159 |
-
checkpoints = os.listdir(args.output_dir)
|
1160 |
-
checkpoints = [d for d in checkpoints if d.startswith("checkpoint")]
|
1161 |
-
checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1]))
|
1162 |
-
|
1163 |
-
# before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints
|
1164 |
-
if len(checkpoints) >= args.checkpoints_total_limit:
|
1165 |
-
num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1
|
1166 |
-
removing_checkpoints = checkpoints[0:num_to_remove]
|
1167 |
-
|
1168 |
-
logger.info(
|
1169 |
-
f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints"
|
1170 |
-
)
|
1171 |
-
logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}")
|
1172 |
-
|
1173 |
-
for removing_checkpoint in removing_checkpoints:
|
1174 |
-
removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint)
|
1175 |
-
shutil.rmtree(removing_checkpoint)
|
1176 |
-
|
1177 |
-
save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
|
1178 |
-
accelerator.save_state(save_path)
|
1179 |
-
logger.info(f"Saved state to {save_path}")
|
1180 |
-
|
1181 |
-
logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
|
1182 |
-
progress_bar.set_postfix(**logs)
|
1183 |
-
accelerator.log(logs, step=global_step)
|
1184 |
-
|
1185 |
-
if global_step >= args.max_train_steps:
|
1186 |
-
break
|
1187 |
-
|
1188 |
-
if accelerator.is_main_process:
|
1189 |
-
if args.validation_prompt is not None and epoch % args.validation_epochs == 0:
|
1190 |
-
logger.info(
|
1191 |
-
f"Running validation... \n Generating {args.num_validation_images} images with prompt:"
|
1192 |
-
f" {args.validation_prompt}."
|
1193 |
-
)
|
1194 |
-
# create pipeline
|
1195 |
-
if not args.train_text_encoder:
|
1196 |
-
text_encoder_one = text_encoder_cls_one.from_pretrained(
|
1197 |
-
args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision
|
1198 |
-
)
|
1199 |
-
text_encoder_two = text_encoder_cls_two.from_pretrained(
|
1200 |
-
args.pretrained_model_name_or_path, subfolder="text_encoder_2", revision=args.revision
|
1201 |
-
)
|
1202 |
-
pipeline = StableDiffusionXLPipeline.from_pretrained(
|
1203 |
-
args.pretrained_model_name_or_path,
|
1204 |
-
vae=vae,
|
1205 |
-
text_encoder=accelerator.unwrap_model(text_encoder_one),
|
1206 |
-
text_encoder_2=accelerator.unwrap_model(text_encoder_two),
|
1207 |
-
unet=accelerator.unwrap_model(unet),
|
1208 |
-
revision=args.revision,
|
1209 |
-
torch_dtype=weight_dtype,
|
1210 |
-
)
|
1211 |
-
|
1212 |
-
# We train on the simplified learning objective. If we were previously predicting a variance, we need the scheduler to ignore it
|
1213 |
-
scheduler_args = {}
|
1214 |
-
|
1215 |
-
if "variance_type" in pipeline.scheduler.config:
|
1216 |
-
variance_type = pipeline.scheduler.config.variance_type
|
1217 |
-
|
1218 |
-
if variance_type in ["learned", "learned_range"]:
|
1219 |
-
variance_type = "fixed_small"
|
1220 |
-
|
1221 |
-
scheduler_args["variance_type"] = variance_type
|
1222 |
-
|
1223 |
-
pipeline.scheduler = DPMSolverMultistepScheduler.from_config(
|
1224 |
-
pipeline.scheduler.config, **scheduler_args
|
1225 |
-
)
|
1226 |
-
|
1227 |
-
pipeline = pipeline.to(accelerator.device)
|
1228 |
-
pipeline.set_progress_bar_config(disable=True)
|
1229 |
-
|
1230 |
-
# run inference
|
1231 |
-
generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed else None
|
1232 |
-
pipeline_args = {"prompt": args.validation_prompt}
|
1233 |
-
|
1234 |
-
with torch.cuda.amp.autocast():
|
1235 |
-
images = [
|
1236 |
-
pipeline(**pipeline_args, generator=generator).images[0]
|
1237 |
-
for _ in range(args.num_validation_images)
|
1238 |
-
]
|
1239 |
-
|
1240 |
-
for tracker in accelerator.trackers:
|
1241 |
-
if tracker.name == "tensorboard":
|
1242 |
-
np_images = np.stack([np.asarray(img) for img in images])
|
1243 |
-
tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC")
|
1244 |
-
if tracker.name == "wandb":
|
1245 |
-
tracker.log(
|
1246 |
-
{
|
1247 |
-
"validation": [
|
1248 |
-
wandb.Image(image, caption=f"{i}: {args.validation_prompt}")
|
1249 |
-
for i, image in enumerate(images)
|
1250 |
-
]
|
1251 |
-
}
|
1252 |
-
)
|
1253 |
-
|
1254 |
-
del pipeline
|
1255 |
-
torch.cuda.empty_cache()
|
1256 |
-
|
1257 |
-
# Save the lora layers
|
1258 |
-
accelerator.wait_for_everyone()
|
1259 |
-
if accelerator.is_main_process:
|
1260 |
-
unet = accelerator.unwrap_model(unet)
|
1261 |
-
unet = unet.to(torch.float32)
|
1262 |
-
unet_lora_layers = unet_attn_processors_state_dict(unet)
|
1263 |
-
|
1264 |
-
if args.train_text_encoder:
|
1265 |
-
text_encoder_one = accelerator.unwrap_model(text_encoder_one)
|
1266 |
-
text_encoder_lora_layers = text_encoder_lora_state_dict(text_encoder_one.to(torch.float32))
|
1267 |
-
text_encoder_two = accelerator.unwrap_model(text_encoder_two)
|
1268 |
-
text_encoder_2_lora_layers = text_encoder_lora_state_dict(text_encoder_two.to(torch.float32))
|
1269 |
-
else:
|
1270 |
-
text_encoder_lora_layers = None
|
1271 |
-
text_encoder_2_lora_layers = None
|
1272 |
-
|
1273 |
-
StableDiffusionXLPipeline.save_lora_weights(
|
1274 |
-
save_directory=args.output_dir,
|
1275 |
-
unet_lora_layers=unet_lora_layers,
|
1276 |
-
text_encoder_lora_layers=text_encoder_lora_layers,
|
1277 |
-
text_encoder_2_lora_layers=text_encoder_2_lora_layers,
|
1278 |
-
)
|
1279 |
-
|
1280 |
-
# Final inference
|
1281 |
-
# Load previous pipeline
|
1282 |
-
vae = AutoencoderKL.from_pretrained(
|
1283 |
-
vae_path,
|
1284 |
-
subfolder="vae" if args.pretrained_vae_model_name_or_path is None else None,
|
1285 |
-
revision=args.revision,
|
1286 |
-
torch_dtype=weight_dtype,
|
1287 |
-
)
|
1288 |
-
pipeline = StableDiffusionXLPipeline.from_pretrained(
|
1289 |
-
args.pretrained_model_name_or_path, vae=vae, revision=args.revision, torch_dtype=weight_dtype
|
1290 |
-
)
|
1291 |
-
|
1292 |
-
# We train on the simplified learning objective. If we were previously predicting a variance, we need the scheduler to ignore it
|
1293 |
-
scheduler_args = {}
|
1294 |
-
|
1295 |
-
if "variance_type" in pipeline.scheduler.config:
|
1296 |
-
variance_type = pipeline.scheduler.config.variance_type
|
1297 |
-
|
1298 |
-
if variance_type in ["learned", "learned_range"]:
|
1299 |
-
variance_type = "fixed_small"
|
1300 |
-
|
1301 |
-
scheduler_args["variance_type"] = variance_type
|
1302 |
-
|
1303 |
-
pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config, **scheduler_args)
|
1304 |
-
|
1305 |
-
pipeline = pipeline.to(accelerator.device)
|
1306 |
-
|
1307 |
-
# load attention processors
|
1308 |
-
pipeline.load_lora_weights(args.output_dir)
|
1309 |
-
|
1310 |
-
# run inference
|
1311 |
-
images = []
|
1312 |
-
if args.validation_prompt and args.num_validation_images > 0:
|
1313 |
-
generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed else None
|
1314 |
-
images = [
|
1315 |
-
pipeline(args.validation_prompt, num_inference_steps=25, generator=generator).images[0]
|
1316 |
-
for _ in range(args.num_validation_images)
|
1317 |
-
]
|
1318 |
-
|
1319 |
-
for tracker in accelerator.trackers:
|
1320 |
-
if tracker.name == "tensorboard":
|
1321 |
-
np_images = np.stack([np.asarray(img) for img in images])
|
1322 |
-
tracker.writer.add_images("test", np_images, epoch, dataformats="NHWC")
|
1323 |
-
if tracker.name == "wandb":
|
1324 |
-
tracker.log(
|
1325 |
-
{
|
1326 |
-
"test": [
|
1327 |
-
wandb.Image(image, caption=f"{i}: {args.validation_prompt}")
|
1328 |
-
for i, image in enumerate(images)
|
1329 |
-
]
|
1330 |
-
}
|
1331 |
-
)
|
1332 |
-
|
1333 |
-
if args.push_to_hub:
|
1334 |
-
save_model_card(
|
1335 |
-
repo_id,
|
1336 |
-
images=images,
|
1337 |
-
base_model=args.pretrained_model_name_or_path,
|
1338 |
-
train_text_encoder=args.train_text_encoder,
|
1339 |
-
prompt=args.instance_prompt,
|
1340 |
-
repo_folder=args.output_dir,
|
1341 |
-
vae_path=args.pretrained_vae_model_name_or_path,
|
1342 |
-
)
|
1343 |
-
upload_folder(
|
1344 |
-
repo_id=repo_id,
|
1345 |
-
folder_path=args.output_dir,
|
1346 |
-
commit_message="End of training",
|
1347 |
-
ignore_patterns=["step_*", "epoch_*"],
|
1348 |
-
)
|
1349 |
-
|
1350 |
-
accelerator.end_training()
|
1351 |
-
|
1352 |
-
|
1353 |
-
if __name__ == "__main__":
|
1354 |
-
args = parse_args()
|
1355 |
-
main(args)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/configs/vfnet/vfnet_r101_fpn_mstrain_2x_coco.py
DELETED
@@ -1,2 +0,0 @@
|
|
1 |
-
_base_ = './vfnet_r50_fpn_mstrain_2x_coco.py'
|
2 |
-
model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/mmdet/core/evaluation/__init__.py
DELETED
@@ -1,15 +0,0 @@
|
|
1 |
-
from .class_names import (cityscapes_classes, coco_classes, dataset_aliases,
|
2 |
-
get_classes, imagenet_det_classes,
|
3 |
-
imagenet_vid_classes, voc_classes)
|
4 |
-
from .eval_hooks import DistEvalHook, EvalHook
|
5 |
-
from .mean_ap import average_precision, eval_map, print_map_summary
|
6 |
-
from .recall import (eval_recalls, plot_iou_recall, plot_num_recall,
|
7 |
-
print_recall_summary)
|
8 |
-
|
9 |
-
__all__ = [
|
10 |
-
'voc_classes', 'imagenet_det_classes', 'imagenet_vid_classes',
|
11 |
-
'coco_classes', 'cityscapes_classes', 'dataset_aliases', 'get_classes',
|
12 |
-
'DistEvalHook', 'EvalHook', 'average_precision', 'eval_map',
|
13 |
-
'print_map_summary', 'eval_recalls', 'print_recall_summary',
|
14 |
-
'plot_num_recall', 'plot_iou_recall'
|
15 |
-
]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/ssd_head.py
DELETED
@@ -1,265 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
import torch.nn as nn
|
3 |
-
import torch.nn.functional as F
|
4 |
-
from mmcv.cnn import xavier_init
|
5 |
-
from mmcv.runner import force_fp32
|
6 |
-
|
7 |
-
from mmdet.core import (build_anchor_generator, build_assigner,
|
8 |
-
build_bbox_coder, build_sampler, multi_apply)
|
9 |
-
from ..builder import HEADS
|
10 |
-
from ..losses import smooth_l1_loss
|
11 |
-
from .anchor_head import AnchorHead
|
12 |
-
|
13 |
-
|
14 |
-
# TODO: add loss evaluator for SSD
|
15 |
-
@HEADS.register_module()
|
16 |
-
class SSDHead(AnchorHead):
|
17 |
-
"""SSD head used in https://arxiv.org/abs/1512.02325.
|
18 |
-
|
19 |
-
Args:
|
20 |
-
num_classes (int): Number of categories excluding the background
|
21 |
-
category.
|
22 |
-
in_channels (int): Number of channels in the input feature map.
|
23 |
-
anchor_generator (dict): Config dict for anchor generator
|
24 |
-
bbox_coder (dict): Config of bounding box coder.
|
25 |
-
reg_decoded_bbox (bool): If true, the regression loss would be
|
26 |
-
applied directly on decoded bounding boxes, converting both
|
27 |
-
the predicted boxes and regression targets to absolute
|
28 |
-
coordinates format. Default False. It should be `True` when
|
29 |
-
using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head.
|
30 |
-
train_cfg (dict): Training config of anchor head.
|
31 |
-
test_cfg (dict): Testing config of anchor head.
|
32 |
-
""" # noqa: W605
|
33 |
-
|
34 |
-
def __init__(self,
|
35 |
-
num_classes=80,
|
36 |
-
in_channels=(512, 1024, 512, 256, 256, 256),
|
37 |
-
anchor_generator=dict(
|
38 |
-
type='SSDAnchorGenerator',
|
39 |
-
scale_major=False,
|
40 |
-
input_size=300,
|
41 |
-
strides=[8, 16, 32, 64, 100, 300],
|
42 |
-
ratios=([2], [2, 3], [2, 3], [2, 3], [2], [2]),
|
43 |
-
basesize_ratio_range=(0.1, 0.9)),
|
44 |
-
bbox_coder=dict(
|
45 |
-
type='DeltaXYWHBBoxCoder',
|
46 |
-
clip_border=True,
|
47 |
-
target_means=[.0, .0, .0, .0],
|
48 |
-
target_stds=[1.0, 1.0, 1.0, 1.0],
|
49 |
-
),
|
50 |
-
reg_decoded_bbox=False,
|
51 |
-
train_cfg=None,
|
52 |
-
test_cfg=None):
|
53 |
-
super(AnchorHead, self).__init__()
|
54 |
-
self.num_classes = num_classes
|
55 |
-
self.in_channels = in_channels
|
56 |
-
self.cls_out_channels = num_classes + 1 # add background class
|
57 |
-
self.anchor_generator = build_anchor_generator(anchor_generator)
|
58 |
-
num_anchors = self.anchor_generator.num_base_anchors
|
59 |
-
|
60 |
-
reg_convs = []
|
61 |
-
cls_convs = []
|
62 |
-
for i in range(len(in_channels)):
|
63 |
-
reg_convs.append(
|
64 |
-
nn.Conv2d(
|
65 |
-
in_channels[i],
|
66 |
-
num_anchors[i] * 4,
|
67 |
-
kernel_size=3,
|
68 |
-
padding=1))
|
69 |
-
cls_convs.append(
|
70 |
-
nn.Conv2d(
|
71 |
-
in_channels[i],
|
72 |
-
num_anchors[i] * (num_classes + 1),
|
73 |
-
kernel_size=3,
|
74 |
-
padding=1))
|
75 |
-
self.reg_convs = nn.ModuleList(reg_convs)
|
76 |
-
self.cls_convs = nn.ModuleList(cls_convs)
|
77 |
-
|
78 |
-
self.bbox_coder = build_bbox_coder(bbox_coder)
|
79 |
-
self.reg_decoded_bbox = reg_decoded_bbox
|
80 |
-
self.use_sigmoid_cls = False
|
81 |
-
self.cls_focal_loss = False
|
82 |
-
self.train_cfg = train_cfg
|
83 |
-
self.test_cfg = test_cfg
|
84 |
-
# set sampling=False for archor_target
|
85 |
-
self.sampling = False
|
86 |
-
if self.train_cfg:
|
87 |
-
self.assigner = build_assigner(self.train_cfg.assigner)
|
88 |
-
# SSD sampling=False so use PseudoSampler
|
89 |
-
sampler_cfg = dict(type='PseudoSampler')
|
90 |
-
self.sampler = build_sampler(sampler_cfg, context=self)
|
91 |
-
self.fp16_enabled = False
|
92 |
-
|
93 |
-
def init_weights(self):
|
94 |
-
"""Initialize weights of the head."""
|
95 |
-
for m in self.modules():
|
96 |
-
if isinstance(m, nn.Conv2d):
|
97 |
-
xavier_init(m, distribution='uniform', bias=0)
|
98 |
-
|
99 |
-
def forward(self, feats):
|
100 |
-
"""Forward features from the upstream network.
|
101 |
-
|
102 |
-
Args:
|
103 |
-
feats (tuple[Tensor]): Features from the upstream network, each is
|
104 |
-
a 4D-tensor.
|
105 |
-
|
106 |
-
Returns:
|
107 |
-
tuple:
|
108 |
-
cls_scores (list[Tensor]): Classification scores for all scale
|
109 |
-
levels, each is a 4D-tensor, the channels number is
|
110 |
-
num_anchors * num_classes.
|
111 |
-
bbox_preds (list[Tensor]): Box energies / deltas for all scale
|
112 |
-
levels, each is a 4D-tensor, the channels number is
|
113 |
-
num_anchors * 4.
|
114 |
-
"""
|
115 |
-
cls_scores = []
|
116 |
-
bbox_preds = []
|
117 |
-
for feat, reg_conv, cls_conv in zip(feats, self.reg_convs,
|
118 |
-
self.cls_convs):
|
119 |
-
cls_scores.append(cls_conv(feat))
|
120 |
-
bbox_preds.append(reg_conv(feat))
|
121 |
-
return cls_scores, bbox_preds
|
122 |
-
|
123 |
-
def loss_single(self, cls_score, bbox_pred, anchor, labels, label_weights,
|
124 |
-
bbox_targets, bbox_weights, num_total_samples):
|
125 |
-
"""Compute loss of a single image.
|
126 |
-
|
127 |
-
Args:
|
128 |
-
cls_score (Tensor): Box scores for eachimage
|
129 |
-
Has shape (num_total_anchors, num_classes).
|
130 |
-
bbox_pred (Tensor): Box energies / deltas for each image
|
131 |
-
level with shape (num_total_anchors, 4).
|
132 |
-
anchors (Tensor): Box reference for each scale level with shape
|
133 |
-
(num_total_anchors, 4).
|
134 |
-
labels (Tensor): Labels of each anchors with shape
|
135 |
-
(num_total_anchors,).
|
136 |
-
label_weights (Tensor): Label weights of each anchor with shape
|
137 |
-
(num_total_anchors,)
|
138 |
-
bbox_targets (Tensor): BBox regression targets of each anchor wight
|
139 |
-
shape (num_total_anchors, 4).
|
140 |
-
bbox_weights (Tensor): BBox regression loss weights of each anchor
|
141 |
-
with shape (num_total_anchors, 4).
|
142 |
-
num_total_samples (int): If sampling, num total samples equal to
|
143 |
-
the number of total anchors; Otherwise, it is the number of
|
144 |
-
positive anchors.
|
145 |
-
|
146 |
-
Returns:
|
147 |
-
dict[str, Tensor]: A dictionary of loss components.
|
148 |
-
"""
|
149 |
-
|
150 |
-
loss_cls_all = F.cross_entropy(
|
151 |
-
cls_score, labels, reduction='none') * label_weights
|
152 |
-
# FG cat_id: [0, num_classes -1], BG cat_id: num_classes
|
153 |
-
pos_inds = ((labels >= 0) &
|
154 |
-
(labels < self.num_classes)).nonzero().reshape(-1)
|
155 |
-
neg_inds = (labels == self.num_classes).nonzero().view(-1)
|
156 |
-
|
157 |
-
num_pos_samples = pos_inds.size(0)
|
158 |
-
num_neg_samples = self.train_cfg.neg_pos_ratio * num_pos_samples
|
159 |
-
if num_neg_samples > neg_inds.size(0):
|
160 |
-
num_neg_samples = neg_inds.size(0)
|
161 |
-
topk_loss_cls_neg, _ = loss_cls_all[neg_inds].topk(num_neg_samples)
|
162 |
-
loss_cls_pos = loss_cls_all[pos_inds].sum()
|
163 |
-
loss_cls_neg = topk_loss_cls_neg.sum()
|
164 |
-
loss_cls = (loss_cls_pos + loss_cls_neg) / num_total_samples
|
165 |
-
|
166 |
-
if self.reg_decoded_bbox:
|
167 |
-
# When the regression loss (e.g. `IouLoss`, `GIouLoss`)
|
168 |
-
# is applied directly on the decoded bounding boxes, it
|
169 |
-
# decodes the already encoded coordinates to absolute format.
|
170 |
-
bbox_pred = self.bbox_coder.decode(anchor, bbox_pred)
|
171 |
-
|
172 |
-
loss_bbox = smooth_l1_loss(
|
173 |
-
bbox_pred,
|
174 |
-
bbox_targets,
|
175 |
-
bbox_weights,
|
176 |
-
beta=self.train_cfg.smoothl1_beta,
|
177 |
-
avg_factor=num_total_samples)
|
178 |
-
return loss_cls[None], loss_bbox
|
179 |
-
|
180 |
-
@force_fp32(apply_to=('cls_scores', 'bbox_preds'))
|
181 |
-
def loss(self,
|
182 |
-
cls_scores,
|
183 |
-
bbox_preds,
|
184 |
-
gt_bboxes,
|
185 |
-
gt_labels,
|
186 |
-
img_metas,
|
187 |
-
gt_bboxes_ignore=None):
|
188 |
-
"""Compute losses of the head.
|
189 |
-
|
190 |
-
Args:
|
191 |
-
cls_scores (list[Tensor]): Box scores for each scale level
|
192 |
-
Has shape (N, num_anchors * num_classes, H, W)
|
193 |
-
bbox_preds (list[Tensor]): Box energies / deltas for each scale
|
194 |
-
level with shape (N, num_anchors * 4, H, W)
|
195 |
-
gt_bboxes (list[Tensor]): each item are the truth boxes for each
|
196 |
-
image in [tl_x, tl_y, br_x, br_y] format.
|
197 |
-
gt_labels (list[Tensor]): class indices corresponding to each box
|
198 |
-
img_metas (list[dict]): Meta information of each image, e.g.,
|
199 |
-
image size, scaling factor, etc.
|
200 |
-
gt_bboxes_ignore (None | list[Tensor]): specify which bounding
|
201 |
-
boxes can be ignored when computing the loss.
|
202 |
-
|
203 |
-
Returns:
|
204 |
-
dict[str, Tensor]: A dictionary of loss components.
|
205 |
-
"""
|
206 |
-
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
|
207 |
-
assert len(featmap_sizes) == self.anchor_generator.num_levels
|
208 |
-
|
209 |
-
device = cls_scores[0].device
|
210 |
-
|
211 |
-
anchor_list, valid_flag_list = self.get_anchors(
|
212 |
-
featmap_sizes, img_metas, device=device)
|
213 |
-
cls_reg_targets = self.get_targets(
|
214 |
-
anchor_list,
|
215 |
-
valid_flag_list,
|
216 |
-
gt_bboxes,
|
217 |
-
img_metas,
|
218 |
-
gt_bboxes_ignore_list=gt_bboxes_ignore,
|
219 |
-
gt_labels_list=gt_labels,
|
220 |
-
label_channels=1,
|
221 |
-
unmap_outputs=False)
|
222 |
-
if cls_reg_targets is None:
|
223 |
-
return None
|
224 |
-
(labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
|
225 |
-
num_total_pos, num_total_neg) = cls_reg_targets
|
226 |
-
|
227 |
-
num_images = len(img_metas)
|
228 |
-
all_cls_scores = torch.cat([
|
229 |
-
s.permute(0, 2, 3, 1).reshape(
|
230 |
-
num_images, -1, self.cls_out_channels) for s in cls_scores
|
231 |
-
], 1)
|
232 |
-
all_labels = torch.cat(labels_list, -1).view(num_images, -1)
|
233 |
-
all_label_weights = torch.cat(label_weights_list,
|
234 |
-
-1).view(num_images, -1)
|
235 |
-
all_bbox_preds = torch.cat([
|
236 |
-
b.permute(0, 2, 3, 1).reshape(num_images, -1, 4)
|
237 |
-
for b in bbox_preds
|
238 |
-
], -2)
|
239 |
-
all_bbox_targets = torch.cat(bbox_targets_list,
|
240 |
-
-2).view(num_images, -1, 4)
|
241 |
-
all_bbox_weights = torch.cat(bbox_weights_list,
|
242 |
-
-2).view(num_images, -1, 4)
|
243 |
-
|
244 |
-
# concat all level anchors to a single tensor
|
245 |
-
all_anchors = []
|
246 |
-
for i in range(num_images):
|
247 |
-
all_anchors.append(torch.cat(anchor_list[i]))
|
248 |
-
|
249 |
-
# check NaN and Inf
|
250 |
-
assert torch.isfinite(all_cls_scores).all().item(), \
|
251 |
-
'classification scores become infinite or NaN!'
|
252 |
-
assert torch.isfinite(all_bbox_preds).all().item(), \
|
253 |
-
'bbox predications become infinite or NaN!'
|
254 |
-
|
255 |
-
losses_cls, losses_bbox = multi_apply(
|
256 |
-
self.loss_single,
|
257 |
-
all_cls_scores,
|
258 |
-
all_bbox_preds,
|
259 |
-
all_anchors,
|
260 |
-
all_labels,
|
261 |
-
all_label_weights,
|
262 |
-
all_bbox_targets,
|
263 |
-
all_bbox_weights,
|
264 |
-
num_total_samples=num_total_pos)
|
265 |
-
return dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_segmentation/configs/fp16/fcn_r101-d8_512x1024_80k_fp16_cityscapes.py
DELETED
@@ -1,5 +0,0 @@
|
|
1 |
-
_base_ = '../fcn/fcn_r101-d8_512x1024_80k_cityscapes.py'
|
2 |
-
# fp16 settings
|
3 |
-
optimizer_config = dict(type='Fp16OptimizerHook', loss_scale=512.)
|
4 |
-
# fp16 placeholder
|
5 |
-
fp16 = dict()
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Anonymous-sub/Rerender/ControlNet/annotator/mlsd/__init__.py
DELETED
@@ -1,43 +0,0 @@
|
|
1 |
-
# MLSD Line Detection
|
2 |
-
# From https://github.com/navervision/mlsd
|
3 |
-
# Apache-2.0 license
|
4 |
-
|
5 |
-
import cv2
|
6 |
-
import numpy as np
|
7 |
-
import torch
|
8 |
-
import os
|
9 |
-
|
10 |
-
from einops import rearrange
|
11 |
-
from .models.mbv2_mlsd_tiny import MobileV2_MLSD_Tiny
|
12 |
-
from .models.mbv2_mlsd_large import MobileV2_MLSD_Large
|
13 |
-
from .utils import pred_lines
|
14 |
-
|
15 |
-
from annotator.util import annotator_ckpts_path
|
16 |
-
|
17 |
-
|
18 |
-
remote_model_path = "https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/mlsd_large_512_fp32.pth"
|
19 |
-
|
20 |
-
|
21 |
-
class MLSDdetector:
|
22 |
-
def __init__(self):
|
23 |
-
model_path = os.path.join(annotator_ckpts_path, "mlsd_large_512_fp32.pth")
|
24 |
-
if not os.path.exists(model_path):
|
25 |
-
from basicsr.utils.download_util import load_file_from_url
|
26 |
-
load_file_from_url(remote_model_path, model_dir=annotator_ckpts_path)
|
27 |
-
model = MobileV2_MLSD_Large()
|
28 |
-
model.load_state_dict(torch.load(model_path), strict=True)
|
29 |
-
self.model = model.cuda().eval()
|
30 |
-
|
31 |
-
def __call__(self, input_image, thr_v, thr_d):
|
32 |
-
assert input_image.ndim == 3
|
33 |
-
img = input_image
|
34 |
-
img_output = np.zeros_like(img)
|
35 |
-
try:
|
36 |
-
with torch.no_grad():
|
37 |
-
lines = pred_lines(img, self.model, [img.shape[0], img.shape[1]], thr_v, thr_d)
|
38 |
-
for line in lines:
|
39 |
-
x_start, y_start, x_end, y_end = [int(val) for val in line]
|
40 |
-
cv2.line(img_output, (x_start, y_start), (x_end, y_end), [255, 255, 255], 1)
|
41 |
-
except Exception as e:
|
42 |
-
pass
|
43 |
-
return img_output[:, :, 0]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AnthonyTruchetPoC/persistent-docker/Dockerfile
DELETED
@@ -1,100 +0,0 @@
|
|
1 |
-
# syntax=docker/dockerfile:1
|
2 |
-
FROM python:3.10.12
|
3 |
-
|
4 |
-
ARG APP_HOME=/home/user
|
5 |
-
ARG APP_CODE=/opt/code
|
6 |
-
ARG APP_DATA=/data/app
|
7 |
-
ARG HF_HOME=/data/huggingface
|
8 |
-
|
9 |
-
ENV APP_CODE=$APP_CODE \
|
10 |
-
APP_DATA=$APP_DATA \
|
11 |
-
HF_HOME=$HF_HOME
|
12 |
-
|
13 |
-
ENV PYTHONUNBUFFERED=1 \
|
14 |
-
SYSTEM=spaces \
|
15 |
-
SHELL=/bin/bash
|
16 |
-
|
17 |
-
# Remove any third-party apt sources to avoid issues with expiring keys.
|
18 |
-
# Install some basic utilities
|
19 |
-
RUN rm -f /etc/apt/sources.list.d/*.list && \
|
20 |
-
apt-get update && apt-get install -y --no-install-recommends \
|
21 |
-
curl \
|
22 |
-
ca-certificates \
|
23 |
-
sudo \
|
24 |
-
git \
|
25 |
-
git-lfs \
|
26 |
-
zip \
|
27 |
-
unzip \
|
28 |
-
htop \
|
29 |
-
bzip2 \
|
30 |
-
libx11-6 \
|
31 |
-
build-essential \
|
32 |
-
libsndfile-dev \
|
33 |
-
software-properties-common \
|
34 |
-
nodejs \
|
35 |
-
&& rm -rf /var/lib/apt/lists/*
|
36 |
-
|
37 |
-
WORKDIR /opt
|
38 |
-
|
39 |
-
# Create a non-root user and switch to it
|
40 |
-
RUN adduser --disabled-password --gecos '' --shell /bin/bash -u 1000 user \
|
41 |
-
&& chown -R user:user /opt
|
42 |
-
RUN echo "user ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers
|
43 |
-
USER user
|
44 |
-
|
45 |
-
# All users can use /home/user as their home directory
|
46 |
-
ENV HOME=$APP_HOME
|
47 |
-
ENV PATH=$HOME/.local/bin:$PATH
|
48 |
-
RUN mkdir -p $HOME/.cache $HOME/.config \
|
49 |
-
&& chmod -R 777 $HOME
|
50 |
-
|
51 |
-
#######################################
|
52 |
-
# Start root user section
|
53 |
-
#######################################
|
54 |
-
USER root
|
55 |
-
|
56 |
-
# User Debian packages
|
57 |
-
## Security warning : Potential user code executed as root (build time)
|
58 |
-
RUN --mount=target=/root/packages.txt,source=packages.txt \
|
59 |
-
apt-get update \
|
60 |
-
&& xargs -r -a /root/packages.txt apt-get install -y --no-install-recommends \
|
61 |
-
&& rm -rf /var/lib/apt/lists/*
|
62 |
-
|
63 |
-
RUN --mount=target=/root/on_startup.sh,source=on_startup.sh,readwrite \
|
64 |
-
bash /root/on_startup.sh
|
65 |
-
|
66 |
-
#######################################
|
67 |
-
# End root user section
|
68 |
-
#######################################
|
69 |
-
USER user
|
70 |
-
WORKDIR $APP_HOME
|
71 |
-
|
72 |
-
RUN --mount=target=$APP_CODE/requirements.txt,source=requirements.txt \
|
73 |
-
pip3 install --no-cache-dir --upgrade pip \
|
74 |
-
&& pip install --no-cache-dir --upgrade -r $APP_CODE/requirements.txt
|
75 |
-
|
76 |
-
# Copy our code to $APP_CODE and config
|
77 |
-
COPY --chown=user:user --chmod=u=rX src/ $APP_CODE/
|
78 |
-
|
79 |
-
COPY --chown=user ./start_*.sh $APP_HOME
|
80 |
-
RUN chmod +x $APP_HOME/start_*.sh
|
81 |
-
|
82 |
-
COPY --chown=user:user --chmod=u=rX .streamlit/ $APP_HOME/.streamlit/
|
83 |
-
|
84 |
-
# Persistent disk space
|
85 |
-
# Assumes a mount is passed to the docker command, like:
|
86 |
-
# $ docker ... --mount type=volume,src=ai-playground-vol,dst=/data ...
|
87 |
-
# see https://huggingface.co/docs/hub/spaces-storage
|
88 |
-
VOLUME /data
|
89 |
-
|
90 |
-
ENV PYTHONPATH=$APP_CODE:$PYTHONPATH
|
91 |
-
|
92 |
-
RUN ln -s $APP_DATA/ data
|
93 |
-
RUN ln -s $APP_CODE/ code
|
94 |
-
ADD jupyter/notebooks notebooks
|
95 |
-
|
96 |
-
# Expose streamlit application
|
97 |
-
EXPOSE 8501
|
98 |
-
EXPOSE 7860
|
99 |
-
ENTRYPOINT [ "bash", "-o", "allexport" ]
|
100 |
-
CMD [ "-f", "./start_server.sh" ]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Apex-X/nono/roop/processors/frame/face_enhancer.py
DELETED
@@ -1,104 +0,0 @@
|
|
1 |
-
from typing import Any, List, Callable
|
2 |
-
import cv2
|
3 |
-
import threading
|
4 |
-
from gfpgan.utils import GFPGANer
|
5 |
-
|
6 |
-
import roop.globals
|
7 |
-
import roop.processors.frame.core
|
8 |
-
from roop.core import update_status
|
9 |
-
from roop.face_analyser import get_many_faces
|
10 |
-
from roop.typing import Frame, Face
|
11 |
-
from roop.utilities import conditional_download, resolve_relative_path, is_image, is_video
|
12 |
-
|
13 |
-
FACE_ENHANCER = None
|
14 |
-
THREAD_SEMAPHORE = threading.Semaphore()
|
15 |
-
THREAD_LOCK = threading.Lock()
|
16 |
-
NAME = 'ROOP.FACE-ENHANCER'
|
17 |
-
|
18 |
-
|
19 |
-
def get_face_enhancer() -> Any:
|
20 |
-
global FACE_ENHANCER
|
21 |
-
|
22 |
-
with THREAD_LOCK:
|
23 |
-
if FACE_ENHANCER is None:
|
24 |
-
model_path = resolve_relative_path('../models/GFPGANv1.4.pth')
|
25 |
-
# todo: set models path -> https://github.com/TencentARC/GFPGAN/issues/399
|
26 |
-
FACE_ENHANCER = GFPGANer(model_path=model_path, upscale=1, device=get_device())
|
27 |
-
return FACE_ENHANCER
|
28 |
-
|
29 |
-
|
30 |
-
def get_device() -> str:
|
31 |
-
if 'CUDAExecutionProvider' in roop.globals.execution_providers:
|
32 |
-
return 'cuda'
|
33 |
-
if 'CoreMLExecutionProvider' in roop.globals.execution_providers:
|
34 |
-
return 'mps'
|
35 |
-
return 'cpu'
|
36 |
-
|
37 |
-
|
38 |
-
def clear_face_enhancer() -> None:
|
39 |
-
global FACE_ENHANCER
|
40 |
-
|
41 |
-
FACE_ENHANCER = None
|
42 |
-
|
43 |
-
|
44 |
-
def pre_check() -> bool:
|
45 |
-
download_directory_path = resolve_relative_path('../models')
|
46 |
-
conditional_download(download_directory_path, ['https://huggingface.co/henryruhs/roop/resolve/main/GFPGANv1.4.pth'])
|
47 |
-
return True
|
48 |
-
|
49 |
-
|
50 |
-
def pre_start() -> bool:
|
51 |
-
if not is_image(roop.globals.target_path) and not is_video(roop.globals.target_path):
|
52 |
-
update_status('Select an image or video for target path.', NAME)
|
53 |
-
return False
|
54 |
-
return True
|
55 |
-
|
56 |
-
|
57 |
-
def post_process() -> None:
|
58 |
-
clear_face_enhancer()
|
59 |
-
|
60 |
-
|
61 |
-
def enhance_face(target_face: Face, temp_frame: Frame) -> Frame:
|
62 |
-
start_x, start_y, end_x, end_y = map(int, target_face['bbox'])
|
63 |
-
padding_x = int((end_x - start_x) * 0.5)
|
64 |
-
padding_y = int((end_y - start_y) * 0.5)
|
65 |
-
start_x = max(0, start_x - padding_x)
|
66 |
-
start_y = max(0, start_y - padding_y)
|
67 |
-
end_x = max(0, end_x + padding_x)
|
68 |
-
end_y = max(0, end_y + padding_y)
|
69 |
-
temp_face = temp_frame[start_y:end_y, start_x:end_x]
|
70 |
-
if temp_face.size:
|
71 |
-
with THREAD_SEMAPHORE:
|
72 |
-
_, _, temp_face = get_face_enhancer().enhance(
|
73 |
-
temp_face,
|
74 |
-
paste_back=True
|
75 |
-
)
|
76 |
-
temp_frame[start_y:end_y, start_x:end_x] = temp_face
|
77 |
-
return temp_frame
|
78 |
-
|
79 |
-
|
80 |
-
def process_frame(source_face: Face, reference_face: Face, temp_frame: Frame) -> Frame:
|
81 |
-
many_faces = get_many_faces(temp_frame)
|
82 |
-
if many_faces:
|
83 |
-
for target_face in many_faces:
|
84 |
-
temp_frame = enhance_face(target_face, temp_frame)
|
85 |
-
return temp_frame
|
86 |
-
|
87 |
-
|
88 |
-
def process_frames(source_path: str, temp_frame_paths: List[str], update: Callable[[], None]) -> None:
|
89 |
-
for temp_frame_path in temp_frame_paths:
|
90 |
-
temp_frame = cv2.imread(temp_frame_path)
|
91 |
-
result = process_frame(None, None, temp_frame)
|
92 |
-
cv2.imwrite(temp_frame_path, result)
|
93 |
-
if update:
|
94 |
-
update()
|
95 |
-
|
96 |
-
|
97 |
-
def process_image(source_path: str, target_path: str, output_path: str) -> None:
|
98 |
-
target_frame = cv2.imread(target_path)
|
99 |
-
result = process_frame(None, None, target_frame)
|
100 |
-
cv2.imwrite(output_path, result)
|
101 |
-
|
102 |
-
|
103 |
-
def process_video(source_path: str, temp_frame_paths: List[str]) -> None:
|
104 |
-
roop.processors.frame.core.process_video(None, temp_frame_paths, process_frames)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Audiogen/vector-search-demo/app.py
DELETED
@@ -1,144 +0,0 @@
|
|
1 |
-
from transformers import ClapModel, ClapProcessor, AutoFeatureExtractor
|
2 |
-
import gradio as gr
|
3 |
-
import torch
|
4 |
-
import torchaudio
|
5 |
-
import os
|
6 |
-
import numpy as np
|
7 |
-
from qdrant_client import QdrantClient
|
8 |
-
from qdrant_client.http.models import Distance, VectorParams
|
9 |
-
from qdrant_client.http import models
|
10 |
-
|
11 |
-
|
12 |
-
import dotenv
|
13 |
-
dotenv.load_dotenv()
|
14 |
-
|
15 |
-
|
16 |
-
class ClapSSGradio():
|
17 |
-
|
18 |
-
def __init__(
|
19 |
-
self,
|
20 |
-
name,
|
21 |
-
model = "clap-2",
|
22 |
-
k=10,
|
23 |
-
):
|
24 |
-
|
25 |
-
self.name = name
|
26 |
-
self.k = k
|
27 |
-
|
28 |
-
self.model = ClapModel.from_pretrained(
|
29 |
-
f"Audiogen/{model}", use_auth_token=os.getenv('HUGGINGFACE_API_TOKEN'))
|
30 |
-
self.processor = ClapProcessor.from_pretrained(
|
31 |
-
f"Audiogen/{model}", use_auth_token=os.getenv('HUGGINGFACE_API_TOKEN'))
|
32 |
-
|
33 |
-
self.sas_token = os.environ['AZURE_SAS_TOKEN']
|
34 |
-
self.account_name = 'Audiogen'
|
35 |
-
self.storage_name = 'audiogentrainingdataeun'
|
36 |
-
|
37 |
-
self._start_qdrant()
|
38 |
-
|
39 |
-
def _start_qdrant(self):
|
40 |
-
self.client = QdrantClient(url=os.getenv(
|
41 |
-
"QDRANT_URL"), api_key=os.getenv('QDRANT_API_KEY'))
|
42 |
-
# print(self.client.get_collection(collection_name=self.name))
|
43 |
-
|
44 |
-
@torch.no_grad()
|
45 |
-
def _embed_query(self, query, audio_file):
|
46 |
-
if audio_file is not None:
|
47 |
-
waveform, sample_rate = torchaudio.load(audio_file.name)
|
48 |
-
print("Waveform shape:", waveform.shape)
|
49 |
-
waveform = torchaudio.functional.resample(
|
50 |
-
waveform, sample_rate, 48000)
|
51 |
-
print("Resampled waveform shape:", waveform.shape)
|
52 |
-
|
53 |
-
if waveform.shape[-1] < 480000:
|
54 |
-
waveform = torch.nn.functional.pad(
|
55 |
-
waveform, (0, 48000 - waveform.shape[-1]))
|
56 |
-
elif waveform.shape[-1] > 480000:
|
57 |
-
waveform = waveform[..., :480000]
|
58 |
-
|
59 |
-
audio_prompt_features = self.processor(
|
60 |
-
audios=waveform.mean(0), return_tensors='pt', sampling_rate=48000
|
61 |
-
)['input_features']
|
62 |
-
print("Audio prompt features shape:", audio_prompt_features.shape)
|
63 |
-
e = self.model.get_audio_features(
|
64 |
-
input_features=audio_prompt_features)[0]
|
65 |
-
|
66 |
-
if any(torch.isnan(e)):
|
67 |
-
raise ValueError("Audio features are NaN")
|
68 |
-
print("Embeddings: ", e.shape)
|
69 |
-
return e
|
70 |
-
else:
|
71 |
-
inputs = self.processor(
|
72 |
-
query, return_tensors="pt", padding='max_length', max_length=77, truncation=True)
|
73 |
-
|
74 |
-
return self.model.get_text_features(**inputs).cpu().numpy().tolist()[0]
|
75 |
-
|
76 |
-
def _similarity_search(self, query, threshold, audio_file):
|
77 |
-
results = self.client.search(
|
78 |
-
collection_name=self.name,
|
79 |
-
query_vector=self._embed_query(query, audio_file),
|
80 |
-
limit=self.k,
|
81 |
-
score_threshold=threshold,
|
82 |
-
)
|
83 |
-
|
84 |
-
containers = [result.payload['container'] for result in results]
|
85 |
-
filenames = [result.id for result in results]
|
86 |
-
captions = [result.payload['caption'] for result in results]
|
87 |
-
scores = [result.score for result in results]
|
88 |
-
|
89 |
-
# print to stdout
|
90 |
-
print(f"\nQuery: {query}\n")
|
91 |
-
for i, (container, filename, caption, score) in enumerate(zip(containers, filenames, captions, scores)):
|
92 |
-
print(f"{i}: {container} - {caption}. Score: {score}")
|
93 |
-
|
94 |
-
waveforms = self._download_results(containers, filenames)
|
95 |
-
|
96 |
-
if len(waveforms) == 0:
|
97 |
-
print("\nNo results found")
|
98 |
-
|
99 |
-
if len(waveforms) < self.k:
|
100 |
-
waveforms.extend([(int(48000), np.zeros((480000, 2)))
|
101 |
-
for _ in range(self.k - len(waveforms))])
|
102 |
-
|
103 |
-
return waveforms
|
104 |
-
|
105 |
-
def _download_results(self, containers: list, filenames: list):
|
106 |
-
|
107 |
-
|
108 |
-
|
109 |
-
# construct url
|
110 |
-
urls = [f"https://{self.storage_name}.blob.core.windows.net/snake/{file_name}.flac?{self.sas_token}" for file_name in filenames]
|
111 |
-
|
112 |
-
# make requests
|
113 |
-
waveforms = []
|
114 |
-
for url in urls:
|
115 |
-
waveform, sample_rate = torchaudio.load(url)
|
116 |
-
waveforms.append(tuple([sample_rate, waveform.numpy().T]))
|
117 |
-
|
118 |
-
return waveforms
|
119 |
-
|
120 |
-
def launch(self, share=False):
|
121 |
-
# gradio app structure
|
122 |
-
with gr.Blocks(title='Clap Semantic Search') as ui:
|
123 |
-
with gr.Row():
|
124 |
-
with gr.Column(variant='panel'):
|
125 |
-
search = gr.Textbox(placeholder='Search Samples')
|
126 |
-
float_input = gr.Number(
|
127 |
-
label='Similarity threshold [min: 0.1 max: 1]', value=0.5, minimum=0.1, maximum=1)
|
128 |
-
audio_file = gr.File(
|
129 |
-
label='Upload an Audio File', type="file")
|
130 |
-
search_button = gr.Button("Search", label='Search')
|
131 |
-
with gr.Column():
|
132 |
-
audioboxes = []
|
133 |
-
gr.Markdown("Output")
|
134 |
-
for i in range(self.k):
|
135 |
-
t = gr.components.Audio(label=f"{i}", visible=True)
|
136 |
-
audioboxes.append(t)
|
137 |
-
search_button.click(fn=self._similarity_search, inputs=[
|
138 |
-
search, float_input, audio_file], outputs=audioboxes)
|
139 |
-
ui.launch(share=share)
|
140 |
-
|
141 |
-
|
142 |
-
if __name__ == "__main__":
|
143 |
-
app = ClapSSGradio("demo")
|
144 |
-
app.launch(share=False)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/data/test_dataset.py
DELETED
@@ -1,134 +0,0 @@
|
|
1 |
-
# Copyright (c) Facebook, Inc. and its affiliates.
|
2 |
-
|
3 |
-
import os
|
4 |
-
import pickle
|
5 |
-
import sys
|
6 |
-
import unittest
|
7 |
-
from functools import partial
|
8 |
-
import torch
|
9 |
-
from iopath.common.file_io import LazyPath
|
10 |
-
|
11 |
-
from detectron2 import model_zoo
|
12 |
-
from detectron2.config import instantiate
|
13 |
-
from detectron2.data import (
|
14 |
-
DatasetFromList,
|
15 |
-
MapDataset,
|
16 |
-
ToIterableDataset,
|
17 |
-
build_batch_data_loader,
|
18 |
-
build_detection_test_loader,
|
19 |
-
build_detection_train_loader,
|
20 |
-
)
|
21 |
-
from detectron2.data.samplers import InferenceSampler, TrainingSampler
|
22 |
-
|
23 |
-
|
24 |
-
def _a_slow_func(x):
|
25 |
-
return "path/{}".format(x)
|
26 |
-
|
27 |
-
|
28 |
-
class TestDatasetFromList(unittest.TestCase):
|
29 |
-
# Failing for py3.6, likely due to pickle
|
30 |
-
@unittest.skipIf(sys.version_info.minor <= 6, "Not supported in Python 3.6")
|
31 |
-
def test_using_lazy_path(self):
|
32 |
-
dataset = []
|
33 |
-
for i in range(10):
|
34 |
-
dataset.append({"file_name": LazyPath(partial(_a_slow_func, i))})
|
35 |
-
|
36 |
-
dataset = DatasetFromList(dataset)
|
37 |
-
for i in range(10):
|
38 |
-
path = dataset[i]["file_name"]
|
39 |
-
self.assertTrue(isinstance(path, LazyPath))
|
40 |
-
self.assertEqual(os.fspath(path), _a_slow_func(i))
|
41 |
-
|
42 |
-
|
43 |
-
class TestMapDataset(unittest.TestCase):
|
44 |
-
@staticmethod
|
45 |
-
def map_func(x):
|
46 |
-
if x == 2:
|
47 |
-
return None
|
48 |
-
return x * 2
|
49 |
-
|
50 |
-
def test_map_style(self):
|
51 |
-
ds = DatasetFromList([1, 2, 3])
|
52 |
-
ds = MapDataset(ds, TestMapDataset.map_func)
|
53 |
-
self.assertEqual(ds[0], 2)
|
54 |
-
self.assertEqual(ds[2], 6)
|
55 |
-
self.assertIn(ds[1], [2, 6])
|
56 |
-
|
57 |
-
def test_iter_style(self):
|
58 |
-
class DS(torch.utils.data.IterableDataset):
|
59 |
-
def __iter__(self):
|
60 |
-
yield from [1, 2, 3]
|
61 |
-
|
62 |
-
ds = DS()
|
63 |
-
ds = MapDataset(ds, TestMapDataset.map_func)
|
64 |
-
self.assertIsInstance(ds, torch.utils.data.IterableDataset)
|
65 |
-
|
66 |
-
data = list(iter(ds))
|
67 |
-
self.assertEqual(data, [2, 6])
|
68 |
-
|
69 |
-
def test_pickleability(self):
|
70 |
-
ds = DatasetFromList([1, 2, 3])
|
71 |
-
ds = MapDataset(ds, lambda x: x * 2)
|
72 |
-
ds = pickle.loads(pickle.dumps(ds))
|
73 |
-
self.assertEqual(ds[0], 2)
|
74 |
-
|
75 |
-
|
76 |
-
class TestDataLoader(unittest.TestCase):
|
77 |
-
def _get_kwargs(self):
|
78 |
-
# get kwargs of build_detection_train_loader
|
79 |
-
cfg = model_zoo.get_config("common/data/coco.py").dataloader.train
|
80 |
-
cfg.dataset.names = "coco_2017_val_100"
|
81 |
-
cfg.pop("_target_")
|
82 |
-
kwargs = {k: instantiate(v) for k, v in cfg.items()}
|
83 |
-
return kwargs
|
84 |
-
|
85 |
-
def test_build_dataloader_train(self):
|
86 |
-
kwargs = self._get_kwargs()
|
87 |
-
dl = build_detection_train_loader(**kwargs)
|
88 |
-
next(iter(dl))
|
89 |
-
|
90 |
-
def test_build_iterable_dataloader_train(self):
|
91 |
-
kwargs = self._get_kwargs()
|
92 |
-
ds = DatasetFromList(kwargs.pop("dataset"))
|
93 |
-
ds = ToIterableDataset(ds, TrainingSampler(len(ds)))
|
94 |
-
dl = build_detection_train_loader(dataset=ds, **kwargs)
|
95 |
-
next(iter(dl))
|
96 |
-
|
97 |
-
def _check_is_range(self, data_loader, N):
|
98 |
-
# check that data_loader produces range(N)
|
99 |
-
data = list(iter(data_loader))
|
100 |
-
data = [x for batch in data for x in batch] # flatten the batches
|
101 |
-
self.assertEqual(len(data), N)
|
102 |
-
self.assertEqual(set(data), set(range(N)))
|
103 |
-
|
104 |
-
def test_build_batch_dataloader_inference(self):
|
105 |
-
# Test that build_batch_data_loader can be used for inference
|
106 |
-
N = 96
|
107 |
-
ds = DatasetFromList(list(range(N)))
|
108 |
-
sampler = InferenceSampler(len(ds))
|
109 |
-
dl = build_batch_data_loader(ds, sampler, 8, num_workers=3)
|
110 |
-
self._check_is_range(dl, N)
|
111 |
-
|
112 |
-
def test_build_dataloader_inference(self):
|
113 |
-
N = 50
|
114 |
-
ds = DatasetFromList(list(range(N)))
|
115 |
-
sampler = InferenceSampler(len(ds))
|
116 |
-
# test that parallel loader works correctly
|
117 |
-
dl = build_detection_test_loader(
|
118 |
-
dataset=ds, sampler=sampler, mapper=lambda x: x, num_workers=3
|
119 |
-
)
|
120 |
-
self._check_is_range(dl, N)
|
121 |
-
|
122 |
-
# test that batch_size works correctly
|
123 |
-
dl = build_detection_test_loader(
|
124 |
-
dataset=ds, sampler=sampler, mapper=lambda x: x, batch_size=4, num_workers=0
|
125 |
-
)
|
126 |
-
self._check_is_range(dl, N)
|
127 |
-
|
128 |
-
def test_build_iterable_dataloader_inference(self):
|
129 |
-
# Test that build_detection_test_loader supports iterable dataset
|
130 |
-
N = 50
|
131 |
-
ds = DatasetFromList(list(range(N)))
|
132 |
-
ds = ToIterableDataset(ds, InferenceSampler(len(ds)))
|
133 |
-
dl = build_detection_test_loader(dataset=ds, mapper=lambda x: x, num_workers=3)
|
134 |
-
self._check_is_range(dl, N)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/BMukhtar/BookRecognitionKz/README.md
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: BookRecognitionKz
|
3 |
-
emoji: 🏃
|
4 |
-
colorFrom: blue
|
5 |
-
colorTo: gray
|
6 |
-
sdk: streamlit
|
7 |
-
sdk_version: 1.27.2
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: apache-2.0
|
11 |
-
---
|
12 |
-
|
13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Ajedrez Final De Juego Estudios Mod Apk.md
DELETED
@@ -1,74 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Ajedrez Endgame Estudios Mod Apk: Una guía para los amantes del ajedrez</h1>
|
3 |
-
<p>Si usted es un entusiasta del ajedrez que quiere llevar su juego al siguiente nivel, usted debe probar definitivamente finales de ajedrez estudios mod apk. Los estudios de finales de ajedrez son posiciones compuestas que desafían tus habilidades y creatividad en la fase final del juego, donde solo quedan unas pocas piezas en el tablero. No solo son hermosas e instructivas, sino también útiles para mejorar tu cálculo, táctica, estrategia y comprensión del verdadero valor de los peones y las piezas. </p>
|
4 |
-
<h2>Beneficios de los estudios de finales de ajedrez</h2>
|
5 |
-
<p>Los estudios de finales de ajedrez no solo son divertidos y desafiantes, sino que también son beneficiosos para su desarrollo de ajedrez. Estos son algunos de los beneficios de los estudios de finales de ajedrez:</p>
|
6 |
-
<h2>ajedrez final de juego estudios mod apk</h2><br /><p><b><b>Download</b> 🔗 <a href="https://bltlly.com/2v6LBs">https://bltlly.com/2v6LBs</a></b></p><br /><br />
|
7 |
-
<ul>
|
8 |
-
<li>Mejoran tus habilidades de cálculo. Los estudios de finales de ajedrez a menudo requieren un cálculo preciso y profundo para encontrar los mejores movimientos y evitar errores. Practicándolos regularmente, puedes agudizar tus habilidades mentales y mejorar tu precisión. </li>
|
9 |
-
Mejoran tu visión táctica. Los estudios de finales de ajedrez están llenos de tácticas sorprendentes y hermosas, como sacrificios, tenedores, alfileres, brochetas, puntos muertos, zugzwangs y más. Al resolverlos, puedes entrenar tu ojo para detectar estos motivos y aplicarlos en tus propios juegos. </li>
|
10 |
-
<li>Te enseñan principios estratégicos. Los estudios de finales de ajedrez no son solo sobre tácticas, sino también sobre estrategia. Te muestran cómo usar el rey, los peones y las piezas de manera efectiva en diferentes tipos de terminaciones, como terminaciones de peones, terminaciones de torres, terminaciones de piezas menores, etc. También demuestran la importancia de conceptos como oposición, triangulación, avance, peones pasados, etc.</li>
|
11 |
-
|
12 |
-
<li>Aumentan su disfrute del ajedrez. Los estudios de finales de ajedrez no solo son educativos, sino también entretenidos. Muestran la belleza y la elegancia de los finales de ajedrez, y a menudo revelan soluciones inesperadas y sorprendentes que pueden sorprenderlo y deleitarlo. También te retan a pensar creativamente y encontrar tus propias soluciones. </li>
|
13 |
-
</ul>
|
14 |
-
<h2>Las mejores aplicaciones para los estudios de finales de ajedrez</h2>
|
15 |
-
<p>Si quieres practicar estudios de ajedrez en tu dispositivo móvil, tienes muchas opciones para elegir. Hay muchas aplicaciones que ofrecen estudios de finales de ajedrez para diferentes niveles de jugadores, desde principiantes hasta expertos. Aquí están algunas de las mejores aplicaciones para estudios de finales de ajedrez:</p>
|
16 |
-
<tabla>
|
17 |
-
<tr>
|
18 |
-
<th>Nombre de la aplicación</th>
|
19 |
-
<th>Características</th>
|
20 |
-
<th>Ventajas</th>
|
21 |
-
<th>Desventajas</th>
|
22 |
-
<th>Calificaciones</th>
|
23 |
-
<th>Precios</th>
|
24 |
-
</tr>
|
25 |
-
<tr>
|
26 |
-
<td><a href="">Estudios de finales de ajedrez por Chess King</a></td>
|
27 |
-
<td>- Más de 1200 estudios de finales por compositores famosos<br>- 6 niveles de dificultad<br>- Sugerencias y soluciones<br>- Estadísticas y tablas de clasificación<br>- Funciona offline</td>
|
28 |
-
<td>- Rompecabezas de alta calidad y diversos<br>- Adecuado para todos los niveles<br>- Interfaz fácil de usar<br>- No hay anuncios</td>
|
29 |
-
<td>- Algunos rompecabezas pueden ser demasiado duro o demasiado fácil<br>- Ninguna opción para crear o importar sus propios rompecabezas<br>- Ninguna opción para jugar contra la computadora u otros jugadores</td>
|
30 |
-
<td>4.6 de 5 estrellas en Google Play Store<br>4.8 de 5 estrellas en App Store</td>
|
31 |
-
<td>$4.99 en Google Play Store<br>$3.99 en App Store</td>
|
32 |
-
</tr>
|
33 |
-
<tr>
|
34 |
-
<td><a href="">Finales de ajedrez por Chess.com</a></td>
|
35 |
-
<td>- Más de 1000 rompecabezas finales de los mejores entrenadores<br>- 5 niveles de dificultad<br>- Sugerencias y explicaciones<br>- Sistema de seguimiento de progreso y clasificación<br>- Funciona en línea y sin conexión</td>
|
36 |
-
<td>- Rompecabezas de alta calidad e instructivos<br>- Adecuado para todos los niveles<br>- Interfaz interactiva y atractiva<br>- De uso gratuito</td>
|
37 |
-
|
38 |
-
<td>4.5 de 5 estrellas en Google Play Store<br>4.7 de 5 estrellas en App Store</td>
|
39 |
-
<td>Gratis en Google Play Store y App Store</td>
|
40 |
-
</tr>
|
41 |
-
<tr>
|
42 |
-
<td><a href="">Estudios de finales de ajedrez por ChessOK</a></td>
|
43 |
-
<td>- Más de 700 estudios finales de compositores famosos<br>- 3 niveles de dificultad<br>- Sugerencias y soluciones<br>- Estadísticas y logros<br>- Trabajos offline</td>
|
44 |
-
<td>- Rompecabezas diversos y de alta calidad<br>- Adecuado para niveles intermedios y avanzados<br>- Interfaz simple y elegante<br>- No hay anuncios</td>
|
45 |
-
<td>- Algunos rompecabezas pueden ser demasiado duro o demasiado fácil<br>- No es adecuado para principiantes<br>- No hay opción para crear o importar sus propios puzzles<br>- No hay opción para jugar contra el ordenador u otros jugadores</td>
|
46 |
-
<td>4.4 de 5 estrellas en Google Play Store<br>N/A en App Store</td>
|
47 |
-
<td>$1.99 en Google Play Store<br>$0.99 en App Store</td>
|
48 |
-
</tr>
|
49 |
-
</tabla>
|
50 |
-
<p>Como se puede ver, hay muchas opciones para elegir cuando se trata de ajedrez estudios finales mod apk. Puede comparar y contrastar las características, ventajas, desventajas, calificaciones y precios de cada aplicación y decidir cuál se adapta mejor a sus necesidades y preferencias. También puedes probar diferentes aplicaciones y ver cuál te gusta más. </p>
|
51 |
-
<h2>Cómo utilizar los estudios de ajedrez Endgame Mod Apk</h2>
|
52 |
-
<p>Una vez que haya descargado e instalado su ajedrez elegido estudios finales mod apk, puede empezar a usarlo para practicar y mejorar sus habilidades de ajedrez final. Aquí hay algunos consejos y consejos sobre cómo utilizar los estudios de ajedrez final apk mod con eficacia:</p>
|
53 |
-
<ul>
|
54 |
-
<li>Elige el nivel de dificultad adecuado. La mayoría de las aplicaciones ofrecen diferentes niveles de dificultad, que van de fácil a difícil. Debes elegir el nivel que coincida con tu habilidad y conocimiento actuales. Si los puzzles son demasiado fáciles, no aprenderás mucho. Si son demasiado difíciles, te frustrarás y perderás motivación. También puedes ajustar el nivel a medida que avanzas y mejoras. </li>
|
55 |
-
|
56 |
-
<li>Seguimiento de su progreso. La mayoría de las aplicaciones ofrecen estadísticas y tablas de clasificación que le permiten realizar un seguimiento de su progreso y rendimiento. Puedes ver cuántos puzzles has resuelto, cuántos has acertado o errado, cuánto tiempo has pasado, cuál es tu calificación, etc. También puedes comparar tus resultados con otros usuarios y ver cómo te clasificas entre ellos. Puede utilizar estos datos para medir su mejora y establecer sus objetivos. </li>
|
57 |
-
<li>Diviértete. Lo más importante es divertirse mientras se utiliza el ajedrez estudios finales mod apk. Usted debe disfrutar del proceso de resolver rompecabezas, aprender de ellos, y descubrir nuevas ideas y conceptos. También debe apreciar la belleza y elegancia de los finales de ajedrez, y admirar la creatividad y habilidad de los compositores y solucionadores. Los estudios de finales de ajedrez no son solo una herramienta para mejorar, sino también una fuente de alegría e inspiración. </li>
|
58 |
-
</ul>
|
59 |
-
<h2>Conclusión</h2>
|
60 |
-
<p>Ajedrez estudios finales mod apk es una gran manera de practicar y mejorar sus habilidades finales de ajedrez en su dispositivo móvil. Puedes acceder a una gran base de datos de estudios de finales de ajedrez que desafían tus habilidades y creatividad en la fase final del juego. También puedes aprender de los mejores compositores y solucionadores, y disfrutar de la belleza y elegancia de los finales de ajedrez. </p>
|
61 |
-
<p>Si usted es un amante del ajedrez que quiere llevar su juego al siguiente nivel, usted debe probar definitivamente finales de ajedrez estudios mod apk. No solo mejorará su cálculo, táctica, estrategia y conocimiento de finales teóricos, sino que también aumentará su disfrute del ajedrez. </p>
|
62 |
-
<p>Entonces, ¿qué estás esperando? Descargar ajedrez estudios finales mod apk hoy y empezar a resolver puzzles! </p>
|
63 |
-
<h2>Preguntas frecuentes</h2>
|
64 |
-
<p>Aquí hay algunas preguntas frecuentes y respuestas acerca de los estudios de ajedrez final apk mod:</p>
|
65 |
-
<p></p>
|
66 |
-
<ol>
|
67 |
-
|
68 |
-
<li><b>Q: ¿Es seguro usar los estudios de ajedrez mod apk? </b><br>A: Depende de la fuente y la calidad del mod apk. Algunos apks mod pueden contener virus o malware que pueden dañar su dispositivo o comprometer sus datos. Algunos mod apks también pueden violar los términos de servicio o la política de privacidad de los desarrolladores de aplicaciones originales, lo que puede resultar en problemas legales o suspensión de la cuenta. Siempre debe leer los comentarios y calificaciones de otros usuarios antes de descargar cualquier apk mod, y utilizarlo a su propio riesgo. </li>
|
69 |
-
<li><b>P: ¿Cuáles son algunas de las mejores fuentes para los estudios de finales de ajedrez? </b><br>A: Algunas de las mejores fuentes para los estudios de finales de ajedrez son libros, revistas, sitios web, bases de datos y aplicaciones que se especializan en este campo. Algunos ejemplos son:<br>- The Art of the Endgame por Jan Timman<br>- EG Magazine por Harold van der Heijden<br>- [ChessCafe.com]<br>- [ChessBase.com]<br - [Chess.com]<br>- [ChessKing.com]<br>[ChessBase.com]<[ChessOK.com]<li
|
70 |
-
<li><b>Q: ¿Cómo puedo mejorar mis habilidades de ajedrez? </b><br>A: Hay muchas maneras de mejorar tus habilidades de juego final de ajedrez, tales como:<br>- Estudiar las terminaciones básicas y teóricas, tales como terminaciones de rey y peón, terminaciones de torre y de peón, etc.<br>- Practicar estudios y rompecabezas de finales de ajedrez regularmente. <br>- Analizar tus propios juegos e identificar tus errores y debilidades en el final del juego. <br>- Jugar juegos de práctica de finales contra el ordenador u otros jugadores. <br>- Leer libros y artículos sobre la teoría y la estrategia de finales de ajedrez. <br>- Viendo videos y conferencias de expertos y maestros de finales de ajedrez. </li>
|
71 |
-
<li><b>P: ¿Quiénes son algunos de los mejores compositores y solucionadores de finales de ajedrez? </b><br>A: Hay muchos compositores y solucionadores de finales de ajedrez que han contribuido al arte y la ciencia de los finales de ajedrez. Algunos de los más famosos e influyentes son:<br>- Alexey Troitsky<br>- Henri Rinck<br>- Richard Reti<br>- Leonid Kubbel<br>- Genrikh Kasparyan<br>- Yuri Bakh<br>- John Nunn<br>- Pal Benko<>>- Jan Timbrman<>- Yoan Chank/li
|
72 |
-
</ol> 64aa2da5cf<br />
|
73 |
-
<br />
|
74 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Controladores De Gigabyte H370m D3h.md
DELETED
@@ -1,129 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Cómo descargar e instalar controladores Gigabyte H370M D3H</h1>
|
3 |
-
<p>Si tiene una placa base Gigabyte H370M D3H en su computadora, es posible que se pregunte cómo descargar e instalar los últimos controladores para ella. Los controladores son componentes de software esenciales que permiten que su placa madre y sus dispositivos conectados se comuniquen con su sistema operativo. Sin controladores, es posible que su computadora no funcione correctamente o en absoluto. </p>
|
4 |
-
<h2>controladores de gigabyte h370m d3h</h2><br /><p><b><b>Download File</b> > <a href="https://bltlly.com/2v6J0r">https://bltlly.com/2v6J0r</a></b></p><br /><br />
|
5 |
-
<p>En este artículo, le mostraremos cómo encontrar, descargar e instalar los controladores correctos para su placa base Gigabyte H370M D3H. También explicaremos cuáles son las principales características de esta placa base y por qué necesita controladores para ella. Siguiendo nuestra guía, podrás disfrutar del mejor rendimiento y estabilidad de tu ordenador. </p>
|
6 |
-
<h2>Introducción</h2>
|
7 |
-
<h3> ¿Qué es la placa base Gigabyte H370M D3H y por qué necesita controladores para ella</h3>
|
8 |
-
<p>Gigabyte H370M D3H es una placa madre micro ATX que admite procesadores Intel Core de 9a y 8a generación. Tiene un nuevo diseño híbrido digital PWM, ranuras duales M.2, soporte RGB Fusión, puertos Intel GbE LAN con cFosSpeed, USB 3.1 Gen2 Type-C y Type-A, memoria Intel OptaneTM y más características que lo convierten en una gran opción para construir un PC potente.</p>
|
9 |
-
<p>Sin embargo, para aprovechar al máximo estas características, necesita instalar los controladores adecuados para su placa madre. Los controladores son como traductores que ayudan a su placa base y sus dispositivos conectados a comunicarse con su sistema operativo. Sin controladores, es posible que su sistema operativo no reconozca o no use algunas de las características o dispositivos de su placa base. Esto podría resultar en un rendimiento deficiente, errores, fallos o incluso daños de hardware. </p>
|
10 |
-
<p>Por lo tanto, es importante descargar e instalar los últimos controladores para su placa base Gigabyte H370M D3H. Esto asegurará que su computadora funcione sin problemas y de manera eficiente. </p>
|
11 |
-
<p></p>
|
12 |
-
<h3> ¿Cuáles son las principales características de la placa base Gigabyte H370M D3H</h3>
|
13 |
-
|
14 |
-
<ul>
|
15 |
-
<li><strong>Ranuras duales M.2:</strong> Estas ranuras le permiten instalar dos SSD NVMe PCIe que pueden aumentar su rendimiento y velocidad de almacenamiento. También puede utilizar la memoria Intel OptaneTM para acelerar su disco duro.</li>
|
16 |
-
<li><strong>RGB Fusión support:</strong> Esta característica le permite personalizar los efectos de iluminación de su placa base y tiras de led RGB conectadas en 7 colores. También puede sincronizar la iluminación con otros dispositivos compatibles utilizando el encabezado RGBW pin. </li>
|
17 |
-
<li><strong>Intel GbE LAN con cFosSpeed:</strong> Esta característica le proporciona una conexión de red rápida y estable con baja latencia y alto ancho de banda. También tiene un software acelerador de Internet que optimiza el tráfico de red. </li>
|
18 |
-
<li><strong>USB 3.1 Gen2 Puertos tipo C y tipo A:</strong> Estos puertos le ofrecen velocidades de transferencia de datos rápidas de hasta 10 Gbps y admiten varios dispositivos como unidades externas, monitores, cargadores, etc.</li>
|
19 |
-
<li><strong>Memoria Intel OptaneTM lista:</strong> Esta característica le permite utilizar módulos de memoria Intel OptaneTM que actúan como una unidad de caché para su disco duro y mejoran la capacidad de respuesta del sistema y el tiempo de arranque. </li>
|
20 |
-
<li><strong>Nuevo diseño híbrido de PWM digital:</strong> Este diseño proporciona a su placa base una entrega de energía precisa y estable y mejora su durabilidad y fiabilidad. </li>
|
21 |
-
</ul>
|
22 |
-
<p>Estas son solo algunas de las principales características de la placa base Gigabyte H370M D3H. Puede encontrar más detalles y especificaciones en el sitio web oficial de Gigabyte. </p>
|
23 |
-
<h3>Cómo encontrar los controladores correctos para tu placa madre</h3>
|
24 |
-
<p>Antes de descargar e instalar los controladores para su placa madre, debe asegurarse de encontrar los correctos para su modelo y sistema operativo específico. Hay dos maneras de hacer esto:</p>
|
25 |
-
<ol>
|
26 |
-
|
27 |
-
<li><strong>Utilice una herramienta de actualización de controladores de terceros:</strong> Esta es una forma alternativa y conveniente de encontrar los controladores correctos para su placa base. Puede usar una herramienta de actualización de controladores que escanea su computadora y detecta y descarga automáticamente los controladores que necesita. Sin embargo, debe tener cuidado al elegir una herramienta de actualización de controladores, ya que algunos de ellos pueden ser poco fiables o maliciosos. Solo debe usar una herramienta confiable y de buena reputación que tenga reseñas y calificaciones positivas de otros usuarios. </li>
|
28 |
-
</ol>
|
29 |
-
<p>Cualquiera que sea la forma que elija, siempre debe descargar los controladores de una fuente segura y oficial. También debe comprobar la versión del controlador y la fecha antes de descargarlo, para asegurarse de que es el último y compatible. </p>
|
30 |
-
<h2>Cómo descargar controladores Gigabyte H370M D3H</h2>
|
31 |
-
<h3>Cómo utilizar el sitio web oficial de Gigabyte para descargar controladores</h3>
|
32 |
-
<p>Si desea utilizar el sitio web oficial de Gigabyte para descargar controladores, puede seguir estos pasos:</p>
|
33 |
-
<ol>
|
34 |
-
<li>Visite el sitio web de Gigabyte e introduzca su modelo de placa base en el cuadro de búsqueda. </li>
|
35 |
-
<li>Seleccione su sistema operativo desde el menú desplegable. </li>
|
36 |
-
<li>Verá una lista de controladores para diferentes categorías como audio, chipset, LAN, etc. Haga clic en la categoría que desea descargar. </li>
|
37 |
-
<li> Verá una lista de versiones y fechas del controlador. Haga clic en el icono de descarga junto a la versión del controlador más reciente o compatible. </li>
|
38 |
-
<li>Será redirigido a una página de descarga. Haga clic en el botón de descarga y guarde el archivo del controlador en su computadora. </li>
|
39 |
-
<li>Repita estos pasos para cualquier otra categoría de controlador que desee descargar. </li>
|
40 |
-
</ol>
|
41 |
-
<h3>Cómo usar una herramienta de actualización de controladores de terceros para descargar controladores</h3>
|
42 |
-
<p>Si desea utilizar una herramienta de actualización de controladores de terceros para descargar controladores, puede seguir estos pasos:</p>
|
43 |
-
<ol>
|
44 |
-
|
45 |
-
<li>Inicie la herramienta de actualización de controladores y escanee su computadora. La herramienta detectará automáticamente el modelo de la placa base y el sistema operativo, y encontrará los controladores que necesita. </li>
|
46 |
-
<li> Verá una lista de controladores que están desactualizados, que faltan o que son incompatibles. Puede optar por actualizar todos ellos o seleccionar los que desea actualizar. </li>
|
47 |
-
<li>La herramienta descargará e instalará los controladores por usted. Es posible que necesite reiniciar su computadora después de que se complete la instalación. </li>
|
48 |
-
</ol>
|
49 |
-
<h2>Cómo instalar controladores Gigabyte H370M D3H</h2>
|
50 |
-
<h3>Cómo instalar controladores desde una carpeta o una unidad USB</h3>
|
51 |
-
<p>Si ha descargado los controladores del sitio web oficial de Gigabyte o los ha guardado en una carpeta o una unidad USB, puede instalarlos siguiendo estos pasos:</p>
|
52 |
-
<ol>
|
53 |
-
<li>Localice el archivo de controlador que ha descargado o guardado en su computadora o unidad USB. Debe tener una extensión . exe o . zip. </li>
|
54 |
-
<li> Si el archivo es un archivo . exe, haga doble clic en él y siga las instrucciones en pantalla para instalarlo. </li>
|
55 |
-
<li>Si el archivo es un archivo . zip, haga clic derecho sobre él y seleccione Extraer todo. A continuación, abra la carpeta extraída y busque un archivo setup.exe o install.exe. Haga doble clic en él y siga las instrucciones en pantalla para instalarlo. </li>
|
56 |
-
<li>Repita estos pasos para cualquier otro archivo de controlador que haya descargado o guardado. </li>
|
57 |
-
<li>Es posible que tenga que reiniciar el equipo después de instalar todos los controladores. </li>
|
58 |
-
</ol>
|
59 |
-
<h3>Cómo instalar controladores usando Administrador de dispositivos o Windows Update</h3>
|
60 |
-
<p>Si no ha descargado o guardado ningún archivo de controlador, también puede intentar instalarlo usando el Administrador de dispositivos o Windows Update. Estos están construidos en Windows y pueden ayudarle a encontrar e instalar los controladores que son compatibles con su placa base y sistema operativo. Puedes seguir estos pasos:</p>
|
61 |
-
<ol>
|
62 |
-
<li>Abra el Administrador de dispositivos presionando las teclas Windows + X y seleccionando Administrador de dispositivos desde el menú. </li>
|
63 |
-
|
64 |
-
<li>Haga clic derecho en el dispositivo que desea actualizar y seleccione Actualizar controlador del menú. </li>
|
65 |
-
<li>Verá dos opciones: Busque automáticamente el software de controlador actualizado o Busque el software de controlador en mi computadora. Puede elegir cualquiera de las opciones dependiendo de su preferencia. </li>
|
66 |
-
<li>Si elige Buscar automáticamente, Windows buscará en línea el mejor controlador para su dispositivo e instalarlo por usted. </li>
|
67 |
-
<li>Si elige Navegar por mi ordenador, tendrá que localizar el archivo de controlador que ha descargado o guardado en su ordenador o unidad USB. Luego, siga las instrucciones en pantalla para instalarlo. </li>
|
68 |
-
<li>Repita estos pasos para cualquier otro dispositivo que desee actualizar. </li>
|
69 |
-
<li>Es posible que tenga que reiniciar el equipo después de instalar todos los controladores. </li>
|
70 |
-
</ol>
|
71 |
-
<p>Alternativamente, también puede usar Windows Update para verificar e instalar los controladores que están disponibles para la placa base y el sistema operativo. Puedes seguir estos pasos:</p>
|
72 |
-
<ol>
|
73 |
-
<li>Abra la configuración presionando las teclas Windows + I y seleccione Actualizar y Seguridad en el menú. </li>
|
74 |
-
<li>Seleccione Windows Update desde el panel izquierdo y haga clic en Buscar actualizaciones desde el panel derecho. </li>
|
75 |
-
<li>Windows escaneará su computadora y le mostrará cualquier actualización disponible, incluidos los controladores. Puede elegir instalar todos ellos o seleccionar los que desea instalar. </li>
|
76 |
-
<li>Siga las instrucciones en pantalla para instalar las actualizaciones. </li>
|
77 |
-
<li>Es posible que tenga que reiniciar el equipo después de instalar todas las actualizaciones. </li>
|
78 |
-
</ol>
|
79 |
-
<h2>Conclusión</h2>
|
80 |
-
<p>En este artículo, le hemos mostrado cómo descargar e instalar los últimos controladores para su placa base Gigabyte H370M D3H. También hemos explicado cuáles son las principales características de esta placa base y por qué necesita controladores para ella. Siguiendo nuestra guía, podrás disfrutar del mejor rendimiento y estabilidad de tu ordenador. </p>
|
81 |
-
|
82 |
-
<p>Esperamos que haya encontrado este artículo útil e informativo. Si tiene alguna pregunta o comentario, no dude en dejar un comentario a continuación. ¡Nos encantaría saber de ti! </p>
|
83 |
-
<h2>Preguntas frecuentes</h2>
|
84 |
-
<h3>¿Cuáles son los beneficios de actualizar los controladores? </h3>
|
85 |
-
<p>La actualización de los controladores puede proporcionarle varios beneficios, como:</p>
|
86 |
-
<ul>
|
87 |
-
<li>Mejorar el rendimiento y la funcionalidad de su placa base y sus dispositivos conectados</li>
|
88 |
-
<li>Corrección de errores o errores que podrían ocurrir con su placa madre y sus dispositivos conectados</li>
|
89 |
-
<li>Mejora de la seguridad y la estabilidad de su ordenador</li>
|
90 |
-
<li>Agregar nuevas características o soporte para nuevos dispositivos o tecnologías</li>
|
91 |
-
</ul>
|
92 |
-
<h3>¿Con qué frecuencia debo actualizar los controladores? </h3>
|
93 |
-
<p>No hay respuesta definitiva a esta pregunta, ya que depende de varios factores como el modelo de la placa base, el sistema operativo, el uso, las preferencias, etc. Sin embargo, algunas pautas generales son:</p>
|
94 |
-
<ul>
|
95 |
-
<li> Debe actualizar los controladores cada vez que haya una nueva versión disponible que ofrece mejoras significativas o correcciones</li>
|
96 |
-
<li> Debe actualizar los controladores cada vez que encuentre problemas o problemas con su placa madre o sus dispositivos conectados</li>
|
97 |
-
<li>Debe actualizar los controladores cada vez que actualice o cambie sus componentes de hardware o software</li>
|
98 |
-
</ul>
|
99 |
-
<h3>¿Cuáles son los riesgos de instalar controladores incorrectos o desactualizados? </h3>
|
100 |
-
<p>La instalación de controladores incorrectos o desactualizados puede causar varios riesgos, como:</p>
|
101 |
-
<ul>
|
102 |
-
<li>Reducir el rendimiento y la funcionalidad de su placa base y sus dispositivos conectados</li>
|
103 |
-
<li>Causar errores, bloqueos, congelaciones, pantallas azules u otros problemas con su computadora</li>
|
104 |
-
<li>Dañando sus componentes de hardware o software</li>
|
105 |
-
<li>Exponer su computadora a amenazas de seguridad o infecciones de malware</li>
|
106 |
-
</ul>
|
107 |
-
<h3>¿Cómo puedo solucionar problemas de controladores? </h3>
|
108 |
-
<p>Si encuentra algún problema con el controlador, puede probar algunos de estos pasos de solución de problemas:</p>
|
109 |
-
<ul>
|
110 |
-
|
111 |
-
<li>Compruebe si el controlador está instalado correctamente y completamente</li>
|
112 |
-
<li>Compruebe si hay un controlador más nuevo o mejor disponible para su placa base y sistema operativo</li>
|
113 |
-
<li>Desinstalar y reinstalar el controlador</li>
|
114 |
-
<li>Utilice Administrador de dispositivos o Windows Update para actualizar el controlador</li>
|
115 |
-
<li>Utilice una herramienta de actualización de controladores para actualizar el controlador</li>
|
116 |
-
<li>Utilice Restaurar sistema o copia de seguridad y restaurar para restaurar el equipo a un estado anterior</li>
|
117 |
-
<li>Póngase en contacto con Gigabyte o el fabricante de su dispositivo para obtener más soporte</li>
|
118 |
-
</ul>
|
119 |
-
<h3>¿Dónde puedo obtener más soporte para la placa base Gigabyte H370M D3H? </h3>
|
120 |
-
<p>Si necesita más soporte para la placa base Gigabyte H370M D3H, puede visitar las siguientes fuentes:</p>
|
121 |
-
<ul>
|
122 |
-
<li>El sitio web oficial de Gigabyte, donde se puede encontrar la página del producto, manual de usuario, controladores, preguntas frecuentes, garantía, y la información de contacto para su placa base</li>
|
123 |
-
<li>El foro oficial de Gigabyte, donde puedes interactuar con otros usuarios y expertos de Gigabyte y obtener respuestas a tus preguntas</li>
|
124 |
-
<li>El canal oficial de YouTube Gigabyte, donde puedes ver videos y tutoriales sobre cómo usar y solucionar problemas de tu placa madre</li>
|
125 |
-
<li>La página oficial de Facebook de Gigabyte, donde puedes seguir las últimas noticias y actualizaciones sobre productos y servicios de Gigabyte</li>
|
126 |
-
<li>La cuenta oficial de Gigabyte Twitter, donde puede twittear sus preguntas o comentarios a Gigabyte</li>
|
127 |
-
</ul></p> 64aa2da5cf<br />
|
128 |
-
<br />
|
129 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/tenacity/before.py
DELETED
@@ -1,46 +0,0 @@
|
|
1 |
-
# Copyright 2016 Julien Danjou
|
2 |
-
# Copyright 2016 Joshua Harlow
|
3 |
-
# Copyright 2013-2014 Ray Holder
|
4 |
-
#
|
5 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
6 |
-
# you may not use this file except in compliance with the License.
|
7 |
-
# You may obtain a copy of the License at
|
8 |
-
#
|
9 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
10 |
-
#
|
11 |
-
# Unless required by applicable law or agreed to in writing, software
|
12 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
13 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
14 |
-
# See the License for the specific language governing permissions and
|
15 |
-
# limitations under the License.
|
16 |
-
|
17 |
-
import typing
|
18 |
-
|
19 |
-
from pip._vendor.tenacity import _utils
|
20 |
-
|
21 |
-
if typing.TYPE_CHECKING:
|
22 |
-
import logging
|
23 |
-
|
24 |
-
from pip._vendor.tenacity import RetryCallState
|
25 |
-
|
26 |
-
|
27 |
-
def before_nothing(retry_state: "RetryCallState") -> None:
|
28 |
-
"""Before call strategy that does nothing."""
|
29 |
-
|
30 |
-
|
31 |
-
def before_log(logger: "logging.Logger", log_level: int) -> typing.Callable[["RetryCallState"], None]:
|
32 |
-
"""Before call strategy that logs to some logger the attempt."""
|
33 |
-
|
34 |
-
def log_it(retry_state: "RetryCallState") -> None:
|
35 |
-
if retry_state.fn is None:
|
36 |
-
# NOTE(sileht): can't really happen, but we must please mypy
|
37 |
-
fn_name = "<unknown>"
|
38 |
-
else:
|
39 |
-
fn_name = _utils.get_callback_name(retry_state.fn)
|
40 |
-
logger.log(
|
41 |
-
log_level,
|
42 |
-
f"Starting call to '{fn_name}', "
|
43 |
-
f"this is the {_utils.to_ordinal(retry_state.attempt_number)} time calling it.",
|
44 |
-
)
|
45 |
-
|
46 |
-
return log_it
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/pyparsing/actions.py
DELETED
@@ -1,207 +0,0 @@
|
|
1 |
-
# actions.py
|
2 |
-
|
3 |
-
from .exceptions import ParseException
|
4 |
-
from .util import col
|
5 |
-
|
6 |
-
|
7 |
-
class OnlyOnce:
|
8 |
-
"""
|
9 |
-
Wrapper for parse actions, to ensure they are only called once.
|
10 |
-
"""
|
11 |
-
|
12 |
-
def __init__(self, method_call):
|
13 |
-
from .core import _trim_arity
|
14 |
-
|
15 |
-
self.callable = _trim_arity(method_call)
|
16 |
-
self.called = False
|
17 |
-
|
18 |
-
def __call__(self, s, l, t):
|
19 |
-
if not self.called:
|
20 |
-
results = self.callable(s, l, t)
|
21 |
-
self.called = True
|
22 |
-
return results
|
23 |
-
raise ParseException(s, l, "OnlyOnce obj called multiple times w/out reset")
|
24 |
-
|
25 |
-
def reset(self):
|
26 |
-
"""
|
27 |
-
Allow the associated parse action to be called once more.
|
28 |
-
"""
|
29 |
-
|
30 |
-
self.called = False
|
31 |
-
|
32 |
-
|
33 |
-
def match_only_at_col(n):
|
34 |
-
"""
|
35 |
-
Helper method for defining parse actions that require matching at
|
36 |
-
a specific column in the input text.
|
37 |
-
"""
|
38 |
-
|
39 |
-
def verify_col(strg, locn, toks):
|
40 |
-
if col(locn, strg) != n:
|
41 |
-
raise ParseException(strg, locn, "matched token not at column {}".format(n))
|
42 |
-
|
43 |
-
return verify_col
|
44 |
-
|
45 |
-
|
46 |
-
def replace_with(repl_str):
|
47 |
-
"""
|
48 |
-
Helper method for common parse actions that simply return
|
49 |
-
a literal value. Especially useful when used with
|
50 |
-
:class:`transform_string<ParserElement.transform_string>` ().
|
51 |
-
|
52 |
-
Example::
|
53 |
-
|
54 |
-
num = Word(nums).set_parse_action(lambda toks: int(toks[0]))
|
55 |
-
na = one_of("N/A NA").set_parse_action(replace_with(math.nan))
|
56 |
-
term = na | num
|
57 |
-
|
58 |
-
term[1, ...].parse_string("324 234 N/A 234") # -> [324, 234, nan, 234]
|
59 |
-
"""
|
60 |
-
return lambda s, l, t: [repl_str]
|
61 |
-
|
62 |
-
|
63 |
-
def remove_quotes(s, l, t):
|
64 |
-
"""
|
65 |
-
Helper parse action for removing quotation marks from parsed
|
66 |
-
quoted strings.
|
67 |
-
|
68 |
-
Example::
|
69 |
-
|
70 |
-
# by default, quotation marks are included in parsed results
|
71 |
-
quoted_string.parse_string("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
|
72 |
-
|
73 |
-
# use remove_quotes to strip quotation marks from parsed results
|
74 |
-
quoted_string.set_parse_action(remove_quotes)
|
75 |
-
quoted_string.parse_string("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
|
76 |
-
"""
|
77 |
-
return t[0][1:-1]
|
78 |
-
|
79 |
-
|
80 |
-
def with_attribute(*args, **attr_dict):
|
81 |
-
"""
|
82 |
-
Helper to create a validating parse action to be used with start
|
83 |
-
tags created with :class:`make_xml_tags` or
|
84 |
-
:class:`make_html_tags`. Use ``with_attribute`` to qualify
|
85 |
-
a starting tag with a required attribute value, to avoid false
|
86 |
-
matches on common tags such as ``<TD>`` or ``<DIV>``.
|
87 |
-
|
88 |
-
Call ``with_attribute`` with a series of attribute names and
|
89 |
-
values. Specify the list of filter attributes names and values as:
|
90 |
-
|
91 |
-
- keyword arguments, as in ``(align="right")``, or
|
92 |
-
- as an explicit dict with ``**`` operator, when an attribute
|
93 |
-
name is also a Python reserved word, as in ``**{"class":"Customer", "align":"right"}``
|
94 |
-
- a list of name-value tuples, as in ``(("ns1:class", "Customer"), ("ns2:align", "right"))``
|
95 |
-
|
96 |
-
For attribute names with a namespace prefix, you must use the second
|
97 |
-
form. Attribute names are matched insensitive to upper/lower case.
|
98 |
-
|
99 |
-
If just testing for ``class`` (with or without a namespace), use
|
100 |
-
:class:`with_class`.
|
101 |
-
|
102 |
-
To verify that the attribute exists, but without specifying a value,
|
103 |
-
pass ``with_attribute.ANY_VALUE`` as the value.
|
104 |
-
|
105 |
-
Example::
|
106 |
-
|
107 |
-
html = '''
|
108 |
-
<div>
|
109 |
-
Some text
|
110 |
-
<div type="grid">1 4 0 1 0</div>
|
111 |
-
<div type="graph">1,3 2,3 1,1</div>
|
112 |
-
<div>this has no type</div>
|
113 |
-
</div>
|
114 |
-
|
115 |
-
'''
|
116 |
-
div,div_end = make_html_tags("div")
|
117 |
-
|
118 |
-
# only match div tag having a type attribute with value "grid"
|
119 |
-
div_grid = div().set_parse_action(with_attribute(type="grid"))
|
120 |
-
grid_expr = div_grid + SkipTo(div | div_end)("body")
|
121 |
-
for grid_header in grid_expr.search_string(html):
|
122 |
-
print(grid_header.body)
|
123 |
-
|
124 |
-
# construct a match with any div tag having a type attribute, regardless of the value
|
125 |
-
div_any_type = div().set_parse_action(with_attribute(type=with_attribute.ANY_VALUE))
|
126 |
-
div_expr = div_any_type + SkipTo(div | div_end)("body")
|
127 |
-
for div_header in div_expr.search_string(html):
|
128 |
-
print(div_header.body)
|
129 |
-
|
130 |
-
prints::
|
131 |
-
|
132 |
-
1 4 0 1 0
|
133 |
-
|
134 |
-
1 4 0 1 0
|
135 |
-
1,3 2,3 1,1
|
136 |
-
"""
|
137 |
-
if args:
|
138 |
-
attrs = args[:]
|
139 |
-
else:
|
140 |
-
attrs = attr_dict.items()
|
141 |
-
attrs = [(k, v) for k, v in attrs]
|
142 |
-
|
143 |
-
def pa(s, l, tokens):
|
144 |
-
for attrName, attrValue in attrs:
|
145 |
-
if attrName not in tokens:
|
146 |
-
raise ParseException(s, l, "no matching attribute " + attrName)
|
147 |
-
if attrValue != with_attribute.ANY_VALUE and tokens[attrName] != attrValue:
|
148 |
-
raise ParseException(
|
149 |
-
s,
|
150 |
-
l,
|
151 |
-
"attribute {!r} has value {!r}, must be {!r}".format(
|
152 |
-
attrName, tokens[attrName], attrValue
|
153 |
-
),
|
154 |
-
)
|
155 |
-
|
156 |
-
return pa
|
157 |
-
|
158 |
-
|
159 |
-
with_attribute.ANY_VALUE = object()
|
160 |
-
|
161 |
-
|
162 |
-
def with_class(classname, namespace=""):
|
163 |
-
"""
|
164 |
-
Simplified version of :class:`with_attribute` when
|
165 |
-
matching on a div class - made difficult because ``class`` is
|
166 |
-
a reserved word in Python.
|
167 |
-
|
168 |
-
Example::
|
169 |
-
|
170 |
-
html = '''
|
171 |
-
<div>
|
172 |
-
Some text
|
173 |
-
<div class="grid">1 4 0 1 0</div>
|
174 |
-
<div class="graph">1,3 2,3 1,1</div>
|
175 |
-
<div>this <div> has no class</div>
|
176 |
-
</div>
|
177 |
-
|
178 |
-
'''
|
179 |
-
div,div_end = make_html_tags("div")
|
180 |
-
div_grid = div().set_parse_action(with_class("grid"))
|
181 |
-
|
182 |
-
grid_expr = div_grid + SkipTo(div | div_end)("body")
|
183 |
-
for grid_header in grid_expr.search_string(html):
|
184 |
-
print(grid_header.body)
|
185 |
-
|
186 |
-
div_any_type = div().set_parse_action(with_class(withAttribute.ANY_VALUE))
|
187 |
-
div_expr = div_any_type + SkipTo(div | div_end)("body")
|
188 |
-
for div_header in div_expr.search_string(html):
|
189 |
-
print(div_header.body)
|
190 |
-
|
191 |
-
prints::
|
192 |
-
|
193 |
-
1 4 0 1 0
|
194 |
-
|
195 |
-
1 4 0 1 0
|
196 |
-
1,3 2,3 1,1
|
197 |
-
"""
|
198 |
-
classattr = "{}:class".format(namespace) if namespace else "class"
|
199 |
-
return with_attribute(**{classattr: classname})
|
200 |
-
|
201 |
-
|
202 |
-
# pre-PEP8 compatibility symbols
|
203 |
-
replaceWith = replace_with
|
204 |
-
removeQuotes = remove_quotes
|
205 |
-
withAttribute = with_attribute
|
206 |
-
withClass = with_class
|
207 |
-
matchOnlyAtCol = match_only_at_col
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/BilalSardar/StoryGenerator/README.md
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: StoryGenerator
|
3 |
-
emoji: 🐢
|
4 |
-
colorFrom: gray
|
5 |
-
colorTo: purple
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.11
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: openrail
|
11 |
-
---
|
12 |
-
|
13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/BimboAnon/BimboProxy/README.md
DELETED
@@ -1,10 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: BimboProxy
|
3 |
-
emoji: 📚
|
4 |
-
colorFrom: red
|
5 |
-
colorTo: pink
|
6 |
-
sdk: docker
|
7 |
-
pinned: false
|
8 |
-
---
|
9 |
-
|
10 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Boadiwaa/Recipes/openai/api_resources/deployment.py
DELETED
@@ -1,62 +0,0 @@
|
|
1 |
-
from openai import util
|
2 |
-
from openai.api_resources.abstract import DeletableAPIResource, ListableAPIResource, CreateableAPIResource
|
3 |
-
from openai.error import InvalidRequestError, APIError
|
4 |
-
|
5 |
-
|
6 |
-
class Deployment(CreateableAPIResource, ListableAPIResource, DeletableAPIResource):
|
7 |
-
engine_required = False
|
8 |
-
OBJECT_NAME = "deployments"
|
9 |
-
|
10 |
-
@classmethod
|
11 |
-
def create(cls, *args, **kwargs):
|
12 |
-
"""
|
13 |
-
Creates a new deployment for the provided prompt and parameters.
|
14 |
-
"""
|
15 |
-
typed_api_type, _ = cls._get_api_type_and_version(kwargs.get("api_type", None), None)
|
16 |
-
if typed_api_type != util.ApiType.AZURE:
|
17 |
-
raise APIError("Deployment operations are only available for the Azure API type.")
|
18 |
-
|
19 |
-
if kwargs.get("model", None) is None:
|
20 |
-
raise InvalidRequestError(
|
21 |
-
"Must provide a 'model' parameter to create a Deployment.",
|
22 |
-
param="model",
|
23 |
-
)
|
24 |
-
|
25 |
-
scale_settings = kwargs.get("scale_settings", None)
|
26 |
-
if scale_settings is None:
|
27 |
-
raise InvalidRequestError(
|
28 |
-
"Must provide a 'scale_settings' parameter to create a Deployment.",
|
29 |
-
param="scale_settings",
|
30 |
-
)
|
31 |
-
|
32 |
-
if "scale_type" not in scale_settings or "capacity" not in scale_settings:
|
33 |
-
raise InvalidRequestError(
|
34 |
-
"The 'scale_settings' parameter contains invalid or incomplete values.",
|
35 |
-
param="scale_settings",
|
36 |
-
)
|
37 |
-
|
38 |
-
return super().create(*args, **kwargs)
|
39 |
-
|
40 |
-
@classmethod
|
41 |
-
def list(cls, *args, **kwargs):
|
42 |
-
typed_api_type, _ = cls._get_api_type_and_version(kwargs.get("api_type", None), None)
|
43 |
-
if typed_api_type != util.ApiType.AZURE:
|
44 |
-
raise APIError("Deployment operations are only available for the Azure API type.")
|
45 |
-
|
46 |
-
return super().list(*args, **kwargs)
|
47 |
-
|
48 |
-
@classmethod
|
49 |
-
def delete(cls, *args, **kwargs):
|
50 |
-
typed_api_type, _ = cls._get_api_type_and_version(kwargs.get("api_type", None), None)
|
51 |
-
if typed_api_type != util.ApiType.AZURE:
|
52 |
-
raise APIError("Deployment operations are only available for the Azure API type.")
|
53 |
-
|
54 |
-
return super().delete(*args, **kwargs)
|
55 |
-
|
56 |
-
@classmethod
|
57 |
-
def retrieve(cls, *args, **kwargs):
|
58 |
-
typed_api_type, _ = cls._get_api_type_and_version(kwargs.get("api_type", None), None)
|
59 |
-
if typed_api_type != util.ApiType.AZURE:
|
60 |
-
raise APIError("Deployment operations are only available for the Azure API type.")
|
61 |
-
|
62 |
-
return super().retrieve(*args, **kwargs)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/docs/_source/advanced/adding_model.md
DELETED
@@ -1,160 +0,0 @@
|
|
1 |
-
# Adding a custom VQA model
|
2 |
-
|
3 |
-
This is a tutorial on how to add a custom VQA model into OpenVQA. Follow the steps below, you will obtain a model that can run across VQA/GQA/CLEVR datasets.
|
4 |
-
|
5 |
-
## 1. Preliminary
|
6 |
-
|
7 |
-
All implemented models are placed at ```<openvqa>/openvqa/models/```, so the first thing to do is to create a folder there for your VQA model named by `<YOU_MODEL_NAME>`. After that, all your model related files will be placed in the folder ```<openvqa>/openvqa/models/<YOU_MODEL_NAME>/```.
|
8 |
-
|
9 |
-
## 2. Dataset Adapter
|
10 |
-
|
11 |
-
Create a python file `<openvqa>/openvqa/models/<YOU_MODEL_NAME>/adapter.py` to bridge your model and different datasets. Different datasets have different input features, thus resulting in different operators to handle the features.
|
12 |
-
|
13 |
-
#### Input
|
14 |
-
|
15 |
-
Input features (packed as `feat_dict`) for different datasets.
|
16 |
-
|
17 |
-
#### Output
|
18 |
-
|
19 |
-
Customized pre-processed features to be fed into the model.
|
20 |
-
|
21 |
-
#### Adapter Template
|
22 |
-
|
23 |
-
```
|
24 |
-
from openvqa.core.base_dataset import BaseAdapter
|
25 |
-
class Adapter(BaseAdapter):
|
26 |
-
def __init__(self, __C):
|
27 |
-
super(Adapter, self).__init__(__C)
|
28 |
-
self.__C = __C
|
29 |
-
|
30 |
-
def vqa_init(self, __C):
|
31 |
-
# Your Implementation
|
32 |
-
|
33 |
-
def gqa_init(self, __C):
|
34 |
-
# Your Implementation
|
35 |
-
|
36 |
-
def clevr_init(self, __C):
|
37 |
-
# Your Implementation
|
38 |
-
|
39 |
-
def vqa_forward(self, feat_dict):
|
40 |
-
# Your Implementation
|
41 |
-
|
42 |
-
def gqa_forward(self, feat_dict):
|
43 |
-
# Your Implementation
|
44 |
-
|
45 |
-
def clevr_forward(self, feat_dict):
|
46 |
-
# Your Implementation
|
47 |
-
|
48 |
-
```
|
49 |
-
|
50 |
-
Each dataset-specific initiation function `def <dataset>_init(self, __C)` corresponds to one feed-forward function `def <dataset>_forward(self, feat_dict)`, your implementations should follow the principles ```torch.nn.Module.__init__()``` and ```torch.nn.Module.forward()```, respectively.
|
51 |
-
|
52 |
-
The variable ` feat_dict` consists of the input feature names for the datasets, which corresponds to the definitions in `<openvqa>/openvqa/core/base_cfg.py`
|
53 |
-
|
54 |
-
```
|
55 |
-
vqa:{
|
56 |
-
'FRCN_FEAT': buttom-up features -> [batchsize, num_bbox, 2048],
|
57 |
-
'BBOX_FEAT': bbox coordinates -> [batchsize, num_bbox, 5],
|
58 |
-
}
|
59 |
-
gqa:{
|
60 |
-
'FRCN_FEAT': official buttom-up features -> [batchsize, num_bbox, 2048],
|
61 |
-
'BBOX_FEAT': official bbox coordinates -> [batchsize, num_bbox, 5],
|
62 |
-
'GRID_FEAT': official resnet grid features -> [batchsize, num_grid, 2048],
|
63 |
-
}
|
64 |
-
clevr:{
|
65 |
-
'GRID_FEAT': resnet grid features -> [batchsize, num_grid, 1024],
|
66 |
-
}
|
67 |
-
```
|
68 |
-
|
69 |
-
More detailed examples can be referred to the adapter for the [MCAN](https://github.com/MILVLG/openvqa/tree/master/openvqa/models/mcan/adapter.py) model.
|
70 |
-
|
71 |
-
|
72 |
-
|
73 |
-
## 3. Definition of model hyper-parameters
|
74 |
-
|
75 |
-
Create a python file named ```<openvqa>/openvqa/models/<YOUR MODEL NAME>/model_cfgs.py```
|
76 |
-
|
77 |
-
#### Configuration Template
|
78 |
-
|
79 |
-
```
|
80 |
-
from openvqa.core.base_cfgs import BaseCfgs
|
81 |
-
class Cfgs(BaseCfgs):
|
82 |
-
def __init__(self):
|
83 |
-
super(Cfgs, self).__init__()
|
84 |
-
# Your Implementation
|
85 |
-
```
|
86 |
-
|
87 |
-
Only the variable you defined here can be used in the network. The variable value can be override in the running configuration file described later.
|
88 |
-
|
89 |
-
#### Example
|
90 |
-
|
91 |
-
```
|
92 |
-
# model_cfgs.py
|
93 |
-
from openvqa.core.base_cfgs import BaseCfgs
|
94 |
-
class Cfgs(BaseCfgs):
|
95 |
-
def __init__(self):
|
96 |
-
super(Cfgs, self).__init__()
|
97 |
-
self.LAYER = 6
|
98 |
-
```
|
99 |
-
|
100 |
-
```
|
101 |
-
# net.py
|
102 |
-
class Net(nn.Module):
|
103 |
-
def __init__(self, __C, pretrained_emb, token_size, answer_size):
|
104 |
-
super(Net, self).__init__()
|
105 |
-
self.__C = __C
|
106 |
-
|
107 |
-
print(__C.LAYER)
|
108 |
-
```
|
109 |
-
|
110 |
-
```
|
111 |
-
Output: 6
|
112 |
-
```
|
113 |
-
|
114 |
-
## 4. Main body
|
115 |
-
|
116 |
-
Create a python file for the main body of the model as ```<openvqa>/openvqa/models/<YOUR MODEL NAME>/net.py```. Note that the filename must be `net.py` since this filename will be invoked by the running script. Except the file, other auxiliary model files invoked by `net.py` can be named arbitrarily.
|
117 |
-
|
118 |
-
When implementation, you should pay attention to the following restrictions:
|
119 |
-
|
120 |
-
- The main module should be named `Net`, i.e., `class Net(nn.Module):`
|
121 |
-
- The `init` function has three input variables: *pretrained_emb* corresponds to the GloVe embedding features for the question; *token\_size* corresponds to the number of all dataset words; *answer_size* corresponds to the number of classes for prediction.
|
122 |
-
- The `forward` function has four input variables: *frcn_feat*, *grid_feat*, *bbox_feat*, *ques_ix*.
|
123 |
-
- In the `init` function, you should initialize the `Adapter` which you've already defined above. In the `forward` function, you should feed *frcn_feat*, *grid_feat*, *bbox_feat* into the `Adapter` to obtain the processed image features.
|
124 |
-
- Return a prediction tensor of size [batch\_size, answer_size]. Note that no activation function like ```sigmoid``` or ```softmax``` is appended on the prediction. The activation has been designed for the prediction in the loss function outside.
|
125 |
-
|
126 |
-
#### Model Template
|
127 |
-
|
128 |
-
```
|
129 |
-
import torch.nn as nn
|
130 |
-
from openvqa.models.mcan.adapter import Adapter
|
131 |
-
class Net(nn.Module):
|
132 |
-
def __init__(self, __C, pretrained_emb, token_size, answer_size):
|
133 |
-
super(Net, self).__init__()
|
134 |
-
self.__C = __C
|
135 |
-
self.adapter = Adapter(__C)
|
136 |
-
|
137 |
-
def forward(self, frcn_feat, grid_feat, bbox_feat, ques_ix):
|
138 |
-
img_feat = self.adapter(frcn_feat, grid_feat, bbox_feat)
|
139 |
-
# model implementation
|
140 |
-
...
|
141 |
-
|
142 |
-
return pred
|
143 |
-
```
|
144 |
-
|
145 |
-
## 5. Declaration of running configurations
|
146 |
-
|
147 |
-
Create a `yml` file at```<openvqa>/configs/<dataset>/<YOUR_CONFIG_NAME>.yml``` and define your hyper-parameters here. We suggest that `<YOUR_CONFIG_NAME>`= `<YOUR_MODEL_NAME>`. If you have the requirement to have one base model support the running scripts for different variants. (e.g., MFB and MFH), you can have different yml files (e.g., `mfb.yml` and `mfh.yml`) and use the `MODEL_USE` param in the yml file to specify the actual used model (i.e., mfb).
|
148 |
-
|
149 |
-
### Example:
|
150 |
-
```
|
151 |
-
MODEL_USE: <YOUR MODEL NAME> # Must be defined
|
152 |
-
LAYER: 6
|
153 |
-
LOSS_FUNC: bce
|
154 |
-
LOSS_REDUCTION: sum
|
155 |
-
```
|
156 |
-
|
157 |
-
Finally, to register the added model to the running script, you can modify `<openvqa/run.py>` by adding your `<YOUR_CONFIG_NAME>` into the arguments for models [here](https://github.com/MILVLG/openvqa/tree/master/run.py#L22).
|
158 |
-
|
159 |
-
|
160 |
-
By doing all the steps above, you are able to use ```--MODEL=<YOUR_CONFIG_NAME>``` to train/val/test your model like other provided models. For more information about the usage of the running script, please refer to the [Getting Started](https://openvqa.readthedocs.io/en/latest/basic/getting_started.html) page.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/WALT/mmdet/models/losses/balanced_l1_loss.py
DELETED
@@ -1,120 +0,0 @@
|
|
1 |
-
import mmcv
|
2 |
-
import numpy as np
|
3 |
-
import torch
|
4 |
-
import torch.nn as nn
|
5 |
-
|
6 |
-
from ..builder import LOSSES
|
7 |
-
from .utils import weighted_loss
|
8 |
-
|
9 |
-
|
10 |
-
@mmcv.jit(derivate=True, coderize=True)
|
11 |
-
@weighted_loss
|
12 |
-
def balanced_l1_loss(pred,
|
13 |
-
target,
|
14 |
-
beta=1.0,
|
15 |
-
alpha=0.5,
|
16 |
-
gamma=1.5,
|
17 |
-
reduction='mean'):
|
18 |
-
"""Calculate balanced L1 loss.
|
19 |
-
|
20 |
-
Please see the `Libra R-CNN <https://arxiv.org/pdf/1904.02701.pdf>`_
|
21 |
-
|
22 |
-
Args:
|
23 |
-
pred (torch.Tensor): The prediction with shape (N, 4).
|
24 |
-
target (torch.Tensor): The learning target of the prediction with
|
25 |
-
shape (N, 4).
|
26 |
-
beta (float): The loss is a piecewise function of prediction and target
|
27 |
-
and ``beta`` serves as a threshold for the difference between the
|
28 |
-
prediction and target. Defaults to 1.0.
|
29 |
-
alpha (float): The denominator ``alpha`` in the balanced L1 loss.
|
30 |
-
Defaults to 0.5.
|
31 |
-
gamma (float): The ``gamma`` in the balanced L1 loss.
|
32 |
-
Defaults to 1.5.
|
33 |
-
reduction (str, optional): The method that reduces the loss to a
|
34 |
-
scalar. Options are "none", "mean" and "sum".
|
35 |
-
|
36 |
-
Returns:
|
37 |
-
torch.Tensor: The calculated loss
|
38 |
-
"""
|
39 |
-
assert beta > 0
|
40 |
-
assert pred.size() == target.size() and target.numel() > 0
|
41 |
-
|
42 |
-
diff = torch.abs(pred - target)
|
43 |
-
b = np.e**(gamma / alpha) - 1
|
44 |
-
loss = torch.where(
|
45 |
-
diff < beta, alpha / b *
|
46 |
-
(b * diff + 1) * torch.log(b * diff / beta + 1) - alpha * diff,
|
47 |
-
gamma * diff + gamma / b - alpha * beta)
|
48 |
-
|
49 |
-
return loss
|
50 |
-
|
51 |
-
|
52 |
-
@LOSSES.register_module()
|
53 |
-
class BalancedL1Loss(nn.Module):
|
54 |
-
"""Balanced L1 Loss.
|
55 |
-
|
56 |
-
arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)
|
57 |
-
|
58 |
-
Args:
|
59 |
-
alpha (float): The denominator ``alpha`` in the balanced L1 loss.
|
60 |
-
Defaults to 0.5.
|
61 |
-
gamma (float): The ``gamma`` in the balanced L1 loss. Defaults to 1.5.
|
62 |
-
beta (float, optional): The loss is a piecewise function of prediction
|
63 |
-
and target. ``beta`` serves as a threshold for the difference
|
64 |
-
between the prediction and target. Defaults to 1.0.
|
65 |
-
reduction (str, optional): The method that reduces the loss to a
|
66 |
-
scalar. Options are "none", "mean" and "sum".
|
67 |
-
loss_weight (float, optional): The weight of the loss. Defaults to 1.0
|
68 |
-
"""
|
69 |
-
|
70 |
-
def __init__(self,
|
71 |
-
alpha=0.5,
|
72 |
-
gamma=1.5,
|
73 |
-
beta=1.0,
|
74 |
-
reduction='mean',
|
75 |
-
loss_weight=1.0):
|
76 |
-
super(BalancedL1Loss, self).__init__()
|
77 |
-
self.alpha = alpha
|
78 |
-
self.gamma = gamma
|
79 |
-
self.beta = beta
|
80 |
-
self.reduction = reduction
|
81 |
-
self.loss_weight = loss_weight
|
82 |
-
|
83 |
-
def forward(self,
|
84 |
-
pred,
|
85 |
-
target,
|
86 |
-
weight=None,
|
87 |
-
avg_factor=None,
|
88 |
-
reduction_override=None,
|
89 |
-
**kwargs):
|
90 |
-
"""Forward function of loss.
|
91 |
-
|
92 |
-
Args:
|
93 |
-
pred (torch.Tensor): The prediction with shape (N, 4).
|
94 |
-
target (torch.Tensor): The learning target of the prediction with
|
95 |
-
shape (N, 4).
|
96 |
-
weight (torch.Tensor, optional): Sample-wise loss weight with
|
97 |
-
shape (N, ).
|
98 |
-
avg_factor (int, optional): Average factor that is used to average
|
99 |
-
the loss. Defaults to None.
|
100 |
-
reduction_override (str, optional): The reduction method used to
|
101 |
-
override the original reduction method of the loss.
|
102 |
-
Options are "none", "mean" and "sum".
|
103 |
-
|
104 |
-
Returns:
|
105 |
-
torch.Tensor: The calculated loss
|
106 |
-
"""
|
107 |
-
assert reduction_override in (None, 'none', 'mean', 'sum')
|
108 |
-
reduction = (
|
109 |
-
reduction_override if reduction_override else self.reduction)
|
110 |
-
loss_bbox = self.loss_weight * balanced_l1_loss(
|
111 |
-
pred,
|
112 |
-
target,
|
113 |
-
weight,
|
114 |
-
alpha=self.alpha,
|
115 |
-
gamma=self.gamma,
|
116 |
-
beta=self.beta,
|
117 |
-
reduction=reduction,
|
118 |
-
avg_factor=avg_factor,
|
119 |
-
**kwargs)
|
120 |
-
return loss_bbox
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/ImageShow.py
DELETED
@@ -1,323 +0,0 @@
|
|
1 |
-
#
|
2 |
-
# The Python Imaging Library.
|
3 |
-
# $Id$
|
4 |
-
#
|
5 |
-
# im.show() drivers
|
6 |
-
#
|
7 |
-
# History:
|
8 |
-
# 2008-04-06 fl Created
|
9 |
-
#
|
10 |
-
# Copyright (c) Secret Labs AB 2008.
|
11 |
-
#
|
12 |
-
# See the README file for information on usage and redistribution.
|
13 |
-
#
|
14 |
-
import os
|
15 |
-
import shutil
|
16 |
-
import subprocess
|
17 |
-
import sys
|
18 |
-
from shlex import quote
|
19 |
-
|
20 |
-
from . import Image
|
21 |
-
|
22 |
-
_viewers = []
|
23 |
-
|
24 |
-
|
25 |
-
def register(viewer, order=1):
|
26 |
-
"""
|
27 |
-
The :py:func:`register` function is used to register additional viewers::
|
28 |
-
|
29 |
-
from PIL import ImageShow
|
30 |
-
ImageShow.register(MyViewer()) # MyViewer will be used as a last resort
|
31 |
-
ImageShow.register(MySecondViewer(), 0) # MySecondViewer will be prioritised
|
32 |
-
ImageShow.register(ImageShow.XVViewer(), 0) # XVViewer will be prioritised
|
33 |
-
|
34 |
-
:param viewer: The viewer to be registered.
|
35 |
-
:param order:
|
36 |
-
Zero or a negative integer to prepend this viewer to the list,
|
37 |
-
a positive integer to append it.
|
38 |
-
"""
|
39 |
-
try:
|
40 |
-
if issubclass(viewer, Viewer):
|
41 |
-
viewer = viewer()
|
42 |
-
except TypeError:
|
43 |
-
pass # raised if viewer wasn't a class
|
44 |
-
if order > 0:
|
45 |
-
_viewers.append(viewer)
|
46 |
-
else:
|
47 |
-
_viewers.insert(0, viewer)
|
48 |
-
|
49 |
-
|
50 |
-
def show(image, title=None, **options):
|
51 |
-
r"""
|
52 |
-
Display a given image.
|
53 |
-
|
54 |
-
:param image: An image object.
|
55 |
-
:param title: Optional title. Not all viewers can display the title.
|
56 |
-
:param \**options: Additional viewer options.
|
57 |
-
:returns: ``True`` if a suitable viewer was found, ``False`` otherwise.
|
58 |
-
"""
|
59 |
-
for viewer in _viewers:
|
60 |
-
if viewer.show(image, title=title, **options):
|
61 |
-
return True
|
62 |
-
return False
|
63 |
-
|
64 |
-
|
65 |
-
class Viewer:
|
66 |
-
"""Base class for viewers."""
|
67 |
-
|
68 |
-
# main api
|
69 |
-
|
70 |
-
def show(self, image, **options):
|
71 |
-
"""
|
72 |
-
The main function for displaying an image.
|
73 |
-
Converts the given image to the target format and displays it.
|
74 |
-
"""
|
75 |
-
|
76 |
-
if not (
|
77 |
-
image.mode in ("1", "RGBA")
|
78 |
-
or (self.format == "PNG" and image.mode in ("I;16", "LA"))
|
79 |
-
):
|
80 |
-
base = Image.getmodebase(image.mode)
|
81 |
-
if image.mode != base:
|
82 |
-
image = image.convert(base)
|
83 |
-
|
84 |
-
return self.show_image(image, **options)
|
85 |
-
|
86 |
-
# hook methods
|
87 |
-
|
88 |
-
format = None
|
89 |
-
"""The format to convert the image into."""
|
90 |
-
options = {}
|
91 |
-
"""Additional options used to convert the image."""
|
92 |
-
|
93 |
-
def get_format(self, image):
|
94 |
-
"""Return format name, or ``None`` to save as PGM/PPM."""
|
95 |
-
return self.format
|
96 |
-
|
97 |
-
def get_command(self, file, **options):
|
98 |
-
"""
|
99 |
-
Returns the command used to display the file.
|
100 |
-
Not implemented in the base class.
|
101 |
-
"""
|
102 |
-
raise NotImplementedError
|
103 |
-
|
104 |
-
def save_image(self, image):
|
105 |
-
"""Save to temporary file and return filename."""
|
106 |
-
return image._dump(format=self.get_format(image), **self.options)
|
107 |
-
|
108 |
-
def show_image(self, image, **options):
|
109 |
-
"""Display the given image."""
|
110 |
-
return self.show_file(self.save_image(image), **options)
|
111 |
-
|
112 |
-
def show_file(self, path, **options):
|
113 |
-
"""
|
114 |
-
Display given file.
|
115 |
-
"""
|
116 |
-
os.system(self.get_command(path, **options)) # nosec
|
117 |
-
return 1
|
118 |
-
|
119 |
-
|
120 |
-
# --------------------------------------------------------------------
|
121 |
-
|
122 |
-
|
123 |
-
class WindowsViewer(Viewer):
|
124 |
-
"""The default viewer on Windows is the default system application for PNG files."""
|
125 |
-
|
126 |
-
format = "PNG"
|
127 |
-
options = {"compress_level": 1, "save_all": True}
|
128 |
-
|
129 |
-
def get_command(self, file, **options):
|
130 |
-
return (
|
131 |
-
f'start "Pillow" /WAIT "{file}" '
|
132 |
-
"&& ping -n 4 127.0.0.1 >NUL "
|
133 |
-
f'&& del /f "{file}"'
|
134 |
-
)
|
135 |
-
|
136 |
-
|
137 |
-
if sys.platform == "win32":
|
138 |
-
register(WindowsViewer)
|
139 |
-
|
140 |
-
|
141 |
-
class MacViewer(Viewer):
|
142 |
-
"""The default viewer on macOS using ``Preview.app``."""
|
143 |
-
|
144 |
-
format = "PNG"
|
145 |
-
options = {"compress_level": 1, "save_all": True}
|
146 |
-
|
147 |
-
def get_command(self, file, **options):
|
148 |
-
# on darwin open returns immediately resulting in the temp
|
149 |
-
# file removal while app is opening
|
150 |
-
command = "open -a Preview.app"
|
151 |
-
command = f"({command} {quote(file)}; sleep 20; rm -f {quote(file)})&"
|
152 |
-
return command
|
153 |
-
|
154 |
-
def show_file(self, path, **options):
|
155 |
-
"""
|
156 |
-
Display given file.
|
157 |
-
"""
|
158 |
-
subprocess.call(["open", "-a", "Preview.app", path])
|
159 |
-
executable = sys.executable or shutil.which("python3")
|
160 |
-
if executable:
|
161 |
-
subprocess.Popen(
|
162 |
-
[
|
163 |
-
executable,
|
164 |
-
"-c",
|
165 |
-
"import os, sys, time; time.sleep(20); os.remove(sys.argv[1])",
|
166 |
-
path,
|
167 |
-
]
|
168 |
-
)
|
169 |
-
return 1
|
170 |
-
|
171 |
-
|
172 |
-
if sys.platform == "darwin":
|
173 |
-
register(MacViewer)
|
174 |
-
|
175 |
-
|
176 |
-
class UnixViewer(Viewer):
|
177 |
-
format = "PNG"
|
178 |
-
options = {"compress_level": 1, "save_all": True}
|
179 |
-
|
180 |
-
def get_command(self, file, **options):
|
181 |
-
command = self.get_command_ex(file, **options)[0]
|
182 |
-
return f"({command} {quote(file)}"
|
183 |
-
|
184 |
-
|
185 |
-
class XDGViewer(UnixViewer):
|
186 |
-
"""
|
187 |
-
The freedesktop.org ``xdg-open`` command.
|
188 |
-
"""
|
189 |
-
|
190 |
-
def get_command_ex(self, file, **options):
|
191 |
-
command = executable = "xdg-open"
|
192 |
-
return command, executable
|
193 |
-
|
194 |
-
def show_file(self, path, **options):
|
195 |
-
"""
|
196 |
-
Display given file.
|
197 |
-
"""
|
198 |
-
subprocess.Popen(["xdg-open", path])
|
199 |
-
return 1
|
200 |
-
|
201 |
-
|
202 |
-
class DisplayViewer(UnixViewer):
|
203 |
-
"""
|
204 |
-
The ImageMagick ``display`` command.
|
205 |
-
This viewer supports the ``title`` parameter.
|
206 |
-
"""
|
207 |
-
|
208 |
-
def get_command_ex(self, file, title=None, **options):
|
209 |
-
command = executable = "display"
|
210 |
-
if title:
|
211 |
-
command += f" -title {quote(title)}"
|
212 |
-
return command, executable
|
213 |
-
|
214 |
-
def show_file(self, path, **options):
|
215 |
-
"""
|
216 |
-
Display given file.
|
217 |
-
"""
|
218 |
-
args = ["display"]
|
219 |
-
title = options.get("title")
|
220 |
-
if title:
|
221 |
-
args += ["-title", title]
|
222 |
-
args.append(path)
|
223 |
-
|
224 |
-
subprocess.Popen(args)
|
225 |
-
return 1
|
226 |
-
|
227 |
-
|
228 |
-
class GmDisplayViewer(UnixViewer):
|
229 |
-
"""The GraphicsMagick ``gm display`` command."""
|
230 |
-
|
231 |
-
def get_command_ex(self, file, **options):
|
232 |
-
executable = "gm"
|
233 |
-
command = "gm display"
|
234 |
-
return command, executable
|
235 |
-
|
236 |
-
def show_file(self, path, **options):
|
237 |
-
"""
|
238 |
-
Display given file.
|
239 |
-
"""
|
240 |
-
subprocess.Popen(["gm", "display", path])
|
241 |
-
return 1
|
242 |
-
|
243 |
-
|
244 |
-
class EogViewer(UnixViewer):
|
245 |
-
"""The GNOME Image Viewer ``eog`` command."""
|
246 |
-
|
247 |
-
def get_command_ex(self, file, **options):
|
248 |
-
executable = "eog"
|
249 |
-
command = "eog -n"
|
250 |
-
return command, executable
|
251 |
-
|
252 |
-
def show_file(self, path, **options):
|
253 |
-
"""
|
254 |
-
Display given file.
|
255 |
-
"""
|
256 |
-
subprocess.Popen(["eog", "-n", path])
|
257 |
-
return 1
|
258 |
-
|
259 |
-
|
260 |
-
class XVViewer(UnixViewer):
|
261 |
-
"""
|
262 |
-
The X Viewer ``xv`` command.
|
263 |
-
This viewer supports the ``title`` parameter.
|
264 |
-
"""
|
265 |
-
|
266 |
-
def get_command_ex(self, file, title=None, **options):
|
267 |
-
# note: xv is pretty outdated. most modern systems have
|
268 |
-
# imagemagick's display command instead.
|
269 |
-
command = executable = "xv"
|
270 |
-
if title:
|
271 |
-
command += f" -name {quote(title)}"
|
272 |
-
return command, executable
|
273 |
-
|
274 |
-
def show_file(self, path, **options):
|
275 |
-
"""
|
276 |
-
Display given file.
|
277 |
-
"""
|
278 |
-
args = ["xv"]
|
279 |
-
title = options.get("title")
|
280 |
-
if title:
|
281 |
-
args += ["-name", title]
|
282 |
-
args.append(path)
|
283 |
-
|
284 |
-
subprocess.Popen(args)
|
285 |
-
return 1
|
286 |
-
|
287 |
-
|
288 |
-
if sys.platform not in ("win32", "darwin"): # unixoids
|
289 |
-
if shutil.which("xdg-open"):
|
290 |
-
register(XDGViewer)
|
291 |
-
if shutil.which("display"):
|
292 |
-
register(DisplayViewer)
|
293 |
-
if shutil.which("gm"):
|
294 |
-
register(GmDisplayViewer)
|
295 |
-
if shutil.which("eog"):
|
296 |
-
register(EogViewer)
|
297 |
-
if shutil.which("xv"):
|
298 |
-
register(XVViewer)
|
299 |
-
|
300 |
-
|
301 |
-
class IPythonViewer(Viewer):
|
302 |
-
"""The viewer for IPython frontends."""
|
303 |
-
|
304 |
-
def show_image(self, image, **options):
|
305 |
-
ipython_display(image)
|
306 |
-
return 1
|
307 |
-
|
308 |
-
|
309 |
-
try:
|
310 |
-
from IPython.display import display as ipython_display
|
311 |
-
except ImportError:
|
312 |
-
pass
|
313 |
-
else:
|
314 |
-
register(IPythonViewer)
|
315 |
-
|
316 |
-
|
317 |
-
if __name__ == "__main__":
|
318 |
-
if len(sys.argv) < 2:
|
319 |
-
print("Syntax: python3 ImageShow.py imagefile [title]")
|
320 |
-
sys.exit()
|
321 |
-
|
322 |
-
with Image.open(sys.argv[1]) as im:
|
323 |
-
print(show(im, *sys.argv[2:]))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DanielGartop/SexAI/README.md
DELETED
@@ -1,10 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: SexAI
|
3 |
-
emoji: 📈
|
4 |
-
colorFrom: pink
|
5 |
-
colorTo: red
|
6 |
-
sdk: docker
|
7 |
-
pinned: false
|
8 |
-
---
|
9 |
-
|
10 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Datasculptor/MusicGen/audiocraft/models/lm.py
DELETED
@@ -1,527 +0,0 @@
|
|
1 |
-
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
2 |
-
# All rights reserved.
|
3 |
-
#
|
4 |
-
# This source code is licensed under the license found in the
|
5 |
-
# LICENSE file in the root directory of this source tree.
|
6 |
-
|
7 |
-
from dataclasses import dataclass
|
8 |
-
from functools import partial
|
9 |
-
import logging
|
10 |
-
import math
|
11 |
-
import typing as tp
|
12 |
-
|
13 |
-
import torch
|
14 |
-
from torch import nn
|
15 |
-
|
16 |
-
from ..utils import utils
|
17 |
-
from ..modules.streaming import StreamingModule, State
|
18 |
-
from ..modules.transformer import StreamingTransformer, create_norm_fn
|
19 |
-
from ..modules.conditioners import (
|
20 |
-
ConditionFuser,
|
21 |
-
ClassifierFreeGuidanceDropout,
|
22 |
-
AttributeDropout,
|
23 |
-
ConditioningProvider,
|
24 |
-
ConditioningAttributes,
|
25 |
-
ConditionType,
|
26 |
-
)
|
27 |
-
from ..modules.codebooks_patterns import CodebooksPatternProvider
|
28 |
-
from ..modules.activations import get_activation_fn
|
29 |
-
|
30 |
-
|
31 |
-
logger = logging.getLogger(__name__)
|
32 |
-
ConditionTensors = tp.Dict[str, ConditionType]
|
33 |
-
CFGConditions = tp.Union[ConditionTensors, tp.Tuple[ConditionTensors, ConditionTensors]]
|
34 |
-
|
35 |
-
|
36 |
-
def get_init_fn(method: str, input_dim: int, init_depth: tp.Optional[int] = None):
|
37 |
-
"""LM layer initialization.
|
38 |
-
Inspired from xlformers: https://github.com/fairinternal/xlformers
|
39 |
-
|
40 |
-
Args:
|
41 |
-
method (str): Method name for init function. Valid options are:
|
42 |
-
'gaussian', 'uniform'.
|
43 |
-
input_dim (int): Input dimension of the initialized module.
|
44 |
-
init_depth (Optional[int]): Optional init depth value used to rescale
|
45 |
-
the standard deviation if defined.
|
46 |
-
"""
|
47 |
-
# Compute std
|
48 |
-
std = 1 / math.sqrt(input_dim)
|
49 |
-
# Rescale with depth
|
50 |
-
if init_depth is not None:
|
51 |
-
std = std / math.sqrt(2 * init_depth)
|
52 |
-
|
53 |
-
if method == 'gaussian':
|
54 |
-
return partial(
|
55 |
-
torch.nn.init.trunc_normal_, mean=0.0, std=std, a=-3 * std, b=3 * std
|
56 |
-
)
|
57 |
-
elif method == 'uniform':
|
58 |
-
bound = math.sqrt(3) * std # ensure the standard deviation is `std`
|
59 |
-
return partial(torch.nn.init.uniform_, a=-bound, b=bound)
|
60 |
-
else:
|
61 |
-
raise ValueError("Unsupported layer initialization method")
|
62 |
-
|
63 |
-
|
64 |
-
def init_layer(m: nn.Module,
|
65 |
-
method: str,
|
66 |
-
init_depth: tp.Optional[int] = None,
|
67 |
-
zero_bias_init: bool = False):
|
68 |
-
"""Wrapper around ``get_init_fn`` for proper initialization of LM modules.
|
69 |
-
|
70 |
-
Args:
|
71 |
-
m (nn.Module): Module to initialize.
|
72 |
-
method (str): Method name for the init function.
|
73 |
-
init_depth (Optional[int]): Optional init depth value used to rescale
|
74 |
-
the standard deviation if defined.
|
75 |
-
zero_bias_init (bool): Whether to initialize the bias to 0 or not.
|
76 |
-
"""
|
77 |
-
if isinstance(m, nn.Linear):
|
78 |
-
init_fn = get_init_fn(method, m.in_features, init_depth=init_depth)
|
79 |
-
if m.weight.device.type == 'cpu' and m.weight.dtype == torch.float16:
|
80 |
-
weight = m.weight.float()
|
81 |
-
init_fn(weight)
|
82 |
-
m.weight.data[:] = weight.half()
|
83 |
-
else:
|
84 |
-
init_fn(m.weight)
|
85 |
-
if zero_bias_init and m.bias is not None:
|
86 |
-
nn.init.constant_(m.bias, 0)
|
87 |
-
elif isinstance(m, nn.Embedding):
|
88 |
-
init_fn = get_init_fn(method, m.embedding_dim, init_depth=None)
|
89 |
-
if m.weight.device.type == 'cpu' and m.weight.dtype == torch.float16:
|
90 |
-
weight = m.weight.float()
|
91 |
-
init_fn(weight)
|
92 |
-
m.weight.data[:] = weight.half()
|
93 |
-
else:
|
94 |
-
init_fn(m.weight)
|
95 |
-
|
96 |
-
|
97 |
-
class ScaledEmbedding(nn.Embedding):
|
98 |
-
"""Boost learning rate for embeddings (with `scale`).
|
99 |
-
"""
|
100 |
-
def __init__(self, *args, lr=None, **kwargs):
|
101 |
-
super().__init__(*args, **kwargs)
|
102 |
-
self.lr = lr
|
103 |
-
|
104 |
-
def make_optim_group(self):
|
105 |
-
group = {"params": list(self.parameters())}
|
106 |
-
if self.lr is not None:
|
107 |
-
group["lr"] = self.lr
|
108 |
-
return group
|
109 |
-
|
110 |
-
|
111 |
-
@dataclass
|
112 |
-
class LMOutput:
|
113 |
-
# The logits are already re-aligned with the input codes
|
114 |
-
# hence no extra shift is required, e.g. when computing CE
|
115 |
-
logits: torch.Tensor # [B, K, T, card]
|
116 |
-
mask: torch.Tensor # [B, K, T]
|
117 |
-
|
118 |
-
|
119 |
-
class LMModel(StreamingModule):
|
120 |
-
"""Transformer-based language model on multiple streams of codes.
|
121 |
-
|
122 |
-
Args:
|
123 |
-
pattern_provider (CodebooksPatternProvider): Pattern provider for codebook interleaving.
|
124 |
-
condition_provider (MusicConditioningProvider): Conditioning provider from metadata.
|
125 |
-
fuser (ConditionFuser): Fuser handling the fusing of conditions with language model input.
|
126 |
-
n_q (int): Number of parallel streams to model.
|
127 |
-
card (int): Cardinality, vocabulary size.
|
128 |
-
dim (int): Dimension of the transformer encoder.
|
129 |
-
num_heads (int): Number of heads for the transformer encoder.
|
130 |
-
hidden_scale (int): Scale for hidden feed forward dimension of the transformer encoder.
|
131 |
-
norm (str): Normalization method.
|
132 |
-
norm_first (bool): Use pre-norm instead of post-norm.
|
133 |
-
emb_lr (Optional[float]): Embedding-specific learning rate.
|
134 |
-
bias_proj (bool): Use bias for output projections.
|
135 |
-
weight_init (Optional[str]): Method for weight initialization.
|
136 |
-
depthwise_init (Optional[str]): Method for depthwise weight initialization.
|
137 |
-
zero_bias_init (bool): If true and bias in Linears, initialize bias to zeros.
|
138 |
-
cfg_dropout (float): Classifier-free guidance dropout.
|
139 |
-
cfg_coef (float): Classifier-free guidance coefficient.
|
140 |
-
attribute_dropout (dict): Attribute dropout probabilities.
|
141 |
-
two_step_cfg (bool): Whether to run classifier free-guidance with 2 distinct steps.
|
142 |
-
**kwargs: Additional parameters for the transformer encoder.
|
143 |
-
"""
|
144 |
-
def __init__(self, pattern_provider: CodebooksPatternProvider, condition_provider: ConditioningProvider,
|
145 |
-
fuser: ConditionFuser, n_q: int = 8, card: int = 1024, dim: int = 128, num_heads: int = 8,
|
146 |
-
hidden_scale: int = 4, norm: str = 'layer_norm', norm_first: bool = False,
|
147 |
-
emb_lr: tp.Optional[float] = None, bias_proj: bool = True,
|
148 |
-
weight_init: tp.Optional[str] = None, depthwise_init: tp.Optional[str] = None,
|
149 |
-
zero_bias_init: bool = False, cfg_dropout: float = 0, cfg_coef: float = 1.0,
|
150 |
-
attribute_dropout: tp.Dict[str, tp.Dict[str, float]] = {}, two_step_cfg: bool = False,
|
151 |
-
**kwargs):
|
152 |
-
super().__init__()
|
153 |
-
self.cfg_coef = cfg_coef
|
154 |
-
self.cfg_dropout = ClassifierFreeGuidanceDropout(p=cfg_dropout)
|
155 |
-
self.att_dropout = AttributeDropout(p=attribute_dropout)
|
156 |
-
self.condition_provider = condition_provider
|
157 |
-
self.fuser = fuser
|
158 |
-
self.card = card
|
159 |
-
embed_dim = self.card + 1
|
160 |
-
self.n_q = n_q
|
161 |
-
self.dim = dim
|
162 |
-
self.pattern_provider = pattern_provider
|
163 |
-
self.two_step_cfg = two_step_cfg
|
164 |
-
self.emb = nn.ModuleList([ScaledEmbedding(embed_dim, dim, lr=emb_lr) for _ in range(n_q)])
|
165 |
-
if 'activation' in kwargs:
|
166 |
-
kwargs['activation'] = get_activation_fn(kwargs['activation'])
|
167 |
-
self.transformer = StreamingTransformer(
|
168 |
-
d_model=dim, num_heads=num_heads, dim_feedforward=int(hidden_scale * dim),
|
169 |
-
norm=norm, norm_first=norm_first, **kwargs)
|
170 |
-
self.out_norm: tp.Optional[nn.Module] = None
|
171 |
-
if norm_first:
|
172 |
-
self.out_norm = create_norm_fn(norm, dim)
|
173 |
-
self.linears = nn.ModuleList([nn.Linear(dim, self.card, bias=bias_proj) for _ in range(n_q)])
|
174 |
-
self._init_weights(weight_init, depthwise_init, zero_bias_init)
|
175 |
-
self._fsdp: tp.Optional[nn.Module]
|
176 |
-
self.__dict__['_fsdp'] = None
|
177 |
-
|
178 |
-
def _init_weights(self, weight_init: tp.Optional[str], depthwise_init: tp.Optional[str], zero_bias_init: bool):
|
179 |
-
"""Initialization of the transformer module weights.
|
180 |
-
|
181 |
-
Args:
|
182 |
-
weight_init (Optional[str]): Weight initialization strategy. See ``get_init_fn`` for valid options.
|
183 |
-
depthwise_init (Optional[str]): Depwthwise initialization strategy. The following options are valid:
|
184 |
-
'current' where the depth corresponds to the current layer index or 'global' where the total number
|
185 |
-
of layer is used as depth. If not set, no depthwise initialization strategy is used.
|
186 |
-
zero_bias_init (bool): Whether to initalize bias to zero or not.
|
187 |
-
"""
|
188 |
-
assert depthwise_init is None or depthwise_init in ['current', 'global']
|
189 |
-
assert depthwise_init is None or weight_init is not None, \
|
190 |
-
"If 'depthwise_init' is defined, a 'weight_init' method should be provided."
|
191 |
-
assert not zero_bias_init or weight_init is not None, \
|
192 |
-
"If 'zero_bias_init', a 'weight_init' method should be provided"
|
193 |
-
|
194 |
-
if weight_init is None:
|
195 |
-
return
|
196 |
-
|
197 |
-
for emb_layer in self.emb:
|
198 |
-
init_layer(emb_layer, method=weight_init, init_depth=None, zero_bias_init=zero_bias_init)
|
199 |
-
|
200 |
-
for layer_idx, tr_layer in enumerate(self.transformer.layers):
|
201 |
-
depth = None
|
202 |
-
if depthwise_init == 'current':
|
203 |
-
depth = layer_idx + 1
|
204 |
-
elif depthwise_init == 'global':
|
205 |
-
depth = len(self.transformer.layers)
|
206 |
-
init_fn = partial(init_layer, method=weight_init, init_depth=depth, zero_bias_init=zero_bias_init)
|
207 |
-
tr_layer.apply(init_fn)
|
208 |
-
|
209 |
-
for linear in self.linears:
|
210 |
-
init_layer(linear, method=weight_init, init_depth=None, zero_bias_init=zero_bias_init)
|
211 |
-
|
212 |
-
@property
|
213 |
-
def special_token_id(self) -> int:
|
214 |
-
return self.card
|
215 |
-
|
216 |
-
@property
|
217 |
-
def num_codebooks(self) -> int:
|
218 |
-
return self.n_q
|
219 |
-
|
220 |
-
def forward(self, sequence: torch.Tensor,
|
221 |
-
conditions: tp.List[ConditioningAttributes],
|
222 |
-
condition_tensors: tp.Optional[ConditionTensors] = None) -> torch.Tensor:
|
223 |
-
"""Apply language model on sequence and conditions.
|
224 |
-
Given a tensor of sequence of shape [B, K, S] with K the number of codebooks and
|
225 |
-
S the sequence steps, return the logits with shape [B, card, K, S].
|
226 |
-
|
227 |
-
Args:
|
228 |
-
indices (torch.Tensor): indices of the codes to model.
|
229 |
-
conditions (list[ConditioningAttributes]): conditionings to use when modeling
|
230 |
-
the given codes. Note that when evaluating multiple time with the same conditioning
|
231 |
-
you should pre-compute those and pass them as `condition_tensors`.
|
232 |
-
condition_tensors (dict[str, ConditionType] or None): pre-computed conditioning
|
233 |
-
tensors, see `conditions`.
|
234 |
-
Returns:
|
235 |
-
torch.Tensor: Logits.
|
236 |
-
"""
|
237 |
-
B, K, S = sequence.shape
|
238 |
-
assert K == self.num_codebooks, 'Sequence shape must match the specified number of codebooks'
|
239 |
-
input_ = sum([self.emb[k](sequence[:, k]) for k in range(K)])
|
240 |
-
if condition_tensors is None:
|
241 |
-
assert not self._is_streaming, "Conditions tensors should be precomputed when streaming."
|
242 |
-
# apply dropout modules
|
243 |
-
conditions = self.cfg_dropout(conditions)
|
244 |
-
conditions = self.att_dropout(conditions)
|
245 |
-
tokenized = self.condition_provider.tokenize(conditions)
|
246 |
-
# encode conditions and fuse, both have a streaming cache to not recompute when generating.
|
247 |
-
condition_tensors = self.condition_provider(tokenized)
|
248 |
-
else:
|
249 |
-
assert not conditions, "Shouldn't pass both conditions and condition_tensors."
|
250 |
-
|
251 |
-
input_, cross_attention_input = self.fuser(input_, condition_tensors)
|
252 |
-
|
253 |
-
out = self.transformer(input_, cross_attention_src=cross_attention_input)
|
254 |
-
if self.out_norm:
|
255 |
-
out = self.out_norm(out)
|
256 |
-
logits = torch.stack([self.linears[k](out) for k in range(K)], dim=1) # [B, K, S, card]
|
257 |
-
|
258 |
-
# remove the prefix from the model outputs
|
259 |
-
if len(self.fuser.fuse2cond['prepend']) > 0:
|
260 |
-
logits = logits[:, :, -S:]
|
261 |
-
|
262 |
-
return logits # [B, K, S, card]
|
263 |
-
|
264 |
-
def compute_predictions(
|
265 |
-
self, codes: torch.Tensor,
|
266 |
-
conditions: tp.List[ConditioningAttributes],
|
267 |
-
condition_tensors: tp.Optional[ConditionTensors] = None) -> LMOutput:
|
268 |
-
"""Given an input tensor of codes [B, K, T] and list of conditions, runs the model
|
269 |
-
forward using the specified codes interleaving pattern.
|
270 |
-
|
271 |
-
Args:
|
272 |
-
codes (torch.Tensor): Input codes of shape [B, K, T] with B the batch size,
|
273 |
-
K the number of codebooks and T the number of timesteps.
|
274 |
-
conditions (list[ConditioningAttributes]): conditionings to use when modeling
|
275 |
-
the given codes. Note that when evaluating multiple time with the same conditioning
|
276 |
-
you should pre-compute those and pass them as `condition_tensors`.
|
277 |
-
condition_tensors (dict[str, ConditionType] or None): pre-computed conditioning
|
278 |
-
tensors, see `conditions`.
|
279 |
-
Returns:
|
280 |
-
LMOutput: Language model outputs
|
281 |
-
logits (torch.Tensor) of shape [B, K, T, card] corresponding to the provided codes,
|
282 |
-
i.e. the first item corresponds to logits to predict the first code, meaning that
|
283 |
-
no additional shifting of codes and logits is required.
|
284 |
-
mask (torch.Tensor) of shape [B, K, T], mask over valid and invalid positions.
|
285 |
-
Given the specified interleaving strategies, parts of the logits and codes should
|
286 |
-
not be considered as valid predictions because of invalid context.
|
287 |
-
"""
|
288 |
-
B, K, T = codes.shape
|
289 |
-
codes = codes.contiguous()
|
290 |
-
# map codes [B, K, T] into pattern sequence [B, K, S] using special_token_id for masked tokens
|
291 |
-
pattern = self.pattern_provider.get_pattern(T)
|
292 |
-
sequence_codes, sequence_indexes, sequence_mask = pattern.build_pattern_sequence(
|
293 |
-
codes, self.special_token_id, keep_only_valid_steps=True
|
294 |
-
)
|
295 |
-
# apply model on pattern sequence
|
296 |
-
model = self if self._fsdp is None else self._fsdp
|
297 |
-
logits = model(sequence_codes, conditions, condition_tensors) # [B, K, S, card]
|
298 |
-
# map back the logits on pattern sequence to logits on original codes: [B, K, S, card] -> [B, K, T, card]
|
299 |
-
# and provide the corresponding mask over invalid positions of tokens
|
300 |
-
logits = logits.permute(0, 3, 1, 2) # [B, card, K, S]
|
301 |
-
# note: we use nans as special token to make it obvious if we feed unexpected logits
|
302 |
-
logits, logits_indexes, logits_mask = pattern.revert_pattern_logits(
|
303 |
-
logits, float('nan'), keep_only_valid_steps=True
|
304 |
-
)
|
305 |
-
logits = logits.permute(0, 2, 3, 1) # [B, K, T, card]
|
306 |
-
logits_mask = logits_mask[None, :, :].expand(B, -1, -1) # [K, T] -> [B, K, T]
|
307 |
-
return LMOutput(logits, logits_mask)
|
308 |
-
|
309 |
-
def _sample_next_token(self,
|
310 |
-
sequence: torch.Tensor,
|
311 |
-
cfg_conditions: CFGConditions,
|
312 |
-
unconditional_state: State,
|
313 |
-
use_sampling: bool = False,
|
314 |
-
temp: float = 1.0,
|
315 |
-
top_k: int = 0,
|
316 |
-
top_p: float = 0.0,
|
317 |
-
cfg_coef: tp.Optional[float] = None) -> torch.Tensor:
|
318 |
-
"""Sample next token from the model given a sequence and a set of conditions. The model supports
|
319 |
-
multiple sampling strategies (greedy sampling, softmax, top-k, top-p...).
|
320 |
-
|
321 |
-
Args:
|
322 |
-
sequence (torch.Tensor): Current sequence of shape [B, K, S]
|
323 |
-
with K corresponding to the number of codebooks and S the number of sequence steps.
|
324 |
-
S = 1 in streaming mode, except for the first step that contains a bigger prompt.
|
325 |
-
condition_tensors (Dict[str, ConditionType): Set of conditions. If CFG is used,
|
326 |
-
should be twice the batch size, being the concatenation of the conditions + null conditions.
|
327 |
-
use_sampling (bool): Whether to use a sampling strategy or not.
|
328 |
-
temp (float): Sampling temperature.
|
329 |
-
top_k (int): K for "top-k" sampling.
|
330 |
-
top_p (float): P for "top-p" sampling.
|
331 |
-
cfg_coef (float): classifier free guidance coefficient
|
332 |
-
Returns:
|
333 |
-
next_token (torch.Tensor): Next token tensor of shape [B, K, 1].
|
334 |
-
"""
|
335 |
-
B = sequence.shape[0]
|
336 |
-
cfg_coef = self.cfg_coef if cfg_coef is None else cfg_coef
|
337 |
-
model = self if self._fsdp is None else self._fsdp
|
338 |
-
if self.two_step_cfg and cfg_conditions != {}:
|
339 |
-
assert isinstance(cfg_conditions, tuple)
|
340 |
-
condition_tensors, null_condition_tensors = cfg_conditions
|
341 |
-
cond_logits = model(sequence, conditions=[], condition_tensors=condition_tensors)
|
342 |
-
state = self.get_streaming_state()
|
343 |
-
self.set_streaming_state(unconditional_state)
|
344 |
-
uncond_logits = model(sequence, conditions=[], condition_tensors=null_condition_tensors)
|
345 |
-
unconditional_state.update(self.get_streaming_state())
|
346 |
-
self.set_streaming_state(state)
|
347 |
-
logits = uncond_logits + (cond_logits - uncond_logits) * self.cfg_coef
|
348 |
-
else:
|
349 |
-
assert isinstance(cfg_conditions, dict)
|
350 |
-
condition_tensors = cfg_conditions
|
351 |
-
if condition_tensors:
|
352 |
-
# Preparing for CFG, predicting both conditional and unconditional logits.
|
353 |
-
sequence = torch.cat([sequence, sequence], dim=0)
|
354 |
-
all_logits = model(
|
355 |
-
sequence,
|
356 |
-
conditions=[], condition_tensors=condition_tensors)
|
357 |
-
if condition_tensors:
|
358 |
-
cond_logits, uncond_logits = all_logits.split(B, dim=0) # [B, K, T, card]
|
359 |
-
logits = uncond_logits + (cond_logits - uncond_logits) * cfg_coef
|
360 |
-
else:
|
361 |
-
logits = all_logits
|
362 |
-
|
363 |
-
logits = logits.permute(0, 1, 3, 2) # [B, K, card, T]
|
364 |
-
logits = logits[..., -1] # [B x K x card]
|
365 |
-
|
366 |
-
# Apply softmax for sampling if temp > 0. Else, do greedy sampling to avoid zero division error.
|
367 |
-
if use_sampling and temp > 0.0:
|
368 |
-
probs = torch.softmax(logits / temp, dim=-1)
|
369 |
-
if top_p > 0.0:
|
370 |
-
next_token = utils.sample_top_p(probs, p=top_p)
|
371 |
-
elif top_k > 0:
|
372 |
-
next_token = utils.sample_top_k(probs, k=top_k)
|
373 |
-
else:
|
374 |
-
next_token = utils.multinomial(probs, num_samples=1)
|
375 |
-
else:
|
376 |
-
next_token = torch.argmax(logits, dim=-1, keepdim=True)
|
377 |
-
|
378 |
-
return next_token
|
379 |
-
|
380 |
-
@torch.no_grad()
|
381 |
-
def generate(self,
|
382 |
-
prompt: tp.Optional[torch.Tensor] = None,
|
383 |
-
conditions: tp.List[ConditioningAttributes] = [],
|
384 |
-
num_samples: tp.Optional[int] = None,
|
385 |
-
max_gen_len: int = 256,
|
386 |
-
use_sampling: bool = True,
|
387 |
-
temp: float = 1.0,
|
388 |
-
top_k: int = 250,
|
389 |
-
top_p: float = 0.0,
|
390 |
-
cfg_coef: tp.Optional[float] = None,
|
391 |
-
two_step_cfg: bool = False,
|
392 |
-
remove_prompts: bool = False,
|
393 |
-
check: bool = False,
|
394 |
-
callback: tp.Optional[tp.Callable[[int, int], None]] = None) -> torch.Tensor:
|
395 |
-
"""Generate tokens sampling from the model given a prompt or unconditionally. Generation can
|
396 |
-
be perform in a greedy fashion or using sampling with top K and top P strategies.
|
397 |
-
|
398 |
-
Args:
|
399 |
-
prompt (Optional[torch.Tensor]): Prompt tokens of shape [B, K, T].
|
400 |
-
conditions_tensors (Dict[str, torch.Tensor]): Set of conditions or None.
|
401 |
-
num_samples (int or None): Number of samples to generate when no prompt and no conditions are given.
|
402 |
-
max_gen_len (int): Maximum generation length.
|
403 |
-
use_sampling (bool): Whether to use a sampling strategy or not.
|
404 |
-
temp (float): Sampling temperature.
|
405 |
-
top_k (int): K for "top-k" sampling.
|
406 |
-
top_p (float): P for "top-p" sampling.
|
407 |
-
remove_prompts (bool): Whether to remove prompts from generation or not.
|
408 |
-
Returns:
|
409 |
-
torch.Tensor: Generated tokens.
|
410 |
-
"""
|
411 |
-
assert not self.training, "generation shouldn't be used in training mode."
|
412 |
-
first_param = next(iter(self.parameters()))
|
413 |
-
device = first_param.device
|
414 |
-
|
415 |
-
# Checking all input shapes are consistents.
|
416 |
-
possible_num_samples = []
|
417 |
-
if num_samples is not None:
|
418 |
-
possible_num_samples.append(num_samples)
|
419 |
-
elif prompt is not None:
|
420 |
-
possible_num_samples.append(prompt.shape[0])
|
421 |
-
elif conditions:
|
422 |
-
possible_num_samples.append(len(conditions))
|
423 |
-
else:
|
424 |
-
possible_num_samples.append(1)
|
425 |
-
assert [x == possible_num_samples[0] for x in possible_num_samples], "Inconsitent inputs shapes"
|
426 |
-
num_samples = possible_num_samples[0]
|
427 |
-
|
428 |
-
# below we create set of conditions: one conditional and one unconditional
|
429 |
-
# to do that we merge the regular condition together with the null condition
|
430 |
-
# we then do 1 forward pass instead of 2.
|
431 |
-
# the reason for that is two-fold:
|
432 |
-
# 1. it is about x2 faster than doing 2 forward passes
|
433 |
-
# 2. avoid the streaming API treating the 2 passes as part of different time steps
|
434 |
-
# We also support doing two different passes, in particular to ensure that
|
435 |
-
# the padding structure is exactly the same between train anf test.
|
436 |
-
# With a batch size of 1, this can be slower though.
|
437 |
-
cfg_conditions: CFGConditions
|
438 |
-
two_step_cfg = self.two_step_cfg if two_step_cfg is None else two_step_cfg
|
439 |
-
if conditions:
|
440 |
-
null_conditions = ClassifierFreeGuidanceDropout(p=1.0)(conditions)
|
441 |
-
if two_step_cfg:
|
442 |
-
cfg_conditions = (
|
443 |
-
self.condition_provider(self.condition_provider.tokenize(conditions)),
|
444 |
-
self.condition_provider(self.condition_provider.tokenize(null_conditions)),
|
445 |
-
)
|
446 |
-
else:
|
447 |
-
conditions = conditions + null_conditions
|
448 |
-
tokenized = self.condition_provider.tokenize(conditions)
|
449 |
-
cfg_conditions = self.condition_provider(tokenized)
|
450 |
-
else:
|
451 |
-
cfg_conditions = {}
|
452 |
-
|
453 |
-
if prompt is None:
|
454 |
-
assert num_samples > 0
|
455 |
-
prompt = torch.zeros((num_samples, self.num_codebooks, 0), dtype=torch.long, device=device)
|
456 |
-
|
457 |
-
B, K, T = prompt.shape
|
458 |
-
start_offset = T
|
459 |
-
assert start_offset < max_gen_len
|
460 |
-
|
461 |
-
pattern = self.pattern_provider.get_pattern(max_gen_len)
|
462 |
-
# this token is used as default value for codes that are not generated yet
|
463 |
-
unknown_token = -1
|
464 |
-
|
465 |
-
# we generate codes up to the max_gen_len that will be mapped to the pattern sequence
|
466 |
-
gen_codes = torch.full((B, K, max_gen_len), unknown_token, dtype=torch.long, device=device)
|
467 |
-
# filling the gen_codes with the prompt if needed
|
468 |
-
gen_codes[..., :start_offset] = prompt
|
469 |
-
# create the gen_sequence with proper interleaving from the pattern: [B, K, S]
|
470 |
-
gen_sequence, indexes, mask = pattern.build_pattern_sequence(gen_codes, self.special_token_id)
|
471 |
-
# retrieve the start_offset in the sequence:
|
472 |
-
# it is the first sequence step that contains the `start_offset` timestep
|
473 |
-
start_offset_sequence = pattern.get_first_step_with_timesteps(start_offset)
|
474 |
-
assert start_offset_sequence is not None
|
475 |
-
|
476 |
-
with self.streaming():
|
477 |
-
unconditional_state = self.get_streaming_state()
|
478 |
-
prev_offset = 0
|
479 |
-
gen_sequence_len = gen_sequence.shape[-1] # gen_sequence shape is [B, K, S]
|
480 |
-
for offset in range(start_offset_sequence, gen_sequence_len):
|
481 |
-
# get current sequence (note that the streaming API is providing the caching over previous offsets)
|
482 |
-
curr_sequence = gen_sequence[..., prev_offset:offset]
|
483 |
-
curr_mask = mask[None, ..., prev_offset:offset].expand(B, -1, -1)
|
484 |
-
if check:
|
485 |
-
# check coherence between mask and sequence
|
486 |
-
assert (curr_sequence == torch.where(curr_mask, curr_sequence, self.special_token_id)).all()
|
487 |
-
# should never happen as gen_sequence is filled progressively
|
488 |
-
assert not (curr_sequence == unknown_token).any()
|
489 |
-
# sample next token from the model, next token shape is [B, K, 1]
|
490 |
-
next_token = self._sample_next_token(
|
491 |
-
curr_sequence, cfg_conditions, unconditional_state, use_sampling, temp, top_k, top_p,
|
492 |
-
cfg_coef=cfg_coef)
|
493 |
-
# ensure the tokens that should be masked are properly set to special_token_id
|
494 |
-
# as the model never output special_token_id
|
495 |
-
valid_mask = mask[..., offset:offset+1].expand(B, -1, -1)
|
496 |
-
next_token[~valid_mask] = self.special_token_id
|
497 |
-
# ensure we don't overwrite prompt tokens, we only write over unknown tokens
|
498 |
-
# (then mask tokens should be left as is as well, which is correct)
|
499 |
-
gen_sequence[..., offset:offset+1] = torch.where(
|
500 |
-
gen_sequence[..., offset:offset+1] == unknown_token,
|
501 |
-
next_token, gen_sequence[..., offset:offset+1]
|
502 |
-
)
|
503 |
-
prev_offset = offset
|
504 |
-
if callback is not None:
|
505 |
-
callback(1 + offset - start_offset_sequence, gen_sequence_len - start_offset_sequence)
|
506 |
-
unconditional_state.clear()
|
507 |
-
|
508 |
-
# ensure sequence has been entirely filled
|
509 |
-
assert not (gen_sequence == unknown_token).any()
|
510 |
-
# ensure gen_sequence pattern and mask are matching
|
511 |
-
# which means the gen_sequence is valid according to the pattern
|
512 |
-
assert (
|
513 |
-
gen_sequence == torch.where(mask[None, ...].expand(B, -1, -1), gen_sequence, self.special_token_id)
|
514 |
-
).all()
|
515 |
-
# get back the codes, trimming the prompt if needed and cutting potentially incomplete timesteps
|
516 |
-
out_codes, out_indexes, out_mask = pattern.revert_pattern_sequence(gen_sequence, special_token=unknown_token)
|
517 |
-
|
518 |
-
# sanity checks over the returned codes and corresponding masks
|
519 |
-
assert (out_codes[..., :max_gen_len] != unknown_token).all()
|
520 |
-
assert (out_mask[..., :max_gen_len] == 1).all()
|
521 |
-
|
522 |
-
out_start_offset = start_offset if remove_prompts else 0
|
523 |
-
out_codes = out_codes[..., out_start_offset:max_gen_len]
|
524 |
-
|
525 |
-
# ensure the returned codes are all valid
|
526 |
-
assert (out_codes >= 0).all() and (out_codes <= self.card).all()
|
527 |
-
return out_codes
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Dimalker/Faceswapper/roop/typing.py
DELETED
@@ -1,7 +0,0 @@
|
|
1 |
-
from typing import Any
|
2 |
-
|
3 |
-
from insightface.app.common import Face
|
4 |
-
import numpy
|
5 |
-
|
6 |
-
Face = Face
|
7 |
-
Frame = numpy.ndarray[Any, Any]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DragGan/DragGan-Inversion/stylegan_human/torch_utils/ops/grid_sample_gradfix.py
DELETED
@@ -1,93 +0,0 @@
|
|
1 |
-
# Copyright (c) SenseTime Research. All rights reserved.
|
2 |
-
|
3 |
-
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
|
4 |
-
#
|
5 |
-
# NVIDIA CORPORATION and its licensors retain all intellectual property
|
6 |
-
# and proprietary rights in and to this software, related documentation
|
7 |
-
# and any modifications thereto. Any use, reproduction, disclosure or
|
8 |
-
# distribution of this software and related documentation without an express
|
9 |
-
# license agreement from NVIDIA CORPORATION is strictly prohibited.
|
10 |
-
|
11 |
-
"""Custom replacement for `torch.nn.functional.grid_sample` that
|
12 |
-
supports arbitrarily high order gradients between the input and output.
|
13 |
-
Only works on 2D images and assumes
|
14 |
-
`mode='bilinear'`, `padding_mode='zeros'`, `align_corners=False`."""
|
15 |
-
|
16 |
-
import warnings
|
17 |
-
import torch
|
18 |
-
|
19 |
-
# pylint: disable=redefined-builtin
|
20 |
-
# pylint: disable=arguments-differ
|
21 |
-
# pylint: disable=protected-access
|
22 |
-
|
23 |
-
# ----------------------------------------------------------------------------
|
24 |
-
|
25 |
-
enabled = False # Enable the custom op by setting this to true.
|
26 |
-
|
27 |
-
# ----------------------------------------------------------------------------
|
28 |
-
|
29 |
-
|
30 |
-
def grid_sample(input, grid):
|
31 |
-
if _should_use_custom_op():
|
32 |
-
return _GridSample2dForward.apply(input, grid)
|
33 |
-
return torch.nn.functional.grid_sample(input=input, grid=grid, mode='bilinear', padding_mode='zeros', align_corners=False)
|
34 |
-
|
35 |
-
# ----------------------------------------------------------------------------
|
36 |
-
|
37 |
-
|
38 |
-
def _should_use_custom_op():
|
39 |
-
if not enabled:
|
40 |
-
return False
|
41 |
-
if any(torch.__version__.startswith(x) for x in ['1.7.', '1.8.', '1.9']):
|
42 |
-
return True
|
43 |
-
warnings.warn(
|
44 |
-
f'grid_sample_gradfix not supported on PyTorch {torch.__version__}. Falling back to torch.nn.functional.grid_sample().')
|
45 |
-
return False
|
46 |
-
|
47 |
-
# ----------------------------------------------------------------------------
|
48 |
-
|
49 |
-
|
50 |
-
class _GridSample2dForward(torch.autograd.Function):
|
51 |
-
@staticmethod
|
52 |
-
def forward(ctx, input, grid):
|
53 |
-
assert input.ndim == 4
|
54 |
-
assert grid.ndim == 4
|
55 |
-
output = torch.nn.functional.grid_sample(
|
56 |
-
input=input, grid=grid, mode='bilinear', padding_mode='zeros', align_corners=False)
|
57 |
-
ctx.save_for_backward(input, grid)
|
58 |
-
return output
|
59 |
-
|
60 |
-
@staticmethod
|
61 |
-
def backward(ctx, grad_output):
|
62 |
-
input, grid = ctx.saved_tensors
|
63 |
-
grad_input, grad_grid = _GridSample2dBackward.apply(
|
64 |
-
grad_output, input, grid)
|
65 |
-
return grad_input, grad_grid
|
66 |
-
|
67 |
-
# ----------------------------------------------------------------------------
|
68 |
-
|
69 |
-
|
70 |
-
class _GridSample2dBackward(torch.autograd.Function):
|
71 |
-
@staticmethod
|
72 |
-
def forward(ctx, grad_output, input, grid):
|
73 |
-
op = torch._C._jit_get_operation('aten::grid_sampler_2d_backward')
|
74 |
-
grad_input, grad_grid = op(grad_output, input, grid, 0, 0, False)
|
75 |
-
ctx.save_for_backward(grid)
|
76 |
-
return grad_input, grad_grid
|
77 |
-
|
78 |
-
@staticmethod
|
79 |
-
def backward(ctx, grad2_grad_input, grad2_grad_grid):
|
80 |
-
_ = grad2_grad_grid # unused
|
81 |
-
grid, = ctx.saved_tensors
|
82 |
-
grad2_grad_output = None
|
83 |
-
grad2_input = None
|
84 |
-
grad2_grid = None
|
85 |
-
|
86 |
-
if ctx.needs_input_grad[0]:
|
87 |
-
grad2_grad_output = _GridSample2dForward.apply(
|
88 |
-
grad2_grad_input, grid)
|
89 |
-
|
90 |
-
assert not ctx.needs_input_grad[2]
|
91 |
-
return grad2_grad_output, grad2_input, grad2_grid
|
92 |
-
|
93 |
-
# ----------------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Dute8788/anime/app.py
DELETED
@@ -1,52 +0,0 @@
|
|
1 |
-
import gradio as gr
|
2 |
-
import huggingface_hub
|
3 |
-
import onnxruntime as rt
|
4 |
-
import numpy as np
|
5 |
-
import cv2
|
6 |
-
|
7 |
-
|
8 |
-
def get_mask(img, s=1024):
|
9 |
-
img = (img / 255).astype(np.float32)
|
10 |
-
h, w = h0, w0 = img.shape[:-1]
|
11 |
-
h, w = (s, int(s * w / h)) if h > w else (int(s * h / w), s)
|
12 |
-
ph, pw = s - h, s - w
|
13 |
-
img_input = np.zeros([s, s, 3], dtype=np.float32)
|
14 |
-
img_input[ph // 2:ph // 2 + h, pw // 2:pw // 2 + w] = cv2.resize(img, (w, h))
|
15 |
-
img_input = np.transpose(img_input, (2, 0, 1))
|
16 |
-
img_input = img_input[np.newaxis, :]
|
17 |
-
mask = rmbg_model.run(None, {'img': img_input})[0][0]
|
18 |
-
mask = np.transpose(mask, (1, 2, 0))
|
19 |
-
mask = mask[ph // 2:ph // 2 + h, pw // 2:pw // 2 + w]
|
20 |
-
mask = cv2.resize(mask, (w0, h0))[:, :, np.newaxis]
|
21 |
-
return mask
|
22 |
-
|
23 |
-
|
24 |
-
def rmbg_fn(img):
|
25 |
-
mask = get_mask(img)
|
26 |
-
img = (mask * img + 255 * (1 - mask)).astype(np.uint8)
|
27 |
-
mask = (mask * 255).astype(np.uint8)
|
28 |
-
img = np.concatenate([img, mask], axis=2, dtype=np.uint8)
|
29 |
-
mask = mask.repeat(3, axis=2)
|
30 |
-
return mask, img
|
31 |
-
|
32 |
-
|
33 |
-
if __name__ == "__main__":
|
34 |
-
providers = ['CUDAExecutionProvider', 'CPUExecutionProvider']
|
35 |
-
model_path = huggingface_hub.hf_hub_download("skytnt/anime-seg", "isnetis.onnx")
|
36 |
-
rmbg_model = rt.InferenceSession(model_path, providers=providers)
|
37 |
-
app = gr.Blocks()
|
38 |
-
with app:
|
39 |
-
gr.Markdown("# Anime Remove Background\n\n"
|
40 |
-
"\n\n"
|
41 |
-
"demo for [https://github.com/SkyTNT/anime-segmentation/](https://github.com/SkyTNT/anime-segmentation/)")
|
42 |
-
with gr.Row():
|
43 |
-
with gr.Column():
|
44 |
-
input_img = gr.Image(label="input image")
|
45 |
-
examples_data = [[f"examples/{x:02d}.jpg"] for x in range(1, 4)]
|
46 |
-
examples = gr.Dataset(components=[input_img], samples=examples_data)
|
47 |
-
run_btn = gr.Button(variant="primary")
|
48 |
-
output_mask = gr.Image(label="mask")
|
49 |
-
output_img = gr.Image(label="result", image_mode="RGBA")
|
50 |
-
examples.click(lambda x: x[0], [examples], [input_img])
|
51 |
-
run_btn.click(rmbg_fn, [input_img], [output_mask, output_img])
|
52 |
-
app.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/ECCV2022/bytetrack/tutorials/transtrack/tracker.py
DELETED
@@ -1,191 +0,0 @@
|
|
1 |
-
"""
|
2 |
-
Copyright (c) https://github.com/xingyizhou/CenterTrack
|
3 |
-
Modified by Peize Sun, Rufeng Zhang
|
4 |
-
"""
|
5 |
-
# coding: utf-8
|
6 |
-
import torch
|
7 |
-
from scipy.optimize import linear_sum_assignment
|
8 |
-
from util import box_ops
|
9 |
-
import copy
|
10 |
-
|
11 |
-
class Tracker(object):
|
12 |
-
def __init__(self, score_thresh, max_age=32):
|
13 |
-
self.score_thresh = score_thresh
|
14 |
-
self.low_thresh = 0.2
|
15 |
-
self.high_thresh = score_thresh + 0.1
|
16 |
-
self.max_age = max_age
|
17 |
-
self.id_count = 0
|
18 |
-
self.tracks_dict = dict()
|
19 |
-
self.tracks = list()
|
20 |
-
self.unmatched_tracks = list()
|
21 |
-
self.reset_all()
|
22 |
-
|
23 |
-
def reset_all(self):
|
24 |
-
self.id_count = 0
|
25 |
-
self.tracks_dict = dict()
|
26 |
-
self.tracks = list()
|
27 |
-
self.unmatched_tracks = list()
|
28 |
-
|
29 |
-
def init_track(self, results):
|
30 |
-
|
31 |
-
scores = results["scores"]
|
32 |
-
classes = results["labels"]
|
33 |
-
bboxes = results["boxes"] # x1y1x2y2
|
34 |
-
|
35 |
-
ret = list()
|
36 |
-
ret_dict = dict()
|
37 |
-
for idx in range(scores.shape[0]):
|
38 |
-
if scores[idx] >= self.score_thresh:
|
39 |
-
self.id_count += 1
|
40 |
-
obj = dict()
|
41 |
-
obj["score"] = float(scores[idx])
|
42 |
-
obj["bbox"] = bboxes[idx, :].cpu().numpy().tolist()
|
43 |
-
obj["tracking_id"] = self.id_count
|
44 |
-
obj['active'] = 1
|
45 |
-
obj['age'] = 1
|
46 |
-
ret.append(obj)
|
47 |
-
ret_dict[idx] = obj
|
48 |
-
|
49 |
-
self.tracks = ret
|
50 |
-
self.tracks_dict = ret_dict
|
51 |
-
return copy.deepcopy(ret)
|
52 |
-
|
53 |
-
|
54 |
-
def step(self, output_results):
|
55 |
-
scores = output_results["scores"]
|
56 |
-
bboxes = output_results["boxes"] # x1y1x2y2
|
57 |
-
track_bboxes = output_results["track_boxes"] if "track_boxes" in output_results else None # x1y1x2y2
|
58 |
-
|
59 |
-
results = list()
|
60 |
-
results_dict = dict()
|
61 |
-
results_second = list()
|
62 |
-
|
63 |
-
tracks = list()
|
64 |
-
|
65 |
-
for idx in range(scores.shape[0]):
|
66 |
-
if idx in self.tracks_dict and track_bboxes is not None:
|
67 |
-
self.tracks_dict[idx]["bbox"] = track_bboxes[idx, :].cpu().numpy().tolist()
|
68 |
-
|
69 |
-
if scores[idx] >= self.score_thresh:
|
70 |
-
obj = dict()
|
71 |
-
obj["score"] = float(scores[idx])
|
72 |
-
obj["bbox"] = bboxes[idx, :].cpu().numpy().tolist()
|
73 |
-
results.append(obj)
|
74 |
-
results_dict[idx] = obj
|
75 |
-
elif scores[idx] >= self.low_thresh:
|
76 |
-
second_obj = dict()
|
77 |
-
second_obj["score"] = float(scores[idx])
|
78 |
-
second_obj["bbox"] = bboxes[idx, :].cpu().numpy().tolist()
|
79 |
-
results_second.append(second_obj)
|
80 |
-
results_dict[idx] = second_obj
|
81 |
-
|
82 |
-
tracks = [v for v in self.tracks_dict.values()] + self.unmatched_tracks
|
83 |
-
# for trackss in tracks:
|
84 |
-
# print(trackss.keys())
|
85 |
-
N = len(results)
|
86 |
-
M = len(tracks)
|
87 |
-
|
88 |
-
ret = list()
|
89 |
-
unmatched_tracks = [t for t in range(M)]
|
90 |
-
unmatched_dets = [d for d in range(N)]
|
91 |
-
|
92 |
-
if N > 0 and M > 0:
|
93 |
-
det_box = torch.stack([torch.tensor(obj['bbox']) for obj in results], dim=0) # N x 4
|
94 |
-
track_box = torch.stack([torch.tensor(obj['bbox']) for obj in tracks], dim=0) # M x 4
|
95 |
-
cost_bbox = 1.0 - box_ops.generalized_box_iou(det_box, track_box) # N x M
|
96 |
-
|
97 |
-
matched_indices = linear_sum_assignment(cost_bbox)
|
98 |
-
unmatched_dets = [d for d in range(N) if not (d in matched_indices[0])]
|
99 |
-
unmatched_tracks = [d for d in range(M) if not (d in matched_indices[1])]
|
100 |
-
|
101 |
-
matches = [[],[]]
|
102 |
-
for (m0, m1) in zip(matched_indices[0], matched_indices[1]):
|
103 |
-
if cost_bbox[m0, m1] > 1.2:
|
104 |
-
unmatched_dets.append(m0)
|
105 |
-
unmatched_tracks.append(m1)
|
106 |
-
else:
|
107 |
-
matches[0].append(m0)
|
108 |
-
matches[1].append(m1)
|
109 |
-
|
110 |
-
for (m0, m1) in zip(matches[0], matches[1]):
|
111 |
-
track = results[m0]
|
112 |
-
track['tracking_id'] = tracks[m1]['tracking_id']
|
113 |
-
track['age'] = 1
|
114 |
-
track['active'] = 1
|
115 |
-
ret.append(track)
|
116 |
-
|
117 |
-
# second association
|
118 |
-
N_second = len(results_second)
|
119 |
-
unmatched_tracks_obj = list()
|
120 |
-
for i in unmatched_tracks:
|
121 |
-
#print(tracks[i].keys())
|
122 |
-
track = tracks[i]
|
123 |
-
if track['active'] == 1:
|
124 |
-
unmatched_tracks_obj.append(track)
|
125 |
-
M_second = len(unmatched_tracks_obj)
|
126 |
-
unmatched_tracks_second = [t for t in range(M_second)]
|
127 |
-
|
128 |
-
if N_second > 0 and M_second > 0:
|
129 |
-
det_box_second = torch.stack([torch.tensor(obj['bbox']) for obj in results_second], dim=0) # N_second x 4
|
130 |
-
track_box_second = torch.stack([torch.tensor(obj['bbox']) for obj in unmatched_tracks_obj], dim=0) # M_second x 4
|
131 |
-
cost_bbox_second = 1.0 - box_ops.generalized_box_iou(det_box_second, track_box_second) # N_second x M_second
|
132 |
-
|
133 |
-
matched_indices_second = linear_sum_assignment(cost_bbox_second)
|
134 |
-
unmatched_tracks_second = [d for d in range(M_second) if not (d in matched_indices_second[1])]
|
135 |
-
|
136 |
-
matches_second = [[],[]]
|
137 |
-
for (m0, m1) in zip(matched_indices_second[0], matched_indices_second[1]):
|
138 |
-
if cost_bbox_second[m0, m1] > 0.8:
|
139 |
-
unmatched_tracks_second.append(m1)
|
140 |
-
else:
|
141 |
-
matches_second[0].append(m0)
|
142 |
-
matches_second[1].append(m1)
|
143 |
-
|
144 |
-
for (m0, m1) in zip(matches_second[0], matches_second[1]):
|
145 |
-
track = results_second[m0]
|
146 |
-
track['tracking_id'] = unmatched_tracks_obj[m1]['tracking_id']
|
147 |
-
track['age'] = 1
|
148 |
-
track['active'] = 1
|
149 |
-
ret.append(track)
|
150 |
-
|
151 |
-
for i in unmatched_dets:
|
152 |
-
trackd = results[i]
|
153 |
-
if trackd["score"] >= self.high_thresh:
|
154 |
-
self.id_count += 1
|
155 |
-
trackd['tracking_id'] = self.id_count
|
156 |
-
trackd['age'] = 1
|
157 |
-
trackd['active'] = 1
|
158 |
-
ret.append(trackd)
|
159 |
-
|
160 |
-
# ------------------------------------------------------ #
|
161 |
-
ret_unmatched_tracks = []
|
162 |
-
|
163 |
-
for j in unmatched_tracks:
|
164 |
-
track = tracks[j]
|
165 |
-
if track['active'] == 0 and track['age'] < self.max_age:
|
166 |
-
track['age'] += 1
|
167 |
-
track['active'] = 0
|
168 |
-
ret.append(track)
|
169 |
-
ret_unmatched_tracks.append(track)
|
170 |
-
|
171 |
-
for i in unmatched_tracks_second:
|
172 |
-
track = unmatched_tracks_obj[i]
|
173 |
-
if track['age'] < self.max_age:
|
174 |
-
track['age'] += 1
|
175 |
-
track['active'] = 0
|
176 |
-
ret.append(track)
|
177 |
-
ret_unmatched_tracks.append(track)
|
178 |
-
|
179 |
-
# for i in unmatched_tracks:
|
180 |
-
# track = tracks[i]
|
181 |
-
# if track['age'] < self.max_age:
|
182 |
-
# track['age'] += 1
|
183 |
-
# track['active'] = 0
|
184 |
-
# ret.append(track)
|
185 |
-
# ret_unmatched_tracks.append(track)
|
186 |
-
#print(len(ret_unmatched_tracks))
|
187 |
-
|
188 |
-
self.tracks = ret
|
189 |
-
self.tracks_dict = {red_ind:red for red_ind, red in results_dict.items() if 'tracking_id' in red}
|
190 |
-
self.unmatched_tracks = ret_unmatched_tracks
|
191 |
-
return copy.deepcopy(ret)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/EPFL-VILAB/MultiMAE/utils/pos_embed.py
DELETED
@@ -1,58 +0,0 @@
|
|
1 |
-
# Copyright (c) EPFL VILAB.
|
2 |
-
# All rights reserved.
|
3 |
-
|
4 |
-
# This source code is licensed under the license found in the
|
5 |
-
# LICENSE file in the root directory of this source tree.
|
6 |
-
# --------------------------------------------------------
|
7 |
-
# Based on BEiT, timm, DINO DeiT and MAE-priv code bases
|
8 |
-
# https://github.com/microsoft/unilm/tree/master/beit
|
9 |
-
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
|
10 |
-
# https://github.com/facebookresearch/deit
|
11 |
-
# https://github.com/facebookresearch/dino
|
12 |
-
# https://github.com/BUPT-PRIV/MAE-priv
|
13 |
-
# --------------------------------------------------------
|
14 |
-
|
15 |
-
import re
|
16 |
-
|
17 |
-
import torch
|
18 |
-
|
19 |
-
|
20 |
-
def interpolate_pos_embed_vit(model, checkpoint_model):
|
21 |
-
if 'pos_embed' in checkpoint_model:
|
22 |
-
pos_embed_checkpoint = checkpoint_model['pos_embed']
|
23 |
-
embedding_size = pos_embed_checkpoint.shape[-1]
|
24 |
-
num_patches = model.patch_embed.num_patches
|
25 |
-
num_extra_tokens = model.pos_embed.shape[-2] - num_patches
|
26 |
-
# height (== width) for the checkpoint position embedding
|
27 |
-
orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
|
28 |
-
# height (== width) for the new position embedding
|
29 |
-
new_size = int(num_patches ** 0.5)
|
30 |
-
# class_token and dist_token are kept unchanged
|
31 |
-
if orig_size != new_size:
|
32 |
-
print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size))
|
33 |
-
extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
|
34 |
-
# only the position tokens are interpolated
|
35 |
-
pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
|
36 |
-
pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
|
37 |
-
pos_tokens = torch.nn.functional.interpolate(
|
38 |
-
pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
|
39 |
-
pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
|
40 |
-
new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
|
41 |
-
checkpoint_model['pos_embed'] = new_pos_embed
|
42 |
-
|
43 |
-
|
44 |
-
def interpolate_pos_embed_multimae(model, checkpoint_model):
|
45 |
-
pattern = "input_adapters\.(.*)\.pos_emb"
|
46 |
-
matched_keys = [k for k in checkpoint_model if bool(re.match(pattern, k))]
|
47 |
-
|
48 |
-
for key in matched_keys:
|
49 |
-
domain = re.match(pattern, key).group(1) # group(0) is entire matched regex
|
50 |
-
if getattr(model.input_adapters, domain, None) is not None:
|
51 |
-
pos_embed_checkpoint = checkpoint_model[key]
|
52 |
-
_, _, orig_H, orig_W = pos_embed_checkpoint.shape
|
53 |
-
_, _, new_H, new_W = getattr(model.input_adapters, domain).pos_emb.shape
|
54 |
-
if (orig_H != new_H) or (orig_W != new_W):
|
55 |
-
print(f"Key {key}: Position interpolate from {orig_H}x{orig_W} to {new_H}x{new_W}")
|
56 |
-
pos_embed_checkpoint = torch.nn.functional.interpolate(
|
57 |
-
pos_embed_checkpoint, size=(new_H, new_W), mode='bicubic', align_corners=False)
|
58 |
-
checkpoint_model[key] = pos_embed_checkpoint
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Eddycrack864/Applio-Inference/infer/modules/vc/utils.py
DELETED
@@ -1,42 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
import re
|
3 |
-
from fairseq import checkpoint_utils
|
4 |
-
|
5 |
-
|
6 |
-
def get_index_path_from_model(sid):
|
7 |
-
sid0strip = re.sub(r'\.pth|\.onnx$', '', sid)
|
8 |
-
sid0name = os.path.split(sid0strip)[-1] # Extract only the name, not the directory
|
9 |
-
|
10 |
-
# Check if the sid0strip has the specific ending format _eXXX_sXXX
|
11 |
-
if re.match(r'.+_e\d+_s\d+$', sid0name):
|
12 |
-
base_model_name = sid0name.rsplit('_', 2)[0]
|
13 |
-
else:
|
14 |
-
base_model_name = sid0name
|
15 |
-
|
16 |
-
return next(
|
17 |
-
(
|
18 |
-
f
|
19 |
-
for f in [
|
20 |
-
os.path.join(root, name)
|
21 |
-
for root, _, files in os.walk(os.getenv("index_root"), topdown=False)
|
22 |
-
for name in files
|
23 |
-
if name.endswith(".index") and "trained" not in name
|
24 |
-
]
|
25 |
-
if base_model_name in f
|
26 |
-
),
|
27 |
-
"",
|
28 |
-
)
|
29 |
-
|
30 |
-
|
31 |
-
def load_hubert(config):
|
32 |
-
models, _, _ = checkpoint_utils.load_model_ensemble_and_task(
|
33 |
-
["assets/hubert/hubert_base.pt"],
|
34 |
-
suffix="",
|
35 |
-
)
|
36 |
-
hubert_model = models[0]
|
37 |
-
hubert_model = hubert_model.to(config.device)
|
38 |
-
if config.is_half:
|
39 |
-
hubert_model = hubert_model.half()
|
40 |
-
else:
|
41 |
-
hubert_model = hubert_model.float()
|
42 |
-
return hubert_model.eval()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|