Commit d01490c
Parent: fa20475

Update parquet files (step 54 of 249)

This view is limited to 50 files because it contains too many changes.
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Bangla Hasir Natok Script Pdf Free 120 Get Ready for Some Serious Fun with These Comedy Scripts.md +0 -115
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/CRACK Adobe Acrobat XI Pro 11.0.22 Multilingual Crack [SadeemPC].md +0 -48
- spaces/1gistliPinn/ChatGPT4/Examples/Adobe.acrobat.pro.x.v10.0.multilingual.incl.keymaker-core 121.md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/Allavsoft Video Downloader Converter 3.17.9.7206 Crack _TOP_ With License Key.md +0 -8
- spaces/1gistliPinn/ChatGPT4/Examples/Descargar Contpaq 2005 Gratis.md +0 -102
- spaces/1gistliPinn/ChatGPT4/Examples/Free Download [BETTER] Hindi Movie Kala Patthar.md +0 -8
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Adobe Media Encoder 2020 How to Download and Install for Free.md +0 -144
- spaces/1phancelerku/anime-remove-background/1945 Air Force APK Mod A Classic Shooting Game with Endless Possibilities.md +0 -107
- spaces/1phancelerku/anime-remove-background/Challenge Yourself with Car Parking Multiplayer Levels on Play Store.md +0 -119
- spaces/1phancelerku/anime-remove-background/Como Conseguir Robux Infinito no ROBLOX 2022 Download Grtis e Seguro.md +0 -89
- spaces/1phancelerku/anime-remove-background/Download Arceus X V3.1.0 Beta and Enjoy Roblox Like Never Before.md +0 -145
- spaces/1phancelerku/anime-remove-background/Download and Play PS3 Games on iOS with RetroArch and RPCS3.md +0 -183
- spaces/2ndelement/voicevox/build_util/create_venv_and_generate_licenses.bash +0 -24
- spaces/2ndelement/voicevox/voicevox_engine/dev/synthesis_engine/mock.py +0 -136
- spaces/4Taps/SadTalker/src/face3d/util/detect_lm68.py +0 -106
- spaces/4com/stable-diffusion/README.md +0 -13
- spaces/AIFILMS/generate_human_motion/VQ-Trans/models/resnet.py +0 -82
- spaces/ASJMO/freegpt/client/html/index.html +0 -135
- spaces/Adapter/T2I-Adapter/ldm/models/diffusion/ddpm.py +0 -1313
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/canvas/Canvas.js +0 -2
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/slider/UpdateIndicator.js +0 -67
- spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/utils/utils_amp.py +0 -88
- spaces/Alpaca233/ai-stable-diffusion-Text-to-Image/app.py +0 -3
- spaces/Aluxes/anime-remove-background/app.py +0 -52
- spaces/Amrrs/portfolio/README.md +0 -36
- spaces/AnTo2209/3D_Zeroshot_Neural_Style_Transfer/src/decoder/tensor_base.py +0 -458
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/models/overview.md +0 -12
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/tiled_upscaling.py +0 -298
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/audio_diffusion/__init__.py +0 -0
- spaces/Andy1621/uniformer_image_detection/configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py +0 -46
- spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/atss_head.py +0 -689
- spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/point_rend_roi_head.py +0 -218
- spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r18-d8_512x1024_80k_cityscapes.py +0 -9
- spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_40k_cityscapes.py +0 -9
- spaces/AnimalEquality/chatbot/_proc/_docs/lchain_tool.html +0 -937
- spaces/AnthonyTruchetPoC/persistent-docker/src/athai/hello.py +0 -27
- spaces/Artrajz/vits-simple-api/README_zh.md +0 -433
- spaces/Artrajz/vits-simple-api/contants.py +0 -7
- spaces/Asahi402/Real-CUGAN/upcunet_v3.py +0 -714
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/cache.py +0 -272
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/requests/packages.py +0 -16
- spaces/AzinZ/vitscn/models.py +0 -534
- spaces/Bart92/RVC_HF/infer/lib/infer_pack/models.py +0 -1174
- spaces/Benson/text-generation/Examples/Arena Breakout Ios Descargar.md +0 -58
- spaces/BramVanroy/llama-2-13b-chat-dutch-space/README.md +0 -21
- spaces/CVPR/LIVE/thrust/thrust/detail/execution_policy.h +0 -77
- spaces/CVPR/WALT/mmdet/models/utils/positional_encoding.py +0 -150
- spaces/CVPR/unicl-zero-shot-img-recog/model/model.py +0 -215
- spaces/Caoyunkang/Segment-Any-Anomaly/SAM/segment_anything/utils/amg.py +0 -346
- spaces/CarlDennis/Lovelive-VITS-JPZH/text/cleaners.py +0 -87
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Bangla Hasir Natok Script Pdf Free 120 Get Ready for Some Serious Fun with These Comedy Scripts.md
DELETED
@@ -1,115 +0,0 @@
| Outline heading | Subpoints |
| --- | --- |
| … (heading truncated) | How to download Bangla hasir natok script pdf free 120 from various sources? What are some of the features and benefits of Bangla hasir natok script pdf free 120? Conclusion: A summary of the main points and a call to action. |
| H2: What are Bangla hasir natok or Bengali comedy plays and why are they popular? | Definition and history of Bangla hasir natok or Bengali comedy plays. Examples of famous Bangla hasir natok or Bengali comedy plays and their writers. Reasons for the popularity of Bangla hasir natok or Bengali comedy plays among different audiences. |
| H2: How to download Bangla hasir natok script pdf free 120 from various sources? | A list of websites that offer Bangla hasir natok script pdf free 120 for download. A step-by-step guide on how to download Bangla hasir natok script pdf free 120 from each website. A comparison of the quality and quantity of Bangla hasir natok script pdf free 120 available on each website. |
| H2: What are some of the features and benefits of Bangla hasir natok script pdf free 120? | A description of the content and format of Bangla hasir natok script pdf free 120. A table that shows the titles, genres, themes, and lengths of some of the plays included in Bangla hasir natok script pdf free 120. A discussion of how Bangla hasir natok script pdf free 120 can be used for entertainment, education, and inspiration. |
| H2: Conclusion: A summary of the main points and a call to action. | A recap of what Bangla hasir natok or Bengali comedy plays are and why they are popular. A reminder of how to download Bangla hasir natok script pdf free 120 from various sources. A suggestion to read and enjoy Bangla hasir natok script pdf free 120 and share it with others. |

**Table 2: Article with HTML formatting**

<h1>Bangla Hasir Natok Script Pdf Free 120: A Collection of Hilarious Plays for All Ages</h1>

<p>If you are looking for some fun and laughter, you might want to check out Bangla hasir natok or Bengali comedy plays. These are short plays written and performed in the Bengali language, often with witty dialogues, humorous situations, and social satire. They are a popular form of entertainment in Bangladesh and West Bengal, where they are staged in theatres, at festivals, on TV channels, and on online platforms.</p>

<p>In this article, we will tell you what Bangla hasir natok or Bengali comedy plays are and why they are popular. We will also show you how to download Bangla hasir natok script pdf free 120 from various sources. This is a collection of 120 hilarious plays for all ages that you can read and enjoy anytime, anywhere. We will also discuss some of the features and benefits of Bangla hasir natok script pdf free 120 and how you can use it for entertainment, education, and inspiration.</p>

<h2>Bangla Hasir Natok Script Pdf Free 120</h2><br /><p><b><b>Download File</b> ————— <a href="https://byltly.com/2uKx1a">https://byltly.com/2uKx1a</a></b></p><br /><br />

<h2>What are Bangla hasir natok or Bengali comedy plays and why are they popular?</h2>

<p>Bangla hasir natok or Bengali comedy plays are a type of drama that originated in Bengal in the late 19th century. They were influenced by British colonial rule, the Bengali Renaissance, and the folk theatre traditions of Bengal. They often deal with social issues, political satire, family conflicts, romantic comedy, and absurd humor.</p>

<p>Some of the famous writers of Bangla hasir natok or Bengali comedy plays include Rabindranath Tagore, Sukumar Ray, Manoj Mitra, Parimal Tribedi, Mamata Mitra, Amalendu Chatterjee, Rupak Saha, etc. Some of their popular plays include Chotushkone (Four Corners), Jhalapala (Water Splash), Bharate Chai (I Want a Bride), Obak (Surprised), Rater Rajanigandha (Night Jasmine), etc.</p>

<p>Bangla hasir natok or Bengali comedy plays are popular among different audiences because they are entertaining, engaging, and enlightening. They make people laugh and think at the same time. They reflect the culture, values, and problems of Bengal and its people. They also showcase the creativity, talent, and diversity of Bengali writers and actors.</p>

<p>Bangla comedy drama script pdf free download<br />
Bengali hasir natok script pdf free downloadinstmank<br />
Bangla sruti natok script pdf free download<br />
Bangla hasir natok script by Sukumar Ray<br />
Bengali comedy play scripts pdf free download<br />
Bangla hasir natok script by Narayan Gangopadhyay<br />
Bengali short drama script pdf download<br />
Bangla comedy natok script pdf free download<br />
Bangla hasir natok script by Parimal Tribedi<br />
Bengali funny drama script pdf free download<br />
Bangla hasir natok script by Mamata Mitra<br />
Bengali comedy drama script for school students<br />
Bangla hasir natok script by Amalendu Chatterjee<br />
Bengali comedy skit script pdf free download<br />
Bangla hasir natok script by Soumitra Chattopadhyay<br />
Bengali one act play scripts pdf free download<br />
Bangla hasir natok script by Rupak Saha<br />
Bengali comedy drama script for college students<br />
Bangla hasir natok script by E de Fossard<br />
Bengali comedy drama script for teachers day<br />
Bangla hasir natok script by Jean-Pierre Martinez<br />
Bengali comedy drama script for annual function<br />
Bangla hasir natok script by Samit Dutta<br />
Bengali comedy drama script for farewell party<br />
Bangla hasir natok script by Vikram Mitra<br />
Bengali comedy drama script for children's day<br />
Bangla hasir natok script by Kkhh<br />
Bengali comedy drama script for republic day<br />
Bangla hasir natok script by Boimela.in<br />
Bengali comedy drama script for independence day<br />
Bangla hasir natok script by Dasti Shruti Natak 150 Bachharer<br />
Bengali comedy drama script for women's day<br />
Bangla hasir natok script by Half Dozon Chotoder Natok<br />
Bengali comedy drama script for valentine's day<br />
Bangla hasir natok script by Prankhola Hasir Natok<br />
Bengali comedy drama script for friendship day<br />
Bangla hasir natok script by Lotun Jebon Betar Natok<br />
Bengali comedy drama script for teacher's day<br />
Bangla hasir natok script by Natak Samagra a Lot Stumble<br />
Bengali comedy drama script for mother's day<br />
Bangla hasir natok script by Bharate Chai a Comedy Play<br />
Bengali comedy drama script for father's day<br />
Bangla hasir natok script by Obak Abak Indian Full 35 Mins<br />
Bengali comedy drama script for raksha bandhan<br />
Bangla hasir natok script by Rater Rajanigandha a Hot Funny Video<br />
Bengali comedy drama script for diwali<br />
Bangla hasir natok script by 3 on a Bed a Full Movie<br />
Bengali comedy drama script for holi<br />
Bangla hasir natok script by Sister 1 a Short Play<br />
Bengali comedy drama script for new year</p>

<h2>How to download Bangla hasir natok script pdf free 120 from various sources?</h2>

<p>If you want to read Bangla hasir natok or Bengali comedy plays on your computer or mobile device, you can download Bangla hasir natok script pdf free 120 from various sources. This is a collection of 120 hilarious plays for all ages that you can access without any cost or registration.</p>

<p>Here is a list of websites that offer Bangla hasir natok script pdf free 120 for download:</p>

<ul>
<li><a href="https://www.scribd.com/document/436972041/Bangla-Hasir-Natok-Script-PDF-Free-Downloadinstmank">Scribd</a>: This is a digital library that hosts millions of books, documents, audiobooks, podcasts, etc. You can find Bangla hasir natok script pdf free 120 by searching for it on the website or clicking on this link. You can download it as a PDF file by clicking on the Download button in the top right corner.</li>
<li><a href="https://hodgdearedol1989.wixsite.com/sounneocenthumb/post/bangla-hasir-natok-script-pdf-free-download">Wixsite</a>: This is a website builder that allows users to create their own websites for free. You can find Bangla hasir natok script pdf free 120 by visiting this link. You can download it as a PDF file by clicking on the Download button in the bottom right corner.</li>
<li><a href="https://hub.docker.com/r/narogosne/bangla-hasir-natok-script-pdf-free-120">Docker Hub</a>: This is a platform that hosts docker images and containers for various applications. You can find Bangla hasir natok script pdf free 120 by visiting this link. You can download it as a PDF file by clicking on the Download button in the top right corner.</li>
</ul>

<p>To download Bangla hasir natok script pdf free 120 from each website, you need to follow these steps:</p>

<ol>
<li>Click on the link that takes you to the website that offers Bangla hasir natok script pdf free 120 for download.</li>
<li>On the website, look for the file name or title that says "Bangla Hasir Natok Script Pdf Free Downloadinstmank" or something similar.</li>
<li>Click on the file name or title to open it in a new tab or window.</li>
<li>On the new tab or window, look for the Download button, which is usually located in the top right corner or bottom right corner.</li>
<li>Click on the Download button to start downloading the file to your computer or mobile device.</li>
<li>Wait for the download to finish and then open the file with a PDF reader application.</li>
</ol>

<p>You can compare the quality and quantity of Bangla hasir natok script pdf free 120 available on each website by looking at these factors:</p>

<ul>
<li>The size of the file: The larger the file size, the more pages and content it contains.</li>
<li>The number of views: The higher the number of views, the more popular and reliable it is.</li>
<li>The date of upload: The newer the date of upload, the more updated and relevant it is.</li>
<li>The ratings and reviews: The higher the ratings and reviews, the more positive feedback it has received from other users.</li>
</ul>

<h2>What are some of the features and benefits of Bangla hasir natok script pdf free 120?</h2>

<p>Bangla hasir natok script pdf free 120 is a collection of 120 hilarious plays for all ages that you can read and enjoy anytime, anywhere. It has some features and benefits that make it a valuable and enjoyable resource for anyone who loves Bangla hasir natok or Bengali comedy plays.</p>

<p>Some of the features and benefits of Bangla hasir natok script pdf free 120 are:</p>

<ul>
<li>It is free and easy to download from various sources. You don't need to pay any money or register an account to access it.</li>
<li>It is in PDF format, which means you can read it on any device that supports PDF files, such as computers, laptops, tablets, smartphones, etc.</li>
<li>It is in the Bengali language, which means you can read it in your native language and appreciate the nuances and expressions of the writers and actors.</li>
<li>It contains 120 plays that cover different genres, themes, and lengths. You can find plays that suit your mood, preference, and time availability.</li>
<li>It is a collection of hilarious plays that will make you laugh out loud and forget your worries. You can also share it with your friends and family and have a good time together.</li>
<li>It is a source of entertainment, education, and inspiration. You can learn about the culture, values, and problems of Bengal and its people. You can also get inspired by the creativity, talent, and diversity of Bengali writers and actors.</li>
</ul>

<p>To give you an idea of what Bangla hasir natok script pdf free 120 contains, here is a table that shows the titles, genres, themes, and lengths of some of the plays included in it:</p>

| Title | Genre | Theme | Length |
| --- | --- | --- | --- |
| Chotushkone (Four Corners) | Mystery | A murder mystery involving four suspects who are trapped in a room. | 40 minutes |
| Jhalapala (Water Splash) | Comedy | A comedy of errors involving a water tank, a plumber, a landlord, and a tenant. | 35 minutes |
| Bharate Chai (I Want a Bride) | Romance | A romantic comedy involving a young man who wants to marry a girl he met online. | 45 minutes |
| Obak (Surprised) | Satire | A satire on the political and social situation of Bangladesh. | 30 minutes |
| Rater Rajanigandha (Night Jasmine) | Drama | A drama about a married couple who face a crisis in their relationship. | 50 minutes |

<h2>Conclusion: A summary of the main points and a call to action.</h2>

<p>In conclusion, Bangla hasir natok or Bengali comedy plays are a popular form of entertainment in Bangladesh and West Bengal. They are short plays written and performed in the Bengali language, often with witty dialogues, humorous situations, and social satire. They reflect the culture, values, and problems of Bengal and its people. They also showcase the creativity, talent, and diversity of Bengali writers and actors.</p>

<p>If you want to read Bangla hasir natok or Bengali comedy plays on your computer or mobile device, you can download Bangla hasir natok script pdf free 120 from various sources. This is a collection of 120 hilarious plays for all ages that you can read and enjoy anytime, anywhere. It has some features and benefits that make it a valuable and enjoyable resource for anyone who loves Bangla hasir natok or Bengali comedy plays.</p>

<p>We hope you enjoyed this article and learned something new about Bangla hasir natok or Bengali comedy plays. We also hope you will download Bangla hasir natok script pdf free 120 and read it for yourself. You will surely have a lot of fun and laughter with it. You can also share it with your friends and family and have a good time together.</p>

<p>Thank you for reading this article. If you have any questions or comments, please feel free to leave them below. We would love to hear from you.</p>

<h3>Frequently Asked Questions</h3>

<ol>
<li><b>What is the difference between Bangla hasir natok and Bangla sruti natok?</b><br>Bangla hasir natok are Bengali comedy plays that are staged in theatres, at festivals, on TV channels, or on online platforms. Bangla sruti natok are Bengali audio plays that are broadcast on radio stations or online platforms.</li>
<li><b>Who are some of the famous actors of Bangla hasir natok or Bengali comedy plays?</b><br>Some of the famous actors of Bangla hasir natok or Bengali comedy plays include Mosharraf Karim, Chanchal Chowdhury, Zahid Hasan, Nusrat Imrose Tisha, Mir Sabbir, etc.</li>
<li><b>Where can I watch Bangla hasir natok or Bengali comedy plays online?</b><br>You can watch Bangla hasir natok or Bengali comedy plays online on various platforms such as YouTube, Facebook, BongoBD, Bioscope, etc.</li>
<li><b>How can I write my own Bangla hasir natok or Bengali comedy play?</b><br>You can write your own Bangla hasir natok or Bengali comedy play by following these steps:<br>- Choose a genre, theme, and title for your play.<br>- Create a plot outline with a beginning, middle, and end.<br>- Develop your characters and their personalities, motivations, and relationships.<br>- Write the dialogues and actions for each scene.<br>- Use humor, irony, sarcasm, and exaggeration to make your play funny and engaging.<br>- Edit and revise your play until you are satisfied with it.</li>
<li><b>What are some of the benefits of reading and watching Bangla hasir natok or Bengali comedy plays?</b><br>Some of the benefits of reading and watching Bangla hasir natok or Bengali comedy plays are:<br>- They can improve your mood and reduce stress.<br>- They can enhance your language and communication skills.<br>- They can increase your knowledge and awareness of social and cultural issues.<br>- They can stimulate your creativity and imagination.<br>- They can inspire you to express yourself and have fun.</li>
</ol>
spaces/1acneusushi/gradio-2dmoleculeeditor/data/CRACK Adobe Acrobat XI Pro 11.0.22 Multilingual Crack [SadeemPC].md
DELETED
@@ -1,48 +0,0 @@
<h1>How to Install and Update Adobe Acrobat XI Pro 11.0.22 Multilingual [SadeemPC]</h1>

<p>Adobe Acrobat XI Pro is a powerful and versatile software that allows you to create, edit, convert, sign, and share PDF files. It also lets you fill, save, and send forms electronically. With Adobe Acrobat XI Pro, you can work with PDFs anywhere, anytime, and on any device.</p>

<p>If you have purchased Adobe Acrobat XI Pro from a disc or a download link, you may need to install and update it to the latest version (11.0.22) to enjoy its full features and security patches. In this article, we will show you how to do that step by step.</p>

<h2>CRACK Adobe Acrobat XI Pro 11.0.22 Multilingual Crack [SadeemPC]</h2><br /><p><b><b>Download File</b> ★★★★★ <a href="https://byltly.com/2uKxqy">https://byltly.com/2uKxqy</a></b></p><br /><br />

<h2>How to Install Adobe Acrobat XI Pro 11.0.22 Multilingual [SadeemPC]</h2>

<p>Before you install Adobe Acrobat XI Pro, make sure your computer meets the minimum system requirements. You can check them here: <a href="https://helpx.adobe.com/acrobat/system-requirements-acrobat-xi-pro.html">https://helpx.adobe.com/acrobat/system-requirements-acrobat-xi-pro.html</a></p>

<p>Also, make sure you have your serial number handy. You can find it on the disc case or in the confirmation email if you bought it online.</p>

<p>Follow these steps to install Adobe Acrobat XI Pro:</p>

<ol>
<li>Download the Acrobat XI Pro installer from this link: <a href="https://helpx.adobe.com/acrobat/kb/acrobat-downloads.html">https://helpx.adobe.com/acrobat/kb/acrobat-downloads.html</a>. Choose the appropriate language and operating system for your computer.</li>
<li>Double-click the downloaded file (AcrobatPro_11_Web_WWMUI.exe for Windows or AcrobatPro_11_Web_WWMUI.dmg for Mac) to start the installation process.</li>
<li>Follow the on-screen instructions to complete the installation. You may need to enter your serial number and sign in with your Adobe ID.</li>
<li>When the installation is finished, launch Adobe Acrobat XI Pro from your desktop or applications folder.</li>
</ol>

<h2>How to Update Adobe Acrobat XI Pro 11.0.22 Multilingual [SadeemPC]</h2>

<p>To keep your Adobe Acrobat XI Pro up to date and secure, you should check for and install updates regularly. You can do this manually or automatically.</p>

<p>To check for updates manually, follow these steps:</p>

<ol>
<li>Open Adobe Acrobat XI Pro and go to Help > Check for Updates.</li>
<li>If there are any available updates, click Download.</li>
<li>When the download is complete, click Install.</li>
<li>Follow the on-screen instructions to complete the update process. You may need to restart your computer.</li>
</ol>

<p>To check for updates automatically, follow these steps:</p>

<ol>
<li>Open Adobe Acrobat XI Pro and go to Edit > Preferences (Windows) or Acrobat > Preferences (Mac).</li>
<li>Select Updater from the left pane.</li>
<li>Choose one of the following options: Automatically install updates (recommended), Automatically download updates but let me choose when to install them, or Do not download or install updates automatically.</li>
<li>Click OK to save your settings.</li>
</ol>

<p>Congratulations! You have successfully installed and updated Adobe Acrobat XI Pro 11.0.22 Multilingual [SadeemPC]. Now you can enjoy working with PDFs like a pro!</p>
spaces/1gistliPinn/ChatGPT4/Examples/Adobe.acrobat.pro.x.v10.0.multilingual.incl.keymaker-core 121.md
DELETED
@@ -1,6 +0,0 @@
<h2>Adobe.acrobat.pro.x.v10.0.multilingual.incl.keymaker-core 121</h2><br /><p><b><b>Download File</b> 🔗 <a href="https://imgfil.com/2uxYLq">https://imgfil.com/2uxYLq</a></b></p><br /><br />

Foxit PhantomPDF Business Edition v6.0.7.0806 Incl Crack and Key [TorDigger] . Foxit.. GOLD: ... Adobe.acrobat.pro.x.v10.0.multilingual.incl.keymaker-core 121
spaces/1gistliPinn/ChatGPT4/Examples/Allavsoft Video Downloader Converter 3.17.9.7206 Crack _TOP_ With License Key.md
DELETED
@@ -1,8 +0,0 @@
<h2>Allavsoft Video Downloader Converter 3.17.9.7206 Crack With License Key</h2><br /><p><b><b>Download File</b> >>> <a href="https://imgfil.com/2uxY9a">https://imgfil.com/2uxY9a</a></b></p><br /><br />

Allavsoft Video Downloader Converter 3.17.9.7206 with license key: Allavsoft Video Converter is an all-in-one video converter, video downloader, and video resizer (convert DVD to avi, mov, wmv, mp4, 3gp, mp3, etc.) and movie converter.
It can convert all video formats, including mpg, mpeg, divX, xvid, avi, asf, wmv, dv, f4v, rm, rmvb, mov, 3gpp, webm, mp3, mp4, etc.
It contains two tools that can enhance your video effects, like quantization, pitch, fading effects, hue, brightness, sharpen, and many more.
spaces/1gistliPinn/ChatGPT4/Examples/Descargar Contpaq 2005 Gratis.md
DELETED
@@ -1,102 +0,0 @@
<h1>How to download Contpaq 2005 for free?</h1>

<p>Contpaq 2005 is accounting software that lets you keep control of your accounting easily and efficiently. With Contpaq 2005 you can record and review your accounting entries, generate reports and statements, and integrate your accounting with other systems such as banks, payroll, invoicing, etc. In addition, Contpaq 2005 is compatible with Windows 98, Me, 2000, and XP, and supports both 32-bit and 64-bit architectures.</p>

<p>If you want to download Contpaq 2005 for free, you have several options available. One of them is to visit the official website of Computación en Acción, the company that develops Contpaq 2005. There you can find the Contpaq 2005 installation file, as well as manuals, tutorials, and courses for learning to use the software. However, to use Contpaq 2005 you will need a valid license, which you can obtain from the same website or from an authorized distributor.</p>

<h2>Descargar Contpaq 2005 Gratis</h2><br /><p><b><b>Download Zip</b> ✪ <a href="https://imgfil.com/2uxYaT">https://imgfil.com/2uxYaT</a></b></p><br /><br />

<p>Another option for downloading Contpaq 2005 for free is to turn to a third-party source, such as Google Drive, Trello, or Netlify. These sites offer the Contpaq 2005 installation file, as well as a program called Xforce Keygen 64 Bit, which lets you generate serial numbers and activation codes for several Autodesk products, including Contpaq 2005. That way, you can activate Contpaq 2005 without having to buy a license. However, you should be careful when downloading Contpaq 2005 and Xforce Keygen 64 Bit from these sites, since some files may contain viruses or malware that can damage your device or compromise your data.</p>

<p>In this article we will show you how to download Contpaq 2005 for free from a third-party source and how to activate it with Xforce Keygen 64 Bit. We will also give you some tips and tricks to optimize your experience with Contpaq 2005 and avoid potential problems. Follow these steps to download Contpaq 2005 for free:</p>

<ol>
<li>Visit a trustworthy website that offers Contpaq 2005 and Xforce Keygen 64 Bit. For example, you can visit Google Drive, Trello, or Netlify. Make sure to scan the website with antivirus software before proceeding.</li>
<li>Click the download link or button to download Contpaq 2005 and Xforce Keygen 64 Bit. The file size may vary depending on the website and the version of Contpaq 2005 and Xforce Keygen 64 Bit.</li>
<li>Save the file to a folder on your device. You may need software such as WinRAR or 7-Zip to extract the file if it is compressed.</li>
<li>Scan the file with antivirus software before opening it. If the file is infected or corrupted, delete it immediately and try another website.</li>
<li>Run Xforce Keygen 64 Bit as administrator. You may need to temporarily disable your firewall or antivirus to do this.</li>
<li>Select Contpaq 2005 from the list of products in the Xforce Keygen interface.</li>
<li>Click the Generate button to generate a serial number and an activation code for Contpaq 2005.</li>
<li>Copy the serial number and the activation code to a text file or to the clipboard.</li>
<li>Install Contpaq 2005 on your device. You can download the installation file from the official website of Computación en Acción or from a third-party source.</li>
<li>When prompted, enter the serial number and the activation code you generated with Xforce Keygen.</li>
<li>Follow the on-screen instructions to complete the installation and activation process.</li>
<li>Enjoy using Contpaq 2005 on your device.</li>
</ol>

<h2>Tips and tricks for using Contpaq 2005</h2>

<p>Here are some tips and tricks for using Contpaq 2005:</p>

<ul>
<li>Make sure you have enough space on your device to install and run Contpaq 2005. The software requires at least 750 MB of free disk space and 512 MB of RAM.</li>
<li>Make sure you have a stable internet connection when downloading and installing Contpaq 2005 and Xforce Keygen. A slow or interrupted connection can cause errors or file corruption.</li>
<li>Make sure to back up your data before using Xforce Keygen. Although Xforce Keygen is generally safe and reliable, there is always a risk</li>
</ul>

<h2>How to use Contpaq 2005?</h2>

<p>Contpaq 2005 is accounting software that offers you a set of functions and tools for keeping control of your accounting easily and efficiently. With Contpaq 2005 you can:</p>

<ul>
<li>Record and review your accounting entries, such as income, expenses, accounts receivable, accounts payable, etc.</li>
<li>Generate accounting reports and statements, such as balance sheets, income statements, cash-flow statements, etc.</li>
<li>Integrate your accounting with other systems, such as banks, payroll, invoicing, etc., to automate processes and avoid errors.</li>
<li>Comply with tax rules and obligations, such as calculating and filing taxes, issuing digital tax receipts, etc.</li>
<li>Customize your accounting to your needs and preferences, such as the chart of accounts, voucher types, cost centers, etc.</li>
</ul>

<p>To use Contpaq 2005, follow these steps:</p>

<ol>
<li>Launch Contpaq 2005 on your device. You can use the desktop icon, the Start menu, or the taskbar to launch Contpaq 2005.</li>
<li>Create or open a company in Contpaq 2005. You can use the File menu, the toolbar, or the command line to create or open a company in Contpaq 2005.</li>
<li>Configure your company in Contpaq 2005. You can use the Configuration menu, the toolbar, or the command line to configure your company in Contpaq 2005. You must enter your company's general data, such as its name, RFC, and tax address. You must also configure your company's accounting parameters, such as the chart of accounts, voucher types, and cost centers.</li>
<li>Record and review your accounting entries in Contpaq 2005. You can use the Transactions menu, the toolbar, or the command line to record and review your accounting entries in Contpaq 2005. You must enter the data for each entry, such as the voucher type, date, description, accounts affected, and amounts. You can also review your entries by different criteria, such as period, voucher type, or cost center.</li>
<li>Generate accounting reports and statements in Contpaq 2005. You can use the Reports menu</li>
</ol>

<h2>What advantages does Contpaq 2005 have?</h2>

<p>Contpaq 2005 is accounting software with several advantages that make it stand out among similar programs. Some of the advantages of Contpaq 2005 are:</p>

<ul>
<li>It is easy to use and learn. Contpaq 2005 has a friendly, simple graphical interface that gives you access to all its functions and tools in a few clicks. In addition, Contpaq 2005 comes with manuals, tutorials, and courses that teach you how to use the software step by step.</li>
<li>It is flexible and adaptable. Contpaq 2005 lets you customize your accounting to your needs and preferences, such as the chart of accounts, voucher types, cost centers, etc. It also lets you integrate your accounting with other systems, such as banks, payroll, invoicing, etc., to automate processes and avoid errors.</li>
<li>It is safe and reliable. Contpaq 2005 offers a high level of security and reliability in handling your accounting information. Contpaq 2005 has a data backup and restore system that lets you recover your information in case of loss or damage. It also has an audit and control system that lets you verify the integrity and consistency of your data.</li>
<li>It is compatible and up to date. Contpaq 2005 is compatible with Windows 98, Me, 2000, and XP, and supports both 32-bit and 64-bit architectures. In addition, Contpaq 2005 is constantly updated to conform to current tax rules and obligations, such as calculating and filing taxes, issuing digital tax receipts, etc.</li>
</ul>

<h2>What disadvantages does Contpaq 2005 have?</h2>

<p>Contpaq 2005 is accounting software with few disadvantages compared to its advantages. However, some of the disadvantages of Contpaq 2005 are:</p>

<ul>
<li>It is expensive and limited. Contpaq 2005 requires a valid license, which you can obtain from the official website of Computación en Acción or from an authorized distributor. However, the license has a high cost and a limited duration, which can represent a considerable investment for some users.</li>
<li>It is vulnerable and risky. Contpaq 2005 can be vulnerable and risky if used improperly or illegally. For example, if you download Contpaq 2005 from a third-party source or activate it with Xforce Keygen 64 Bit, you can expose your device or data to viruses or malware that can damage or compromise them. You can also commit offenses or incur penalties for violating copyright or tax laws.</li>
</ul>

<h2>What do users think of Contpaq 2005?</h2>

<p>Contpaq 2005 is accounting software with many positive and negative reviews from users who have used it or still use it. Some of the opinions of Contpaq 2005 users are:</p>

<ul>
<li>Positive reviews: Users who have had a good experience with Contpaq 2005 highlight that the software is easy to use and learn, that it has a friendly, simple graphical interface, that it offers a set of functions and tools for keeping control of your accounting, that it integrates with other systems such as banks, payroll, invoicing, etc., that it complies with current tax rules and obligations, and that it is compatible and up to date.</li>
<li>Negative reviews: Users who have had a bad experience with Contpaq 2005 point out that the software is expensive and limited, that it requires a valid license to use, that it can be vulnerable and risky if used improperly or illegally, that it can have errors or technical failures, that it can be slow or heavy on some devices, and that its customer service is poor or nonexistent.</li>
</ul>

<p>These are just some of the opinions of Contpaq 2005 users that you can find on the internet. Every user has their own opinion based on their personal experience with the software. That is why we recommend reading several reviews before deciding whether Contpaq 2005 is the right accounting software for you.</p>

<h2>Where can I download Contpaq 2005 for free?</h2>

<p>If you want to download Contpaq 2005 for free, you have several options available. One of them is to visit the official website of Computación en Acción, the company that develops Contpaq 2005. There you can find the Contpaq 2005 installation file, as well as manuals, tutorials, and courses for learning to use the software. However, to use Contpaq 2005 you will need a valid license, which you can obtain from the same website or from an authorized distributor.</p>

<p>Another option for downloading Contpaq 2005 for free is to turn to a third-party source, such as Google Drive, Trello, or Netlify. These sites offer the Contpaq 2005 installation file, as well as a program called Xforce Keygen 64 Bit, which lets you generate serial numbers and activation codes for several Autodesk products, including Contpaq 2005. That way, you can activate Contpaq 2005 without having to buy a license. However, you should be careful when downloading Contpaq 2005 and Xforce Keygen 64 Bit from these sites, since some files may contain viruses or malware that can damage your device or compromise your data.</p>

<p>In this article we have shown you how to download Contpaq 2005 for free from a third-party source and how to activate it with Xforce Keygen 64 Bit. We have also given you some tips and tricks to optimize your experience with Contpaq 2005 and avoid potential problems. We have also compared Contpaq 2005 with other Contpaq versions and products</p>

<h2>Conclusion</h2>

<p>Contpaq 2005 is accounting software that lets you keep control of your accounting easily and efficiently. With Contpaq 2005 you can record and review your accounting entries, generate reports and statements, and integrate your accounting with other systems such as banks, payroll, invoicing, etc. In addition, Contpaq 2005 is compatible with Windows 98, Me, 2000, and XP, and supports both 32-bit and 64-bit architectures. However, to use Contpaq 2005 you need a valid license, which you can obtain from the official website of Computación en Acción or from a third-party source such as Xforce Keygen 64 Bit.</p>

<p>In this article we have shown you how to download Contpaq 2005 for free from a third-party source and how to activate it with Xforce Keygen 64 Bit. We have also given you some tips and tricks to optimize your experience with Contpaq 2005 and avoid potential problems. We have also compared Contpaq 2005 with other Contpaq versions and products, and we have shown you some alternatives and user reviews of Contpaq 2005.</p>

<p>We hope this article helped you learn how to download Contpaq 2005 for free and how to use it. If you have any questions or comments, please leave us a message below. Thank you for reading!</p>
spaces/1gistliPinn/ChatGPT4/Examples/Free Download [BETTER] Hindi Movie Kala Patthar.md
DELETED
@@ -1,8 +0,0 @@
<h2>free download hindi movie kala patthar</h2><br /><p><b><b>Download Zip</b> ———>>> <a href="https://imgfil.com/2uxXU1">https://imgfil.com/2uxXU1</a></b></p><br /><br />

hindi movie kala patthar, hindi movie kala patthar
hindi movie hindi movie hindi movie kala patthar hindi movie kala patthar hindi movie 2019, hindi movie, hindi movie 2019, hindi movie 2019, hindi movie 2019 trailer, hindi movie 2019 full, hindi movie 2019 trailer,
hindi movie
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Adobe Media Encoder 2020 How to Download and Install for Free.md
DELETED
@@ -1,144 +0,0 @@
<h1>Adobe Media Encoder 2020 Free Download</h1>

<p>If you are looking for reliable and powerful video encoding software, you might want to check out Adobe Media Encoder 2020. This software allows you to ingest, transcode, create proxies, and output to almost any format you can imagine. It also integrates seamlessly with other Adobe applications, such as Premiere Pro, After Effects, and Audition. In this article, we will show you how to download and install Adobe Media Encoder 2020 for free, how to use it, what its features and system requirements are, and what some best practices and alternatives are.</p>

<h2>What is Adobe Media Encoder 2020?</h2>

<p>Adobe Media Encoder 2020 is software that enables you to encode and export video files for various platforms and devices. It supports a wide range of formats, codecs, resolutions, frame rates, aspect ratios, and profiles. You can also apply presets, watch folders, destination publishing, time tuner, LUTs, loudness corrections, and other settings to automate your workflows and enhance your output quality. Adobe Media Encoder 2020 is part of the Adobe Creative Cloud suite, which means you can access it with a subscription plan or a free trial.</p>

<h2>adobe media encoder 2020 free download</h2><br /><p><b><b>Download Zip</b> ⭐ <a href="https://urlin.us/2uT08P">https://urlin.us/2uT08P</a></b></p><br /><br />

<h2>Why do you need Adobe Media Encoder 2020?</h2>

<p>Adobe Media Encoder 2020 is a useful tool for anyone who works with video production, editing, or distribution. Here are some reasons why you might need it:</p>

<ul>
<li>You want to convert your video files into different formats for various purposes, such as web streaming, social media sharing, DVD authoring, or archiving.</li>
<li>You want to create proxies or lower-resolution versions of your video files for faster editing or previewing.</li>
<li>You want to export your video projects from Premiere Pro or After Effects without opening them.</li>
<li>You want to adjust the duration, color, or audio of your video files without re-opening them.</li>
<li>You want to publish your video files directly to YouTube, Vimeo, Facebook, Twitter, or other platforms.</li>
</ul>

<h2>How to download and install Adobe Media Encoder 2020?</h2>

<p>To download and install Adobe Media Encoder 2020, follow these steps:</p>

<ol>
<li>Go to the official website of Adobe Media Encoder.</li>
<li>Click on the <strong>Free Trial</strong> button at the top right corner of the page.</li>
<li>Sign in with your Adobe ID or create one if you don't have one.</li>
<li>Select your plan from the options available. You can choose between a single app plan or a Creative Cloud plan that includes other Adobe apps.</li>
<li>Click on <strong>Start free trial</strong> and follow the onscreen instructions to download the installer file.</li>
<li>Run the installer file and follow the prompts to complete the installation process.</li>
<li>Launch Adobe Media Encoder 2020 from your desktop or start menu.</li>
</ol>

<h2>How to use Adobe Media Encoder 2020?</h2>

<p>To use Adobe Media Encoder 2020, follow these steps:</p>

<ol>
<li>Add your source video files to the queue by clicking on the <strong>Add Source</strong> button at the top left corner of the window. You can also drag and drop your files from your file explorer or import them from Premiere Pro or After Effects.</li>
<li>Select your output format and preset from the drop-down menus at the right side of the window. You can also customize your settings by clicking on the <strong>Edit Preset</strong> button.</li>
<li>Choose your output destination by clicking on the <strong>Output File</strong> link at the right side of the window. You can also specify a watch folder or a destination publishing option.</li>
<li>Click on the <strong>Start Queue</strong> button at the top right corner of the window to begin encoding your video files. You can monitor the progress and status of your encoding jobs in the queue panel.</li>
<li>Once your encoding is done, you can preview your output files by clicking on the <strong>Output</strong> tab at the bottom of the window. You can also open them in your default media player or folder by right-clicking on them and selecting the appropriate option.</li>
</ol>

<h2>What are the system requirements for Adobe Media Encoder 2020?</h2>

<p>To run Adobe Media Encoder 2020 smoothly, you need to meet the following system requirements:</p>

<table>
<tr>
<th>Operating System</th>
<th>Minimum Requirements</th>
<th>Recommended Requirements</th>
</tr>
<tr>
<td>Windows 10 (64-bit)</td>
<td>- Intel 6th Gen or newer CPU<br>- 8 GB of RAM<br>- 4 GB of GPU VRAM<br>- 1920 x 1080 display resolution<br>- Sound card compatible with ASIO protocol or Microsoft Windows Driver Model<br>- Fast internal SSD for app installation and cache<br>- 10 GB of available hard-disk space for installation; additional free space required during installation (cannot install on removable flash storage devices)<br>- Optional: Adobe-recommended GPU card for GPU-accelerated performance (see Premiere Pro System Requirements)</td>
<td>- Intel 7th Gen or newer CPU<br>- 16 GB of RAM for HD media<br>- 32 GB or more of RAM for 4K media<br>- 4 GB of GPU VRAM<br>- Fast internal SSD (recommended) for app installation and cache – plus provisional space for media<br>- Additional high-speed drive(s) for media</td>
</tr>
<tr>
<td>macOS v10.13 or later</td>
<td>- Intel 6th Gen or newer CPU<br>- 8 GB of RAM<br>- 4 GB of GPU VRAM<br>- 1920 x 1080 display resolution<br>- Sound card compatible with Apple Core Audio<br>- Fast internal SSD for app installation and cache<br>- 10 GB of available hard-disk space for installation; additional free space required during installation (cannot install on a volume that uses a case-sensitive file system or on removable flash storage devices)<br>- Optional: Adobe-recommended GPU card for GPU-accelerated performance (see Premiere Pro System Requirements)</td>
<td>- Intel 7th Gen or newer CPU<br>- 16 GB of RAM for HD media<br>- 32 GB or more of RAM for 4K media<br>- 4 GB of GPU VRAM<br>- Fast internal SSD (recommended) for app installation and cache – plus provisional space for media<br>- Additional high-speed drive(s) for media</td>
</tr>
</table>

<h2>What are the new features in Adobe Media Encoder 2020?</h2>

<p>Adobe Media Encoder 2020 comes with several new features and improvements that enhance your encoding experience. Here are some of them:</p>

<ul>
<li><strong>New file format support:</strong> You can now encode and export video files in AV1, HEIF, Canon XF-HEVC, and Sony VENICE V4 formats.</li>
<li><strong>New hardware-accelerated encoding:</strong> You can now use hardware encoding for H.264 and HEVC formats on Windows with Intel and NVIDIA GPUs, and on macOS with AMD and Intel GPUs.</li>
<li><strong>New encoding presets:</strong> You can now use new presets for social media platforms, such as TikTok, Reddit, and Snapchat.</li>
<li><strong>New destination publishing:</strong> You can now publish your video files directly to Behance, along with YouTube, Vimeo, Facebook, and Twitter.</li>
<li><strong>New time tuner effect:</strong> You can now use the time tuner effect to automatically adjust the duration of your video files by adding or removing frames.</li>
<li><strong>New HDR support:</strong> You can now encode and export HDR video files with HDR10 metadata.</li>
<li><strong>New performance improvements:</strong> You can now enjoy faster encoding and decoding with the latest Adobe Media Encoder engine and improved GPU support.</li>
</ul>

<h2>What are the best practices for using Adobe Media Encoder 2020?</h2>

<p>To get the most out of Adobe Media Encoder 2020, here are some best practices you can follow:</p>

<ul>
<li><strong>Choose the right format and preset for your output:</strong> Depending on your purpose and platform, you should select the appropriate format and preset for your video files. For example, if you want to upload your video to YouTube, you should use the H.264 format and the YouTube preset. You can also customize your settings to suit your needs and preferences.</li>
<li><strong>Use proxies for faster editing:</strong> If you have high-resolution or high-bitrate video files, you might experience lagging or crashing when editing them. To avoid this, you can create proxies or lower-resolution versions of your video files with Adobe Media Encoder 2020 and use them for editing in Premiere Pro or After Effects. You can then switch back to the original files when exporting.</li>
<li><strong>Use watch folders for batch processing:</strong> If you have multiple video files that need the same encoding settings, you can use watch folders to automate your workflows. Watch folders are folders that Adobe Media Encoder 2020 monitors for new files and applies a preset to them automatically. You can create watch folders by clicking on the <strong>Add Watch Folder</strong> button at the top left corner of the window and selecting a folder and a preset (a rough command-line sketch of the same idea follows this list).</li>
<li><strong>Use destination publishing for easy sharing:</strong> If you want to share your video files online, you can use destination publishing to upload them directly to your preferred platform. Destination publishing allows you to enter your account credentials and metadata for YouTube, Vimeo, Facebook, Twitter, or Behance, and publish your video files with one click. You can enable destination publishing by clicking on the <strong>Add Destination</strong> button at the right side of the window and selecting a platform.</li>
<li><strong>Use time tuner and loudness correction for fine-tuning:</strong> If you want to adjust the duration or audio of your video files without re-opening them, you can use time tuner and loudness correction effects in Adobe Media Encoder 2020. Time tuner allows you to add or remove frames from your video files to match a specific duration. Loudness correction allows you to normalize the audio levels of your video files to meet broadcast standards. You can apply these effects by clicking on the <strong>Add Effect</strong> button at the right side of the window and selecting an effect.</li>
</ul>
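<p>To make the watch-folder idea concrete, here is a minimal command-line sketch of the same pattern using the free FFmpeg tool (one of the alternatives discussed below). This is an illustrative assumption, not Media Encoder's actual mechanism: the folder names and encoding settings are placeholders you would adapt.</p>

```bash
#!/usr/bin/env bash
# Hypothetical watch-folder loop: poll an incoming folder and transcode
# each new file to an H.264/AAC MP4 with ffmpeg. All names are placeholders.
WATCH_DIR="incoming"; OUT_DIR="encoded"; DONE_DIR="processed"
mkdir -p "$WATCH_DIR" "$OUT_DIR" "$DONE_DIR"
while true; do
  for f in "$WATCH_DIR"/*; do
    [ -f "$f" ] || continue            # skip when the folder is empty
    name="$(basename "$f")"
    ffmpeg -y -i "$f" -c:v libx264 -crf 20 -c:a aac "$OUT_DIR/${name%.*}.mp4" \
      && mv "$f" "$DONE_DIR/"          # move the source out so it is not re-encoded
  done
  sleep 10                             # poll interval in seconds
done
```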
|
75 |
-
<h2>What are the alternatives to Adobe Media Encoder 2020?</h2>
|
76 |
-
<p>If you are looking for other video encoding software, here are some alternatives to Adobe Media Encoder 2020:</p>
|
77 |
-
<ul>
<li><strong>HandBrake:</strong> A free, open-source transcoder that converts video from almost any format to MP4 or MKV. It supports a variety of codecs, presets, filters, subtitles, and chapters, and has a simple, user-friendly interface.</li>
<li><strong>VLC Media Player:</strong> A free, cross-platform tool that can play, convert, stream, and record video. It supports a wide range of formats, codecs, protocols, and devices, and has a customizable, versatile interface.</li>
<li><strong>FFmpeg:</strong> A free, command-line toolkit that can encode, decode, transcode, mux, demux, stream, filter, and play almost any format imaginable, with high performance and output quality. Because it is scriptable, it is well suited to automation (see the example after this list).</li>
</ul>
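<p>To give a feel for the FFmpeg option, here is a typical one-off transcode driven from Python. This is a minimal sketch, assuming ffmpeg is installed and on your PATH; the file names are placeholders, and the settings shown (H.264 video, AAC audio, CRF quality mode) are one common general-purpose choice rather than the only correct one.</p>
<pre><code>import subprocess

# Transcode a source file to H.264/AAC in an MP4 container.
# -crf 20 trades file size for quality (lower = better quality, bigger file);
# -preset slow spends more CPU time for better compression.
cmd = [
    "ffmpeg",
    "-i", "input.mov",        # placeholder source file
    "-c:v", "libx264",        # H.264 video codec
    "-preset", "slow",
    "-crf", "20",
    "-c:a", "aac",            # AAC audio codec
    "-b:a", "192k",
    "output.mp4",             # placeholder destination
]
subprocess.run(cmd, check=True)
</code></pre>
<p>The same command works directly in a terminal; wrapping it in Python simply makes it easy to loop over many files.</p>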
<h2>Conclusion</h2>
<p>In conclusion, Adobe Media Encoder 2020 is a powerful and reliable video encoding tool that lets you ingest, transcode, create proxies, and output to almost any format, and it integrates seamlessly with other Adobe applications such as Premiere Pro, After Effects, and Audition. You can download and install it from the official website with a free trial or a subscription plan, then encode and export video files using its settings, presets, effects, watch folders, and destination publishing options, and adjust the duration, color, or audio of your files without re-opening them. The 2020 release also brings new file format support, hardware-accelerated encoding, new encoding presets, new destination publishing options, a new time tuner effect, HDR support, and performance improvements. To optimize your workflow and output quality, follow the best practices above: choose the right format and preset, use proxies, automate with watch folders, publish directly to your platforms, and fine-tune with time tuner and loudness correction. If you need something else, alternatives such as HandBrake, VLC Media Player, and FFmpeg are worth a look.</p>
<h3>FAQs</h3>
<p>Here are some frequently asked questions and answers about Adobe Media Encoder 2020:</p>
<ul>
<li><strong>Q: How long is the free trial for Adobe Media Encoder 2020?</strong><br>A: The free trial lasts for seven days from the day you start it. You can cancel anytime before the trial ends and you won't be charged.</li>
<li><strong>Q: How much does Adobe Media Encoder 2020 cost?</strong><br>A: It costs $20.99 per month for a single-app plan or $52.99 per month for a Creative Cloud plan that includes other Adobe apps. You can save money by choosing an annual plan or a student or teacher plan.</li>
<li><strong>Q: Can I use Adobe Media Encoder 2020 offline?</strong><br>A: Yes, once you have installed it and signed in with your Adobe ID. However, you will need an internet connection to activate the software, update it, access online services, or sync your settings.</li>
<li><strong>Q: Can I use Adobe Media Encoder 2020 on multiple computers?</strong><br>A: Yes, on up to two computers with a single license, but not on both at the same time.</li>
<li><strong>Q: How can I get help or support for Adobe Media Encoder 2020?</strong><br>A: Visit the official Adobe Media Encoder 2020 website and use the user guide, tutorials, forums, community help, or contact options.</li>
</ul>
spaces/1phancelerku/anime-remove-background/1945 Air Force APK Mod A Classic Shooting Game with Endless Possibilities.md
DELETED
@@ -1,107 +0,0 @@
<h1>Download 1945 Air Force APK Mod: A Guide for Android Users</h1>
<p>If you are a fan of classic arcade shooting games, you might want to try 1945 Air Force, a game that lets you experience the thrill of aerial combat in various historical scenarios. You can choose from over 200 planes, each with unique features and abilities, upgrade them, customize your weapons, and challenge yourself with different modes and missions.</p>
<p>But what if you want to enjoy the game without limitations? What if you want unlimited coins, gems, energy, and other resources? In that case, you might consider 1945 Air Force APK Mod, a modified version of the game that unlocks features and content the original does not. In this article, we will cover what 1945 Air Force APK Mod is, why you might want to download it, and how to download it safely and easily. Let's get started!</p>
<h2>What is 1945 Air Force?</h2>
<p>1945 Air Force is a free-to-play arcade shooting game developed by ONESOFT, inspired by classics of the genre such as 1942, 1943, and Raiden. It features stunning graphics, realistic sound effects, and smooth gameplay. You can immerse yourself in the battles of World War II, the Cold War, the Vietnam War, and more, or join other players online and compete for the highest scores.</p>
<h3>Features of 1945 Air Force</h3>
<p>Some of the features that make 1945 Air Force stand out are:</p>
<ul>
<li>Over 200 planes to choose from, each with its own characteristics and special skills.</li>
<li>Over 100 missions to complete, each with different objectives and challenges.</li>
<li>Over 10 game modes, including Campaign, Endless, Boss Battle, and PvP.</li>
<li>Over 30 support items, such as bombs, missiles, and shields.</li>
<li>Daily rewards, events, achievements, and leaderboards to keep you engaged and motivated.</li>
</ul>
<h3>How to play 1945 Air Force</h3>
<p>The gameplay is simple and intuitive. Swipe your finger on the screen to move your plane and avoid enemy fire, and tap to fire your weapons and use your special skills. Collect coins, gems, energy, and other items along the way, and spend them to upgrade your planes, weapons, and items. New planes and modes unlock as you progress.</p>
<h2>Why download 1945 Air Force APK Mod?</h2>
<p>While 1945 Air Force is fun and addictive, it has some drawbacks: it requires an internet connection, it shows ads that can interrupt your gameplay, and it offers in-app purchases that can be expensive.</p>
<p>This is where 1945 Air Force APK Mod comes in. It is a modified version of the game that removes those limitations and restrictions. With it, you get the following benefits:</p>
<h3>Benefits of 1945 Air Force APK Mod</h3>
<ul>
<li>You can play the game offline, without any internet connection.</li>
<li>You can get rid of all the ads that interrupt or distract you.</li>
<li>You can get unlimited coins, gems, energy, and other resources to upgrade your planes, weapons, and items.</li>
<li>You can unlock all the planes and modes that are otherwise locked or require real money.</li>
<li>You can have more fun and excitement with the game, without restrictions.</li>
</ul>
<h3>Risks of 1945 Air Force APK Mod</h3>
<p>However, downloading 1945 Air Force APK Mod also carries risks that you should be aware of:</p>
<ul>
<li>You might face compatibility issues with your device or the game version.</li>
<li>You might encounter bugs or glitches that affect gameplay or performance.</li>
<li>You might lose your progress or data if you uninstall the game or switch back to the original version.</li>
<li>You might violate the developer's terms of service and get banned or suspended.</li>
<li>You might expose your device to malware or viruses that can harm it or compromise your security.</li>
</ul>
<p>Therefore, download 1945 Air Force APK Mod at your own risk and discretion. Make sure you use a reliable, trustworthy source, and scan the file for malicious content before installing it on your device.</p>
<h2>How to download 1945 Air Force APK Mod?</h2>
<p>If you have decided to download 1945 Air Force APK Mod despite the risks, follow these steps:</p>
<h3>Step 1: Enable unknown sources</h3>
<p>First, enable unknown sources on your device so you can install apps that are not from the official Google Play Store. Go to your device settings, open the security or privacy section, find the unknown sources option, and toggle it on. You may see a warning message; you can accept it and proceed.</p>
<h3>Step 2: Find a reliable source</h3>
<p>Next, find a reliable source that offers 1945 Air Force APK Mod. You can search online for websites or blogs that provide the file, but be careful to avoid suspicious or fake links that can harm your device or steal your information. Check the reviews and ratings of the source to see whether it is trustworthy and reputable.</p>
<h3>Step 3: Download and install the APK file</h3>
<p>Download the APK file by clicking the download button or link and wait for it to finish. Then locate the file in your device storage and tap it to start the installation. Accept any prompts or permissions that appear. Once the installation is done, the game icon will appear on your home screen or app drawer.</p>
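<p>One concrete way to follow the "scan the file" advice above is to compare the download's SHA-256 checksum against one published by the source, when the source provides it. Here is a minimal Python sketch; the file name and expected hash are placeholders, not real values for this APK:</p>
<pre><code>import hashlib

APK_PATH = "downloaded.apk"  # placeholder: the file you just saved
EXPECTED = "0123abcd..."     # placeholder: the hash the source published

digest = hashlib.sha256()
with open(APK_PATH, "rb") as f:
    # Read in chunks so large files do not need to fit in memory.
    for chunk in iter(lambda: f.read(1024 * 1024), b""):
        digest.update(chunk)

if digest.hexdigest() == EXPECTED:
    print("Checksum matches the published value.")
else:
    print("Checksum mismatch - do not install this file.")
</code></pre>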
<h3>Step 4: Enjoy the game</h3>
<p>Finally, launch the game by tapping its icon. You will have unlimited resources and access to all the features and content the original game locks away. You can also adjust the settings to your liking, or invite your friends and play with them online.</p>
<h2>Conclusion</h2>
<p>1945 Air Force is a great arcade shooting game that will keep you entertained and challenged for hours. If you want unlimited resources and access to everything the original locks away, you can download 1945 Air Force APK Mod, but be aware of the risks involved, take precautions, and follow the steps above to download and install it safely.</p>
<h2>FAQs</h2>
<p>Here are some frequently asked questions about 1945 Air Force APK Mod:</p>
<ul>
<li><b>Q: What is the difference between 1945 Air Force APK Mod and 1945 Air Force Hack?</b><br>A: The APK Mod is a modified version of the game itself, while a "hack" is a separate tool or program used to cheat in the game. Both aim at unlimited resources and locked content, but the APK Mod is easier and safer to use because it does not require installing any additional software or tool on your device.</li>
<li><b>Q: Is 1945 Air Force APK Mod free to download and use?</b><br>A: Yes. You do not need to pay any money or subscription fee. However, avoid any source that asks for personal or financial information or requires surveys or "verification" before the download; those are likely scams.</li>
<li><b>Q: Is 1945 Air Force APK Mod compatible with all Android devices?</b><br>A: Not necessarily. It may not work on some devices depending on the device model, Android version, game version, or file quality. Check compatibility before downloading, and make sure your device has enough storage and meets the game's minimum requirements.</li>
<li><b>Q: Can I switch between 1945 Air Force APK Mod and the original version?</b><br>A: Yes, anytime. However, you might lose your progress or data when switching, so back up your data first, and uninstall one version before installing the other to avoid conflicts or errors.</li>
<li><b>Q: Can I play 1945 Air Force APK Mod with my friends online?</b><br>A: Yes, but expect issues: you may not be able to join the same server or room as friends on the original version, you may see lag or delays in gameplay or communication, and you risk a ban if the developer detects the modified version.</li>
</ul>
spaces/1phancelerku/anime-remove-background/Challenge Yourself with Car Parking Multiplayer Levels on Play Store.md
DELETED
@@ -1,119 +0,0 @@
<h1>Play Store Car Parking Multiplayer: A Review</h1>
<p>Are you looking for a realistic and fun car parking game that you can play with your friends? If so, check out Car Parking Multiplayer, a popular game on the Play Store that offers more than just parking. In this article, we review Car Parking Multiplayer: its features, pros and cons, how to download and install it, and some alternatives you can try.</p>
<h2>What is Car Parking Multiplayer?</h2>
<p>Car Parking Multiplayer is a simulation game developed by olzhass, a studio that specializes in car games. Released in 2017, it has gained over 100 million downloads and 2.13 million reviews on the Play Store, where users rate it 4.4 out of 5 stars, praising its graphics, gameplay, and variety.</p>
<p>Car Parking Multiplayer is more than a parking game: it also features an open-world multiplayer mode, car tuning, free walking, voice chat, a police mode, and more. You can choose from over 100 cars with real interiors, 16 player skins, and various environments to explore, compete against real players in multiplayer racing, exchange cars with them, or join them in free roaming.</p>
<h3>Features of Car Parking Multiplayer</h3>
<p>Several features make Car Parking Multiplayer stand out from other parking games:</p>
<h4>Multiplayer open world mode</h4>
<p>This mode lets you interact with thousands of real players every day. You can chat using voice or text, make friends, join clans, or challenge others to races. You can also walk freely around the map, visit gas stations and car services, or role-play as a police officer or a criminal.</p>
<h4>Car customization</h4>
<p>You can tune your car's suspension, wheel angle, engine, turbo, gearbox, exhaust, and more, and change its color, vinyls, and body parts to suit your driving style and preferences.</p>
<h4>High-quality open world</h4>
<p>The game has highly detailed environments that you can explore by car or on foot: cities, highways, mountains, deserts, and more, with realistic physics, weather effects, day and night cycles, and traffic.</p>
<h4>Interesting gameplay</h4>
<p>The game offers 82 real-life parking and driving challenges to sharpen your skills, and a range of vehicles to try, including tow trucks, pickups, trucks, sports cars, and classic cars. Realistic controls and camera views make the gameplay immersive and enjoyable.</p>
<h3>Pros and Cons of Car Parking Multiplayer</h3>
<p>Like any game, Car Parking Multiplayer has its advantages and disadvantages:</p>
<h4>Pros</h4>
<ul>
<li>Amazing graphics and sound effects that create a realistic atmosphere.</li>
<li>Plenty of variety and content to keep players entertained and engaged.</li>
<li>A friendly, active community that makes the multiplayer mode fun and social.</li>
<li>Free to download and play, although it contains ads and in-app purchases.</li>
</ul>
<h4>Cons</h4>
<ul>
<li>Occasional bugs and glitches that affect performance and gameplay.</li>
<li>Hackers and cheaters who can ruin the multiplayer mode for others.</li>
<li>Some inappropriate content or language that may not suit younger players.</li>
<li>High battery and storage consumption on your device.</li>
</ul>
<h3>How to Download and Install Car Parking Multiplayer</h3>
<p>You can download and install Car Parking Multiplayer easily from the Play Store. Here are the requirements and steps:</p>
<h4>Requirements</h4>
<ul>
<li>A device running Android 5.0 or higher.</li>
<li>At least 1 GB of RAM and 500 MB of free storage space.</li>
<li>A stable internet connection for the multiplayer mode.</li>
</ul>
<h4>Steps</h4>
<ol>
<li>Open the Play Store app on your device and search for Car Parking Multiplayer.</li>
<li>Select the game from the search results and tap Install.</li>
<li>Wait for the game to download and install on your device.</li>
<li>Once the installation is complete, tap Open to launch the game.</li>
<li>Enjoy playing Car Parking Multiplayer!</li>
</ol>
<h3>Alternatives to Car Parking Multiplayer</h3>
<p>If you like Car Parking Multiplayer, you might also enjoy similar parking games. Here are two alternatives:</p>
<h4>Parking Master Multiplayer 2</h4>
<p>A sequel to the popular Parking Master Multiplayer with more features and challenges. You can play with real players online, customize your car, explore different maps, and complete parking missions, with realistic graphics, physics, and controls that make it fun and addictive. It is free on the Play Store.</p>
<h4>Parking Master Multiplayer</h4>
<p>The original game that started the series. It offers the same core experience: online play with real players, car customization, varied maps, and parking missions. It is also free on the Play Store.</p>
<h3>Conclusion</h3>
<p>Car Parking Multiplayer is a great game for anyone who loves cars and parking. It has a rich feature set, some trade-offs to weigh, a simple install process, and a few solid alternatives if you want more variety. If you are looking for a realistic and fun car parking game to play with friends, give it a try; you might be surprised by how much you enjoy it.</p>
<h3>FAQs</h3>
<ul>
<li><b>Q: How do I join a multiplayer server in Car Parking Multiplayer?</b><br>A: Tap the multiplayer button on the main menu, select a region, and choose a server from the list. You can also create your own server by tapping the create button.</li>
<li><b>Q: How do I earn money in Car Parking Multiplayer?</b><br>A: Complete parking missions, race against other players, sell or exchange cars, or watch ads.</li>
<li><b>Q: How do I chat with other players in Car Parking Multiplayer?</b><br>A: Tap the chat button at the top right corner of the screen. You can use voice or text chat, as well as emojis and stickers.</li>
<li><b>Q: How do I report a hacker or cheater in Car Parking Multiplayer?</b><br>A: Tap the player's name on the player list, then tap the report button. You can also block them from chatting with you or joining your server.</li>
<li><b>Q: How do I update Car Parking Multiplayer?</b><br>A: Open the Play Store app, search for Car Parking Multiplayer, and tap Update. You can also enable automatic updates for the game in the Play Store settings.</li>
</ul>
spaces/1phancelerku/anime-remove-background/Como Conseguir Robux Infinito no ROBLOX 2022 Download Grtis e Seguro.md
DELETED
@@ -1,89 +0,0 @@
<h1>How to Download Roblox Robux Infinito 2022</h1>
<p>Roblox is one of the most popular online gaming platforms in the world, with over 115 million active players. It lets you create, play, and share your own games and experiences. To enjoy its full potential, though, you need Robux, the premium currency, which buys games, items, accessories, and more. How can you get more Robux without spending real money? One way is to download Roblox Robux Infinito 2022, a modded version of Roblox that gives you unlimited Robux and other features. In this article, we explain what Roblox Robux Infinito 2022 is, why you might want to download it, and how to do it safely and easily. Let's get started!</p>
<h2>What is Roblox Robux Infinito 2022?</h2>
<p>Roblox Robux Infinito 2022 is a modified version of the original Roblox app that gives you access to unlimited Robux and other hacks. Before going further, let's clarify what Robux and modded apps are.</p>
<h3>Robux: The Premium Currency of Roblox</h3>
<p>Robux is the in-game currency used to buy games, items, accessories, and more on Roblox. You can earn Robux by creating games, selling items, or joining the Premium subscription, or buy them with real money through the official website or app. Buying Robux gets expensive quickly: 10,000 Robux cost $99.99, which is why some players look for alternative ways to get Robux for free.</p>
<h3>Robux Infinito: A Modded Version of Roblox</h3>
<p>A modded app is an app modified by someone other than the original developer, usually to add features the official app lacks, such as cheats, hacks, or unlimited resources. Robux Infinito is such a mod: it offers unlimited Robux plus features such as:</p>
<ul>
<li>Fly hack: fly in any game.</li>
<li>Teleport hack: teleport to any location in any game.</li>
<li>Wallhack: walk through walls in any game.</li>
<li>Immortality hack: you cannot die in any game.</li>
<li>And more!</li>
</ul>
<p>Robux Infinito is not an official app from Roblox Corporation. It is created by third-party developers who are not affiliated with or endorsed by Roblox Corporation, so it is not available on the official website or app store; you have to download it from other sources online.</p>
<h2>Why Download Roblox Robux Infinito 2022?</h2>
<p>Downloading Roblox Robux Infinito 2022 has both benefits and risks. Here are the main ones:</p>
<h3>Benefits of Robux Infinito</h3>
<p>The main benefit is unlimited Robux for free: you can buy any game, item, accessory, or feature on Roblox without spending real money. You can also use the hacks and cheats, such as flying around the map, teleporting to different places, walking through walls, and being invincible in any game. With unlimited Robux you can create and customize your own games and items, share them with other players and earn more Robux from them, and join any game or group that requires Robux without paying. In short, you get more fun and freedom on Roblox.</p>
<h3>Risks of Robux Infinito</h3>
<p>However, there are serious risks. Robux Infinito is not safe or legal to use. As a modded app, it is not approved by Roblox Corporation or the app store, and it may contain viruses, malware, or spyware that can harm your device or steal your personal information. It may also have bugs, glitches, or errors that affect the app or the game. Using it violates Roblox's terms of service and community guidelines: it counts as cheating and hacking, which can get your account banned or suspended, and you may lose all the progress, items, and Robux you earned legitimately. Finally, it can ruin the experience for players who play fairly, making games unfair, boring, or frustrating for them, and damaging the platform's quality and reputation.</p>
<h2>How to Download Roblox Robux Infinito 2022?</h2>
<p>If you still want to download Roblox Robux Infinito 2022 despite the risks, follow these steps:</p>
<h3>Step 1: Find a Reliable Source</h3>
<p>First, find a reliable source offering the latest version of Robux Infinito 2022. It is not on the official website or app store, so you must search online. Not all sources are trustworthy: some provide fake or outdated versions that do not work or carry viruses, and some demand surveys, extra app downloads, or personal information before giving you the link. To avoid these scams, look for sources with positive reviews, ratings, comments, and feedback from other users, and check the file size, name, and extension before downloading. The file should be an APK of around 100 MB with a name similar to "Roblox_Robux_Infinito_2022.apk".</p>
<h3>Step 2: Download and Install the APK File</h3>
<p>An APK file is an Android application package containing all the files and data needed to run an app on an Android device. To install one from outside the official app store, enable the "Unknown Sources" option: go to Settings > Security > Unknown Sources and toggle it on (you may see a warning, which you can accept to proceed). Then open the downloaded APK file to start the installation, follow the on-screen instructions, and wait for it to finish.</p>
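<p>If you have a computer handy, an alternative to opening the APK on the phone is to sideload it over USB with Android's adb tool. This is a rough sketch, assuming adb is installed on the computer and USB debugging is enabled on the device; the file name follows the placeholder pattern mentioned above.</p>
<pre><code>import subprocess

APK = "Roblox_Robux_Infinito_2022.apk"  # placeholder file name from Step 1

# List connected devices so you can confirm the phone is visible and authorized.
subprocess.run(["adb", "devices"], check=True)

# Install the APK; -r replaces the app if an older copy is already installed.
subprocess.run(["adb", "install", "-r", APK], check=True)
</code></pre>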
<h3>Step 3: Enjoy the Unlimited Robux and Features</h3>
<p>Finally, open the app and log in with your existing Roblox account or create a new one. You will see unlimited Robux in your account balance, which you can spend on anything you want. The hacks and cheats can be enabled or disabled in the app settings. You can also create and customize your own games and items, share them with other players to earn more Robux, and join any game or group that requires Robux without paying.</p>
<h2>Conclusion</h2>
<p>Roblox is a great platform for creating, playing, and sharing games and experiences. To get more out of it you need Robux, and one way to get Robux for free is Roblox Robux Infinito 2022, a modded version of Roblox with unlimited Robux, hacks, and cheats. However, it also carries real risks, including viruses, malware, account bans, and degraded game quality, so be careful when downloading and using it. If you decide to download Robux Infinito 2022, you do so at your own risk and responsibility. We hope this article has helped you understand what Robux Infinito 2022 is, why you might want it, and how to get it safely and easily. Happy gaming!</p>
<h2>FAQs</h2>
<p>Here are some frequently asked questions about Robux Infinito 2022:</p>
<table>
<tr>
<th>Question</th>
<th>Answer</th>
</tr>
<tr>
<td>Is Robux Infinito 2022 free?</td>
<td>Yes, it is free to download and use, although some sources may ask you to complete tasks or surveys to get the download link.</td>
</tr>
<tr>
<td>Is Robux Infinito 2022 safe?</td>
<td>No. It may contain viruses, malware, or spyware, and it may have bugs, glitches, or errors. Using it also violates Roblox's terms of service and community guidelines, which can get your account banned or suspended.</td>
</tr>
<tr>
<td>Is Robux Infinito 2022 legal?</td>
<td>No. It is a modded app that violates the intellectual property rights of Roblox Corporation and the app store, and some countries prohibit the use of modded apps or games.</td>
</tr>
<tr>
<td>Can I use Robux Infinito 2022 on iOS devices?</td>
<td>No, it is only compatible with Android devices; it does not run on iPhones or iPads.</td>
</tr>
<tr>
<td>Can I use Robux Infinito 2022 on PC?</td>
<td>Yes, if you run it inside an Android emulator such as BlueStacks, NoxPlayer, or LDPlayer.</td>
</tr>
</table>
spaces/1phancelerku/anime-remove-background/Download Arceus X V3.1.0 Beta and Enjoy Roblox Like Never Before.md
DELETED
@@ -1,145 +0,0 @@
<h1>Arceus X: How to Download and Play the Ultimate Roblox Mod Menu on iOS</h1>
<p>If you are a fan of Roblox, you might have heard of Arceus X, a mod menu that lets you exploit your favorite games with features such as Android LuaU Execution, Infinite Jump, Super Speed, Btools, Script Hub, and more. Arceus X is one of the most widely used Roblox mod menus and was developed specifically for Android. But can you download and install it on an iOS device? The answer is yes, and in this article we show you how, step by step.</p>
<h2>What is Arceus X?</h2>
<p>Arceus X is an Android Roblox mod menu/exploit built to enhance gameplay. It provides features such as Android LuaU Execution, Infinite Jump, Super Speed, Btools, and a Script Hub. The Arceus X APK is developed using Node.js, C++, and Java, and it presents a floating menu you can use to execute scripts while in a game.</p>
<h3>Features of Arceus X</h3>
<p>Some of the features that set Arceus X apart from other Roblox mod menus:</p>
<ul>
<li>Android LuaU Execution: run any Lua script on your Android device without limitations.</li>
<li>Infinite Jump: jump as high as you want in any game.</li>
<li>Super Speed: move faster than normal in any game.</li>
<li>Btools: delete or modify any object in any game.</li>
<li>Script Hub: access a collection of scripts for various games from the mod menu.</li>
<li>More: Fly, Noclip, ESP, Aimbot, God Mode, and other options.</li>
</ul>
<h3>Requirements for Arceus X</h3>
<p>To download and play Arceus X on your iOS device, you will need:</p>
<ul>
<li>An iOS device running iOS 10 or later.</li>
<li>An Android device or an emulator to obtain the Arceus X APK file.</li>
<li>A file manager app on your iOS device to locate the transferred APK file.</li>
<li>An iOS emulator app on your iOS device to run the APK file.</li>
<li>A Roblox account to play the games.</li>
</ul>
<h2>How to Download Arceus X on iOS</h2>
<p>Now that you know what Arceus X is and what you need to play it, let's walk through the download process:</p>
<h3>Step 1: Get the Arceus X APK file</h3>
<p>First, get the Arceus X APK file from a reliable source, using either an Android device or an emulator on your PC. Some options:</p>
<ul>
<li>Download it from the official Arceus X website: click the download button and complete the verification process, and the APK file downloads automatically.</li>
<li>Follow a YouTube tutorial that shows how to download and install Arceus X on an Android device, and get the APK file that way.</li>
<li>Join the Arceus X Discord server and ask the developers or other users for the APK file. You might need to verify your identity and follow some rules to get access.</li>
</ul>
<p>Once you have the APK file, transfer it to your iOS device by USB cable, Bluetooth, Wi-Fi, or any other method that works for you, and make sure you have a file manager app on the iOS device to locate it.</p>
<h3>Step 2: Install an iOS emulator</h3>
<p>Next, install an emulator app on your iOS device that can run Android apps. An emulator is software that mimics the behavior of another device or platform. Many iOS emulators exist, but not all of them run Arceus X smoothly. Some options recommended for Arceus X:</p>
<ul>
<li>iAndroid: a popular and reliable emulator with a simple interface that supports most Android features. You can download it from the App Store for free.</li>
<li>Cider: another emulator that runs Android apps with fast performance and supports many Android games. You can download it from its official website for free.</li>
<li>Appetize.io: an online emulator that runs Android apps in your browser, so nothing needs to be installed on your device; just upload the APK file and start playing. It is free for 100 minutes per month, with paid plans for more time.</li>
</ul>
<p>Once installed, launch the emulator and grant it the permissions it needs to access your device's storage, camera, microphone, and so on.</p>
<h3>Step 3: Run the Arceus X APK file on the emulator</h3>
<p>Finally, run the APK file in the emulator:</p>
<ol>
<li>Open the file manager app on your iOS device and locate the Arceus X APK file you transferred earlier.</li>
<li>Tap the APK file and choose to open it with the emulator app you installed.</li>
<li>The emulator launches and installs Arceus X in its virtual environment.</li>
<li>Once the installation is complete, the Arceus X icon appears on the emulator's home screen.</li>
<li>Tap the icon and log in with your Roblox account credentials.</li>
<li>A floating mod menu appears on your screen with options to exploit your favorite games.</li>
</ol>
<h3>Step 4: Enjoy the game</h3>
<p>Congratulations! You have installed Arceus X on your iOS device. Open the mod menu anytime by tapping it and selecting the options you want, and use the script hub to find and execute scripts for different games. Just be careful not to abuse the mod menu or get reported by other players, as Roblox may ban you.</p>
<h2>Tips and Tricks for Arceus X</h2>
<p>To make the most of Arceus X, here are some tips and tricks:</p>
<h3>How to use the script hub</h3>
<p>The script hub gives you access to a collection of scripts for various games, which you can use to enhance your gameplay or perform actions that are otherwise impossible. To use it:</p>
<ol>
<li>Tap the mod menu and select the script hub option.</li>
<li>A list appears of games that have scripts available.</li>
<li>Select the game you want to play and tap it.</li>
<li>A list of scripts for that game appears.</li>
<li>Select the script you want and tap it.</li>
<li>The script executes automatically and its effects appear in the game.</li>
</ol>
<h3>How to customize the mod menu</h3>
<p>You can customize the mod menu's appearance, position, size, and transparency, and enable, disable, or reconfigure individual features. To do so:</p>
<ol>
<li>Tap the mod menu and select the settings option.</li>
<li>A list of adjustable options appears: color, size, position, transparency, and so on.</li>
<li>Select an option and adjust it to your liking.</li>
<li>Enable or disable features, or change their settings, by tapping them.</li>
<li>Tap the save button to apply your changes.</li>
</ol>
<h3>How to avoid getting banned</h3>
<p>Arceus X is powerful but risky: if you use it too much or too blatantly, Roblox may detect it and ban you. To reduce the risk:</p>
<ul>
<li>Use the mod menu sparingly and discreetly. Don't use it in every game or every round, don't use it in front of other players or moderators, and don't use it to ruin the game for others.</li>
<li>Use the anti-ban feature, which tries to prevent detection by changing your device ID, IP address, and other information Roblox uses to identify you. You can enable it from the mod menu settings.</li>
<li>Use a VPN service, which encrypts your internet traffic and hides your IP address and location, making your activity harder to trace. Choose one that is reliable and secure.</li>
</ul>
<h2>Conclusion</h2>
<p>In this article, we covered how to download and play Arceus X on an iOS device: get the Arceus X APK file from a reliable source, install an iOS emulator app, run the APK file in the emulator, and enjoy the game. We also shared tips for using the script hub, customizing the mod menu, and avoiding bans. We hope you found this guide helpful and informative. If you have any questions or feedback, feel free to leave a comment below.</p>
<h3>FAQs</h3>
<p>Here are some frequently asked questions about Arceus X:</p>
<ol>
<li><b>Is Arceus X safe to use?</b> It is reasonably safe if you download it from a trusted source and follow the instructions carefully, but there is always a risk of a Roblox ban if you use it too much or too blatantly. To minimize that risk, use the anti-ban feature and a VPN service.</li>
<li><b>Is Arceus X free to use?</b> Yes, it is free and requires no payment or subscription, although you may need to complete verification steps or watch ads before downloading it.</li>
<li><b>Does Arceus X work on all games?</b> No. Some games have anti-cheat systems or scripts that prevent it from working properly. Check the script hub for the list of games with available scripts.</li>
<li><b>Can I use Arceus X on other devices?</b> Yes. It runs directly on Android devices without an emulator, and on PC via an Android emulator such as BlueStacks or Nox Player.</li>
<li><b>Where can I get more information about Arceus X?</b> From its official website, its YouTube channel, or its Discord server, where you can also contact the developers or other users for support or feedback.</li>
</ol>
|
144 |
-
<br />
|
145 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Download and Play PS3 Games on iOS with RetroArch and RPCS3.md
DELETED
@@ -1,183 +0,0 @@
<h1>How to Download and Install a PS3 Emulator for iOS</h1>
<p>Do you want to play your favorite PS3 games on your iPhone or iPad? If so, you need a PS3 emulator for iOS. A PS3 emulator is software that mimics the hardware and software of the PlayStation 3 console, allowing you to run PS3 games on your iOS device. In this article, we will show you how to download and install a PS3 emulator for iOS, as well as how to play PS3 games on your iOS device.</p>
<h2>What Is a PS3 Emulator for iOS?</h2>
<h3>Definition and features of a PS3 emulator for iOS</h3>
<p>A PS3 emulator for iOS is a program that simulates the functionality of the PlayStation 3 console on an iOS device. It does this by translating the instructions and data of PS3 games into a format that the iOS device can understand and execute. A PS3 emulator for iOS typically offers features such as:</p>
<ul>
<li>Supporting multiple gaming systems, such as PlayStation 1-3, Nintendo 64, DS, Game Boy, PSP, and more.</li>
<li>Enhancing the graphical quality of games by rendering them at higher resolutions than the original console.</li>
<li>Offering save states, which allow you to save your progress at any point in the game and resume it later.</li>
<li>Enabling online multiplayer mode for compatible games.</li>
<li>Allowing you to customize the controls, audio, video, and other settings of the emulator.</li>
</ul>
<h3>Benefits and drawbacks of a PS3 emulator for iOS</h3>
<p>A PS3 emulator for iOS has several benefits:</p>
<ul>
<li>It lets you play PS3 games that are not available or compatible with your iOS device.</li>
<li>It saves you money and space by not requiring you to buy or own a PS3 console and its accessories.</li>
<li>It gives you access to a large library of retro and classic games that you can enjoy on your iOS device.</li>
<li>It allows you to experience the nostalgia and fun of playing old-school games on a modern device.</li>
</ul>
<p>However, a PS3 emulator for iOS also has some drawbacks:</p>
<ul>
<li>It may not run all PS3 games smoothly or accurately, depending on the performance and compatibility of your iOS device and the emulator.</li>
<li>It may consume a lot of battery power and storage space on your iOS device.</li>
<li>It may expose your iOS device to security risks if you download or install an emulator or ROMs from untrusted sources.</li>
<li>It may violate the intellectual property rights of game developers and publishers if you download or use ROMs without their permission.</li>
</ul>
<h2>How to Download a PS3 Emulator for iOS?</h2>
<h3>Requirements and compatibility of a PS3 emulator for iOS</h3>
<p>Before you download a PS3 emulator for iOS, make sure that your iOS device meets the minimum requirements of the emulator. In general:</p>
<ul>
<li>Your iOS device should have iOS 10 or newer installed.</li>
<li>Your iOS device should have at least 64 MB of free storage space available.</li>
<li>Your iOS device should have a jailbreak or an alternative app store installed, such as AltStore.</li>
<li>Your iOS device should support external controllers, such as PS4 or PS5 controllers.</li>
</ul>
<h3>Steps to download a PS3 emulator for iOS using AltStore</h3>
<p>One of the best PS3 emulators for iOS is RetroArch, a multi-system emulator that supports PlayStation 1-3, Nintendo 64, DS, Game Boy, PSP, and more. RetroArch is available on AltStore, an alternative app store that lets you install apps that are not available on the official App Store. To download RetroArch using AltStore, follow these steps:</p>
<ol>
<li>Download and install AltStore on your iOS device and your computer. You can find the instructions and the download links on the official website of AltStore: <a href="https://altstore.io/">https://altstore.io/</a></li>
<li>Launch AltStore on your computer and connect your iOS device to your computer using a USB cable.</li>
<li>Trust your computer on your iOS device and trust your iOS device on your computer.</li>
<li>Enter your Apple ID and password in AltStore on your computer. This is required to sign the apps that you install using AltStore.</li>
<li>Open the AltStore app on your iOS device and tap on the Browse tab.</li>
<li>Search for RetroArch and tap on the Install button next to it.</li>
<li>Wait for the installation to complete. You may need to enter your Apple ID and password again.</li>
<li>Go to Settings > General > Device Management on your iOS device and trust the developer profile of RetroArch.</li>
</ol>
<p>Congratulations! You have successfully downloaded RetroArch using AltStore. Now you can proceed to install it on your iOS device.</p>
<h2>How to Install a PS3 Emulator for iOS?</h2>
<h3>Steps to install a PS3 emulator for iOS using AltStore</h3>
<p>To install RetroArch on your iOS device using AltStore, follow these steps:</p>
<ol>
<li>Launch the AltStore app on your iOS device and tap on the My Apps tab.</li>
<li>Tap on the RetroArch icon and then tap on the Open button.</li>
<li>Allow RetroArch to access your photos, media, and files on your iOS device.</li>
<li>Accept the terms and conditions of RetroArch.</li>
<li>Select your preferred language for RetroArch.</li>
</ol>
<p>Congratulations! You have successfully installed RetroArch on your iOS device using AltStore. Now you can proceed to install the PS3 firmware and ROMs for RetroArch.</p>
<h3>How to install PS3 firmware and ROMs for a PS3 emulator for iOS</h3>
<p>To play PS3 games in RetroArch, you need to install the PS3 firmware and ROMs. The firmware is the software that runs the PS3 console, while ROMs are the files that contain the games. Follow these steps (a small helper-script sketch follows the list):</p>
<ol>
<li>Download the PS3 firmware from a trusted source. You can find it online by searching for "PS3 firmware download". Make sure you download the latest version of the firmware.</li>
<li>Extract the firmware file using a file manager app or a computer. You should get a file named "PS3UPDAT.PUP".</li>
<li>Rename the file to "PS3UPDAT.PUP.bak" and copy it to a folder named "firmware" in the Documents folder of your iOS device.</li>
<li>Download the ROMs of the PS3 games that you want to play from a trusted source. You can find them online by searching for "PS3 ROMs download". Make sure you download ROMs that are compatible with RetroArch.</li>
<li>Extract the ROM files using a file manager app or a computer. You should get files with extensions such as ".iso", ".bin", ".cue", ".mdf", ".mds", or ".pbp".</li>
<li>Copy the ROM files to a folder named "roms" in the Documents folder of your iOS device.</li>
</ol>
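<p>If you prepare the files on a computer first, the renaming and sorting can be scripted. The following Python sketch is purely illustrative (the staging folder names are made up; only the "firmware"/"roms" layout and the ".bak" rename come from the steps above):</p>
<pre><code>import shutil
from pathlib import Path

staging = Path("ps3_staging")        # hypothetical folder with the extracted files
out = Path("for_ios_documents")      # to be copied into the device's Documents folder

(out / "firmware").mkdir(parents=True, exist_ok=True)
(out / "roms").mkdir(parents=True, exist_ok=True)

# Rename the firmware file as described in step 3.
fw = staging / "PS3UPDAT.PUP"
if fw.exists():
    shutil.copy(fw, out / "firmware" / "PS3UPDAT.PUP.bak")

# Collect ROM files by the extensions listed in step 5.
for ext in (".iso", ".bin", ".cue", ".mdf", ".mds", ".pbp"):
    for rom in staging.glob(f"*{ext}"):
        shutil.copy(rom, out / "roms" / rom.name)
</code></pre>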
<p>Congratulations! You have successfully installed the PS3 firmware and ROMs for RetroArch. Now you can proceed to play PS3 games on your iOS device.</p>
<h2>How to Play PS3 Games on iOS?</h2>
<h3>Tips and tricks for playing PS3 games on iOS</h3>
<p>To play PS3 games in RetroArch, follow these tips and tricks:</p>
<ul>
<li>Launch RetroArch on your iOS device and tap on the Load Core option.</li>
<li>Select PlayStation 3 (Beetle PSX HW) as the core that you want to load.</li>
<li>Tap on the Load Content option and navigate to the folder where you stored your ROM files.</li>
<li>Select the ROM that you want to play and tap on the Run option.</li>
<li>Wait for the game to load and enjoy playing it on your iOS device.</li>
<li>You can use the on-screen buttons or an external controller to control the game.</li>
<li>You can access the RetroArch menu by tapping on the RetroArch icon in the top left corner of the screen.</li>
<li>You can save and load your game progress by using the Save State and Load State options in the Quick Menu.</li>
<li>You can adjust the settings of the emulator, such as video, audio, input, and cheats, by using the Options and Settings options in the Main Menu.</li>
</ul>
<h3>Best PS3 games to play on iOS</h3>
<p>There are many PS3 games that you can play on your iOS device using RetroArch, but some are more suitable and enjoyable than others. Here are some of the best PS3 games to play on iOS:</p>
<table>
<tr>
<th>Game</th>
<th>Genre</th>
<th>Description</th>
</tr>
<tr>
<td>The Last of Us</td>
<td>Action-adventure, survival horror</td>
<td>A post-apocalyptic game that follows the journey of Joel and Ellie, two survivors of a fungal outbreak that turned most of humanity into zombie-like creatures.</td>
</tr>
<tr>
<td>God of War III</td>
<td>Action-adventure, hack and slash</td>
<td>A mythological game that follows the revenge of Kratos, a former Spartan warrior, against the gods of Olympus for betraying him.</td>
</tr>
<tr>
<td>Uncharted 2: Among Thieves</td>
<td>Action-adventure, third-person shooter</td>
<td>A treasure-hunting game that follows the adventures of Nathan Drake, a charismatic explorer, as he searches for the lost city of Shambhala.</td>
</tr>
<tr>
<td>Grand Theft Auto V</td>
<td>Action-adventure, open world</td>
<td>A crime game that follows the lives of three protagonists, Michael, Franklin, and Trevor, as they commit heists and other illegal activities in Los Santos.</td>
</tr>
<tr>
<td>Metal Gear Solid 4: Guns of the Patriots</td>
<td>Action-adventure, stealth</td>
<td>A spy game that follows the final mission of Solid Snake, an aging soldier, as he tries to stop a global war caused by a rogue AI system.</td>
</tr>
</table>
<h2>Conclusion</h2>
<p>In this article, we have shown you how to download and install a PS3 emulator for iOS, as well as how to play PS3 games on your iOS device. A PS3 emulator for iOS is a great way to enjoy your favorite PS3 games on your iPhone or iPad without having to buy or own a PS3 console. However, you should also be aware of the potential drawbacks and risks of using a PS3 emulator for iOS, such as performance issues, battery drain, security threats, and legal implications. Therefore, you should use a PS3 emulator for iOS at your own discretion and responsibility. We hope you found this article helpful and informative. If you have any questions or feedback, please feel free to leave them in the comments section below. Happy gaming!</p>
<h2>FAQs</h2>
<h4>Q: Is a PS3 emulator for iOS legal?</h4>
<p>A: The legality of a PS3 emulator for iOS depends on several factors, such as where you live, what games you play, and how you obtain them. Generally speaking, emulators themselves are not illegal, but downloading or using ROMs without owning the original games or having the rights holders' permission may be. Therefore, always check your local laws and regulations before using a PS3 emulator for iOS.</p>
<h4>Q: Is a PS3 emulator for iOS safe?</h4>
<p>A: The safety of a PS3 emulator for iOS depends on several factors, such as where you download or install it from, what games you play, and how you protect your iOS device. Generally speaking, emulators themselves are not harmful, but downloading or installing them from untrusted sources may expose your iOS device to malware or viruses. Therefore, always use a reputable app store or website to download or install a PS3 emulator for iOS.</p>
<h4>Q: Is a PS3 emulator for iOS free?</h4>
<p>A: The cost of a PS3 emulator for iOS depends on several factors, such as which app store or website you use to download or install it from, what features or services it offers, and whether it has ads or in-app purchases. Generally speaking, most emulators are free or have a free version available, but some may charge a fee or require a subscription to access certain features or services. Therefore, always check the price and terms of use before downloading or installing a PS3 emulator for iOS.</p>
<h4>Q: What are the alternatives to a PS3 emulator for iOS?</h4>
<p>A: If you are looking for alternatives to a PS3 emulator for iOS, you may consider the following options:</p>
<ul>
<li>Use a PS3 console and connect it to your iOS device using a capture card or a streaming app.</li>
<li>Use a cloud gaming service that lets you play PS3 games on your iOS device via a remote server.</li>
<li>Use a different emulator that supports other PlayStation games on your iOS device, such as PPSSPP or Play!</li>
</ul>
<h4>Q: How do I update a PS3 emulator for iOS?</h4>
<p>A: To update RetroArch on iOS, follow these steps:</p>
<ol>
<li>Launch the AltStore app on your iOS device and tap on the My Apps tab.</li>
<li>Tap on the Update button next to RetroArch if a new version is available.</li>
<li>Wait for the update to complete and launch RetroArch again.</li>
</ol>
spaces/2ndelement/voicevox/build_util/create_venv_and_generate_licenses.bash
DELETED
@@ -1,24 +0,0 @@
# Create a virtual environment, install requirements.txt, and generate the license list

set -eux

if [ ! -v OUTPUT_LICENSE_JSON_PATH ]; then
  echo "OUTPUT_LICENSE_JSON_PATH is not defined"
  exit 1
fi

VENV_PATH="licenses_venv"

python -m venv $VENV_PATH
if [ -d "$VENV_PATH/Scripts" ]; then
  source $VENV_PATH/Scripts/activate
else
  source $VENV_PATH/bin/activate
fi

pip install -r requirements-license.txt
python generate_licenses.py >$OUTPUT_LICENSE_JSON_PATH

deactivate

rm -rf $VENV_PATH
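The script expects the output path in the environment, e.g. `OUTPUT_LICENSE_JSON_PATH=licenses.json bash create_venv_and_generate_licenses.bash`. A rough cross-platform Python sketch of the same flow, assuming the same `requirements-license.txt` and `generate_licenses.py` sit alongside it:

```python
import os
import shutil
import subprocess
import sys
import venv

venv_path = "licenses_venv"
out_path = os.environ.get("OUTPUT_LICENSE_JSON_PATH")
if out_path is None:
    sys.exit("OUTPUT_LICENSE_JSON_PATH is not defined")

venv.create(venv_path, with_pip=True)
# Windows venvs put executables in Scripts/, POSIX in bin/ -- same check as the script.
bindir = "Scripts" if os.path.isdir(os.path.join(venv_path, "Scripts")) else "bin"
python = os.path.join(venv_path, bindir, "python")

subprocess.run([python, "-m", "pip", "install", "-r", "requirements-license.txt"], check=True)
with open(out_path, "w") as f:
    subprocess.run([python, "generate_licenses.py"], check=True, stdout=f)

shutil.rmtree(venv_path)  # mirror the script's cleanup
```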
spaces/2ndelement/voicevox/voicevox_engine/dev/synthesis_engine/mock.py
DELETED
@@ -1,136 +0,0 @@
from logging import getLogger
from typing import Any, Dict, List, Optional

import numpy as np
from pyopenjtalk import tts
from scipy.signal import resample

from ...model import AccentPhrase, AudioQuery
from ...synthesis_engine import SynthesisEngineBase
from ...synthesis_engine.synthesis_engine import to_flatten_moras


class MockSynthesisEngine(SynthesisEngineBase):
    """
    SynthesisEngine [Mock]
    """

    def __init__(
        self,
        speakers: str,
        supported_devices: Optional[str] = None,
    ):
        """
        __init__ [Mock]
        """
        super().__init__()

        self._speakers = speakers
        self._supported_devices = supported_devices
        self.default_sampling_rate = 24000

    @property
    def speakers(self) -> str:
        return self._speakers

    @property
    def supported_devices(self) -> Optional[str]:
        return self._supported_devices

    def replace_phoneme_length(
        self, accent_phrases: List[AccentPhrase], speaker_id: int
    ) -> List[AccentPhrase]:
        """
        replace_phoneme_length: returns the input accent_phrases unchanged [Mock]

        Parameters
        ----------
        accent_phrases : List[AccentPhrase]
            list of accent phrases
        speaker_id : int
            speaker ID

        Returns
        -------
        List[AccentPhrase]
            list of accent phrases (unchanged)
        """
        return accent_phrases

    def replace_mora_pitch(
        self, accent_phrases: List[AccentPhrase], speaker_id: int
    ) -> List[AccentPhrase]:
        """
        replace_mora_pitch: returns the input accent_phrases unchanged [Mock]

        Parameters
        ----------
        accent_phrases : List[AccentPhrase]
            list of accent phrases
        speaker_id : int
            speaker ID

        Returns
        -------
        List[AccentPhrase]
            list of accent phrases (unchanged)
        """
        return accent_phrases

    def _synthesis_impl(self, query: AudioQuery, speaker_id: int) -> np.ndarray:
        """
        synthesis: synthesize audio without using voicevox core [Mock]

        Parameters
        ----------
        query : AudioQuery
            JSON obtained from the /audio_query API
        speaker_id : int
            speaker ID

        Returns
        -------
        wave [npt.NDArray[np.int16]]
            waveform data as a NumPy array
        """
        # recall text in katakana
        flatten_moras = to_flatten_moras(query.accent_phrases)
        kana_text = "".join([mora.text for mora in flatten_moras])

        wave = self.forward(kana_text)

        # volume
        wave *= query.volumeScale

        return wave.astype("int16")

    def forward(self, text: str, **kwargs: Dict[str, Any]) -> np.ndarray:
        """
        forward: TTS via pyopenjtalk.tts()
        see the docstring of SynthesisEngine for reference [Mock]

        Parameters
        ----------
        text : str
            input string (e.g. the sentence to read aloud, converted to katakana)

        Returns
        -------
        wave [npt.NDArray[np.int16]]
            waveform data as a NumPy array

        Note
        -------
        The synthesis performed here does not reflect prosody tuning (pitch etc.)

        # output spec of pyopenjtalk.tts()
        dtype=np.float64, 16 bit, mono, 48000 Hz

        # about the resample
        the output is converted to 24 kHz to match the non-mock implementation (decode_forward)
        """
        logger = getLogger("uvicorn")  # for use from within FastAPI / Uvicorn
        logger.info("[Mock] input text: %s" % text)
        wave, sr = tts(text)
        wave = resample(wave, 24000 * len(wave) // 48000)
        return wave
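The 48 kHz to 24 kHz conversion in forward() above simply asks scipy.signal.resample for half as many output samples. A standalone sketch of the same length arithmetic, using a synthetic sine wave so pyopenjtalk is not needed:

```python
import numpy as np
from scipy.signal import resample

sr_in, sr_out = 48000, 24000
t = np.linspace(0, 1.0, sr_in, endpoint=False)
wave = np.sin(2 * np.pi * 440 * t)          # 1 second of A4 at 48 kHz

resampled = resample(wave, sr_out * len(wave) // sr_in)
print(len(wave), len(resampled))            # 48000 24000
```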
spaces/4Taps/SadTalker/src/face3d/util/detect_lm68.py
DELETED
@@ -1,106 +0,0 @@
import os
import cv2
import numpy as np
from scipy.io import loadmat
import tensorflow as tf
from util.preprocess import align_for_lm
from shutil import move

mean_face = np.loadtxt('util/test_mean_face.txt')
mean_face = mean_face.reshape([68, 2])

def save_label(labels, save_path):
    np.savetxt(save_path, labels)

def draw_landmarks(img, landmark, save_name):
    lm_img = np.zeros([img.shape[0], img.shape[1], 3])
    lm_img[:] = img.astype(np.float32)
    landmark = np.round(landmark).astype(np.int32)

    for i in range(len(landmark)):
        for j in range(-1, 1):
            for k in range(-1, 1):
                if img.shape[0] - 1 - landmark[i, 1]+j > 0 and \
                        img.shape[0] - 1 - landmark[i, 1]+j < img.shape[0] and \
                        landmark[i, 0]+k > 0 and \
                        landmark[i, 0]+k < img.shape[1]:
                    lm_img[img.shape[0] - 1 - landmark[i, 1]+j, landmark[i, 0]+k,
                           :] = np.array([0, 0, 255])
    lm_img = lm_img.astype(np.uint8)

    cv2.imwrite(save_name, lm_img)


def load_data(img_name, txt_name):
    return cv2.imread(img_name), np.loadtxt(txt_name)

# create tensorflow graph for landmark detector
def load_lm_graph(graph_filename):
    with tf.gfile.GFile(graph_filename, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())

    with tf.Graph().as_default() as graph:
        tf.import_graph_def(graph_def, name='net')
        img_224 = graph.get_tensor_by_name('net/input_imgs:0')
        output_lm = graph.get_tensor_by_name('net/lm:0')
        lm_sess = tf.Session(graph=graph)

    return lm_sess, img_224, output_lm

# landmark detection
def detect_68p(img_path, sess, input_op, output_op):
    print('detecting landmarks......')
    names = [i for i in sorted(os.listdir(
        img_path)) if 'jpg' in i or 'png' in i or 'jpeg' in i or 'PNG' in i]
    vis_path = os.path.join(img_path, 'vis')
    remove_path = os.path.join(img_path, 'remove')
    save_path = os.path.join(img_path, 'landmarks')
    if not os.path.isdir(vis_path):
        os.makedirs(vis_path)
    if not os.path.isdir(remove_path):
        os.makedirs(remove_path)
    if not os.path.isdir(save_path):
        os.makedirs(save_path)

    for i in range(0, len(names)):
        name = names[i]
        print('%05d' % (i), ' ', name)
        full_image_name = os.path.join(img_path, name)
        txt_name = '.'.join(name.split('.')[:-1]) + '.txt'
        full_txt_name = os.path.join(img_path, 'detections', txt_name)  # 5 facial landmark path for each image

        # if an image does not have detected 5 facial landmarks, remove it from the training list
        if not os.path.isfile(full_txt_name):
            move(full_image_name, os.path.join(remove_path, name))
            continue

        # load data
        img, five_points = load_data(full_image_name, full_txt_name)
        input_img, scale, bbox = align_for_lm(img, five_points)  # align for 68 landmark detection

        # if the alignment fails, remove corresponding image from the training list
        if scale == 0:
            move(full_txt_name, os.path.join(
                remove_path, txt_name))
            move(full_image_name, os.path.join(remove_path, name))
            continue

        # detect landmarks
        input_img = np.reshape(
            input_img, [1, 224, 224, 3]).astype(np.float32)
        landmark = sess.run(
            output_op, feed_dict={input_op: input_img})

        # transform back to original image coordinate
        landmark = landmark.reshape([68, 2]) + mean_face
        landmark[:, 1] = 223 - landmark[:, 1]
        landmark = landmark / scale
        landmark[:, 0] = landmark[:, 0] + bbox[0]
        landmark[:, 1] = landmark[:, 1] + bbox[1]
        landmark[:, 1] = img.shape[0] - 1 - landmark[:, 1]

        if i % 100 == 0:
            draw_landmarks(img, landmark, os.path.join(vis_path, name))
        save_label(landmark, os.path.join(save_path, txt_name))
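The coordinate bookkeeping at the end of detect_68p (flip y inside the 224x224 crop, undo the crop scale, shift by the crop origin, flip y back) is easiest to see on a single point. A small numpy sketch with made-up scale/bbox values, purely illustrative:

```python
import numpy as np

# Hypothetical values, for illustration only.
img_h = 480                      # original image height
scale = 0.5                      # crop scale as returned by align_for_lm
bbox = np.array([100.0, 50.0])   # crop origin (x, y) in the original image

pt = np.array([112.0, 112.0])    # a point in the 224x224 crop (network output + mean_face)
pt[1] = 223 - pt[1]              # flip y inside the crop, as in detect_68p
pt = pt / scale                  # undo the crop scaling
pt += bbox                       # shift by the crop origin
pt[1] = img_h - 1 - pt[1]        # flip y back into image coordinates
print(pt)                        # point in original image coordinates
```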
spaces/4com/stable-diffusion/README.md
DELETED
@@ -1,13 +0,0 @@
---
title: Stable Diffusion
emoji: 🔥
colorFrom: yellow
colorTo: gray
sdk: gradio
sdk_version: 3.43.2
app_file: app.py
pinned: false
license: creativeml-openrail-m
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/AIFILMS/generate_human_motion/VQ-Trans/models/resnet.py
DELETED
@@ -1,82 +0,0 @@
import torch.nn as nn
import torch

class nonlinearity(nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, x):
        # swish
        return x * torch.sigmoid(x)

class ResConv1DBlock(nn.Module):
    def __init__(self, n_in, n_state, dilation=1, activation='silu', norm=None, dropout=None):
        super().__init__()
        padding = dilation
        self.norm = norm
        if norm == "LN":
            self.norm1 = nn.LayerNorm(n_in)
            self.norm2 = nn.LayerNorm(n_in)
        elif norm == "GN":
            self.norm1 = nn.GroupNorm(num_groups=32, num_channels=n_in, eps=1e-6, affine=True)
            self.norm2 = nn.GroupNorm(num_groups=32, num_channels=n_in, eps=1e-6, affine=True)
        elif norm == "BN":
            self.norm1 = nn.BatchNorm1d(num_features=n_in, eps=1e-6, affine=True)
            self.norm2 = nn.BatchNorm1d(num_features=n_in, eps=1e-6, affine=True)
        else:
            self.norm1 = nn.Identity()
            self.norm2 = nn.Identity()

        if activation == "relu":
            self.activation1 = nn.ReLU()
            self.activation2 = nn.ReLU()
        elif activation == "silu":
            self.activation1 = nonlinearity()
            self.activation2 = nonlinearity()
        elif activation == "gelu":
            self.activation1 = nn.GELU()
            self.activation2 = nn.GELU()

        self.conv1 = nn.Conv1d(n_in, n_state, 3, 1, padding, dilation)
        self.conv2 = nn.Conv1d(n_state, n_in, 1, 1, 0,)

    def forward(self, x):
        x_orig = x
        if self.norm == "LN":
            x = self.norm1(x.transpose(-2, -1))
            x = self.activation1(x.transpose(-2, -1))
        else:
            x = self.norm1(x)
            x = self.activation1(x)

        x = self.conv1(x)

        if self.norm == "LN":
            x = self.norm2(x.transpose(-2, -1))
            x = self.activation2(x.transpose(-2, -1))
        else:
            x = self.norm2(x)
            x = self.activation2(x)

        x = self.conv2(x)
        x = x + x_orig
        return x

class Resnet1D(nn.Module):
    def __init__(self, n_in, n_depth, dilation_growth_rate=1, reverse_dilation=True, activation='relu', norm=None):
        super().__init__()

        blocks = [ResConv1DBlock(n_in, n_in, dilation=dilation_growth_rate ** depth, activation=activation, norm=norm) for depth in range(n_depth)]
        if reverse_dilation:
            blocks = blocks[::-1]

        self.model = nn.Sequential(*blocks)

    def forward(self, x):
        return self.model(x)
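A quick shape check for Resnet1D above: with n_depth=3 and dilation_growth_rate=2 the blocks use dilations 1, 2, 4 (applied in reverse order by default), and each residual 1-D convolution preserves the (batch, channels, time) shape. A minimal sketch, assuming the module above is importable as models.resnet:

```python
import torch
from models.resnet import Resnet1D  # hypothetical import path for the module above

net = Resnet1D(n_in=64, n_depth=3, dilation_growth_rate=2, activation='silu', norm='GN')
x = torch.randn(8, 64, 128)  # (batch, channels, time)
y = net(x)
print(y.shape)               # torch.Size([8, 64, 128]) -- residual blocks preserve the shape
```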
spaces/ASJMO/freegpt/client/html/index.html
DELETED
@@ -1,135 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8" />
  <meta http-equiv="X-UA-Compatible" content="IE=edge" />
  <meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0" />
  <meta name="description" content="A conversational AI system that listens, learns, and challenges" />
  <meta property="og:title" content="ChatGPT" />
  <meta property="og:image" content="https://openai.com/content/images/2022/11/ChatGPT.jpg" />
  <meta
    property="og:description"
    content="A conversational AI system that listens, learns, and challenges" />
  <meta property="og:url" content="https://chat.acy.dev" />
  <link rel="stylesheet" href="{{ url_for('bp.static', filename='css/style.css') }}" />
  <link
    rel="apple-touch-icon"
    sizes="180x180"
    href="{{ url_for('bp.static', filename='img/apple-touch-icon.png') }}" />
  <link
    rel="icon"
    type="image/png"
    sizes="32x32"
    href="{{ url_for('bp.static', filename='img/favicon-32x32.png') }}" />
  <link
    rel="icon"
    type="image/png"
    sizes="16x16"
    href="{{ url_for('bp.static', filename='img/favicon-16x16.png') }}" />
  <link rel="manifest" href="{{ url_for('bp.static', filename='img/site.webmanifest') }}" />
  <link
    rel="stylesheet"
    href="//cdn.jsdelivr.net/gh/highlightjs/cdn-release@latest/build/styles/base16/dracula.min.css" />
  <title>FreeGPT</title>
</head>

<body data-urlprefix="{{ url_prefix }}">
  <div class="main-container">
    <div class="box sidebar">
      <div class="top">
        <button class="button" onclick="new_conversation()">
          <i class="fa-regular fa-plus"></i>
          <span>{{_('New Conversation')}}</span>
        </button>
        <div class="spinner"></div>
      </div>
      <div class="sidebar-footer">
        <button class="button" onclick="delete_conversations()">
          <i class="fa-regular fa-trash"></i>
          <span>{{_('Clear Conversations')}}</span>
        </button>
        <div class="settings-container">
          <div class="checkbox field">
            <span>{{_('Dark Mode')}}</span>
            <input type="checkbox" id="theme-toggler" />
            <label for="theme-toggler"></label>
          </div>
          <div class="field">
            <span>{{_('Language')}}</span>
            <select
              class="dropdown"
              id="language"
              onchange="changeLanguage(this.value)"></select>
          </div>
        </div>
        <a class="info" href="https://github.com/ramonvc/gptfree-jailbreak-webui" target="_blank">
          <i class="fa-brands fa-github"></i>
          <span class="conversation-title"> {{_('Version')}}: 0.1.0 </span>
        </a>
      </div>
    </div>
    <div class="conversation">
      <div class="stop-generating stop-generating-hidden">
        <button class="button" id="cancelButton">
          <span>{{_('Stop Generating')}}</span>
        </button>
      </div>
      <div class="box" id="messages"></div>
      <div class="user-input">
        <div class="box input-box">
          <textarea
            id="message-input"
            placeholder="{{_('Ask a question')}}"
            cols="30"
            rows="10"
            style="white-space: pre-wrap"></textarea>
          <div id="send-button">
            <i class="fa-regular fa-paper-plane-top"></i>
          </div>
        </div>
      </div>
      <div>
        <div class="options-container">
          <div class="buttons">
            <div class="field">
              <select class="dropdown" name="model" id="model">
                <option value="gpt-3.5-turbo">GPT-3.5</option>
                <option value="gpt-3.5-turbo-16k">GPT-3.5-turbo-16k</option>
                <option value="gpt-4" selected>GPT-4</option>
              </select>
            </div>
            <div class="field">
              <select class="dropdown" name="jailbreak" id="jailbreak">
                <option value="default" selected>{{_('Default')}}</option>
                <option value="gpt-dan-11.0">{{_('DAN')}}</option>
                <option value="gpt-evil">{{_('Evil')}}</option>
              </select>
            </div>
          </div>
          <div class="field checkbox">
            <input type="checkbox" id="switch" />
            <label for="switch"></label>
            <span>{{_('Web Access')}}</span>
          </div>
        </div>
      </div>
    </div>
  </div>
  <div class="menu-button">
    <i class="fa-solid fa-bars"></i>
  </div>

  <!-- scripts -->
  <script>
    window.conversation_id = "{{ chat_id }}";
  </script>
  <script src="{{ url_for('bp.static', filename='js/icons.js') }}"></script>
  <script src="{{ url_for('bp.static', filename='js/chat.js') }}" defer></script>
  <script src="https://cdn.jsdelivr.net/npm/markdown-it@latest/dist/markdown-it.min.js"></script>
  <script src="{{ url_for('bp.static', filename='js/highlight.min.js') }}"></script>
  <script src="{{ url_for('bp.static', filename='js/highlightjs-copy.min.js') }}"></script>
  <script src="{{ url_for('bp.static', filename='js/theme-toggler.js') }}"></script>
  <script src="{{ url_for('bp.static', filename='js/sidebar-toggler.js') }}"></script>
  <script src="{{ url_for('bp.static', filename='js/change-language.js') }}"></script>
</body>
</html>
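The template above resolves its assets through url_for('bp.static', ...), i.e. it expects a Flask blueprint registered under the name bp with its own static folder, plus gettext-style i18n for the _() calls (omitted here). A minimal sketch of the server side this implies; the route, folder names, and url_prefix value are assumptions, only the blueprint/static mechanics are the point:

```python
from flask import Blueprint, Flask, render_template

# Hypothetical layout: the blueprint name "bp" must match url_for('bp.static', ...)
bp = Blueprint("bp", __name__,
               template_folder="html",
               static_folder="static")

@bp.route("/chat/<chat_id>")
def chat(chat_id):
    # chat_id and url_prefix are the variables the template interpolates
    return render_template("index.html", chat_id=chat_id, url_prefix="")

app = Flask(__name__)
app.register_blueprint(bp)

if __name__ == "__main__":
    app.run()
```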
spaces/Adapter/T2I-Adapter/ldm/models/diffusion/ddpm.py
DELETED
@@ -1,1313 +0,0 @@
|
|
1 |
-
"""
|
2 |
-
wild mixture of
|
3 |
-
https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
|
4 |
-
https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
|
5 |
-
https://github.com/CompVis/taming-transformers
|
6 |
-
-- merci
|
7 |
-
"""
|
8 |
-
|
9 |
-
import torch
|
10 |
-
import torch.nn as nn
|
11 |
-
import numpy as np
|
12 |
-
import pytorch_lightning as pl
|
13 |
-
from torch.optim.lr_scheduler import LambdaLR
|
14 |
-
from einops import rearrange, repeat
|
15 |
-
from contextlib import contextmanager, nullcontext
|
16 |
-
from functools import partial
|
17 |
-
import itertools
|
18 |
-
from tqdm import tqdm
|
19 |
-
from torchvision.utils import make_grid
|
20 |
-
from pytorch_lightning.utilities.distributed import rank_zero_only
|
21 |
-
from omegaconf import ListConfig
|
22 |
-
|
23 |
-
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
|
24 |
-
from ldm.modules.ema import LitEma
|
25 |
-
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
|
26 |
-
from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL
|
27 |
-
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
|
28 |
-
from ldm.models.diffusion.ddim import DDIMSampler
|
29 |
-
|
30 |
-
|
31 |
-
__conditioning_keys__ = {'concat': 'c_concat',
|
32 |
-
'crossattn': 'c_crossattn',
|
33 |
-
'adm': 'y'}
|
34 |
-
|
35 |
-
|
36 |
-
def disabled_train(self, mode=True):
|
37 |
-
"""Overwrite model.train with this function to make sure train/eval mode
|
38 |
-
does not change anymore."""
|
39 |
-
return self
|
40 |
-
|
41 |
-
|
42 |
-
def uniform_on_device(r1, r2, shape, device):
|
43 |
-
return (r1 - r2) * torch.rand(*shape, device=device) + r2
|
44 |
-
|
45 |
-
|
46 |
-
class DDPM(pl.LightningModule):
|
47 |
-
# classic DDPM with Gaussian diffusion, in image space
|
48 |
-
def __init__(self,
|
49 |
-
unet_config,
|
50 |
-
timesteps=1000,
|
51 |
-
beta_schedule="linear",
|
52 |
-
loss_type="l2",
|
53 |
-
ckpt_path=None,
|
54 |
-
ignore_keys=[],
|
55 |
-
load_only_unet=False,
|
56 |
-
monitor="val/loss",
|
57 |
-
use_ema=True,
|
58 |
-
first_stage_key="image",
|
59 |
-
image_size=256,
|
60 |
-
channels=3,
|
61 |
-
log_every_t=100,
|
62 |
-
clip_denoised=True,
|
63 |
-
linear_start=1e-4,
|
64 |
-
linear_end=2e-2,
|
65 |
-
cosine_s=8e-3,
|
66 |
-
given_betas=None,
|
67 |
-
original_elbo_weight=0.,
|
68 |
-
v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
|
69 |
-
l_simple_weight=1.,
|
70 |
-
conditioning_key=None,
|
71 |
-
parameterization="eps", # all assuming fixed variance schedules
|
72 |
-
scheduler_config=None,
|
73 |
-
use_positional_encodings=False,
|
74 |
-
learn_logvar=False,
|
75 |
-
logvar_init=0.,
|
76 |
-
make_it_fit=False,
|
77 |
-
ucg_training=None,
|
78 |
-
reset_ema=False,
|
79 |
-
reset_num_ema_updates=False,
|
80 |
-
):
|
81 |
-
super().__init__()
|
82 |
-
assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"'
|
83 |
-
self.parameterization = parameterization
|
84 |
-
print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
|
85 |
-
self.cond_stage_model = None
|
86 |
-
self.clip_denoised = clip_denoised
|
87 |
-
self.log_every_t = log_every_t
|
88 |
-
self.first_stage_key = first_stage_key
|
89 |
-
self.image_size = image_size # try conv?
|
90 |
-
self.channels = channels
|
91 |
-
self.use_positional_encodings = use_positional_encodings
|
92 |
-
self.model = DiffusionWrapper(unet_config, conditioning_key)
|
93 |
-
count_params(self.model, verbose=True)
|
94 |
-
self.use_ema = use_ema
|
95 |
-
if self.use_ema:
|
96 |
-
self.model_ema = LitEma(self.model)
|
97 |
-
print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
|
98 |
-
|
99 |
-
self.use_scheduler = scheduler_config is not None
|
100 |
-
if self.use_scheduler:
|
101 |
-
self.scheduler_config = scheduler_config
|
102 |
-
|
103 |
-
self.v_posterior = v_posterior
|
104 |
-
self.original_elbo_weight = original_elbo_weight
|
105 |
-
self.l_simple_weight = l_simple_weight
|
106 |
-
|
107 |
-
if monitor is not None:
|
108 |
-
self.monitor = monitor
|
109 |
-
self.make_it_fit = make_it_fit
|
110 |
-
if reset_ema: assert exists(ckpt_path)
|
111 |
-
if ckpt_path is not None:
|
112 |
-
self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet)
|
113 |
-
if reset_ema:
|
114 |
-
assert self.use_ema
|
115 |
-
print(f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.")
|
116 |
-
self.model_ema = LitEma(self.model)
|
117 |
-
if reset_num_ema_updates:
|
118 |
-
print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ")
|
119 |
-
assert self.use_ema
|
120 |
-
self.model_ema.reset_num_updates()
|
121 |
-
|
122 |
-
self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps,
|
123 |
-
linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s)
|
124 |
-
|
125 |
-
self.loss_type = loss_type
|
126 |
-
|
127 |
-
self.learn_logvar = learn_logvar
|
128 |
-
self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,))
|
129 |
-
if self.learn_logvar:
|
130 |
-
self.logvar = nn.Parameter(self.logvar, requires_grad=True)
|
131 |
-
|
132 |
-
self.ucg_training = ucg_training or dict()
|
133 |
-
if self.ucg_training:
|
134 |
-
self.ucg_prng = np.random.RandomState()
|
135 |
-
|
136 |
-
def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000,
|
137 |
-
linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
|
138 |
-
if exists(given_betas):
|
139 |
-
betas = given_betas
|
140 |
-
else:
|
141 |
-
betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end,
|
142 |
-
cosine_s=cosine_s)
|
143 |
-
alphas = 1. - betas
|
144 |
-
alphas_cumprod = np.cumprod(alphas, axis=0)
|
145 |
-
alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])
|
146 |
-
|
147 |
-
timesteps, = betas.shape
|
148 |
-
self.num_timesteps = int(timesteps)
|
149 |
-
self.linear_start = linear_start
|
150 |
-
self.linear_end = linear_end
|
151 |
-
assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep'
|
152 |
-
|
153 |
-
to_torch = partial(torch.tensor, dtype=torch.float32)
|
154 |
-
|
155 |
-
self.register_buffer('betas', to_torch(betas))
|
156 |
-
self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
|
157 |
-
self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev))
|
158 |
-
|
159 |
-
# calculations for diffusion q(x_t | x_{t-1}) and others
|
160 |
-
self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
|
161 |
-
self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
|
162 |
-
self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod)))
|
163 |
-
self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod)))
|
164 |
-
self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1)))
|
165 |
-
|
166 |
-
# calculations for posterior q(x_{t-1} | x_t, x_0)
|
167 |
-
posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / (
|
168 |
-
1. - alphas_cumprod) + self.v_posterior * betas
|
169 |
-
# above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
|
170 |
-
self.register_buffer('posterior_variance', to_torch(posterior_variance))
|
171 |
-
# below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
|
172 |
-
self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20))))
|
173 |
-
self.register_buffer('posterior_mean_coef1', to_torch(
|
174 |
-
betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod)))
|
175 |
-
self.register_buffer('posterior_mean_coef2', to_torch(
|
176 |
-
(1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod)))
|
177 |
-
|
178 |
-
if self.parameterization == "eps":
|
179 |
-
lvlb_weights = self.betas ** 2 / (
|
180 |
-
2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))
|
181 |
-
elif self.parameterization == "x0":
|
182 |
-
lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. * 1 - torch.Tensor(alphas_cumprod))
|
183 |
-
elif self.parameterization == "v":
|
184 |
-
lvlb_weights = torch.ones_like(self.betas ** 2 / (
|
185 |
-
2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)))
|
186 |
-
else:
|
187 |
-
raise NotImplementedError("mu not supported")
|
188 |
-
lvlb_weights[0] = lvlb_weights[1]
|
189 |
-
self.register_buffer('lvlb_weights', lvlb_weights, persistent=False)
|
190 |
-
assert not torch.isnan(self.lvlb_weights).all()
|
191 |
-
|
192 |
-
@contextmanager
|
193 |
-
def ema_scope(self, context=None):
|
194 |
-
if self.use_ema:
|
195 |
-
self.model_ema.store(self.model.parameters())
|
196 |
-
self.model_ema.copy_to(self.model)
|
197 |
-
if context is not None:
|
198 |
-
print(f"{context}: Switched to EMA weights")
|
199 |
-
try:
|
200 |
-
yield None
|
201 |
-
finally:
|
202 |
-
if self.use_ema:
|
203 |
-
self.model_ema.restore(self.model.parameters())
|
204 |
-
if context is not None:
|
205 |
-
print(f"{context}: Restored training weights")
|
206 |
-
|
207 |
-
@torch.no_grad()
|
208 |
-
def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
|
209 |
-
sd = torch.load(path, map_location="cpu")
|
210 |
-
if "state_dict" in list(sd.keys()):
|
211 |
-
sd = sd["state_dict"]
|
212 |
-
keys = list(sd.keys())
|
213 |
-
for k in keys:
|
214 |
-
for ik in ignore_keys:
|
215 |
-
if k.startswith(ik):
|
216 |
-
print("Deleting key {} from state_dict.".format(k))
|
217 |
-
del sd[k]
|
218 |
-
if self.make_it_fit:
|
219 |
-
n_params = len([name for name, _ in
|
220 |
-
itertools.chain(self.named_parameters(),
|
221 |
-
self.named_buffers())])
|
222 |
-
for name, param in tqdm(
|
223 |
-
itertools.chain(self.named_parameters(),
|
224 |
-
self.named_buffers()),
|
225 |
-
desc="Fitting old weights to new weights",
|
226 |
-
total=n_params
|
227 |
-
):
|
228 |
-
if not name in sd:
|
229 |
-
continue
|
230 |
-
old_shape = sd[name].shape
|
231 |
-
new_shape = param.shape
|
232 |
-
assert len(old_shape) == len(new_shape)
|
233 |
-
if len(new_shape) > 2:
|
234 |
-
# we only modify first two axes
|
235 |
-
assert new_shape[2:] == old_shape[2:]
|
236 |
-
# assumes first axis corresponds to output dim
|
237 |
-
if not new_shape == old_shape:
|
238 |
-
new_param = param.clone()
|
239 |
-
old_param = sd[name]
|
240 |
-
if len(new_shape) == 1:
|
241 |
-
for i in range(new_param.shape[0]):
|
242 |
-
new_param[i] = old_param[i % old_shape[0]]
|
243 |
-
elif len(new_shape) >= 2:
|
244 |
-
for i in range(new_param.shape[0]):
|
245 |
-
for j in range(new_param.shape[1]):
|
246 |
-
new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]]
|
247 |
-
|
248 |
-
n_used_old = torch.ones(old_shape[1])
|
249 |
-
for j in range(new_param.shape[1]):
|
250 |
-
n_used_old[j % old_shape[1]] += 1
|
251 |
-
n_used_new = torch.zeros(new_shape[1])
|
252 |
-
for j in range(new_param.shape[1]):
|
253 |
-
n_used_new[j] = n_used_old[j % old_shape[1]]
|
254 |
-
|
255 |
-
n_used_new = n_used_new[None, :]
|
256 |
-
while len(n_used_new.shape) < len(new_shape):
|
257 |
-
n_used_new = n_used_new.unsqueeze(-1)
|
258 |
-
new_param /= n_used_new
|
259 |
-
|
260 |
-
sd[name] = new_param
|
261 |
-
|
262 |
-
missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
|
263 |
-
sd, strict=False)
|
264 |
-
print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
|
265 |
-
if len(missing) > 0:
|
266 |
-
print(f"Missing Keys:\n {missing}")
|
267 |
-
if len(unexpected) > 0:
|
268 |
-
print(f"\nUnexpected Keys:\n {unexpected}")
|
269 |
-
|
270 |
-
def q_mean_variance(self, x_start, t):
|
271 |
-
"""
|
272 |
-
Get the distribution q(x_t | x_0).
|
273 |
-
:param x_start: the [N x C x ...] tensor of noiseless inputs.
|
274 |
-
:param t: the number of diffusion steps (minus 1). Here, 0 means one step.
|
275 |
-
:return: A tuple (mean, variance, log_variance), all of x_start's shape.
|
276 |
-
"""
|
277 |
-
mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start)
|
278 |
-
variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
|
279 |
-
log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape)
|
280 |
-
return mean, variance, log_variance
|
281 |
-
|
282 |
-
def predict_start_from_noise(self, x_t, t, noise):
|
283 |
-
return (
|
284 |
-
extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
|
285 |
-
extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
|
286 |
-
)
|
287 |
-
|
288 |
-
def predict_start_from_z_and_v(self, x_t, t, v):
|
289 |
-
# self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
|
290 |
-
# self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
|
291 |
-
return (
|
292 |
-
extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t -
|
293 |
-
extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v
|
294 |
-
)
|
295 |
-
|
296 |
-
def predict_eps_from_z_and_v(self, x_t, t, v):
|
297 |
-
return (
|
298 |
-
extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v +
|
299 |
-
extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t
|
300 |
-
)
|
301 |
-
|
302 |
-
def q_posterior(self, x_start, x_t, t):
|
303 |
-
posterior_mean = (
|
304 |
-
extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start +
|
305 |
-
extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
|
306 |
-
)
|
307 |
-
posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape)
|
308 |
-
posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape)
|
309 |
-
return posterior_mean, posterior_variance, posterior_log_variance_clipped
|
310 |
-
|
311 |
-
def p_mean_variance(self, x, t, clip_denoised: bool):
|
312 |
-
model_out = self.model(x, t)
|
313 |
-
if self.parameterization == "eps":
|
314 |
-
x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
|
315 |
-
elif self.parameterization == "x0":
|
316 |
-
x_recon = model_out
|
317 |
-
if clip_denoised:
|
318 |
-
x_recon.clamp_(-1., 1.)
|
319 |
-
|
320 |
-
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
|
321 |
-
return model_mean, posterior_variance, posterior_log_variance
|
322 |
-
|
323 |
-
@torch.no_grad()
|
324 |
-
def p_sample(self, x, t, clip_denoised=True, repeat_noise=False):
|
325 |
-
b, *_, device = *x.shape, x.device
|
326 |
-
model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised)
|
327 |
-
noise = noise_like(x.shape, device, repeat_noise)
|
328 |
-
# no noise when t == 0
|
329 |
-
nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
|
330 |
-
return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
|
331 |
-
|
332 |
-
@torch.no_grad()
|
333 |
-
def p_sample_loop(self, shape, return_intermediates=False):
|
334 |
-
device = self.betas.device
|
335 |
-
b = shape[0]
|
336 |
-
img = torch.randn(shape, device=device)
|
337 |
-
intermediates = [img]
|
338 |
-
for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps):
|
339 |
-
img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long),
|
340 |
-
clip_denoised=self.clip_denoised)
|
341 |
-
if i % self.log_every_t == 0 or i == self.num_timesteps - 1:
|
342 |
-
intermediates.append(img)
|
343 |
-
if return_intermediates:
|
344 |
-
return img, intermediates
|
345 |
-
return img
|
346 |
-
|
347 |
-
@torch.no_grad()
|
348 |
-
def sample(self, batch_size=16, return_intermediates=False):
|
349 |
-
image_size = self.image_size
|
350 |
-
channels = self.channels
|
351 |
-
return self.p_sample_loop((batch_size, channels, image_size, image_size),
|
352 |
-
return_intermediates=return_intermediates)
|
353 |
-
|
354 |
-
def q_sample(self, x_start, t, noise=None):
|
355 |
-
noise = default(noise, lambda: torch.randn_like(x_start))
|
356 |
-
return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
|
357 |
-
extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise)
|
358 |
-
|
359 |
-
def get_v(self, x, noise, t):
|
360 |
-
return (
|
361 |
-
extract_into_tensor(self.sqrt_alphas_cumprod, t, x.shape) * noise -
|
362 |
-
extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x.shape) * x
|
363 |
-
)

    def get_loss(self, pred, target, mean=True):
        if self.loss_type == 'l1':
            loss = (target - pred).abs()
            if mean:
                loss = loss.mean()
        elif self.loss_type == 'l2':
            if mean:
                loss = torch.nn.functional.mse_loss(target, pred)
            else:
                loss = torch.nn.functional.mse_loss(target, pred, reduction='none')
        else:
            raise NotImplementedError(f"unknown loss type '{self.loss_type}'")

        return loss

    def p_losses(self, x_start, t, noise=None):
        noise = default(noise, lambda: torch.randn_like(x_start))
        x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
        model_out = self.model(x_noisy, t)

        loss_dict = {}
        if self.parameterization == "eps":
            target = noise
        elif self.parameterization == "x0":
            target = x_start
        elif self.parameterization == "v":
            target = self.get_v(x_start, noise, t)
        else:
            raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported")

        loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3])

        log_prefix = 'train' if self.training else 'val'

        loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()})
        loss_simple = loss.mean() * self.l_simple_weight

        loss_vlb = (self.lvlb_weights[t] * loss).mean()
        loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb})

        loss = loss_simple + self.original_elbo_weight * loss_vlb

        loss_dict.update({f'{log_prefix}/loss': loss})

        return loss, loss_dict
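    # Loss composition: the per-sample loss is logged unweighted ("loss_simple")
    # and reweighted by lvlb_weights[t] ("loss_vlb"); the trained objective is
    # l_simple_weight * loss_simple + original_elbo_weight * loss_vlb, so with
    # original_elbo_weight == 0 only the simple term drives the gradients.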

    def forward(self, x, *args, **kwargs):
        # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size
        # assert h == img_size and w == img_size, f'height and width of image must be {img_size}'
        t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
        return self.p_losses(x, t, *args, **kwargs)

    def get_input(self, batch, k):
        x = batch[k]
        # if len(x.shape) == 3:
        #     x = x[..., None]
        # x = rearrange(x, 'b h w c -> b c h w')
        # x = x.to(memory_format=torch.contiguous_format).float()
        return x

    def shared_step(self, batch):
        x = self.get_input(batch, self.first_stage_key)
        loss, loss_dict = self(x)
        return loss, loss_dict

    def training_step(self, batch, batch_idx):
        loss, loss_dict = self.shared_step(batch)

        self.log_dict(loss_dict, prog_bar=True,
                      logger=True, on_step=True, on_epoch=True)

        self.log("global_step", self.global_step,
                 prog_bar=True, logger=True, on_step=True, on_epoch=False)

        if self.use_scheduler:
            lr = self.optimizers().param_groups[0]['lr']
            self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False)

        return loss

    @torch.no_grad()
    def validation_step(self, batch, batch_idx):
        _, loss_dict_no_ema = self.shared_step(batch)
        with self.ema_scope():
            _, loss_dict_ema = self.shared_step(batch)
            loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema}
        self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)
        self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)

    def on_train_batch_end(self, *args, **kwargs):
        if self.use_ema:
            self.model_ema(self.model)

    def _get_rows_from_list(self, samples):
        n_imgs_per_row = len(samples)
        denoise_grid = rearrange(samples, 'n b c h w -> b n c h w')
        denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
        denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
        return denoise_grid

    @torch.no_grad()
    def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs):
        log = dict()
        x = self.get_input(batch, self.first_stage_key)
        N = min(x.shape[0], N)
        n_row = min(x.shape[0], n_row)
        x = x.to(self.device)[:N]
        log["inputs"] = x

        # get diffusion row
        diffusion_row = list()
        x_start = x[:n_row]

        for t in range(self.num_timesteps):
            if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
                t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
                t = t.to(self.device).long()
                noise = torch.randn_like(x_start)
                x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
                diffusion_row.append(x_noisy)

        log["diffusion_row"] = self._get_rows_from_list(diffusion_row)

        if sample:
            # get denoise row
            with self.ema_scope("Plotting"):
                samples, denoise_row = self.sample(batch_size=N, return_intermediates=True)

            log["samples"] = samples
            log["denoise_row"] = self._get_rows_from_list(denoise_row)

        if return_keys:
            if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
                return log
            else:
                return {key: log[key] for key in return_keys}
        return log

    def configure_optimizers(self):
        lr = self.learning_rate
        params = list(self.model.parameters())
        if self.learn_logvar:
            params = params + [self.logvar]
        opt = torch.optim.AdamW(params, lr=lr)
        return opt


class LatentDiffusion(DDPM):
    """main class"""

    def __init__(self,
                 first_stage_config,
                 cond_stage_config,
                 num_timesteps_cond=None,
                 cond_stage_key="image",
                 cond_stage_trainable=False,
                 concat_mode=True,
                 cond_stage_forward=None,
                 conditioning_key=None,
                 scale_factor=1.0,
                 scale_by_std=False,
                 *args, **kwargs):
        self.num_timesteps_cond = default(num_timesteps_cond, 1)
        self.scale_by_std = scale_by_std
        assert self.num_timesteps_cond <= kwargs['timesteps']
        # for backwards compatibility after implementation of DiffusionWrapper
        if conditioning_key is None:
            conditioning_key = 'concat' if concat_mode else 'crossattn'
        if cond_stage_config == '__is_unconditional__':
            conditioning_key = None
        ckpt_path = kwargs.pop("ckpt_path", None)
        reset_ema = kwargs.pop("reset_ema", False)
        reset_num_ema_updates = kwargs.pop("reset_num_ema_updates", False)
        ignore_keys = kwargs.pop("ignore_keys", [])
        super().__init__(conditioning_key=conditioning_key, *args, **kwargs)
        self.concat_mode = concat_mode
        self.cond_stage_trainable = cond_stage_trainable
        self.cond_stage_key = cond_stage_key
        try:
            self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1
        except Exception:
            self.num_downs = 0
        if not scale_by_std:
            self.scale_factor = scale_factor
        else:
            self.register_buffer('scale_factor', torch.tensor(scale_factor))
        self.instantiate_first_stage(first_stage_config)
        self.instantiate_cond_stage(cond_stage_config)
        self.cond_stage_forward = cond_stage_forward
        self.clip_denoised = False
        self.bbox_tokenizer = None

        self.restarted_from_ckpt = False
        if ckpt_path is not None:
            self.init_from_ckpt(ckpt_path, ignore_keys)
            self.restarted_from_ckpt = True
            if reset_ema:
                assert self.use_ema
                print("Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.")
                self.model_ema = LitEma(self.model)
        if reset_num_ema_updates:
            print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ")
            assert self.use_ema
            self.model_ema.reset_num_updates()

    def make_cond_schedule(self):
        self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long)
        ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long()
        self.cond_ids[:self.num_timesteps_cond] = ids

    def register_schedule(self,
                          given_betas=None, beta_schedule="linear", timesteps=1000,
                          linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
        super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s)

        self.shorten_cond_schedule = self.num_timesteps_cond > 1
        if self.shorten_cond_schedule:
            self.make_cond_schedule()

    def instantiate_first_stage(self, config):
        model = instantiate_from_config(config)
        self.first_stage_model = model.eval()
        self.first_stage_model.train = disabled_train
        for param in self.first_stage_model.parameters():
            param.requires_grad = False

    def instantiate_cond_stage(self, config):
        if not self.cond_stage_trainable:
            if config == "__is_first_stage__":
                print("Using first stage also as cond stage.")
                self.cond_stage_model = self.first_stage_model
            elif config == "__is_unconditional__":
                print(f"Training {self.__class__.__name__} as an unconditional model.")
                self.cond_stage_model = None
                # self.be_unconditional = True
            else:
                model = instantiate_from_config(config)
                self.cond_stage_model = model.eval()
                self.cond_stage_model.train = disabled_train
                for param in self.cond_stage_model.parameters():
                    param.requires_grad = False
        else:
            assert config != '__is_first_stage__'
            assert config != '__is_unconditional__'
            model = instantiate_from_config(config)
            self.cond_stage_model = model

    def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False):
        denoise_row = []
        for zd in tqdm(samples, desc=desc):
            denoise_row.append(self.decode_first_stage(zd.to(self.device),
                                                       force_not_quantize=force_no_decoder_quantization))
        n_imgs_per_row = len(denoise_row)
        denoise_row = torch.stack(denoise_row)  # n_log_step, n_row, C, H, W
        denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w')
        denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
        denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
        return denoise_grid

    def get_first_stage_encoding(self, encoder_posterior):
        if isinstance(encoder_posterior, DiagonalGaussianDistribution):
            z = encoder_posterior.sample()
        elif isinstance(encoder_posterior, torch.Tensor):
            z = encoder_posterior
        else:
            raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented")
        return self.scale_factor * z

    def get_learned_conditioning(self, c):
        if self.cond_stage_forward is None:
            if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode):
                c = self.cond_stage_model.encode(c)
                if isinstance(c, DiagonalGaussianDistribution):
                    c = c.mode()
            else:
                c = self.cond_stage_model(c)
        else:
            assert hasattr(self.cond_stage_model, self.cond_stage_forward)
            c = getattr(self.cond_stage_model, self.cond_stage_forward)(c)
        return c

    def meshgrid(self, h, w):
        y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1)
        x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1)

        arr = torch.cat([y, x], dim=-1)
        return arr

    def delta_border(self, h, w):
        """
        :param h: height
        :param w: width
        :return: normalized distance to image border,
         with min distance = 0 at border and max dist = 0.5 at image center
        """
        lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)
        arr = self.meshgrid(h, w) / lower_right_corner
        dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0]
        dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0]
        edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0]
        return edge_dist

    def get_weighting(self, h, w, Ly, Lx, device):
        weighting = self.delta_border(h, w)
        weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"],
                               self.split_input_params["clip_max_weight"], )
        weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device)

        if self.split_input_params["tie_braker"]:
            L_weighting = self.delta_border(Ly, Lx)
            L_weighting = torch.clip(L_weighting,
                                     self.split_input_params["clip_min_tie_weight"],
                                     self.split_input_params["clip_max_tie_weight"])

            L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device)
            weighting = weighting * L_weighting
        return weighting

    def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1):  # todo load once not every time, shorten code
        """
        :param x: img of size (bs, c, h, w)
        :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1])
        """
        bs, nc, h, w = x.shape

        # number of crops in image
        Ly = (h - kernel_size[0]) // stride[0] + 1
        Lx = (w - kernel_size[1]) // stride[1] + 1

        if uf == 1 and df == 1:
            fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
            unfold = torch.nn.Unfold(**fold_params)

            fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params)

            weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype)
            normalization = fold(weighting).view(1, 1, h, w)  # normalizes the overlap
            weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx))

        elif uf > 1 and df == 1:
            fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
            unfold = torch.nn.Unfold(**fold_params)

            fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf),
                                dilation=1, padding=0,
                                stride=(stride[0] * uf, stride[1] * uf))
            fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2)

            weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype)
            normalization = fold(weighting).view(1, 1, h * uf, w * uf)  # normalizes the overlap
            weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx))

        elif df > 1 and uf == 1:
            fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
            unfold = torch.nn.Unfold(**fold_params)

            fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df),
                                dilation=1, padding=0,
                                stride=(stride[0] // df, stride[1] // df))
            fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2)

            weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype)
            normalization = fold(weighting).view(1, 1, h // df, w // df)  # normalizes the overlap
            weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx))

        else:
            raise NotImplementedError

        return fold, unfold, normalization, weighting
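    # Tiling note: unfold slices the input into Ly * Lx overlapping crops and
    # fold sums them back; dividing by `normalization` (the fold of the per-crop
    # weights) blends the overlaps smoothly. E.g. h = w = 512 with
    # kernel = stride = (128, 128) gives Ly = Lx = 4, i.e. 16 non-overlapping
    # crops and a constant normalization map.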

    @torch.no_grad()
    def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False,
                  cond_key=None, return_original_cond=False, bs=None):
        x = super().get_input(batch, k)
        if bs is not None:
            x = x[:bs]
        x = x.to(self.device)
        encoder_posterior = self.encode_first_stage(x)
        z = self.get_first_stage_encoding(encoder_posterior).detach()

        if self.model.conditioning_key is not None:
            if cond_key is None:
                cond_key = self.cond_stage_key
            if cond_key != self.first_stage_key:
                if cond_key in ['caption', 'coordinates_bbox', "txt"]:
                    xc = batch[cond_key]
                elif cond_key in ['class_label', 'cls']:
                    xc = batch
                else:
                    xc = super().get_input(batch, cond_key).to(self.device)
            else:
                xc = x
            if not self.cond_stage_trainable or force_c_encode:
                if isinstance(xc, dict) or isinstance(xc, list):
                    c = self.get_learned_conditioning(xc)
                else:
                    c = self.get_learned_conditioning(xc.to(self.device))
            else:
                c = xc
            if bs is not None:
                c = c[:bs]

            if self.use_positional_encodings:
                pos_x, pos_y = self.compute_latent_shifts(batch)
                ckey = __conditioning_keys__[self.model.conditioning_key]
                c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y}

        else:
            c = None
            xc = None
            if self.use_positional_encodings:
                pos_x, pos_y = self.compute_latent_shifts(batch)
                c = {'pos_x': pos_x, 'pos_y': pos_y}
        out = [z, c]
        if return_first_stage_outputs:
            xrec = self.decode_first_stage(z)
            out.extend([x, xrec])
        if return_original_cond:
            out.append(xc)
        return out

    @torch.no_grad()
    def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
        if predict_cids:
            if z.dim() == 4:
                z = torch.argmax(z.exp(), dim=1).long()
            z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
            z = rearrange(z, 'b h w c -> b c h w').contiguous()

        z = 1. / self.scale_factor * z
        return self.first_stage_model.decode(z)

    @torch.no_grad()
    def encode_first_stage(self, x):
        return self.first_stage_model.encode(x)

    def shared_step(self, batch, **kwargs):
        x, c = self.get_input(batch, self.first_stage_key)
        loss = self(x, c, **kwargs)
        return loss

    def forward(self, x, c, *args, **kwargs):
        if 't' not in kwargs:
            t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
        else:
            t = kwargs.pop('t')

        return self.p_losses(x, c, t, *args, **kwargs)

    def apply_model(self, x_noisy, t, cond, return_ids=False, **kwargs):
        if isinstance(cond, dict):
            # hybrid case, cond is expected to be a dict
            pass
        else:
            if not isinstance(cond, list):
                cond = [cond]
            key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn'
            cond = {key: cond}

        x_recon = self.model(x_noisy, t, **cond, **kwargs)

        if isinstance(x_recon, tuple) and not return_ids:
            return x_recon[0]
        else:
            return x_recon

    def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
        return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \
               extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)

    def _prior_bpd(self, x_start):
        """
        Get the prior KL term for the variational lower-bound, measured in
        bits-per-dim.
        This term can't be optimized, as it only depends on the encoder.
        :param x_start: the [N x C x ...] tensor of inputs.
        :return: a batch of [N] KL values (in bits), one per batch element.
        """
        batch_size = x_start.shape[0]
        t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
        qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
        kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
        return mean_flat(kl_prior) / np.log(2.0)

    def p_losses(self, x_start, cond, t, noise=None, **kwargs):
        noise = default(noise, lambda: torch.randn_like(x_start))
        x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
        model_output = self.apply_model(x_noisy, t, cond, **kwargs)

        loss_dict = {}
        prefix = 'train' if self.training else 'val'

        if self.parameterization == "x0":
            target = x_start
        elif self.parameterization == "eps":
            target = noise
        elif self.parameterization == "v":
            target = self.get_v(x_start, noise, t)
        else:
            raise NotImplementedError()

        loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3])
        loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()})

        logvar_t = self.logvar[t].to(self.device)
        loss = loss_simple / torch.exp(logvar_t) + logvar_t
        # loss = loss_simple / torch.exp(self.logvar) + self.logvar
        if self.learn_logvar:
            loss_dict.update({f'{prefix}/loss_gamma': loss.mean()})
            loss_dict.update({'logvar': self.logvar.data.mean()})

        loss = self.l_simple_weight * loss.mean()

        loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3))
        loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean()
        loss_dict.update({f'{prefix}/loss_vlb': loss_vlb})
        loss += (self.original_elbo_weight * loss_vlb)
        loss_dict.update({f'{prefix}/loss': loss})

        return loss, loss_dict
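    # Uncertainty weighting: with per-timestep log-variances, the objective
    # loss_simple / exp(logvar_t) + logvar_t is the Gaussian NLL form
    # (cf. Kendall & Gal, 2017); with logvar fixed at zero it reduces to the
    # plain loss_simple.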

    def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False,
                        return_x0=False, score_corrector=None, corrector_kwargs=None):
        t_in = t
        model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids)

        if score_corrector is not None:
            assert self.parameterization == "eps"
            model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs)

        if return_codebook_ids:
            model_out, logits = model_out

        if self.parameterization == "eps":
            x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
        elif self.parameterization == "x0":
            x_recon = model_out
        else:
            raise NotImplementedError()

        if clip_denoised:
            x_recon.clamp_(-1., 1.)
        if quantize_denoised:
            x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon)
        model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
        if return_codebook_ids:
            return model_mean, posterior_variance, posterior_log_variance, logits
        elif return_x0:
            return model_mean, posterior_variance, posterior_log_variance, x_recon
        else:
            return model_mean, posterior_variance, posterior_log_variance

    @torch.no_grad()
    def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False,
                 return_codebook_ids=False, quantize_denoised=False, return_x0=False,
                 temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None):
        b, *_, device = *x.shape, x.device
        outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised,
                                       return_codebook_ids=return_codebook_ids,
                                       quantize_denoised=quantize_denoised,
                                       return_x0=return_x0,
                                       score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
        if return_codebook_ids:
            raise DeprecationWarning("Support dropped.")
            model_mean, _, model_log_variance, logits = outputs
        elif return_x0:
            model_mean, _, model_log_variance, x0 = outputs
        else:
            model_mean, _, model_log_variance = outputs

        noise = noise_like(x.shape, device, repeat_noise) * temperature
        if noise_dropout > 0.:
            noise = torch.nn.functional.dropout(noise, p=noise_dropout)
        # no noise when t == 0
        nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))

        if return_codebook_ids:
            return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1)
        if return_x0:
            return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0
        else:
            return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise

    @torch.no_grad()
    def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False,
                              img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0.,
                              score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None,
                              log_every_t=None):
        if not log_every_t:
            log_every_t = self.log_every_t
        timesteps = self.num_timesteps
        if batch_size is not None:
            b = batch_size if batch_size is not None else shape[0]
            shape = [batch_size] + list(shape)
        else:
            b = batch_size = shape[0]
        if x_T is None:
            img = torch.randn(shape, device=self.device)
        else:
            img = x_T
        intermediates = []
        if cond is not None:
            if isinstance(cond, dict):
                cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
                        list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
            else:
                cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]

        if start_T is not None:
            timesteps = min(timesteps, start_T)
        iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation',
                        total=timesteps) if verbose else reversed(range(0, timesteps))
        if type(temperature) == float:
            temperature = [temperature] * timesteps

        for i in iterator:
            ts = torch.full((b,), i, device=self.device, dtype=torch.long)
            if self.shorten_cond_schedule:
                assert self.model.conditioning_key != 'hybrid'
                tc = self.cond_ids[ts].to(cond.device)
                cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))

            img, x0_partial = self.p_sample(img, cond, ts,
                                            clip_denoised=self.clip_denoised,
                                            quantize_denoised=quantize_denoised, return_x0=True,
                                            temperature=temperature[i], noise_dropout=noise_dropout,
                                            score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
            if mask is not None:
                assert x0 is not None
                img_orig = self.q_sample(x0, ts)
                img = img_orig * mask + (1. - mask) * img

            if i % log_every_t == 0 or i == timesteps - 1:
                intermediates.append(x0_partial)
            if callback:
                callback(i)
            if img_callback:
                img_callback(img, i)
        return img, intermediates

    @torch.no_grad()
    def p_sample_loop(self, cond, shape, return_intermediates=False,
                      x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False,
                      mask=None, x0=None, img_callback=None, start_T=None,
                      log_every_t=None):

        if not log_every_t:
            log_every_t = self.log_every_t
        device = self.betas.device
        b = shape[0]
        if x_T is None:
            img = torch.randn(shape, device=device)
        else:
            img = x_T

        intermediates = [img]
        if timesteps is None:
            timesteps = self.num_timesteps

        if start_T is not None:
            timesteps = min(timesteps, start_T)
        iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed(
            range(0, timesteps))

        if mask is not None:
            assert x0 is not None
            assert x0.shape[2:3] == mask.shape[2:3]  # spatial size has to match

        for i in iterator:
            ts = torch.full((b,), i, device=device, dtype=torch.long)
            if self.shorten_cond_schedule:
                assert self.model.conditioning_key != 'hybrid'
                tc = self.cond_ids[ts].to(cond.device)
                cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))

            img = self.p_sample(img, cond, ts,
                                clip_denoised=self.clip_denoised,
                                quantize_denoised=quantize_denoised)
            if mask is not None:
                img_orig = self.q_sample(x0, ts)
                img = img_orig * mask + (1. - mask) * img

            if i % log_every_t == 0 or i == timesteps - 1:
                intermediates.append(img)
            if callback:
                callback(i)
            if img_callback:
                img_callback(img, i)

        if return_intermediates:
            return img, intermediates
        return img

    @torch.no_grad()
    def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None,
               verbose=True, timesteps=None, quantize_denoised=False,
               mask=None, x0=None, shape=None, **kwargs):
        if shape is None:
            shape = (batch_size, self.channels, self.image_size, self.image_size)
        if cond is not None:
            if isinstance(cond, dict):
                cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
                        list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
            else:
                cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
        return self.p_sample_loop(cond,
                                  shape,
                                  return_intermediates=return_intermediates, x_T=x_T,
                                  verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised,
                                  mask=mask, x0=x0)

    @torch.no_grad()
    def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs):
        if ddim:
            ddim_sampler = DDIMSampler(self)
            shape = (self.channels, self.image_size, self.image_size)
            samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size,
                                                         shape, cond, verbose=False, **kwargs)

        else:
            samples, intermediates = self.sample(cond=cond, batch_size=batch_size,
                                                 return_intermediates=True, **kwargs)

        return samples, intermediates

    @torch.no_grad()
    def get_unconditional_conditioning(self, batch_size, null_label=None):
        if null_label is not None:
            xc = null_label
            if isinstance(xc, ListConfig):
                xc = list(xc)
            if isinstance(xc, dict) or isinstance(xc, list):
                c = self.get_learned_conditioning(xc)
            else:
                if hasattr(xc, "to"):
                    xc = xc.to(self.device)
                c = self.get_learned_conditioning(xc)
        else:
            if self.cond_stage_key in ["class_label", "cls"]:
                xc = self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device)
                return self.get_learned_conditioning(xc)
            else:
                raise NotImplementedError("todo")
        if isinstance(c, list):  # in case the encoder gives us a list
            for i in range(len(c)):
                c[i] = repeat(c[i], '1 ... -> b ...', b=batch_size).to(self.device)
        else:
            c = repeat(c, '1 ... -> b ...', b=batch_size).to(self.device)
        return c

    @torch.no_grad()
    def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=50, ddim_eta=0., return_keys=None,
                   quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,
                   plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None,
                   use_ema_scope=True,
                   **kwargs):
        ema_scope = self.ema_scope if use_ema_scope else nullcontext
        use_ddim = ddim_steps is not None

        log = dict()
        z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,
                                           return_first_stage_outputs=True,
                                           force_c_encode=True,
                                           return_original_cond=True,
                                           bs=N)
        N = min(x.shape[0], N)
        n_row = min(x.shape[0], n_row)
        log["inputs"] = x
        log["reconstruction"] = xrec
        if self.model.conditioning_key is not None:
            if hasattr(self.cond_stage_model, "decode"):
                xc = self.cond_stage_model.decode(c)
                log["conditioning"] = xc
            elif self.cond_stage_key in ["caption", "txt"]:
                xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25)
                log["conditioning"] = xc
            elif self.cond_stage_key in ['class_label', "cls"]:
                try:
                    xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25)
                    log['conditioning'] = xc
                except KeyError:
                    # probably no "human_label" in batch
                    pass
            elif isimage(xc):
                log["conditioning"] = xc
            if ismap(xc):
                log["original_conditioning"] = self.to_rgb(xc)

        if plot_diffusion_rows:
            # get diffusion row
            diffusion_row = list()
            z_start = z[:n_row]
            for t in range(self.num_timesteps):
                if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
                    t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
                    t = t.to(self.device).long()
                    noise = torch.randn_like(z_start)
                    z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
                    diffusion_row.append(self.decode_first_stage(z_noisy))

            diffusion_row = torch.stack(diffusion_row)  # n_log_step, n_row, C, H, W
            diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
            diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
            diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
            log["diffusion_row"] = diffusion_grid

        if sample:
            # get denoise row
            with ema_scope("Sampling"):
                samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
                                                         ddim_steps=ddim_steps, eta=ddim_eta)
                # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)
            x_samples = self.decode_first_stage(samples)
            log["samples"] = x_samples
            if plot_denoise_rows:
                denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
                log["denoise_row"] = denoise_grid

            if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(
                    self.first_stage_model, IdentityFirstStage):
                # also display when quantizing x0 while sampling
                with ema_scope("Plotting Quantized Denoised"):
                    samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
                                                             ddim_steps=ddim_steps, eta=ddim_eta,
                                                             quantize_denoised=True)
                    # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True,
                    #                                      quantize_denoised=True)
                x_samples = self.decode_first_stage(samples.to(self.device))
                log["samples_x0_quantized"] = x_samples

        if unconditional_guidance_scale > 1.0:
            uc = self.get_unconditional_conditioning(N, unconditional_guidance_label)
            if self.model.conditioning_key == "crossattn-adm":
                uc = {"c_crossattn": [uc], "c_adm": c["c_adm"]}
            with ema_scope("Sampling with classifier-free guidance"):
                samples_cfg, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
                                                 ddim_steps=ddim_steps, eta=ddim_eta,
                                                 unconditional_guidance_scale=unconditional_guidance_scale,
                                                 unconditional_conditioning=uc,
                                                 )
                x_samples_cfg = self.decode_first_stage(samples_cfg)
                log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg

        if inpaint:
            # make a simple center square
            b, h, w = z.shape[0], z.shape[2], z.shape[3]
            mask = torch.ones(N, h, w).to(self.device)
            # zeros will be filled in
            mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.
            mask = mask[:, None, ...]
            with ema_scope("Plotting Inpaint"):
                samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,
                                             ddim_steps=ddim_steps, x0=z[:N], mask=mask)
            x_samples = self.decode_first_stage(samples.to(self.device))
            log["samples_inpainting"] = x_samples
            log["mask"] = mask

            # outpaint
            mask = 1. - mask
            with ema_scope("Plotting Outpaint"):
                samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,
                                             ddim_steps=ddim_steps, x0=z[:N], mask=mask)
            x_samples = self.decode_first_stage(samples.to(self.device))
            log["samples_outpainting"] = x_samples

        if plot_progressive_rows:
            with ema_scope("Plotting Progressives"):
                img, progressives = self.progressive_denoising(c,
                                                               shape=(self.channels, self.image_size, self.image_size),
                                                               batch_size=N)
            prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation")
            log["progressive_row"] = prog_row

        if return_keys:
            if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
                return log
            else:
                return {key: log[key] for key in return_keys}
        return log
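    # Note on the classifier-free-guidance branch above: uc is the "null"
    # conditioning, and the guided prediction combined inside the sampler follows
    # eps_guided = eps(x, uc) + scale * (eps(x, c) - eps(x, uc)) (Ho & Salimans, 2022).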

    def configure_optimizers(self):
        lr = self.learning_rate
        params = list(self.model.parameters())
        if self.cond_stage_trainable:
            print(f"{self.__class__.__name__}: Also optimizing conditioner params!")
            params = params + list(self.cond_stage_model.parameters())
        if self.learn_logvar:
            print('Diffusion model optimizing logvar')
            params.append(self.logvar)
        opt = torch.optim.AdamW(params, lr=lr)
        if self.use_scheduler:
            assert 'target' in self.scheduler_config
            scheduler = instantiate_from_config(self.scheduler_config)

            print("Setting up LambdaLR scheduler...")
            scheduler = [
                {
                    'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule),
                    'interval': 'step',
                    'frequency': 1
                }]
            return [opt], scheduler
        return opt

    @torch.no_grad()
    def to_rgb(self, x):
        x = x.float()
        if not hasattr(self, "colorize"):
            self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x)
        x = nn.functional.conv2d(x, weight=self.colorize)
        x = 2. * (x - x.min()) / (x.max() - x.min()) - 1.
        return x


class DiffusionWrapper(pl.LightningModule):
    def __init__(self, diff_model_config, conditioning_key):
        super().__init__()
        self.diffusion_model = instantiate_from_config(diff_model_config)
        self.conditioning_key = conditioning_key
        assert self.conditioning_key in [None, 'concat', 'crossattn', 'hybrid', 'adm', 'hybrid-adm', 'crossattn-adm']

    def forward(self, x, t, c_concat: list = None, c_crossattn: list = None, c_adm=None, **kwargs):
        if self.conditioning_key is None:
            out = self.diffusion_model(x, t, **kwargs)
        elif self.conditioning_key == 'concat':
            xc = torch.cat([x] + c_concat, dim=1)
            out = self.diffusion_model(xc, t, **kwargs)
        elif self.conditioning_key == 'crossattn':
            cc = torch.cat(c_crossattn, 1)
            out = self.diffusion_model(x, t, context=cc, **kwargs)
        elif self.conditioning_key == 'hybrid':
            xc = torch.cat([x] + c_concat, dim=1)
            cc = torch.cat(c_crossattn, 1)
            out = self.diffusion_model(xc, t, context=cc, **kwargs)
        elif self.conditioning_key == 'hybrid-adm':
            assert c_adm is not None
            xc = torch.cat([x] + c_concat, dim=1)
            cc = torch.cat(c_crossattn, 1)
            out = self.diffusion_model(xc, t, context=cc, y=c_adm, **kwargs)
        elif self.conditioning_key == 'crossattn-adm':
            assert c_adm is not None
            cc = torch.cat(c_crossattn, 1)
            out = self.diffusion_model(x, t, context=cc, y=c_adm, **kwargs)
        elif self.conditioning_key == 'adm':
            cc = c_crossattn[0]
            out = self.diffusion_model(x, t, y=cc, **kwargs)
        else:
            raise NotImplementedError()

        return out
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/canvas/Canvas.js
DELETED
@@ -1,2 +0,0 @@
import Canvas from '../../../plugins/canvas.js';
export default Canvas;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/slider/UpdateIndicator.js
DELETED
@@ -1,67 +0,0 @@
import ResizeGameObject from '../../../plugins/utils/size/ResizeGameObject.js';
import AlignIn from '../../../plugins/utils/align/align/in/QuickSet.js';
import { GetDisplayWidth, GetDisplayHeight } from '../../../plugins/utils/size/GetDisplaySize.js';

const AlignLeft = Phaser.Display.Align.LEFT_CENTER;
const AlignTop = Phaser.Display.Align.TOP_CENTER;
const AlignRight = Phaser.Display.Align.RIGHT_CENTER;
const AlignBottom = Phaser.Display.Align.BOTTOM_CENTER;

var UpdateIndicator = function (t) {
    var indicator = this.childrenMap.indicator;
    if (indicator === undefined) {
        return this;
    }

    if (t === undefined) {
        t = this.value;
    }

    var reverseAxis = this.reverseAxis;
    var newWidth, newHeight;
    var thumb = this.childrenMap.thumb;
    if (thumb) {
        if (this.orientation === 0) { // x, extend width
            var thumbWidth = GetDisplayWidth(thumb);

            if (!reverseAxis) {
                var thumbLeft = thumb.x - (thumbWidth * thumb.originX);
                var thumbRight = thumbLeft + thumbWidth;
                newWidth = thumbRight - this.left;
            } else {
                var thumbLeft = thumb.x - (thumbWidth * thumb.originX);
                newWidth = this.right - thumbLeft;
            }
        } else { // y, extend height
            var thumbHeight = GetDisplayHeight(thumb);

            if (!reverseAxis) {
                var thumbTop = thumb.y - (thumbHeight * thumb.originY);
                var thumbBottom = thumbTop + thumbHeight;
                newHeight = thumbBottom - this.top;
            } else {
                var thumbTop = thumb.y - (thumbHeight * thumb.originY);
                newHeight = this.bottom - thumbTop;
            }
        }
    } else {
        if (this.orientation === 0) { // x, extend width
            newWidth = this.width * t;
        } else { // y, extend height
            newHeight = this.height * t;
        }
    }
    ResizeGameObject(indicator, newWidth, newHeight);

    var align;
    if (!reverseAxis) {
        align = (this.orientation === 0) ? AlignLeft : AlignTop;
    } else {
        align = (this.orientation === 0) ? AlignRight : AlignBottom;
    }
    AlignIn(indicator, this, align);

    this.resetChildPositionState(indicator);
}

export default UpdateIndicator;
spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/utils/utils_amp.py
DELETED
@@ -1,88 +0,0 @@
from typing import Dict, List

import torch

if torch.__version__ < '1.9':
    Iterable = torch._six.container_abcs.Iterable
else:
    import collections

    Iterable = collections.abc.Iterable
from torch.cuda.amp import GradScaler


class _MultiDeviceReplicator(object):
    """
    Lazily serves copies of a tensor to requested devices. Copies are cached per-device.
    """

    def __init__(self, master_tensor: torch.Tensor) -> None:
        assert master_tensor.is_cuda
        self.master = master_tensor
        self._per_device_tensors: Dict[torch.device, torch.Tensor] = {}

    def get(self, device) -> torch.Tensor:
        retval = self._per_device_tensors.get(device, None)
        if retval is None:
            retval = self.master.to(device=device, non_blocking=True, copy=True)
            self._per_device_tensors[device] = retval
        return retval


class MaxClipGradScaler(GradScaler):
    def __init__(self, init_scale, max_scale: float, growth_interval=100):
        GradScaler.__init__(self, init_scale=init_scale, growth_interval=growth_interval)
        self.max_scale = max_scale

    def scale_clip(self):
        if self.get_scale() == self.max_scale:
            self.set_growth_factor(1)
        elif self.get_scale() < self.max_scale:
            self.set_growth_factor(2)
        elif self.get_scale() > self.max_scale:
            self._scale.fill_(self.max_scale)
            self.set_growth_factor(1)

    def scale(self, outputs):
        """
        Multiplies ('scales') a tensor or list of tensors by the scale factor.

        Returns scaled outputs. If this instance of :class:`GradScaler` is not enabled, outputs are returned
        unmodified.

        Arguments:
            outputs (Tensor or iterable of Tensors): Outputs to scale.
        """
        if not self._enabled:
            return outputs
        self.scale_clip()
        # Short-circuit for the common case.
        if isinstance(outputs, torch.Tensor):
            assert outputs.is_cuda
            if self._scale is None:
                self._lazy_init_scale_growth_tracker(outputs.device)
            assert self._scale is not None
            return outputs * self._scale.to(device=outputs.device, non_blocking=True)

        # Invoke the more complex machinery only if we're treating multiple outputs.
        stash: List[_MultiDeviceReplicator] = []  # holds a reference that can be overwritten by apply_scale

        def apply_scale(val):
            if isinstance(val, torch.Tensor):
                assert val.is_cuda
                if len(stash) == 0:
                    if self._scale is None:
                        self._lazy_init_scale_growth_tracker(val.device)
                    assert self._scale is not None
                    stash.append(_MultiDeviceReplicator(self._scale))
                return val * stash[0].get(val.device)
            elif isinstance(val, Iterable):
                iterable = map(apply_scale, val)
                if isinstance(val, list) or isinstance(val, tuple):
                    return type(val)(iterable)
                else:
                    return iterable
            else:
                raise ValueError("outputs must be a Tensor or an iterable of Tensors")

        return apply_scale(outputs)
spaces/Alpaca233/ai-stable-diffusion-Text-to-Image/app.py
DELETED
@@ -1,3 +0,0 @@
import gradio as gr

gr.Interface.load("models/stabilityai/stable-diffusion-xl-base-1.0").launch()
spaces/Aluxes/anime-remove-background/app.py
DELETED
@@ -1,52 +0,0 @@
-import gradio as gr
-import huggingface_hub
-import onnxruntime as rt
-import numpy as np
-import cv2
-
-
-def get_mask(img, s=1024):
-    img = (img / 255).astype(np.float32)
-    h, w = h0, w0 = img.shape[:-1]
-    h, w = (s, int(s * w / h)) if h > w else (int(s * h / w), s)
-    ph, pw = s - h, s - w
-    img_input = np.zeros([s, s, 3], dtype=np.float32)
-    img_input[ph // 2:ph // 2 + h, pw // 2:pw // 2 + w] = cv2.resize(img, (w, h))
-    img_input = np.transpose(img_input, (2, 0, 1))
-    img_input = img_input[np.newaxis, :]
-    mask = rmbg_model.run(None, {'img': img_input})[0][0]
-    mask = np.transpose(mask, (1, 2, 0))
-    mask = mask[ph // 2:ph // 2 + h, pw // 2:pw // 2 + w]
-    mask = cv2.resize(mask, (w0, h0))[:, :, np.newaxis]
-    return mask
-
-
-def rmbg_fn(img):
-    mask = get_mask(img)
-    img = (mask * img + 255 * (1 - mask)).astype(np.uint8)
-    mask = (mask * 255).astype(np.uint8)
-    img = np.concatenate([img, mask], axis=2, dtype=np.uint8)
-    mask = mask.repeat(3, axis=2)
-    return mask, img
-
-
-if __name__ == "__main__":
-    providers = ['CUDAExecutionProvider', 'CPUExecutionProvider']
-    model_path = huggingface_hub.hf_hub_download("skytnt/anime-seg", "isnetis.onnx")
-    rmbg_model = rt.InferenceSession(model_path, providers=providers)
-    app = gr.Blocks()
-    with app:
-        gr.Markdown("# Anime Remove Background\n\n"
-                    "\n\n"
-                    "demo for [https://github.com/SkyTNT/anime-segmentation/](https://github.com/SkyTNT/anime-segmentation/)")
-        with gr.Row():
-            with gr.Column():
-                input_img = gr.Image(label="input image")
-                examples_data = [[f"examples/{x:02d}.jpg"] for x in range(1, 4)]
-                examples = gr.Dataset(components=[input_img], samples=examples_data)
-                run_btn = gr.Button(variant="primary")
-            output_mask = gr.Image(label="mask")
-            output_img = gr.Image(label="result", image_mode="RGBA")
-        examples.click(lambda x: x[0], [examples], [input_img])
-        run_btn.click(rmbg_fn, [input_img], [output_mask, output_img])
-    app.launch()
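`get_mask` above letterboxes the input before ONNX inference: the longer side is scaled to `s`, the image is centered with symmetric padding, and the same offsets crop the predicted mask back out. A small sketch of just that geometry (shapes only, no model; `letterbox_shape` is an illustrative helper, not part of the deleted app):

```python
def letterbox_shape(h0, w0, s=1024):
    # Scale so the longer side becomes s, preserving aspect ratio.
    h, w = (s, int(s * w0 / h0)) if h0 > w0 else (int(s * h0 / w0), s)
    ph, pw = s - h, s - w  # total padding along each axis
    return h, w, ph, pw

h, w, ph, pw = letterbox_shape(480, 640)
print(h, w, ph, pw)  # 768 1024 256 0 -> image occupies rows ph//2 .. ph//2 + h
```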
spaces/Amrrs/portfolio/README.md
DELETED
@@ -1,36 +0,0 @@
----
-title: Portfolio
-emoji: 😻
-colorFrom: blue
-colorTo: blue
-sdk: static
-pinned: false
----
-
-# Configuration
-
-`title`: _string_
-Display title for the Space
-
-`emoji`: _string_
-Space emoji (emoji-only character allowed)
-
-`colorFrom`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`colorTo`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`sdk`: _string_
-Can be either `gradio`, `streamlit`, or `static`
-
-`sdk_version` : _string_
-Only applicable for `streamlit` SDK.
-See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
-
-`app_file`: _string_
-Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code).
-Path is relative to the root of the repository.
-
-`pinned`: _boolean_
-Whether the Space stays on top of your list.
spaces/AnTo2209/3D_Zeroshot_Neural_Style_Transfer/src/decoder/tensor_base.py
DELETED
@@ -1,458 +0,0 @@
-import torch
-import torch.nn
-import torch.nn.functional as F
-from .sh import eval_sh_bases
-import numpy as np
-import time
-
-
-def positional_encoding(positions, freqs):
-    freq_bands = (2 ** torch.arange(freqs).float()).to(positions.device)  # (F,)
-    pts = (positions[..., None] * freq_bands).reshape(
-        positions.shape[:-1] + (freqs * positions.shape[-1],))  # (..., DF)
-    pts = torch.cat([torch.sin(pts), torch.cos(pts)], dim=-1)
-    return pts
-
-
-def raw2alpha(sigma, dist):
-    # sigma, dist  [N_rays, N_samples]
-    alpha = 1. - torch.exp(-sigma * dist)
-
-    T = torch.cumprod(torch.cat([torch.ones(alpha.shape[0], 1).to(alpha.device), 1. - alpha + 1e-10], -1), -1)
-
-    weights = alpha * T[:, :-1]  # [N_rays, N_samples]
-    return alpha, weights, T[:, -1:]
-
-
-def SHRender(xyz_sampled, viewdirs, features):
-    sh_mult = eval_sh_bases(2, viewdirs)[:, None]
-    rgb_sh = features.view(-1, 3, sh_mult.shape[-1])
-    rgb = torch.relu(torch.sum(sh_mult * rgb_sh, dim=-1) + 0.5)
-    return rgb
-
-
-def RGBRender(xyz_sampled, viewdirs, features):
-    rgb = features
-    return rgb
-
-
-class AlphaGridMask(torch.nn.Module):
-    def __init__(self, device, aabb, alpha_volume):
-        super(AlphaGridMask, self).__init__()
-        self.device = device
-
-        self.aabb = aabb.to(self.device)
-        self.aabbSize = self.aabb[1] - self.aabb[0]
-        self.invgridSize = 1.0 / self.aabbSize * 2
-        self.alpha_volume = alpha_volume.view(1, 1, *alpha_volume.shape[-3:])
-        self.gridSize = torch.LongTensor([alpha_volume.shape[-1], alpha_volume.shape[-2], alpha_volume.shape[-3]]).to(
-            self.device)
-
-    def sample_alpha(self, xyz_sampled):
-        xyz_sampled = self.normalize_coord(xyz_sampled)
-        alpha_vals = F.grid_sample(self.alpha_volume, xyz_sampled.view(1, -1, 1, 1, 3), align_corners=True).view(-1)
-
-        return alpha_vals
-
-    def normalize_coord(self, xyz_sampled):
-        return (xyz_sampled - self.aabb[0]) * self.invgridSize - 1
-
-
-class MLPRender_Fea(torch.nn.Module):
-    def __init__(self, inChanel, viewpe=6, feape=6, featureC=128):
-        super(MLPRender_Fea, self).__init__()
-
-        self.in_mlpC = 2 * viewpe * 3 + 2 * feape * inChanel + 3 + inChanel
-        self.viewpe = viewpe
-        self.feape = feape
-        layer1 = torch.nn.Linear(self.in_mlpC, featureC)
-        layer2 = torch.nn.Linear(featureC, featureC)
-        layer3 = torch.nn.Linear(featureC, 3)
-
-        self.mlp = torch.nn.Sequential(layer1, torch.nn.ReLU(inplace=True), layer2, torch.nn.ReLU(inplace=True), layer3)
-        torch.nn.init.constant_(self.mlp[-1].bias, 0)
-
-    def forward(self, pts, viewdirs, features):
-        indata = [features, viewdirs]
-        if self.feape > 0:
-            indata += [positional_encoding(features, self.feape)]
-        if self.viewpe > 0:
-            indata += [positional_encoding(viewdirs, self.viewpe)]
-        mlp_in = torch.cat(indata, dim=-1)
-        rgb = self.mlp(mlp_in)
-        rgb = torch.sigmoid(rgb)
-
-        return rgb
-
-
-class MLPRender_PE(torch.nn.Module):
-    def __init__(self, inChanel, viewpe=6, pospe=6, featureC=128):
-        super(MLPRender_PE, self).__init__()
-
-        self.in_mlpC = (3 + 2 * viewpe * 3) + (3 + 2 * pospe * 3) + inChanel  #
-        self.viewpe = viewpe
-        self.pospe = pospe
-        layer1 = torch.nn.Linear(self.in_mlpC, featureC)
-        layer2 = torch.nn.Linear(featureC, featureC)
-        layer3 = torch.nn.Linear(featureC, 3)
-
-        self.mlp = torch.nn.Sequential(layer1, torch.nn.ReLU(inplace=True), layer2, torch.nn.ReLU(inplace=True), layer3)
-        torch.nn.init.constant_(self.mlp[-1].bias, 0)
-
-    def forward(self, pts, viewdirs, features):
-        indata = [features, viewdirs]
-        if self.pospe > 0:
-            indata += [positional_encoding(pts, self.pospe)]
-        if self.viewpe > 0:
-            indata += [positional_encoding(viewdirs, self.viewpe)]
-        mlp_in = torch.cat(indata, dim=-1)
-        rgb = self.mlp(mlp_in)
-        rgb = torch.sigmoid(rgb)
-
-        return rgb
-
-
-class MLPRender(torch.nn.Module):
-    def __init__(self, inChanel, viewpe=6, featureC=128):
-        super(MLPRender, self).__init__()
-
-        self.in_mlpC = (3 + 2 * viewpe * 3) + inChanel
-        self.viewpe = viewpe
-
-        layer1 = torch.nn.Linear(self.in_mlpC, featureC)
-        layer2 = torch.nn.Linear(featureC, featureC)
-        layer3 = torch.nn.Linear(featureC, 3)
-
-        self.mlp = torch.nn.Sequential(layer1, torch.nn.ReLU(inplace=True), layer2, torch.nn.ReLU(inplace=True), layer3)
-        torch.nn.init.constant_(self.mlp[-1].bias, 0)
-
-    def forward(self, pts, viewdirs, features):
-        indata = [features, viewdirs]
-        if self.viewpe > 0:
-            indata += [positional_encoding(viewdirs, self.viewpe)]
-        mlp_in = torch.cat(indata, dim=-1)
-        rgb = self.mlp(mlp_in)
-        rgb = torch.sigmoid(rgb)
-
-        return rgb
-
-
-class TensorBase(torch.nn.Module):
-    def __init__(self, aabb, gridSize, device, density_n_comp=8, appearance_n_comp=24, app_dim=27,
-                 shadingMode='MLP_PE', alphaMask=None, near_far=[2.0, 6.0],
-                 density_shift=-10, alphaMask_thres=0.001, distance_scale=25, rayMarch_weight_thres=0.0001,
-                 pos_pe=6, view_pe=6, fea_pe=6, featureC=128, step_ratio=2.0,
-                 fea2denseAct='softplus'):
-        super(TensorBase, self).__init__()
-
-        self.density_n_comp = density_n_comp
-        self.app_n_comp = appearance_n_comp
-        self.app_dim = app_dim
-        self.aabb = aabb
-        self.alphaMask = alphaMask
-        self.device = device
-
-        self.density_shift = density_shift
-        self.alphaMask_thres = alphaMask_thres
-        self.distance_scale = distance_scale
-        self.rayMarch_weight_thres = rayMarch_weight_thres
-        self.fea2denseAct = fea2denseAct
-
-        self.near_far = near_far
-        self.step_ratio = step_ratio
-
-        self.update_stepSize(gridSize)
-
-        self.matMode = [[0, 1], [0, 2], [1, 2]]
-        self.vecMode = [2, 1, 0]
-        self.comp_w = [1, 1, 1]
-
-        self.init_svd_volume(gridSize[0], device)
-
-        self.shadingMode, self.pos_pe, self.view_pe, self.fea_pe, self.featureC = shadingMode, pos_pe, view_pe, fea_pe, featureC
-        self.init_render_func(shadingMode, pos_pe, view_pe, fea_pe, featureC, device)
-
-    def init_render_func(self, shadingMode, pos_pe, view_pe, fea_pe, featureC, device):
-        if shadingMode == 'MLP_PE':
-            self.renderModule = MLPRender_PE(self.app_dim, view_pe, pos_pe, featureC).to(device)
-        elif shadingMode == 'MLP_Fea':
-            self.renderModule = MLPRender_Fea(self.app_dim, view_pe, fea_pe, featureC).to(device)
-        elif shadingMode == 'MLP':
-            self.renderModule = MLPRender(self.app_dim, view_pe, featureC).to(device)
-        elif shadingMode == 'SH':
-            self.renderModule = SHRender
-        elif shadingMode == 'RGB':
-            assert self.app_dim == 3
-            self.renderModule = RGBRender
-        else:
-            print("Unrecognized shading module")
-            exit()
-        print("pos_pe", pos_pe, "view_pe", view_pe, "fea_pe", fea_pe)
-
-    def update_stepSize(self, gridSize):
-        print("aabb", self.aabb.view(-1))
-        print("grid size", gridSize)
-        self.aabbSize = self.aabb[1] - self.aabb[0]
-        self.invaabbSize = 2.0 / self.aabbSize
-        self.gridSize = torch.LongTensor(gridSize).to(self.device)
-        self.units = self.aabbSize / (self.gridSize - 1)
-        self.stepSize = torch.mean(self.units) * self.step_ratio
-        self.aabbDiag = torch.sqrt(torch.sum(torch.square(self.aabbSize)))
-        self.nSamples = int((self.aabbDiag / self.stepSize).item()) + 1
-        print("sampling step size: ", self.stepSize)
-        print("sampling number: ", self.nSamples)
-
-    def init_svd_volume(self, res, device):
-        pass
-
-    def compute_features(self, xyz_sampled):
-        pass
-
-    def compute_densityfeature(self, xyz_sampled):
-        pass
-
-    def compute_appfeature(self, xyz_sampled):
-        pass
-
-    def normalize_coord(self, xyz_sampled):
-        return (xyz_sampled - self.aabb[0]) * self.invaabbSize - 1
-
-    def get_optparam_groups(self, lr_init_spatial=0.02, lr_init_network=0.001):
-        pass
-
-    def get_kwargs(self):
-        return {
-            'aabb': self.aabb,
-            'gridSize': self.gridSize.tolist(),
-            'density_n_comp': self.density_n_comp,
-            'appearance_n_comp': self.app_n_comp,
-            'app_dim': self.app_dim,
-
-            'density_shift': self.density_shift,
-            'alphaMask_thres': self.alphaMask_thres,
-            'distance_scale': self.distance_scale,
-            'rayMarch_weight_thres': self.rayMarch_weight_thres,
-            'fea2denseAct': self.fea2denseAct,
-
-            'near_far': self.near_far,
-            'step_ratio': self.step_ratio,
-
-            'shadingMode': self.shadingMode,
-            'pos_pe': self.pos_pe,
-            'view_pe': self.view_pe,
-            'fea_pe': self.fea_pe,
-            'featureC': self.featureC
-        }
-
-    def save(self, path):
-        kwargs = self.get_kwargs()
-        ckpt = {'kwargs': kwargs, 'state_dict': self.state_dict()}
-        if self.alphaMask is not None:
-            alpha_volume = self.alphaMask.alpha_volume.bool().cpu().numpy()
-            ckpt.update({'alphaMask.shape': alpha_volume.shape})
-            ckpt.update({'alphaMask.mask': np.packbits(alpha_volume.reshape(-1))})
-            ckpt.update({'alphaMask.aabb': self.alphaMask.aabb.cpu()})
-        torch.save(ckpt, path)
-
-    def load(self, ckpt):
-        if 'alphaMask.aabb' in ckpt.keys():
-            length = np.prod(ckpt['alphaMask.shape'])
-            alpha_volume = torch.from_numpy(
-                np.unpackbits(ckpt['alphaMask.mask'])[:length].reshape(ckpt['alphaMask.shape']))
-            self.alphaMask = AlphaGridMask(self.device, ckpt['alphaMask.aabb'].to(self.device),
-                                           alpha_volume.float().to(self.device))
-        self.load_state_dict(ckpt['state_dict'])
-
-    def sample_ray_ndc(self, rays_o, rays_d, is_train=True, N_samples=-1):
-        N_samples = N_samples if N_samples > 0 else self.nSamples
-        near, far = self.near_far
-        interpx = torch.linspace(near, far, N_samples).unsqueeze(0).to(rays_o)
-        if is_train:
-            interpx += torch.rand_like(interpx).to(rays_o) * ((far - near) / N_samples)
-
-        rays_pts = rays_o[..., None, :] + rays_d[..., None, :] * interpx[..., None]
-        mask_outbbox = ((self.aabb[0] > rays_pts) | (rays_pts > self.aabb[1])).any(dim=-1)
-        return rays_pts, interpx, ~mask_outbbox
-
-    def sample_ray(self, rays_o, rays_d, is_train=True, N_samples=-1):
-        N_samples = N_samples if N_samples > 0 else self.nSamples
-        stepsize = self.stepSize
-        near, far = self.near_far
-        vec = torch.where(rays_d == 0, torch.full_like(rays_d, 1e-6), rays_d)
-        rate_a = (self.aabb[1] - rays_o) / vec
-        rate_b = (self.aabb[0] - rays_o) / vec
-        t_min = torch.minimum(rate_a, rate_b).amax(-1).clamp(min=near, max=far)
-
-        rng = torch.arange(N_samples)[None].float()
-        if is_train:
-            rng = rng.repeat(rays_d.shape[-2], 1)
-            rng += torch.rand_like(rng[:, [0]])
-        step = stepsize * rng.to(rays_o.device)
-        interpx = (t_min[..., None] + step)
-
-        rays_pts = rays_o[..., None, :] + rays_d[..., None, :] * interpx[..., None]
-        mask_outbbox = ((self.aabb[0] > rays_pts) | (rays_pts > self.aabb[1])).any(dim=-1)
-
-        return rays_pts, interpx, ~mask_outbbox
-
-    def shrink(self, new_aabb, voxel_size):
-        pass
-
-    @torch.no_grad()
-    def getDenseAlpha(self, gridSize=None):
-        gridSize = self.gridSize if gridSize is None else gridSize
-
-        samples = torch.stack(torch.meshgrid(
-            torch.linspace(0, 1, gridSize[0]),
-            torch.linspace(0, 1, gridSize[1]),
-            torch.linspace(0, 1, gridSize[2]),
-        ), -1).to(self.device)
-        dense_xyz = self.aabb[0] * (1 - samples) + self.aabb[1] * samples
-
-        # dense_xyz = dense_xyz
-        # print(self.stepSize, self.distance_scale*self.aabbDiag)
-        alpha = torch.zeros_like(dense_xyz[..., 0])
-        for i in range(gridSize[0]):
-            alpha[i] = self.compute_alpha(dense_xyz[i].view(-1, 3), self.stepSize).view((gridSize[1], gridSize[2]))
-        return alpha, dense_xyz
-
-    @torch.no_grad()
-    def updateAlphaMask(self, gridSize=(200, 200, 200)):
-
-        alpha, dense_xyz = self.getDenseAlpha(gridSize)
-        dense_xyz = dense_xyz.transpose(0, 2).contiguous()
-        alpha = alpha.clamp(0, 1).transpose(0, 2).contiguous()[None, None]
-        total_voxels = gridSize[0] * gridSize[1] * gridSize[2]
-
-        ks = 3
-        alpha = F.max_pool3d(alpha, kernel_size=ks, padding=ks // 2, stride=1).view(gridSize[::-1])
-        alpha[alpha >= self.alphaMask_thres] = 1
-        alpha[alpha < self.alphaMask_thres] = 0
-
-        self.alphaMask = AlphaGridMask(self.device, self.aabb, alpha)
-
-        valid_xyz = dense_xyz[alpha > 0.5]
-
-        xyz_min = valid_xyz.amin(0)
-        xyz_max = valid_xyz.amax(0)
-
-        new_aabb = torch.stack((xyz_min, xyz_max))
-
-        total = torch.sum(alpha)
-        print(f"bbox: {xyz_min, xyz_max} alpha rest %%%f" % (total / total_voxels * 100))
-        return new_aabb
-
-    @torch.no_grad()
-    def filtering_rays(self, all_rays, all_rgbs, N_samples=256, chunk=10240 * 5, bbox_only=False):
-        print('========> filtering rays ...')
-        tt = time.time()
-
-        N = torch.tensor(all_rays.shape[:-1]).prod()
-
-        mask_filtered = []
-        idx_chunks = torch.split(torch.arange(N), chunk)
-        for idx_chunk in idx_chunks:
-            rays_chunk = all_rays[idx_chunk].to(self.device)
-
-            rays_o, rays_d = rays_chunk[..., :3], rays_chunk[..., 3:6]
-            if bbox_only:
-                vec = torch.where(rays_d == 0, torch.full_like(rays_d, 1e-6), rays_d)
-                rate_a = (self.aabb[1] - rays_o) / vec
-                rate_b = (self.aabb[0] - rays_o) / vec
-                t_min = torch.minimum(rate_a, rate_b).amax(-1)  # .clamp(min=near, max=far)
-                t_max = torch.maximum(rate_a, rate_b).amin(-1)  # .clamp(min=near, max=far)
-                mask_inbbox = t_max > t_min
-
-            else:
-                xyz_sampled, _, _ = self.sample_ray(rays_o, rays_d, N_samples=N_samples, is_train=False)
-                mask_inbbox = (self.alphaMask.sample_alpha(xyz_sampled).view(xyz_sampled.shape[:-1]) > 0).any(-1)
-
-            mask_filtered.append(mask_inbbox.cpu())
-
-        mask_filtered = torch.cat(mask_filtered).view(all_rgbs.shape[:-1])
-
-        print(f'Ray filtering done! takes {time.time() - tt} s. ray mask ratio: {torch.sum(mask_filtered) / N}')
-        return all_rays[mask_filtered], all_rgbs[mask_filtered]
-
-    def feature2density(self, density_features):
-        if self.fea2denseAct == "softplus":
-            return F.softplus(density_features + self.density_shift)
-        elif self.fea2denseAct == "relu":
-            return F.relu(density_features)
-
-    def compute_alpha(self, xyz_locs, length=1):
-
-        if self.alphaMask is not None:
-            alphas = self.alphaMask.sample_alpha(xyz_locs)
-            alpha_mask = alphas > 0
-        else:
-            alpha_mask = torch.ones_like(xyz_locs[:, 0], dtype=bool)
-
-        sigma = torch.zeros(xyz_locs.shape[:-1], device=xyz_locs.device)
-
-        if alpha_mask.any():
-            xyz_sampled = self.normalize_coord(xyz_locs[alpha_mask])
-            sigma_feature = self.compute_densityfeature(xyz_sampled)
-            validsigma = self.feature2density(sigma_feature)
-            sigma[alpha_mask] = validsigma
-
-        alpha = 1 - torch.exp(-sigma * length).view(xyz_locs.shape[:-1])
-
-        return alpha
-
-    def forward(self, rays_chunk, white_bg=True, is_train=False, ndc_ray=False, N_samples=-1):
-
-        # sample points
-        viewdirs = rays_chunk[:, 3:6]
-        if ndc_ray:
-            xyz_sampled, z_vals, ray_valid = self.sample_ray_ndc(rays_chunk[:, :3], viewdirs, is_train=is_train,
-                                                                 N_samples=N_samples)
-            dists = torch.cat((z_vals[:, 1:] - z_vals[:, :-1], torch.zeros_like(z_vals[:, :1])), dim=-1)
-            rays_norm = torch.norm(viewdirs, dim=-1, keepdim=True)
-            dists = dists * rays_norm
-            viewdirs = viewdirs / rays_norm
-        else:
-            xyz_sampled, z_vals, ray_valid = self.sample_ray(rays_chunk[:, :3], viewdirs, is_train=is_train,
-                                                             N_samples=N_samples)
-            dists = torch.cat((z_vals[:, 1:] - z_vals[:, :-1], torch.zeros_like(z_vals[:, :1])), dim=-1)
-        viewdirs = viewdirs.view(-1, 1, 3).expand(xyz_sampled.shape)
-
-        if self.alphaMask is not None:
-            alphas = self.alphaMask.sample_alpha(xyz_sampled[ray_valid])
-            alpha_mask = alphas > 0
-            ray_invalid = ~ray_valid
-            ray_invalid[ray_valid] |= (~alpha_mask)
-            ray_valid = ~ray_invalid
-
-        sigma = torch.zeros(xyz_sampled.shape[:-1], device=xyz_sampled.device)
-        rgb = torch.zeros((*xyz_sampled.shape[:2], 3), device=xyz_sampled.device)
-
-        if ray_valid.any():
-            xyz_sampled = self.normalize_coord(xyz_sampled)
-            sigma_feature = self.compute_densityfeature(xyz_sampled[ray_valid])
-
-            validsigma = self.feature2density(sigma_feature)
-            sigma[ray_valid] = validsigma
-
-        alpha, weight, bg_weight = raw2alpha(sigma, dists * self.distance_scale)
-
-        app_mask = weight > self.rayMarch_weight_thres
-
-        if app_mask.any():
-            app_features = self.compute_appfeature(xyz_sampled[app_mask])
-            valid_rgbs = self.renderModule(xyz_sampled[app_mask], viewdirs[app_mask], app_features)
-            rgb[app_mask] = valid_rgbs
-
-        acc_map = torch.sum(weight, -1)
-        rgb_map = torch.sum(weight[..., None] * rgb, -2)
-
-        if white_bg or (is_train and torch.rand((1,)) < 0.5):
-            rgb_map = rgb_map + (1. - acc_map[..., None])
-
-        rgb_map = rgb_map.clamp(0, 1)
-
-        with torch.no_grad():
-            depth_map = torch.sum(weight * z_vals, -1)
-            depth_map = depth_map + (1. - acc_map) * rays_chunk[..., -1]
-
-        return rgb_map, depth_map  # rgb, sigma, alpha, weight, bg_weight
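`raw2alpha` above is the usual volume-rendering discretization: per-sample opacity α_i = 1 − exp(−σ_i·δ_i), transmittance T_i = Π_{j<i}(1 − α_j), and compositing weights w_i = α_i·T_i, which telescope so that Σ w_i + T_final = 1. A quick standalone check of that identity with made-up densities (values are illustrative):

```python
import torch

sigma = torch.tensor([[0.5, 1.0, 2.0]])   # densities along one ray
dist = torch.tensor([[0.1, 0.1, 0.1]])    # sample spacings

alpha = 1. - torch.exp(-sigma * dist)
# transmittance before each sample: T_i = prod_{j<i} (1 - alpha_j)
T = torch.cumprod(torch.cat([torch.ones(1, 1), 1. - alpha + 1e-10], -1), -1)
weights = alpha * T[:, :-1]

print(weights, T[:, -1:])
print(weights.sum() + T[:, -1])  # ≈ 1.0 (up to the 1e-10 epsilon)
```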
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/models/overview.md
DELETED
@@ -1,12 +0,0 @@
-# Models
-
-🤗 Diffusers provides pretrained models for popular algorithms and modules to create custom diffusion systems. The primary function of models is to denoise an input sample as modeled by the distribution \\(p_{\theta}(x_{t-1}|x_{t})\\).
-
-All models are built from the base [`ModelMixin`] class which is a [`torch.nn.module`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html) providing basic functionality for saving and loading models, locally and from the Hugging Face Hub.
-
-## ModelMixin
-[[autodoc]] ModelMixin
-
-## FlaxModelMixin
-
-[[autodoc]] FlaxModelMixin
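As the deleted page notes, `ModelMixin` supplies Hub-aware saving and loading. A minimal sketch of that round trip (a sketch only: it assumes `UNet2DModel` as the `ModelMixin` subclass and a pipeline repo that stores its UNet under a `unet` subfolder; the repo id and local path are illustrative):

```python
from diffusers import UNet2DModel  # a ModelMixin subclass

# Load from the Hub (assumes the checkpoint keeps the UNet in a "unet"
# subfolder), then save locally; both methods come from ModelMixin.
unet = UNet2DModel.from_pretrained("google/ddpm-cat-256", subfolder="unet")
unet.save_pretrained("./local-unet")
reloaded = UNet2DModel.from_pretrained("./local-unet")
```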
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/tiled_upscaling.py
DELETED
@@ -1,298 +0,0 @@
-# Copyright 2023 Peter Willemsen <[email protected]>. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import math
-from typing import Callable, List, Optional, Union
-
-import numpy as np
-import PIL
-import torch
-from PIL import Image
-from transformers import CLIPTextModel, CLIPTokenizer
-
-from diffusers.models import AutoencoderKL, UNet2DConditionModel
-from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
-from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
-
-
-def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
-    size_x = size[0] - overlap_pixels * 2
-    size_y = size[1] - overlap_pixels * 2
-    for letter in ["l", "r"]:
-        if letter in remove_borders:
-            size_x += overlap_pixels
-    for letter in ["t", "b"]:
-        if letter in remove_borders:
-            size_y += overlap_pixels
-    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
-    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)
-
-    if "l" in remove_borders:
-        mask = mask[:, overlap_pixels : mask.shape[1]]
-    if "r" in remove_borders:
-        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
-    if "t" in remove_borders:
-        mask = mask[overlap_pixels : mask.shape[0], :]
-    if "b" in remove_borders:
-        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
-    return mask
-
-
-def clamp(n, smallest, largest):
-    return max(smallest, min(n, largest))
-
-
-def clamp_rect(rect: [int], min: [int], max: [int]):
-    return (
-        clamp(rect[0], min[0], max[0]),
-        clamp(rect[1], min[1], max[1]),
-        clamp(rect[2], min[0], max[0]),
-        clamp(rect[3], min[1], max[1]),
-    )
-
-
-def add_overlap_rect(rect: [int], overlap: int, image_size: [int]):
-    rect = list(rect)
-    rect[0] -= overlap
-    rect[1] -= overlap
-    rect[2] += overlap
-    rect[3] += overlap
-    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
-    return rect
-
-
-def squeeze_tile(tile, original_image, original_slice, slice_x):
-    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
-    result.paste(
-        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
-            (slice_x, 0, slice_x + original_slice, tile.size[1])
-        ),
-        (0, 0),
-    )
-    result.paste(tile, (original_slice, 0))
-    return result
-
-
-def unsqueeze_tile(tile, original_image_slice):
-    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
-    tile = tile.crop(crop_rect)
-    return tile
-
-
-def next_divisible(n, d):
-    divisor = n % d
-    return n - divisor
-
-
-class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
-    r"""
-    Pipeline for tile-based text-guided image super-resolution using Stable Diffusion 2, trading memory for compute
-    to create gigantic images.
-
-    This model inherits from [`StableDiffusionUpscalePipeline`]. Check the superclass documentation for the generic methods the
-    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
-
-    Args:
-        vae ([`AutoencoderKL`]):
-            Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
-        text_encoder ([`CLIPTextModel`]):
-            Frozen text-encoder. Stable Diffusion uses the text portion of
-            [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
-            the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
-        tokenizer (`CLIPTokenizer`):
-            Tokenizer of class
-            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
-        unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
-        low_res_scheduler ([`SchedulerMixin`]):
-            A scheduler used to add initial noise to the low res conditioning image. It must be an instance of
-            [`DDPMScheduler`].
-        scheduler ([`SchedulerMixin`]):
-            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
-            [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
-    """
-
-    def __init__(
-        self,
-        vae: AutoencoderKL,
-        text_encoder: CLIPTextModel,
-        tokenizer: CLIPTokenizer,
-        unet: UNet2DConditionModel,
-        low_res_scheduler: DDPMScheduler,
-        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
-        max_noise_level: int = 350,
-    ):
-        super().__init__(
-            vae=vae,
-            text_encoder=text_encoder,
-            tokenizer=tokenizer,
-            unet=unet,
-            low_res_scheduler=low_res_scheduler,
-            scheduler=scheduler,
-            max_noise_level=max_noise_level,
-        )
-
-    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
-        torch.manual_seed(0)
-        crop_rect = (
-            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
-            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
-            min(image.size[0], (x + 1) * tile_size),
-            min(image.size[1], (y + 1) * tile_size),
-        )
-        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
-        tile = image.crop(crop_rect_with_overlap)
-        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
-        translated_slice_x = translated_slice_x - (original_image_slice / 2)
-        translated_slice_x = max(0, translated_slice_x)
-        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
-        orig_input_size = to_input.size
-        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
-        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
-        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
-        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
-        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
-        remove_borders = []
-        if x == 0:
-            remove_borders.append("l")
-        elif crop_rect[2] == image.size[0]:
-            remove_borders.append("r")
-        if y == 0:
-            remove_borders.append("t")
-        elif crop_rect[3] == image.size[1]:
-            remove_borders.append("b")
-        transparency_mask = Image.fromarray(
-            make_transparency_mask(
-                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
-            ),
-            mode="L",
-        )
-        final_image.paste(
-            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
-        )
-
-    @torch.no_grad()
-    def __call__(
-        self,
-        prompt: Union[str, List[str]],
-        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
-        num_inference_steps: int = 75,
-        guidance_scale: float = 9.0,
-        noise_level: int = 50,
-        negative_prompt: Optional[Union[str, List[str]]] = None,
-        num_images_per_prompt: Optional[int] = 1,
-        eta: float = 0.0,
-        generator: Optional[torch.Generator] = None,
-        latents: Optional[torch.FloatTensor] = None,
-        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
-        callback_steps: int = 1,
-        tile_size: int = 128,
-        tile_border: int = 32,
-        original_image_slice: int = 32,
-    ):
-        r"""
-        Function invoked when calling the pipeline for generation.
-
-        Args:
-            prompt (`str` or `List[str]`):
-                The prompt or prompts to guide the image generation.
-            image (`PIL.Image.Image` or List[`PIL.Image.Image`] or `torch.FloatTensor`):
-                `Image`, or tensor representing an image batch which will be upscaled. *
-            num_inference_steps (`int`, *optional*, defaults to 50):
-                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
-                expense of slower inference.
-            guidance_scale (`float`, *optional*, defaults to 7.5):
-                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
-                `guidance_scale` is defined as `w` of equation 2. of [Imagen
-                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
-                1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
-                usually at the expense of lower image quality.
-            negative_prompt (`str` or `List[str]`, *optional*):
-                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
-                if `guidance_scale` is less than `1`).
-            num_images_per_prompt (`int`, *optional*, defaults to 1):
-                The number of images to generate per prompt.
-            eta (`float`, *optional*, defaults to 0.0):
-                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
-                [`schedulers.DDIMScheduler`], will be ignored for others.
-            generator (`torch.Generator`, *optional*):
-                A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
-                deterministic.
-            latents (`torch.FloatTensor`, *optional*):
-                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
-                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
-            tile_size (`int`, *optional*):
-                The size of the tiles. Too big can result in an OOM-error.
-            tile_border (`int`, *optional*):
-                The number of pixels around a tile to consider (bigger means less seams, too big can lead to an OOM-error).
-            original_image_slice (`int`, *optional*):
-                The amount of pixels of the original image to calculate with the current tile (bigger means more depth
-                is preserved, less blur occurs in the final image, too big can lead to an OOM-error or loss in detail).
-            callback (`Callable`, *optional*):
-                A function that take a callback function with a single argument, a dict,
-                that contains the (partially) processed image under "image",
-                as well as the progress (0 to 1, where 1 is completed) under "progress".
-
-        Returns: A PIL.Image that is 4 times larger than the original input image.
-
-        """
-
-        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
-        tcx = math.ceil(image.size[0] / tile_size)
-        tcy = math.ceil(image.size[1] / tile_size)
-        total_tile_count = tcx * tcy
-        current_count = 0
-        for y in range(tcy):
-            for x in range(tcx):
-                self._process_tile(
-                    original_image_slice,
-                    x,
-                    y,
-                    tile_size,
-                    tile_border,
-                    image,
-                    final_image,
-                    prompt=prompt,
-                    num_inference_steps=num_inference_steps,
-                    guidance_scale=guidance_scale,
-                    noise_level=noise_level,
-                    negative_prompt=negative_prompt,
-                    num_images_per_prompt=num_images_per_prompt,
-                    eta=eta,
-                    generator=generator,
-                    latents=latents,
-                )
-                current_count += 1
-                if callback is not None:
-                    callback({"progress": current_count / total_tile_count, "image": final_image})
-        return final_image
-
-
-def main():
-    # Run a demo
-    model_id = "stabilityai/stable-diffusion-x4-upscaler"
-    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
-    pipe = pipe.to("cuda")
-    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")
-
-    def callback(obj):
-        print(f"progress: {obj['progress']:.4f}")
-        obj["image"].save("diffusers_library_progress.jpg")
-
-    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
-    final_image.save("diffusers_library.jpg")
-
-
-if __name__ == "__main__":
-    main()
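`make_transparency_mask` above blends adjacent tiles by fading each tile's alpha linearly to zero across the overlap region, so overlapping pastes cross-fade rather than seam. A tiny standalone check of that ramp behaviour (`ramp_mask` is an illustrative reduction of the helper, without the border-removal cases):

```python
import numpy as np

def ramp_mask(size, overlap):
    # Fully opaque core, fading linearly to 0 over `overlap` pixels per edge.
    core = np.ones((size - 2 * overlap, size - 2 * overlap), dtype=np.uint8) * 255
    return np.pad(core, mode="linear_ramp", pad_width=overlap, end_values=0)

m = ramp_mask(8, 2)
print(m[0])  # edge row: all zeros (fully transparent)
print(m[4])  # middle row ramps 0 .. 255 .. 0
```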
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/audio_diffusion/__init__.py
DELETED
File without changes
spaces/Andy1621/uniformer_image_detection/configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py
DELETED
@@ -1,46 +0,0 @@
-_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
-norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
-model = dict(
-    pretrained='open-mmlab://detectron/resnet50_gn',
-    backbone=dict(norm_cfg=norm_cfg),
-    neck=dict(norm_cfg=norm_cfg),
-    roi_head=dict(
-        bbox_head=dict(
-            type='Shared4Conv1FCBBoxHead',
-            conv_out_channels=256,
-            norm_cfg=norm_cfg),
-        mask_head=dict(norm_cfg=norm_cfg)))
-img_norm_cfg = dict(
-    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
-train_pipeline = [
-    dict(type='LoadImageFromFile'),
-    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
-    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
-    dict(type='RandomFlip', flip_ratio=0.5),
-    dict(type='Normalize', **img_norm_cfg),
-    dict(type='Pad', size_divisor=32),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
-]
-test_pipeline = [
-    dict(type='LoadImageFromFile'),
-    dict(
-        type='MultiScaleFlipAug',
-        img_scale=(1333, 800),
-        flip=False,
-        transforms=[
-            dict(type='Resize', keep_ratio=True),
-            dict(type='RandomFlip'),
-            dict(type='Normalize', **img_norm_cfg),
-            dict(type='Pad', size_divisor=32),
-            dict(type='ImageToTensor', keys=['img']),
-            dict(type='Collect', keys=['img']),
-        ])
-]
-data = dict(
-    train=dict(pipeline=train_pipeline),
-    val=dict(pipeline=test_pipeline),
-    test=dict(pipeline=test_pipeline))
-# learning policy
-lr_config = dict(step=[16, 22])
-runner = dict(type='EpochBasedRunner', max_epochs=24)
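The config above only overrides what differs from its `_base_`: mmcv-style configs compose by recursively merging the child dict onto the base. A simplified sketch of those merge semantics (conceptual only, not mmcv's actual implementation):

```python
def merge_cfg(base: dict, override: dict) -> dict:
    # Recursively overlay `override` onto `base`, keeping base keys that the
    # child does not mention (simplified model of _base_ inheritance).
    out = dict(base)
    for k, v in override.items():
        if isinstance(v, dict) and isinstance(out.get(k), dict):
            out[k] = merge_cfg(out[k], v)
        else:
            out[k] = v
    return out

base = {"model": {"backbone": {"depth": 50, "norm_cfg": {"type": "BN"}}}}
child = {"model": {"backbone": {"norm_cfg": {"type": "GN", "num_groups": 32}}}}
print(merge_cfg(base, child)["model"]["backbone"])
# {'depth': 50, 'norm_cfg': {'type': 'GN', 'num_groups': 32}}
```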
spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/atss_head.py
DELETED
@@ -1,689 +0,0 @@
-import torch
-import torch.nn as nn
-from mmcv.cnn import ConvModule, Scale, bias_init_with_prob, normal_init
-from mmcv.runner import force_fp32
-
-from mmdet.core import (anchor_inside_flags, build_assigner, build_sampler,
-                        images_to_levels, multi_apply, multiclass_nms,
-                        reduce_mean, unmap)
-from ..builder import HEADS, build_loss
-from .anchor_head import AnchorHead
-
-EPS = 1e-12
-
-
-@HEADS.register_module()
-class ATSSHead(AnchorHead):
-    """Bridging the Gap Between Anchor-based and Anchor-free Detection via
-    Adaptive Training Sample Selection.
-
-    ATSS head structure is similar with FCOS, however ATSS use anchor boxes
-    and assign label by Adaptive Training Sample Selection instead max-iou.
-
-    https://arxiv.org/abs/1912.02424
-    """
-
-    def __init__(self,
-                 num_classes,
-                 in_channels,
-                 stacked_convs=4,
-                 conv_cfg=None,
-                 norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
-                 loss_centerness=dict(
-                     type='CrossEntropyLoss',
-                     use_sigmoid=True,
-                     loss_weight=1.0),
-                 **kwargs):
-        self.stacked_convs = stacked_convs
-        self.conv_cfg = conv_cfg
-        self.norm_cfg = norm_cfg
-        super(ATSSHead, self).__init__(num_classes, in_channels, **kwargs)
-
-        self.sampling = False
-        if self.train_cfg:
-            self.assigner = build_assigner(self.train_cfg.assigner)
-            # SSD sampling=False so use PseudoSampler
-            sampler_cfg = dict(type='PseudoSampler')
-            self.sampler = build_sampler(sampler_cfg, context=self)
-        self.loss_centerness = build_loss(loss_centerness)
-
-    def _init_layers(self):
-        """Initialize layers of the head."""
-        self.relu = nn.ReLU(inplace=True)
-        self.cls_convs = nn.ModuleList()
-        self.reg_convs = nn.ModuleList()
-        for i in range(self.stacked_convs):
-            chn = self.in_channels if i == 0 else self.feat_channels
-            self.cls_convs.append(
-                ConvModule(
-                    chn,
-                    self.feat_channels,
-                    3,
-                    stride=1,
-                    padding=1,
-                    conv_cfg=self.conv_cfg,
-                    norm_cfg=self.norm_cfg))
-            self.reg_convs.append(
-                ConvModule(
-                    chn,
-                    self.feat_channels,
-                    3,
-                    stride=1,
-                    padding=1,
-                    conv_cfg=self.conv_cfg,
-                    norm_cfg=self.norm_cfg))
-        self.atss_cls = nn.Conv2d(
-            self.feat_channels,
-            self.num_anchors * self.cls_out_channels,
-            3,
-            padding=1)
-        self.atss_reg = nn.Conv2d(
-            self.feat_channels, self.num_anchors * 4, 3, padding=1)
-        self.atss_centerness = nn.Conv2d(
-            self.feat_channels, self.num_anchors * 1, 3, padding=1)
-        self.scales = nn.ModuleList(
-            [Scale(1.0) for _ in self.anchor_generator.strides])
-
-    def init_weights(self):
-        """Initialize weights of the head."""
-        for m in self.cls_convs:
-            normal_init(m.conv, std=0.01)
-        for m in self.reg_convs:
-            normal_init(m.conv, std=0.01)
-        bias_cls = bias_init_with_prob(0.01)
-        normal_init(self.atss_cls, std=0.01, bias=bias_cls)
-        normal_init(self.atss_reg, std=0.01)
-        normal_init(self.atss_centerness, std=0.01)
-
-    def forward(self, feats):
-        """Forward features from the upstream network.
-
-        Args:
-            feats (tuple[Tensor]): Features from the upstream network, each is
-                a 4D-tensor.
-
-        Returns:
-            tuple: Usually a tuple of classification scores and bbox prediction
-                cls_scores (list[Tensor]): Classification scores for all scale
-                    levels, each is a 4D-tensor, the channels number is
-                    num_anchors * num_classes.
-                bbox_preds (list[Tensor]): Box energies / deltas for all scale
-                    levels, each is a 4D-tensor, the channels number is
-                    num_anchors * 4.
-        """
-        return multi_apply(self.forward_single, feats, self.scales)
-
-    def forward_single(self, x, scale):
-        """Forward feature of a single scale level.
-
-        Args:
-            x (Tensor): Features of a single scale level.
-            scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize
-                the bbox prediction.
-
-        Returns:
-            tuple:
-                cls_score (Tensor): Cls scores for a single scale level
-                    the channels number is num_anchors * num_classes.
-                bbox_pred (Tensor): Box energies / deltas for a single scale
-                    level, the channels number is num_anchors * 4.
-                centerness (Tensor): Centerness for a single scale level, the
-                    channel number is (N, num_anchors * 1, H, W).
-        """
-        cls_feat = x
-        reg_feat = x
-        for cls_conv in self.cls_convs:
-            cls_feat = cls_conv(cls_feat)
-        for reg_conv in self.reg_convs:
-            reg_feat = reg_conv(reg_feat)
-        cls_score = self.atss_cls(cls_feat)
-        # we just follow atss, not apply exp in bbox_pred
-        bbox_pred = scale(self.atss_reg(reg_feat)).float()
-        centerness = self.atss_centerness(reg_feat)
-        return cls_score, bbox_pred, centerness
-
-    def loss_single(self, anchors, cls_score, bbox_pred, centerness, labels,
-                    label_weights, bbox_targets, num_total_samples):
-        """Compute loss of a single scale level.
-
-        Args:
-            cls_score (Tensor): Box scores for each scale level
-                Has shape (N, num_anchors * num_classes, H, W).
-            bbox_pred (Tensor): Box energies / deltas for each scale
-                level with shape (N, num_anchors * 4, H, W).
-            anchors (Tensor): Box reference for each scale level with shape
-                (N, num_total_anchors, 4).
-            labels (Tensor): Labels of each anchors with shape
-                (N, num_total_anchors).
-            label_weights (Tensor): Label weights of each anchor with shape
-                (N, num_total_anchors)
-            bbox_targets (Tensor): BBox regression targets of each anchor wight
-                shape (N, num_total_anchors, 4).
-            num_total_samples (int): Number os positive samples that is
-                reduced over all GPUs.
-
-        Returns:
-            dict[str, Tensor]: A dictionary of loss components.
-        """
-
-        anchors = anchors.reshape(-1, 4)
-        cls_score = cls_score.permute(0, 2, 3, 1).reshape(
-            -1, self.cls_out_channels).contiguous()
-        bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
-        centerness = centerness.permute(0, 2, 3, 1).reshape(-1)
-        bbox_targets = bbox_targets.reshape(-1, 4)
-        labels = labels.reshape(-1)
-        label_weights = label_weights.reshape(-1)
-
-        # classification loss
-        loss_cls = self.loss_cls(
-            cls_score, labels, label_weights, avg_factor=num_total_samples)
-
-        # FG cat_id: [0, num_classes -1], BG cat_id: num_classes
-        bg_class_ind = self.num_classes
-        pos_inds = ((labels >= 0)
-                    & (labels < bg_class_ind)).nonzero().squeeze(1)
-
-        if len(pos_inds) > 0:
-            pos_bbox_targets = bbox_targets[pos_inds]
-            pos_bbox_pred = bbox_pred[pos_inds]
-            pos_anchors = anchors[pos_inds]
-            pos_centerness = centerness[pos_inds]
-
-            centerness_targets = self.centerness_target(
-                pos_anchors, pos_bbox_targets)
-            pos_decode_bbox_pred = self.bbox_coder.decode(
-                pos_anchors, pos_bbox_pred)
-            pos_decode_bbox_targets = self.bbox_coder.decode(
-                pos_anchors, pos_bbox_targets)
-
-            # regression loss
-            loss_bbox = self.loss_bbox(
-                pos_decode_bbox_pred,
-                pos_decode_bbox_targets,
-                weight=centerness_targets,
-                avg_factor=1.0)
-
-            # centerness loss
-            loss_centerness = self.loss_centerness(
-                pos_centerness,
-                centerness_targets,
-                avg_factor=num_total_samples)
-
-        else:
-            loss_bbox = bbox_pred.sum() * 0
-            loss_centerness = centerness.sum() * 0
-            centerness_targets = bbox_targets.new_tensor(0.)
-
-        return loss_cls, loss_bbox, loss_centerness, centerness_targets.sum()
-
-    @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses'))
-    def loss(self,
-             cls_scores,
-             bbox_preds,
-             centernesses,
-             gt_bboxes,
-             gt_labels,
-             img_metas,
-             gt_bboxes_ignore=None):
-        """Compute losses of the head.
-
-        Args:
-            cls_scores (list[Tensor]): Box scores for each scale level
-                Has shape (N, num_anchors * num_classes, H, W)
-            bbox_preds (list[Tensor]): Box energies / deltas for each scale
-                level with shape (N, num_anchors * 4, H, W)
-            centernesses (list[Tensor]): Centerness for each scale
-                level with shape (N, num_anchors * 1, H, W)
-            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
-                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
-            gt_labels (list[Tensor]): class indices corresponding to each box
-            img_metas (list[dict]): Meta information of each image, e.g.,
-                image size, scaling factor, etc.
-            gt_bboxes_ignore (list[Tensor] | None): specify which bounding
-                boxes can be ignored when computing the loss.
-
-        Returns:
-            dict[str, Tensor]: A dictionary of loss components.
-        """
-        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
-        assert len(featmap_sizes) == self.anchor_generator.num_levels
-
-        device = cls_scores[0].device
-        anchor_list, valid_flag_list = self.get_anchors(
-            featmap_sizes, img_metas, device=device)
-        label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
-
-        cls_reg_targets = self.get_targets(
-            anchor_list,
-            valid_flag_list,
-            gt_bboxes,
-            img_metas,
-            gt_bboxes_ignore_list=gt_bboxes_ignore,
-            gt_labels_list=gt_labels,
-            label_channels=label_channels)
-        if cls_reg_targets is None:
-            return None
-
-        (anchor_list, labels_list, label_weights_list, bbox_targets_list,
-         bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets
-
-        num_total_samples = reduce_mean(
-            torch.tensor(num_total_pos, dtype=torch.float,
-                         device=device)).item()
-        num_total_samples = max(num_total_samples, 1.0)
-
-        losses_cls, losses_bbox, loss_centerness,\
-            bbox_avg_factor = multi_apply(
-                self.loss_single,
-                anchor_list,
-                cls_scores,
-                bbox_preds,
-                centernesses,
-                labels_list,
-                label_weights_list,
-                bbox_targets_list,
-                num_total_samples=num_total_samples)
-
-        bbox_avg_factor = sum(bbox_avg_factor)
-        bbox_avg_factor = reduce_mean(bbox_avg_factor).item()
-        if bbox_avg_factor < EPS:
-            bbox_avg_factor = 1
-        losses_bbox = list(map(lambda x: x / bbox_avg_factor, losses_bbox))
-        return dict(
-            loss_cls=losses_cls,
-            loss_bbox=losses_bbox,
-            loss_centerness=loss_centerness)
-
-    def centerness_target(self, anchors, bbox_targets):
-        # only calculate pos centerness targets, otherwise there may be nan
-        gts = self.bbox_coder.decode(anchors, bbox_targets)
-        anchors_cx = (anchors[:, 2] + anchors[:, 0]) / 2
-        anchors_cy = (anchors[:, 3] + anchors[:, 1]) / 2
-        l_ = anchors_cx - gts[:, 0]
-        t_ = anchors_cy - gts[:, 1]
-        r_ = gts[:, 2] - anchors_cx
-        b_ = gts[:, 3] - anchors_cy
-
-        left_right = torch.stack([l_, r_], dim=1)
-        top_bottom = torch.stack([t_, b_], dim=1)
-        centerness = torch.sqrt(
-            (left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) *
-            (top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0]))
-        assert not torch.isnan(centerness).any()
-        return centerness
-
-    @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses'))
-    def get_bboxes(self,
-                   cls_scores,
-                   bbox_preds,
-                   centernesses,
-                   img_metas,
-                   cfg=None,
-                   rescale=False,
-                   with_nms=True):
-        """Transform network output for a batch into bbox predictions.
-
-        Args:
-            cls_scores (list[Tensor]): Box scores for each scale level
-                with shape (N, num_anchors * num_classes, H, W).
-            bbox_preds (list[Tensor]): Box energies / deltas for each scale
-                level with shape (N, num_anchors * 4, H, W).
-            centernesses (list[Tensor]): Centerness for each scale level with
-                shape (N, num_anchors * 1, H, W).
-            img_metas (list[dict]): Meta information of each image, e.g.,
-                image size, scaling factor, etc.
-            cfg (mmcv.Config | None): Test / postprocessing configuration,
-                if None, test_cfg would be used. Default: None.
-            rescale (bool): If True, return boxes in original image space.
-                Default: False.
-            with_nms (bool): If True, do nms before return boxes.
-                Default: True.
-
-        Returns:
-            list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.
-                The first item is an (n, 5) tensor, where 5 represent
-                (tl_x, tl_y, br_x, br_y, score) and the score between 0 and 1.
-                The shape of the second tensor in the tuple is (n,), and
-                each element represents the class label of the corresponding
-                box.
-        """
-        cfg = self.test_cfg if cfg is None else cfg
-        assert len(cls_scores) == len(bbox_preds)
-        num_levels = len(cls_scores)
-        device = cls_scores[0].device
-        featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)]
-        mlvl_anchors = self.anchor_generator.grid_anchors(
-            featmap_sizes, device=device)
-
-        cls_score_list = [cls_scores[i].detach() for i in range(num_levels)]
-        bbox_pred_list = [bbox_preds[i].detach() for i in range(num_levels)]
-        centerness_pred_list = [
-            centernesses[i].detach() for i in range(num_levels)
-        ]
-        img_shapes = [
-            img_metas[i]['img_shape'] for i in range(cls_scores[0].shape[0])
-        ]
-        scale_factors = [
-            img_metas[i]['scale_factor'] for i in range(cls_scores[0].shape[0])
-        ]
-        result_list = self._get_bboxes(cls_score_list, bbox_pred_list,
-                                       centerness_pred_list, mlvl_anchors,
-                                       img_shapes, scale_factors, cfg, rescale,
-                                       with_nms)
-        return result_list
-
-    def _get_bboxes(self,
-                    cls_scores,
-                    bbox_preds,
-                    centernesses,
-                    mlvl_anchors,
-                    img_shapes,
-                    scale_factors,
-                    cfg,
-                    rescale=False,
-                    with_nms=True):
-        """Transform outputs for a single batch item into labeled boxes.
-
-        Args:
-            cls_scores (list[Tensor]): Box scores for a single scale level
-                with shape (N, num_anchors * num_classes, H, W).
-            bbox_preds (list[Tensor]): Box energies / deltas for a single
-                scale level with shape (N, num_anchors * 4, H, W).
-            centernesses (list[Tensor]): Centerness for a single scale level
-                with shape (N, num_anchors * 1, H, W).
-            mlvl_anchors (list[Tensor]): Box reference for a single scale level
-                with shape (num_total_anchors, 4).
-            img_shapes (list[tuple[int]]): Shape of the input image,
-                list[(height, width, 3)].
-            scale_factors (list[ndarray]): Scale factor of the image arrange as
-                (w_scale, h_scale, w_scale, h_scale).
-            cfg (mmcv.Config | None): Test / postprocessing configuration,
-                if None, test_cfg would be used.
-            rescale (bool): If True, return boxes in original image space.
-                Default: False.
-            with_nms (bool): If True, do nms before return boxes.
-                Default: True.
-
-        Returns:
-            list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.
-                The first item is an (n, 5) tensor, where 5 represent
-                (tl_x, tl_y, br_x, br_y, score) and the score between 0 and 1.
-                The shape of the second tensor in the tuple is (n,), and
-                each element represents the class label of the corresponding
-                box.
-        """
-        assert len(cls_scores) == len(bbox_preds) == len(mlvl_anchors)
-        device = cls_scores[0].device
-        batch_size = cls_scores[0].shape[0]
-        # convert to tensor to keep tracing
-        nms_pre_tensor = torch.tensor(
-            cfg.get('nms_pre', -1), device=device, dtype=torch.long)
-        mlvl_bboxes = []
-        mlvl_scores = []
-        mlvl_centerness = []
-        for cls_score, bbox_pred, centerness, anchors in zip(
-                cls_scores, bbox_preds, centernesses, mlvl_anchors):
-            assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
-            scores = cls_score.permute(0, 2, 3, 1).reshape(
-                batch_size, -1, self.cls_out_channels).sigmoid()
-            centerness = centerness.permute(0, 2, 3,
-                                            1).reshape(batch_size,
-                                                       -1).sigmoid()
-            bbox_pred = bbox_pred.permute(0, 2, 3,
-                                          1).reshape(batch_size, -1, 4)
-
-            # Always keep topk op for dynamic input in onnx
-            if nms_pre_tensor > 0 and (torch.onnx.is_in_onnx_export()
-                                       or scores.shape[-2] > nms_pre_tensor):
-                from torch import _shape_as_tensor
-                # keep shape as tensor and get k
-                num_anchor = _shape_as_tensor(scores)[-2].to(device)
|
442 |
-
nms_pre = torch.where(nms_pre_tensor < num_anchor,
|
443 |
-
nms_pre_tensor, num_anchor)
|
444 |
-
|
445 |
-
max_scores, _ = (scores * centerness[..., None]).max(-1)
|
446 |
-
_, topk_inds = max_scores.topk(nms_pre)
|
447 |
-
anchors = anchors[topk_inds, :]
|
448 |
-
batch_inds = torch.arange(batch_size).view(
|
449 |
-
-1, 1).expand_as(topk_inds).long()
|
450 |
-
bbox_pred = bbox_pred[batch_inds, topk_inds, :]
|
451 |
-
scores = scores[batch_inds, topk_inds, :]
|
452 |
-
centerness = centerness[batch_inds, topk_inds]
|
453 |
-
else:
|
454 |
-
anchors = anchors.expand_as(bbox_pred)
|
455 |
-
|
456 |
-
bboxes = self.bbox_coder.decode(
|
457 |
-
anchors, bbox_pred, max_shape=img_shapes)
|
458 |
-
mlvl_bboxes.append(bboxes)
|
459 |
-
mlvl_scores.append(scores)
|
460 |
-
mlvl_centerness.append(centerness)
|
461 |
-
|
462 |
-
batch_mlvl_bboxes = torch.cat(mlvl_bboxes, dim=1)
|
463 |
-
if rescale:
|
464 |
-
batch_mlvl_bboxes /= batch_mlvl_bboxes.new_tensor(
|
465 |
-
scale_factors).unsqueeze(1)
|
466 |
-
batch_mlvl_scores = torch.cat(mlvl_scores, dim=1)
|
467 |
-
batch_mlvl_centerness = torch.cat(mlvl_centerness, dim=1)
|
468 |
-
|
469 |
-
# Set max number of box to be feed into nms in deployment
|
470 |
-
deploy_nms_pre = cfg.get('deploy_nms_pre', -1)
|
471 |
-
if deploy_nms_pre > 0 and torch.onnx.is_in_onnx_export():
|
472 |
-
batch_mlvl_scores, _ = (
|
473 |
-
batch_mlvl_scores *
|
474 |
-
batch_mlvl_centerness.unsqueeze(2).expand_as(batch_mlvl_scores)
|
475 |
-
).max(-1)
|
476 |
-
_, topk_inds = batch_mlvl_scores.topk(deploy_nms_pre)
|
477 |
-
batch_inds = torch.arange(batch_size).view(-1,
|
478 |
-
1).expand_as(topk_inds)
|
479 |
-
batch_mlvl_scores = batch_mlvl_scores[batch_inds, topk_inds, :]
|
480 |
-
batch_mlvl_bboxes = batch_mlvl_bboxes[batch_inds, topk_inds, :]
|
481 |
-
batch_mlvl_centerness = batch_mlvl_centerness[batch_inds,
|
482 |
-
topk_inds]
|
483 |
-
# remind that we set FG labels to [0, num_class-1] since mmdet v2.0
|
484 |
-
# BG cat_id: num_class
|
485 |
-
padding = batch_mlvl_scores.new_zeros(batch_size,
|
486 |
-
batch_mlvl_scores.shape[1], 1)
|
487 |
-
batch_mlvl_scores = torch.cat([batch_mlvl_scores, padding], dim=-1)
|
488 |
-
|
489 |
-
if with_nms:
|
490 |
-
det_results = []
|
491 |
-
for (mlvl_bboxes, mlvl_scores,
|
492 |
-
mlvl_centerness) in zip(batch_mlvl_bboxes, batch_mlvl_scores,
|
493 |
-
batch_mlvl_centerness):
|
494 |
-
det_bbox, det_label = multiclass_nms(
|
495 |
-
mlvl_bboxes,
|
496 |
-
mlvl_scores,
|
497 |
-
cfg.score_thr,
|
498 |
-
cfg.nms,
|
499 |
-
cfg.max_per_img,
|
500 |
-
score_factors=mlvl_centerness)
|
501 |
-
det_results.append(tuple([det_bbox, det_label]))
|
502 |
-
else:
|
503 |
-
det_results = [
|
504 |
-
tuple(mlvl_bs)
|
505 |
-
for mlvl_bs in zip(batch_mlvl_bboxes, batch_mlvl_scores,
|
506 |
-
batch_mlvl_centerness)
|
507 |
-
]
|
508 |
-
return det_results
|
509 |
-
|
510 |
-
def get_targets(self,
|
511 |
-
anchor_list,
|
512 |
-
valid_flag_list,
|
513 |
-
gt_bboxes_list,
|
514 |
-
img_metas,
|
515 |
-
gt_bboxes_ignore_list=None,
|
516 |
-
gt_labels_list=None,
|
517 |
-
label_channels=1,
|
518 |
-
unmap_outputs=True):
|
519 |
-
"""Get targets for ATSS head.
|
520 |
-
|
521 |
-
This method is almost the same as `AnchorHead.get_targets()`. Besides
|
522 |
-
returning the targets as the parent method does, it also returns the
|
523 |
-
anchors as the first element of the returned tuple.
|
524 |
-
"""
|
525 |
-
num_imgs = len(img_metas)
|
526 |
-
assert len(anchor_list) == len(valid_flag_list) == num_imgs
|
527 |
-
|
528 |
-
# anchor number of multi levels
|
529 |
-
num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
|
530 |
-
num_level_anchors_list = [num_level_anchors] * num_imgs
|
531 |
-
|
532 |
-
# concat all level anchors and flags to a single tensor
|
533 |
-
for i in range(num_imgs):
|
534 |
-
assert len(anchor_list[i]) == len(valid_flag_list[i])
|
535 |
-
anchor_list[i] = torch.cat(anchor_list[i])
|
536 |
-
valid_flag_list[i] = torch.cat(valid_flag_list[i])
|
537 |
-
|
538 |
-
# compute targets for each image
|
539 |
-
if gt_bboxes_ignore_list is None:
|
540 |
-
gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
|
541 |
-
if gt_labels_list is None:
|
542 |
-
gt_labels_list = [None for _ in range(num_imgs)]
|
543 |
-
(all_anchors, all_labels, all_label_weights, all_bbox_targets,
|
544 |
-
all_bbox_weights, pos_inds_list, neg_inds_list) = multi_apply(
|
545 |
-
self._get_target_single,
|
546 |
-
anchor_list,
|
547 |
-
valid_flag_list,
|
548 |
-
num_level_anchors_list,
|
549 |
-
gt_bboxes_list,
|
550 |
-
gt_bboxes_ignore_list,
|
551 |
-
gt_labels_list,
|
552 |
-
img_metas,
|
553 |
-
label_channels=label_channels,
|
554 |
-
unmap_outputs=unmap_outputs)
|
555 |
-
# no valid anchors
|
556 |
-
if any([labels is None for labels in all_labels]):
|
557 |
-
return None
|
558 |
-
# sampled anchors of all images
|
559 |
-
num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
|
560 |
-
num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
|
561 |
-
# split targets to a list w.r.t. multiple levels
|
562 |
-
anchors_list = images_to_levels(all_anchors, num_level_anchors)
|
563 |
-
labels_list = images_to_levels(all_labels, num_level_anchors)
|
564 |
-
label_weights_list = images_to_levels(all_label_weights,
|
565 |
-
num_level_anchors)
|
566 |
-
bbox_targets_list = images_to_levels(all_bbox_targets,
|
567 |
-
num_level_anchors)
|
568 |
-
bbox_weights_list = images_to_levels(all_bbox_weights,
|
569 |
-
num_level_anchors)
|
570 |
-
return (anchors_list, labels_list, label_weights_list,
|
571 |
-
bbox_targets_list, bbox_weights_list, num_total_pos,
|
572 |
-
num_total_neg)
|
573 |
-
|
574 |
-
def _get_target_single(self,
|
575 |
-
flat_anchors,
|
576 |
-
valid_flags,
|
577 |
-
num_level_anchors,
|
578 |
-
gt_bboxes,
|
579 |
-
gt_bboxes_ignore,
|
580 |
-
gt_labels,
|
581 |
-
img_meta,
|
582 |
-
label_channels=1,
|
583 |
-
unmap_outputs=True):
|
584 |
-
"""Compute regression, classification targets for anchors in a single
|
585 |
-
image.
|
586 |
-
|
587 |
-
Args:
|
588 |
-
flat_anchors (Tensor): Multi-level anchors of the image, which are
|
589 |
-
concatenated into a single tensor of shape (num_anchors ,4)
|
590 |
-
valid_flags (Tensor): Multi level valid flags of the image,
|
591 |
-
which are concatenated into a single tensor of
|
592 |
-
shape (num_anchors,).
|
593 |
-
num_level_anchors Tensor): Number of anchors of each scale level.
|
594 |
-
gt_bboxes (Tensor): Ground truth bboxes of the image,
|
595 |
-
shape (num_gts, 4).
|
596 |
-
gt_bboxes_ignore (Tensor): Ground truth bboxes to be
|
597 |
-
ignored, shape (num_ignored_gts, 4).
|
598 |
-
gt_labels (Tensor): Ground truth labels of each box,
|
599 |
-
shape (num_gts,).
|
600 |
-
img_meta (dict): Meta info of the image.
|
601 |
-
label_channels (int): Channel of label.
|
602 |
-
unmap_outputs (bool): Whether to map outputs back to the original
|
603 |
-
set of anchors.
|
604 |
-
|
605 |
-
Returns:
|
606 |
-
tuple: N is the number of total anchors in the image.
|
607 |
-
labels (Tensor): Labels of all anchors in the image with shape
|
608 |
-
(N,).
|
609 |
-
label_weights (Tensor): Label weights of all anchor in the
|
610 |
-
image with shape (N,).
|
611 |
-
bbox_targets (Tensor): BBox targets of all anchors in the
|
612 |
-
image with shape (N, 4).
|
613 |
-
bbox_weights (Tensor): BBox weights of all anchors in the
|
614 |
-
image with shape (N, 4)
|
615 |
-
pos_inds (Tensor): Indices of positive anchor with shape
|
616 |
-
(num_pos,).
|
617 |
-
neg_inds (Tensor): Indices of negative anchor with shape
|
618 |
-
(num_neg,).
|
619 |
-
"""
|
620 |
-
inside_flags = anchor_inside_flags(flat_anchors, valid_flags,
|
621 |
-
img_meta['img_shape'][:2],
|
622 |
-
self.train_cfg.allowed_border)
|
623 |
-
if not inside_flags.any():
|
624 |
-
return (None, ) * 7
|
625 |
-
# assign gt and sample anchors
|
626 |
-
anchors = flat_anchors[inside_flags, :]
|
627 |
-
|
628 |
-
num_level_anchors_inside = self.get_num_level_anchors_inside(
|
629 |
-
num_level_anchors, inside_flags)
|
630 |
-
assign_result = self.assigner.assign(anchors, num_level_anchors_inside,
|
631 |
-
gt_bboxes, gt_bboxes_ignore,
|
632 |
-
gt_labels)
|
633 |
-
|
634 |
-
sampling_result = self.sampler.sample(assign_result, anchors,
|
635 |
-
gt_bboxes)
|
636 |
-
|
637 |
-
num_valid_anchors = anchors.shape[0]
|
638 |
-
bbox_targets = torch.zeros_like(anchors)
|
639 |
-
bbox_weights = torch.zeros_like(anchors)
|
640 |
-
labels = anchors.new_full((num_valid_anchors, ),
|
641 |
-
self.num_classes,
|
642 |
-
dtype=torch.long)
|
643 |
-
label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)
|
644 |
-
|
645 |
-
pos_inds = sampling_result.pos_inds
|
646 |
-
neg_inds = sampling_result.neg_inds
|
647 |
-
if len(pos_inds) > 0:
|
648 |
-
if hasattr(self, 'bbox_coder'):
|
649 |
-
pos_bbox_targets = self.bbox_coder.encode(
|
650 |
-
sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes)
|
651 |
-
else:
|
652 |
-
# used in VFNetHead
|
653 |
-
pos_bbox_targets = sampling_result.pos_gt_bboxes
|
654 |
-
bbox_targets[pos_inds, :] = pos_bbox_targets
|
655 |
-
bbox_weights[pos_inds, :] = 1.0
|
656 |
-
if gt_labels is None:
|
657 |
-
# Only rpn gives gt_labels as None
|
658 |
-
# Foreground is the first class since v2.5.0
|
659 |
-
labels[pos_inds] = 0
|
660 |
-
else:
|
661 |
-
labels[pos_inds] = gt_labels[
|
662 |
-
sampling_result.pos_assigned_gt_inds]
|
663 |
-
if self.train_cfg.pos_weight <= 0:
|
664 |
-
label_weights[pos_inds] = 1.0
|
665 |
-
else:
|
666 |
-
label_weights[pos_inds] = self.train_cfg.pos_weight
|
667 |
-
if len(neg_inds) > 0:
|
668 |
-
label_weights[neg_inds] = 1.0
|
669 |
-
|
670 |
-
# map up to original set of anchors
|
671 |
-
if unmap_outputs:
|
672 |
-
num_total_anchors = flat_anchors.size(0)
|
673 |
-
anchors = unmap(anchors, num_total_anchors, inside_flags)
|
674 |
-
labels = unmap(
|
675 |
-
labels, num_total_anchors, inside_flags, fill=self.num_classes)
|
676 |
-
label_weights = unmap(label_weights, num_total_anchors,
|
677 |
-
inside_flags)
|
678 |
-
bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)
|
679 |
-
bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)
|
680 |
-
|
681 |
-
return (anchors, labels, label_weights, bbox_targets, bbox_weights,
|
682 |
-
pos_inds, neg_inds)
|
683 |
-
|
684 |
-
def get_num_level_anchors_inside(self, num_level_anchors, inside_flags):
|
685 |
-
split_inside_flags = torch.split(inside_flags, num_level_anchors)
|
686 |
-
num_level_anchors_inside = [
|
687 |
-
int(flags.sum()) for flags in split_inside_flags
|
688 |
-
]
|
689 |
-
return num_level_anchors_inside
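
A minimal standalone sketch of the centerness target computed by `centerness_target` above, with toy numbers (the real method first decodes `bbox_targets` with the head's `bbox_coder`): the score is 1.0 when the anchor center sits exactly at the ground-truth box center and decays toward 0 near the box border.

import torch

anchors = torch.tensor([[40., 40., 60., 60.]])  # (x1, y1, x2, y2), toy anchor
gts = torch.tensor([[30., 35., 80., 75.]])      # toy decoded ground-truth box

anchors_cx = (anchors[:, 2] + anchors[:, 0]) / 2         # 50
anchors_cy = (anchors[:, 3] + anchors[:, 1]) / 2         # 50
l_, t_ = anchors_cx - gts[:, 0], anchors_cy - gts[:, 1]  # 20, 15
r_, b_ = gts[:, 2] - anchors_cx, gts[:, 3] - anchors_cy  # 30, 25

left_right = torch.stack([l_, r_], dim=1)
top_bottom = torch.stack([t_, b_], dim=1)
centerness = torch.sqrt(
    (left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) *
    (top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0]))
print(centerness)  # tensor([0.6325]) == sqrt((20/30) * (15/25))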
spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/point_rend_roi_head.py
DELETED
@@ -1,218 +0,0 @@
-# Modified from https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend  # noqa
-
-import torch
-import torch.nn.functional as F
-from mmcv.ops import point_sample, rel_roi_point_to_rel_img_point
-
-from mmdet.core import bbox2roi, bbox_mapping, merge_aug_masks
-from .. import builder
-from ..builder import HEADS
-from .standard_roi_head import StandardRoIHead
-
-
-@HEADS.register_module()
-class PointRendRoIHead(StandardRoIHead):
-    """`PointRend <https://arxiv.org/abs/1912.08193>`_."""
-
-    def __init__(self, point_head, *args, **kwargs):
-        super().__init__(*args, **kwargs)
-        assert self.with_bbox and self.with_mask
-        self.init_point_head(point_head)
-
-    def init_point_head(self, point_head):
-        """Initialize ``point_head``"""
-        self.point_head = builder.build_head(point_head)
-
-    def init_weights(self, pretrained):
-        """Initialize the weights in head.
-
-        Args:
-            pretrained (str, optional): Path to pre-trained weights.
-        """
-        super().init_weights(pretrained)
-        self.point_head.init_weights()
-
-    def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks,
-                            img_metas):
-        """Run forward function and calculate loss for mask head and point head
-        in training."""
-        mask_results = super()._mask_forward_train(x, sampling_results,
-                                                   bbox_feats, gt_masks,
-                                                   img_metas)
-        if mask_results['loss_mask'] is not None:
-            loss_point = self._mask_point_forward_train(
-                x, sampling_results, mask_results['mask_pred'], gt_masks,
-                img_metas)
-            mask_results['loss_mask'].update(loss_point)
-
-        return mask_results
-
-    def _mask_point_forward_train(self, x, sampling_results, mask_pred,
-                                  gt_masks, img_metas):
-        """Run forward function and calculate loss for point head in
-        training."""
-        pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
-        rel_roi_points = self.point_head.get_roi_rel_points_train(
-            mask_pred, pos_labels, cfg=self.train_cfg)
-        rois = bbox2roi([res.pos_bboxes for res in sampling_results])
-
-        fine_grained_point_feats = self._get_fine_grained_point_feats(
-            x, rois, rel_roi_points, img_metas)
-        coarse_point_feats = point_sample(mask_pred, rel_roi_points)
-        mask_point_pred = self.point_head(fine_grained_point_feats,
-                                          coarse_point_feats)
-        mask_point_target = self.point_head.get_targets(
-            rois, rel_roi_points, sampling_results, gt_masks, self.train_cfg)
-        loss_mask_point = self.point_head.loss(mask_point_pred,
-                                               mask_point_target, pos_labels)
-
-        return loss_mask_point
-
-    def _get_fine_grained_point_feats(self, x, rois, rel_roi_points,
-                                      img_metas):
-        """Sample fine grained feats from each level feature map and
-        concatenate them together."""
-        num_imgs = len(img_metas)
-        fine_grained_feats = []
-        for idx in range(self.mask_roi_extractor.num_inputs):
-            feats = x[idx]
-            spatial_scale = 1. / float(
-                self.mask_roi_extractor.featmap_strides[idx])
-            point_feats = []
-            for batch_ind in range(num_imgs):
-                # unravel batch dim
-                feat = feats[batch_ind].unsqueeze(0)
-                inds = (rois[:, 0].long() == batch_ind)
-                if inds.any():
-                    rel_img_points = rel_roi_point_to_rel_img_point(
-                        rois[inds], rel_roi_points[inds], feat.shape[2:],
-                        spatial_scale).unsqueeze(0)
-                    point_feat = point_sample(feat, rel_img_points)
-                    point_feat = point_feat.squeeze(0).transpose(0, 1)
-                    point_feats.append(point_feat)
-            fine_grained_feats.append(torch.cat(point_feats, dim=0))
-        return torch.cat(fine_grained_feats, dim=1)
-
-    def _mask_point_forward_test(self, x, rois, label_pred, mask_pred,
-                                 img_metas):
-        """Mask refining process with point head in testing."""
-        refined_mask_pred = mask_pred.clone()
-        for subdivision_step in range(self.test_cfg.subdivision_steps):
-            refined_mask_pred = F.interpolate(
-                refined_mask_pred,
-                scale_factor=self.test_cfg.scale_factor,
-                mode='bilinear',
-                align_corners=False)
-            # If `subdivision_num_points` is larger or equal to the
-            # resolution of the next step, then we can skip this step
-            num_rois, channels, mask_height, mask_width = \
-                refined_mask_pred.shape
-            if (self.test_cfg.subdivision_num_points >=
-                    self.test_cfg.scale_factor**2 * mask_height * mask_width
-                    and
-                    subdivision_step < self.test_cfg.subdivision_steps - 1):
-                continue
-            point_indices, rel_roi_points = \
-                self.point_head.get_roi_rel_points_test(
-                    refined_mask_pred, label_pred, cfg=self.test_cfg)
-            fine_grained_point_feats = self._get_fine_grained_point_feats(
-                x, rois, rel_roi_points, img_metas)
-            coarse_point_feats = point_sample(mask_pred, rel_roi_points)
-            mask_point_pred = self.point_head(fine_grained_point_feats,
-                                              coarse_point_feats)
-
-            point_indices = point_indices.unsqueeze(1).expand(-1, channels, -1)
-            refined_mask_pred = refined_mask_pred.reshape(
-                num_rois, channels, mask_height * mask_width)
-            refined_mask_pred = refined_mask_pred.scatter_(
-                2, point_indices, mask_point_pred)
-            refined_mask_pred = refined_mask_pred.view(num_rois, channels,
-                                                       mask_height, mask_width)
-
-        return refined_mask_pred
-
-    def simple_test_mask(self,
-                         x,
-                         img_metas,
-                         det_bboxes,
-                         det_labels,
-                         rescale=False):
-        """Obtain mask prediction without augmentation."""
-        ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)
-        scale_factors = tuple(meta['scale_factor'] for meta in img_metas)
-        num_imgs = len(det_bboxes)
-        if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):
-            segm_results = [[[] for _ in range(self.mask_head.num_classes)]
-                            for _ in range(num_imgs)]
-        else:
-            # if det_bboxes is rescaled to the original image size, we need to
-            # rescale it back to the testing scale to obtain RoIs.
-            if rescale and not isinstance(scale_factors[0], float):
-                scale_factors = [
-                    torch.from_numpy(scale_factor).to(det_bboxes[0].device)
-                    for scale_factor in scale_factors
-                ]
-            _bboxes = [
-                det_bboxes[i][:, :4] *
-                scale_factors[i] if rescale else det_bboxes[i][:, :4]
-                for i in range(len(det_bboxes))
-            ]
-            mask_rois = bbox2roi(_bboxes)
-            mask_results = self._mask_forward(x, mask_rois)
-            # split batch mask prediction back to each image
-            mask_pred = mask_results['mask_pred']
-            num_mask_roi_per_img = [len(det_bbox) for det_bbox in det_bboxes]
-            mask_preds = mask_pred.split(num_mask_roi_per_img, 0)
-            mask_rois = mask_rois.split(num_mask_roi_per_img, 0)
-
-            # apply mask post-processing to each image individually
-            segm_results = []
-            for i in range(num_imgs):
-                if det_bboxes[i].shape[0] == 0:
-                    segm_results.append(
-                        [[] for _ in range(self.mask_head.num_classes)])
-                else:
-                    x_i = [xx[[i]] for xx in x]
-                    mask_rois_i = mask_rois[i]
-                    mask_rois_i[:, 0] = 0  # TODO: remove this hack
-                    mask_pred_i = self._mask_point_forward_test(
-                        x_i, mask_rois_i, det_labels[i], mask_preds[i],
-                        [img_metas])
-                    segm_result = self.mask_head.get_seg_masks(
-                        mask_pred_i, _bboxes[i], det_labels[i], self.test_cfg,
-                        ori_shapes[i], scale_factors[i], rescale)
-                    segm_results.append(segm_result)
-        return segm_results
-
-    def aug_test_mask(self, feats, img_metas, det_bboxes, det_labels):
-        """Test for mask head with test time augmentation."""
-        if det_bboxes.shape[0] == 0:
-            segm_result = [[] for _ in range(self.mask_head.num_classes)]
-        else:
-            aug_masks = []
-            for x, img_meta in zip(feats, img_metas):
-                img_shape = img_meta[0]['img_shape']
-                scale_factor = img_meta[0]['scale_factor']
-                flip = img_meta[0]['flip']
-                _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,
-                                       scale_factor, flip)
-                mask_rois = bbox2roi([_bboxes])
-                mask_results = self._mask_forward(x, mask_rois)
-                mask_results['mask_pred'] = self._mask_point_forward_test(
-                    x, mask_rois, det_labels, mask_results['mask_pred'],
-                    img_metas)
-                # convert to numpy array to save memory
-                aug_masks.append(
-                    mask_results['mask_pred'].sigmoid().cpu().numpy())
-            merged_masks = merge_aug_masks(aug_masks, img_metas, self.test_cfg)
-
-            ori_shape = img_metas[0][0]['ori_shape']
-            segm_result = self.mask_head.get_seg_masks(
-                merged_masks,
-                det_bboxes,
-                det_labels,
-                self.test_cfg,
-                ori_shape,
-                scale_factor=1.0,
-                rescale=False)
-        return segm_result
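
The refinement loop in `_mask_point_forward_test` is the core of PointRend inference: upsample the coarse mask, pick the most uncertain points, and overwrite only those logits with the point head's predictions. A rough standalone sketch of one subdivision step on dummy tensors (the uncertainty heuristic and the random "refined" logits stand in for `point_head.get_roi_rel_points_test` and the point head itself):

import torch
import torch.nn.functional as F

num_rois, channels, h, w = 2, 1, 7, 7
mask_pred = torch.randn(num_rois, channels, h, w)  # coarse mask logits

refined = F.interpolate(
    mask_pred, scale_factor=2, mode='bilinear', align_corners=False)
num_rois, channels, mh, mw = refined.shape

# a logit near 0 is maximally uncertain; refine only the top-k such points
uncertainty = -refined.abs().reshape(num_rois, channels, mh * mw)
num_points = 16
point_indices = uncertainty.topk(num_points, dim=2).indices  # (N, C, k)

new_logits = torch.randn(num_rois, channels, num_points)     # stand-in point head
refined = refined.reshape(num_rois, channels, mh * mw)
refined.scatter_(2, point_indices, new_logits)               # same scatter as above
refined = refined.view(num_rois, channels, mh, mw)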
spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r18-d8_512x1024_80k_cityscapes.py
DELETED
@@ -1,9 +0,0 @@
-_base_ = './deeplabv3_r50-d8_512x1024_80k_cityscapes.py'
-model = dict(
-    pretrained='open-mmlab://resnet18_v1c',
-    backbone=dict(depth=18),
-    decode_head=dict(
-        in_channels=512,
-        channels=128,
-    ),
-    auxiliary_head=dict(in_channels=256, channels=64))
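
This config relies on MMSegmentation's `_base_` inheritance: the dict above is merged recursively into the ResNet-50 base file it names, so only the ResNet-18-specific fields (backbone depth, thinner head channels) are restated. A quick way to inspect the fully merged result (the relative path assumes a local checkout of the configs tree):

from mmcv import Config

cfg = Config.fromfile(
    'configs/deeplabv3/deeplabv3_r18-d8_512x1024_80k_cityscapes.py')
print(cfg.model.backbone.depth)        # 18, overriding the base's 50
print(cfg.model.decode_head.channels)  # 128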
spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_40k_cityscapes.py
DELETED
@@ -1,9 +0,0 @@
-_base_ = [
-    '../_base_/models/deeplabv3plus_r50-d8.py',
-    '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py',
-    '../_base_/schedules/schedule_40k.py'
-]
-model = dict(
-    decode_head=dict(align_corners=True),
-    auxiliary_head=dict(align_corners=True),
-    test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513)))
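
In `slide` test mode, inference runs a 769x769 window across the image with a 513-pixel stride and averages logits where windows overlap. A sketch of the resulting window-grid arithmetic (pure Python; the image size is illustrative):

import math

def num_windows(size, crop=769, stride=513):
    # ceil((size - crop) / stride) + 1 windows cover one axis
    return max(math.ceil((size - crop) / stride), 0) + 1

# e.g. a 1025x2049 input (Cityscapes resized for the 769 crop setting)
print(num_windows(1025), num_windows(2049))  # 2 4 -> 8 overlapping crops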
spaces/AnimalEquality/chatbot/_proc/_docs/lchain_tool.html
DELETED
@@ -1,937 +0,0 @@
|
|
1 |
-
<!DOCTYPE html>
|
2 |
-
<html xmlns="http://www.w3.org/1999/xhtml" lang="en" xml:lang="en"><head>
|
3 |
-
|
4 |
-
<meta charset="utf-8">
|
5 |
-
<meta name="generator" content="quarto-1.3.361">
|
6 |
-
|
7 |
-
<meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes">
|
8 |
-
|
9 |
-
<meta name="description" content="Exploring Langchain Tool capabilities">
|
10 |
-
|
11 |
-
<title>lv-recipe-chatbot - lchain_tool</title>
|
12 |
-
<style>
|
13 |
-
code{white-space: pre-wrap;}
|
14 |
-
span.smallcaps{font-variant: small-caps;}
|
15 |
-
div.columns{display: flex; gap: min(4vw, 1.5em);}
|
16 |
-
div.column{flex: auto; overflow-x: auto;}
|
17 |
-
div.hanging-indent{margin-left: 1.5em; text-indent: -1.5em;}
|
18 |
-
ul.task-list{list-style: none;}
|
19 |
-
ul.task-list li input[type="checkbox"] {
|
20 |
-
width: 0.8em;
|
21 |
-
margin: 0 0.8em 0.2em -1em; /* quarto-specific, see https://github.com/quarto-dev/quarto-cli/issues/4556 */
|
22 |
-
vertical-align: middle;
|
23 |
-
}
|
24 |
-
/* CSS for syntax highlighting */
|
25 |
-
pre > code.sourceCode { white-space: pre; position: relative; }
|
26 |
-
pre > code.sourceCode > span { display: inline-block; line-height: 1.25; }
|
27 |
-
pre > code.sourceCode > span:empty { height: 1.2em; }
|
28 |
-
.sourceCode { overflow: visible; }
|
29 |
-
code.sourceCode > span { color: inherit; text-decoration: inherit; }
|
30 |
-
div.sourceCode { margin: 1em 0; }
|
31 |
-
pre.sourceCode { margin: 0; }
|
32 |
-
@media screen {
|
33 |
-
div.sourceCode { overflow: auto; }
|
34 |
-
}
|
35 |
-
@media print {
|
36 |
-
pre > code.sourceCode { white-space: pre-wrap; }
|
37 |
-
pre > code.sourceCode > span { text-indent: -5em; padding-left: 5em; }
|
38 |
-
}
|
39 |
-
pre.numberSource code
|
40 |
-
{ counter-reset: source-line 0; }
|
41 |
-
pre.numberSource code > span
|
42 |
-
{ position: relative; left: -4em; counter-increment: source-line; }
|
43 |
-
pre.numberSource code > span > a:first-child::before
|
44 |
-
{ content: counter(source-line);
|
45 |
-
position: relative; left: -1em; text-align: right; vertical-align: baseline;
|
46 |
-
border: none; display: inline-block;
|
47 |
-
-webkit-touch-callout: none; -webkit-user-select: none;
|
48 |
-
-khtml-user-select: none; -moz-user-select: none;
|
49 |
-
-ms-user-select: none; user-select: none;
|
50 |
-
padding: 0 4px; width: 4em;
|
51 |
-
}
|
52 |
-
pre.numberSource { margin-left: 3em; padding-left: 4px; }
|
53 |
-
div.sourceCode
|
54 |
-
{ }
|
55 |
-
@media screen {
|
56 |
-
pre > code.sourceCode > span > a:first-child::before { text-decoration: underline; }
|
57 |
-
}
|
58 |
-
</style>
|
59 |
-
|
60 |
-
|
61 |
-
<script src="site_libs/quarto-nav/quarto-nav.js"></script>
|
62 |
-
<script src="site_libs/quarto-nav/headroom.min.js"></script>
|
63 |
-
<script src="site_libs/clipboard/clipboard.min.js"></script>
|
64 |
-
<script src="site_libs/quarto-search/autocomplete.umd.js"></script>
|
65 |
-
<script src="site_libs/quarto-search/fuse.min.js"></script>
|
66 |
-
<script src="site_libs/quarto-search/quarto-search.js"></script>
|
67 |
-
<meta name="quarto:offset" content="./">
|
68 |
-
<script src="site_libs/quarto-html/quarto.js"></script>
|
69 |
-
<script src="site_libs/quarto-html/popper.min.js"></script>
|
70 |
-
<script src="site_libs/quarto-html/tippy.umd.min.js"></script>
|
71 |
-
<script src="site_libs/quarto-html/anchor.min.js"></script>
|
72 |
-
<link href="site_libs/quarto-html/tippy.css" rel="stylesheet">
|
73 |
-
<link href="site_libs/quarto-html/quarto-syntax-highlighting.css" rel="stylesheet" id="quarto-text-highlighting-styles">
|
74 |
-
<script src="site_libs/bootstrap/bootstrap.min.js"></script>
|
75 |
-
<link href="site_libs/bootstrap/bootstrap-icons.css" rel="stylesheet">
|
76 |
-
<link href="site_libs/bootstrap/bootstrap.min.css" rel="stylesheet" id="quarto-bootstrap" data-mode="light">
|
77 |
-
<script id="quarto-search-options" type="application/json">{
|
78 |
-
"location": "navbar",
|
79 |
-
"copy-button": false,
|
80 |
-
"collapse-after": 3,
|
81 |
-
"panel-placement": "end",
|
82 |
-
"type": "overlay",
|
83 |
-
"limit": 20,
|
84 |
-
"language": {
|
85 |
-
"search-no-results-text": "No results",
|
86 |
-
"search-matching-documents-text": "matching documents",
|
87 |
-
"search-copy-link-title": "Copy link to search",
|
88 |
-
"search-hide-matches-text": "Hide additional matches",
|
89 |
-
"search-more-match-text": "more match in this document",
|
90 |
-
"search-more-matches-text": "more matches in this document",
|
91 |
-
"search-clear-button-title": "Clear",
|
92 |
-
"search-detached-cancel-button-title": "Cancel",
|
93 |
-
"search-submit-button-title": "Submit",
|
94 |
-
"search-label": "Search"
|
95 |
-
}
|
96 |
-
}</script>
|
97 |
-
<script src="https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.6/require.min.js" integrity="sha512-c3Nl8+7g4LMSTdrm621y7kf9v3SDPnhxLNhcjFJbKECVnmZHTdo+IRO05sNLTH/D3vA6u1X32ehoLC7WFVdheg==" crossorigin="anonymous"></script>
|
98 |
-
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/3.5.1/jquery.min.js" integrity="sha512-bLT0Qm9VnAYZDflyKcBaQ2gg0hSYNQrJ8RilYldYQ1FxQYoCLtUjuuRuZo+fjqhx/qtq/1itJ0C2ejDxltZVFg==" crossorigin="anonymous"></script>
|
99 |
-
<script type="application/javascript">define('jquery', [],function() {return window.jQuery;})</script>
|
100 |
-
|
101 |
-
|
102 |
-
<link rel="stylesheet" href="styles.css">
|
103 |
-
<meta property="og:title" content="lv-recipe-chatbot - lchain_tool">
|
104 |
-
<meta property="og:description" content="Exploring Langchain Tool capabilities">
|
105 |
-
<meta property="og:image" content="https://serpapi.com/searches/6480db18c56d93170a8e715f/images/6f34b4708ae4dd36a28ca4ca4a3abf6af168f575eef7bd2e8f81a12e175fcf53.jpeg">
|
106 |
-
<meta property="og:site-name" content="lv-recipe-chatbot">
|
107 |
-
<meta name="twitter:title" content="lv-recipe-chatbot - lchain_tool">
|
108 |
-
<meta name="twitter:description" content="Exploring Langchain Tool capabilities">
|
109 |
-
<meta name="twitter:image" content="https://serpapi.com/searches/6480db18c56d93170a8e715f/images/6f34b4708ae4dd36a28ca4ca4a3abf6af168f575eef7bd2e8f81a12e175fcf53.jpeg">
|
110 |
-
<meta name="twitter:card" content="summary_large_image">
|
111 |
-
</head>
|
112 |
-
|
113 |
-
<body class="nav-sidebar floating nav-fixed">
|
114 |
-
|
115 |
-
<div id="quarto-search-results"></div>
|
116 |
-
<header id="quarto-header" class="headroom fixed-top">
|
117 |
-
<nav class="navbar navbar-expand-lg navbar-dark ">
|
118 |
-
<div class="navbar-container container-fluid">
|
119 |
-
<div class="navbar-brand-container">
|
120 |
-
<a class="navbar-brand" href="./index.html">
|
121 |
-
<span class="navbar-title">lv-recipe-chatbot</span>
|
122 |
-
</a>
|
123 |
-
</div>
|
124 |
-
<div class="quarto-navbar-tools ms-auto">
|
125 |
-
</div>
|
126 |
-
<div id="quarto-search" class="" title="Search"></div>
|
127 |
-
</div> <!-- /container-fluid -->
|
128 |
-
</nav>
|
129 |
-
<nav class="quarto-secondary-nav">
|
130 |
-
<div class="container-fluid d-flex">
|
131 |
-
<button type="button" class="quarto-btn-toggle btn" data-bs-toggle="collapse" data-bs-target="#quarto-sidebar,#quarto-sidebar-glass" aria-controls="quarto-sidebar" aria-expanded="false" aria-label="Toggle sidebar navigation" onclick="if (window.quartoToggleHeadroom) { window.quartoToggleHeadroom(); }">
|
132 |
-
<i class="bi bi-layout-text-sidebar-reverse"></i>
|
133 |
-
</button>
|
134 |
-
<nav class="quarto-page-breadcrumbs" aria-label="breadcrumb"><ol class="breadcrumb"><li class="breadcrumb-item"><a href="./lchain_tool.html">lchain_tool</a></li></ol></nav>
|
135 |
-
<a class="flex-grow-1" role="button" data-bs-toggle="collapse" data-bs-target="#quarto-sidebar,#quarto-sidebar-glass" aria-controls="quarto-sidebar" aria-expanded="false" aria-label="Toggle sidebar navigation" onclick="if (window.quartoToggleHeadroom) { window.quartoToggleHeadroom(); }">
|
136 |
-
</a>
|
137 |
-
</div>
|
138 |
-
</nav>
|
139 |
-
</header>
|
140 |
-
<!-- content -->
|
141 |
-
<div id="quarto-content" class="quarto-container page-columns page-rows-contents page-layout-article page-navbar">
|
142 |
-
<!-- sidebar -->
|
143 |
-
<nav id="quarto-sidebar" class="sidebar collapse collapse-horizontal sidebar-navigation floating overflow-auto">
|
144 |
-
<div class="sidebar-menu-container">
|
145 |
-
<ul class="list-unstyled mt-1">
|
146 |
-
<li class="sidebar-item">
|
147 |
-
<div class="sidebar-item-container">
|
148 |
-
<a href="./index.html" class="sidebar-item-text sidebar-link">
|
149 |
-
<span class="menu-text">lv-recipe-chatbot</span></a>
|
150 |
-
</div>
|
151 |
-
</li>
|
152 |
-
<li class="sidebar-item">
|
153 |
-
<div class="sidebar-item-container">
|
154 |
-
<a href="./engineer_prompt.html" class="sidebar-item-text sidebar-link">
|
155 |
-
<span class="menu-text">engineer_prompt</span></a>
|
156 |
-
</div>
|
157 |
-
</li>
|
158 |
-
<li class="sidebar-item">
|
159 |
-
<div class="sidebar-item-container">
|
160 |
-
<a href="./app.html" class="sidebar-item-text sidebar-link">
|
161 |
-
<span class="menu-text">app</span></a>
|
162 |
-
</div>
|
163 |
-
</li>
|
164 |
-
<li class="sidebar-item">
|
165 |
-
<div class="sidebar-item-container">
|
166 |
-
<a href="./lchain_tool.html" class="sidebar-item-text sidebar-link active">
|
167 |
-
<span class="menu-text">lchain_tool</span></a>
|
168 |
-
</div>
|
169 |
-
</li>
|
170 |
-
<li class="sidebar-item">
|
171 |
-
<div class="sidebar-item-container">
|
172 |
-
<a href="./ingredient_vision.html" class="sidebar-item-text sidebar-link">
|
173 |
-
<span class="menu-text">ingredient_vision</span></a>
|
174 |
-
</div>
|
175 |
-
</li>
|
176 |
-
<li class="sidebar-item">
|
177 |
-
<div class="sidebar-item-container">
|
178 |
-
<a href="./edamam_api.html" class="sidebar-item-text sidebar-link">
|
179 |
-
<span class="menu-text">edamam_api</span></a>
|
180 |
-
</div>
|
181 |
-
</li>
|
182 |
-
</ul>
|
183 |
-
</div>
|
184 |
-
</nav>
|
185 |
-
<div id="quarto-sidebar-glass" data-bs-toggle="collapse" data-bs-target="#quarto-sidebar,#quarto-sidebar-glass"></div>
|
186 |
-
<!-- margin-sidebar -->
|
187 |
-
<div id="quarto-margin-sidebar" class="sidebar margin-sidebar">
|
188 |
-
<nav id="TOC" role="doc-toc" class="toc-active">
|
189 |
-
<h2 id="toc-title">On this page</h2>
|
190 |
-
|
191 |
-
<ul>
|
192 |
-
<li><a href="#serpapiwrapper" id="toc-serpapiwrapper" class="nav-link active" data-scroll-target="#serpapiwrapper">SerpAPIWrapper</a></li>
|
193 |
-
<li><a href="#recipeserpapiwrapper" id="toc-recipeserpapiwrapper" class="nav-link" data-scroll-target="#recipeserpapiwrapper">RecipeSerpAPIWrapper</a></li>
|
194 |
-
<li><a href="#load_tools" id="toc-load_tools" class="nav-link" data-scroll-target="#load_tools">load_tools</a></li>
|
195 |
-
</ul>
|
196 |
-
<div class="toc-actions"><div><i class="bi bi-git"></i></div><div class="action-links"><p><a href="https://gitlab.com/animalequality/lv-recipe-chatbot/issues/new" class="toc-action">Report an issue</a></p></div></div></nav>
|
197 |
-
</div>
|
198 |
-
<!-- main -->
|
199 |
-
<main class="content" id="quarto-document-content">
|
200 |
-
|
201 |
-
<header id="title-block-header" class="quarto-title-block default">
|
202 |
-
<div class="quarto-title">
|
203 |
-
<h1 class="title">lchain_tool</h1>
|
204 |
-
</div>
|
205 |
-
|
206 |
-
<div>
|
207 |
-
<div class="description">
|
208 |
-
Exploring Langchain Tool capabilities
|
209 |
-
</div>
|
210 |
-
</div>
|
211 |
-
|
212 |
-
|
213 |
-
<div class="quarto-title-meta">
|
214 |
-
|
215 |
-
|
216 |
-
|
217 |
-
|
218 |
-
</div>
|
219 |
-
|
220 |
-
|
221 |
-
</header>
|
222 |
-
|
223 |
-
<!-- WARNING: THIS FILE WAS AUTOGENERATED! DO NOT EDIT! -->
|
224 |
-
<div class="cell">
|
225 |
-
<div class="sourceCode cell-code" id="cb1"><pre class="sourceCode python code-with-copy"><code class="sourceCode python"><span id="cb1-1"><a href="#cb1-1" aria-hidden="true" tabindex="-1"></a><span class="im">from</span> dotenv <span class="im">import</span> load_dotenv</span></code><button title="Copy to Clipboard" class="code-copy-button"><i class="bi"></i></button></pre></div>
|
226 |
-
</div>
|
227 |
-
<div class="cell">
|
228 |
-
<div class="sourceCode cell-code" id="cb2"><pre class="sourceCode python code-with-copy"><code class="sourceCode python"><span id="cb2-1"><a href="#cb2-1" aria-hidden="true" tabindex="-1"></a>load_dotenv()</span></code><button title="Copy to Clipboard" class="code-copy-button"><i class="bi"></i></button></pre></div>
|
229 |
-
<div class="cell-output cell-output-display">
|
230 |
-
<pre><code>True</code></pre>
|
231 |
-
</div>
|
232 |
-
</div>
|
233 |
-
<div class="cell">
|
234 |
-
<div class="sourceCode cell-code" id="cb4"><pre class="sourceCode python code-with-copy"><code class="sourceCode python"><span id="cb4-1"><a href="#cb4-1" aria-hidden="true" tabindex="-1"></a>llm <span class="op">=</span> ChatOpenAI(temperature<span class="op">=</span><span class="dv">0</span>)</span></code><button title="Copy to Clipboard" class="code-copy-button"><i class="bi"></i></button></pre></div>
|
235 |
-
</div>
|
236 |
-
<div class="cell">
|
237 |
-
<div class="sourceCode cell-code" id="cb5"><pre class="sourceCode python code-with-copy"><code class="sourceCode python"><span id="cb5-1"><a href="#cb5-1" aria-hidden="true" tabindex="-1"></a>tools <span class="op">=</span> load_tools([<span class="st">"llm-math"</span>], llm<span class="op">=</span>llm)</span>
|
238 |
-
<span id="cb5-2"><a href="#cb5-2" aria-hidden="true" tabindex="-1"></a>agent <span class="op">=</span> initialize_agent(</span>
|
239 |
-
<span id="cb5-3"><a href="#cb5-3" aria-hidden="true" tabindex="-1"></a> tools,</span>
|
240 |
-
<span id="cb5-4"><a href="#cb5-4" aria-hidden="true" tabindex="-1"></a> llm,</span>
|
241 |
-
<span id="cb5-5"><a href="#cb5-5" aria-hidden="true" tabindex="-1"></a> agent<span class="op">=</span>AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION,</span>
|
242 |
-
<span id="cb5-6"><a href="#cb5-6" aria-hidden="true" tabindex="-1"></a> handle_parsing_errors<span class="op">=</span><span class="va">True</span>,</span>
|
243 |
-
<span id="cb5-7"><a href="#cb5-7" aria-hidden="true" tabindex="-1"></a> verbose<span class="op">=</span><span class="va">True</span>,</span>
|
244 |
-
<span id="cb5-8"><a href="#cb5-8" aria-hidden="true" tabindex="-1"></a>)</span></code><button title="Copy to Clipboard" class="code-copy-button"><i class="bi"></i></button></pre></div>
|
245 |
-
</div>
|
246 |
-
<div class="cell">
|
247 |
-
<div class="sourceCode cell-code" id="cb6"><pre class="sourceCode python code-with-copy"><code class="sourceCode python"><span id="cb6-1"><a href="#cb6-1" aria-hidden="true" tabindex="-1"></a>agent(<span class="st">"What is the 3</span><span class="sc">% o</span><span class="st">f of 300 * 30?"</span>)</span></code><button title="Copy to Clipboard" class="code-copy-button"><i class="bi"></i></button></pre></div>
|
248 |
-
<div class="cell-output cell-output-stdout">
|
249 |
-
<pre><code>
|
250 |
-
|
251 |
-
> Entering new AgentExecutor chain...
|
252 |
-
We can calculate this using the Calculator tool.
|
253 |
-
|
254 |
-
Action:
|
255 |
-
```
|
256 |
-
{
|
257 |
-
"action": "Calculator",
|
258 |
-
"action_input": "0.03 * 300 * 30"
|
259 |
-
}
|
260 |
-
```
|
261 |
-
|
262 |
-
|
263 |
-
Observation: Answer: 270.0
|
264 |
-
Thought:Could not parse LLM output: This is the correct answer to the question.
|
265 |
-
Observation: Invalid or incomplete response
|
266 |
-
Thought:Let me try the same action again.
|
267 |
-
|
268 |
-
Action:
|
269 |
-
```
|
270 |
-
{
|
271 |
-
"action": "Calculator",
|
272 |
-
"action_input": "0.03 * 300 * 30"
|
273 |
-
}
|
274 |
-
```
|
275 |
-
|
276 |
-
|
277 |
-
Observation: Answer: 270.0
|
278 |
-
Thought:Could not parse LLM output: The tool gave the same answer, so I can be confident that it is correct.
|
279 |
-
|
280 |
-
Observation: Invalid or incomplete response
|
281 |
-
Thought:There seems to be an issue with the LLM response. Let me try a different way to calculate the answer.
|
282 |
-
|
283 |
-
Action:
|
284 |
-
```
|
285 |
-
{
|
286 |
-
"action": "Calculator",
|
287 |
-
"action_input": "300 * 30 * 0.03"
|
288 |
-
}
|
289 |
-
```
|
290 |
-
|
291 |
-
|
292 |
-
Observation: Answer: 270.0
|
293 |
-
Thought:I have successfully calculated the answer to the question using the calculator tool.
|
294 |
-
|
295 |
-
Final Answer: 270.0
|
296 |
-
|
297 |
-
> Finished chain.</code></pre>
|
298 |
-
</div>
|
299 |
-
<div class="cell-output cell-output-display">
|
300 |
-
<pre><code>{'input': 'What is the 3% of of 300 * 30?', 'output': '270.0'}</code></pre>
|
301 |
-
</div>
|
302 |
-
</div>
|
303 |
-
<p><a href="https://python.langchain.com/en/latest/modules/agents/tools/examples/google_serper.html#searching-for-google-images">SerpAPI Google Images</a></p>
|
304 |
-
<div class="cell">
|
305 |
-
<div class="sourceCode cell-code" id="cb9"><pre class="sourceCode python code-with-copy"><code class="sourceCode python"><span id="cb9-1"><a href="#cb9-1" aria-hidden="true" tabindex="-1"></a>params <span class="op">=</span> {</span>
|
306 |
-
<span id="cb9-2"><a href="#cb9-2" aria-hidden="true" tabindex="-1"></a> <span class="st">"q"</span>: <span class="st">"Vegan pad thai recipes"</span>,</span>
|
307 |
-
<span id="cb9-3"><a href="#cb9-3" aria-hidden="true" tabindex="-1"></a> <span class="st">"location"</span>: <span class="st">"United States"</span>,</span>
|
308 |
-
<span id="cb9-4"><a href="#cb9-4" aria-hidden="true" tabindex="-1"></a> <span class="st">"hl"</span>: <span class="st">"en"</span>,</span>
|
309 |
-
<span id="cb9-5"><a href="#cb9-5" aria-hidden="true" tabindex="-1"></a> <span class="st">"gl"</span>: <span class="st">"us"</span>,</span>
|
310 |
-
<span id="cb9-6"><a href="#cb9-6" aria-hidden="true" tabindex="-1"></a> <span class="st">"api_key"</span>: os.environ[<span class="st">"SERPAPI_API_KEY"</span>],</span>
|
311 |
-
<span id="cb9-7"><a href="#cb9-7" aria-hidden="true" tabindex="-1"></a>}</span>
|
312 |
-
<span id="cb9-8"><a href="#cb9-8" aria-hidden="true" tabindex="-1"></a></span>
|
313 |
-
<span id="cb9-9"><a href="#cb9-9" aria-hidden="true" tabindex="-1"></a>search <span class="op">=</span> GoogleSearch(params)</span>
|
314 |
-
<span id="cb9-10"><a href="#cb9-10" aria-hidden="true" tabindex="-1"></a>results <span class="op">=</span> search.get_dict()</span>
|
315 |
-
<span id="cb9-11"><a href="#cb9-11" aria-hidden="true" tabindex="-1"></a>recipes_results <span class="op">=</span> results[<span class="st">"recipes_results"</span>]</span>
|
316 |
-
<span id="cb9-12"><a href="#cb9-12" aria-hidden="true" tabindex="-1"></a>recipes_results</span></code><button title="Copy to Clipboard" class="code-copy-button"><i class="bi"></i></button></pre></div>
|
317 |
-
<div class="cell-output cell-output-display">
|
318 |
-
<pre><code>[{'title': 'Easy Tofu Pad Thai',
|
319 |
-
'link': 'https://minimalistbaker.com/easy-tofu-pad-thai/',
|
320 |
-
'source': 'Minimalist Baker',
|
321 |
-
'rating': 4.9,
|
322 |
-
'reviews': 117,
|
323 |
-
'total_time': '30 min',
|
324 |
-
'ingredients': ['Pad thai rice',
|
325 |
-
'peanut sauce',
|
326 |
-
'thai red',
|
327 |
-
'soy sauce',
|
328 |
-
'bean sprouts']},
|
329 |
-
{'title': 'Vegan Pad Thai',
|
330 |
-
'link': 'https://www.noracooks.com/vegan-pad-thai/',
|
331 |
-
'source': 'Nora Cooks',
|
332 |
-
'rating': 5.0,
|
333 |
-
'reviews': 53,
|
334 |
-
'total_time': '30 min',
|
335 |
-
'ingredients': ['Stir fry rice',
|
336 |
-
'mung bean sprouts',
|
337 |
-
'soy sauce',
|
338 |
-
'maple syrup',
|
339 |
-
'sriracha hot sauce']},
|
340 |
-
{'title': 'Vegan Pad Thai',
|
341 |
-
'link': 'https://www.pickuplimes.com/recipe/speedy-vegan-pad-thai-116',
|
342 |
-
'source': 'Pick Up Limes',
|
343 |
-
'rating': 5.0,
|
344 |
-
'reviews': 34,
|
345 |
-
'total_time': '30 min',
|
346 |
-
'ingredients': ['Brown rice noodles',
|
347 |
-
'red hot',
|
348 |
-
'soy sauce',
|
349 |
-
'bean sprouts',
|
350 |
-
'sriracha hot sauce']}]</code></pre>
|
351 |
-
</div>
|
352 |
-
</div>
|
353 |
-
<hr>
|
354 |
-
<section id="serpapiwrapper" class="level3">
|
355 |
-
<h3 class="anchored" data-anchor-id="serpapiwrapper">SerpAPIWrapper</h3>
|
356 |
-
<blockquote class="blockquote">
|
357 |
-
<pre><code> SerpAPIWrapper (search_engine:Any=None, params:dict={'engine': 'google',
|
358 |
-
'google_domain': 'google.com', 'gl': 'us', 'hl': 'en'},
|
359 |
-
serpapi_api_key:Optional[str]=None,
|
360 |
-
aiosession:Optional[aiohttp.client.ClientSession]=None)</code></pre>
|
361 |
-
</blockquote>
|
362 |
-
<p>Wrapper around SerpAPI.</p>
|
363 |
-
<p>To use, you should have the <code>google-search-results</code> python package installed, and the environment variable <code>SERPAPI_API_KEY</code> set with your API key, or pass <code>serpapi_api_key</code> as a named parameter to the constructor.</p>
|
364 |
-
<p>Example: .. code-block:: python</p>
|
365 |
-
<pre><code> from langchain import SerpAPIWrapper
|
366 |
-
serpapi = SerpAPIWrapper()</code></pre>
|
367 |
-
<hr>
|
368 |
-
<p><a href="https://gitlab.com/animalequality/lv-recipe-chatbot/blob/main/lv_recipe_chatbot/lchain_tool.py#L19" target="_blank" style="float:right; font-size:smaller">source</a></p>
|
369 |
-
</section>
|
370 |
-
<section id="recipeserpapiwrapper" class="level3">
|
371 |
-
<h3 class="anchored" data-anchor-id="recipeserpapiwrapper">RecipeSerpAPIWrapper</h3>
|
372 |
-
<blockquote class="blockquote">
|
373 |
-
<pre><code> RecipeSerpAPIWrapper (search_engine:Any=None, params:dict={'engine':
|
374 |
-
'google', 'google_domain': 'google.com', 'gl':
|
375 |
-
'us', 'hl': 'en'},
|
376 |
-
serpapi_api_key:Optional[str]=None, aiosession:Opti
|
377 |
-
onal[aiohttp.client.ClientSession]=None)</code></pre>
|
378 |
-
</blockquote>
|
379 |
-
<p>Wrapper around SerpAPI.</p>
|
380 |
-
<p>To use, you should have the <code>google-search-results</code> python package installed, and the environment variable <code>SERPAPI_API_KEY</code> set with your API key, or pass <code>serpapi_api_key</code> as a named parameter to the constructor.</p>
|
381 |
-
<p>Example: .. code-block:: python</p>
|
382 |
-
<pre><code> from langchain import SerpAPIWrapper
|
383 |
-
serpapi = SerpAPIWrapper()</code></pre>
|
384 |
-
<div class="cell">
|
385 |
-
<div class="sourceCode cell-code" id="cb15"><pre class="sourceCode python code-with-copy"><code class="sourceCode python"><span id="cb15-1"><a href="#cb15-1" aria-hidden="true" tabindex="-1"></a>params <span class="op">=</span> {</span>
|
386 |
-
<span id="cb15-2"><a href="#cb15-2" aria-hidden="true" tabindex="-1"></a> <span class="st">"location"</span>: <span class="st">"United States"</span>,</span>
|
387 |
-
<span id="cb15-3"><a href="#cb15-3" aria-hidden="true" tabindex="-1"></a> <span class="st">"hl"</span>: <span class="st">"en"</span>,</span>
|
388 |
-
<span id="cb15-4"><a href="#cb15-4" aria-hidden="true" tabindex="-1"></a> <span class="st">"gl"</span>: <span class="st">"us"</span>,</span>
|
389 |
-
<span id="cb15-5"><a href="#cb15-5" aria-hidden="true" tabindex="-1"></a>}</span>
|
390 |
-
<span id="cb15-6"><a href="#cb15-6" aria-hidden="true" tabindex="-1"></a>search <span class="op">=</span> RecipeSerpAPIWrapper(params<span class="op">=</span>params)</span></code><button title="Copy to Clipboard" class="code-copy-button"><i class="bi"></i></button></pre></div>
|
391 |
-
</div>
|
392 |
-
<div class="cell">
|
393 |
-
<div class="sourceCode cell-code" id="cb16"><pre class="sourceCode python code-with-copy"><code class="sourceCode python"><span id="cb16-1"><a href="#cb16-1" aria-hidden="true" tabindex="-1"></a>vegan_recipes <span class="op">=</span> search.run(<span class="st">"Vegan fried rice recipes"</span>)</span>
|
394 |
-
<span id="cb16-2"><a href="#cb16-2" aria-hidden="true" tabindex="-1"></a>vegan_recipes[<span class="dv">0</span>:<span class="dv">3</span>]</span></code><button title="Copy to Clipboard" class="code-copy-button"><i class="bi"></i></button></pre></div>
|
395 |
-
<div class="cell-output cell-output-display">
|
396 |
-
<pre><code>[{'title': 'Easy Vegan Fried Rice',
|
397 |
-
'link': 'https://minimalistbaker.com/easy-vegan-fried-rice/',
|
398 |
-
'source': 'Minimalist Baker',
|
399 |
-
'rating': 4.8,
|
400 |
-
'reviews': 457,
|
401 |
-
'total_time': '1 hr 15 min',
|
402 |
-
'ingredients': ['Peanut butter',
|
403 |
-
'grain brown rice',
|
404 |
-
'soy sauce',
|
405 |
-
'maple syrup',
|
406 |
-
'chili garlic sauce']},
|
407 |
-
{'title': 'The Best Vegan Fried Rice',
|
408 |
-
'link': 'https://shortgirltallorder.com/best-vegan-fried-rice',
|
409 |
-
'source': 'Short Girl Tall Order',
|
410 |
-
'rating': 4.8,
|
411 |
-
'reviews': 65,
|
412 |
-
'total_time': '28 min',
|
413 |
-
'ingredients': ['Soy sauce',
|
414 |
-
'white rice',
|
415 |
-
'rice wine vinegar',
|
416 |
-
'sugar',
|
417 |
-
'fresh peas']},
|
418 |
-
{'title': 'Vegan Fried Rice',
|
419 |
-
'link': 'https://www.noracooks.com/vegan-fried-rice/',
|
420 |
-
'source': 'Nora Cooks',
|
421 |
-
'rating': 5.0,
|
422 |
-
'reviews': 15,
|
423 |
-
'total_time': '20 min',
|
424 |
-
'ingredients': ['Gluten free',
|
425 |
-
'nutritional yeast',
|
426 |
-
'toasted sesame oil',
|
427 |
-
'carrots',
|
428 |
-
'olive oil']}]</code></pre>
|
429 |
-
</div>
|
430 |
-
</div>
|
431 |
-
<div class="cell">
|
432 |
-
<div class="sourceCode cell-code" id="cb18"><pre class="sourceCode python code-with-copy"><code class="sourceCode python"><span id="cb18-1"><a href="#cb18-1" aria-hidden="true" tabindex="-1"></a>params <span class="op">=</span> {</span>
|
433 |
-
<span id="cb18-2"><a href="#cb18-2" aria-hidden="true" tabindex="-1"></a> <span class="st">"engine"</span>: <span class="st">"google_images"</span>,</span>
|
434 |
-
<span id="cb18-3"><a href="#cb18-3" aria-hidden="true" tabindex="-1"></a> <span class="st">"q"</span>: <span class="st">"Vegan pad thai recipes"</span>,</span>
|
435 |
-
<span id="cb18-4"><a href="#cb18-4" aria-hidden="true" tabindex="-1"></a> <span class="st">"location"</span>: <span class="st">"United States"</span>,</span>
|
436 |
-
<span id="cb18-5"><a href="#cb18-5" aria-hidden="true" tabindex="-1"></a> <span class="st">"api_key"</span>: os.environ[<span class="st">"SERPAPI_API_KEY"</span>],</span>
|
437 |
-
<span id="cb18-6"><a href="#cb18-6" aria-hidden="true" tabindex="-1"></a>}</span>
|
438 |
-
<span id="cb18-7"><a href="#cb18-7" aria-hidden="true" tabindex="-1"></a></span>
|
439 |
-
<span id="cb18-8"><a href="#cb18-8" aria-hidden="true" tabindex="-1"></a>search <span class="op">=</span> GoogleSearch(params)</span>
|
440 |
-
<span id="cb18-9"><a href="#cb18-9" aria-hidden="true" tabindex="-1"></a>results <span class="op">=</span> search.get_dict()</span></code><button title="Copy to Clipboard" class="code-copy-button"><i class="bi"></i></button></pre></div>
|
441 |
-
</div>
|
442 |
-
<div class="cell">
|
443 |
-
<div class="sourceCode cell-code" id="cb19"><pre class="sourceCode python code-with-copy"><code class="sourceCode python"><span id="cb19-1"><a href="#cb19-1" aria-hidden="true" tabindex="-1"></a><span class="cf">for</span> r <span class="kw">in</span> results[<span class="st">"images_results"</span>][<span class="dv">0</span>:<span class="dv">5</span>]:</span>
|
444 |
-
<span id="cb19-2"><a href="#cb19-2" aria-hidden="true" tabindex="-1"></a> display(r[<span class="st">"title"</span>], r[<span class="st">"link"</span>], Image(url<span class="op">=</span>r[<span class="st">"thumbnail"</span>]))</span></code><button title="Copy to Clipboard" class="code-copy-button"><i class="bi"></i></button></pre></div>
|
445 |
-
<div class="cell-output cell-output-display">
|
446 |
-
<pre><code>'Easy Tofu Pad Thai (Vegan) | Minimalist Baker Recipes'</code></pre>
|
447 |
-
</div>
|
448 |
-
<div class="cell-output cell-output-display">
|
449 |
-
<pre><code>'https://minimalistbaker.com/easy-tofu-pad-thai/'</code></pre>
|
450 |
-
</div>
|
451 |
-
<div class="cell-output cell-output-display">
|
452 |
-
<img src="https://serpapi.com/searches/6480db18c56d93170a8e715f/images/6f34b4708ae4dd36a28ca4ca4a3abf6af168f575eef7bd2e8f81a12e175fcf53.jpeg">
|
453 |
-
</div>
|
454 |
-
<div class="cell-output cell-output-display">
|
455 |
-
<pre><code>'Healthier vegan pad thai - Lazy Cat Kitchen'</code></pre>
|
456 |
-
</div>
|
457 |
-
<div class="cell-output cell-output-display">
|
458 |
-
<pre><code>'https://www.lazycatkitchen.com/healthier-vegan-pad-thai/'</code></pre>
|
459 |
-
</div>
|
460 |
-
<div class="cell-output cell-output-display">
|
461 |
-
<img src="https://serpapi.com/searches/6480db18c56d93170a8e715f/images/6f34b4708ae4dd36dee6fed89369c822a79ad529f726d1a65fdd09459c0a0b6a.jpeg">
|
462 |
-
</div>
|
463 |
-
<div class="cell-output cell-output-display">
|
464 |
-
<pre><code>'The Best Vegan Pad Thai - Full of Plants'</code></pre>
|
465 |
-
</div>
|
466 |
-
<div class="cell-output cell-output-display">
|
467 |
-
<pre><code>'https://fullofplants.com/the-best-vegan-pad-thai/'</code></pre>
|
468 |
-
</div>
|
469 |
-
<div class="cell-output cell-output-display">
|
470 |
-
<img src="https://serpapi.com/searches/6480db18c56d93170a8e715f/images/6f34b4708ae4dd3695828a207980e4280bb4e14cdccb84ebf5350f19237416f8.jpeg">
|
471 |
-
</div>
|
472 |
-
<div class="cell-output cell-output-display">
|
473 |
-
<pre><code>'Easy Vegan Pad Thai - Oh My Veggies'</code></pre>
|
474 |
-
</div>
|
475 |
-
<div class="cell-output cell-output-display">
|
476 |
-
<pre><code>'https://ohmyveggies.com/easy-vegan-pad-thai/'</code></pre>
|
477 |
-
</div>
|
478 |
-
<div class="cell-output cell-output-display">
|
479 |
-
<img src="https://serpapi.com/searches/6480db18c56d93170a8e715f/images/6f34b4708ae4dd36885ca51553e15b434e41039ef307ecbb4869522eeeefcfa5.jpeg">
|
480 |
-
</div>
|
481 |
-
<div class="cell-output cell-output-display">
|
482 |
-
<pre><code>'Easy Vegan Pad Thai - My Darling Vegan'</code></pre>
|
483 |
-
</div>
|
484 |
-
<div class="cell-output cell-output-display">
|
485 |
-
<pre><code>'https://www.mydarlingvegan.com/vegan-pad-thai/'</code></pre>
|
486 |
-
</div>
|
487 |
-
<div class="cell-output cell-output-display">
|
488 |
-
<img src="https://serpapi.com/searches/6480db18c56d93170a8e715f/images/6f34b4708ae4dd36a554bfded8055a9df50470d25fe62e19b9de5f16e262497f.jpeg">
|
489 |
-
</div>
|
490 |
-
</div>
|
491 |
-
<hr>
|
492 |
-
</section>
|
493 |
-
<section id="load_tools" class="level3">
|
494 |
-
<h3 class="anchored" data-anchor-id="load_tools">load_tools</h3>
|
495 |
-
<blockquote class="blockquote">
|
496 |
-
<pre><code> load_tools (tool_names:List[str],
|
497 |
-
llm:Optional[langchain.base_language.BaseLanguageModel]=None,
|
498 |
-
callbacks:Union[List[langchain.callbacks.base.BaseCallbackHan
|
499 |
-
dler],langchain.callbacks.base.BaseCallbackManager,NoneType]=
|
500 |
-
None, **kwargs:Any)</code></pre>
|
501 |
-
</blockquote>
|
502 |
-
<p>Load tools based on their name.</p>
|
503 |
-
<p>Args: tool_names: name of tools to load. llm: Optional language model, may be needed to initialize certain tools. callbacks: Optional callback manager or list of callback handlers. If not provided, default global callback manager will be used.</p>
|
504 |
-
<p>Returns: List of tools.</p>
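For orientation, here is a minimal sketch of a typical `load_tools` call (the tool names are illustrative; `llm-math` is included only to show why the optional `llm` argument exists, since some tools need a language model to initialize):

```python
from langchain.agents import load_tools
from langchain.chat_models import ChatOpenAI

llm = ChatOpenAI(temperature=0)
# "serpapi" initializes without an LLM; "llm-math" requires one
tools = load_tools(["serpapi", "llm-math"], llm=llm)
```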
Here is the SerpAPIWrapper tool implementation:

```python
from langchain.agents.load_tools import _get_serpapi
```

```python
??_get_serpapi
```

```
Signature: _get_serpapi(**kwargs: Any) -> langchain.tools.base.BaseTool
Docstring: <no docstring>
Source:
def _get_serpapi(**kwargs: Any) -> BaseTool:
    return Tool(
        name="Search",
        description="A search engine. Useful for when you need to answer questions about current events. Input should be a search query.",
        func=SerpAPIWrapper(**kwargs).run,
        coroutine=SerpAPIWrapper(**kwargs).arun,
    )
File: ~/AnimalEquality/lv-recipe-chatbot/env/lib/python3.10/site-packages/langchain/agents/load_tools.py
Type: function
```
Let's use that for inspiration for our recipe version of the tool:

```python
params = {
    "location": "United States",
    "hl": "en",
    "gl": "us",
}
search = RecipeSerpAPIWrapper(params=params)
serpapi_recipe_tool = Tool(
    name="Vegan Recipe Search",
    description="A search engine. Useful for when you need to fetch existing vetted vegan recipes. Input should be a vegan recipe search query.",
    func=search.run,
)
```
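With the tool constructed, it can be exercised on its own before handing it to an agent. A minimal sketch (the query string is illustrative):

```python
# Run the wrapped search outside of any agent to sanity-check the wrapper
print(serpapi_recipe_tool.run("vegan lasagna"))
```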
```python
@tool
def time(text: str) -> str:
    """Returns today's date; use this for any
    questions related to knowing today's date.
    The input should always be an empty string,
    and this function will always return today's
    date - any date mathematics should occur
    outside this function."""
    return str(date.today())
```
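Note that the `@tool` decorator turns the function into a `Tool` whose name and description are derived from the function name and docstring. A quick way to confirm what the agent will actually see (attribute names follow LangChain's `BaseTool` interface):

```python
# The docstring above becomes the description the LLM uses to pick this tool
print(time.name)
print(time.description)
```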
```python
agent = initialize_agent(
    [time],
    llm,
    agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION,
    handle_parsing_errors=True,
    verbose=True,
)
```
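A usage sketch for this agent (the question is illustrative, and the exact chain-of-thought output will vary by model):

```python
# The agent should decide to call the `time` tool with an empty string
agent.run("What is today's date?")
```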
```python
@tool
def vegan_recipe_serpapi_search(text: str) -> str:
    """Returns a JSON/Python list of dictionaries of recipe data with keys in format:
    ```
    'title': str,
    'link': str,
    'source': str,
    'rating': int,
    'reviews': int,
    'total_time': str,
    'ingredients': [str, str, ...]
    ```
    The input must be the name of a vegan recipe \
    or query parameters such as ingredients to include, prep time, cuisine region. \
    Only execute the search for vegan recipes and ingredients. \
    If the SerpAPI request errors or recipes are not found, \
    an explanation message will be returned instead of the recipe JSON."""
    params = {
        "q": text,
        "location": "United States",
        "hl": "en",
        "gl": "us",
        "api_key": os.environ["SERPAPI_API_KEY"],
    }

    search = GoogleSearch(params)
    results = search.get_dict()
    if "error" in results.keys():
        return f"Received an error from SerpAPI: {results['error']}\n Query: {text}"

    if "recipes_results" in results.keys():
        return str(results["recipes_results"])

    return "No recipes found for that query"
```
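Before wiring it into an agent, the tool can be invoked directly; a minimal sketch (query illustrative):

```python
# Direct invocation returns either the stringified recipe list or an explanation message
print(vegan_recipe_serpapi_search.run("vegan pad thai"))
```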
Create an agent with the tool:

```python
agent = initialize_agent(
    tools=[vegan_recipe_serpapi_search],
    llm=llm,
    agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION,
    handle_parsing_errors=True,
    verbose=True,
)
```
```python
agent.run("Search vegan pad thai recipes")
```

````
> Entering new AgentExecutor chain...
Thought: I can use the `vegan_recipe_serpapi_search` tool to search for vegan pad thai recipes.

Action:
```
{
  "action": "vegan_recipe_serpapi_search",
  "action_input": "vegan pad thai"
}
```

Observation: [{'title': 'Vegan Pad Thai', 'link': 'https://www.noracooks.com/vegan-pad-thai/', 'source': 'Nora Cooks', 'rating': 5.0, 'reviews': 53, 'total_time': '30 min', 'ingredients': ['Stir fry rice', 'mung bean sprouts', 'soy sauce', 'maple syrup', 'sriracha hot sauce']}, {'title': 'Easy Tofu Pad Thai', 'link': 'https://minimalistbaker.com/easy-tofu-pad-thai/', 'source': 'Minimalist Baker', 'rating': 4.9, 'reviews': 117, 'total_time': '30 min', 'ingredients': ['Pad thai rice', 'peanut sauce', 'thai red', 'soy sauce', 'bean sprouts']}, {'title': 'Vegan Pad Thai', 'link': 'https://www.pickuplimes.com/recipe/speedy-vegan-pad-thai-116', 'source': 'Pick Up Limes', 'rating': 5.0, 'reviews': 34, 'total_time': '30 min', 'ingredients': ['Brown rice noodles', 'red hot', 'soy sauce', 'bean sprouts', 'sriracha hot sauce']}]
Thought: Could not parse LLM output: The `vegan_recipe_serpapi_search` tool returned a list of three vegan pad thai recipes with their titles, links, sources, ratings, reviews, total time, and ingredients.
Observation: Invalid or incomplete response

[The same parse failure recurs three more times: the agent retries with the inputs "vegan pad thai", "vegan pad thai recipes", and "vegan pad thai recipe", receives essentially identical observations each time, and only then produces an answer.]

Final Answer: Here are three vegan pad thai recipes:
1. Easy Tofu Pad Thai from Minimalist Baker
2. Vegan Pad Thai from Nora Cooks
3. Vegan Pad Thai from Pick Up Limes.

> Finished chain.
````

```
'Here are three vegan pad thai recipes: \n1. Easy Tofu Pad Thai from Minimalist Baker\n2. Vegan Pad Thai from Nora Cooks\n3. Vegan Pad Thai from Pick Up Limes.'
```
Given this behavior, the [LangChain SerpAPI doc](https://python.langchain.com/en/latest/modules/agents/tools/examples/serpapi.html) could be corrected/updated.
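The trace above shows the agent repeatedly hitting "Could not parse LLM output" before recovering. One possible mitigation, assuming the same `initialize_agent` API used above (which also accepts a string or callable for `handle_parsing_errors`), is to feed a corrective message back to the model after a bad parse; a hedged sketch:

```python
agent = initialize_agent(
    tools=[vegan_recipe_serpapi_search],
    llm=llm,
    agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION,
    # Instead of True, pass a custom message the model sees after a failed parse
    handle_parsing_errors="Respond with a single JSON action blob or a Final Answer.",
    verbose=True,
)
```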
```python
search = GoogleSerperAPIWrapper(type="search")
results = search.results("Lion")
```
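The wrapper above targets the Serper.dev backend (a different service from SerpAPI; it expects a `SERPER_API_KEY` in the environment). A hedged sketch of inspecting the response, assuming the usual `organic` key in Serper results:

```python
# Each organic hit carries at least a title and a link
for hit in results.get("organic", [])[:3]:
    print(hit["title"], hit["link"])
```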
Another option for recipe search is [edamam](https://www.edamam.com/).
spaces/AnthonyTruchetPoC/persistent-docker/src/athai/hello.py
DELETED
@@ -1,27 +0,0 @@

```python
from typing import Optional


print(
    f"executing the file '{__file__}' "
    f"in module '{__name__}' and package '{__package__}'"
)


def build_greetings(name: Optional[str] = None) -> str:
    """
    Return a greeting message, possibly customized with a name.

    >>> build_greetings()
    'Hello, World!'
    >>> build_greetings('Toto')
    'Nice to meet you, Toto!'
    """
    # `and`/`or` short-circuit: a falsy name (None or "") falls through to the default
    return name and f"Nice to meet you, {name}!" or "Hello, World!"


def main():
    print(build_greetings())


if __name__ == "__main__":
    main()  # pragma: no cover
```
spaces/Artrajz/vits-simple-api/README_zh.md
DELETED
@@ -1,433 +0,0 @@
|
|
1 |
-
<div class="title" align=center>
|
2 |
-
<h1>vits-simple-api</h1>
|
3 |
-
<div>Simply call the vits api</div>
|
4 |
-
<br/>
|
5 |
-
<br/>
|
6 |
-
<p>
|
7 |
-
<img src="https://img.shields.io/github/license/Artrajz/vits-simple-api">
|
8 |
-
<img src="https://img.shields.io/badge/python-3.10-green">
|
9 |
-
<a href="https://hub.docker.com/r/artrajz/vits-simple-api">
|
10 |
-
<img src="https://img.shields.io/docker/pulls/artrajz/vits-simple-api"></a>
|
11 |
-
</p>
|
12 |
-
<a href="https://github.com/Artrajz/vits-simple-api/blob/main/README.md">English</a>|<a href="https://github.com/Artrajz/vits-simple-api/blob/main/README_zh.md">中文文档</a>
|
13 |
-
<br/>
|
14 |
-
</div>
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
# Feature
|
21 |
-
|
22 |
-
- [x] VITS语音合成,语音转换
|
23 |
-
- [x] HuBert-soft VITS模型
|
24 |
-
- [x] W2V2 VITS / [emotional-vits](https://github.com/innnky/emotional-vits)维度情感模型
|
25 |
-
- [x] [vits_chinese](https://github.com/PlayVoice/vits_chinese)
|
26 |
-
- [x] [Bert-VITS2](https://github.com/Stardust-minus/Bert-VITS2)
|
27 |
-
- [x] 加载多模型
|
28 |
-
- [x] 自动识别语言并处理,根据模型的cleaner设置语言类型识别的范围,支持自定义语言类型范围
|
29 |
-
- [x] 自定义默认参数
|
30 |
-
- [x] 长文本批处理
|
31 |
-
- [x] GPU加速推理
|
32 |
-
- [x] SSML语音合成标记语言(完善中...)
|
33 |
-
|
34 |
-
|
35 |
-
## demo
|
36 |
-
|
37 |
-
[](https://huggingface.co/spaces/Artrajz/vits-simple-api)
|
38 |
-
|
39 |
-
注意不同的id支持的语言可能有所不同。[speakers](https://artrajz-vits-simple-api.hf.space/voice/speakers)
|
40 |
-
|
41 |
-
|
42 |
-
- `https://artrajz-vits-simple-api.hf.space/voice/vits?text=你好,こんにちは&id=164`
|
43 |
-
- `https://artrajz-vits-simple-api.hf.space/voice/vits?text=我觉得1%2B1≠3&id=164&lang=zh`(get中一些字符需要转义不然会被过滤掉)
|
44 |
-
- `https://artrajz-vits-simple-api.hf.space/voice/vits?text=Difficult the first time, easy the second.&id=4`
|
45 |
-
- 激动:`https://artrajz-vits-simple-api.hf.space/voice/w2v2-vits?text=こんにちは&id=3&emotion=111`
|
46 |
-
- 小声:`https://artrajz-vits-simple-api.hf.space/voice/w2v2-vits?text=こんにちは&id=3&emotion=2077`
|
47 |
-
|
48 |
-
https://user-images.githubusercontent.com/73542220/237995061-c1f25b4e-dd86-438a-9363-4bb1fe65b425.mov
|
49 |
-
|
50 |
-
# 部署
|
51 |
-
|
52 |
-
## Docker部署(Linux推荐)
|
53 |
-
|
54 |
-
### 镜像拉取脚本
|
55 |
-
|
56 |
-
```
|
57 |
-
bash -c "$(wget -O- https://raw.githubusercontent.com/Artrajz/vits-simple-api/main/vits-simple-api-installer-latest.sh)"
|
58 |
-
```
|
59 |
-
|
60 |
-
- 目前docker镜像支持的平台`linux/amd64,linux/arm64`(arm64仅有CPU版本)
|
61 |
-
- 在拉取完成后,需要导入VITS模型才能使用,请根据以下步骤导入模型。
|
62 |
-
|
63 |
-
### 下载VITS模型
|
64 |
-
|
65 |
-
将模型放入`/usr/local/vits-simple-api/Model`
|
66 |
-
|
67 |
-
<details><summary>Folder structure</summary><pre><code>
|
68 |
-
│ hubert-soft-0d54a1f4.pt
|
69 |
-
│ model.onnx
|
70 |
-
│ model.yaml
|
71 |
-
├─g
|
72 |
-
│ config.json
|
73 |
-
│ G_953000.pth
|
74 |
-
│
|
75 |
-
├─louise
|
76 |
-
│ 360_epochs.pth
|
77 |
-
│ config.json
|
78 |
-
│
|
79 |
-
├─Nene_Nanami_Rong_Tang
|
80 |
-
│ 1374_epochs.pth
|
81 |
-
│ config.json
|
82 |
-
│
|
83 |
-
├─Zero_no_tsukaima
|
84 |
-
│ 1158_epochs.pth
|
85 |
-
│ config.json
|
86 |
-
│
|
87 |
-
└─npy
|
88 |
-
25ecb3f6-f968-11ed-b094-e0d4e84af078.npy
|
89 |
-
all_emotions.npy
|
90 |
-
</code></pre></details>
|
91 |
-
|
92 |
-
|
93 |
-
|
94 |
-
### 修改模型路径
|
95 |
-
|
96 |
-
Modify in `/usr/local/vits-simple-api/config.py`
|
97 |
-
|
98 |
-
<details><summary>config.py</summary><pre><code>
|
99 |
-
# 在此填写模型路径
|
100 |
-
MODEL_LIST = [
|
101 |
-
# VITS
|
102 |
-
[ABS_PATH + "/Model/Nene_Nanami_Rong_Tang/1374_epochs.pth", ABS_PATH + "/Model/Nene_Nanami_Rong_Tang/config.json"],
|
103 |
-
[ABS_PATH + "/Model/Zero_no_tsukaima/1158_epochs.pth", ABS_PATH + "/Model/Zero_no_tsukaima/config.json"],
|
104 |
-
[ABS_PATH + "/Model/g/G_953000.pth", ABS_PATH + "/Model/g/config.json"],
|
105 |
-
# HuBert-VITS (Need to configure HUBERT_SOFT_MODEL)
|
106 |
-
[ABS_PATH + "/Model/louise/360_epochs.pth", ABS_PATH + "/Model/louise/config.json"],
|
107 |
-
# W2V2-VITS (Need to configure DIMENSIONAL_EMOTION_NPY)
|
108 |
-
[ABS_PATH + "/Model/w2v2-vits/1026_epochs.pth", ABS_PATH + "/Model/w2v2-vits/config.json"],
|
109 |
-
]
|
110 |
-
# hubert-vits: hubert soft 编码器
|
111 |
-
HUBERT_SOFT_MODEL = ABS_PATH + "/Model/hubert-soft-0d54a1f4.pt"
|
112 |
-
# w2v2-vits: Dimensional emotion npy file
|
113 |
-
# 加载单独的npy: ABS_PATH+"/all_emotions.npy
|
114 |
-
# 加载多个npy: [ABS_PATH + "/emotions1.npy", ABS_PATH + "/emotions2.npy"]
|
115 |
-
# 从文件夹里加载npy: ABS_PATH + "/Model/npy"
|
116 |
-
DIMENSIONAL_EMOTION_NPY = ABS_PATH + "/Model/npy"
|
117 |
-
# w2v2-vits: 需要在同一路径下有model.onnx和model.yaml
|
118 |
-
DIMENSIONAL_EMOTION_MODEL = ABS_PATH + "/Model/model.yaml"
|
119 |
-
</code></pre></details>
|
120 |
-
|
121 |
-
|
122 |
-
|
123 |
-
### 启动
|
124 |
-
|
125 |
-
`docker compose up -d`
|
126 |
-
|
127 |
-
或者重新执行拉取脚本
|
128 |
-
|
129 |
-
### 镜像更新
|
130 |
-
|
131 |
-
重新执行docker镜像拉取脚本即可
|
132 |
-
|
133 |
-
## 虚拟环境部署
|
134 |
-
|
135 |
-
### Clone
|
136 |
-
|
137 |
-
`git clone https://github.com/Artrajz/vits-simple-api.git`
|
138 |
-
|
139 |
-
### 下载python依赖
|
140 |
-
|
141 |
-
推荐使用python的虚拟环境
|
142 |
-
|
143 |
-
`pip install -r requirements.txt`
|
144 |
-
|
145 |
-
windows下可能安装不了fasttext,可以用以下命令安装,附[wheels下载地址](https://www.lfd.uci.edu/~gohlke/pythonlibs/#fasttext)
|
146 |
-
|
147 |
-
```
|
148 |
-
# python3.10 win_amd64
|
149 |
-
pip install https://github.com/Artrajz/archived/raw/main/fasttext/fasttext-0.9.2-cp310-cp310-win_amd64.whl
|
150 |
-
```
|
151 |
-
|
152 |
-
### 下载VITS模型
|
153 |
-
|
154 |
-
将模型放入 `/path/to/vits-simple-api/Model`
|
155 |
-
|
156 |
-
<details><summary>文件夹结构</summary><pre><code>
|
157 |
-
├─g
|
158 |
-
│ config.json
|
159 |
-
│ G_953000.pth
|
160 |
-
│
|
161 |
-
├─louise
|
162 |
-
│ 360_epochs.pth
|
163 |
-
│ config.json
|
164 |
-
│ hubert-soft-0d54a1f4.pt
|
165 |
-
│
|
166 |
-
├─Nene_Nanami_Rong_Tang
|
167 |
-
│ 1374_epochs.pth
|
168 |
-
│ config.json
|
169 |
-
│
|
170 |
-
└─Zero_no_tsukaima
|
171 |
-
1158_epochs.pth
|
172 |
-
config.json
|
173 |
-
</code></pre></details>
|
174 |
-
|
175 |
-
### 修改模型路径
|
176 |
-
|
177 |
-
在 `/path/to/vits-simple-api/config.py` 修改
|
178 |
-
|
179 |
-
<details><summary>config.py</summary><pre><code>
|
180 |
-
# 在此填写模型路径
|
181 |
-
MODEL_LIST = [
|
182 |
-
# VITS
|
183 |
-
[ABS_PATH + "/Model/Nene_Nanami_Rong_Tang/1374_epochs.pth", ABS_PATH + "/Model/Nene_Nanami_Rong_Tang/config.json"],
|
184 |
-
[ABS_PATH + "/Model/Zero_no_tsukaima/1158_epochs.pth", ABS_PATH + "/Model/Zero_no_tsukaima/config.json"],
|
185 |
-
[ABS_PATH + "/Model/g/G_953000.pth", ABS_PATH + "/Model/g/config.json"],
|
186 |
-
# HuBert-VITS (Need to configure HUBERT_SOFT_MODEL)
|
187 |
-
[ABS_PATH + "/Model/louise/360_epochs.pth", ABS_PATH + "/Model/louise/config.json"],
|
188 |
-
# W2V2-VITS (Need to configure DIMENSIONAL_EMOTION_NPY)
|
189 |
-
[ABS_PATH + "/Model/w2v2-vits/1026_epochs.pth", ABS_PATH + "/Model/w2v2-vits/config.json"],
|
190 |
-
]
|
191 |
-
# hubert-vits: hubert soft 编码器
|
192 |
-
HUBERT_SOFT_MODEL = ABS_PATH + "/Model/hubert-soft-0d54a1f4.pt"
|
193 |
-
# w2v2-vits: Dimensional emotion npy file
|
194 |
-
# 加载单独的npy: ABS_PATH+"/all_emotions.npy
|
195 |
-
# 加载多个npy: [ABS_PATH + "/emotions1.npy", ABS_PATH + "/emotions2.npy"]
|
196 |
-
# 从文件夹里加载npy: ABS_PATH + "/Model/npy"
|
197 |
-
DIMENSIONAL_EMOTION_NPY = ABS_PATH + "/Model/npy"
|
198 |
-
# w2v2-vits: 需要在同一路径下有model.onnx和model.yaml
|
199 |
-
DIMENSIONAL_EMOTION_MODEL = ABS_PATH + "/Model/model.yaml"
|
200 |
-
</code></pre></details>
|
201 |
-
|
202 |
-
|
203 |
-
|
204 |
-
### 启动
|
205 |
-
|
206 |
-
`python app.py`
|
207 |
-
|
208 |
-
# GPU 加速
|
209 |
-
|
210 |
-
## windows
|
211 |
-
|
212 |
-
### 安装CUDA
|
213 |
-
|
214 |
-
查看显卡最高支持CUDA的版本
|
215 |
-
|
216 |
-
```
|
217 |
-
nvidia-smi
|
218 |
-
```
|
219 |
-
|
220 |
-
以CUDA11.7为例,[官网](https://developer.nvidia.com/cuda-11-7-0-download-archive?target_os=Windows&target_arch=x86_64&target_version=10&target_type=exe_local)
|
221 |
-
|
222 |
-
### 安装GPU版pytorch
|
223 |
-
|
224 |
-
CUDA11.7对应的pytorch是用这个命令安装,推荐使用1.13.1+cu117,其他版本可能存在内存不稳定的问题。
|
225 |
-
|
226 |
-
```
|
227 |
-
pip install torch==1.13.1+cu117 --extra-index-url https://download.pytorch.org/whl/cu117
|
228 |
-
```
|
229 |
-
|
230 |
-
## Linux
|
231 |
-
|
232 |
-
安装过程类似,但我没有相应的环境所以没办法测试
|
233 |
-
|
234 |
-
# 依赖安装问题
|
235 |
-
|
236 |
-
由于pypi.org没有pyopenjtalk的whl文件,通常需要从源代码来安装,这一过程对于一些人来说可能比较麻烦,所以你也可以使用我构建的whl来安装。
|
237 |
-
|
238 |
-
```
|
239 |
-
pip install pyopenjtalk -i https://pypi.artrajz.cn/simple
|
240 |
-
```
|
241 |
-
|
242 |
-
# API
|
243 |
-
|
244 |
-
## GET
|
245 |
-
|
246 |
-
#### speakers list
|
247 |
-
|
248 |
-
- GET http://127.0.0.1:23456/voice/speakers
|
249 |
-
|
250 |
-
返回id对应角色的映射表
|
251 |
-
|
252 |
-
#### voice vits
|
253 |
-
|
254 |
-
- GET http://127.0.0.1:23456/voice/vits?text=text
|
255 |
-
|
256 |
-
其他参数不指定时均为默认值
|
257 |
-
|
258 |
-
- GET http://127.0.0.1:23456/voice/vits?text=[ZH]text[ZH][JA]text[JA]&lang=mix
|
259 |
-
|
260 |
-
lang=mix时文本要标注
|
261 |
-
|
262 |
-
- GET http://127.0.0.1:23456/voice/vits?text=text&id=142&format=wav&lang=zh&length=1.4
|
263 |
-
|
264 |
-
文本为text,角色id为142,音频格式为wav,文本语言为zh,语音长度为1.4,其余参数默认
|
265 |
-
|
266 |
-
#### check
|
267 |
-
|
268 |
-
- GET http://127.0.0.1:23456/voice/check?id=0&model=vits
|
269 |
-
|
270 |
-
## POST
|
271 |
-
|
272 |
-
- 见`api_test.py`
|
273 |
-
|
274 |
-
|
275 |
-
|
276 |
-
## API KEY
|
277 |
-
|
278 |
-
在config.py中设置`API_KEY_ENABLED = True`以启用,api key填写:`API_KEY = "api-key"`。
|
279 |
-
|
280 |
-
启用后,GET请求中使用需要增加参数api_key,POST请求中使用需要在header中添加参数`X-API-KEY`。
|
281 |
-
|
282 |
-
# Parameter
|
283 |
-
|
284 |
-
## VITS语音合成
|
285 |
-
|
286 |
-
| Name | Parameter | Is must | Default | Type | Instruction |
|
287 |
-
| ------------- | --------- | ------- | ------------------- | ----- | ------------------------------------------------------------ |
|
288 |
-
| 合成文本 | text | true | | str | 需要合成语音的文本。 |
|
289 |
-
| 角色id | id | false | 从`config.py`中获取 | int | 即说话人id。 |
|
290 |
-
| 音频格式 | format | false | 从`config.py`中获取 | str | 支持wav,ogg,silk,mp3,flac |
|
291 |
-
| 文本语言 | lang | false | 从`config.py`中获取 | str | auto为自动识别语言模式,也是默认模式。lang=mix时,文本应该用[ZH] 或 [JA] 包裹。方言无法自动识别。 |
|
292 |
-
| 语音长度/语速 | length | false | 从`config.py`中获取 | float | 调节语音长度,相当于调节语速,该数值越大语速越慢。 |
|
293 |
-
| 噪声 | noise | false | 从`config.py`中获取 | float | 样本噪声,控制合成的随机性。 |
|
294 |
-
| sdp噪声 | noisew | false | 从`config.py`中获��� | float | 随机时长预测器噪声,控制音素发音长度。 |
|
295 |
-
| 分段阈值 | max | false | 从`config.py`中获取 | int | 按标点符号分段,加起来大于max时为一段文本。max<=0表示不分段。 |
|
296 |
-
| 流式响应 | streaming | false | false | bool | 流式合成语音,更快的首包响应。 |
|
297 |
-
|
298 |
-
## VITS 语音转换
|
299 |
-
|
300 |
-
| Name | Parameter | Is must | Default | Type | Instruction |
|
301 |
-
| ---------- | ----------- | ------- | ------- | ---- | ---------------------- |
|
302 |
-
| 上传音频 | upload | true | | file | wav or ogg |
|
303 |
-
| 源角色id | original_id | true | | int | 上传文件所使用的角色id |
|
304 |
-
| 目标角色id | target_id | true | | int | 要转换的目标角色id |
|
305 |
-
|
306 |
-
## HuBert-VITS 语音转换
|
307 |
-
|
308 |
-
| Name | Parameter | Is must | Default | Type | Instruction |
|
309 |
-
| ------------- | --------- | ------- | ------- | ----- | ------------------------------------------------ |
|
310 |
-
| 上传音频 | upload | true | | file | 需要转换说话人的音频文件。 |
|
311 |
-
| 目标角色id | id | true | | int | 目标说话人id。 |
|
312 |
-
| 音频格式 | format | true | | str | wav,ogg,silk |
|
313 |
-
| 语音长度/语速 | length | true | | float | 调节语音长度,相当于调节语速,该数值越大语速越慢 |
|
314 |
-
| 噪声 | noise | true | | float | 样本噪声,控制合成的随机性。 |
|
315 |
-
| sdp噪声 | noisew | true | | float | 随机时长预测器噪声,控制音素发音长度。 |
|
316 |
-
|
317 |
-
## W2V2-VITS
|
318 |
-
|
319 |
-
| Name | Parameter | Is must | Default | Type | Instruction |
|
320 |
-
| ------------- | --------- | ------- | ------------------- | ----- | ------------------------------------------------------------ |
|
321 |
-
| 合成文本 | text | true | | str | 需要合成语音的文本。 |
|
322 |
-
| 角色id | id | false | 从`config.py`中获取 | int | 即说话人id。 |
|
323 |
-
| 音频格式 | format | false | 从`config.py`中获取 | str | 支持wav,ogg,silk,mp3,flac |
|
324 |
-
| 文本语言 | lang | false | 从`config.py`中获取 | str | auto为自动识别语言模式,也是默认模式。lang=mix时,文本应该用[ZH] 或 [JA] 包裹。方言无法自动识别。 |
|
325 |
-
| 语音长度/语速 | length | false | 从`config.py`中获取 | float | 调节语音长度,相当于调节语速,该数值越大语速越慢 |
|
326 |
-
| 噪声 | noise | false | 从`config.py`中获取 | float | 样本噪声,控制合成的随机性。 |
|
327 |
-
| sdp噪声 | noisew | false | 从`config.py`中获取 | float | 随机时长预测器噪声,控制音素发音长度。 |
|
328 |
-
| 分段阈值 | max | false | 从`config.py`中获取 | int | 按标点符号分段,加起来大于max时为一段文本。max<=0表示不分段。 |
|
329 |
-
| 维度情感 | emotion | false | 0 | int | 范围取决于npy情感参考文件,如[innnky](https://huggingface.co/spaces/innnky/nene-emotion/tree/main)的all_emotions.npy模型范围是0-5457 |
|
330 |
-
|
331 |
-
## Dimensional emotion
|
332 |
-
|
333 |
-
| Name | Parameter | Is must | Default | Type | Instruction |
|
334 |
-
| -------- | --------- | ------- | ------- | ---- | ----------------------------- |
|
335 |
-
| 上传音频 | upload | true | | file | 返回存储维度情感向量的npy文件 |
|
336 |
-
|
337 |
-
## Bert-VITS2语音合成
|
338 |
-
|
339 |
-
| Name | Parameter | Is must | Default | Type | Instruction |
|
340 |
-
| ------------- | --------- | ------- | ------------------- | ----- | ------------------------------------------------------------ |
|
341 |
-
| 合成文本 | text | true | | str | 需要合成语音的文本。 |
|
342 |
-
| 角色id | id | false | 从`config.py`中获取 | int | 即说话人id。 |
|
343 |
-
| 音频格式 | format | false | 从`config.py`中获取 | str | 支持wav,ogg,silk,mp3,flac |
|
344 |
-
| 文本语言 | lang | false | 从`config.py`中获取 | str | auto为自动识别语言模式,也是默认模式,但目前只支持识别整段文本的语言,无法细分到每个句子。其余可选语言zh和ja。 |
|
345 |
-
| 语音长度/语速 | length | false | 从`config.py`中获取 | float | 调节语音长度,相当于调节语速,该���值越大语速越慢。 |
|
346 |
-
| 噪声 | noise | false | 从`config.py`中获取 | float | 样本噪声,控制合成的随机性。 |
|
347 |
-
| sdp噪声 | noisew | false | 从`config.py`中获取 | float | 随机时长预测器噪声,控制音素发音长度。 |
|
348 |
-
| 分段阈值 | max | false | 从`config.py`中获取 | int | 按标点符号分段,加起来大于max时为一段文本。max<=0表示不分段。 |
|
349 |
-
| SDP/DP混合比 | sdp_ratio | false | 从`config.py`中获取 | int | SDP在合成时的占比,理论上此比率越高,合成的语音语调方差越大。 |
|
350 |
-
|
351 |
-
## SSML语音合成标记语言
|
352 |
-
目前支持的元素与属性
|
353 |
-
|
354 |
-
`speak`元素
|
355 |
-
|
356 |
-
| Attribute | Description | Is must |
|
357 |
-
| --------- | ------------------------------------------------------------ | ------- |
|
358 |
-
| id | 默认值从`config.py`中读取 | false |
|
359 |
-
| lang | 默认值从`config.py`中读取 | false |
|
360 |
-
| length | 默认值从`config.py`中读取 | false |
|
361 |
-
| noise | 默认值从`config.py`中读取 | false |
|
362 |
-
| noisew | 默认值从`config.py`中读取 | false |
|
363 |
-
| max | 按标点符号分段,加起来大于max时为一段文本。max<=0表示不分段,这里默认为0。 | false |
|
364 |
-
| model | 默认为vits,可选`w2v2-vits`,`emotion-vits` | false |
|
365 |
-
| emotion | 只有用`w2v2-vits`或`emotion-vits`时`emotion`才生效,范围取决于npy情感参考文件 | false |
|
366 |
-
|
367 |
-
`voice`元素
|
368 |
-
|
369 |
-
优先级大于`speak`
|
370 |
-
|
371 |
-
| Attribute | Description | Is must |
|
372 |
-
| --------- | ------------------------------------------------------------ | ------- |
|
373 |
-
| id | 默认值从`config.py`中读取 | false |
|
374 |
-
| lang | 默认值从`config.py`中读取 | false |
|
375 |
-
| length | 默认值从`config.py`中读取 | false |
|
376 |
-
| noise | 默认值从`config.py`中读取 | false |
|
377 |
-
| noisew | 默认值从`config.py`中读取 | false |
|
378 |
-
| max | 按标点符号分段,加起来大于max时为一段文本。max<=0表示不分段,这里默认为0。 | false |
|
379 |
-
| model | 默认为vits,可选`w2v2-vits`,`emotion-vits` | false |
|
380 |
-
| emotion | 只有用`w2v2-vits`或`emotion-vits`时`emotion`才会生效 | false |
|
381 |
-
|
382 |
-
`break`元素
|
383 |
-
|
384 |
-
| Attribute | Description | Is must |
|
385 |
-
| --------- | ------------------------------------------------------------ | ------- |
|
386 |
-
| strength | x-weak,weak,medium(默认值),strong,x-strong | false |
|
387 |
-
| time | 暂停的绝对持续时间,以秒为单位(例如 `2s`)或以毫秒为单位(例如 `500ms`)。 有效值的范围为 0 到 5000 毫秒。 如果设置的值大于支持的最大值,则服务将使用 `5000ms`。 如果设置了 `time` 属性,则会忽略 `strength` 属性。 | false |
|
388 |
-
|
389 |
-
| Strength | Relative Duration |
|
390 |
-
| :------- | :---------------- |
|
391 |
-
| x-weak | 250 毫秒 |
|
392 |
-
| weak | 500 毫秒 |
|
393 |
-
| Medium | 750 毫秒 |
|
394 |
-
| Strong | 1000 毫秒 |
|
395 |
-
| x-strong | 1250 毫秒 |
|
396 |
-
|
397 |
-
示例
|
398 |
-
|
399 |
-
```xml
<speak lang="zh" format="mp3" length="1.2">
    <voice id="92">这几天心里颇不宁静。</voice>
    <voice id="125">今晚在院子里坐着乘凉,忽然想起日日走过的荷塘,在这满月的光里,总该另有一番样子吧。</voice>
    <voice id="142">月亮渐渐地升高了,墙外马路上孩子们的欢笑,已经听不见了;</voice>
    <voice id="98">妻在屋里拍着闰儿,迷迷糊糊地哼着眠歌。</voice>
    <voice id="120">我悄悄地披了大衫,带上门出去。</voice><break time="2s"/>
    <voice id="121">沿着荷塘,是一条曲折的小煤屑路。</voice>
    <voice id="122">这是一条幽僻的路;白天也少人走,夜晚更加寂寞。</voice>
    <voice id="123">荷塘四面,长着许多树,蓊蓊郁郁的。</voice>
    <voice id="124">路的一旁,是些杨柳,和一些不知道名字的树。</voice>
    <voice id="125">没有月光的晚上,这路上阴森森的,有些怕人。</voice>
    <voice id="126">今晚却很好,虽然月光也还是淡淡的。</voice><break time="2s"/>
    <voice id="127">路上只我一个人,背着手踱着。</voice>
    <voice id="128">这一片天地好像是我的;我也像超出了平常的自己,到了另一个世界里。</voice>
    <voice id="129">我爱热闹,也爱冷静;<break strength="x-weak"/>爱群居,也爱独处。</voice>
    <voice id="130">像今晚上,一个人在这苍茫的月下,什么都可以想,什么都可以不想,便觉是个自由的人。</voice>
    <voice id="131">白天里一定要做的事,一定要说的话,现在都可不理。</voice>
    <voice id="132">这是独处的妙处,我且受用这无边的荷香月色好了。</voice>
</speak>
```
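For completeness, a hedged sketch of submitting an SSML document like the one above over HTTP. The route and form field name below are assumptions for illustration, since this excerpt does not state them:

```python
import requests

ssml = """<speak lang="zh" format="mp3" length="1.2">
    <voice id="92">这几天心里颇不宁静。</voice><break time="2s"/>
    <voice id="125">今晚在院子里坐着乘凉。</voice>
</speak>"""

# Hypothetical endpoint; the actual route is defined by the server code.
resp = requests.post("http://127.0.0.1:23456/voice/ssml", data={"ssml": ssml})
resp.raise_for_status()
with open("output.mp3", "wb") as f:
    f.write(resp.content)  # format follows the speak element's format attribute
```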
# Community

Currently there is only a [QQ group](https://qm.qq.com/cgi-bin/qm/qr?k=-1GknIe4uXrkmbDKBGKa1aAUteq40qs_&jump_from=webapi&authKey=x5YYt6Dggs1ZqWxvZqvj3fV8VUnxRyXm5S5Kzntc78+Nv3iXOIawplGip9LWuNR/)

# Acknowledgements

- vits: https://github.com/jaywalnut310/vits
- MoeGoe: https://github.com/CjangCjengh/MoeGoe
- emotional-vits: https://github.com/innnky/emotional-vits
- vits-uma-genshin-honkai: https://huggingface.co/spaces/zomehwh/vits-uma-genshin-honkai
- vits_chinese: https://github.com/PlayVoice/vits_chinese
- Bert_VITS2: https://github.com/fishaudio/Bert-VITS2
spaces/Artrajz/vits-simple-api/contants.py
DELETED
@@ -1,7 +0,0 @@
-from enum import Enum
-
-class ModelType(Enum):
-    VITS = "VITS"
-    HUBERT_VITS = "HUBERT-VITS"
-    W2V2_VITS = "W2V2-VITS"
-    BERT_VITS2 = "BERT-VITS2"
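A minimal sketch of how such an enum is typically used for dispatch, as the module existed before this commit; the handler strings here are hypothetical placeholders, not code from the repository:

```python
from contants import ModelType  # note: the module's filename is spelled "contants"

def route_request(model_type: ModelType, text: str) -> str:
    # Dispatch on the model family; real handlers would call the synthesizers.
    if model_type is ModelType.VITS:
        return f"synthesize with VITS: {text}"        # hypothetical placeholder
    if model_type is ModelType.BERT_VITS2:
        return f"synthesize with Bert-VITS2: {text}"  # hypothetical placeholder
    raise ValueError(f"unsupported model type: {model_type}")

print(ModelType("VITS"))  # lookup by value -> ModelType.VITS
```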
spaces/Asahi402/Real-CUGAN/upcunet_v3.py
DELETED
@@ -1,714 +0,0 @@
-import torch
-from torch import nn as nn
-from torch.nn import functional as F
-import os, sys
-import numpy as np
-
-root_path = os.path.abspath('.')
-sys.path.append(root_path)
-
-
-class SEBlock(nn.Module):
-    def __init__(self, in_channels, reduction=8, bias=False):
-        super(SEBlock, self).__init__()
-        self.conv1 = nn.Conv2d(in_channels, in_channels // reduction, 1, 1, 0, bias=bias)
-        self.conv2 = nn.Conv2d(in_channels // reduction, in_channels, 1, 1, 0, bias=bias)
-
-    def forward(self, x):
-        if ("Half" in x.type()):  # torch.HalfTensor/torch.cuda.HalfTensor
-            x0 = torch.mean(x.float(), dim=(2, 3), keepdim=True).half()
-        else:
-            x0 = torch.mean(x, dim=(2, 3), keepdim=True)
-        x0 = self.conv1(x0)
-        x0 = F.relu(x0, inplace=True)
-        x0 = self.conv2(x0)
-        x0 = torch.sigmoid(x0)
-        x = torch.mul(x, x0)
-        return x
-
-    def forward_mean(self, x, x0):
-        x0 = self.conv1(x0)
-        x0 = F.relu(x0, inplace=True)
-        x0 = self.conv2(x0)
-        x0 = torch.sigmoid(x0)
-        x = torch.mul(x, x0)
-        return x
-
-
-class UNetConv(nn.Module):
-    def __init__(self, in_channels, mid_channels, out_channels, se):
-        super(UNetConv, self).__init__()
-        self.conv = nn.Sequential(
-            nn.Conv2d(in_channels, mid_channels, 3, 1, 0),
-            nn.LeakyReLU(0.1, inplace=True),
-            nn.Conv2d(mid_channels, out_channels, 3, 1, 0),
-            nn.LeakyReLU(0.1, inplace=True),
-        )
-        if se:
-            self.seblock = SEBlock(out_channels, reduction=8, bias=True)
-        else:
-            self.seblock = None
-
-    def forward(self, x):
-        z = self.conv(x)
-        if self.seblock is not None:
-            z = self.seblock(z)
-        return z
-
-
-class UNet1(nn.Module):
-    def __init__(self, in_channels, out_channels, deconv):
-        super(UNet1, self).__init__()
-        self.conv1 = UNetConv(in_channels, 32, 64, se=False)
-        self.conv1_down = nn.Conv2d(64, 64, 2, 2, 0)
-        self.conv2 = UNetConv(64, 128, 64, se=True)
-        self.conv2_up = nn.ConvTranspose2d(64, 64, 2, 2, 0)
-        self.conv3 = nn.Conv2d(64, 64, 3, 1, 0)
-
-        if deconv:
-            self.conv_bottom = nn.ConvTranspose2d(64, out_channels, 4, 2, 3)
-        else:
-            self.conv_bottom = nn.Conv2d(64, out_channels, 3, 1, 0)
-
-        for m in self.modules():
-            if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
-                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
-            elif isinstance(m, nn.Linear):
-                nn.init.normal_(m.weight, 0, 0.01)
-                if m.bias is not None:
-                    nn.init.constant_(m.bias, 0)
-
-    def forward(self, x):
-        x1 = self.conv1(x)
-        x2 = self.conv1_down(x1)
-        x2 = F.leaky_relu(x2, 0.1, inplace=True)
-        x2 = self.conv2(x2)
-        x2 = self.conv2_up(x2)
-        x2 = F.leaky_relu(x2, 0.1, inplace=True)
-
-        x1 = F.pad(x1, (-4, -4, -4, -4))
-        x3 = self.conv3(x1 + x2)
-        x3 = F.leaky_relu(x3, 0.1, inplace=True)
-        z = self.conv_bottom(x3)
-        return z
-
-    def forward_a(self, x):
-        x1 = self.conv1(x)
-        x2 = self.conv1_down(x1)
-        x2 = F.leaky_relu(x2, 0.1, inplace=True)
-        x2 = self.conv2.conv(x2)
-        return x1, x2
-
-    def forward_b(self, x1, x2):
-        x2 = self.conv2_up(x2)
-        x2 = F.leaky_relu(x2, 0.1, inplace=True)
-
-        x1 = F.pad(x1, (-4, -4, -4, -4))
-        x3 = self.conv3(x1 + x2)
-        x3 = F.leaky_relu(x3, 0.1, inplace=True)
-        z = self.conv_bottom(x3)
-        return z
-
-
-class UNet1x3(nn.Module):
-    def __init__(self, in_channels, out_channels, deconv):
-        super(UNet1x3, self).__init__()
-        self.conv1 = UNetConv(in_channels, 32, 64, se=False)
-        self.conv1_down = nn.Conv2d(64, 64, 2, 2, 0)
-        self.conv2 = UNetConv(64, 128, 64, se=True)
-        self.conv2_up = nn.ConvTranspose2d(64, 64, 2, 2, 0)
-        self.conv3 = nn.Conv2d(64, 64, 3, 1, 0)
-
-        if deconv:
-            self.conv_bottom = nn.ConvTranspose2d(64, out_channels, 5, 3, 2)
-        else:
-            self.conv_bottom = nn.Conv2d(64, out_channels, 3, 1, 0)
-
-        for m in self.modules():
-            if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
-                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
-            elif isinstance(m, nn.Linear):
-                nn.init.normal_(m.weight, 0, 0.01)
-                if m.bias is not None:
-                    nn.init.constant_(m.bias, 0)
-
-    def forward(self, x):
-        x1 = self.conv1(x)
-        x2 = self.conv1_down(x1)
-        x2 = F.leaky_relu(x2, 0.1, inplace=True)
-        x2 = self.conv2(x2)
-        x2 = self.conv2_up(x2)
-        x2 = F.leaky_relu(x2, 0.1, inplace=True)
-
-        x1 = F.pad(x1, (-4, -4, -4, -4))
-        x3 = self.conv3(x1 + x2)
-        x3 = F.leaky_relu(x3, 0.1, inplace=True)
-        z = self.conv_bottom(x3)
-        return z
-
-    def forward_a(self, x):
-        x1 = self.conv1(x)
-        x2 = self.conv1_down(x1)
-        x2 = F.leaky_relu(x2, 0.1, inplace=True)
-        x2 = self.conv2.conv(x2)
-        return x1, x2
-
-    def forward_b(self, x1, x2):
-        x2 = self.conv2_up(x2)
-        x2 = F.leaky_relu(x2, 0.1, inplace=True)
-
-        x1 = F.pad(x1, (-4, -4, -4, -4))
-        x3 = self.conv3(x1 + x2)
-        x3 = F.leaky_relu(x3, 0.1, inplace=True)
-        z = self.conv_bottom(x3)
-        return z
-
-
-class UNet2(nn.Module):
-    def __init__(self, in_channels, out_channels, deconv):
-        super(UNet2, self).__init__()
-
-        self.conv1 = UNetConv(in_channels, 32, 64, se=False)
-        self.conv1_down = nn.Conv2d(64, 64, 2, 2, 0)
-        self.conv2 = UNetConv(64, 64, 128, se=True)
-        self.conv2_down = nn.Conv2d(128, 128, 2, 2, 0)
-        self.conv3 = UNetConv(128, 256, 128, se=True)
-        self.conv3_up = nn.ConvTranspose2d(128, 128, 2, 2, 0)
-        self.conv4 = UNetConv(128, 64, 64, se=True)
-        self.conv4_up = nn.ConvTranspose2d(64, 64, 2, 2, 0)
-        self.conv5 = nn.Conv2d(64, 64, 3, 1, 0)
-
-        if deconv:
-            self.conv_bottom = nn.ConvTranspose2d(64, out_channels, 4, 2, 3)
-        else:
-            self.conv_bottom = nn.Conv2d(64, out_channels, 3, 1, 0)
-
-        for m in self.modules():
-            if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
-                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
-            elif isinstance(m, nn.Linear):
-                nn.init.normal_(m.weight, 0, 0.01)
-                if m.bias is not None:
-                    nn.init.constant_(m.bias, 0)
-
-    def forward(self, x):
-        x1 = self.conv1(x)
-        x2 = self.conv1_down(x1)
-        x2 = F.leaky_relu(x2, 0.1, inplace=True)
-        x2 = self.conv2(x2)
-
-        x3 = self.conv2_down(x2)
-        x3 = F.leaky_relu(x3, 0.1, inplace=True)
-        x3 = self.conv3(x3)
-        x3 = self.conv3_up(x3)
-        x3 = F.leaky_relu(x3, 0.1, inplace=True)
-
-        x2 = F.pad(x2, (-4, -4, -4, -4))
-        x4 = self.conv4(x2 + x3)
-        x4 = self.conv4_up(x4)
-        x4 = F.leaky_relu(x4, 0.1, inplace=True)
-
-        x1 = F.pad(x1, (-16, -16, -16, -16))
-        x5 = self.conv5(x1 + x4)
-        x5 = F.leaky_relu(x5, 0.1, inplace=True)
-
-        z = self.conv_bottom(x5)
-        return z
-
-    def forward_a(self, x):  # conv2/3/4 end with an SE block
-        x1 = self.conv1(x)
-        x2 = self.conv1_down(x1)
-        x2 = F.leaky_relu(x2, 0.1, inplace=True)
-        x2 = self.conv2.conv(x2)
-        return x1, x2
-
-    def forward_b(self, x2):  # conv2/3/4 end with an SE block
-        x3 = self.conv2_down(x2)
-        x3 = F.leaky_relu(x3, 0.1, inplace=True)
-        x3 = self.conv3.conv(x3)
-        return x3
-
-    def forward_c(self, x2, x3):  # conv2/3/4 end with an SE block
-        x3 = self.conv3_up(x3)
-        x3 = F.leaky_relu(x3, 0.1, inplace=True)
-
-        x2 = F.pad(x2, (-4, -4, -4, -4))
-        x4 = self.conv4.conv(x2 + x3)
-        return x4
-
-    def forward_d(self, x1, x4):  # conv2/3/4 end with an SE block
-        x4 = self.conv4_up(x4)
-        x4 = F.leaky_relu(x4, 0.1, inplace=True)
-
-        x1 = F.pad(x1, (-16, -16, -16, -16))
-        x5 = self.conv5(x1 + x4)
-        x5 = F.leaky_relu(x5, 0.1, inplace=True)
-
-        z = self.conv_bottom(x5)
-        return z
-
-
-class UpCunet2x(nn.Module):  # seamless tiling, lossless throughout
-    def __init__(self, in_channels=3, out_channels=3):
-        super(UpCunet2x, self).__init__()
-        self.unet1 = UNet1(in_channels, out_channels, deconv=True)
-        self.unet2 = UNet2(in_channels, out_channels, deconv=False)
-
-    def forward(self, x, tile_mode):  # 1.7G
-        n, c, h0, w0 = x.shape
-        if (tile_mode == 0):  # no tiling
-            ph = ((h0 - 1) // 2 + 1) * 2
-            pw = ((w0 - 1) // 2 + 1) * 2
-            x = F.pad(x, (18, 18 + pw - w0, 18, 18 + ph - h0), 'reflect')  # must be divisible by 2
-            x = self.unet1.forward(x)
-            x0 = self.unet2.forward(x)
-            x1 = F.pad(x, (-20, -20, -20, -20))
-            x = torch.add(x0, x1)
-            if (w0 != pw or h0 != ph): x = x[:, :, :h0 * 2, :w0 * 2]
-            return x
-        elif (tile_mode == 1):  # halve the longer side
-            if (w0 >= h0):
-                crop_size_w = ((w0 - 1) // 4 * 4 + 4) // 2  # must be divisible by 2 after halving, hence by 4 first
-                crop_size_h = (h0 - 1) // 2 * 2 + 2  # divisible by 2
-            else:
-                crop_size_h = ((h0 - 1) // 4 * 4 + 4) // 2  # must be divisible by 2 after halving, hence by 4 first
-                crop_size_w = (w0 - 1) // 2 * 2 + 2  # divisible by 2
-            crop_size = (crop_size_h, crop_size_w)  # 6.6G
-        elif (tile_mode == 2):  # halve both h and w
-            crop_size = (((h0 - 1) // 4 * 4 + 4) // 2, ((w0 - 1) // 4 * 4 + 4) // 2)  # 5.6G
-        elif (tile_mode == 3):  # one third of both h and w
-            crop_size = (((h0 - 1) // 6 * 6 + 6) // 3, ((w0 - 1) // 6 * 6 + 6) // 3)  # 4.2G
-        elif (tile_mode == 4):  # one quarter of both h and w
-            crop_size = (((h0 - 1) // 8 * 8 + 8) // 4, ((w0 - 1) // 8 * 8 + 8) // 4)  # 3.7G
-        ph = ((h0 - 1) // crop_size[0] + 1) * crop_size[0]
-        pw = ((w0 - 1) // crop_size[1] + 1) * crop_size[1]
-        x = F.pad(x, (18, 18 + pw - w0, 18, 18 + ph - h0), 'reflect')
-        n, c, h, w = x.shape
-        se_mean0 = torch.zeros((n, 64, 1, 1)).to(x.device)
-        if ("Half" in x.type()):
-            se_mean0 = se_mean0.half()
-        n_patch = 0
-        tmp_dict = {}
-        opt_res_dict = {}
-        for i in range(0, h - 36, crop_size[0]):
-            tmp_dict[i] = {}
-            for j in range(0, w - 36, crop_size[1]):
-                x_crop = x[:, :, i:i + crop_size[0] + 36, j:j + crop_size[1] + 36]
-                n, c1, h1, w1 = x_crop.shape
-                tmp0, x_crop = self.unet1.forward_a(x_crop)
-                if ("Half" in x.type()):  # torch.HalfTensor/torch.cuda.HalfTensor
-                    tmp_se_mean = torch.mean(x_crop.float(), dim=(2, 3), keepdim=True).half()
-                else:
-                    tmp_se_mean = torch.mean(x_crop, dim=(2, 3), keepdim=True)
-                se_mean0 += tmp_se_mean
-                n_patch += 1
-                tmp_dict[i][j] = (tmp0, x_crop)
-        se_mean0 /= n_patch
-        se_mean1 = torch.zeros((n, 128, 1, 1)).to(x.device)  # 64#128#128#64
-        if ("Half" in x.type()):
-            se_mean1 = se_mean1.half()
-        for i in range(0, h - 36, crop_size[0]):
-            for j in range(0, w - 36, crop_size[1]):
-                tmp0, x_crop = tmp_dict[i][j]
-                x_crop = self.unet1.conv2.seblock.forward_mean(x_crop, se_mean0)
-                opt_unet1 = self.unet1.forward_b(tmp0, x_crop)
-                tmp_x1, tmp_x2 = self.unet2.forward_a(opt_unet1)
-                if ("Half" in x.type()):  # torch.HalfTensor/torch.cuda.HalfTensor
-                    tmp_se_mean = torch.mean(tmp_x2.float(), dim=(2, 3), keepdim=True).half()
-                else:
-                    tmp_se_mean = torch.mean(tmp_x2, dim=(2, 3), keepdim=True)
-                se_mean1 += tmp_se_mean
-                tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2)
-        se_mean1 /= n_patch
-        se_mean0 = torch.zeros((n, 128, 1, 1)).to(x.device)  # 64#128#128#64
-        if ("Half" in x.type()):
-            se_mean0 = se_mean0.half()
-        for i in range(0, h - 36, crop_size[0]):
-            for j in range(0, w - 36, crop_size[1]):
-                opt_unet1, tmp_x1, tmp_x2 = tmp_dict[i][j]
-                tmp_x2 = self.unet2.conv2.seblock.forward_mean(tmp_x2, se_mean1)
-                tmp_x3 = self.unet2.forward_b(tmp_x2)
-                if ("Half" in x.type()):  # torch.HalfTensor/torch.cuda.HalfTensor
-                    tmp_se_mean = torch.mean(tmp_x3.float(), dim=(2, 3), keepdim=True).half()
-                else:
-                    tmp_se_mean = torch.mean(tmp_x3, dim=(2, 3), keepdim=True)
-                se_mean0 += tmp_se_mean
-                tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2, tmp_x3)
-        se_mean0 /= n_patch
-        se_mean1 = torch.zeros((n, 64, 1, 1)).to(x.device)  # 64#128#128#64
-        if ("Half" in x.type()):
-            se_mean1 = se_mean1.half()
-        for i in range(0, h - 36, crop_size[0]):
-            for j in range(0, w - 36, crop_size[1]):
-                opt_unet1, tmp_x1, tmp_x2, tmp_x3 = tmp_dict[i][j]
-                tmp_x3 = self.unet2.conv3.seblock.forward_mean(tmp_x3, se_mean0)
-                tmp_x4 = self.unet2.forward_c(tmp_x2, tmp_x3)
-                if ("Half" in x.type()):  # torch.HalfTensor/torch.cuda.HalfTensor
-                    tmp_se_mean = torch.mean(tmp_x4.float(), dim=(2, 3), keepdim=True).half()
-                else:
-                    tmp_se_mean = torch.mean(tmp_x4, dim=(2, 3), keepdim=True)
-                se_mean1 += tmp_se_mean
-                tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x4)
-        se_mean1 /= n_patch
-        for i in range(0, h - 36, crop_size[0]):
-            opt_res_dict[i] = {}
-            for j in range(0, w - 36, crop_size[1]):
-                opt_unet1, tmp_x1, tmp_x4 = tmp_dict[i][j]
-                tmp_x4 = self.unet2.conv4.seblock.forward_mean(tmp_x4, se_mean1)
-                x0 = self.unet2.forward_d(tmp_x1, tmp_x4)
-                x1 = F.pad(opt_unet1, (-20, -20, -20, -20))
-                x_crop = torch.add(x0, x1)  # x0 is the final output of unet2
-                opt_res_dict[i][j] = x_crop
-        del tmp_dict
-        torch.cuda.empty_cache()
-        res = torch.zeros((n, c, h * 2 - 72, w * 2 - 72)).to(x.device)
-        if ("Half" in x.type()):
-            res = res.half()
-        for i in range(0, h - 36, crop_size[0]):
-            for j in range(0, w - 36, crop_size[1]):
-                res[:, :, i * 2:i * 2 + h1 * 2 - 72, j * 2:j * 2 + w1 * 2 - 72] = opt_res_dict[i][j]
-        del opt_res_dict
-        torch.cuda.empty_cache()
-        if (w0 != pw or h0 != ph): res = res[:, :, :h0 * 2, :w0 * 2]
-        return res
-
-
-class UpCunet3x(nn.Module):  # seamless tiling, lossless throughout
-    def __init__(self, in_channels=3, out_channels=3):
-        super(UpCunet3x, self).__init__()
-        self.unet1 = UNet1x3(in_channels, out_channels, deconv=True)
-        self.unet2 = UNet2(in_channels, out_channels, deconv=False)
-
-    def forward(self, x, tile_mode):  # 1.7G
-        n, c, h0, w0 = x.shape
-        if (tile_mode == 0):  # no tiling
-            ph = ((h0 - 1) // 4 + 1) * 4
-            pw = ((w0 - 1) // 4 + 1) * 4
-            x = F.pad(x, (14, 14 + pw - w0, 14, 14 + ph - h0), 'reflect')  # must be divisible by 4
-            x = self.unet1.forward(x)
-            x0 = self.unet2.forward(x)
-            x1 = F.pad(x, (-20, -20, -20, -20))
-            x = torch.add(x0, x1)
-            if (w0 != pw or h0 != ph): x = x[:, :, :h0 * 3, :w0 * 3]
-            return x
-        elif (tile_mode == 1):  # halve the longer side
-            if (w0 >= h0):
-                crop_size_w = ((w0 - 1) // 8 * 8 + 8) // 2  # must be divisible by 4 after halving, hence by 8 first
-                crop_size_h = (h0 - 1) // 4 * 4 + 4  # divisible by 4
-            else:
-                crop_size_h = ((h0 - 1) // 8 * 8 + 8) // 2  # must be divisible by 4 after halving, hence by 8 first
-                crop_size_w = (w0 - 1) // 4 * 4 + 4  # divisible by 4
-            crop_size = (crop_size_h, crop_size_w)  # 6.6G
-        elif (tile_mode == 2):  # halve both h and w
-            crop_size = (((h0 - 1) // 8 * 8 + 8) // 2, ((w0 - 1) // 8 * 8 + 8) // 2)  # 5.6G
-        elif (tile_mode == 3):  # one third of both h and w
-            crop_size = (((h0 - 1) // 12 * 12 + 12) // 3, ((w0 - 1) // 12 * 12 + 12) // 3)  # 4.2G
-        elif (tile_mode == 4):  # one quarter of both h and w
-            crop_size = (((h0 - 1) // 16 * 16 + 16) // 4, ((w0 - 1) // 16 * 16 + 16) // 4)  # 3.7G
-        ph = ((h0 - 1) // crop_size[0] + 1) * crop_size[0]
-        pw = ((w0 - 1) // crop_size[1] + 1) * crop_size[1]
-        x = F.pad(x, (14, 14 + pw - w0, 14, 14 + ph - h0), 'reflect')
-        n, c, h, w = x.shape
-        se_mean0 = torch.zeros((n, 64, 1, 1)).to(x.device)
-        if ("Half" in x.type()):
-            se_mean0 = se_mean0.half()
-        n_patch = 0
-        tmp_dict = {}
-        opt_res_dict = {}
-        for i in range(0, h - 28, crop_size[0]):
-            tmp_dict[i] = {}
-            for j in range(0, w - 28, crop_size[1]):
-                x_crop = x[:, :, i:i + crop_size[0] + 28, j:j + crop_size[1] + 28]
-                n, c1, h1, w1 = x_crop.shape
-                tmp0, x_crop = self.unet1.forward_a(x_crop)
-                if ("Half" in x.type()):  # torch.HalfTensor/torch.cuda.HalfTensor
-                    tmp_se_mean = torch.mean(x_crop.float(), dim=(2, 3), keepdim=True).half()
-                else:
-                    tmp_se_mean = torch.mean(x_crop, dim=(2, 3), keepdim=True)
-                se_mean0 += tmp_se_mean
-                n_patch += 1
-                tmp_dict[i][j] = (tmp0, x_crop)
-        se_mean0 /= n_patch
-        se_mean1 = torch.zeros((n, 128, 1, 1)).to(x.device)  # 64#128#128#64
-        if ("Half" in x.type()):
-            se_mean1 = se_mean1.half()
-        for i in range(0, h - 28, crop_size[0]):
-            for j in range(0, w - 28, crop_size[1]):
-                tmp0, x_crop = tmp_dict[i][j]
-                x_crop = self.unet1.conv2.seblock.forward_mean(x_crop, se_mean0)
-                opt_unet1 = self.unet1.forward_b(tmp0, x_crop)
-                tmp_x1, tmp_x2 = self.unet2.forward_a(opt_unet1)
-                if ("Half" in x.type()):  # torch.HalfTensor/torch.cuda.HalfTensor
-                    tmp_se_mean = torch.mean(tmp_x2.float(), dim=(2, 3), keepdim=True).half()
-                else:
-                    tmp_se_mean = torch.mean(tmp_x2, dim=(2, 3), keepdim=True)
-                se_mean1 += tmp_se_mean
-                tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2)
-        se_mean1 /= n_patch
-        se_mean0 = torch.zeros((n, 128, 1, 1)).to(x.device)  # 64#128#128#64
-        if ("Half" in x.type()):
-            se_mean0 = se_mean0.half()
-        for i in range(0, h - 28, crop_size[0]):
-            for j in range(0, w - 28, crop_size[1]):
-                opt_unet1, tmp_x1, tmp_x2 = tmp_dict[i][j]
-                tmp_x2 = self.unet2.conv2.seblock.forward_mean(tmp_x2, se_mean1)
-                tmp_x3 = self.unet2.forward_b(tmp_x2)
-                if ("Half" in x.type()):  # torch.HalfTensor/torch.cuda.HalfTensor
-                    tmp_se_mean = torch.mean(tmp_x3.float(), dim=(2, 3), keepdim=True).half()
-                else:
-                    tmp_se_mean = torch.mean(tmp_x3, dim=(2, 3), keepdim=True)
-                se_mean0 += tmp_se_mean
-                tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2, tmp_x3)
-        se_mean0 /= n_patch
-        se_mean1 = torch.zeros((n, 64, 1, 1)).to(x.device)  # 64#128#128#64
-        if ("Half" in x.type()):
-            se_mean1 = se_mean1.half()
-        for i in range(0, h - 28, crop_size[0]):
-            for j in range(0, w - 28, crop_size[1]):
-                opt_unet1, tmp_x1, tmp_x2, tmp_x3 = tmp_dict[i][j]
-                tmp_x3 = self.unet2.conv3.seblock.forward_mean(tmp_x3, se_mean0)
-                tmp_x4 = self.unet2.forward_c(tmp_x2, tmp_x3)
-                if ("Half" in x.type()):  # torch.HalfTensor/torch.cuda.HalfTensor
-                    tmp_se_mean = torch.mean(tmp_x4.float(), dim=(2, 3), keepdim=True).half()
-                else:
-                    tmp_se_mean = torch.mean(tmp_x4, dim=(2, 3), keepdim=True)
-                se_mean1 += tmp_se_mean
-                tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x4)
-        se_mean1 /= n_patch
-        for i in range(0, h - 28, crop_size[0]):
-            opt_res_dict[i] = {}
-            for j in range(0, w - 28, crop_size[1]):
-                opt_unet1, tmp_x1, tmp_x4 = tmp_dict[i][j]
-                tmp_x4 = self.unet2.conv4.seblock.forward_mean(tmp_x4, se_mean1)
-                x0 = self.unet2.forward_d(tmp_x1, tmp_x4)
-                x1 = F.pad(opt_unet1, (-20, -20, -20, -20))
-                x_crop = torch.add(x0, x1)  # x0 is the final output of unet2
-                opt_res_dict[i][j] = x_crop
-        del tmp_dict
-        torch.cuda.empty_cache()
-        res = torch.zeros((n, c, h * 3 - 84, w * 3 - 84)).to(x.device)
-        if ("Half" in x.type()):
-            res = res.half()
-        for i in range(0, h - 28, crop_size[0]):
-            for j in range(0, w - 28, crop_size[1]):
-                res[:, :, i * 3:i * 3 + h1 * 3 - 84, j * 3:j * 3 + w1 * 3 - 84] = opt_res_dict[i][j]
-        del opt_res_dict
-        torch.cuda.empty_cache()
-        if (w0 != pw or h0 != ph): res = res[:, :, :h0 * 3, :w0 * 3]
-        return res
-
-
-class UpCunet4x(nn.Module):  # seamless tiling, lossless throughout
-    def __init__(self, in_channels=3, out_channels=3):
-        super(UpCunet4x, self).__init__()
-        self.unet1 = UNet1(in_channels, 64, deconv=True)
-        self.unet2 = UNet2(64, 64, deconv=False)
-        self.ps = nn.PixelShuffle(2)
-        self.conv_final = nn.Conv2d(64, 12, 3, 1, padding=0, bias=True)
-
-    def forward(self, x, tile_mode):
-        n, c, h0, w0 = x.shape
-        x00 = x
-        if (tile_mode == 0):  # no tiling
-            ph = ((h0 - 1) // 2 + 1) * 2
-            pw = ((w0 - 1) // 2 + 1) * 2
-            x = F.pad(x, (19, 19 + pw - w0, 19, 19 + ph - h0), 'reflect')  # must be divisible by 2
-            x = self.unet1.forward(x)
-            x0 = self.unet2.forward(x)
-            x1 = F.pad(x, (-20, -20, -20, -20))
-            x = torch.add(x0, x1)
-            x = self.conv_final(x)
-            x = F.pad(x, (-1, -1, -1, -1))
-            x = self.ps(x)
-            if (w0 != pw or h0 != ph): x = x[:, :, :h0 * 4, :w0 * 4]
-            x += F.interpolate(x00, scale_factor=4, mode='nearest')
-            return x
-        elif (tile_mode == 1):  # halve the longer side
-            if (w0 >= h0):
-                crop_size_w = ((w0 - 1) // 4 * 4 + 4) // 2  # must be divisible by 2 after halving, hence by 4 first
-                crop_size_h = (h0 - 1) // 2 * 2 + 2  # divisible by 2
-            else:
-                crop_size_h = ((h0 - 1) // 4 * 4 + 4) // 2  # must be divisible by 2 after halving, hence by 4 first
-                crop_size_w = (w0 - 1) // 2 * 2 + 2  # divisible by 2
-            crop_size = (crop_size_h, crop_size_w)  # 6.6G
-        elif (tile_mode == 2):  # halve both h and w
-            crop_size = (((h0 - 1) // 4 * 4 + 4) // 2, ((w0 - 1) // 4 * 4 + 4) // 2)  # 5.6G
-        elif (tile_mode == 3):  # one third of both h and w
-            crop_size = (((h0 - 1) // 6 * 6 + 6) // 3, ((w0 - 1) // 6 * 6 + 6) // 3)  # 4.1G
-        elif (tile_mode == 4):  # one quarter of both h and w
-            crop_size = (((h0 - 1) // 8 * 8 + 8) // 4, ((w0 - 1) // 8 * 8 + 8) // 4)  # 3.7G
-        ph = ((h0 - 1) // crop_size[0] + 1) * crop_size[0]
-        pw = ((w0 - 1) // crop_size[1] + 1) * crop_size[1]
-        x = F.pad(x, (19, 19 + pw - w0, 19, 19 + ph - h0), 'reflect')
-        n, c, h, w = x.shape
-        se_mean0 = torch.zeros((n, 64, 1, 1)).to(x.device)
-        if ("Half" in x.type()):
-            se_mean0 = se_mean0.half()
-        n_patch = 0
-        tmp_dict = {}
-        opt_res_dict = {}
-        for i in range(0, h - 38, crop_size[0]):
-            tmp_dict[i] = {}
-            for j in range(0, w - 38, crop_size[1]):
-                x_crop = x[:, :, i:i + crop_size[0] + 38, j:j + crop_size[1] + 38]
-                n, c1, h1, w1 = x_crop.shape
-                tmp0, x_crop = self.unet1.forward_a(x_crop)
-                if ("Half" in x.type()):  # torch.HalfTensor/torch.cuda.HalfTensor
-                    tmp_se_mean = torch.mean(x_crop.float(), dim=(2, 3), keepdim=True).half()
-                else:
-                    tmp_se_mean = torch.mean(x_crop, dim=(2, 3), keepdim=True)
-                se_mean0 += tmp_se_mean
-                n_patch += 1
-                tmp_dict[i][j] = (tmp0, x_crop)
-        se_mean0 /= n_patch
-        se_mean1 = torch.zeros((n, 128, 1, 1)).to(x.device)  # 64#128#128#64
-        if ("Half" in x.type()):
-            se_mean1 = se_mean1.half()
-        for i in range(0, h - 38, crop_size[0]):
-            for j in range(0, w - 38, crop_size[1]):
-                tmp0, x_crop = tmp_dict[i][j]
-                x_crop = self.unet1.conv2.seblock.forward_mean(x_crop, se_mean0)
-                opt_unet1 = self.unet1.forward_b(tmp0, x_crop)
-                tmp_x1, tmp_x2 = self.unet2.forward_a(opt_unet1)
-                if ("Half" in x.type()):  # torch.HalfTensor/torch.cuda.HalfTensor
-                    tmp_se_mean = torch.mean(tmp_x2.float(), dim=(2, 3), keepdim=True).half()
-                else:
-                    tmp_se_mean = torch.mean(tmp_x2, dim=(2, 3), keepdim=True)
-                se_mean1 += tmp_se_mean
-                tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2)
-        se_mean1 /= n_patch
-        se_mean0 = torch.zeros((n, 128, 1, 1)).to(x.device)  # 64#128#128#64
-        if ("Half" in x.type()):
-            se_mean0 = se_mean0.half()
-        for i in range(0, h - 38, crop_size[0]):
-            for j in range(0, w - 38, crop_size[1]):
-                opt_unet1, tmp_x1, tmp_x2 = tmp_dict[i][j]
-                tmp_x2 = self.unet2.conv2.seblock.forward_mean(tmp_x2, se_mean1)
-                tmp_x3 = self.unet2.forward_b(tmp_x2)
-                if ("Half" in x.type()):  # torch.HalfTensor/torch.cuda.HalfTensor
-                    tmp_se_mean = torch.mean(tmp_x3.float(), dim=(2, 3), keepdim=True).half()
-                else:
-                    tmp_se_mean = torch.mean(tmp_x3, dim=(2, 3), keepdim=True)
-                se_mean0 += tmp_se_mean
-                tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2, tmp_x3)
-        se_mean0 /= n_patch
-        se_mean1 = torch.zeros((n, 64, 1, 1)).to(x.device)  # 64#128#128#64
-        if ("Half" in x.type()):
-            se_mean1 = se_mean1.half()
-        for i in range(0, h - 38, crop_size[0]):
-            for j in range(0, w - 38, crop_size[1]):
-                opt_unet1, tmp_x1, tmp_x2, tmp_x3 = tmp_dict[i][j]
-                tmp_x3 = self.unet2.conv3.seblock.forward_mean(tmp_x3, se_mean0)
-                tmp_x4 = self.unet2.forward_c(tmp_x2, tmp_x3)
-                if ("Half" in x.type()):  # torch.HalfTensor/torch.cuda.HalfTensor
-                    tmp_se_mean = torch.mean(tmp_x4.float(), dim=(2, 3), keepdim=True).half()
-                else:
-                    tmp_se_mean = torch.mean(tmp_x4, dim=(2, 3), keepdim=True)
-                se_mean1 += tmp_se_mean
-                tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x4)
-        se_mean1 /= n_patch
-        for i in range(0, h - 38, crop_size[0]):
-            opt_res_dict[i] = {}
-            for j in range(0, w - 38, crop_size[1]):
-                opt_unet1, tmp_x1, tmp_x4 = tmp_dict[i][j]
-                tmp_x4 = self.unet2.conv4.seblock.forward_mean(tmp_x4, se_mean1)
-                x0 = self.unet2.forward_d(tmp_x1, tmp_x4)
-                x1 = F.pad(opt_unet1, (-20, -20, -20, -20))
-                x_crop = torch.add(x0, x1)  # x0 is the final output of unet2
-                x_crop = self.conv_final(x_crop)
-                x_crop = F.pad(x_crop, (-1, -1, -1, -1))
-                x_crop = self.ps(x_crop)
-                opt_res_dict[i][j] = x_crop
-        del tmp_dict
-        torch.cuda.empty_cache()
-        res = torch.zeros((n, c, h * 4 - 152, w * 4 - 152)).to(x.device)
-        if ("Half" in x.type()):
-            res = res.half()
-        for i in range(0, h - 38, crop_size[0]):
-            for j in range(0, w - 38, crop_size[1]):
-                # print(opt_res_dict[i][j].shape,res[:, :, i * 4:i * 4 + h1 * 4 - 144, j * 4:j * 4 + w1 * 4 - 144].shape)
-                res[:, :, i * 4:i * 4 + h1 * 4 - 152, j * 4:j * 4 + w1 * 4 - 152] = opt_res_dict[i][j]
-        del opt_res_dict
-        torch.cuda.empty_cache()
-        if (w0 != pw or h0 != ph): res = res[:, :, :h0 * 4, :w0 * 4]
-        res += F.interpolate(x00, scale_factor=4, mode='nearest')
-        return res
-
-
-class RealWaifuUpScaler(object):
-    def __init__(self, scale, weight_path, half, device):
-        weight = torch.load(weight_path, map_location="cpu")
-        self.model = eval("UpCunet%sx" % scale)()
-        if (half == True):
-            self.model = self.model.half().to(device)
-        else:
-            self.model = self.model.to(device)
-        self.model.load_state_dict(weight, strict=True)
-        self.model.eval()
-        self.half = half
-        self.device = device
-
-    def np2tensor(self, np_frame):
-        if (self.half == False):
-            return torch.from_numpy(np.transpose(np_frame, (2, 0, 1))).unsqueeze(0).to(self.device).float() / 255
-        else:
-            return torch.from_numpy(np.transpose(np_frame, (2, 0, 1))).unsqueeze(0).to(self.device).half() / 255
-
-    def tensor2np(self, tensor):
-        if (self.half == False):
-            return (
-                np.transpose((tensor.data.squeeze() * 255.0).round().clamp_(0, 255).byte().cpu().numpy(), (1, 2, 0)))
-        else:
-            return (np.transpose((tensor.data.squeeze().float() * 255.0).round().clamp_(0, 255).byte().cpu().numpy(),
-                                 (1, 2, 0)))
-
-    def __call__(self, frame, tile_mode):
-        with torch.no_grad():
-            tensor = self.np2tensor(frame)
-            result = self.tensor2np(self.model(tensor, tile_mode))
-        return result
-
-
-if __name__ == "__main__":
-    ########### inference_img
-    import time, cv2, sys
-    from time import time as ttime
-
-    for weight_path, scale in [("weights_v3/up2x-latest-denoise3x.pth", 2), ("weights_v3/up3x-latest-denoise3x.pth", 3),
-                               ("weights_v3/up4x-latest-denoise3x.pth", 4)]:
-        for tile_mode in [0, 1, 2, 3, 4]:
-            upscaler2x = RealWaifuUpScaler(scale, weight_path, half=True, device="cuda:0")
-            input_dir = "%s/input_dir1" % root_path
-            output_dir = "%s/opt-dir-all-test" % root_path
-            os.makedirs(output_dir, exist_ok=True)
-            for name in os.listdir(input_dir):
-                print(name)
-                tmp = name.split(".")
-                inp_path = os.path.join(input_dir, name)
-                suffix = tmp[-1]
-                prefix = ".".join(tmp[:-1])
-                tmp_path = os.path.join(root_path, "tmp", "%s.%s" % (int(time.time() * 1000000), suffix))
-                print(inp_path, tmp_path)
-                # supports non-ASCII (Chinese) paths
-                # os.link(inp_path, tmp_path)  # use a hard link on Windows
-                os.symlink(inp_path, tmp_path)  # use a symlink on Linux
-                frame = cv2.imread(tmp_path)[:, :, [2, 1, 0]]
-                t0 = ttime()
-                result = upscaler2x(frame, tile_mode=tile_mode)[:, :, ::-1]
-                t1 = ttime()
-                print(prefix, "done", t1 - t0)
-                tmp_opt_path = os.path.join(root_path, "tmp", "%s.%s" % (int(time.time() * 1000000), suffix))
-                cv2.imwrite(tmp_opt_path, result)
-                n = 0
-                while (1):
-                    if (n == 0):
-                        suffix = "_%sx_tile%s.png" % (scale, tile_mode)
-                    else:
-                        suffix = "_%sx_tile%s_%s.png" % (scale, tile_mode, n)
-                    if (os.path.exists(os.path.join(output_dir, prefix + suffix)) == False):
-                        break
-                    else:
-                        n += 1
-                final_opt_path = os.path.join(output_dir, prefix + suffix)
-                os.rename(tmp_opt_path, final_opt_path)
-                os.remove(tmp_path)
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/cache.py
DELETED
@@ -1,272 +0,0 @@
-"""Cache Management
-"""
-
-import hashlib
-import json
-import logging
-import os
-from pathlib import Path
-from typing import Any, Dict, List, Optional
-
-from pip._vendor.packaging.tags import Tag, interpreter_name, interpreter_version
-from pip._vendor.packaging.utils import canonicalize_name
-
-from pip._internal.exceptions import InvalidWheelFilename
-from pip._internal.models.direct_url import DirectUrl
-from pip._internal.models.link import Link
-from pip._internal.models.wheel import Wheel
-from pip._internal.utils.temp_dir import TempDirectory, tempdir_kinds
-from pip._internal.utils.urls import path_to_url
-
-logger = logging.getLogger(__name__)
-
-ORIGIN_JSON_NAME = "origin.json"
-
-
-def _hash_dict(d: Dict[str, str]) -> str:
-    """Return a stable sha224 of a dictionary."""
-    s = json.dumps(d, sort_keys=True, separators=(",", ":"), ensure_ascii=True)
-    return hashlib.sha224(s.encode("ascii")).hexdigest()
-
-
-class Cache:
-    """An abstract class - provides cache directories for data from links
-
-    :param cache_dir: The root of the cache.
-    """
-
-    def __init__(self, cache_dir: str) -> None:
-        super().__init__()
-        assert not cache_dir or os.path.isabs(cache_dir)
-        self.cache_dir = cache_dir or None
-
-    def _get_cache_path_parts(self, link: Link) -> List[str]:
-        """Get parts of part that must be os.path.joined with cache_dir"""
-
-        # We want to generate an url to use as our cache key, we don't want to
-        # just re-use the URL because it might have other items in the fragment
-        # and we don't care about those.
-        key_parts = {"url": link.url_without_fragment}
-        if link.hash_name is not None and link.hash is not None:
-            key_parts[link.hash_name] = link.hash
-        if link.subdirectory_fragment:
-            key_parts["subdirectory"] = link.subdirectory_fragment
-
-        # Include interpreter name, major and minor version in cache key
-        # to cope with ill-behaved sdists that build a different wheel
-        # depending on the python version their setup.py is being run on,
-        # and don't encode the difference in compatibility tags.
-        # https://github.com/pypa/pip/issues/7296
-        key_parts["interpreter_name"] = interpreter_name()
-        key_parts["interpreter_version"] = interpreter_version()
-
-        # Encode our key url with sha224, we'll use this because it has similar
-        # security properties to sha256, but with a shorter total output (and
-        # thus less secure). However the differences don't make a lot of
-        # difference for our use case here.
-        hashed = _hash_dict(key_parts)
-
-        # We want to nest the directories some to prevent having a ton of top
-        # level directories where we might run out of sub directories on some
-        # FS.
-        parts = [hashed[:2], hashed[2:4], hashed[4:6], hashed[6:]]
-
-        return parts
-
-    def _get_candidates(self, link: Link, canonical_package_name: str) -> List[Any]:
-        can_not_cache = not self.cache_dir or not canonical_package_name or not link
-        if can_not_cache:
-            return []
-
-        candidates = []
-        path = self.get_path_for_link(link)
-        if os.path.isdir(path):
-            for candidate in os.listdir(path):
-                candidates.append((candidate, path))
-        return candidates
-
-    def get_path_for_link(self, link: Link) -> str:
-        """Return a directory to store cached items in for link."""
-        raise NotImplementedError()
-
-    def get(
-        self,
-        link: Link,
-        package_name: Optional[str],
-        supported_tags: List[Tag],
-    ) -> Link:
-        """Returns a link to a cached item if it exists, otherwise returns the
-        passed link.
-        """
-        raise NotImplementedError()
-
-
-class SimpleWheelCache(Cache):
-    """A cache of wheels for future installs."""
-
-    def __init__(self, cache_dir: str) -> None:
-        super().__init__(cache_dir)
-
-    def get_path_for_link(self, link: Link) -> str:
-        """Return a directory to store cached wheels for link
-
-        Because there are M wheels for any one sdist, we provide a directory
-        to cache them in, and then consult that directory when looking up
-        cache hits.
-
-        We only insert things into the cache if they have plausible version
-        numbers, so that we don't contaminate the cache with things that were
-        not unique. E.g. ./package might have dozens of installs done for it
-        and build a version of 0.0...and if we built and cached a wheel, we'd
-        end up using the same wheel even if the source has been edited.
-
-        :param link: The link of the sdist for which this will cache wheels.
-        """
-        parts = self._get_cache_path_parts(link)
-        assert self.cache_dir
-        # Store wheels within the root cache_dir
-        return os.path.join(self.cache_dir, "wheels", *parts)
-
-    def get(
-        self,
-        link: Link,
-        package_name: Optional[str],
-        supported_tags: List[Tag],
-    ) -> Link:
-        candidates = []
-
-        if not package_name:
-            return link
-
-        canonical_package_name = canonicalize_name(package_name)
-        for wheel_name, wheel_dir in self._get_candidates(link, canonical_package_name):
-            try:
-                wheel = Wheel(wheel_name)
-            except InvalidWheelFilename:
-                continue
-            if canonicalize_name(wheel.name) != canonical_package_name:
-                logger.debug(
-                    "Ignoring cached wheel %s for %s as it "
-                    "does not match the expected distribution name %s.",
-                    wheel_name,
-                    link,
-                    package_name,
-                )
-                continue
-            if not wheel.supported(supported_tags):
-                # Built for a different python/arch/etc
-                continue
-            candidates.append(
-                (
-                    wheel.support_index_min(supported_tags),
-                    wheel_name,
-                    wheel_dir,
-                )
-            )
-
-        if not candidates:
-            return link
-
-        _, wheel_name, wheel_dir = min(candidates)
-        return Link(path_to_url(os.path.join(wheel_dir, wheel_name)))
-
-
-class EphemWheelCache(SimpleWheelCache):
-    """A SimpleWheelCache that creates it's own temporary cache directory"""
-
-    def __init__(self) -> None:
-        self._temp_dir = TempDirectory(
-            kind=tempdir_kinds.EPHEM_WHEEL_CACHE,
-            globally_managed=True,
-        )
-
-        super().__init__(self._temp_dir.path)
-
-
-class CacheEntry:
-    def __init__(
-        self,
-        link: Link,
-        persistent: bool,
-    ):
-        self.link = link
-        self.persistent = persistent
-        self.origin: Optional[DirectUrl] = None
-        origin_direct_url_path = Path(self.link.file_path).parent / ORIGIN_JSON_NAME
-        if origin_direct_url_path.exists():
-            self.origin = DirectUrl.from_json(origin_direct_url_path.read_text())
-
-
-class WheelCache(Cache):
-    """Wraps EphemWheelCache and SimpleWheelCache into a single Cache
-
-    This Cache allows for gracefully degradation, using the ephem wheel cache
-    when a certain link is not found in the simple wheel cache first.
-    """
-
-    def __init__(self, cache_dir: str) -> None:
-        super().__init__(cache_dir)
-        self._wheel_cache = SimpleWheelCache(cache_dir)
-        self._ephem_cache = EphemWheelCache()
-
-    def get_path_for_link(self, link: Link) -> str:
-        return self._wheel_cache.get_path_for_link(link)
-
-    def get_ephem_path_for_link(self, link: Link) -> str:
-        return self._ephem_cache.get_path_for_link(link)
-
-    def get(
-        self,
-        link: Link,
-        package_name: Optional[str],
-        supported_tags: List[Tag],
-    ) -> Link:
-        cache_entry = self.get_cache_entry(link, package_name, supported_tags)
-        if cache_entry is None:
-            return link
-        return cache_entry.link
-
-    def get_cache_entry(
-        self,
-        link: Link,
-        package_name: Optional[str],
-        supported_tags: List[Tag],
-    ) -> Optional[CacheEntry]:
-        """Returns a CacheEntry with a link to a cached item if it exists or
-        None. The cache entry indicates if the item was found in the persistent
-        or ephemeral cache.
-        """
-        retval = self._wheel_cache.get(
-            link=link,
-            package_name=package_name,
-            supported_tags=supported_tags,
-        )
-        if retval is not link:
-            return CacheEntry(retval, persistent=True)
-
-        retval = self._ephem_cache.get(
-            link=link,
-            package_name=package_name,
-            supported_tags=supported_tags,
-        )
-        if retval is not link:
-            return CacheEntry(retval, persistent=False)
-
-        return None
-
-    @staticmethod
-    def record_download_origin(cache_dir: str, download_info: DirectUrl) -> None:
-        origin_path = Path(cache_dir) / ORIGIN_JSON_NAME
-        if origin_path.is_file():
-            origin = DirectUrl.from_json(origin_path.read_text())
-            # TODO: use DirectUrl.equivalent when https://github.com/pypa/pip/pull/10564
-            # is merged.
-            if origin.url != download_info.url:
-                logger.warning(
-                    "Origin URL %s in cache entry %s does not match download URL %s. "
-                    "This is likely a pip bug or a cache corruption issue.",
-                    origin.url,
-                    cache_dir,
-                    download_info.url,
-                )
-        origin_path.write_text(download_info.to_json(), encoding="utf-8")
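A small self-contained sketch of the cache-key scheme used in `_get_cache_path_parts` above: the key parts are serialized to canonical JSON, hashed with sha224, and the digest is split into nested directory parts (2/2/2/rest) to avoid one huge flat directory. The URL below is a hypothetical placeholder:

```python
import hashlib
import json

key_parts = {
    "url": "https://example.com/packages/example-1.0.tar.gz",  # hypothetical
    "interpreter_name": "cp",
    "interpreter_version": "311",
}
# Canonical JSON so the same dict always hashes identically.
s = json.dumps(key_parts, sort_keys=True, separators=(",", ":"), ensure_ascii=True)
hashed = hashlib.sha224(s.encode("ascii")).hexdigest()
parts = [hashed[:2], hashed[2:4], hashed[4:6], hashed[6:]]
print("/".join(parts))  # e.g. ab/cd/ef/<remaining 50 hex chars>
```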
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/requests/packages.py
DELETED
@@ -1,16 +0,0 @@
-import sys
-
-# This code exists for backwards compatibility reasons.
-# I don't like it either. Just look the other way. :)
-
-for package in ('urllib3', 'idna', 'chardet'):
-    vendored_package = "pip._vendor." + package
-    locals()[package] = __import__(vendored_package)
-    # This traversal is apparently necessary such that the identities are
-    # preserved (requests.packages.urllib3.* is urllib3.*)
-    for mod in list(sys.modules):
-        if mod == vendored_package or mod.startswith(vendored_package + '.'):
-            unprefixed_mod = mod[len("pip._vendor."):]
-            sys.modules['pip._vendor.requests.packages.' + unprefixed_mod] = sys.modules[mod]
-
-# Kinda cool, though, right?
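One way to observe the aliasing this shim sets up, in an environment where this vendored copy exists (a sketch, not part of the file):

```python
# Importing pip's vendored requests triggers packages.py, which registers
# 'pip._vendor.requests.packages.urllib3' in sys.modules as an alias for
# the vendored urllib3 module itself.
from pip._vendor import requests  # noqa: F401

import pip._vendor.requests.packages.urllib3 as compat_urllib3
import pip._vendor.urllib3 as vendored_urllib3

print(compat_urllib3 is vendored_urllib3)  # expected: True (same module object)
```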
spaces/AzinZ/vitscn/models.py
DELETED
@@ -1,534 +0,0 @@
|
|
1 |
-
import copy
|
2 |
-
import math
|
3 |
-
import torch
|
4 |
-
from torch import nn
|
5 |
-
from torch.nn import functional as F
|
6 |
-
|
7 |
-
import commons
|
8 |
-
import modules
|
9 |
-
import attentions
|
10 |
-
import monotonic_align
|
11 |
-
|
12 |
-
from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
|
13 |
-
from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
|
14 |
-
from commons import init_weights, get_padding
|
15 |
-
|
16 |
-
|
17 |
-
class StochasticDurationPredictor(nn.Module):
|
18 |
-
def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0):
|
19 |
-
super().__init__()
|
20 |
-
filter_channels = in_channels # it needs to be removed from future version.
|
21 |
-
self.in_channels = in_channels
|
22 |
-
self.filter_channels = filter_channels
|
23 |
-
self.kernel_size = kernel_size
|
24 |
-
self.p_dropout = p_dropout
|
25 |
-
self.n_flows = n_flows
|
26 |
-
self.gin_channels = gin_channels
|
27 |
-
|
28 |
-
self.log_flow = modules.Log()
|
29 |
-
self.flows = nn.ModuleList()
|
30 |
-
self.flows.append(modules.ElementwiseAffine(2))
|
31 |
-
for i in range(n_flows):
|
32 |
-
self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
|
33 |
-
self.flows.append(modules.Flip())
|
34 |
-
|
35 |
-
self.post_pre = nn.Conv1d(1, filter_channels, 1)
|
36 |
-
self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1)
|
37 |
-
self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
|
38 |
-
self.post_flows = nn.ModuleList()
|
39 |
-
self.post_flows.append(modules.ElementwiseAffine(2))
|
40 |
-
for i in range(4):
|
41 |
-
self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
|
42 |
-
self.post_flows.append(modules.Flip())
|
43 |
-
|
44 |
-
self.pre = nn.Conv1d(in_channels, filter_channels, 1)
|
45 |
-
self.proj = nn.Conv1d(filter_channels, filter_channels, 1)
|
46 |
-
self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
|
47 |
-
if gin_channels != 0:
|
48 |
-
self.cond = nn.Conv1d(gin_channels, filter_channels, 1)
|
49 |
-
|
50 |
-
def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0):
|
51 |
-
x = torch.detach(x)
|
52 |
-
x = self.pre(x)
|
53 |
-
if g is not None:
|
54 |
-
g = torch.detach(g)
|
55 |
-
x = x + self.cond(g)
|
56 |
-
x = self.convs(x, x_mask)
|
57 |
-
x = self.proj(x) * x_mask
|
58 |
-
|
59 |
-
if not reverse:
|
60 |
-
flows = self.flows
|
61 |
-
assert w is not None
|
62 |
-
|
63 |
-
logdet_tot_q = 0
|
64 |
-
h_w = self.post_pre(w)
|
65 |
-
h_w = self.post_convs(h_w, x_mask)
|
66 |
-
h_w = self.post_proj(h_w) * x_mask
|
67 |
-
e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask
|
68 |
-
z_q = e_q
|
69 |
-
for flow in self.post_flows:
|
70 |
-
z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w))
|
71 |
-
logdet_tot_q += logdet_q
|
72 |
-
z_u, z1 = torch.split(z_q, [1, 1], 1)
|
73 |
-
u = torch.sigmoid(z_u) * x_mask
|
74 |
-
z0 = (w - u) * x_mask
|
75 |
-
logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1,2])
|
76 |
-
logq = torch.sum(-0.5 * (math.log(2*math.pi) + (e_q**2)) * x_mask, [1,2]) - logdet_tot_q
|
77 |
-
|
78 |
-
logdet_tot = 0
|
79 |
-
z0, logdet = self.log_flow(z0, x_mask)
|
80 |
-
logdet_tot += logdet
|
81 |
-
z = torch.cat([z0, z1], 1)
|
82 |
-
for flow in flows:
|
83 |
-
z, logdet = flow(z, x_mask, g=x, reverse=reverse)
|
84 |
-
logdet_tot = logdet_tot + logdet
|
85 |
-
nll = torch.sum(0.5 * (math.log(2*math.pi) + (z**2)) * x_mask, [1,2]) - logdet_tot
|
86 |
-
return nll + logq # [b]
|
87 |
-
else:
|
88 |
-
flows = list(reversed(self.flows))
|
89 |
-
flows = flows[:-2] + [flows[-1]] # remove a useless vflow
|
90 |
-
z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale
|
91 |
-
for flow in flows:
|
92 |
-
z = flow(z, x_mask, g=x, reverse=reverse)
|
93 |
-
z0, z1 = torch.split(z, [1, 1], 1)
|
94 |
-
logw = z0
|
95 |
-
return logw
|
96 |
-
|
97 |
-
|
98 |
-
class DurationPredictor(nn.Module):
|
99 |
-
def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0):
|
100 |
-
super().__init__()
|
101 |
-
|
102 |
-
self.in_channels = in_channels
|
103 |
-
self.filter_channels = filter_channels
|
104 |
-
self.kernel_size = kernel_size
|
105 |
-
self.p_dropout = p_dropout
|
106 |
-
self.gin_channels = gin_channels
|
107 |
-
|
108 |
-
self.drop = nn.Dropout(p_dropout)
|
109 |
-
self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2)
|
110 |
-
self.norm_1 = modules.LayerNorm(filter_channels)
|
111 |
-
self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2)
|
112 |
-
self.norm_2 = modules.LayerNorm(filter_channels)
|
113 |
-
self.proj = nn.Conv1d(filter_channels, 1, 1)
|
114 |
-
|
115 |
-
if gin_channels != 0:
|
116 |
-
self.cond = nn.Conv1d(gin_channels, in_channels, 1)
|
117 |
-
|
118 |
-
def forward(self, x, x_mask, g=None):
|
119 |
-
x = torch.detach(x)
|
120 |
-
if g is not None:
|
121 |
-
g = torch.detach(g)
|
122 |
-
x = x + self.cond(g)
|
123 |
-
x = self.conv_1(x * x_mask)
|
124 |
-
x = torch.relu(x)
|
125 |
-
x = self.norm_1(x)
|
126 |
-
x = self.drop(x)
|
127 |
-
x = self.conv_2(x * x_mask)
|
128 |
-
x = torch.relu(x)
|
129 |
-
x = self.norm_2(x)
|
130 |
-
x = self.drop(x)
|
131 |
-
x = self.proj(x * x_mask)
|
132 |
-
return x * x_mask
|
133 |
-
|
134 |
-
|
class TextEncoder(nn.Module):
  def __init__(self,
      n_vocab,
      out_channels,
      hidden_channels,
      filter_channels,
      n_heads,
      n_layers,
      kernel_size,
      p_dropout):
    super().__init__()
    self.n_vocab = n_vocab
    self.out_channels = out_channels
    self.hidden_channels = hidden_channels
    self.filter_channels = filter_channels
    self.n_heads = n_heads
    self.n_layers = n_layers
    self.kernel_size = kernel_size
    self.p_dropout = p_dropout

    self.emb = nn.Embedding(n_vocab, hidden_channels)
    nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5)

    self.encoder = attentions.Encoder(
        hidden_channels,
        filter_channels,
        n_heads,
        n_layers,
        kernel_size,
        p_dropout)
    self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)

  def forward(self, x, x_lengths):
    x = self.emb(x) * math.sqrt(self.hidden_channels)  # [b, t, h]
    x = torch.transpose(x, 1, -1)  # [b, h, t]
    x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)

    x = self.encoder(x * x_mask, x_mask)
    stats = self.proj(x) * x_mask

    m, logs = torch.split(stats, self.out_channels, dim=1)
    return x, m, logs, x_mask


class ResidualCouplingBlock(nn.Module):
  def __init__(self,
      channels,
      hidden_channels,
      kernel_size,
      dilation_rate,
      n_layers,
      n_flows=4,
      gin_channels=0):
    super().__init__()
    self.channels = channels
    self.hidden_channels = hidden_channels
    self.kernel_size = kernel_size
    self.dilation_rate = dilation_rate
    self.n_layers = n_layers
    self.n_flows = n_flows
    self.gin_channels = gin_channels

    self.flows = nn.ModuleList()
    for i in range(n_flows):
      self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True))
      self.flows.append(modules.Flip())

  def forward(self, x, x_mask, g=None, reverse=False):
    if not reverse:
      for flow in self.flows:
        x, _ = flow(x, x_mask, g=g, reverse=reverse)
    else:
      for flow in reversed(self.flows):
        x = flow(x, x_mask, g=g, reverse=reverse)
    return x


class PosteriorEncoder(nn.Module):
  def __init__(self,
      in_channels,
      out_channels,
      hidden_channels,
      kernel_size,
      dilation_rate,
      n_layers,
      gin_channels=0):
    super().__init__()
    self.in_channels = in_channels
    self.out_channels = out_channels
    self.hidden_channels = hidden_channels
    self.kernel_size = kernel_size
    self.dilation_rate = dilation_rate
    self.n_layers = n_layers
    self.gin_channels = gin_channels

    self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
    self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels)
    self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)

  def forward(self, x, x_lengths, g=None):
    x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
    x = self.pre(x) * x_mask
    x = self.enc(x, x_mask, g=g)
    stats = self.proj(x) * x_mask
    m, logs = torch.split(stats, self.out_channels, dim=1)
    z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
    return z, m, logs, x_mask

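The sampling line in PosteriorEncoder.forward is the reparameterization trick; a minimal standalone restatement (tensor sizes are arbitrary):

import torch

m = torch.zeros(2, 192, 100)     # posterior mean
logs = torch.zeros(2, 192, 100)  # posterior log standard deviation
eps = torch.randn_like(m)        # noise sampled outside the graph
z = m + eps * torch.exp(logs)    # differentiable w.r.t. m and logs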
class Generator(torch.nn.Module):
  def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0):
    super(Generator, self).__init__()
    self.num_kernels = len(resblock_kernel_sizes)
    self.num_upsamples = len(upsample_rates)
    self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)
    resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2

    self.ups = nn.ModuleList()
    for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
      self.ups.append(weight_norm(
          ConvTranspose1d(upsample_initial_channel // (2**i), upsample_initial_channel // (2**(i + 1)),
                          k, u, padding=(k - u) // 2)))

    self.resblocks = nn.ModuleList()
    for i in range(len(self.ups)):
      ch = upsample_initial_channel // (2**(i + 1))
      for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
        self.resblocks.append(resblock(ch, k, d))

    self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
    self.ups.apply(init_weights)

    if gin_channels != 0:
      self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)

  def forward(self, x, g=None):
    x = self.conv_pre(x)
    if g is not None:
      x = x + self.cond(g)

    for i in range(self.num_upsamples):
      x = F.leaky_relu(x, modules.LRELU_SLOPE)
      x = self.ups[i](x)
      xs = None
      for j in range(self.num_kernels):
        if xs is None:
          xs = self.resblocks[i * self.num_kernels + j](x)
        else:
          xs += self.resblocks[i * self.num_kernels + j](x)
      x = xs / self.num_kernels
    x = F.leaky_relu(x)
    x = self.conv_post(x)
    x = torch.tanh(x)

    return x

  def remove_weight_norm(self):
    print('Removing weight norm...')
    for l in self.ups:
      remove_weight_norm(l)
    for l in self.resblocks:
      l.remove_weight_norm()

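Each ConvTranspose1d halves the channel count and stretches time by its rate, so the decoder's total hop is the product of upsample_rates. A sketch with an assumed HiFi-GAN-style setting (not taken from this repo's config):

import numpy as np

upsample_rates = [8, 8, 2, 2]       # illustrative values
hop = int(np.prod(upsample_rates))  # one latent frame becomes 256 audio samples
assert hop == 256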
class DiscriminatorP(torch.nn.Module):
  def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
    super(DiscriminatorP, self).__init__()
    self.period = period
    self.use_spectral_norm = use_spectral_norm
    norm_f = spectral_norm if use_spectral_norm else weight_norm
    self.convs = nn.ModuleList([
        norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
        norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
        norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
        norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
        norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))),
    ])
    self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))

  def forward(self, x):
    fmap = []

    # 1d to 2d
    b, c, t = x.shape
    if t % self.period != 0:  # pad first
      n_pad = self.period - (t % self.period)
      x = F.pad(x, (0, n_pad), "reflect")
      t = t + n_pad
    x = x.view(b, c, t // self.period, self.period)

    for l in self.convs:
      x = l(x)
      x = F.leaky_relu(x, modules.LRELU_SLOPE)
      fmap.append(x)
    x = self.conv_post(x)
    fmap.append(x)
    x = torch.flatten(x, 1, -1)

    return x, fmap

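The period discriminator folds the waveform into a 2D grid so each column holds samples exactly one period apart. The fold in isolation, with illustrative sizes:

import torch
import torch.nn.functional as F

period = 3
x = torch.randn(1, 1, 100)             # [b, c, t]
b, c, t = x.shape
if t % period != 0:                    # pad t up to a multiple of period
    n_pad = period - (t % period)
    x = F.pad(x, (0, n_pad), "reflect")
    t = t + n_pad
x = x.view(b, c, t // period, period)  # [1, 1, 34, 3]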
class DiscriminatorS(torch.nn.Module):
  def __init__(self, use_spectral_norm=False):
    super(DiscriminatorS, self).__init__()
    norm_f = spectral_norm if use_spectral_norm else weight_norm
    self.convs = nn.ModuleList([
        norm_f(Conv1d(1, 16, 15, 1, padding=7)),
        norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
        norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
        norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
        norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
        norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
    ])
    self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))

  def forward(self, x):
    fmap = []

    for l in self.convs:
      x = l(x)
      x = F.leaky_relu(x, modules.LRELU_SLOPE)
      fmap.append(x)
    x = self.conv_post(x)
    fmap.append(x)
    x = torch.flatten(x, 1, -1)

    return x, fmap


class MultiPeriodDiscriminator(torch.nn.Module):
  def __init__(self, use_spectral_norm=False):
    super(MultiPeriodDiscriminator, self).__init__()
    periods = [2, 3, 5, 7, 11]

    discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
    discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods]
    self.discriminators = nn.ModuleList(discs)

  def forward(self, y, y_hat):
    y_d_rs = []
    y_d_gs = []
    fmap_rs = []
    fmap_gs = []
    for i, d in enumerate(self.discriminators):
      y_d_r, fmap_r = d(y)
      y_d_g, fmap_g = d(y_hat)
      y_d_rs.append(y_d_r)
      y_d_gs.append(y_d_g)
      fmap_rs.append(fmap_r)
      fmap_gs.append(fmap_g)

    return y_d_rs, y_d_gs, fmap_rs, fmap_gs


class SynthesizerTrn(nn.Module):
  """
  Synthesizer for Training
  """

  def __init__(self,
      n_vocab,
      spec_channels,
      segment_size,
      inter_channels,
      hidden_channels,
      filter_channels,
      n_heads,
      n_layers,
      kernel_size,
      p_dropout,
      resblock,
      resblock_kernel_sizes,
      resblock_dilation_sizes,
      upsample_rates,
      upsample_initial_channel,
      upsample_kernel_sizes,
      n_speakers=0,
      gin_channels=0,
      use_sdp=True,
      **kwargs):

    super().__init__()
    self.n_vocab = n_vocab
    self.spec_channels = spec_channels
    self.inter_channels = inter_channels
    self.hidden_channels = hidden_channels
    self.filter_channels = filter_channels
    self.n_heads = n_heads
    self.n_layers = n_layers
    self.kernel_size = kernel_size
    self.p_dropout = p_dropout
    self.resblock = resblock
    self.resblock_kernel_sizes = resblock_kernel_sizes
    self.resblock_dilation_sizes = resblock_dilation_sizes
    self.upsample_rates = upsample_rates
    self.upsample_initial_channel = upsample_initial_channel
    self.upsample_kernel_sizes = upsample_kernel_sizes
    self.segment_size = segment_size
    self.n_speakers = n_speakers
    self.gin_channels = gin_channels

    self.use_sdp = use_sdp

    self.enc_p = TextEncoder(n_vocab,
        inter_channels,
        hidden_channels,
        filter_channels,
        n_heads,
        n_layers,
        kernel_size,
        p_dropout)
    self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels)
    self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels)
    self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)

    if use_sdp:
      self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels)
    else:
      self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels)

    if n_speakers > 1:
      self.emb_g = nn.Embedding(n_speakers, gin_channels)

  def forward(self, x, x_lengths, y, y_lengths, sid=None):

    x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths)
    if self.n_speakers > 0:
      g = self.emb_g(sid).unsqueeze(-1)  # [b, h, 1]
    else:
      g = None

    z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
    z_p = self.flow(z, y_mask, g=g)

    with torch.no_grad():
      # negative cross-entropy
      s_p_sq_r = torch.exp(-2 * logs_p)  # [b, d, t]
      neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True)  # [b, 1, t_s]
      neg_cent2 = torch.matmul(-0.5 * (z_p**2).transpose(1, 2), s_p_sq_r)  # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
      neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r))  # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
      neg_cent4 = torch.sum(-0.5 * (m_p**2) * s_p_sq_r, [1], keepdim=True)  # [b, 1, t_s]
      neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4

      attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
      attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach()

    w = attn.sum(2)
    if self.use_sdp:
      l_length = self.dp(x, x_mask, w, g=g)
      l_length = l_length / torch.sum(x_mask)
    else:
      logw_ = torch.log(w + 1e-6) * x_mask
      logw = self.dp(x, x_mask, g=g)
      l_length = torch.sum((logw - logw_)**2, [1, 2]) / torch.sum(x_mask)  # for averaging

    # expand prior
    m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)
    logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)

    z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size)
    o = self.dec(z_slice, g=g)
    return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)

  def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None):
    x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths)
    if self.n_speakers > 0:
      g = self.emb_g(sid).unsqueeze(-1)  # [b, h, 1]
    else:
      g = None

    if self.use_sdp:
      logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w)
    else:
      logw = self.dp(x, x_mask, g=g)
    w = torch.exp(logw) * x_mask * length_scale
    w_ceil = torch.ceil(w)
    y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()
    y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype)
    attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
    attn = commons.generate_path(w_ceil, attn_mask)

    m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)  # [b, t', t], [b, t, d] -> [b, d, t']
    logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)  # [b, t', t], [b, t, d] -> [b, d, t']

    z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale
    z = self.flow(z_p, y_mask, g=g, reverse=True)
    o = self.dec((z * y_mask)[:, :, :max_len], g=g)
    return o, attn, y_mask, (z, z_p, m_p, logs_p)

  def voice_conversion(self, y, y_lengths, sid_src, sid_tgt):
    assert self.n_speakers > 0, "n_speakers have to be larger than 0."
    g_src = self.emb_g(sid_src).unsqueeze(-1)
    g_tgt = self.emb_g(sid_tgt).unsqueeze(-1)
    z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src)
    z_p = self.flow(z, y_mask, g=g_src)
    z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True)
    o_hat = self.dec(z_hat * y_mask, g=g_tgt)
    return o_hat, y_mask, (z, z_p, z_hat)
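End-to-end inference with the synthesizer above would look roughly like this; every hyperparameter here is an illustrative assumption (the real values live in the training config), and the snippet presumes the repo's attentions/commons/modules and monotonic_align packages are importable:

import torch

net_g = SynthesizerTrn(
    n_vocab=100, spec_channels=513, segment_size=32,
    inter_channels=192, hidden_channels=192, filter_channels=768,
    n_heads=2, n_layers=6, kernel_size=3, p_dropout=0.1,
    resblock="1",
    resblock_kernel_sizes=[3, 7, 11],
    resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
    upsample_rates=[8, 8, 2, 2], upsample_initial_channel=512,
    upsample_kernel_sizes=[16, 16, 4, 4],
).eval()

x = torch.randint(0, 100, (1, 20))  # phoneme ids
x_lengths = torch.LongTensor([20])
with torch.no_grad():
    audio = net_g.infer(x, x_lengths, noise_scale=0.667, length_scale=1.0)[0]
print(audio.shape)                  # [1, 1, n_samples]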
spaces/Bart92/RVC_HF/infer/lib/infer_pack/models.py
DELETED
@@ -1,1174 +0,0 @@
import math
import logging

logger = logging.getLogger(__name__)

import numpy as np
import torch
from torch import nn
from torch.nn import AvgPool1d, Conv1d, Conv2d, ConvTranspose1d
from torch.nn import functional as F
from torch.nn.utils import remove_weight_norm, spectral_norm, weight_norm

from infer.lib.infer_pack import attentions, commons, modules
from infer.lib.infer_pack.commons import get_padding, init_weights

has_xpu = bool(hasattr(torch, "xpu") and torch.xpu.is_available())


class TextEncoder256(nn.Module):
    def __init__(
        self,
        out_channels,
        hidden_channels,
        filter_channels,
        n_heads,
        n_layers,
        kernel_size,
        p_dropout,
        f0=True,
    ):
        super().__init__()
        self.out_channels = out_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.emb_phone = nn.Linear(256, hidden_channels)
        self.lrelu = nn.LeakyReLU(0.1, inplace=True)
        if f0:
            self.emb_pitch = nn.Embedding(256, hidden_channels)  # pitch 256
        self.encoder = attentions.Encoder(
            hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
        )
        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)

    def forward(self, phone, pitch, lengths):
        if pitch is None:
            x = self.emb_phone(phone)
        else:
            x = self.emb_phone(phone) + self.emb_pitch(pitch)
        x = x * math.sqrt(self.hidden_channels)  # [b, t, h]
        x = self.lrelu(x)
        x = torch.transpose(x, 1, -1)  # [b, h, t]
        x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
            x.dtype
        )
        x = self.encoder(x * x_mask, x_mask)
        stats = self.proj(x) * x_mask

        m, logs = torch.split(stats, self.out_channels, dim=1)
        return m, logs, x_mask


class TextEncoder768(nn.Module):
    def __init__(
        self,
        out_channels,
        hidden_channels,
        filter_channels,
        n_heads,
        n_layers,
        kernel_size,
        p_dropout,
        f0=True,
    ):
        super().__init__()
        self.out_channels = out_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.emb_phone = nn.Linear(768, hidden_channels)
        self.lrelu = nn.LeakyReLU(0.1, inplace=True)
        if f0:
            self.emb_pitch = nn.Embedding(256, hidden_channels)  # pitch 256
        self.encoder = attentions.Encoder(
            hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
        )
        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)

    def forward(self, phone, pitch, lengths):
        if pitch is None:
            x = self.emb_phone(phone)
        else:
            x = self.emb_phone(phone) + self.emb_pitch(pitch)
        x = x * math.sqrt(self.hidden_channels)  # [b, t, h]
        x = self.lrelu(x)
        x = torch.transpose(x, 1, -1)  # [b, h, t]
        x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
            x.dtype
        )
        x = self.encoder(x * x_mask, x_mask)
        stats = self.proj(x) * x_mask

        m, logs = torch.split(stats, self.out_channels, dim=1)
        return m, logs, x_mask


class ResidualCouplingBlock(nn.Module):
    def __init__(
        self,
        channels,
        hidden_channels,
        kernel_size,
        dilation_rate,
        n_layers,
        n_flows=4,
        gin_channels=0,
    ):
        super().__init__()
        self.channels = channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.n_flows = n_flows
        self.gin_channels = gin_channels

        self.flows = nn.ModuleList()
        for i in range(n_flows):
            self.flows.append(
                modules.ResidualCouplingLayer(
                    channels,
                    hidden_channels,
                    kernel_size,
                    dilation_rate,
                    n_layers,
                    gin_channels=gin_channels,
                    mean_only=True,
                )
            )
            self.flows.append(modules.Flip())

    def forward(self, x, x_mask, g=None, reverse=False):
        if not reverse:
            for flow in self.flows:
                x, _ = flow(x, x_mask, g=g, reverse=reverse)
        else:
            for flow in reversed(self.flows):
                x = flow(x, x_mask, g=g, reverse=reverse)
        return x

    def remove_weight_norm(self):
        for i in range(self.n_flows):
            self.flows[i * 2].remove_weight_norm()


class PosteriorEncoder(nn.Module):
    def __init__(
        self,
        in_channels,
        out_channels,
        hidden_channels,
        kernel_size,
        dilation_rate,
        n_layers,
        gin_channels=0,
    ):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.gin_channels = gin_channels

        self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
        self.enc = modules.WN(
            hidden_channels,
            kernel_size,
            dilation_rate,
            n_layers,
            gin_channels=gin_channels,
        )
        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)

    def forward(self, x, x_lengths, g=None):
        x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
            x.dtype
        )
        x = self.pre(x) * x_mask
        x = self.enc(x, x_mask, g=g)
        stats = self.proj(x) * x_mask
        m, logs = torch.split(stats, self.out_channels, dim=1)
        z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
        return z, m, logs, x_mask

    def remove_weight_norm(self):
        self.enc.remove_weight_norm()


class Generator(torch.nn.Module):
    def __init__(
        self,
        initial_channel,
        resblock,
        resblock_kernel_sizes,
        resblock_dilation_sizes,
        upsample_rates,
        upsample_initial_channel,
        upsample_kernel_sizes,
        gin_channels=0,
    ):
        super(Generator, self).__init__()
        self.num_kernels = len(resblock_kernel_sizes)
        self.num_upsamples = len(upsample_rates)
        self.conv_pre = Conv1d(
            initial_channel, upsample_initial_channel, 7, 1, padding=3
        )
        resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2

        self.ups = nn.ModuleList()
        for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
            self.ups.append(
                weight_norm(
                    ConvTranspose1d(
                        upsample_initial_channel // (2**i),
                        upsample_initial_channel // (2 ** (i + 1)),
                        k,
                        u,
                        padding=(k - u) // 2,
                    )
                )
            )

        self.resblocks = nn.ModuleList()
        for i in range(len(self.ups)):
            ch = upsample_initial_channel // (2 ** (i + 1))
            for j, (k, d) in enumerate(
                zip(resblock_kernel_sizes, resblock_dilation_sizes)
            ):
                self.resblocks.append(resblock(ch, k, d))

        self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
        self.ups.apply(init_weights)

        if gin_channels != 0:
            self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)

    def forward(self, x, g=None):
        x = self.conv_pre(x)
        if g is not None:
            x = x + self.cond(g)

        for i in range(self.num_upsamples):
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            x = self.ups[i](x)
            xs = None
            for j in range(self.num_kernels):
                if xs is None:
                    xs = self.resblocks[i * self.num_kernels + j](x)
                else:
                    xs += self.resblocks[i * self.num_kernels + j](x)
            x = xs / self.num_kernels
        x = F.leaky_relu(x)
        x = self.conv_post(x)
        x = torch.tanh(x)

        return x

    def remove_weight_norm(self):
        for l in self.ups:
            remove_weight_norm(l)
        for l in self.resblocks:
            l.remove_weight_norm()


class SineGen(torch.nn.Module):
    """Definition of sine generator
    SineGen(samp_rate, harmonic_num = 0,
            sine_amp = 0.1, noise_std = 0.003,
            voiced_threshold = 0,
            flag_for_pulse=False)
    samp_rate: sampling rate in Hz
    harmonic_num: number of harmonic overtones (default 0)
    sine_amp: amplitude of sine waveform (default 0.1)
    noise_std: std of Gaussian noise (default 0.003)
    voiced_threshold: F0 threshold for U/V classification (default 0)
    flag_for_pulse: this SineGen is used inside PulseGen (default False)
    Note: when flag_for_pulse is True, the first time step of a voiced
    segment is always sin(np.pi) or cos(0)
    """

    def __init__(
        self,
        samp_rate,
        harmonic_num=0,
        sine_amp=0.1,
        noise_std=0.003,
        voiced_threshold=0,
        flag_for_pulse=False,
    ):
        super(SineGen, self).__init__()
        self.sine_amp = sine_amp
        self.noise_std = noise_std
        self.harmonic_num = harmonic_num
        self.dim = self.harmonic_num + 1
        self.sampling_rate = samp_rate
        self.voiced_threshold = voiced_threshold

    def _f02uv(self, f0):
        # generate uv signal
        uv = torch.ones_like(f0)
        uv = uv * (f0 > self.voiced_threshold)
        if uv.device.type == "privateuseone":  # for DirectML
            uv = uv.float()
        return uv

    def forward(self, f0, upp):
        """sine_tensor, uv = forward(f0)
        input F0: tensor(batchsize=1, length, dim=1)
        f0 for unvoiced steps should be 0
        output sine_tensor: tensor(batchsize=1, length, dim)
        output uv: tensor(batchsize=1, length, 1)
        """
        with torch.no_grad():
            f0 = f0[:, None].transpose(1, 2)
            f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
            # fundamental component
            f0_buf[:, :, 0] = f0[:, :, 0]
            for idx in np.arange(self.harmonic_num):
                f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (
                    idx + 2
                )  # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
            # the % 1 here means the per-harmonic products cannot be optimized away later
            rad_values = (f0_buf / self.sampling_rate) % 1
            rand_ini = torch.rand(
                f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device
            )
            rand_ini[:, 0] = 0
            rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
            # a % 1 here would keep the cumsum below from being optimized further
            tmp_over_one = torch.cumsum(rad_values, 1)
            tmp_over_one *= upp
            tmp_over_one = F.interpolate(
                tmp_over_one.transpose(2, 1),
                scale_factor=upp,
                mode="linear",
                align_corners=True,
            ).transpose(2, 1)
            rad_values = F.interpolate(
                rad_values.transpose(2, 1), scale_factor=upp, mode="nearest"
            ).transpose(2, 1)
            tmp_over_one %= 1
            tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
            cumsum_shift = torch.zeros_like(rad_values)
            cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
            sine_waves = torch.sin(
                torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi
            )
            sine_waves = sine_waves * self.sine_amp
            uv = self._f02uv(f0)
            uv = F.interpolate(
                uv.transpose(2, 1), scale_factor=upp, mode="nearest"
            ).transpose(2, 1)
            noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
            noise = noise_amp * torch.randn_like(sine_waves)
            sine_waves = sine_waves * uv + noise
        return sine_waves, uv, noise

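A minimal way to exercise SineGen on its own, assuming a constant 220 Hz frame-level F0 track and a hop of 400 samples (both values invented for the example):

import torch

sine_gen = SineGen(samp_rate=40000, harmonic_num=0)
f0 = torch.full((1, 100), 220.0)         # [b, frames] frame-level F0
sine, uv, noise = sine_gen(f0, upp=400)  # upp = samples per frame
print(sine.shape)                        # [1, 40000, 1]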
375 |
-
class SourceModuleHnNSF(torch.nn.Module):
|
376 |
-
"""SourceModule for hn-nsf
|
377 |
-
SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
|
378 |
-
add_noise_std=0.003, voiced_threshod=0)
|
379 |
-
sampling_rate: sampling_rate in Hz
|
380 |
-
harmonic_num: number of harmonic above F0 (default: 0)
|
381 |
-
sine_amp: amplitude of sine source signal (default: 0.1)
|
382 |
-
add_noise_std: std of additive Gaussian noise (default: 0.003)
|
383 |
-
note that amplitude of noise in unvoiced is decided
|
384 |
-
by sine_amp
|
385 |
-
voiced_threshold: threhold to set U/V given F0 (default: 0)
|
386 |
-
Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
|
387 |
-
F0_sampled (batchsize, length, 1)
|
388 |
-
Sine_source (batchsize, length, 1)
|
389 |
-
noise_source (batchsize, length 1)
|
390 |
-
uv (batchsize, length, 1)
|
391 |
-
"""
|
392 |
-
|
393 |
-
def __init__(
|
394 |
-
self,
|
395 |
-
sampling_rate,
|
396 |
-
harmonic_num=0,
|
397 |
-
sine_amp=0.1,
|
398 |
-
add_noise_std=0.003,
|
399 |
-
voiced_threshod=0,
|
400 |
-
is_half=True,
|
401 |
-
):
|
402 |
-
super(SourceModuleHnNSF, self).__init__()
|
403 |
-
|
404 |
-
self.sine_amp = sine_amp
|
405 |
-
self.noise_std = add_noise_std
|
406 |
-
self.is_half = is_half
|
407 |
-
# to produce sine waveforms
|
408 |
-
self.l_sin_gen = SineGen(
|
409 |
-
sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod
|
410 |
-
)
|
411 |
-
|
412 |
-
# to merge source harmonics into a single excitation
|
413 |
-
self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
|
414 |
-
self.l_tanh = torch.nn.Tanh()
|
415 |
-
|
416 |
-
def forward(self, x, upp=None):
|
417 |
-
if hasattr(self, "ddtype") == False:
|
418 |
-
self.ddtype = self.l_linear.weight.dtype
|
419 |
-
sine_wavs, uv, _ = self.l_sin_gen(x, upp)
|
420 |
-
# print(x.dtype,sine_wavs.dtype,self.l_linear.weight.dtype)
|
421 |
-
# if self.is_half:
|
422 |
-
# sine_wavs = sine_wavs.half()
|
423 |
-
# sine_merge = self.l_tanh(self.l_linear(sine_wavs.to(x)))
|
424 |
-
# print(sine_wavs.dtype,self.ddtype)
|
425 |
-
if sine_wavs.dtype != self.ddtype:
|
426 |
-
sine_wavs = sine_wavs.to(self.ddtype)
|
427 |
-
sine_merge = self.l_tanh(self.l_linear(sine_wavs))
|
428 |
-
return sine_merge, None, None # noise, uv
|
429 |
-
|
430 |
-
|
431 |
-
class GeneratorNSF(torch.nn.Module):
|
432 |
-
def __init__(
|
433 |
-
self,
|
434 |
-
initial_channel,
|
435 |
-
resblock,
|
436 |
-
resblock_kernel_sizes,
|
437 |
-
resblock_dilation_sizes,
|
438 |
-
upsample_rates,
|
439 |
-
upsample_initial_channel,
|
440 |
-
upsample_kernel_sizes,
|
441 |
-
gin_channels,
|
442 |
-
sr,
|
443 |
-
is_half=False,
|
444 |
-
):
|
445 |
-
super(GeneratorNSF, self).__init__()
|
446 |
-
self.num_kernels = len(resblock_kernel_sizes)
|
447 |
-
self.num_upsamples = len(upsample_rates)
|
448 |
-
|
449 |
-
self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates))
|
450 |
-
self.m_source = SourceModuleHnNSF(
|
451 |
-
sampling_rate=sr, harmonic_num=0, is_half=is_half
|
452 |
-
)
|
453 |
-
self.noise_convs = nn.ModuleList()
|
454 |
-
self.conv_pre = Conv1d(
|
455 |
-
initial_channel, upsample_initial_channel, 7, 1, padding=3
|
456 |
-
)
|
457 |
-
resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
|
458 |
-
|
459 |
-
self.ups = nn.ModuleList()
|
460 |
-
for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
|
461 |
-
c_cur = upsample_initial_channel // (2 ** (i + 1))
|
462 |
-
self.ups.append(
|
463 |
-
weight_norm(
|
464 |
-
ConvTranspose1d(
|
465 |
-
upsample_initial_channel // (2**i),
|
466 |
-
upsample_initial_channel // (2 ** (i + 1)),
|
467 |
-
k,
|
468 |
-
u,
|
469 |
-
padding=(k - u) // 2,
|
470 |
-
)
|
471 |
-
)
|
472 |
-
)
|
473 |
-
if i + 1 < len(upsample_rates):
|
474 |
-
stride_f0 = np.prod(upsample_rates[i + 1 :])
|
475 |
-
self.noise_convs.append(
|
476 |
-
Conv1d(
|
477 |
-
1,
|
478 |
-
c_cur,
|
479 |
-
kernel_size=stride_f0 * 2,
|
480 |
-
stride=stride_f0,
|
481 |
-
padding=stride_f0 // 2,
|
482 |
-
)
|
483 |
-
)
|
484 |
-
else:
|
485 |
-
self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))
|
486 |
-
|
487 |
-
self.resblocks = nn.ModuleList()
|
488 |
-
for i in range(len(self.ups)):
|
489 |
-
ch = upsample_initial_channel // (2 ** (i + 1))
|
490 |
-
for j, (k, d) in enumerate(
|
491 |
-
zip(resblock_kernel_sizes, resblock_dilation_sizes)
|
492 |
-
):
|
493 |
-
self.resblocks.append(resblock(ch, k, d))
|
494 |
-
|
495 |
-
self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
|
496 |
-
self.ups.apply(init_weights)
|
497 |
-
|
498 |
-
if gin_channels != 0:
|
499 |
-
self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
|
500 |
-
|
501 |
-
self.upp = np.prod(upsample_rates)
|
502 |
-
|
503 |
-
def forward(self, x, f0, g=None):
|
504 |
-
har_source, noi_source, uv = self.m_source(f0, self.upp)
|
505 |
-
har_source = har_source.transpose(1, 2)
|
506 |
-
x = self.conv_pre(x)
|
507 |
-
if g is not None:
|
508 |
-
x = x + self.cond(g)
|
509 |
-
|
510 |
-
for i in range(self.num_upsamples):
|
511 |
-
x = F.leaky_relu(x, modules.LRELU_SLOPE)
|
512 |
-
x = self.ups[i](x)
|
513 |
-
x_source = self.noise_convs[i](har_source)
|
514 |
-
x = x + x_source
|
515 |
-
xs = None
|
516 |
-
for j in range(self.num_kernels):
|
517 |
-
if xs is None:
|
518 |
-
xs = self.resblocks[i * self.num_kernels + j](x)
|
519 |
-
else:
|
520 |
-
xs += self.resblocks[i * self.num_kernels + j](x)
|
521 |
-
x = xs / self.num_kernels
|
522 |
-
x = F.leaky_relu(x)
|
523 |
-
x = self.conv_post(x)
|
524 |
-
x = torch.tanh(x)
|
525 |
-
return x
|
526 |
-
|
527 |
-
def remove_weight_norm(self):
|
528 |
-
for l in self.ups:
|
529 |
-
remove_weight_norm(l)
|
530 |
-
for l in self.resblocks:
|
531 |
-
l.remove_weight_norm()
|
532 |
-
|
533 |
-
|
534 |
-
sr2sr = {
|
535 |
-
"32k": 32000,
|
536 |
-
"40k": 40000,
|
537 |
-
"48k": 48000,
|
538 |
-
}
|
539 |
-
|
540 |
-
|
541 |
-
class SynthesizerTrnMs256NSFsid(nn.Module):
|
542 |
-
def __init__(
|
543 |
-
self,
|
544 |
-
spec_channels,
|
545 |
-
segment_size,
|
546 |
-
inter_channels,
|
547 |
-
hidden_channels,
|
548 |
-
filter_channels,
|
549 |
-
n_heads,
|
550 |
-
n_layers,
|
551 |
-
kernel_size,
|
552 |
-
p_dropout,
|
553 |
-
resblock,
|
554 |
-
resblock_kernel_sizes,
|
555 |
-
resblock_dilation_sizes,
|
556 |
-
upsample_rates,
|
557 |
-
upsample_initial_channel,
|
558 |
-
upsample_kernel_sizes,
|
559 |
-
spk_embed_dim,
|
560 |
-
gin_channels,
|
561 |
-
sr,
|
562 |
-
**kwargs
|
563 |
-
):
|
564 |
-
super().__init__()
|
565 |
-
if type(sr) == type("strr"):
|
566 |
-
sr = sr2sr[sr]
|
567 |
-
self.spec_channels = spec_channels
|
568 |
-
self.inter_channels = inter_channels
|
569 |
-
self.hidden_channels = hidden_channels
|
570 |
-
self.filter_channels = filter_channels
|
571 |
-
self.n_heads = n_heads
|
572 |
-
self.n_layers = n_layers
|
573 |
-
self.kernel_size = kernel_size
|
574 |
-
self.p_dropout = p_dropout
|
575 |
-
self.resblock = resblock
|
576 |
-
self.resblock_kernel_sizes = resblock_kernel_sizes
|
577 |
-
self.resblock_dilation_sizes = resblock_dilation_sizes
|
578 |
-
self.upsample_rates = upsample_rates
|
579 |
-
self.upsample_initial_channel = upsample_initial_channel
|
580 |
-
self.upsample_kernel_sizes = upsample_kernel_sizes
|
581 |
-
self.segment_size = segment_size
|
582 |
-
self.gin_channels = gin_channels
|
583 |
-
# self.hop_length = hop_length#
|
584 |
-
self.spk_embed_dim = spk_embed_dim
|
585 |
-
self.enc_p = TextEncoder256(
|
586 |
-
inter_channels,
|
587 |
-
hidden_channels,
|
588 |
-
filter_channels,
|
589 |
-
n_heads,
|
590 |
-
n_layers,
|
591 |
-
kernel_size,
|
592 |
-
p_dropout,
|
593 |
-
)
|
594 |
-
self.dec = GeneratorNSF(
|
595 |
-
inter_channels,
|
596 |
-
resblock,
|
597 |
-
resblock_kernel_sizes,
|
598 |
-
resblock_dilation_sizes,
|
599 |
-
upsample_rates,
|
600 |
-
upsample_initial_channel,
|
601 |
-
upsample_kernel_sizes,
|
602 |
-
gin_channels=gin_channels,
|
603 |
-
sr=sr,
|
604 |
-
is_half=kwargs["is_half"],
|
605 |
-
)
|
606 |
-
self.enc_q = PosteriorEncoder(
|
607 |
-
spec_channels,
|
608 |
-
inter_channels,
|
609 |
-
hidden_channels,
|
610 |
-
5,
|
611 |
-
1,
|
612 |
-
16,
|
613 |
-
gin_channels=gin_channels,
|
614 |
-
)
|
615 |
-
self.flow = ResidualCouplingBlock(
|
616 |
-
inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
|
617 |
-
)
|
618 |
-
self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
|
619 |
-
logger.debug(
|
620 |
-
"gin_channels: "
|
621 |
-
+ str(gin_channels)
|
622 |
-
+ ", self.spk_embed_dim: "
|
623 |
-
+ str(self.spk_embed_dim)
|
624 |
-
)
|
625 |
-
|
626 |
-
def remove_weight_norm(self):
|
627 |
-
self.dec.remove_weight_norm()
|
628 |
-
self.flow.remove_weight_norm()
|
629 |
-
self.enc_q.remove_weight_norm()
|
630 |
-
|
631 |
-
def forward(
|
632 |
-
self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds
|
633 |
-
): # 这里ds是id,[bs,1]
|
634 |
-
# print(1,pitch.shape)#[bs,t]
|
635 |
-
g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的
|
636 |
-
m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
|
637 |
-
z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
|
638 |
-
z_p = self.flow(z, y_mask, g=g)
|
639 |
-
z_slice, ids_slice = commons.rand_slice_segments(
|
640 |
-
z, y_lengths, self.segment_size
|
641 |
-
)
|
642 |
-
# print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)
|
643 |
-
pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
|
644 |
-
# print(-2,pitchf.shape,z_slice.shape)
|
645 |
-
o = self.dec(z_slice, pitchf, g=g)
|
646 |
-
return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
|
647 |
-
|
648 |
-
def infer(self, phone, phone_lengths, pitch, nsff0, sid, rate=None):
|
649 |
-
g = self.emb_g(sid).unsqueeze(-1)
|
650 |
-
m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
|
651 |
-
z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
|
652 |
-
if rate:
|
653 |
-
head = int(z_p.shape[2] * rate)
|
654 |
-
z_p = z_p[:, :, -head:]
|
655 |
-
x_mask = x_mask[:, :, -head:]
|
656 |
-
nsff0 = nsff0[:, -head:]
|
657 |
-
z = self.flow(z_p, x_mask, g=g, reverse=True)
|
658 |
-
o = self.dec(z * x_mask, nsff0, g=g)
|
659 |
-
return o, x_mask, (z, z_p, m_p, logs_p)
|
660 |
-
|
661 |
-
|
662 |
-
class SynthesizerTrnMs768NSFsid(nn.Module):
|
663 |
-
def __init__(
|
664 |
-
self,
|
665 |
-
spec_channels,
|
666 |
-
segment_size,
|
667 |
-
inter_channels,
|
668 |
-
hidden_channels,
|
669 |
-
filter_channels,
|
670 |
-
n_heads,
|
671 |
-
n_layers,
|
672 |
-
kernel_size,
|
673 |
-
p_dropout,
|
674 |
-
resblock,
|
675 |
-
resblock_kernel_sizes,
|
676 |
-
resblock_dilation_sizes,
|
677 |
-
upsample_rates,
|
678 |
-
upsample_initial_channel,
|
679 |
-
upsample_kernel_sizes,
|
680 |
-
spk_embed_dim,
|
681 |
-
gin_channels,
|
682 |
-
sr,
|
683 |
-
**kwargs
|
684 |
-
):
|
685 |
-
super().__init__()
|
686 |
-
if type(sr) == type("strr"):
|
687 |
-
sr = sr2sr[sr]
|
688 |
-
self.spec_channels = spec_channels
|
689 |
-
self.inter_channels = inter_channels
|
690 |
-
self.hidden_channels = hidden_channels
|
691 |
-
self.filter_channels = filter_channels
|
692 |
-
self.n_heads = n_heads
|
693 |
-
self.n_layers = n_layers
|
694 |
-
self.kernel_size = kernel_size
|
695 |
-
self.p_dropout = p_dropout
|
696 |
-
self.resblock = resblock
|
697 |
-
self.resblock_kernel_sizes = resblock_kernel_sizes
|
698 |
-
self.resblock_dilation_sizes = resblock_dilation_sizes
|
699 |
-
self.upsample_rates = upsample_rates
|
700 |
-
self.upsample_initial_channel = upsample_initial_channel
|
701 |
-
self.upsample_kernel_sizes = upsample_kernel_sizes
|
702 |
-
self.segment_size = segment_size
|
703 |
-
self.gin_channels = gin_channels
|
704 |
-
# self.hop_length = hop_length#
|
705 |
-
self.spk_embed_dim = spk_embed_dim
|
706 |
-
self.enc_p = TextEncoder768(
|
707 |
-
inter_channels,
|
708 |
-
hidden_channels,
|
709 |
-
filter_channels,
|
710 |
-
n_heads,
|
711 |
-
n_layers,
|
712 |
-
kernel_size,
|
713 |
-
p_dropout,
|
714 |
-
)
|
715 |
-
self.dec = GeneratorNSF(
|
716 |
-
inter_channels,
|
717 |
-
resblock,
|
718 |
-
resblock_kernel_sizes,
|
719 |
-
resblock_dilation_sizes,
|
720 |
-
upsample_rates,
|
721 |
-
upsample_initial_channel,
|
722 |
-
upsample_kernel_sizes,
|
723 |
-
gin_channels=gin_channels,
|
724 |
-
sr=sr,
|
725 |
-
is_half=kwargs["is_half"],
|
726 |
-
)
|
727 |
-
self.enc_q = PosteriorEncoder(
|
728 |
-
spec_channels,
|
729 |
-
inter_channels,
|
730 |
-
hidden_channels,
|
731 |
-
5,
|
732 |
-
1,
|
733 |
-
16,
|
734 |
-
gin_channels=gin_channels,
|
735 |
-
)
|
736 |
-
self.flow = ResidualCouplingBlock(
|
737 |
-
inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
|
738 |
-
)
|
739 |
-
self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
|
740 |
-
logger.debug(
|
741 |
-
"gin_channels: "
|
742 |
-
+ str(gin_channels)
|
743 |
-
+ ", self.spk_embed_dim: "
|
744 |
-
+ str(self.spk_embed_dim)
|
745 |
-
)
|
746 |
-
|
747 |
-
def remove_weight_norm(self):
|
748 |
-
self.dec.remove_weight_norm()
|
749 |
-
self.flow.remove_weight_norm()
|
750 |
-
self.enc_q.remove_weight_norm()
|
751 |
-
|
752 |
-
def forward(
|
753 |
-
self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds
|
754 |
-
): # 这里ds是id,[bs,1]
|
755 |
-
# print(1,pitch.shape)#[bs,t]
|
756 |
-
g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的
|
757 |
-
m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
|
758 |
-
z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
|
759 |
-
z_p = self.flow(z, y_mask, g=g)
|
760 |
-
z_slice, ids_slice = commons.rand_slice_segments(
|
761 |
-
z, y_lengths, self.segment_size
|
762 |
-
)
|
763 |
-
# print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)
|
764 |
-
pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
|
765 |
-
# print(-2,pitchf.shape,z_slice.shape)
|
766 |
-
o = self.dec(z_slice, pitchf, g=g)
|
767 |
-
return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
|
768 |
-
|
769 |
-
def infer(self, phone, phone_lengths, pitch, nsff0, sid, rate=None):
|
770 |
-
g = self.emb_g(sid).unsqueeze(-1)
|
771 |
-
m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
|
772 |
-
z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
|
773 |
-
if rate:
|
774 |
-
head = int(z_p.shape[2] * rate)
|
775 |
-
z_p = z_p[:, :, -head:]
|
776 |
-
x_mask = x_mask[:, :, -head:]
|
777 |
-
nsff0 = nsff0[:, -head:]
|
778 |
-
z = self.flow(z_p, x_mask, g=g, reverse=True)
|
779 |
-
o = self.dec(z * x_mask, nsff0, g=g)
|
780 |
-
return o, x_mask, (z, z_p, m_p, logs_p)
|
781 |
-
|
782 |
-
|
783 |
-
class SynthesizerTrnMs256NSFsid_nono(nn.Module):
|
784 |
-
def __init__(
|
785 |
-
self,
|
786 |
-
spec_channels,
|
787 |
-
segment_size,
|
788 |
-
inter_channels,
|
789 |
-
hidden_channels,
|
790 |
-
filter_channels,
|
791 |
-
n_heads,
|
792 |
-
n_layers,
|
793 |
-
kernel_size,
|
794 |
-
p_dropout,
|
795 |
-
resblock,
|
796 |
-
resblock_kernel_sizes,
|
797 |
-
resblock_dilation_sizes,
|
798 |
-
upsample_rates,
|
799 |
-
upsample_initial_channel,
|
800 |
-
upsample_kernel_sizes,
|
801 |
-
spk_embed_dim,
|
802 |
-
gin_channels,
|
803 |
-
sr=None,
|
804 |
-
**kwargs
|
805 |
-
):
|
806 |
-
super().__init__()
|
807 |
-
self.spec_channels = spec_channels
|
808 |
-
self.inter_channels = inter_channels
|
809 |
-
self.hidden_channels = hidden_channels
|
810 |
-
self.filter_channels = filter_channels
|
811 |
-
self.n_heads = n_heads
|
812 |
-
self.n_layers = n_layers
|
813 |
-
self.kernel_size = kernel_size
|
814 |
-
self.p_dropout = p_dropout
|
815 |
-
self.resblock = resblock
|
816 |
-
self.resblock_kernel_sizes = resblock_kernel_sizes
|
817 |
-
self.resblock_dilation_sizes = resblock_dilation_sizes
|
818 |
-
self.upsample_rates = upsample_rates
|
819 |
-
self.upsample_initial_channel = upsample_initial_channel
|
820 |
-
self.upsample_kernel_sizes = upsample_kernel_sizes
|
821 |
-
self.segment_size = segment_size
|
822 |
-
self.gin_channels = gin_channels
|
823 |
-
# self.hop_length = hop_length#
|
824 |
-
self.spk_embed_dim = spk_embed_dim
|
825 |
-
self.enc_p = TextEncoder256(
|
826 |
-
inter_channels,
|
827 |
-
hidden_channels,
|
828 |
-
filter_channels,
|
829 |
-
n_heads,
|
830 |
-
n_layers,
|
831 |
-
kernel_size,
|
832 |
-
p_dropout,
|
833 |
-
f0=False,
|
834 |
-
)
|
835 |
-
self.dec = Generator(
|
836 |
-
inter_channels,
|
837 |
-
resblock,
|
838 |
-
resblock_kernel_sizes,
|
839 |
-
resblock_dilation_sizes,
|
840 |
-
upsample_rates,
|
841 |
-
upsample_initial_channel,
|
842 |
-
upsample_kernel_sizes,
|
843 |
-
gin_channels=gin_channels,
|
844 |
-
)
|
845 |
-
self.enc_q = PosteriorEncoder(
|
846 |
-
spec_channels,
|
847 |
-
inter_channels,
|
848 |
-
hidden_channels,
|
849 |
-
5,
|
850 |
-
1,
|
851 |
-
16,
|
852 |
-
gin_channels=gin_channels,
|
853 |
-
)
|
854 |
-
self.flow = ResidualCouplingBlock(
|
855 |
-
inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
|
856 |
-
)
|
857 |
-
self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
|
858 |
-
logger.debug(
|
859 |
-
"gin_channels: "
|
860 |
-
+ str(gin_channels)
|
861 |
-
+ ", self.spk_embed_dim: "
|
862 |
-
+ str(self.spk_embed_dim)
|
863 |
-
)
|
864 |
-
|
865 |
-
def remove_weight_norm(self):
|
866 |
-
self.dec.remove_weight_norm()
|
867 |
-
self.flow.remove_weight_norm()
|
868 |
-
self.enc_q.remove_weight_norm()
|
869 |
-
|
870 |
-
def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1]
|
871 |
-
g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的
|
872 |
-
m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
|
873 |
-
z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
|
874 |
-
z_p = self.flow(z, y_mask, g=g)
|
875 |
-
z_slice, ids_slice = commons.rand_slice_segments(
|
876 |
-
z, y_lengths, self.segment_size
|
877 |
-
)
|
878 |
-
o = self.dec(z_slice, g=g)
|
879 |
-
return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
|
880 |
-
|
881 |
-
def infer(self, phone, phone_lengths, sid, rate=None):
|
882 |
-
g = self.emb_g(sid).unsqueeze(-1)
|
883 |
-
m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
|
884 |
-
z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
|
885 |
-
if rate:
|
886 |
-
head = int(z_p.shape[2] * rate)
|
887 |
-
z_p = z_p[:, :, -head:]
|
888 |
-
x_mask = x_mask[:, :, -head:]
|
889 |
-
z = self.flow(z_p, x_mask, g=g, reverse=True)
|
890 |
-
o = self.dec(z * x_mask, g=g)
|
891 |
-
return o, x_mask, (z, z_p, m_p, logs_p)
|
892 |
-
|
893 |
-
|
894 |
-
class SynthesizerTrnMs768NSFsid_nono(nn.Module):
|
895 |
-
def __init__(
|
896 |
-
self,
|
897 |
-
spec_channels,
|
898 |
-
segment_size,
|
899 |
-
inter_channels,
|
900 |
-
hidden_channels,
|
901 |
-
filter_channels,
|
902 |
-
n_heads,
|
903 |
-
n_layers,
|
904 |
-
kernel_size,
|
905 |
-
p_dropout,
|
906 |
-
resblock,
|
907 |
-
resblock_kernel_sizes,
|
908 |
-
resblock_dilation_sizes,
|
909 |
-
upsample_rates,
|
910 |
-
upsample_initial_channel,
|
911 |
-
upsample_kernel_sizes,
|
912 |
-
spk_embed_dim,
|
913 |
-
gin_channels,
|
914 |
-
sr=None,
|
915 |
-
**kwargs
|
916 |
-
):
|
917 |
-
super().__init__()
|
918 |
-
self.spec_channels = spec_channels
|
919 |
-
self.inter_channels = inter_channels
|
920 |
-
self.hidden_channels = hidden_channels
|
921 |
-
self.filter_channels = filter_channels
|
922 |
-
self.n_heads = n_heads
|
923 |
-
self.n_layers = n_layers
|
924 |
-
self.kernel_size = kernel_size
|
925 |
-
self.p_dropout = p_dropout
|
926 |
-
self.resblock = resblock
|
927 |
-
self.resblock_kernel_sizes = resblock_kernel_sizes
|
928 |
-
self.resblock_dilation_sizes = resblock_dilation_sizes
|
929 |
-
self.upsample_rates = upsample_rates
|
930 |
-
self.upsample_initial_channel = upsample_initial_channel
|
931 |
-
self.upsample_kernel_sizes = upsample_kernel_sizes
|
932 |
-
self.segment_size = segment_size
|
933 |
-
self.gin_channels = gin_channels
|
934 |
-
# self.hop_length = hop_length#
|
935 |
-
self.spk_embed_dim = spk_embed_dim
|
936 |
-
self.enc_p = TextEncoder768(
|
937 |
-
inter_channels,
|
938 |
-
hidden_channels,
|
939 |
-
filter_channels,
|
940 |
-
n_heads,
|
941 |
-
n_layers,
|
942 |
-
kernel_size,
|
943 |
-
p_dropout,
|
944 |
-
f0=False,
|
945 |
-
)
|
946 |
-
self.dec = Generator(
|
947 |
-
inter_channels,
|
948 |
-
resblock,
|
949 |
-
resblock_kernel_sizes,
|
950 |
-
resblock_dilation_sizes,
|
951 |
-
upsample_rates,
|
952 |
-
upsample_initial_channel,
|
953 |
-
upsample_kernel_sizes,
|
954 |
-
gin_channels=gin_channels,
|
955 |
-
)
|
956 |
-
self.enc_q = PosteriorEncoder(
|
957 |
-
spec_channels,
|
958 |
-
inter_channels,
|
959 |
-
hidden_channels,
|
960 |
-
5,
|
961 |
-
1,
|
962 |
-
16,
|
963 |
-
gin_channels=gin_channels,
|
964 |
-
)
|
965 |
-
self.flow = ResidualCouplingBlock(
|
966 |
-
inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
|
967 |
-
)
|
968 |
-
self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
|
969 |
-
logger.debug(
|
970 |
-
"gin_channels: "
|
971 |
-
+ str(gin_channels)
|
972 |
-
+ ", self.spk_embed_dim: "
|
973 |
-
+ str(self.spk_embed_dim)
|
974 |
-
)
|
975 |
-
|
976 |
-
def remove_weight_norm(self):
|
977 |
-
self.dec.remove_weight_norm()
|
978 |
-
self.flow.remove_weight_norm()
|
979 |
-
self.enc_q.remove_weight_norm()
|
980 |
-
|
981 |
-
def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1]
|
982 |
-
g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的
|
983 |
-
m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
|
984 |
-
z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
|
985 |
-
z_p = self.flow(z, y_mask, g=g)
|
986 |
-
z_slice, ids_slice = commons.rand_slice_segments(
|
987 |
-
z, y_lengths, self.segment_size
|
988 |
-
)
|
989 |
-
o = self.dec(z_slice, g=g)
|
990 |
-
return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
|
991 |
-
|
992 |
-
def infer(self, phone, phone_lengths, sid, rate=None):
|
993 |
-
g = self.emb_g(sid).unsqueeze(-1)
|
994 |
-
m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
|
995 |
-
z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
|
996 |
-
if rate:
|
997 |
-
head = int(z_p.shape[2] * rate)
|
998 |
-
z_p = z_p[:, :, -head:]
|
999 |
-
x_mask = x_mask[:, :, -head:]
|
1000 |
-
z = self.flow(z_p, x_mask, g=g, reverse=True)
|
1001 |
-
o = self.dec(z * x_mask, g=g)
|
1002 |
-
return o, x_mask, (z, z_p, m_p, logs_p)
|
1003 |
-
|
1004 |
-
|
1005 |
-
class MultiPeriodDiscriminator(torch.nn.Module):
|
1006 |
-
def __init__(self, use_spectral_norm=False):
|
1007 |
-
super(MultiPeriodDiscriminator, self).__init__()
|
1008 |
-
        periods = [2, 3, 5, 7, 11, 17]
        # periods = [3, 5, 7, 11, 17, 23, 37]

        discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
        discs = discs + [
            DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
        ]
        self.discriminators = nn.ModuleList(discs)

    def forward(self, y, y_hat):
        y_d_rs = []
        y_d_gs = []
        fmap_rs = []
        fmap_gs = []
        for i, d in enumerate(self.discriminators):
            y_d_r, fmap_r = d(y)
            y_d_g, fmap_g = d(y_hat)
            # for j in range(len(fmap_r)):
            #     print(i, j, y.shape, y_hat.shape, fmap_r[j].shape, fmap_g[j].shape)
            y_d_rs.append(y_d_r)
            y_d_gs.append(y_d_g)
            fmap_rs.append(fmap_r)
            fmap_gs.append(fmap_g)

        return y_d_rs, y_d_gs, fmap_rs, fmap_gs


class MultiPeriodDiscriminatorV2(torch.nn.Module):
    def __init__(self, use_spectral_norm=False):
        super(MultiPeriodDiscriminatorV2, self).__init__()
        # periods = [2, 3, 5, 7, 11, 17]
        periods = [2, 3, 5, 7, 11, 17, 23, 37]

        discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
        discs = discs + [
            DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
        ]
        self.discriminators = nn.ModuleList(discs)

    def forward(self, y, y_hat):
        y_d_rs = []
        y_d_gs = []
        fmap_rs = []
        fmap_gs = []
        for i, d in enumerate(self.discriminators):
            y_d_r, fmap_r = d(y)
            y_d_g, fmap_g = d(y_hat)
            # for j in range(len(fmap_r)):
            #     print(i, j, y.shape, y_hat.shape, fmap_r[j].shape, fmap_g[j].shape)
            y_d_rs.append(y_d_r)
            y_d_gs.append(y_d_g)
            fmap_rs.append(fmap_r)
            fmap_gs.append(fmap_g)

        return y_d_rs, y_d_gs, fmap_rs, fmap_gs


class DiscriminatorS(torch.nn.Module):
    def __init__(self, use_spectral_norm=False):
        super(DiscriminatorS, self).__init__()
        norm_f = weight_norm if use_spectral_norm == False else spectral_norm
        self.convs = nn.ModuleList(
            [
                norm_f(Conv1d(1, 16, 15, 1, padding=7)),
                norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
                norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
                norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
                norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
                norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
            ]
        )
        self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))

    def forward(self, x):
        fmap = []

        for l in self.convs:
            x = l(x)
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            fmap.append(x)
        x = self.conv_post(x)
        fmap.append(x)
        x = torch.flatten(x, 1, -1)

        return x, fmap


class DiscriminatorP(torch.nn.Module):
    def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
        super(DiscriminatorP, self).__init__()
        self.period = period
        self.use_spectral_norm = use_spectral_norm
        norm_f = weight_norm if use_spectral_norm == False else spectral_norm
        self.convs = nn.ModuleList(
            [
                norm_f(
                    Conv2d(
                        1,
                        32,
                        (kernel_size, 1),
                        (stride, 1),
                        padding=(get_padding(kernel_size, 1), 0),
                    )
                ),
                norm_f(
                    Conv2d(
                        32,
                        128,
                        (kernel_size, 1),
                        (stride, 1),
                        padding=(get_padding(kernel_size, 1), 0),
                    )
                ),
                norm_f(
                    Conv2d(
                        128,
                        512,
                        (kernel_size, 1),
                        (stride, 1),
                        padding=(get_padding(kernel_size, 1), 0),
                    )
                ),
                norm_f(
                    Conv2d(
                        512,
                        1024,
                        (kernel_size, 1),
                        (stride, 1),
                        padding=(get_padding(kernel_size, 1), 0),
                    )
                ),
                norm_f(
                    Conv2d(
                        1024,
                        1024,
                        (kernel_size, 1),
                        1,
                        padding=(get_padding(kernel_size, 1), 0),
                    )
                ),
            ]
        )
        self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))

    def forward(self, x):
        fmap = []

        # 1d to 2d
        b, c, t = x.shape
        if t % self.period != 0:  # pad first
            n_pad = self.period - (t % self.period)
            if has_xpu and x.dtype == torch.bfloat16:
                x = F.pad(
                    x.to(dtype=torch.float16), (0, n_pad), "reflect"
                ).to(dtype=torch.bfloat16)
            else:
                x = F.pad(x, (0, n_pad), "reflect")
            t = t + n_pad
        x = x.view(b, c, t // self.period, self.period)

        for l in self.convs:
            x = l(x)
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            fmap.append(x)
        x = self.conv_post(x)
        fmap.append(x)
        x = torch.flatten(x, 1, -1)

        return x, fmap
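For reference, a minimal sketch of how the discriminators above are typically driven during adversarial training. The batch shape and the LSGAN-style objective are illustrative assumptions, not code from this file:

    import torch

    disc = MultiPeriodDiscriminator()
    y = torch.randn(4, 1, 8192)      # real waveforms, [batch, 1, samples] (assumed shape)
    y_hat = torch.randn(4, 1, 8192)  # generated waveforms

    y_d_rs, y_d_gs, fmap_rs, fmap_gs = disc(y, y_hat)

    # One common discriminator objective: push real outputs toward 1 and
    # generated outputs toward 0, summed over every sub-discriminator.
    loss_d = sum(
        torch.mean((1 - d_r) ** 2) + torch.mean(d_g ** 2)
        for d_r, d_g in zip(y_d_rs, y_d_gs)
    )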
spaces/Benson/text-generation/Examples/Arena Breakout Ios Descargar.md
DELETED
@@ -1,58 +0,0 @@
# Arena Breakout: How to Download and Play the Next-Generation Immersive Tactical FPS on iOS

Are you looking for a new and exciting mobile game that will challenge your skills and thrill your senses? If so, you may want to check out Arena Breakout, a next-generation immersive tactical FPS coming soon to iOS devices. Arena Breakout is a survival shooter that combines realistic graphics and gameplay with high stakes and rewards. In this game you enter a lawless arena where you search for loot, fight enemies, and try to get out alive with your haul. You can also customize your weapons and strategies to suit your play style and preferences. If you are interested in playing, here is how to download and play Arena Breakout on iOS.

## What is Arena Breakout?

Arena Breakout is a new mobile game developed by Morefun Studios and published by Tencent Games. It is a free-to-play shooter built on three main pillars: survival, looting, and tactics.

DOWNLOAD: https://bltlly.com/2v6LmZ

### A survival shooter with realistic graphics and gameplay

Arena Breakout is not your typical point-and-shoot game. It is a survival shooter that requires you to pay attention to several factors affecting your performance and survival. For example, you have to deal with a full-body injury system that affects your movement, stamina, and accuracy depending on which body part is hit. You also face realistic recoil effects, weapon animations, and sound effects that immerse you in the battlefield. On top of that, there are dynamic weather conditions, such as rain, fog, snow, and wind, that can change the game's visibility and difficulty.

### A looting shooter with high stakes and rewards

### A tactical shooter with customizable weapons and strategies

Arena Breakout is not a game where you can just run and gun. It is a tactical shooter that requires you to plan your moves and coordinate with your team. Before entering the battlefield, you choose the entry gear and loadout that determine your combat power. You can customize your weapons with more than 700 weapon parts that fit into more than 10 modification slots, mixing and matching parts to create a unique weapon that suits your play style. You also use different strategies depending on the situation: confront enemies head-on, sneak past them, or avoid them entirely, and use items such as grenades, flashbangs, smoke bombs, or drones to gain an edge over your opponents.

## How to download Arena Breakout on iOS?

Arena Breakout has not yet been officially released on the App Store, but you can pre-register and be notified when it becomes available. You can also join the beta test and get early access to the game before the official launch. Here are the steps to download Arena Breakout on iOS.

### Pre-register on the App Store or the Tap Tap website

The first step is to pre-register for Arena Breakout on the App Store or Tap Tap website:

- Arena Breakout on the App Store
- Arena Breakout on Tap Tap

By registering, you will receive updates and news about the game, as well as exclusive rewards and perks when the game is released.

### Wait for the official release date or join the beta test

### Install the game and log in with your account

The third step is to install the game and log in with your account. You will need at least 4 GB of free space on your device and a stable internet connection to play. You will also need to create an account or sign in with an existing account from other Tencent games, such as PUBG Mobile or Call of Duty Mobile. After signing in, you can access the game and start playing.

## How do you play Arena Breakout on iOS?

Arena Breakout is a multiplayer game that supports up to 60 players per match. You can play solo or with your friends in different modes, such as team deathmatch, capture the flag, or battle royale. The basic gameplay loop of Arena Breakout is as follows.

### Choose your entry gear and loadout

Before entering the battlefield, you choose the entry gear and loadout that determine your combat power. You can pick from different weapon categories, such as assault rifles, sniper rifles, shotguns, pistols, and melee weapons, along with attachments such as scopes, silencers, grips, and magazines, and items such as armor plates, helmets, backpacks, and vests. You can also customize your character's appearance and skills.

### Search for loot and fight enemies on the open map

After choosing your gear and loadout, you enter a large open map where you will find various weapons, attachments, ammunition, food, drinks, medicine, and more. You search for loot and fight enemies to survive and win. You will run into other players and AI enemies who will try to steal your loot or kill you, and dynamic weather conditions can change the game's visibility and difficulty.

### Get out of the combat area alive with your loot

Arena Breakout is a challenging and rewarding game that will test your skills and strategies. To help you improve your gameplay and enjoy the game more, here are some tips and tricks you can use.

### Use the injury system to your advantage

Arena Breakout has a realistic injury system that affects your movement, stamina, and accuracy depending on which body part is hit. If you are shot in the leg, you will limp and run more slowly. If you are shot in the arm, you will have less stability and recoil control. If you are shot in the head, you will have blurred vision and reduced hearing. To heal, you use bandages, med kits, or syringes that restore your health and remove injury effects. You can also turn the injury system against your enemies by targeting specific body parts: shoot an enemy in the leg to slow them down and keep them from escaping, in the arm to reduce their accuracy and damage, or in the head to stun them and finish them off.

### Manage your ammo and supplies wisely

Arena Breakout has a realistic ammo and supply system that requires careful resource management. You reload your weapons manually and keep track of how many bullets you have left, and you carry different ammunition types for different weapons, such as 5.56 mm, 7.62 mm, or 9 mm. You also carry different supplies, such as food, drinks, medicine, or grenades, but inventory space is limited by the size of your backpack. You therefore have to decide what to keep and what to discard or trade with other players, and balance looting more items, at the risk of exposing yourself, against lying low and conserving resources.

### Coordinate with your team and communicate effectively

## Conclusion

Arena Breakout is a next-generation immersive tactical FPS coming soon to iOS devices. It is a survival shooter that combines realistic graphics and gameplay with high stakes and rewards. You enter a lawless arena where you search for loot, fight enemies, and try to get out alive with your haul, and you can customize your weapons and strategies to suit your play style and preferences. If you are interested in playing, you can pre-register on the App Store or the Tap Tap website, or join the beta test and get early access before the official launch.

## FAQs

Q: What are the minimum requirements to play Arena Breakout on iOS?
A: You need an iOS device running iOS 13 or later with at least 4 GB of free space.

Q: How can I get more rewards and perks in Arena Breakout?
A: Pre-register for the game, join the beta test, complete missions and achievements, climb the leaderboards, or take part in events and promotions.

Q: How can I play Arena Breakout with my friends?
A: Invite them to join your team, or join theirs, through the in-game menu or social media platforms.

Q: How can I report bugs or give feedback on Arena Breakout?
A: Contact the customer service team through the in-game settings or send an email to [email protected].

Q: How can I learn more about Arena Breakout?
A: Visit the official website or follow the official social media accounts on Facebook, Twitter, Instagram, or YouTube.
spaces/BramVanroy/llama-2-13b-chat-dutch-space/README.md
DELETED
@@ -1,21 +0,0 @@
---
title: Llama 2 13b Chat Dutch
emoji: 🦙
colorFrom: indigo
colorTo: pink
sdk: gradio
sdk_version: 3.45.1
app_file: app.py
pinned: false
license: other
suggested_hardware: a10g-small
_duplicated_from: huggingface-projects/llama-2-13b-chat_
---

# LLAMA v2 finetuned for Dutch Chat

Llama v2 was introduced in [this paper](https://arxiv.org/abs/2307.09288).

This Space demonstrates [BramVanroy/Llama-2-13b-chat-dutch](https://huggingface.co/BramVanroy/Llama-2-13b-chat-dutch). Please check the original model card for details.

This Space was duplicated and modified from [huggingface-projects/llama-2-13b-chat](https://huggingface.co/spaces/huggingface-projects/llama-2-13b-chat).
spaces/CVPR/LIVE/thrust/thrust/detail/execution_policy.h
DELETED
@@ -1,77 +0,0 @@
/*
 *  Copyright 2008-2013 NVIDIA Corporation
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

#pragma once

#include <thrust/detail/config.h>

namespace thrust
{
namespace detail
{

struct execution_policy_marker {};

// execution_policy_base serves as a guard against
// infinite recursion in thrust entry points:
//
//   template<typename DerivedPolicy>
//   void foo(const thrust::detail::execution_policy_base<DerivedPolicy> &s)
//   {
//     using thrust::system::detail::generic::foo;
//
//     foo(thrust::detail::derived_cast(thrust::detail::strip_const(s)));
//   }
//
// foo is not recursive when
// 1. DerivedPolicy is derived from thrust::execution_policy below
// 2. generic::foo takes thrust::execution_policy as a parameter
template<typename DerivedPolicy>
struct execution_policy_base : execution_policy_marker {};


template<typename DerivedPolicy>
THRUST_CONSTEXPR __host__ __device__
execution_policy_base<DerivedPolicy> &strip_const(const execution_policy_base<DerivedPolicy> &x)
{
  return const_cast<execution_policy_base<DerivedPolicy>&>(x);
}


template<typename DerivedPolicy>
THRUST_CONSTEXPR __host__ __device__
DerivedPolicy &derived_cast(execution_policy_base<DerivedPolicy> &x)
{
  return static_cast<DerivedPolicy&>(x);
}


template<typename DerivedPolicy>
THRUST_CONSTEXPR __host__ __device__
const DerivedPolicy &derived_cast(const execution_policy_base<DerivedPolicy> &x)
{
  return static_cast<const DerivedPolicy&>(x);
}

} // end detail

template<typename DerivedPolicy>
struct execution_policy
  : thrust::detail::execution_policy_base<DerivedPolicy>
{};

} // end thrust
spaces/CVPR/WALT/mmdet/models/utils/positional_encoding.py
DELETED
@@ -1,150 +0,0 @@
import math

import torch
import torch.nn as nn
from mmcv.cnn import uniform_init

from .builder import POSITIONAL_ENCODING


@POSITIONAL_ENCODING.register_module()
class SinePositionalEncoding(nn.Module):
    """Position encoding with sine and cosine functions.

    See `End-to-End Object Detection with Transformers
    <https://arxiv.org/pdf/2005.12872>`_ for details.

    Args:
        num_feats (int): The feature dimension for each position
            along x-axis or y-axis. Note the final returned dimension
            for each position is 2 times of this value.
        temperature (int, optional): The temperature used for scaling
            the position embedding. Default 10000.
        normalize (bool, optional): Whether to normalize the position
            embedding. Default False.
        scale (float, optional): A scale factor that scales the position
            embedding. The scale will be used only when `normalize` is True.
            Default 2*pi.
        eps (float, optional): A value added to the denominator for
            numerical stability. Default 1e-6.
    """

    def __init__(self,
                 num_feats,
                 temperature=10000,
                 normalize=False,
                 scale=2 * math.pi,
                 eps=1e-6):
        super(SinePositionalEncoding, self).__init__()
        if normalize:
            assert isinstance(scale, (float, int)), 'when normalize is set,' \
                ' scale should be provided and in float or int type, ' \
                f'found {type(scale)}'
        self.num_feats = num_feats
        self.temperature = temperature
        self.normalize = normalize
        self.scale = scale
        self.eps = eps

    def forward(self, mask):
        """Forward function for `SinePositionalEncoding`.

        Args:
            mask (Tensor): ByteTensor mask. Non-zero values represent
                ignored positions, while zero values mean valid positions
                for this image. Shape [bs, h, w].

        Returns:
            pos (Tensor): Returned position embedding with shape
                [bs, num_feats*2, h, w].
        """
        not_mask = ~mask
        y_embed = not_mask.cumsum(1, dtype=torch.float32)
        x_embed = not_mask.cumsum(2, dtype=torch.float32)
        if self.normalize:
            y_embed = y_embed / (y_embed[:, -1:, :] + self.eps) * self.scale
            x_embed = x_embed / (x_embed[:, :, -1:] + self.eps) * self.scale
        dim_t = torch.arange(
            self.num_feats, dtype=torch.float32, device=mask.device)
        dim_t = self.temperature**(2 * (dim_t // 2) / self.num_feats)
        pos_x = x_embed[:, :, :, None] / dim_t
        pos_y = y_embed[:, :, :, None] / dim_t
        pos_x = torch.stack(
            (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()),
            dim=4).flatten(3)
        pos_y = torch.stack(
            (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()),
            dim=4).flatten(3)
        pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
        return pos

    def __repr__(self):
        """str: a string that describes the module"""
        repr_str = self.__class__.__name__
        repr_str += f'(num_feats={self.num_feats}, '
        repr_str += f'temperature={self.temperature}, '
        repr_str += f'normalize={self.normalize}, '
        repr_str += f'scale={self.scale}, '
        repr_str += f'eps={self.eps})'
        return repr_str


@POSITIONAL_ENCODING.register_module()
class LearnedPositionalEncoding(nn.Module):
    """Position embedding with learnable embedding weights.

    Args:
        num_feats (int): The feature dimension for each position
            along x-axis or y-axis. The final returned dimension for
            each position is 2 times of this value.
        row_num_embed (int, optional): The dictionary size of row embeddings.
            Default 50.
        col_num_embed (int, optional): The dictionary size of col embeddings.
            Default 50.
    """

    def __init__(self, num_feats, row_num_embed=50, col_num_embed=50):
        super(LearnedPositionalEncoding, self).__init__()
        self.row_embed = nn.Embedding(row_num_embed, num_feats)
        self.col_embed = nn.Embedding(col_num_embed, num_feats)
        self.num_feats = num_feats
        self.row_num_embed = row_num_embed
        self.col_num_embed = col_num_embed
        self.init_weights()

    def init_weights(self):
        """Initialize the learnable weights."""
        uniform_init(self.row_embed)
        uniform_init(self.col_embed)

    def forward(self, mask):
        """Forward function for `LearnedPositionalEncoding`.

        Args:
            mask (Tensor): ByteTensor mask. Non-zero values represent
                ignored positions, while zero values mean valid positions
                for this image. Shape [bs, h, w].

        Returns:
            pos (Tensor): Returned position embedding with shape
                [bs, num_feats*2, h, w].
        """
        h, w = mask.shape[-2:]
        x = torch.arange(w, device=mask.device)
        y = torch.arange(h, device=mask.device)
        x_embed = self.col_embed(x)
        y_embed = self.row_embed(y)
        pos = torch.cat(
            (x_embed.unsqueeze(0).repeat(h, 1, 1), y_embed.unsqueeze(1).repeat(
                1, w, 1)),
            dim=-1).permute(2, 0,
                            1).unsqueeze(0).repeat(mask.shape[0], 1, 1, 1)
        return pos

    def __repr__(self):
        """str: a string that describes the module"""
        repr_str = self.__class__.__name__
        repr_str += f'(num_feats={self.num_feats}, '
        repr_str += f'row_num_embed={self.row_num_embed}, '
        repr_str += f'col_num_embed={self.col_num_embed})'
        return repr_str
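As a quick sanity check of the shape contract documented above, a minimal sketch; the all-valid boolean mask and num_feats=128 are assumptions chosen for illustration:

    import torch

    pos_enc = SinePositionalEncoding(num_feats=128, normalize=True)
    mask = torch.zeros(2, 32, 32, dtype=torch.bool)  # zero = valid position
    pos = pos_enc(mask)
    print(pos.shape)  # torch.Size([2, 256, 32, 32]) -- num_feats * 2 channels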
spaces/CVPR/unicl-zero-shot-img-recog/model/model.py
DELETED
@@ -1,215 +0,0 @@
import pathlib
import tempfile
from collections import OrderedDict
from typing import Tuple, Union
import logging
import os

import numpy as np
import torch
import torch.nn.functional as F
from torch import nn

from timm.models.layers import DropPath, trunc_normal_

from .image_encoder import build_image_encoder
from .text_encoder import build_text_encoder
from .text_encoder import build_tokenizer
from .templates import DEFAULT_TEMPLATES

logger = logging.getLogger(__name__)


class UniCLModel(nn.Module):
    def __init__(self, config: dict,):
        super().__init__()

        self.conf_lang_encoder = config['MODEL']['TEXT_ENCODER']
        self.tokenizer = build_tokenizer(self.conf_lang_encoder)

        self.text_encoder = build_text_encoder(self.conf_lang_encoder, self.tokenizer, config['VERBOSE'])

        dim_projection = config['MODEL']['DIM_PROJECTION']
        if hasattr(self.text_encoder, 'dim_out'):
            dim_out = self.text_encoder.dim_out
        else:
            with torch.no_grad():
                dim_out = self.text_encoder(
                    torch.zeros(1, 1).type(torch.LongTensor)
                )['last_hidden_state'].size(2)

        self.text_projection = nn.Parameter(torch.empty(dim_out, dim_projection))

        self.conf_image_encoder = config['MODEL']['IMAGE_ENCODER']
        self.image_encoder = build_image_encoder(self.conf_image_encoder)

        self.image_projection = nn.Parameter(
            torch.empty(self.image_encoder.dim_out, dim_projection)
        )

        self.logit_scale = nn.Parameter(torch.ones([]))

        trunc_normal_(self.text_projection, std=.02)
        trunc_normal_(self.image_projection, std=.02)

    def _convert_old_weights(self, model_dict):
        model_dict_updated = {}
        for k, v in model_dict.items():
            if k.startswith('visual.'):
                model_dict_updated['image_encoder.' + k[7:]] = v
            elif k.startswith('text.'):
                model_dict_updated['lang_encoder.' + k[5:]] = v
            elif k == 'vision_projection':
                model_dict_updated['image_projection'] = v
            elif k == 'text_projection':
                model_dict_updated['text_projection'] = v
            else:
                model_dict_updated[k] = v

        return model_dict_updated

    def from_pretrained(self, pretrained='', pretrained_layers=[], verbose=True):
        if not os.path.isfile(pretrained):
            logger.warning(f'=> Pretrained model ({pretrained}) is not a file, skip init weight')
            return

        pretrained_dict = torch.load(pretrained, map_location='cpu')
        logger.info(f'=> Loading pretrained model {pretrained}')
        pretrained_dict = self._convert_old_weights(pretrained_dict)
        model_dict = self.state_dict()
        pretrained_dict = {
            k: v for k, v in pretrained_dict.items()
            if k in model_dict.keys()
        }
        need_init_state_dict = {}
        image_encoder_state_dict = {}
        for k, v in pretrained_dict.items():
            need_init = (
                k.split('.')[0] in pretrained_layers
                or pretrained_layers[0] == '*'
            )

            if need_init:
                if k.startswith('image_encoder.'):
                    image_encoder_state_dict[k] = v
                else:
                    if verbose:
                        logger.info(f'=> init {k} from {pretrained}')

                    need_init_state_dict[k] = v
        self.image_encoder.from_state_dict(image_encoder_state_dict, ['*'], verbose)
        self.load_state_dict(need_init_state_dict, strict=False)

    @torch.jit.ignore
    def no_weight_decay(self):
        no_weight_decay = {'logit_scale'}
        if hasattr(self.text_encoder, 'no_weight_decay'):
            for k in self.text_encoder.no_weight_decay():
                no_weight_decay.add('lang_encoder.' + k)

        if hasattr(self.image_encoder, 'no_weight_decay'):
            for k in self.image_encoder.no_weight_decay():
                no_weight_decay.add('image_encoder.' + k)

        return no_weight_decay

    @property
    def dtype(self):
        return self.logit_scale.dtype

    def get_imnet_embeddings(self):
        # NOTE: IMAGENET_DEFAULT_TEMPLATES and IMAGENET_CLASSES are expected
        # to be in scope; they are not imported at the top of this file.
        templates = IMAGENET_DEFAULT_TEMPLATES[:1]
        clss_embeddings = []
        for clss in IMAGENET_CLASSES:
            txts = [template.format(clss) for template in templates]

            tokens = self.tokenizer(
                txts, padding='max_length', truncation=True, max_length=77, return_tensors='pt'
            )
            tokens = {key: (val.cuda() if next(self.parameters()).is_cuda else val) for key, val in tokens.items()}

            clss_embedding = self.encode_text(tokens)
            clss_embedding = clss_embedding.mean(dim=0)
            clss_embedding /= clss_embedding.norm()
            clss_embeddings.append(clss_embedding)
        imnet_text_embeddings = torch.stack(clss_embeddings, dim=0)
        return imnet_text_embeddings

    def get_text_embeddings(self, texts):
        templates = DEFAULT_TEMPLATES[:1]
        clss_embeddings = []
        for clss in texts:
            txts = [template.format(clss) for template in templates]

            tokens = self.tokenizer(
                txts, padding='max_length', truncation=True, max_length=77, return_tensors='pt'
            )
            tokens = {key: (val.cuda() if next(self.parameters()).is_cuda else val) for key, val in tokens.items()}

            clss_embedding = self.encode_text(tokens)
            clss_embedding = clss_embedding.mean(dim=0)
            clss_embedding /= clss_embedding.norm()
            clss_embeddings.append(clss_embedding)
        imnet_text_embeddings = torch.stack(clss_embeddings, dim=0)
        return imnet_text_embeddings

    def encode_image(self, image, norm=True, output_map=False):
        x = self.image_encoder.forward_features(image, output_map=output_map)
        if output_map:
            x, x_map, H, W = x

        x = x @ self.image_projection

        if output_map:
            x_map = self.image_projection.unsqueeze(0).transpose(1, 2) @ x_map

        if norm:
            x = x / x.norm(dim=-1, keepdim=True)
            if output_map:
                x_map = x_map / x_map.norm(dim=1, keepdim=True)

        if output_map:
            return x, x_map, H, W
        else:
            return x

    def encode_text(self, text, norm=True):
        x = self.text_encoder(**text)
        x = x['last_hidden_state']

        if self.conf_lang_encoder['TOKENIZER'] == 'clip':
            x = x[torch.arange(x.size(0)), text['input_ids'].argmax(dim=-1)]
        else:
            x = x[:, 0]

        x = x @ self.text_projection

        if norm:
            x = x / x.norm(dim=-1, keepdim=True)

        return x

    def forward(self, image, text):
        features_image = self.encode_image(image)
        features_text = self.encode_text(text)

        # cosine similarity as logits
        T = self.logit_scale.exp()

        return features_image, features_text, T


def build_unicl_model(config, **kwargs):
    model = UniCLModel(config)
    if config['MODEL']['PRETRAINED'] != '':
        pretrained_path = config['MODEL']['PRETRAINED']
        from ..Utils.Utils import is_valid_url, download_file
        if is_valid_url(pretrained_path):
            with tempfile.TemporaryDirectory() as tmp_path:
                file_local_path = pathlib.Path(tmp_path) / 'base_model.pt'
                download_file(pretrained_path, file_local_path)
                model.from_pretrained(str(file_local_path), config['MODEL']['PRETRAINED_LAYERS'], config['VERBOSE'])
        else:
            model.from_pretrained(pretrained_path, config['MODEL']['PRETRAINED_LAYERS'], config['VERBOSE'])

    return model
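For context, a hedged sketch of how this model is typically used for zero-shot classification. The `config` dict is assumed to be loaded elsewhere in the repo, and the label names and input size are placeholders, not values shipped with this Space:

    import torch

    model = build_unicl_model(config)  # config assumed loaded elsewhere
    model.eval()

    labels = ["cat", "dog", "car"]        # hypothetical class names
    images = torch.randn(1, 3, 224, 224)  # a preprocessed image batch

    with torch.no_grad():
        text_emb = model.get_text_embeddings(labels)  # [num_labels, dim], unit norm
        image_emb = model.encode_image(images)        # [1, dim], unit norm
        logits = model.logit_scale.exp() * image_emb @ text_emb.t()
        probs = logits.softmax(dim=-1)                # per-label probabilities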
spaces/Caoyunkang/Segment-Any-Anomaly/SAM/segment_anything/utils/amg.py
DELETED
@@ -1,346 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import numpy as np
import torch

import math
from copy import deepcopy
from itertools import product
from typing import Any, Dict, Generator, ItemsView, List, Tuple


class MaskData:
    """
    A structure for storing masks and their related data in batched format.
    Implements basic filtering and concatenation.
    """

    def __init__(self, **kwargs) -> None:
        for v in kwargs.values():
            assert isinstance(
                v, (list, np.ndarray, torch.Tensor)
            ), "MaskData only supports list, numpy arrays, and torch tensors."
        self._stats = dict(**kwargs)

    def __setitem__(self, key: str, item: Any) -> None:
        assert isinstance(
            item, (list, np.ndarray, torch.Tensor)
        ), "MaskData only supports list, numpy arrays, and torch tensors."
        self._stats[key] = item

    def __delitem__(self, key: str) -> None:
        del self._stats[key]

    def __getitem__(self, key: str) -> Any:
        return self._stats[key]

    def items(self) -> ItemsView[str, Any]:
        return self._stats.items()

    def filter(self, keep: torch.Tensor) -> None:
        for k, v in self._stats.items():
            if v is None:
                self._stats[k] = None
            elif isinstance(v, torch.Tensor):
                self._stats[k] = v[torch.as_tensor(keep, device=v.device)]
            elif isinstance(v, np.ndarray):
                self._stats[k] = v[keep.detach().cpu().numpy()]
            elif isinstance(v, list) and keep.dtype == torch.bool:
                self._stats[k] = [a for i, a in enumerate(v) if keep[i]]
            elif isinstance(v, list):
                self._stats[k] = [v[i] for i in keep]
            else:
                raise TypeError(f"MaskData key {k} has an unsupported type {type(v)}.")

    def cat(self, new_stats: "MaskData") -> None:
        for k, v in new_stats.items():
            if k not in self._stats or self._stats[k] is None:
                self._stats[k] = deepcopy(v)
            elif isinstance(v, torch.Tensor):
                self._stats[k] = torch.cat([self._stats[k], v], dim=0)
            elif isinstance(v, np.ndarray):
                self._stats[k] = np.concatenate([self._stats[k], v], axis=0)
            elif isinstance(v, list):
                self._stats[k] = self._stats[k] + deepcopy(v)
            else:
                raise TypeError(f"MaskData key {k} has an unsupported type {type(v)}.")

    def to_numpy(self) -> None:
        for k, v in self._stats.items():
            if isinstance(v, torch.Tensor):
                self._stats[k] = v.detach().cpu().numpy()


def is_box_near_crop_edge(
    boxes: torch.Tensor, crop_box: List[int], orig_box: List[int], atol: float = 20.0
) -> torch.Tensor:
    """Filter masks at the edge of a crop, but not at the edge of the original image."""
    crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)
    orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)
    boxes = uncrop_boxes_xyxy(boxes, crop_box).float()
    near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)
    near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)
    near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)
    return torch.any(near_crop_edge, dim=1)


def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor:
    box_xywh = deepcopy(box_xyxy)
    box_xywh[2] = box_xywh[2] - box_xywh[0]
    box_xywh[3] = box_xywh[3] - box_xywh[1]
    return box_xywh


def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:
    assert len(args) > 0 and all(
        len(a) == len(args[0]) for a in args
    ), "Batched iteration must have inputs of all the same size."
    n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)
    for b in range(n_batches):
        yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args]


def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]:
    """
    Encodes masks to an uncompressed RLE, in the format expected by
    pycoco tools.
    """
    # Put in fortran order and flatten h,w
    b, h, w = tensor.shape
    tensor = tensor.permute(0, 2, 1).flatten(1)

    # Compute change indices
    diff = tensor[:, 1:] ^ tensor[:, :-1]
    change_indices = diff.nonzero()

    # Encode run length
    out = []
    for i in range(b):
        cur_idxs = change_indices[change_indices[:, 0] == i, 1]
        cur_idxs = torch.cat(
            [
                torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device),
                cur_idxs + 1,
                torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device),
            ]
        )
        btw_idxs = cur_idxs[1:] - cur_idxs[:-1]
        counts = [] if tensor[i, 0] == 0 else [0]
        counts.extend(btw_idxs.detach().cpu().tolist())
        out.append({"size": [h, w], "counts": counts})
    return out


def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:
    """Compute a binary mask from an uncompressed RLE."""
    h, w = rle["size"]
    mask = np.empty(h * w, dtype=bool)
    idx = 0
    parity = False
    for count in rle["counts"]:
        mask[idx : idx + count] = parity
        idx += count
        parity ^= True
    mask = mask.reshape(w, h)
    return mask.transpose()  # Put in C order


def area_from_rle(rle: Dict[str, Any]) -> int:
    return sum(rle["counts"][1::2])


def calculate_stability_score(
    masks: torch.Tensor, mask_threshold: float, threshold_offset: float
) -> torch.Tensor:
    """
    Computes the stability score for a batch of masks. The stability
    score is the IoU between the binary masks obtained by thresholding
    the predicted mask logits at high and low values.
    """
    # One mask is always contained inside the other.
    # Save memory by preventing unnecessary cast to torch.int64
    intersections = (
        (masks > (mask_threshold + threshold_offset))
        .sum(-1, dtype=torch.int16)
        .sum(-1, dtype=torch.int32)
    )
    unions = (
        (masks > (mask_threshold - threshold_offset))
        .sum(-1, dtype=torch.int16)
        .sum(-1, dtype=torch.int32)
    )
    return intersections / unions


def build_point_grid(n_per_side: int) -> np.ndarray:
    """Generates a 2D grid of points evenly spaced in [0,1]x[0,1]."""
    offset = 1 / (2 * n_per_side)
    points_one_side = np.linspace(offset, 1 - offset, n_per_side)
    points_x = np.tile(points_one_side[None, :], (n_per_side, 1))
    points_y = np.tile(points_one_side[:, None], (1, n_per_side))
    points = np.stack([points_x, points_y], axis=-1).reshape(-1, 2)
    return points


def build_all_layer_point_grids(
    n_per_side: int, n_layers: int, scale_per_layer: int
) -> List[np.ndarray]:
    """Generates point grids for all crop layers."""
    points_by_layer = []
    for i in range(n_layers + 1):
        n_points = int(n_per_side / (scale_per_layer**i))
        points_by_layer.append(build_point_grid(n_points))
    return points_by_layer


def generate_crop_boxes(
    im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float
) -> Tuple[List[List[int]], List[int]]:
    """
    Generates a list of crop boxes of different sizes. Each layer
    has (2**i)**2 boxes for the ith layer.
    """
    crop_boxes, layer_idxs = [], []
    im_h, im_w = im_size
    short_side = min(im_h, im_w)

    # Original image
    crop_boxes.append([0, 0, im_w, im_h])
    layer_idxs.append(0)

    def crop_len(orig_len, n_crops, overlap):
        return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops))

    for i_layer in range(n_layers):
        n_crops_per_side = 2 ** (i_layer + 1)
        overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))

        crop_w = crop_len(im_w, n_crops_per_side, overlap)
        crop_h = crop_len(im_h, n_crops_per_side, overlap)

        crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)]
        crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)]

        # Crops in XYWH format
        for x0, y0 in product(crop_box_x0, crop_box_y0):
            box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)]
            crop_boxes.append(box)
            layer_idxs.append(i_layer + 1)

    return crop_boxes, layer_idxs


def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor:
    x0, y0, _, _ = crop_box
    offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device)
    # Check if boxes has a channel dimension
    if len(boxes.shape) == 3:
        offset = offset.unsqueeze(1)
    return boxes + offset


def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor:
    x0, y0, _, _ = crop_box
    offset = torch.tensor([[x0, y0]], device=points.device)
    # Check if points has a channel dimension
    if len(points.shape) == 3:
        offset = offset.unsqueeze(1)
    return points + offset


def uncrop_masks(
    masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int
) -> torch.Tensor:
    x0, y0, x1, y1 = crop_box
    if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h:
        return masks
    # Coordinate transform masks
    pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0)
    pad = (x0, pad_x - x0, y0, pad_y - y0)
    return torch.nn.functional.pad(masks, pad, value=0)


def remove_small_regions(
    mask: np.ndarray, area_thresh: float, mode: str
) -> Tuple[np.ndarray, bool]:
    """
    Removes small disconnected regions and holes in a mask. Returns the
    mask and an indicator of if the mask has been modified.
    """
    import cv2  # type: ignore

    assert mode in ["holes", "islands"]
    correct_holes = mode == "holes"
    working_mask = (correct_holes ^ mask).astype(np.uint8)
    n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8)
    sizes = stats[:, -1][1:]  # Row 0 is background label
    small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh]
    if len(small_regions) == 0:
        return mask, False
    fill_labels = [0] + small_regions
    if not correct_holes:
        fill_labels = [i for i in range(n_labels) if i not in fill_labels]
        # If every region is below threshold, keep largest
        if len(fill_labels) == 0:
            fill_labels = [int(np.argmax(sizes)) + 1]
    mask = np.isin(regions, fill_labels)
    return mask, True


def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]:
    from pycocotools import mask as mask_utils  # type: ignore

    h, w = uncompressed_rle["size"]
    rle = mask_utils.frPyObjects(uncompressed_rle, h, w)
    rle["counts"] = rle["counts"].decode("utf-8")  # Necessary to serialize with json
    return rle


def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor:
    """
    Calculates boxes in XYXY format around masks. Return [0,0,0,0] for
    an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4.
    """
    # torch.max below raises an error on empty inputs, just skip in this case
    if torch.numel(masks) == 0:
        return torch.zeros(*masks.shape[:-2], 4, device=masks.device)

    # Normalize shape to CxHxW
    shape = masks.shape
    h, w = shape[-2:]
    if len(shape) > 2:
        masks = masks.flatten(0, -3)
    else:
        masks = masks.unsqueeze(0)

    # Get top and bottom edges
    in_height, _ = torch.max(masks, dim=-1)
    in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :]
    bottom_edges, _ = torch.max(in_height_coords, dim=-1)
    in_height_coords = in_height_coords + h * (~in_height)
    top_edges, _ = torch.min(in_height_coords, dim=-1)

    # Get left and right edges
    in_width, _ = torch.max(masks, dim=-2)
    in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :]
    right_edges, _ = torch.max(in_width_coords, dim=-1)
    in_width_coords = in_width_coords + w * (~in_width)
    left_edges, _ = torch.min(in_width_coords, dim=-1)

    # If the mask is empty the right edge will be to the left of the left edge.
    # Replace these boxes with [0, 0, 0, 0]
    empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)
    out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)
    out = out * (~empty_filter).unsqueeze(-1)

    # Return to original shape
    if len(shape) > 2:
        out = out.reshape(*shape[:-2], 4)
    else:
        out = out[0]

    return out
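A small, self-contained illustration of a few helpers above; the logits tensor and the thresholds are synthetic assumptions chosen for the sketch:

    import torch

    logits = torch.randn(3, 64, 64)  # predicted mask logits, [num_masks, H, W]
    scores = calculate_stability_score(logits, mask_threshold=0.0, threshold_offset=1.0)

    data = MaskData(masks=logits > 0, iou_preds=scores)
    data.filter(scores > 0.8)  # drops unstable masks from every stored field at once

    rles = mask_to_rle_pytorch(data["masks"])
    areas = [area_from_rle(rle) for rle in rles]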
spaces/CarlDennis/Lovelive-VITS-JPZH/text/cleaners.py
DELETED
@@ -1,87 +0,0 @@
import re


def japanese_cleaners(text):
    from text.japanese import japanese_to_romaji_with_accent
    text = japanese_to_romaji_with_accent(text)
    if re.match('[A-Za-z]', text[-1]):
        text += '.'
    return text


def japanese_cleaners2(text):
    return japanese_cleaners(text).replace('ts', 'ʦ').replace('...', '…')


def korean_cleaners(text):
    '''Pipeline for Korean text'''
    from text.korean import latin_to_hangul, number_to_hangul, divide_hangul
    text = latin_to_hangul(text)
    text = number_to_hangul(text)
    text = divide_hangul(text)
    if re.match('[\u3131-\u3163]', text[-1]):
        text += '.'
    return text


def chinese_cleaners(text):
    '''Pipeline for Chinese text'''
    from text.mandarin import number_to_chinese, chinese_to_bopomofo, latin_to_bopomofo
    text = number_to_chinese(text)
    text = chinese_to_bopomofo(text)
    text = latin_to_bopomofo(text)
    if re.match('[ˉˊˇˋ˙]', text[-1]):
        text += '。'
    return text


def zh_ja_mixture_cleaners(text):
    from text.mandarin import chinese_to_romaji
    from text.japanese import japanese_to_romaji_with_accent
    chinese_texts = re.findall(r'\[ZH\].*?\[ZH\]', text)
    japanese_texts = re.findall(r'\[JA\].*?\[JA\]', text)
    for chinese_text in chinese_texts:
        cleaned_text = chinese_to_romaji(chinese_text[4:-4])
        text = text.replace(chinese_text, cleaned_text + ' ', 1)
    for japanese_text in japanese_texts:
        cleaned_text = japanese_to_romaji_with_accent(
            japanese_text[4:-4]).replace('ts', 'ʦ').replace('u', 'ɯ').replace('...', '…')
        text = text.replace(japanese_text, cleaned_text + ' ', 1)
    text = text[:-1]
    if re.match('[A-Za-zɯɹəɥ→↓↑]', text[-1]):
        text += '.'
    return text


def sanskrit_cleaners(text):
    text = text.replace('॥', '।').replace('ॐ', 'ओम्')
    if text[-1] != '।':
        text += ' ।'
    return text


def cjks_cleaners(text):
    from text.mandarin import chinese_to_lazy_ipa
    from text.japanese import japanese_to_ipa
    from text.korean import korean_to_lazy_ipa
    from text.sanskrit import devanagari_to_ipa
    chinese_texts = re.findall(r'\[ZH\].*?\[ZH\]', text)
    japanese_texts = re.findall(r'\[JA\].*?\[JA\]', text)
    korean_texts = re.findall(r'\[KO\].*?\[KO\]', text)
    sanskrit_texts = re.findall(r'\[SA\].*?\[SA\]', text)
    for chinese_text in chinese_texts:
        cleaned_text = chinese_to_lazy_ipa(chinese_text[4:-4])
        text = text.replace(chinese_text, cleaned_text + ' ', 1)
    for japanese_text in japanese_texts:
        cleaned_text = japanese_to_ipa(japanese_text[4:-4])
        text = text.replace(japanese_text, cleaned_text + ' ', 1)
    for korean_text in korean_texts:
        cleaned_text = korean_to_lazy_ipa(korean_text[4:-4])
        text = text.replace(korean_text, cleaned_text + ' ', 1)
    for sanskrit_text in sanskrit_texts:
        cleaned_text = devanagari_to_ipa(sanskrit_text[4:-4])
        text = text.replace(sanskrit_text, cleaned_text + ' ', 1)
    text = text[:-1]
    if re.match(r'[^\.,!\?\-…~]', text[-1]):
        text += '.'
    return text
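A sketch of how these tag-based cleaners are invoked; it assumes the sibling text.mandarin / text.japanese modules and their dependencies are importable, and the printed romanizations are indicative only:

    # Mixed Chinese/Japanese input, each chunk wrapped in its language tag.
    mixed = "[ZH]你好[ZH] [JA]こんにちは[JA]"
    print(zh_ja_mixture_cleaners(mixed))

    # Single-language pipeline: digits are verbalized, hanzi become bopomofo.
    print(chinese_cleaners("你好123"))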