parquet-converter committed
Commit a9b4011
1 Parent(s): 9324965

Update parquet files (step 127 of 249)

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Discover the Secrets of Your Destiny with Kundli 5.5 Software Hindi Version (Free Download Available).md +0 -29
  2. spaces/1gistliPinn/ChatGPT4/Examples/360 Total Security Free Antivirus 10.2.0.1197 Crack With License Key 2020.md +0 -6
  3. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/CTET Certificate 2021 Everything You Need to Know About the Teacher Eligibility Test.md +0 -135
  4. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Cheat Naruto x Boruto Ninja Voltage Tips and Tricks for Building the Best Ninja Fortress.md +0 -131
  5. spaces/1phancelerku/anime-remove-background/Car Parking Multiplayer APK Para Hileli How to Install and Play.md +0 -92
  6. spaces/1phancelerku/anime-remove-background/Download Sinking Simulator and unleash your destructive side.md +0 -150
  7. spaces/1phancelerku/anime-remove-background/Enjoy the Groovy Soundtrack and Exciting Physics Elements of Red Ball 4 on iOS.md +0 -129
  8. spaces/1phancelerku/anime-remove-background/Experience the Thrill of Honor of Kings - The Best Mobile MOBA on Play Store - Download for Free.md +0 -119
  9. spaces/AI-Dashboards/Streamlit-Markdown-ChatGPT-CCD/README.md +0 -13
  10. spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/fastspeech/tts_modules.py +0 -385
  11. spaces/AIGC-Audio/AudioGPT/NeuralSeq/tasks/tts/tts_base.py +0 -305
  12. spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/models/decoder.py +0 -746
  13. spaces/AP123/ai-avatars/app.py +0 -616
  14. spaces/ASJMO/freegpt/g4f/Provider/Providers/Gravityengine.py +0 -27
  15. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/__init__.py +0 -0
  16. spaces/AchyuthGamer/OpenGPT/g4f/Provider/Opchatgpts.py +0 -8
  17. spaces/Adam111/stable-diffusion-webui/app.py +0 -76
  18. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/bars/Bars.d.ts +0 -2
  19. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/slider/UpdateThumb.js +0 -26
  20. spaces/Akmyradov/chatbot_testing/app.py +0 -25
  21. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/lpw_stable_diffusion.py +0 -1470
  22. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/unet_2d_condition_flax.py +0 -357
  23. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/dit/__init__.py +0 -0
  24. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/schedulers/test_scheduler_dpm_multi_inverse.py +0 -266
  25. spaces/Andy1621/uniformer_image_detection/configs/_base_/datasets/cityscapes_instance.py +0 -55
  26. spaces/Andy1621/uniformer_image_detection/configs/cornernet/cornernet_hourglass104_mstest_32x3_210e_coco.py +0 -105
  27. spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/centripetal_head.py +0 -421
  28. spaces/Andy1621/uniformer_image_segmentation/configs/gcnet/gcnet_r101-d8_512x1024_80k_cityscapes.py +0 -2
  29. spaces/Andy1621/uniformer_image_segmentation/configs/ocrnet/ocrnet_hr18_512x512_40k_voc12aug.py +0 -36
  30. spaces/Ank0X0/text-to-3d-shap-e-webui/app.py +0 -49
  31. spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/model/base_function.py +0 -611
  32. spaces/AntX-ai/Fintech/README.md +0 -10
  33. spaces/AriaMei/TTSdemo/text/symbols.py +0 -69
  34. spaces/Artrajz/vits-simple-api/vits/transforms.py +0 -193
  35. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/controller.py +0 -439
  36. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/charsetgroupprober.py +0 -106
  37. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/euckrfreq.py +0 -196
  38. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/core.py +0 -0
  39. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/installer.py +0 -104
  40. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/layers/test_roi_align.py +0 -210
  41. spaces/Benson/text-generation/Examples/Descarga De La Herramienta Flash Del Jio Profesional A23.md +0 -109
  42. spaces/Benson/text-generation/Examples/Descarga De Tema De Batera Baja.md +0 -87
  43. spaces/Benson/text-generation/Examples/Descargar 3mb.md +0 -126
  44. spaces/Benson/text-generation/Examples/Descargar Fm WhatsApp Apk 2023.md +0 -110
  45. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/requests/utils.py +0 -1086
  46. spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/command/build_ext.py +0 -787
  47. spaces/BilalSardar/karlo-cpu-api/app.py +0 -26
  48. spaces/CVPR/LIVE/pybind11/pybind11/__main__.py +0 -37
  49. spaces/CVPR/regionclip-demo/detectron2/data/common.py +0 -186
  50. spaces/CVPR/transfiner/configs/new_baselines/mask_rcnn_R_101_FPN_200ep_LSJ.py +0 -14
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Discover the Secrets of Your Destiny with Kundli 5.5 Software Hindi Version (Free Download Available).md DELETED
@@ -1,29 +0,0 @@
1
-
2
- <h1>How to Free Download Kundli 5.5 Software Hindi Version</h1>
3
- <p>Kundli is a popular astrology software that helps you generate accurate horoscopes based on your birth details. Kundli 5.5 is one of the most popular versions of this software, as it offers many features and benefits for the users. However, finding a reliable source to free download Kundli 5.5 software Hindi version can be challenging, as there are many websites that claim to offer it but may contain viruses or malware.</p>
4
- <p>In this article, we will show you how to free download Kundli 5.5 software Hindi version safely and easily, without compromising your computer's security or performance. We will also explain some of the features and benefits of using Kundli 5.5 software Hindi version for your astrology needs.</p>
5
- <h2>free download kundli 5.5 software hindi</h2><br /><p><b><b>Download Zip</b> &rArr; <a href="https://byltly.com/2uKx2F">https://byltly.com/2uKx2F</a></b></p><br /><br />
6
- <h2>Features and Benefits of Kundli 5.5 Software Hindi Version</h2>
7
- <p>Kundli 5.5 software Hindi version is a comprehensive astrology software that can help you create detailed and accurate horoscopes for yourself and others. Some of the features and benefits of this software are:</p>
8
- <ul>
9
- <li>It supports multiple languages, including Hindi, English, Bengali, Gujarati, Marathi, Telugu, Tamil, Kannada, and Malayalam.</li>
10
- <li>It allows you to choose from various chart styles, such as North Indian, South Indian, East Indian, and Kerala.</li>
11
- <li>It provides detailed calculations and predictions based on various astrological systems, such as Vedic astrology, KP system, Lal Kitab, Prashna Kundli, etc.</li>
12
- <li>It generates various reports and charts, such as birth chart, planetary positions, dasha periods, yogas, kundli matching, gemstone recommendation, numerology report, etc.</li>
13
- <li>It has a user-friendly interface and easy navigation that makes it suitable for beginners and experts alike.</li>
14
- <li>It has a low system requirement and can run smoothly on any Windows operating system.</li>
15
- </ul>
16
- <h2>How to Free Download Kundli 5.5 Software Hindi Version</h2>
17
- <p>If you want to free download Kundli 5.5 software Hindi version for your personal use or professional practice, you need to follow these steps:</p>
18
- <ol>
19
- <li>Visit the official website of Kundli software at <a href="https://www.kundlisoftware.com/">https://www.kundlisoftware.com/</a>.</li>
20
- <li>Click on the "Download" button on the homepage and choose the "Kundli 5.5" option from the drop-down menu.</li>
21
- <li>Select the "Hindi" language option from the next page and click on the "Download Now" button.</li>
22
- <li>A pop-up window will appear asking you to enter your name and email address. Enter your details and click on the "Submit" button.</li>
23
- <li>You will receive an email with a download link for Kundli 5.5 software Hindi version. Click on the link and save the file on your computer.</li>
24
- <li>Once the download is complete, double-click on the file and follow the installation instructions to install Kundli 5.5 software Hindi version on your computer.</li>
25
- <li>After the installation is complete, launch Kundli 5.5 software Hindi version from your desktop or start menu and enjoy creating accurate horoscopes.</li>
26
- </ol>
27
- <p>Congratulations! You have successfully free downloaded Kundli 5.5 software Hindi version for your astrology needs.</p> ddb901b051<br />
28
- <br />
29
- <br />
 
spaces/1gistliPinn/ChatGPT4/Examples/360 Total Security Free Antivirus 10.2.0.1197 Crack With License Key 2020.md DELETED
@@ -1,6 +0,0 @@
1
- <h2>360 Total Security Free Antivirus 10.2.0.1197 Crack With License Key 2020</h2><br /><p><b><b>DOWNLOAD</b> &rarr;&rarr;&rarr; <a href="https://imgfil.com/2uxZdy">https://imgfil.com/2uxZdy</a></b></p><br /><br />
2
- <br />
3
- Total Security 10.8.0.1200 Premium Crack + License Key 2020 — 360 Total Security 10.8.0.1200 Crack Premium 2020 & License Key Free Download ... The antivirus engine it uses by default is feeble and can't ... 4d29de3e1b<br />
4
- <br />
5
- <br />
6
- <p></p>
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/CTET Certificate 2021 Everything You Need to Know About the Teacher Eligibility Test.md DELETED
@@ -1,135 +0,0 @@
1
- <br />
2
- <h1>How to Download CTET Certificate 2021</h1>
3
- <p>If you are one of the candidates who have qualified the Central Teacher Eligibility Test (CTET) 2021, then you must be wondering how to download your CTET certificate 2021. The CTET certificate is a document that proves your eligibility to teach in any of the schools affiliated to the Central Board of Secondary Education (CBSE) or any other state board. In this article, we will tell you everything you need to know about the CTET certificate 2021, such as what it is, why it is important, how to download it from DigiLocker or the CTET website, how to apply for a duplicate certificate, and how to check its validity. So, read on and find out how to download your CTET certificate 2021.</p>
4
- <h2>What is CTET Certificate?</h2>
5
- <p>The CTET certificate is a document that is issued by the CBSE to the candidates who have successfully passed the CTET exam. The CTET exam is conducted twice a year, usually in July and December, to test the eligibility of candidates who aspire to become teachers for classes I to VIII. The exam consists of two papers: Paper I for classes I to V and Paper II for classes VI to VIII. Candidates can appear for either one or both papers depending on their preference.</p>
6
- <h2>download ctet certificate 2021</h2><br /><p><b><b>DOWNLOAD</b> &#9733;&#9733;&#9733; <a href="https://urlin.us/2uSYEm">https://urlin.us/2uSYEm</a></b></p><br /><br />
7
- <h3>Why is CTET Certificate Important?</h3>
8
- <p>The CTET certificate is important because it is a mandatory qualification for teaching in any of the schools affiliated to the CBSE or any other state board. According to the Right of Children to Free and Compulsory Education (RTE) Act, 2009, every teacher appointed in any school must have a minimum qualification as prescribed by the National Council for Teacher Education (NCTE). One of these qualifications is passing the Teacher Eligibility Test (TET) conducted by the appropriate government in accordance with the guidelines framed by the NCTE. The CTET is one such TET conducted by the CBSE at the national level.</p>
9
- <p>The CTET certificate not only validates your eligibility as a teacher but also enhances your career prospects as a teacher. It gives you an edge over other candidates who do not have the certificate and opens up more opportunities for you in various schools across India. It also helps you in getting promotions and increments as a teacher.</p>
10
- <h2>How to Download CTET Certificate 2021 from DigiLocker?</h2>
11
- <p>The CBSE has partnered with DigiLocker, a digital platform of the Government of India, to issue the CTET certificate 2021 in digital format. DigiLocker is a secure and convenient way of storing and accessing your documents online. You can download your CTET certificate 2021 from DigiLocker by following these steps:</p>
12
- <h3>What is DigiLocker?</h3>
13
- <p>DigiLocker is a cloud-based platform that allows you to store and access your documents online. It provides you with a dedicated personal storage space where you can upload your scanned documents or get them directly from registered issuers such as CBSE, UIDAI, Income Tax Department, etc. You can also share your documents with others using a secure link or QR code. DigiLocker eliminates the need for carrying physical copies of your documents and ensures their authenticity and security.</p>
14
- <h3>How to <h3>How to Register on DigiLocker?</h3>
15
- <p>To register on DigiLocker, you need to have a valid mobile number and an Aadhaar number. You can follow these steps to register on DigiLocker:</p>
16
- <ol>
17
- <li>Visit the official website of DigiLocker at <a href="">https://digilocker.gov.in/</a> or download the DigiLocker app from Google Play Store or Apple App Store.</li>
18
- <li>Click on the "Sign Up" button and enter your mobile number. You will receive an OTP on your mobile number. Enter the OTP and click on "Verify".</li>
19
- <li>Enter your Aadhaar number and click on "Verify". You will receive another OTP on your mobile number. Enter the OTP and click on "Verify".</li>
20
- <li>Create a username and password for your DigiLocker account and click on "Submit".</li>
21
- <li>You have successfully registered on DigiLocker. You can now access your dashboard and upload or download your documents.</li>
22
- </ol>
23
- <h3>How to Link Aadhaar with DigiLocker?</h3>
24
- <p>To link your Aadhaar with DigiLocker, you need to have a registered DigiLocker account and a valid Aadhaar number. You can follow these steps to link your Aadhaar with DigiLocker:</p>
25
- <ol>
26
- <li>Login to your DigiLocker account using your username and password.</li>
27
- <li>Click on the "Issued Documents" section and select "Aadhaar" from the list of document types.</li>
28
- <li>Click on the "Get Document" button and enter your Aadhaar number. You will receive an OTP on your mobile number. Enter the OTP and click on "Verify".</li>
29
- <li>Your Aadhaar will be linked with your DigiLocker account and you will be able to view your Aadhaar details on your dashboard.</li>
30
- </ol>
31
- <h3>How to Download CTET Certificate 2021 from DigiLocker?</h3>
32
- <p>To download your CTET certificate 2021 from DigiLocker, you need to have a registered and Aadhaar-linked DigiLocker account and a valid CTET roll number. You can follow these steps to download your CTET certificate 2021 from DigiLocker:</p>
33
- <p>How to download ctet certificate 2021 from digilocker<br />
34
- CTET certificate 2021 validity period and eligibility criteria<br />
35
- CTET certificate 2021 download link and steps<br />
36
- CTET marksheet 2021 download online using roll number<br />
37
- CTET certificate 2021 pdf download for qualified candidates<br />
38
- CTET certificate 2021 verification process and details<br />
39
- CTET certificate 2021 correction facility and fee<br />
40
- CTET certificate 2021 release date and notification<br />
41
- CTET certificate 2021 login details and password reset<br />
42
- CTET certificate 2021 sample and format<br />
43
- CTET certificate 2021 benefits and advantages<br />
44
- CTET certificate 2021 duplicate copy and replacement<br />
45
- CTET certificate 2021 status and availability<br />
46
- CTET certificate 2021 print out and hard copy<br />
47
- CTET certificate 2021 application form and fee payment<br />
48
- CTET certificate 2021 exam date and result declaration<br />
49
- CTET certificate 2021 cut off marks and merit list<br />
50
- CTET certificate 2021 syllabus and exam pattern<br />
51
- CTET certificate 2021 answer key and challenge<br />
52
- CTET certificate 2021 admit card and exam center<br />
53
- CTET certificate 2021 preparation tips and books<br />
54
- CTET certificate 2021 mock test and practice papers<br />
55
- CTET certificate 2021 previous year papers and solutions<br />
56
- CTET certificate 2021 expected questions and topics<br />
57
- CTET certificate 2021 online coaching and study material<br />
58
- CTET certificate 2021 latest news and updates<br />
59
- CTET certificate 2021 faq and customer support<br />
60
- CTET certificate 2021 helpline number and email id<br />
61
- CTET certificate 2021 official website and portal<br />
62
- CTET certificate 2021 registration process and documents required<br />
63
- CTET certificate 2021 eligibility test and qualification<br />
64
- CTET certificate 2021 selection process and criteria<br />
65
- CTET certificate 2021 counselling process and schedule<br />
66
- CTET certificate 2021 job opportunities and salary<br />
67
- CTET certificate 2021 career guidance and advice<br />
68
- CTET certificate 2021 success stories and testimonials<br />
69
- CTET certificate 2021 feedback form and survey<br />
70
- CTET certificate 2021 renewal process and fee structure<br />
71
- CTET certificate 2021 extension request and approval<br />
72
- CTET certificate 2021 cancellation policy and refund procedure<br />
73
- CTET marksheet vs ctet certificate difference and comparison <br />
74
- How to get ctet marksheet along with ctet certificate <br />
75
- How to check ctet marksheet online before downloading ctet certificate <br />
76
- How to apply for ctet marksheet correction before getting ctet certificate <br />
77
- How to verify ctet marksheet authenticity after downloading ctet certificate <br />
78
- How to print ctet marksheet in color or black and white with ctet certificate <br />
79
- How to save ctet marksheet in mobile or laptop with ctet certificate <br />
80
- How to share ctet marksheet with employers or recruiters with ctet certificate <br />
81
- How to use ctet marksheet for further studies or exams with ctet certificate</p>
82
- <ol>
83
- <li>Login to your DigiLocker account using your username and password.</li>
84
- <li>Click on the "Issued Documents" section and select "Central Board of Secondary Education (CBSE)" from the list of issuers.</li>
85
- <li>Select "Teacher Eligibility Test (CTET)" from the list of document types.</li>
86
- <li>Enter your CTET roll number and click on "Get Document". Your CTET certificate 2021 will be fetched from the CBSE database and displayed on your dashboard.</li>
87
- <li>Click on the "Download" button to save your CTET certificate 2021 in PDF format. You can also print or share your CTET certificate 2021 using the options available on the dashboard.</li>
88
- </ol>
89
- <h2>How to Download CTET Certificate 2021 from CTET Website?</h2>
90
- <p>If you do not have a DigiLocker account or you face any difficulty in downloading your CTET certificate 2021 from DigiLocker, you can also download it from the official website of CTET. You can follow these steps to download your CTET certificate 2021 from CTET website:</p>
91
- <ol>
92
- <li>Visit the official website of CTET at <a href="">https://ctet.nic.in/</a>.</li>
93
- <li>Click on the "Download Certificate (July 2020 onwards)" link under the "Current Events" section.</li>
94
- <li>Enter your roll number and date of birth and click on "Submit". Your CTET certificate 2021 will be displayed on the screen.</li>
95
- <li>Click on the "Download" button to save your CTET certificate 2021 in PDF format. You can also print or share your CTET certificate 2021 using the options available on the screen.</li>
96
- </ol>
97
- <h3>How to Apply for Duplicate CTET Certificate?</h3>
98
- <p>If you have lost or damaged your original CTET certificate, you can apply for a duplicate CTET certificate from CBSE. You need to pay a fee of Rs. 200/- per document through demand draft in favour of Secretary, Central Board of Secondary Education, payable at Delhi/New Delhi. You also need to send an application along with the following documents to CBSE:</p>
99
- <ul>
100
- <li>A copy of FIR lodged with police stating loss of original certificate.</li>
101
- <li>A copy of Class X marksheet or certificate for verification of candidate's name, father's name, mother's name, and date of birth.</li>
102
- <li>A copy of admit card of CTET exam for verification of roll number and year/ month of exam.</li>
103
- <li>A demand draft of Rs. 200/- per document.</li>
104
- </ul>
105
- <p>You need to send the application and the documents to the following address:</p>
106
- <p>The Joint Secretary (CTET), Central Board of Secondary Education, PS 1-2, Institutional Area, I P Extension, Patparganj, Delhi-110 092</p>
107
- <h3>How to Check CTET Certificate Validity?</h3>
108
- <p>The CTET certificate is valid for a period of seven years from the date of declaration of result. You can check the validity of your CTET certificate by looking at the date of issue and the date of expiry printed on the certificate. You can also check the validity of your CTET certificate online by visiting the official website of CTET and clicking on the "Certificate Validity" link under the "Current Events" section. You need to enter your roll number and date of birth and click on "Submit". The validity status of your CTET certificate will be displayed on the screen.</p>
109
- <h2>FAQs on CTET Certificate 2021</h2>
110
- <h3>Q1. What is the validity period of CTET certificate?</h3>
111
- <p>A1. The CTET certificate is valid for a period of seven years from the date of declaration of result. You can apply for a revalidation of your CTET certificate after seven years if you want to continue teaching in any CBSE or state board affiliated school.</p>
112
- <h3>Q2. What is the difference between CTET marksheet and certificate?</h3>
113
- <p>A2. The CTET marksheet is a document that shows your marks obtained in each paper of the CTET exam. The CTET certificate is a document that proves your eligibility to teach in any CBSE or state board affiliated school. You need to score at least 60% marks in each paper to qualify the CTET exam and get the CTET certificate.</p>
114
- <h3>Q3. How can I check my CTET result 2021?</h3>
115
- <p>A3. You can check your CTET result 2021 by visiting the official website of CTET and clicking on the "Result (July 2020 onwards)" link under the "Current Events" section. You need to enter your roll number and date of birth and click on "Submit". Your CTET result 2021 will be displayed on the screen.</p>
116
- <h3>Q4. What are the benefits of qualifying CTET exam?</h3>
117
- <p>A4. Qualifying CTET exam has many benefits such as:</p>
118
- <ul>
119
- <li>It validates your eligibility as a teacher for classes I to VIII in any CBSE or state board affiliated school.</li>
120
- <li>It enhances your career prospects as a teacher and opens up more opportunities for you in various schools across India.</li>
121
- <li>It helps you in getting promotions and increments as a teacher.</li>
122
- <li>It boosts your confidence and credibility as a teacher.</li>
123
- </ul>
124
- <h3>Q5. How can I contact CBSE for any queries related to CTET certificate?</h3>
125
- <p>A5. You can contact CBSE for any queries related to CTET certificate by using any of the following modes:</p>
126
- <ul>
127
- <li>Email: [email protected]</li>
128
- <li>Phone: 011-22235774, 011-22240112</li>
129
- <li>Fax: 011-22240112</li>
130
- <li>Address: The Joint Secretary (CTET), Central Board of Secondary Education, PS 1-2, Institutional Area, I P Extension, Patparganj, Delhi-110 092</li>
131
- </ul>
132
- <h2>Conclusion</h2>
133
- <p>In this article, we have explained how to download your CTET certificate 2021 from DigiLocker or the CTET website. We have also answered some frequently asked questions on CTET certificate 2021. We hope this article has helped you in getting your CTET certificate 2021 and clearing your doubts on it. If you have any further queries, feel free to contact CBSE or visit their official website for more information.</p> 197e85843d<br />
134
- <br />
135
- <br />
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Cheat Naruto x Boruto Ninja Voltage Tips and Tricks for Building the Best Ninja Fortress.md DELETED
@@ -1,131 +0,0 @@
1
-
2
- <h1>Download Cheat Naruto x Boruto Ninja Voltage: How to Hack Your Way to Victory</h1>
3
- <h2>Introduction</h2>
4
- <p>If you are a fan of the popular anime series Naruto and Boruto, you might have heard of the game Naruto x Boruto Ninja Voltage. This is a fortress strategy action game that combines your favorite characters from both shows and lets you create your own ninja clan, defend your base from enemies, and attack other players' fortresses. Sounds fun, right?</p>
5
- <p>But what if you want to have an edge over your rivals and enjoy the game without spending too much time or money? What if you want to unlock all the shinobi, ninjutsu, and ultimate jutsu without grinding for resources or waiting for timers? What if you want to dominate the battle rankings and become the most powerful ninja in the world?</p>
6
- <h2>download cheat naruto x boruto ninja voltage</h2><br /><p><b><b>DOWNLOAD</b> <a href="https://urlin.us/2uSTKb">https://urlin.us/2uSTKb</a></b></p><br /><br />
7
- <p>Well, you might be tempted to download cheat Naruto x Boruto Ninja Voltage. This is a term that refers to any method or tool that can help you hack the game and get unlimited resources, such as shinobite, chakra, coins, hero fragments, and more. With these cheats, you can easily upgrade your ninja cards, enhance your ninjas, and unleash their full potential.</p>
8
- <p>But how do you download cheat Naruto x Boruto Ninja Voltage? And is it safe and legal to do so? In this article, we will answer these questions and give you some tips and tricks on how to use cheat Naruto x Boruto Ninja Voltage effectively. Read on and find out how to hack your way to victory!</p>
9
- <h2>How to download cheat Naruto x Boruto Ninja Voltage</h2>
10
- <h3>The risks of using cheat codes or modded APKs</h3>
11
- <p>One way to download cheat Naruto x Boruto Ninja Voltage is to use cheat codes or modded APKs. Cheat codes are secret commands that can alter the game's behavior and give you access to hidden features or bonuses. Modded APKs are modified versions of the game's installation file that can bypass the security measures and change the game's data.</p>
12
- <p>However, using cheat codes or modded APKs is not recommended for several reasons. First of all, they are very hard to find and often outdated or fake. Most of the websites that claim to offer them are scams that will try to trick you into downloading malware or filling out surveys. Second, they are very risky and can damage your device or compromise your personal information. Third, they are very illegal and can get you banned from the game or even sued by the developers.</p>
13
- <p>Therefore, we advise you to avoid using cheat codes or modded APKs and look for other ways to download cheat Naruto x Boruto Ninja Voltage.</p>
14
- <h3>The benefits of using online generators or hack tools</h3>
15
- <p>A better way to download cheat Naruto x Boruto Ninja Voltage is to use online generators or hack tools. These are web-based applications that can generate unlimited resources for you without requiring any download or installation. All you need is a browser and an internet connection.</p>
16
- <p>Using online generators or hack tools has many benefits over using cheat codes or modded APKs. First of all, they are very easy to use and usually have a user-friendly interface. You just need to enter your username, select your platform, choose the amount of resources you want, and click a button. Second, they are very safe and secure and do not require any root or jailbreak. They also have anti-ban protection and encryption features that ensure your account's safety. Third, they are very legal and ethical and do not violate the game's terms of service or infringe on the developers' rights.</p>
17
- <p>download cheat naruto x boruto ninja voltage tips and tricks<br />
18
- download cheat naruto x boruto ninja voltage mod apk<br />
19
- download cheat naruto x boruto ninja voltage unlimited shinobite<br />
20
- download cheat naruto x boruto ninja voltage hack tool<br />
21
- download cheat naruto x boruto ninja voltage guide<br />
22
- download cheat naruto x boruto ninja voltage best characters<br />
23
- download cheat naruto x boruto ninja voltage free gems<br />
24
- download cheat naruto x boruto ninja voltage latest version<br />
25
- download cheat naruto x boruto ninja voltage gameplay<br />
26
- download cheat naruto x boruto ninja voltage walkthrough<br />
27
- download cheat naruto x boruto ninja voltage how to get hero fragments<br />
28
- download cheat naruto x boruto ninja voltage base defense<br />
29
- download cheat naruto x boruto ninja voltage review<br />
30
- download cheat naruto x boruto ninja voltage online generator<br />
31
- download cheat naruto x boruto ninja voltage no survey<br />
32
- download cheat naruto x boruto ninja voltage no human verification<br />
33
- download cheat naruto x boruto ninja voltage android ios<br />
34
- download cheat naruto x boruto ninja voltage gamezebo[^1^]<br />
35
- download cheat naruto x boruto ninja voltage reddit<br />
36
- download cheat naruto x boruto ninja voltage forum<br />
37
- download cheat naruto x boruto ninja voltage wiki<br />
38
- download cheat naruto x boruto ninja voltage codes<br />
39
- download cheat naruto x boruto ninja voltage cheatsheet<br />
40
- download cheat naruto x boruto ninja voltage update<br />
41
- download cheat naruto x boruto ninja voltage tier list<br />
42
- download cheat naruto x boruto ninja voltage team building<br />
43
- download cheat naruto x boruto ninja voltage strategy<br />
44
- download cheat naruto x boruto ninja voltage resources<br />
45
- download cheat naruto x boruto ninja voltage shinobi cards<br />
46
- download cheat naruto x boruto ninja voltage ultimate jutsu<br />
47
- download cheat naruto x boruto ninja voltage ninjutsu<br />
48
- download cheat naruto x boruto ninja voltage missions<br />
49
- download cheat naruto x boruto ninja voltage events<br />
50
- download cheat naruto x boruto ninja voltag</p>
51
- <p>Therefore, we recommend you to use online generators or hack tools if you want to download cheat Naruto x Boruto Ninja Voltage.</p>
52
- <h <h3>The best websites to find working cheats for Naruto x Boruto Ninja Voltage</h3>
53
- <p>Now that you know the advantages of using online generators or hack tools, you might be wondering where to find them. There are many websites that claim to offer working cheats for Naruto x Boruto Ninja Voltage, but not all of them are reliable or trustworthy. Some of them might be outdated, broken, or infected with viruses. Some of them might ask you to complete surveys, download apps, or enter your personal information. Some of them might even steal your account or sell your data to third parties.</p>
54
- <p>Therefore, you need to be careful and selective when choosing a website to download cheat Naruto x Boruto Ninja Voltage. Here are some tips on how to find the best websites for this purpose:</p>
55
- <ul>
56
- <li>Look for websites that have positive reviews and feedback from other users. You can check the comments section, the ratings, or the testimonials to see what other people think of the website and its services.</li>
57
- <li>Look for websites that have updated and verified cheats. You can check the date of the last update, the version of the game, or the verification status to see if the website has the latest and working cheats for Naruto x Boruto Ninja Voltage.</li>
58
- <li>Look for websites that have secure and user-friendly features. You can check the URL, the SSL certificate, or the privacy policy to see if the website is safe and secure. You can also check the interface, the instructions, or the customer support to see if the website is easy and convenient to use.</li>
59
- </ul>
60
- <p>Based on these criteria, we have selected some of the best websites to download cheat Naruto x Boruto Ninja Voltage for you. Here they are:</p>
61
- <table>
62
- <tr>
63
- <th>Website</th>
64
- <th>Features</th>
65
- <th>Link</th>
66
- </tr>
67
- <tr>
68
- <td>NarutoxBorutoHack.com</td>
69
- <td>- Unlimited shinobite generator<br>- No download or installation required<br>- No root or jailbreak required<br>- Anti-ban protection and encryption<br>- Compatible with all devices and platforms<br>- Updated and verified daily<br>- Easy and fast to use<br>- Free and safe to use</td>
70
- <td>[text](^1^)</td>
71
- </tr>
72
- <tr>
73
- <td>CheatSeeker.club</td>
74
- <td>- Unlimited shinobite and chakra generator<br>- No download or installation required<br>- No root or jailbreak required<br>- Anti-ban protection and encryption<br>- Compatible with all devices and platforms<br>- Updated and verified daily<br>- Easy and fast to use<br>- Free and safe to use</td>
75
- <td>[text](^2^)</td>
76
- </tr>
77
- <tr>
78
- <td>NarutoBorutoCheats.com</td>
79
- <td>- Unlimited shinobite, chakra, coins, and hero fragments generator<br>- No download or installation required<br>- No root or jailbreak required<br>- Anti-ban protection and encryption<br>- Compatible with all devices and platforms<br>- Updated and verified daily<br>- Easy and fast to use<br>- Free and safe to use</td>
80
- <td>[text](^3^)</td>
81
- </tr>
82
- </table>
83
- <p>These are just some examples of the best websites to download cheat Naruto x Boruto Ninja Voltage. You can try them out yourself and see which one works best for you. However, remember to use them responsibly and moderately, as abusing them might ruin the fun of the game or get you in trouble.</p>
84
- <h2>How to use cheat Naruto x Boruto Ninja Voltage effectively</h2>
85
- <p>Once you have downloaded cheat Naruto x Boruto Ninja Voltage from one of the websites above, you might be wondering how to use it effectively. After all, having unlimited resources is not enough if you don't know how to play the game well. In this section, we will give you some tips and tricks on how to use cheat Naruto x Boruto Ninja Voltage effectively.</p>
86
- <h3>Tips and tricks for building the ultimate ninja clan</h3>
87
- <p>One of the main aspects of Naruto x Boruto Ninja Voltage is building your own ninja clan. This involves collecting shinobi cards, enhancing your ninjas, equipping them with ninja tools, awakening them to higher ranks, unlocking their abilities, and forming teams for different missions. Here are some tips and tricks on how to build the ultimate ninja clan:</p>
88
- <ul>
89
- <li>Use cheat Naruto x Boruto Ninja Voltage to get unlimited shinobite, which is the premium currency of the game. With shinobite, you can summon more shinobi cards from various banners, which will give you more options and variety for your clan.</li>
90
- <li>Use cheat Naruto x Boruto Ninja Voltage to get unlimited chakra, coins, hero fragments, and other resources that are needed for enhancing your ninjas. With these resources, you can level up your ninjas faster, evolve them easier them easier, and unlock their full potential.</li>
91
- <li>Use cheat Naruto x Boruto Ninja Voltage to get unlimited ninja tools, which are items that can boost your ninjas' stats and skills. With ninja tools, you can customize your ninjas according to your preferences and strategies.</li>
92
- <li>Use cheat Naruto x Boruto Ninja Voltage to get unlimited ultimate jutsu, which are powerful attacks that can turn the tide of battle. With ultimate jutsu, you can unleash devastating damage and effects on your enemies.</li>
93
- <li>Use cheat Naruto x Boruto Ninja Voltage to get unlimited shinobi of different types and elements, which are important for creating balanced and diverse teams. With different types and elements, you can take advantage of the strengths and weaknesses of your enemies and allies.</li>
94
- </ul>
95
- <h3>Tips and tricks for strategic ninja fortress battles</h3>
96
- <p>Another main aspect of Naruto x Boruto Ninja Voltage is engaging in ninja fortress battles. This involves defending your own base from invaders and attacking other players' bases for rewards. Here are some tips and tricks on how to win strategic ninja fortress battles:</p>
97
- <ul>
98
- <li>Use cheat Naruto x Boruto Ninja Voltage to get unlimited resources for building and upgrading your base. With these resources, you can create a strong and secure fortress that can withstand any attack.</li>
99
- <li>Use cheat Naruto x Boruto Ninja Voltage to get unlimited traps, installations, and shinobi for defending your base. With these items, you can set up various obstacles and challenges for your invaders and make them waste their time and resources.</li>
100
- <li>Use cheat Naruto x Boruto Ninja Voltage to get unlimited resources for scouting and attacking other bases. With these resources, you can find the best targets and plan the best strategies for your raids.</li>
101
- <li>Use cheat Naruto x Boruto Ninja Voltage to get unlimited shinobi for attacking other bases. With these shinobi, you can form the best teams for different scenarios and overcome any defense.</li>
102
- <li>Use cheat Naruto x Boruto Ninja Voltage to get unlimited ultimate jutsu for attacking other bases. With these ultimate jutsu, you can deal massive damage and effects on your enemies and clear their base faster.</li>
103
- </ul>
104
- <h3>Tips and tricks for fast paced shinobi action</h3>
105
- <p>The last main aspect of Naruto x Boruto Ninja Voltage is experiencing fast paced shinobi action. This involves controlling your ninjas in real time and using their skills and abilities to fight against various enemies. Here are some tips and tricks on how to enjoy fast paced shinobi action:</p>
106
- <ul>
107
- <li>Use cheat Naruto x Boruto Ninja Voltage to get unlimited stamina, which is the energy that is needed for playing missions. With stamina, you can play as many missions as you want without waiting or spending shinobite.</li>
108
- <li>Use cheat Naruto x Boruto Ninja Voltage to get unlimited resources for unlocking new missions. With these resources, you can access more content and challenges in the game.</li>
109
- <li>Use cheat Naruto x Boruto Ninja Voltage to get unlimited shinobi for playing missions. With these shinobi, you can choose the best ones for each mission and switch between them during battle.</li>
110
- <li>Use cheat Naruto x Boruto Ninja Voltage to get unlimited skills and abilities for playing missions. With these skills and abilities, you can perform various actions and combos that suit your play style.</li>
111
- <li>Use cheat Naruto x Boruto Ninja Voltage to get unlimited ultimate jutsu for playing missions. With these ultimate jutsu, you can finish off your enemies with style and flair.</li>
112
- </ul>
113
- <h2>Conclusion</h2>
114
- <p>Naruto x Boruto Ninja Voltage is a fun and exciting game that lets you create your own ninja clan, engage in strategic ninja fortress battles, and experience fast paced shinobi action. However, if you want to have more fun and success in the game, you might want to download cheat Naruto x Boruto Ninja Voltage.</p>
115
- <p>Cheat Naruto x Boruto Ninja Voltage is a term that refers to any method or tool that can help you hack the game and get unlimited resources, such as shinobite, chakra, coins, hero fragments, and more. With these cheats, you can easily upgrade your ninjas, enhance your base, and dominate the battles.</p>
116
- <p>The best way to download cheat Naruto x Boruto Ninja Voltage is to use online generators or hack tools. These are web-based applications that can generate unlimited resources for you without requiring any download or installation. They are also safe, secure, legal, and ethical to use.</p>
117
- <p>We have also given you some tips and tricks on how to use cheat Naruto x Boruto Ninja Voltage effectively. These include tips on how to build the ultimate ninja clan, how to win strategic ninja fortress battles, and how to enjoy fast paced shinobi action.</p>
118
- <p>We hope that this article has helped you learn more about cheat Naruto x Boruto Ninja Voltage and how to download and use it effectively. If you are interested in trying it out, you can visit one of the websites we have recommended and follow the instructions. However, remember to use it responsibly and moderately, as abusing it might ruin the fun of the game or get you in trouble. Also, be respectful of other players and the developers, and do not use cheat Naruto x Boruto Ninja Voltage to harm or harass anyone. Have fun and enjoy the game! <h2>FAQs</h2>
119
- <p>Here are some of the frequently asked questions about cheat Naruto x Boruto Ninja Voltage:</p>
120
- <h3>Q: Is cheat Naruto x Boruto Ninja Voltage safe to use?</h3>
121
- <p>A: Yes, if you use online generators or hack tools from reputable websites. These tools have anti-ban protection and encryption features that ensure your account's safety. However, if you use cheat codes or modded APKs from shady websites, you might risk getting malware, losing your data, or getting banned.</p>
122
- <h3>Q: Is cheat Naruto x Boruto Ninja Voltage legal to use?</h3>
123
- <p>A: Yes, if you use online generators or hack tools that do not violate the game's terms of service or infringe on the developers' rights. These tools are ethical and fair to use, as they do not harm or harass anyone. However, if you use cheat codes or modded APKs that alter the game's data or bypass the security measures, you might break the law and face legal consequences.</p>
124
- <h3>Q: Is cheat Naruto x Boruto Ninja Voltage free to use?</h3>
125
- <p>A: Yes, if you use online generators or hack tools that do not require any payment or subscription. These tools are generous and convenient to use, as they do not ask for any money or personal information. However, if you use cheat codes or modded APKs that require you to complete surveys, download apps, or enter your credit card details, you might end up spending more than you save.</p>
126
- <h3>Q: How often can I use cheat Naruto x Boruto Ninja Voltage?</h3>
127
- <p>A: You can use cheat Naruto x Boruto Ninja Voltage as often as you want, as long as you do not abuse it or get detected. However, we suggest you use it moderately and sparingly, as using it too much might make the game boring or suspicious. Also, be careful not to overdo it or make it obvious that you are cheating, as other players might report you or challenge you.</p>
128
- <h3>Q: Can I share cheat Naruto x Boruto Ninja Voltage with my friends?</h3>
129
- <p>A: You can share cheat Naruto x Boruto Ninja Voltage with your friends, as long as they are trustworthy and responsible. However, we advise you not to share it with strangers or enemies, as they might misuse it or expose it. Also, be discreet and cautious when sharing it, as not everyone might approve of it or appreciate it.</p> 197e85843d<br />
130
- <br />
131
- <br />
 
spaces/1phancelerku/anime-remove-background/Car Parking Multiplayer APK Para Hileli How to Install and Play.md DELETED
@@ -1,92 +0,0 @@
1
-
2
- <h1>Car Parking Multiplayer APK Para Hileli: A Fun and Realistic Driving Game</h1>
3
- <p>If you love driving games and want to experience a realistic and immersive simulation, you should try Car Parking Multiplayer APK Para Hileli. This is a modded version of the popular Car Parking Multiplayer game that gives you unlimited money and access to all the features of the game. In this article, we will tell you what Car Parking Multiplayer APK Para Hileli is, what features it has, how to download and install it, and how to use it.</p>
4
- <h2>What is Car Parking Multiplayer APK Para Hileli?</h2>
5
- <p>Car Parking Multiplayer APK Para Hileli is a modified version of the original Car Parking Multiplayer game, which is developed by olzhass. The original game is a realistic driving simulator that lets you drive various cars in different locations, park them correctly, and interact with other players online. The game has over 100 million downloads on Google Play Store and has a rating of 4.3 out of 5 stars.</p>
6
- <h2>car parking multiplayer apk para hileli</h2><br /><p><b><b>Download Zip</b> &ndash;&ndash;&ndash;&ndash;&ndash;>>> <a href="https://jinyurl.com/2uNLUs">https://jinyurl.com/2uNLUs</a></b></p><br /><br />
7
- <p>The modded version of the game, Car Parking Multiplayer APK Para Hileli, gives you some advantages over the original game. For example, you get unlimited money to buy and upgrade any car you want, unlock all the locations and game modes, and enjoy the game without any ads or restrictions. You can also use some cheats and hacks to make the game easier or more fun.</p>
8
- <h3>Features of Car Parking Multiplayer APK Para Hileli</h3>
9
- <p>Car Parking Multiplayer APK Para Hileli has many features that make it one of the best driving games on Android. Here are some of them:</p>
10
- <p>car parking multiplayer mod apk unlimited money<br />
11
- car parking multiplayer hack apk download<br />
12
- car parking multiplayer cheat apk android<br />
13
- car parking multiplayer mod menu apk latest version<br />
14
- car parking multiplayer apk indir para hilesi<br />
15
- car parking multiplayer hileli apk 2021<br />
16
- car parking multiplayer mod apk free shopping<br />
17
- car parking multiplayer hack apk ios<br />
18
- car parking multiplayer cheat codes android<br />
19
- car parking multiplayer mod apk all cars unlocked<br />
20
- car parking multiplayer hileli apk son sürüm<br />
21
- car parking multiplayer mod apk revdl<br />
22
- car parking multiplayer hack apk 2020<br />
23
- car parking multiplayer cheat engine pc<br />
24
- car parking multiplayer mod apk rexdl<br />
25
- car parking multiplayer hileli apk 2020<br />
26
- car parking multiplayer mod apk happymod<br />
27
- car parking multiplayer hack apk 2021<br />
28
- car parking multiplayer cheat menu android<br />
29
- car parking multiplayer mod apk an1<br />
30
- car parking multiplayer hileli apk android oyun club<br />
31
- car parking multiplayer mod apk unlimited gold<br />
32
- car parking multiplayer hack tool online<br />
33
- car parking multiplayer cheat codes ios<br />
34
- car parking multiplayer mod apk obb<br />
35
- car parking multiplayer hileli apk güncel<br />
36
- car parking multiplayer mod apk android 1<br />
37
- car parking multiplayer hack version download<br />
38
- car parking multiplayer cheat codes 2021<br />
39
- car parking multiplayer mod apk 4.7.8<br />
40
- car parking multiplayer hileli apk indir cepde<br />
41
- car parking multiplayer mod apk unlimited everything<br />
42
- car parking multiplayer hack online generator<br />
43
- car parking multiplayer cheat codes 2020<br />
44
- car parking multiplayer mod apk 4.7.4<br />
45
- car parking multiplayer hileli apk indir android oyun club<br />
46
- car parking multiplayer mod apk no root<br />
47
- car parking multiplayer hack ios download<br />
48
- car parking multiplayer cheat codes list<br />
49
- car parking multiplayer mod apk 4.7.7</p>
50
- <h4>- Open world map with different locations</h4>
51
- <p>The game has a huge open world map that you can explore freely. You can drive in various locations such as cities, towns, deserts, forests, airports, ports, and more. Each location has its own scenery, traffic, weather, and parking rules. You can also find hidden places and secrets in the map.</p>
52
- <h4>- Realistic car physics and sounds</h4>
53
- <p>The game has realistic car physics that make driving feel like in real life. You can feel the weight, speed, acceleration, braking, steering, suspension, and damage of your car. You can also hear the engine sound, horn sound, tire sound, and other sounds of your car. The game supports manual transmission with clutch and stick shift.</p>
54
- <h4>- Customizable cars and tuning options</h4>
55
- <p>The game has over 100 cars that you can choose from. You can find different types of cars such as sedans, coupes, SUVs, trucks, buses, sports cars, supercars, and more. You can also customize your car with various parts and accessories such as wheels, tires, spoilers, bumpers, lights, paint colors, stickers, decals, and more. You can also tune your car's performance by adjusting the engine power, torque, gear ratio, suspension height, camber angle, brake force, and more.</p>
56
- <h4>- Multiplayer mode with chat and voice communication</h4>
57
- <p>The game has a multiplayer mode that lets you play with other players online. You can join or create a room with up to 100 players. You can chat with other players using text or voice messages. You can also exchange cars with other players or trade them. You can also cooperate with other players to complete missions or challenges. You can also compete with other players in races or drifts.</p>
58
- <h4>- Various game modes and challenges</h4>
59
- <p>The game has various game modes and challenges that you can play. You can play the classic parking mode, where you have to park your car in the designated spot without hitting any obstacles or other cars. You can also play the free driving mode, where you can drive around the map and do whatever you want. You can also play the police mode, where you can chase or be chased by the police. You can also play the zombie mode, where you have to survive the zombie apocalypse. You can also play the delivery mode, where you have to deliver goods or passengers to different locations. You can also play the taxi mode, where you have to pick up and drop off customers. You can also play the tow truck mode, where you have to tow broken cars to the garage. You can also play the school mode, where you have to learn how to drive and park correctly.</p>
60
- <h3>How to download and install Car Parking Multiplayer APK Para Hileli?</h3>
61
- <p>If you want to download and install Car Parking Multiplayer APK Para Hileli, you have to follow these steps:</p>
62
- <h4>- Download the APK file from a trusted source</h4>
63
- <p>The first step is to download the APK file of Car Parking Multiplayer APK Para Hileli from a trusted source. You can find many websites that offer the APK file for free, but you have to be careful because some of them may contain viruses or malware. We recommend you to use this link to download the APK file safely and securely.</p>
64
- <h4>- Enable unknown sources on your device settings</h4>
65
- <p>The second step is to enable unknown sources on your device settings. This is necessary because Android devices do not allow installing apps from sources other than Google Play Store by default. To enable unknown sources, you have to go to your device settings, then security, then unknown sources, and then toggle it on.</p>
66
- <h4>- Install the APK file and launch the game</h4>
67
- <p>The third step is to install the APK file and launch the game. To install the APK file, you have to locate it in your device storage, then tap on it, then follow the instructions on the screen. To launch the game, you have to find its icon on your device home screen or app drawer, then tap on it, then wait for it to load.</p>
68
- <h3>How to use Car Parking Multiplayer APK Para Hileli?</h3>
69
- <p>If you want to use Car Parking Multiplayer APK Para Hileli, you have to follow these steps:</p>
70
- <h4>- Choose your car and location</h4>
71
- <p>The first step is to choose your car and location. You can access the garage by tapping on the car icon on the bottom left corner of the screen. There, you can see all the cars that you own or can buy with your unlimited money. You can also customize and tune your car as you like. To choose your location, you can access the map by tapping on the map icon on the bottom right corner of the screen. There, you can see all the locations that you unlocked or can unlock with your unlimited money. You can also see how many players are online in each location.</p>
72
- <h4>- Drive around and park your car correctly</h4>
73
- <p>The second step is to drive around and park your car correctly. You can control your car by using the buttons on the screen or by tilting your device. You can also switch between different camera views by tapping on the camera icon on the top right corner of the screen. You can also turn on or off the headlights, indicators, wipers, and other features of your car by tapping on the corresponding icons on the screen. You can also use the handbrake, horn, and nitro by tapping on the buttons on the screen. To park your car correctly, you have to follow the arrows and signs on the road and avoid hitting any obstacles or other cars. You can also use the parking assistant by tapping on the parking icon on the screen. You can also see your parking score and time on the screen.</p>
74
- <h4>- Interact with other players and enjoy the game</h4>
75
- <p>The third step is to interact with other players and enjoy the game. You can access the multiplayer mode by tapping on the multiplayer icon on the top left corner of the screen. There, you can see all the rooms that are available or create your own room. You can also see how many players are in each room and what game mode they are playing. You can also chat with other players by tapping on the chat icon on the bottom center of the screen. You can also use voice communication by tapping on the microphone icon on the screen. You can also exchange cars with other players by tapping on their car and choosing the exchange option. You can also trade cars with other players by tapping on their car and choosing the trade option. You can also cooperate with other players to complete missions or challenges by tapping on their car and choosing the cooperate option. You can also compete with other players in races or drifts by tapping on their car and choosing the compete option.</p>
76
- <h2>Conclusion</h2>
77
- <p>Car Parking Multiplayer APK Para Hileli is a fun and realistic driving game that lets you drive various cars in different locations, park them correctly, and interact with other players online. The game has many features that make it one of the best driving games on Android. The modded version of the game gives you unlimited money and access to all the features of the game. You can also use some cheats and hacks to make the game easier or more fun. To download and install Car Parking Multiplayer APK Para Hileli, you have to follow some simple steps. To use Car Parking Multiplayer APK Para Hileli, you have to choose your car and location, drive around and park your car correctly, and interact with other players and enjoy the game.</p>
78
- <p>We hope this article was helpful for you. If you have any questions or feedback, please let us know in the comments below. Thank you for reading!</p>
79
- <h3>FAQs</h3>
80
- <p>Here are some frequently asked questions about Car Parking Multiplayer APK Para Hileli:</p>
81
- <h4>- Is Car Parking Multiplayer APK Para Hileli safe to use?</h4>
82
- <p>Yes, Car Parking Multiplayer APK Para Hileli is safe to use as long as you download it from a trusted source. However, you should be aware that using a modded version of a game may violate its terms of service and may result in a ban or suspension from the game. Therefore, you should use Car Parking Multiplayer APK Para Hileli at your own risk.</p>
83
- <h4>- Is Car Parking Multiplayer APK Para Hileli compatible with my device?</h4>
84
- <p>Car Parking Multiplayer APK Para Hileli is compatible with most Android devices that have Android 4.1 or higher versions. However, some devices may not support some features of the game or may experience some performance issues due to their hardware specifications.</p>
85
- <h4>- How can I update Car Parking Multiplayer APK Para Hileli?</h4>
86
- <p>To update Car Parking Multiplayer APK Para Hileli, you have to download and install the latest version of the APK file from the same source that you downloaded it from. You may also have to uninstall the previous version of the game before installing the new one. You should also backup your game data before updating to avoid losing your progress.</p>
87
- <h4>- How can I get more money in Car Parking Multiplayer APK Para Hileli?</h4>
88
- <p>Car Parking Multiplayer APK Para Hileli gives you unlimited money by default, so you don't have to worry about running out of money in the game. However, if you want to get more money, you can use some cheats and hacks that are available in the game. For example, you can use the money hack by tapping on the money icon on the top center of the screen and entering the amount of money that you want. You can also use the car hack by tapping on the car icon on the bottom left corner of the screen and choosing any car that you want.</p>
89
- <h4>- How can I play Car Parking Multiplayer APK Para Hileli with my friends?</h4>
90
- <p>Car Parking Multiplayer APK Para Hileli lets you play with your friends online. You can invite your friends to join your room or join their room by using their room code. You can also add your friends as contacts by tapping on their name and choosing the add option. You can also see your friends' online status and chat with them by tapping on the contacts icon on the top left corner of the screen.</p>
91
- <br />
92
- <br />
spaces/1phancelerku/anime-remove-background/Download Sinking Simulator and unleash your destructive side.md DELETED
@@ -1,150 +0,0 @@
1
- <br />
2
- <h1>Download Sinking Ship Simulator: A Guide to the Best Games and Mods</h1>
3
- <p>Have you ever wondered what it would be like to sink a ship? Whether you are fascinated by the Titanic, curious about naval warfare, or just enjoy watching things go down, sinking ship simulators are a great way to experience the thrill of sinking ships in a safe and fun way. In this article, we will explore what sinking ship simulators are, why they are so popular, and how you can download the best games and mods for your PC.</p>
4
- <h2>download sinking ship simulator</h2><br /><p><b><b>Download Zip</b> &#10042; <a href="https://jinyurl.com/2uNJWz">https://jinyurl.com/2uNJWz</a></b></p><br /><br />
5
- <h2>What is a sinking ship simulator?</h2>
6
- <p>A sinking ship simulator is a type of simulation game that allows you to create, customize, and destroy ships in various scenarios. You can choose from different types of ships, such as cruise liners, cargo vessels, warships, or even submarines, and modify them with different features, such as engines, weapons, or hulls. You can also adjust the environmental conditions, such as the weather, the waves, or the water depth. Then, you can unleash your creativity and cause havoc by damaging, flooding, or exploding your ships, and watch them sink in realistic physics.</p>
7
- <h3>A brief history of sinking ship simulators</h3>
8
- <p>Sinking ship simulators have been around for a long time, dating back to the early days of computer games. One of the first examples was <em>Sink the Bismarck!</em>, released in 1980 for the Apple II computer. The game was based on the historical naval battle between the British Royal Navy and the German battleship Bismarck in 1941. The player had to control a fleet of British ships and aircrafts and try to sink the Bismarck before it escaped to France.</p>
9
- <p>Since then, many other games have been developed that feature sinking ships as a main or secondary element. Some of the most notable ones include <em>Titanic: Adventure Out of Time</em> (1996), <em>Ship Simulator</em> (2006), <em>Silent Hunter</em> (2007), <em>World of Warships</em> (2015), and <em>Titanic VR</em> (2018). These games vary in their genre, style, and realism, but they all share the common theme of simulating ships and their fate.</p>
10
- <h3>The appeal of sinking ship simulators</h3>
11
- <p>So why do people enjoy playing sinking ship simulators? There are many possible reasons, depending on the individual preferences and motivations of each player. Some of the most common ones are:</p>
12
- <ul>
13
- <li><strong>Curiosity:</strong> Some people are curious about how ships work, how they react to different situations, and how they sink. Sinking ship simulators allow them to learn more about these topics in an interactive and visual way.</li>
14
- <li><strong>Fantasy:</strong> Some people have a fantasy of being on a sinking ship, either as a survivor or as a saboteur. Sinking ship simulators allow them to fulfill their fantasy in a safe and controlled environment.</li>
15
- <li><strong>Challenge:</strong> Some people like to test their skills and knowledge by trying to sink or save ships in different scenarios. Sinking ship simulators allow them to challenge themselves and compete with others.</li>
16
- <li><strong>Creativity:</strong> Some people like to express their creativity by designing and customizing their own ships and scenarios. Sinking ship simulators allow them to unleash their imagination and share their creations with others.</li>
17
- <li><strong>Destruction:</strong> Some people simply enjoy watching things go boom. Sinking ship simulators allow them to experience the satisfaction of destroying ships in spectacular ways.</li>
18
- </ul> <h3>The challenges of sinking ship simulators</h3>
19
- <p>While sinking ship simulators can be fun and educational, they also pose some challenges for both developers and players. Some of the most common ones are:</p>
20
- <ul>
21
- <li><strong>Realism:</strong> How realistic should a sinking ship simulator be? Should it follow the laws of physics, the principles of engineering, and the facts of history? Or should it allow some artistic license, some creative freedom, and some fictional elements? Different players may have different expectations and preferences for the level of realism in a sinking ship simulator, and developers may have to balance them with the limitations of technology and resources.</li>
22
- <li><strong>Ethics:</strong> How ethical is it to simulate sinking ships, especially those that involve human casualties, such as the Titanic or the Lusitania? Is it disrespectful to the victims and their families, or is it a way of honoring their memory and learning from their tragedy? Different players may have different opinions and feelings about the ethics of sinking ship simulators, and developers may have to consider them with sensitivity and responsibility.</li>
23
- <li><strong>Accessibility:</strong> How accessible is a sinking ship simulator to different types of players, such as casual gamers, hardcore enthusiasts, or educational users? Does it require a high-end PC, a VR headset, or a special controller? Does it have a steep learning curve, a user-friendly interface, or a comprehensive tutorial? Does it have a diverse and inclusive representation of ships, scenarios, and characters? Different players may have different needs and preferences for the accessibility of a sinking ship simulator, and developers may have to accommodate them with flexibility and innovation.</li>
24
- </ul>
25
- <h2>How to download sinking ship simulator games and mods</h2>
26
- <p>Now that you know what sinking ship simulators are, why they are popular, and what challenges they face, you may be wondering how you can download them for your PC. There are many options available online, but we will focus on three of the most popular and well-reviewed ones: Sinking Simulator on Steam, Sinking Simulator 2 on Game Jolt, and Ship Simulator Realistic on Steam. Here is a brief overview of each one and how you can download and install them.</p>
27
- <h3>Sinking Simulator on Steam</h3>
28
- <h4>Features and gameplay</h4>
29
- <p>Sinking Simulator is a 2D sandbox game that lets you create and destroy ships in various ways. You can choose from over 50 pre-made ships or create your own with the built-in editor. You can also modify the water level, the gravity, the buoyancy, and the damage model. You can then watch your ships sink in real-time physics or interact with them using tools such as bombs, torpedoes, or lasers. You can also share your creations and download other users' ships from the Steam Workshop.</p>
30
- <h4>How to download and install</h4>
31
- <p>To download Sinking Simulator on Steam, you will need to have a Steam account and the Steam client installed on your PC. You can create a free account <a href="">here</a> and download the client <a href="">here</a> . Once you have done that, you can follow these steps:</p>
32
- <p>How to download sinking ship simulator for free<br />
33
- Sinking ship simulator steam download<br />
34
- Sinking ship simulator game jolt download<br />
35
- Sinking ship simulator 2 download<br />
36
- Sinking ship simulator online no download<br />
37
- Sinking ship simulator mac download<br />
38
- Sinking ship simulator windows 10 download<br />
39
- Sinking ship simulator alpha 4 download<br />
40
- Sinking ship simulator titanic download<br />
41
- Sinking ship simulator android download<br />
42
- Sinking ship simulator apk download<br />
43
- Sinking ship simulator mod download<br />
44
- Sinking ship simulator custom ships download<br />
45
- Sinking ship simulator pc download<br />
46
- Sinking ship simulator linux download<br />
47
- Sinking ship simulator java download<br />
48
- Sinking ship simulator zip download<br />
49
- Sinking ship simulator exe download<br />
50
- Sinking ship simulator full version download<br />
51
- Sinking ship simulator demo download<br />
52
- Sinking ship simulator update download<br />
53
- Sinking ship simulator tutorial download<br />
54
- Sinking ship simulator youtube video download<br />
55
- Sinking ship simulator reddit post download<br />
56
- Sinking ship simulator discord server download<br />
57
- Sinking ship simulator wiki page download<br />
58
- Sinking ship simulator cheats codes download<br />
59
- Sinking ship simulator hacks tools download<br />
60
- Sinking ship simulator tips tricks download<br />
61
- Sinking ship simulator reviews ratings download<br />
62
- Sinking ship simulator gameplay screenshots download<br />
63
- Sinking ship simulator trailer teaser download<br />
64
- Sinking ship simulator soundtrack music download<br />
65
- Sinking ship simulator fan art wallpaper download<br />
66
- Sinking ship simulator merchandise store download<br />
67
- Sinking ship simulator support pack 1 download<br />
68
- Sinking ship simulator legacy version download<br />
69
- Sinking ship simulator opencl error fix download<br />
70
- Sinking ship simulator soft body physics engine download<br />
71
- Sinking ship simulator water flow simulation download<br />
72
- Sinking ship simulator realistic destruction effects download<br />
73
- Sinking ship simulator sandbox mode options download<br />
74
- Sinking ship simulator multiplayer co-op mode download<br />
75
- Sinking ship simulator vr compatible mode download<br />
76
- Sinking ship simulator controller keyboard support download<br />
77
- Sinking ship simulator system requirements specifications download</p>
78
- <ol>
79
- <li>Open the Steam client and log in with your account.</li>
80
- <li>Go to the Store tab and search for Sinking Simulator.</li>
81
- <li>Select the game from the results and click on Add to Cart.</li>
82
- <li>Proceed to checkout and complete your purchase. The game costs $9.99 USD as of June 2023.</li>
83
- <li>Go to the Library tab and find Sinking Simulator in your list of games.</li>
84
- <li>Select the game and click on Install.</li>
85
- <li>Wait for the installation to finish. The game requires about 500 MB of disk space.</li>
86
- <li>Select the game again and click on Play.</li>
87
- <li>Enjoy sinking ships!</li>
88
- </ol>
89
- <h3>Sinking Simulator 2 on Game Jolt</h3>
90
- <h4>Features and gameplay</h4>
91
- <p>Sinking Simulator 2 is a 3D sandbox game that lets you create and destroy ships in various ways. You can choose from over 100 pre-made ships or create your own with the built-in editor. You can also modify the water level, the weather, the waves, and the damage model. You can then watch your ships sink in real-time physics or interact with them using tools such as bombs, torpedoes, or lasers. You can also share your creations and download other users' ships from the Game Jolt website.</p>
92
- <h4>How to download and install</h4>
93
- <p>To download Sinking Simulator 2 on Game Jolt, you will need to have a Game Jolt account. You can create a free account <a href="">here</a> . Once you have done that, you can follow these steps:</p>
94
- <ol>
95
- <li>Go to the Game Jolt website and log in with your account.</li>
96
- <li>Go to the Sinking Simulator 2 page <a href="">here</a> .</li>
97
- <li>Click on the Download button and choose your preferred version. The game is available for Windows, Mac, and Linux.</li>
98
- <li>Wait for the download to finish. The game requires about 1 GB of disk space.</li>
99
- <li>Extract the zip file to a folder of your choice.</li>
100
- <li>Open the folder and double-click on the Sinking Simulator 2 executable file.</li>
101
- <li>Enjoy sinking ships!</li>
102
- </ol>
103
- <h3>Ship Simulator Realistic on Steam</h3>
104
- <h4>Features and gameplay</h4>
105
- <p>Ship Simulator Realistic is a 3D simulation game that lets you drive and sink ships in various scenarios. You can choose from over 20 realistic ships, such as cruise liners, cargo vessels, warships, or even submarines, and explore different locations, such as ports, islands, or oceans. You can also adjust the environmental conditions, such as the weather, the time of day, or the water depth. You can then experience the realistic physics and graphics of driving and sinking ships, or use the free camera mode to watch them from different angles.</p>
106
- <h4>How to download and install</h4>
107
- <p>To download Ship Simulator Realistic on Steam, you will need to have a Steam account and the Steam client installed on your PC. You can create a free account <a href="">here</a> and download the client <a href="">here</a> . Once you have done that, you can follow these steps:</p>
108
- <ol>
109
- <li>Open the Steam client and log in with your account.</li>
110
- <li>Go to the Store tab and search for Ship Simulator Realistic.</li>
111
- <li>Select the game from the results and click on Add to Cart.</li>
112
- <li>Proceed to checkout and complete your purchase. The game costs $14.99 USD as of June 2023.</li>
113
- <li>Go to the Library tab and find Ship Simulator Realistic in your list of games.</li>
114
- <li>Select the game and click on Install.</li>
115
- <li>Wait for the installation to finish. The game requires about 2 GB of disk space.</li>
116
- <li>Select the game again and click on Play.</li>
117
- <li>Enjoy driving and sinking ships!</li>
118
- </ol>
119
- <h2>Conclusion and FAQs</h2>
120
- <p>Sinking ship simulators are a type of simulation game that allow you to create, customize, and destroy ships in various scenarios. They are popular among many players for different reasons, such as curiosity, fantasy, challenge, creativity, or destruction. They also face some challenges, such as realism, ethics, or accessibility. In this article, we have reviewed three of the best games and mods for sinking ship simulators: Sinking Simulator on Steam, Sinking Simulator 2 on Game Jolt, and Ship Simulator Realistic on Steam. We have also explained how you can download and install them for your PC. We hope you have found this article helpful and informative. If you have any questions or comments, please feel free to leave them below. Here are some FAQs that may answer some of your queries:</p>
121
- <h3>FAQs</h3>
122
- <ul>
123
- <li><strong>Q: Can I play sinking ship simulators online with other players?</strong></li>
124
- <li>A: Some sinking ship simulators have online multiplayer modes or features that allow you to play with or against other players. For example, World of Warships is a massively multiplayer online game that lets you engage in naval battles with other players. However, not all sinking ship simulators have online capabilities, so you may have to check their descriptions or reviews before downloading them.</li>
125
- <li><strong>Q: Can I play sinking ship simulators on mobile devices?</strong></li>
126
- <li>A: Some sinking ship simulators have mobile versions or adaptations that allow you to play them on your smartphone or tablet. For example, Titanic: Honor and Glory is a mobile game that lets you explore and sink the Titanic in 3D graphics. However, not all sinking ship simulators are compatible with mobile devices, so you may have to check their availability or requirements before downloading them.</li>
127
- <li><strong>Q: Can I create my own sinking ship simulator game or mod?</strong></li>
128
- <li>A: Yes, you can create your own sinking ship simulator game or mod if you have the skills and tools to do so. There are many software applications and platforms that allow you to design and program your own games or mods, such as Unity, Unreal Engine, Game Maker Studio, or Blender. You can also use existing games or mods as a base or inspiration for your own sinking ship simulator game or mod. However, creating your own sinking ship simulator game or mod may require a lot of time, effort, and resources, so you may want to consider the feasibility and purpose of your project before starting it.</li>
129
- <li><strong>Q: What are some tips and tricks for playing sinking ship simulators?</strong></li>
130
- <li>A: Here are some general tips and tricks that may help you improve your sinking ship simulator experience:</li>
131
- <ul>
132
- <li>Read the instructions and tutorials of each game or mod carefully to understand how to control and interact with your ships and the environment.</li>
133
- <li>Experiment with different settings and options to find the ones that suit your preferences and goals. You can change the graphics, the sound, the difficulty, the camera, and more.</li>
134
- <li>Save your progress frequently to avoid losing your data or progress in case of a crash or a bug.</li>
135
- <li>Use the pause or slow-motion features to analyze your ships and their sinking process in more detail.</li>
136
- <li>Compare your results and performance with other players and learn from their feedback and tips.</li>
137
- </ul>
138
- <li><strong>Q: Where can I find more information and resources about sinking ship simulators?</strong></li>
139
- <li>A: There are many websites, forums, blogs, videos, podcasts, and books that provide more information and resources about sinking ship simulators. Some of the most popular and reliable ones are:</li>
140
- <ul>
141
- <li><a href="">Sinking Ship Simulator Wiki</a>: A wiki that contains information, guides, tips, and trivia about various sinking ship simulator games and mods.</li>
142
- <li><a href="">Sinking Ship Simulator Reddit</a>: A subreddit that allows users to discuss, share, and ask questions about sinking ship simulator games and mods.</li>
143
- <li><a href="">Sinking Ship Simulator YouTube</a>: A YouTube channel that features gameplay videos, reviews, tutorials, and challenges of sinking ship simulator games and mods.</li>
144
- <li><a href="">Sinking Ship Simulator Podcast</a>: A podcast that covers news, updates, interviews, and stories about sinking ship simulator games and mods.</li>
145
- <li><a href="">Sinking Ship Simulator Book</a>: A book that explores the history, culture, science, and art of sinking ship simulators.</li>
146
- </ul>
147
- </ul>
148
- <p>I hope you have enjoyed reading this article and learned something new about sinking ship simulators. If you have any feedback or suggestions for improvement, please let me know in the comments section below. Thank you for your time and attention. Happy sinking!</p>
149
- <br />
150
- <br />
spaces/1phancelerku/anime-remove-background/Enjoy the Groovy Soundtrack and Exciting Physics Elements of Red Ball 4 on iOS.md DELETED
@@ -1,129 +0,0 @@
1
- <br />
2
- <h1>Red Ball 4: A Fun and Challenging Platformer Game for iOS Devices</h1>
3
- <p>If you are looking for a platformer game that is easy to play but hard to master, then you should try Red Ball 4. This is a game that will test your skills, reflexes, and logic as you roll, jump, and bounce through 75 exciting levels full of adventure. In this article, we will tell you everything you need to know about Red Ball 4, including what it is, how to download and install it on your iOS device, how to play it, and why you should play it.</p>
4
- <h2>What is Red Ball 4?</h2>
5
- <p>Red Ball 4 is a platformer game developed by FDG Entertainment GmbH & Co.KG. It was released in 2013 for Android devices and in 2014 for iOS devices. The game has been downloaded over 100 million times on Google Play Store and has received over 2.4 million ratings on App Store. It has also been featured in many gaming websites and magazines, such as Gamezebo, Didagame.com, and CrazyGamesOnline.</p>
6
- <h2>red ball 4 ios download</h2><br /><p><b><b>Download File</b> &#9734; <a href="https://jinyurl.com/2uNPTW">https://jinyurl.com/2uNPTW</a></b></p><br /><br />
7
- <p>The game's story revolves around an evil plan by some square-shaped minions who want to turn the round world into a square shape. They have also captured and transformed some of the red balls into squares. The only one who can stop them is Red Ball, a brave and heroic ball who sets out to rescue his friends and save the world. Along the way, he will face many dangers, puzzles, traps, and enemies, as well as epic boss battles.</p>
8
- <p>The game's features include:</p>
9
- <ul>
10
- <li>All-New Red Ball Adventure: A new story with new characters, locations, and challenges.</li>
11
- <li>75 Levels: A variety of levels with different themes, such as forest, factory, cave, moon, etc.</li>
12
- <li>Epic Boss Battles: Fight against giant bosses who have unique abilities and weaknesses.</li>
13
- <li>Cloud Support: Save your progress online and continue playing on any device.</li>
14
- <li>Exciting Physics Elements: Use momentum, gravity, bouncing, rolling, etc. to overcome obstacles and solve puzzles.</li>
15
- <li>Groovy Soundtrack: Enjoy the catchy music that matches the mood of each level.</li>
16
- <li>HID Controller Support: Play with a compatible controller for a more comfortable experience.</li>
17
- </ul>
18
- <h2>How to Download and Install Red Ball 4 on iOS?</h2>
19
- <p>To download and install Red Ball 4 on your iOS device, you need to follow these simple steps:</p>
20
- <ol>
21
- <li>Open the App Store app on your device.</li>
22
- <li>Search for "Red Ball 4" in the search bar.</li>
23
- <li>Select the app from the search results and tap on "Get".</li>
24
- <li>Enter your Apple ID password or use Touch ID or Face ID to confirm.</li>
25
- <li>Wait for the app to download and install on your device.</li>
26
- <li>Tap on "Open" or find the app icon on your home screen and tap on it to launch the game.</li>
27
- </ol> <h2>How to Play Red Ball 4?</h2>
28
- <p>Playing Red Ball 4 is very easy and intuitive. You just need to use the arrow buttons on the bottom left corner of the screen to move the red ball left or right. You can also use the jump button on the bottom right corner of the screen to make the red ball jump over gaps, obstacles, or enemies. You can also tap on the pause button on the top right corner of the screen to access the menu, where you can resume, restart, or quit the game.</p>
29
- <p>red ball 4 game download for iphone<br />
30
- how to install red ball 4 on ipad<br />
31
- red ball 4 app store link<br />
32
- red ball 4 full version free download ios<br />
33
- red ball 4 adventure game for ios devices<br />
34
- download red ball 4 (ad supported) on iphone<br />
35
- red ball 4 premium apk download for ios<br />
36
- red ball 4 offline game download for ipad<br />
37
- red ball 4 latest version download for ios<br />
38
- red ball 4 cheats and tips for iphone<br />
39
- red ball 4 review and rating on app store<br />
40
- red ball 4 best levels and challenges for ipad<br />
41
- red ball 4 cloud support and backup for ios<br />
42
- red ball 4 controller support and compatibility for iphone<br />
43
- red ball 4 soundtrack and sound effects for ipad<br />
44
- red ball 4 physics elements and gameplay for ios<br />
45
- red ball 4 epic boss battles and enemies for iphone<br />
46
- red ball 4 unlock all levels and features for ipad<br />
47
- red ball 4 modded and hacked version download for ios<br />
48
- red ball 4 fun and addictive game for iphone<br />
49
- red ball 4 alternatives and similar games for ipad<br />
50
- red ball 4 updates and bug fixes for ios<br />
51
- red ball 4 screenshots and videos for iphone<br />
52
- red ball 4 developer and publisher information for ipad<br />
53
- red ball 4 size and category on app store for ios<br />
54
- red ball 4 languages and subtitles for iphone<br />
55
- red ball 4 age rating and content advisory for ipad<br />
56
- red ball 4 privacy policy and data collection for ios<br />
57
- red ball 4 customer service and support for iphone<br />
58
- red ball 4 feedback and suggestions for ipad<br />
59
- how to play red ball 4 on ios device<br />
60
- how to uninstall red ball 4 from iphone or ipad<br />
61
- how to restore purchases on red ball 4 for ios<br />
62
- how to share red ball 4 with friends on iphone or ipad<br />
63
- how to rate and review red ball 4 on app store<br />
64
- how to contact red ball 4 developer or publisher on ios device<br />
65
- how to report a problem or issue with red ball 4 on iphone or ipad<br />
66
- how to join the community of red ball 4 players on ios device<br />
67
- how to access the settings and options of red ball 4 on iphone or ipad<br />
68
- how to enable or disable notifications of red ball 4 on ios device<br />
69
- how to change the language or subtitle of red ball 4 on iphone or ipad<br />
70
- how to adjust the volume or sound effects of red ball 4 on ios device<br />
71
- how to switch the controller or touch mode of red ball 4 on iphone or ipad<br />
72
- how to sync or backup the progress of red ball 4 on ios device<br />
73
- how to earn or buy coins or gems on red ball 4 for ios device<br />
74
- how to use the coins or gems on red ball 4 for iphone or ipad <br />
75
- how to unlock the premium features or levels of red ball 4 on ios device <br />
76
- how to beat the boss battles or enemies of red ball 4 on iphone or ipad <br />
77
- how to solve the puzzles or traps of red ball 4 on ios device <br />
78
- how to enjoy the adventure and fun of red ball 4 on iphone or ipad</p>
79
- <p>The goal of each level is to reach the flag at the end of the level. Along the way, you can collect stars that are scattered throughout the level. The more stars you collect, the higher your score will be. You can also find hidden secrets and bonus levels that will reward you with extra stars and fun surprises.</p>
80
- <p>However, you also need to be careful of the enemies and hazards that will try to stop you. Some of the enemies include square-shaped minions, spikes, saws, lasers, cannons, etc. If you touch them, you will lose a life and have to restart from the last checkpoint. You have three lives in each level, so use them wisely. You can also use some objects in the environment to help you, such as boxes, springs, levers, switches, etc.</p>
81
- <p>At the end of each world, you will face a boss battle that will test your skills and reflexes. Each boss has a different attack pattern and weakness that you need to figure out and exploit. You need to hit the boss three times to defeat it and move on to the next world.</p>
82
- <p>Here are some tips and tricks to help you play Red Ball 4 better:</p>
83
- <ul>
84
- <li>Explore every corner of the level and look for hidden stars and secrets.</li>
85
- <li>Use your momentum and bouncing ability to reach higher places or jump farther.</li>
86
- <li>Avoid unnecessary risks and plan your moves ahead.</li>
87
- <li>Learn from your mistakes and try again until you succeed.</li>
88
- <li>Have fun and enjoy the game!</li>
89
- </ul>
90
- <h2>Why Should You Play Red Ball 4?</h2>
91
- <p>There are many reasons why you should play Red Ball 4. Here are some of them:</p>
92
- <ul>
93
- <li>It is a fun and challenging game that will keep you entertained for hours.</li>
94
- <li>It has beautiful graphics and animations that will make you feel like you are in a cartoon world.</li>
95
- <li>It has a catchy soundtrack and sound effects that will enhance your gaming experience.</li>
96
- <li>It has a simple and intuitive control system that anyone can learn and master.</li>
97
- <li>It has a variety of levels and worlds that will offer you different challenges and scenarios.</li>
98
- <li>It has a humorous and engaging story that will make you root for Red Ball and his friends.</li>
99
- <li>It has a high replay value as you can try to collect all the stars and secrets in each level.</li>
100
- <li>It is suitable for all ages and preferences as it has different difficulty modes and options.</li>
101
- <li>It is free to download and play on your iOS device.</li>
102
- </ul>
103
- <p>Don't just take our word for it. Here are some of the positive reviews and ratings that Red Ball 4 has received from other players and critics:</p>
104
- <table>
105
- <tr><th>Name</th><th>Rating</th><th>Review</th></tr>
106
- <tr><td>Alexis</td><td>5 stars</td><td>"This game is awesome! I love how it is challenging but not impossible. The graphics are amazing and the music is catchy. I recommend this game to everyone!"</td></tr>
107
- <tr><td>Jake</td><td>4 stars</td><td>"I really enjoy playing this game. It is very fun and addictive. The only thing I don't like is that sometimes it crashes or freezes on my device. Please fix this bug."</td></tr>
108
- <tr><td>Lily</td><td>5 stars</td><td>"This game is so cute and funny. I love how Red Ball has different expressions and reactions. The levels are creative and well-designed. The bosses are epic and challenging."</td></tr>
109
- <tr><td>Ryan</td><td>5 stars</td><td>"This game is one of the best platformer games I have ever played. It has everything: action, adventure, puzzle, humor, etc. It is very satisfying to complete each level and collect all the stars."</td></tr>
110
- <tr><td>Sophia</td><td>5 stars</td><td>"This game is amazing! It is very easy to play but hard to master. It has a lot of levels and worlds that are different and exciting. It is very fun to play with friends or compete with them."</td></tr>
111
- </table>
112
- <p>As you can see, Red Ball 4 is a game that has received a lot of praise and appreciation from both players and critics. It is a game that will not disappoint you and will provide you with hours of fun and entertainment.</p>
113
- <h2>Conclusion</h2>
114
- <p>Red Ball 4 is a platformer game that you should definitely download and play on your iOS device. It is a game that will challenge your skills, reflexes, and logic as you roll, jump, and bounce through 75 levels full of adventure. It is a game that has a beautiful graphics, a catchy soundtrack, a humorous story, and a variety of features and options. It is a game that is suitable for all ages and preferences and has a high replay value. It is a game that is free to download and play and has received positive reviews and ratings from other players and critics.</p>
115
- <p>So what are you waiting for? Download Red Ball 4 today and join the red ball in his quest to save the world from the evil squares. You will not regret it!</p>
116
- <h2>FAQs</h2>
117
- <p>Here are some of the frequently asked questions that you may have about Red Ball 4:</p>
118
- <h3>Q: How many worlds and levels are there in Red Ball 4?</h3>
119
- <p>A: There are six worlds and 75 levels in Red Ball 4. The worlds are: Green Hills, Deep Forest, Box Factory, Battle for the Moon, Into the Cave, and Boss Fight.</p>
120
- <h3>Q: How can I unlock the bonus levels in Red Ball 4?</h3>
121
- <p>A: You can unlock the bonus levels by finding the hidden keys in some of the regular levels. The keys are usually hidden behind boxes or other objects. You need to push or break them to reveal the keys. Then you need to collect the keys and finish the level. The bonus levels will appear on the map after you do that.</p>
122
- <h3>Q: How can I get more lives in Red Ball 4?</h3>
123
- <p>A: You can get more lives by collecting the hearts that are scattered throughout the levels. Each heart will give you one extra life. You can also watch an ad to get three more lives if you run out of them.</p>
124
- <h3>Q: How can I change the difficulty mode in Red Ball 4?</h3>
125
- <p>A: You can change the difficulty mode by tapping on the settings button on the main menu or the pause menu. Then you can choose between easy, normal, or hard mode. The difficulty mode will affect the number of enemies, obstacles, and stars in each level.</p>
126
- <h3>Q: How can I contact the developer of Red Ball 4?</h3>
127
- <p>A: You can contact the developer of Red Ball 4 by visiting their website or sending them an email at [email protected]. You can also follow them on Facebook or Twitter for updates and news about their games.</p>
128
- <br />
129
- <br />
spaces/1phancelerku/anime-remove-background/Experience the Thrill of Honor of Kings - The Best Mobile MOBA on Play Store - Download for Free.md DELETED
@@ -1,119 +0,0 @@
1
- <br />
2
- <h1>Honor of Kings: The World's Most-Played Mobile MOBA</h1>
3
- <p>If you are looking for a competitive and exciting mobile game that offers the ultimate MOBA experience, then you should check out Honor of Kings. Honor of Kings is a multiplayer online battle arena game that has over 100 million daily players worldwide. It is the most-played mobile MOBA in the world, and it is now available to download and play in Brazil.</p>
4
- <p>In this article, we will tell you everything you need to know about Honor of Kings, including how to download and play it on your device, what are its game features and modes, and what are some tips and tricks for beginners. By the end of this article, you will be ready to join the fight, test your tactics, upgrade your heroes, and outplay your enemies in Honor of Kings.</p>
5
- <h2>honor of kings download play store</h2><br /><p><b><b>DOWNLOAD</b> &#9999; &#9999; &#9999; <a href="https://jinyurl.com/2uNOvk">https://jinyurl.com/2uNOvk</a></b></p><br /><br />
6
- <h2>How to Download and Play Honor of Kings on Your Device</h2>
7
- <p>Honor of Kings is free to download and play, and it is compatible with both Android and iOS devices. Here are the steps to download and install Honor of Kings on your device:</p>
8
- <h3>Google Play Store</h3>
9
- <p>If you have an Android device, you can download Honor of Kings from the Google Play Store. Here is how:</p>
10
- <ol>
11
- <li>Open the Google Play Store app on your device.</li>
12
- <li>Search for "Honor of Kings" in the search bar.</li>
13
- <li>Tap on the "Install" button to download and install the game.</li>
14
- <li>Once the installation is complete, tap on the "Open" button to launch the game.</li>
15
- <li>Follow the instructions on the screen to create your account and start playing.</li>
16
- </ol>
17
- <h3>App Store</h3>
18
- <p>If you have an iOS device, you can download Honor of Kings from the App Store. Here is how:</p>
19
- <ol>
20
- <li>Open the App Store app on your device.</li>
21
- <li>Search for "Honor of Kings" in the search bar.</li>
22
- <li>Tap on the "Get" button to download and install the game.</li>
23
- <li>Once the installation is complete, tap on the "Open" button to launch the game.</li>
24
- <li>Follow the instructions on the screen to create your account and start playing.</li>
25
- </ol>
26
- <h2>Honor of Kings Game Features and Modes</h2>
27
- <p>Honor of Kings is not just another MOBA game. It has many unique features and modes that make it stand out from other games in the genre. Here are some of them:</p>
28
- <h3>Game Features</h3>
29
- <ul>
30
- <li><b>Play Your Style, It's Showtime:</b> No matter if you master the Tank, Warrior, Assassin, Mage, Marksman, or Support roles, anyone can be the star player. It's time to show your style and lead the team to victory!</li>
31
- <li><b>Unique Heroes, Amazing Skills:</b> Players will have a selection of around 60 unique heroes to choose from, each with their own signature skills to unleash, stunning skins to unlock, and legendary stories to explore. Look out for more heroes to be constantly added to the roster in the future. Don't miss out on the chance to unlock and purchase the amazing skins and flaunt your style!</li>
32
- <li><b>Fierce Teamfights, Extreme Fun:</b> The fast-paced teamfights promise extreme fun wherever you play. The game features a smooth control system, stunning graphics, and realistic sound effects that will immerse you in the action.</li>
33
- <li><b>Compete with Friends, Rank Up Together:</b> Honor of Kings is more fun when you play with your friends. You can invite your friends to join you in the game, chat with them in real-time, and form a team to compete against other players. You can also rank up together and climb the leaderboards to earn rewards and glory.</li>
34
- </ul>
35
- <h3>Game Modes</h3>
36
- <p>Honor of Kings offers a variety of game modes to suit different preferences and skill levels. Here are some of them:</p>
37
- <ul>
38
- <li><b>Classic 5v5:</b> This is the standard mode where two teams of five players each battle it out on a three-lane map. The objective is to destroy the enemy's base while defending your own. This mode is ideal for beginners and casual players who want to enjoy the classic MOBA gameplay.</li>
39
- <li><b>Ranked 5v5:</b> This is similar to the classic mode, but with a competitive twist. Players can choose their preferred roles and heroes, and match with other players of similar skill levels. The objective is to win the game and earn points to increase your rank. This mode is ideal for experienced and ambitious players who want to challenge themselves and prove their skills.</li>
40
- <li><b>Quick Match 3v3:</b> This is a fast-paced mode where two teams of three players each fight on a smaller map with only one lane. The objective is to destroy the enemy's tower while protecting your own. This mode is ideal for busy and impatient players who want to have a quick and fun game.</li>
41
- <li><b>Valley of Kings:</b> This is a special mode where two teams of five players each compete for resources and objectives on a large map with multiple lanes. The objective is to collect crystals, defeat monsters, and destroy the enemy's crystal base. This mode is ideal for adventurous and strategic players who want to experience a different and exciting game mode.</li>
42
- </ul>
43
- <h2>Honor of Kings Game Tips and Tricks for Beginners</h2>
44
- <p>If you are new to Honor of Kings, you might feel overwhelmed by the game's complexity and diversity. Don't worry, we have some tips and tricks for you to help you get started and improve your game. Here are some of them:</p>
45
- <p>honor of kings mobile game download<br />
46
- how to download honor of kings on android<br />
47
- honor of kings apk free download<br />
48
- honor of kings play store link<br />
49
- honor of kings brazil download<br />
50
- honor of kings global version download<br />
51
- honor of kings android game play online<br />
52
- honor of kings best heroes and skins<br />
53
- honor of kings tips and tricks for beginners<br />
54
- honor of kings gameplay and review<br />
55
- honor of kings vs arena of valor<br />
56
- honor of kings latest update and patch notes<br />
57
- honor of kings system requirements and compatibility<br />
58
- honor of kings download size and installation guide<br />
59
- honor of kings official website and social media<br />
60
- honor of kings free gift codes and rewards<br />
61
- honor of kings in-app purchases and prices<br />
62
- honor of kings support and customer service<br />
63
- honor of kings data privacy and security practices<br />
64
- honor of kings community and forums<br />
65
- honor of kings esports and tournaments<br />
66
- honor of kings fan art and wallpapers<br />
67
- honor of kings mod apk download and features<br />
68
- honor of kings cheats and hacks<br />
69
- honor of kings bugs and issues report<br />
70
- honor of kings ratings and feedback<br />
71
- honor of kings alternatives and similar games<br />
72
- honor of kings history and development<br />
73
- honor of kings lore and story mode<br />
74
- honor of kings characters and voice actors</p>
75
- <h3>Game Tips</h3>
76
- <ul>
77
- <li><b>Choose Your Hero Wisely:</b> Honor of Kings has a wide range of heroes to choose from, each with their own strengths, weaknesses, roles, and playstyles. You should try out different heroes and find the ones that suit your preferences and skills. You should also consider the team composition and the enemy's heroes when choosing your hero. For example, if your team lacks a tank or a support, you might want to fill that role. If the enemy has a lot of magic damage dealers, you might want to pick a hero that has magic resistance or can disrupt their spells.</li>
78
- <li><b>Learn Your Hero's Skills:</b> Each hero has four skills: one passive skill and three active skills. You should learn how each skill works, what are its effects, cooldowns, mana costs, ranges, etc. You should also learn how to combo your skills for maximum damage and efficiency. For example, if you are playing as Sun Wukong, you can use your first skill to dash towards an enemy, then use your second skill to stun them, then use your third skill to deal massive damage and escape.</li>
79
- <li><b>Farm Gold and Experience:</b> Gold and experience are essential resources in Honor of Kings. You need gold to buy items that enhance your hero's stats and abilities. You need experience to level up your hero and unlock or upgrade your skills. You can farm gold and experience by killing minions, monsters, towers, or enemies. You should try to farm as much as possible without dying or neglecting your team's objectives.</li>
80
- </ul>
81
- <h3>Game Tricks</h3>
82
- <ul>
83
- <li><b>Use the Map and Vision:</b> The map is your best friend in Honor of Kings. It shows you where your allies and enemies are, where the objectives are, where the monsters are, etc. You should always keep an eye on the map and use it to plan your moves and avoid danger. You should also use vision items such as wards or scout traps to reveal hidden areas or enemies. Vision can give you an advantage in terms of information, ambushes, or escapes.</li>
84
- <li><b>Communicate with Your Team:</b> Honor of Kings is a team game that requires coordination and cooperation among teammates. You should communicate with your team using the chat system or the quick commands system. You can use them to share information, request assistance, signal your intentions, or give instructions, or praise your teammates. Communication can improve your team's synergy and performance.</li>
85
- <li><b>Be Flexible and Adaptive:</b> Honor of Kings is a dynamic game that can change quickly depending on various factors. You should be flexible and adaptive to the changing situations and conditions. You should be willing to adjust your hero's build, skills, or role according to the needs of your team or the enemy's strategy. You should also be ready to switch your tactics, objectives, or targets depending on the circumstances. For example, if you are losing a teamfight, you might want to retreat and regroup. If you see an opportunity to take a tower or a monster, you might want to seize it.</li>
86
- </ul>
87
- <h2>Conclusion and FAQs</h2>
88
- <p>Honor of Kings is a thrilling and addictive mobile MOBA game that you should not miss. It offers a variety of heroes, game modes, and features that will keep you entertained and challenged. It also allows you to play with your friends and compete with other players from around the world. If you are looking for a game that combines strategy, action, and fun, then download Honor of Kings today and join the millions of players who are already enjoying it.</p>
89
- <p>Here are some FAQs that you might have about Honor of Kings:</p>
90
- <h3>FAQs</h3>
91
- <ol>
92
- <li><b>Is Honor of Kings the same as Arena of Valor?</b><br>
93
- No, Honor of Kings and Arena of Valor are two different games developed by Tencent Games. Honor of Kings is the original version that was released in China in 2015, while Arena of Valor is the international version that was released in 2017. They have some similarities, such as the game mechanics and some heroes, but they also have many differences, such as the art style, the lore, the voiceovers, the skins, and some game modes.</li>
94
- <li><b>Is Honor of Kings pay-to-win?</b><br>
95
- No, Honor of Kings is not pay-to-win. All heroes can be unlocked by playing the game and earning gold or vouchers. The items that can be purchased with real money are mostly cosmetic, such as skins or effects. They do not affect the gameplay or give any unfair advantage. Honor of Kings is a game that rewards skill, strategy, and teamwork.</li>
96
- <li><b>How can I get more vouchers in Honor of Kings?</b><br>
97
- Vouchers are the premium currency in Honor of Kings that can be used to buy skins, heroes, or other items. You can get more vouchers by doing the following: <ul>
98
- <li>Purchasing them with real money.</li>
99
- <li>Completing daily tasks and achievements.</li>
100
- <li>Participating in events and promotions.</li>
101
- <li>Inviting your friends to play the game.</li>
102
- </ul></li>
103
- <li><b>How can I report a bug or a problem in Honor of Kings?</b><br>
104
- If you encounter a bug or a problem in Honor of Kings, you can report it by doing the following: <ul>
105
- <li>Tapping on the "Settings" icon on the top right corner of the main screen.</li>
106
- <li>Tapping on the "Customer Service" option.</li>
107
- <li>Tapping on the "Feedback" option.</li>
108
- <li>Filling out the form with your details and description of the issue.</li>
109
- <li>Tapping on the "Submit" button.</li>
110
- </ul></li>
111
- <li><b>How can I contact the Honor of Kings team or community?</b><br>
112
- If you want to contact the Honor of Kings team or community, you can do so by doing the following: <ul>
113
- <li>Visiting the official website: https://www.hokgame.com/</li>
114
- <li>Following the official social media accounts: Facebook (@HonorofKingsGlobal), Twitter (@HonorofKings), Instagram (@honorofkingsglobal), YouTube (Honor of Kings)</li>
115
- <li>Joining the official Discord server: https://discord.gg/hok</li>
116
- </ul></li>
117
- </ol>
118
- <br />
119
- <br />
spaces/AI-Dashboards/Streamlit-Markdown-ChatGPT-CCD/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Streamlit Markdown ChatGPT CCD
3
- emoji: 🌖
4
- colorFrom: gray
5
- colorTo: green
6
- sdk: streamlit
7
- sdk_version: 1.19.0
8
- app_file: app.py
9
- pinned: false
10
- license: mit
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/fastspeech/tts_modules.py DELETED
@@ -1,385 +0,0 @@
1
- import logging
2
- import math
3
-
4
- import torch
5
- import torch.nn as nn
6
- from torch.nn import functional as F
7
-
8
- from modules.commons.espnet_positional_embedding import RelPositionalEncoding
9
- from modules.commons.common_layers import SinusoidalPositionalEmbedding, Linear, EncSALayer, DecSALayer, BatchNorm1dTBC
10
- from utils.hparams import hparams
11
-
12
- DEFAULT_MAX_SOURCE_POSITIONS = 2000
13
- DEFAULT_MAX_TARGET_POSITIONS = 2000
14
-
15
-
16
- class TransformerEncoderLayer(nn.Module):
17
- def __init__(self, hidden_size, dropout, kernel_size=None, num_heads=2, norm='ln'):
18
- super().__init__()
19
- self.hidden_size = hidden_size
20
- self.dropout = dropout
21
- self.num_heads = num_heads
22
- self.op = EncSALayer(
23
- hidden_size, num_heads, dropout=dropout,
24
- attention_dropout=0.0, relu_dropout=dropout,
25
- kernel_size=kernel_size
26
- if kernel_size is not None else hparams['enc_ffn_kernel_size'],
27
- padding=hparams['ffn_padding'],
28
- norm=norm, act=hparams['ffn_act'])
29
-
30
- def forward(self, x, **kwargs):
31
- return self.op(x, **kwargs)
32
-
33
-
34
- ######################
35
- # fastspeech modules
36
- ######################
37
- class LayerNorm(torch.nn.LayerNorm):
38
- """Layer normalization module.
39
- :param int nout: output dim size
40
- :param int dim: dimension to be normalized
41
- """
42
-
43
- def __init__(self, nout, dim=-1, eps=1e-5):
44
- """Construct a LayerNorm object."""
45
- super(LayerNorm, self).__init__(nout, eps=eps)
46
- self.dim = dim
47
-
48
- def forward(self, x):
49
- """Apply layer normalization.
50
- :param torch.Tensor x: input tensor
51
- :return: layer normalized tensor
52
- :rtype torch.Tensor
53
- """
54
- if self.dim == -1:
55
- return super(LayerNorm, self).forward(x)
56
- return super(LayerNorm, self).forward(x.transpose(1, -1)).transpose(1, -1)
57
-
58
-
59
- class DurationPredictor(torch.nn.Module):
60
- """Duration predictor module.
61
- This is a module of duration predictor described in `FastSpeech: Fast, Robust and Controllable Text to Speech`_.
62
- The duration predictor predicts a duration of each frame in log domain from the hidden embeddings of encoder.
63
- .. _`FastSpeech: Fast, Robust and Controllable Text to Speech`:
64
- https://arxiv.org/pdf/1905.09263.pdf
65
- Note:
66
- The calculation domain of outputs differs between `forward` and `inference`. In `forward`,
67
- the outputs are calculated in log domain but in `inference`, those are calculated in linear domain.
68
- """
69
-
70
- def __init__(self, idim, odims=1, n_layers=2, n_chans=384, kernel_size=3, dropout_rate=0.1, offset=1.0, padding='SAME'):
71
- """Initialize duration predictor module.
72
- Args:
73
- idim (int): Input dimension.
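- odims (int, optional): Output dimension of the final linear layer.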
74
- n_layers (int, optional): Number of convolutional layers.
75
- n_chans (int, optional): Number of channels of convolutional layers.
76
- kernel_size (int, optional): Kernel size of convolutional layers.
77
- dropout_rate (float, optional): Dropout rate.
78
- offset (float, optional): Offset value to avoid nan in log domain.
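- padding (str, optional): 'SAME' for symmetric padding; any other value applies causal (left-only) padding.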
79
- """
80
- super(DurationPredictor, self).__init__()
81
- self.offset = offset
82
- self.conv = torch.nn.ModuleList()
83
- self.kernel_size = kernel_size
84
- self.padding = padding
85
- for idx in range(n_layers):
86
- in_chans = idim if idx == 0 else n_chans
87
- self.conv += [torch.nn.Sequential(
88
- torch.nn.ConstantPad1d(((kernel_size - 1) // 2, (kernel_size - 1) // 2)
89
- if padding == 'SAME'
90
- else (kernel_size - 1, 0), 0),
91
- torch.nn.Conv1d(in_chans, n_chans, kernel_size, stride=1, padding=0),
92
- torch.nn.ReLU(),
93
- LayerNorm(n_chans, dim=1),
94
- torch.nn.Dropout(dropout_rate)
95
- )]
96
- self.linear = torch.nn.Linear(n_chans, odims)
97
-
98
- def _forward(self, xs, x_masks=None, is_inference=False):
99
- xs = xs.transpose(1, -1) # (B, idim, Tmax)
100
- for f in self.conv:
101
- xs = f(xs) # (B, C, Tmax)
102
- if x_masks is not None:
103
- xs = xs * (1 - x_masks.float())[:, None, :]
104
-
105
- xs = self.linear(xs.transpose(1, -1)) # [B, T, C]
106
- xs = xs * (1 - x_masks.float())[:, :, None] # (B, T, C)
107
- if is_inference:
108
- return self.out2dur(xs), xs
109
- else:
110
- if hparams['dur_loss'] in ['mse']:
111
- xs = xs.squeeze(-1) # (B, Tmax)
112
- return xs
113
-
114
- def out2dur(self, xs):
115
- if hparams['dur_loss'] in ['mse']:
116
- # NOTE: calculate in log domain
117
- xs = xs.squeeze(-1) # (B, Tmax)
118
- dur = torch.clamp(torch.round(xs.exp() - self.offset), min=0).long() # avoid negative value
119
- elif hparams['dur_loss'] == 'mog':
120
- return NotImplementedError
121
- elif hparams['dur_loss'] == 'crf':
122
- dur = torch.LongTensor(self.crf.decode(xs)).cuda()
123
- return dur
124
-
125
- def forward(self, xs, x_masks=None):
126
- """Calculate forward propagation.
127
- Args:
128
- xs (Tensor): Batch of input sequences (B, Tmax, idim).
129
- x_masks (ByteTensor, optional): Batch of masks indicating padded part (B, Tmax).
130
- Returns:
131
- Tensor: Batch of predicted durations in log domain (B, Tmax).
132
- """
133
- return self._forward(xs, x_masks, False)
134
-
135
- def inference(self, xs, x_masks=None):
136
- """Inference duration.
137
- Args:
138
- xs (Tensor): Batch of input sequences (B, Tmax, idim).
139
- x_masks (ByteTensor, optional): Batch of masks indicating padded part (B, Tmax).
140
- Returns:
141
- LongTensor: Batch of predicted durations in linear domain (B, Tmax).
142
- """
143
- return self._forward(xs, x_masks, True)
144
-
145
- class SyntaDurationPredictor(torch.nn.Module):
146
- def __init__(self, idim, n_layers=2, n_chans=384, kernel_size=3, dropout_rate=0.1, offset=1.0):
147
- super(SyntaDurationPredictor, self).__init__()
148
- from modules.syntaspeech.syntactic_graph_encoder import GraphAuxEnc
149
- self.graph_encoder = GraphAuxEnc(in_dim=idim, hid_dim=idim, out_dim=idim)
150
- self.offset = offset
151
- self.conv = torch.nn.ModuleList()
152
- self.kernel_size = kernel_size
153
- for idx in range(n_layers):
154
- in_chans = idim if idx == 0 else n_chans
155
- self.conv += [torch.nn.Sequential(
156
- torch.nn.Conv1d(in_chans, n_chans, kernel_size, stride=1, padding=kernel_size // 2),
157
- torch.nn.ReLU(),
158
- LayerNorm(n_chans, dim=1),
159
- torch.nn.Dropout(dropout_rate)
160
- )]
161
- self.linear = nn.Sequential(torch.nn.Linear(n_chans, 1), nn.Softplus())
162
-
163
- def forward(self, x, x_padding=None, ph2word=None, graph_lst=None, etypes_lst=None):
164
- x = x.transpose(1, -1) # (B, idim, Tmax)
165
- assert ph2word is not None and graph_lst is not None and etypes_lst is not None
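- # add the auxiliary features from the syntactic graph encoder onto the phoneme-level hidden states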
166
- x_graph = self.graph_encoder(graph_lst, x, ph2word, etypes_lst)
167
- x = x + x_graph * 1.
168
-
169
- for f in self.conv:
170
- x = f(x) # (B, C, Tmax)
171
- if x_padding is not None:
172
- x = x * (1 - x_padding.float())[:, None, :]
173
-
174
- x = self.linear(x.transpose(1, -1)) # [B, T, C]
175
- x = x * (1 - x_padding.float())[:, :, None] # (B, T, C)
176
- x = x[..., 0] # (B, Tmax)
177
- return x
178
-
179
- class LengthRegulator(torch.nn.Module):
180
- def __init__(self, pad_value=0.0):
181
- super(LengthRegulator, self).__init__()
182
- self.pad_value = pad_value
183
-
184
- def forward(self, dur, dur_padding=None, alpha=1.0):
185
- """
186
- Example (no batch dim version):
187
- 1. dur = [2,2,3]
188
- 2. token_idx = [[1],[2],[3]], dur_cumsum = [2,4,7], dur_cumsum_prev = [0,2,4]
189
- 3. token_mask = [[1,1,0,0,0,0,0],
190
- [0,0,1,1,0,0,0],
191
- [0,0,0,0,1,1,1]]
192
- 4. token_idx * token_mask = [[1,1,0,0,0,0,0],
193
- [0,0,2,2,0,0,0],
194
- [0,0,0,0,3,3,3]]
195
- 5. (token_idx * token_mask).sum(0) = [1,1,2,2,3,3,3]
196
-
197
- :param dur: Batch of durations of each token (B, T_txt)
199
- :param dur_padding: Batch of padding of each token (B, T_txt)
199
- :param alpha: duration rescale coefficient
200
- :return:
201
- mel2ph (B, T_speech)
202
- """
203
- assert alpha > 0
204
- dur = torch.round(dur.float() * alpha).long()
205
- if dur_padding is not None:
206
- dur = dur * (1 - dur_padding.long())
207
- token_idx = torch.arange(1, dur.shape[1] + 1)[None, :, None].to(dur.device)
208
- dur_cumsum = torch.cumsum(dur, 1)
209
- dur_cumsum_prev = F.pad(dur_cumsum, [1, -1], mode='constant', value=0)
210
-
211
- pos_idx = torch.arange(dur.sum(-1).max())[None, None].to(dur.device)
212
- token_mask = (pos_idx >= dur_cumsum_prev[:, :, None]) & (pos_idx < dur_cumsum[:, :, None])
213
- mel2ph = (token_idx * token_mask.long()).sum(1)
214
- return mel2ph
215
-
216
-
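The docstring example above can be reproduced as a small standalone sketch (plain PyTorch, illustrative values only), expanding the durations [2, 2, 3] into the frame-to-token map [1, 1, 2, 2, 3, 3, 3]:

import torch
import torch.nn.functional as F

dur = torch.tensor([[2, 2, 3]])                                          # (B=1, T_txt=3)
token_idx = torch.arange(1, dur.shape[1] + 1)[None, :, None]             # (1, T_txt, 1)
dur_cumsum = torch.cumsum(dur, 1)                                        # [[2, 4, 7]]
dur_cumsum_prev = F.pad(dur_cumsum, [1, -1], mode='constant', value=0)   # [[0, 2, 4]]
pos_idx = torch.arange(dur.sum(-1).max())[None, None]                    # (1, 1, T_mel=7)
token_mask = (pos_idx >= dur_cumsum_prev[:, :, None]) & (pos_idx < dur_cumsum[:, :, None])
mel2ph = (token_idx * token_mask.long()).sum(1)
print(mel2ph)  # tensor([[1, 1, 2, 2, 3, 3, 3]])
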
217
- class PitchPredictor(torch.nn.Module):
218
- def __init__(self, idim, n_layers=5, n_chans=384, odim=2, kernel_size=5,
219
- dropout_rate=0.1, padding='SAME'):
220
- """Initilize pitch predictor module.
221
- Args:
222
- idim (int): Input dimension.
223
- n_layers (int, optional): Number of convolutional layers.
224
- n_chans (int, optional): Number of channels of convolutional layers.
225
- kernel_size (int, optional): Kernel size of convolutional layers.
226
- dropout_rate (float, optional): Dropout rate.
227
- """
228
- super(PitchPredictor, self).__init__()
229
- self.conv = torch.nn.ModuleList()
230
- self.kernel_size = kernel_size
231
- self.padding = padding
232
- for idx in range(n_layers):
233
- in_chans = idim if idx == 0 else n_chans
234
- self.conv += [torch.nn.Sequential(
235
- torch.nn.ConstantPad1d(((kernel_size - 1) // 2, (kernel_size - 1) // 2)
236
- if padding == 'SAME'
237
- else (kernel_size - 1, 0), 0),
238
- torch.nn.Conv1d(in_chans, n_chans, kernel_size, stride=1, padding=0),
239
- torch.nn.ReLU(),
240
- LayerNorm(n_chans, dim=1),
241
- torch.nn.Dropout(dropout_rate)
242
- )]
243
- self.linear = torch.nn.Linear(n_chans, odim)
244
- self.embed_positions = SinusoidalPositionalEmbedding(idim, 0, init_size=4096)
245
- self.pos_embed_alpha = nn.Parameter(torch.Tensor([1]))
246
-
247
- def forward(self, xs):
248
- """
249
-
250
- :param xs: [B, T, H]
251
- :return: [B, T, H]
252
- """
253
- positions = self.pos_embed_alpha * self.embed_positions(xs[..., 0])
254
- xs = xs + positions
255
- xs = xs.transpose(1, -1) # (B, idim, Tmax)
256
- for f in self.conv:
257
- xs = f(xs) # (B, C, Tmax)
258
- # NOTE: calculate in log domain
259
- xs = self.linear(xs.transpose(1, -1)) # (B, Tmax, H)
260
- return xs
261
-
262
-
263
- class EnergyPredictor(PitchPredictor):
264
- pass
265
-
266
-
267
- def mel2ph_to_dur(mel2ph, T_txt, max_dur=None):
268
- B, _ = mel2ph.shape
269
- dur = mel2ph.new_zeros(B, T_txt + 1).scatter_add(1, mel2ph, torch.ones_like(mel2ph))
270
- dur = dur[:, 1:]
271
- if max_dur is not None:
272
- dur = dur.clamp(max=max_dur)
273
- return dur
274
-
275
-
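A quick standalone check (plain PyTorch, illustrative values) that mel2ph_to_dur above inverts the length regulator: counting how many frames map to each token recovers the original durations, with the index-0 slot absorbing padding frames before being dropped.

import torch

mel2ph = torch.tensor([[1, 1, 2, 2, 3, 3, 3]])   # frame-to-token map for durations [2, 2, 3]
T_txt = 3
dur = mel2ph.new_zeros(1, T_txt + 1).scatter_add(1, mel2ph, torch.ones_like(mel2ph))
dur = dur[:, 1:]                                  # drop the slot reserved for padding (token 0)
print(dur)  # tensor([[2, 2, 3]])
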
276
- class FFTBlocks(nn.Module):
277
- def __init__(self, hidden_size, num_layers, ffn_kernel_size=9, dropout=None, num_heads=2,
278
- use_pos_embed=True, use_last_norm=True, norm='ln', use_pos_embed_alpha=True):
279
- super().__init__()
280
- self.num_layers = num_layers
281
- embed_dim = self.hidden_size = hidden_size
282
- self.dropout = dropout if dropout is not None else hparams['dropout']
283
- self.use_pos_embed = use_pos_embed
284
- self.use_last_norm = use_last_norm
285
- if use_pos_embed:
286
- self.max_source_positions = DEFAULT_MAX_TARGET_POSITIONS
287
- self.padding_idx = 0
288
- self.pos_embed_alpha = nn.Parameter(torch.Tensor([1])) if use_pos_embed_alpha else 1
289
- self.embed_positions = SinusoidalPositionalEmbedding(
290
- embed_dim, self.padding_idx, init_size=DEFAULT_MAX_TARGET_POSITIONS,
291
- )
292
-
293
- self.layers = nn.ModuleList([])
294
- self.layers.extend([
295
- TransformerEncoderLayer(self.hidden_size, self.dropout,
296
- kernel_size=ffn_kernel_size, num_heads=num_heads)
297
- for _ in range(self.num_layers)
298
- ])
299
- if self.use_last_norm:
300
- if norm == 'ln':
301
- self.layer_norm = nn.LayerNorm(embed_dim)
302
- elif norm == 'bn':
303
- self.layer_norm = BatchNorm1dTBC(embed_dim)
304
- else:
305
- self.layer_norm = None
306
-
307
- def forward(self, x, padding_mask=None, attn_mask=None, return_hiddens=False):
308
- """
309
- :param x: [B, T, C]
310
- :param padding_mask: [B, T]
311
- :return: [B, T, C] or [L, B, T, C]
312
- """
313
- padding_mask = x.abs().sum(-1).eq(0).data if padding_mask is None else padding_mask
314
- nonpadding_mask_TB = 1 - padding_mask.transpose(0, 1).float()[:, :, None] # [T, B, 1]
315
- if self.use_pos_embed:
316
- positions = self.pos_embed_alpha * self.embed_positions(x[..., 0])
317
- x = x + positions
318
- x = F.dropout(x, p=self.dropout, training=self.training)
319
- # B x T x C -> T x B x C
320
- x = x.transpose(0, 1) * nonpadding_mask_TB
321
- hiddens = []
322
- for layer in self.layers:
323
- x = layer(x, encoder_padding_mask=padding_mask, attn_mask=attn_mask) * nonpadding_mask_TB
324
- hiddens.append(x)
325
- if self.use_last_norm:
326
- x = self.layer_norm(x) * nonpadding_mask_TB
327
- if return_hiddens:
328
- x = torch.stack(hiddens, 0) # [L, T, B, C]
329
- x = x.transpose(1, 2) # [L, B, T, C]
330
- else:
331
- x = x.transpose(0, 1) # [B, T, C]
332
- return x
333
-
334
-
335
- class FastspeechEncoder(FFTBlocks):
336
- def __init__(self, embed_tokens, hidden_size=None, num_layers=None, kernel_size=None, num_heads=2):
337
- hidden_size = hparams['hidden_size'] if hidden_size is None else hidden_size
338
- kernel_size = hparams['enc_ffn_kernel_size'] if kernel_size is None else kernel_size
339
- num_layers = hparams['dec_layers'] if num_layers is None else num_layers
340
- super().__init__(hidden_size, num_layers, kernel_size, num_heads=num_heads,
341
- use_pos_embed=False) # use_pos_embed_alpha for compatibility
342
- self.embed_tokens = embed_tokens
343
- self.embed_scale = math.sqrt(hidden_size)
344
- self.padding_idx = 0
345
- if hparams.get('rel_pos') is not None and hparams['rel_pos']:
346
- self.embed_positions = RelPositionalEncoding(hidden_size, dropout_rate=0.0)
347
- else:
348
- self.embed_positions = SinusoidalPositionalEmbedding(
349
- hidden_size, self.padding_idx, init_size=DEFAULT_MAX_TARGET_POSITIONS,
350
- )
351
-
352
- def forward(self, txt_tokens):
353
- """
354
-
355
- :param txt_tokens: [B, T]
356
- :return: {
357
- 'encoder_out': [T x B x C]
358
- }
359
- """
360
- encoder_padding_mask = txt_tokens.eq(self.padding_idx).data
361
- x = self.forward_embedding(txt_tokens) # [B, T, H]
362
- x = super(FastspeechEncoder, self).forward(x, encoder_padding_mask)
363
- return x
364
-
365
- def forward_embedding(self, txt_tokens):
366
- # embed tokens and positions
367
- x = self.embed_scale * self.embed_tokens(txt_tokens)
368
- if hparams['use_pos_embed']:
369
- if hparams.get('rel_pos') is not None and hparams['rel_pos']:
370
- x = self.embed_positions(x)
371
- else:
372
- positions = self.embed_positions(txt_tokens)
373
- x = x + positions
374
- x = F.dropout(x, p=self.dropout, training=self.training)
375
- return x
376
-
377
-
378
- class FastspeechDecoder(FFTBlocks):
379
- def __init__(self, hidden_size=None, num_layers=None, kernel_size=None, num_heads=None):
380
- num_heads = hparams['num_heads'] if num_heads is None else num_heads
381
- hidden_size = hparams['hidden_size'] if hidden_size is None else hidden_size
382
- kernel_size = hparams['dec_ffn_kernel_size'] if kernel_size is None else kernel_size
383
- num_layers = hparams['dec_layers'] if num_layers is None else num_layers
384
- super().__init__(hidden_size, num_layers, kernel_size, num_heads=num_heads)
385
-
spaces/AIGC-Audio/AudioGPT/NeuralSeq/tasks/tts/tts_base.py DELETED
@@ -1,305 +0,0 @@
1
- import filecmp
2
-
3
- import matplotlib
4
-
5
- from utils.plot import spec_to_figure
6
-
7
- matplotlib.use('Agg')
8
-
9
- from data_gen.tts.data_gen_utils import get_pitch
10
- from modules.fastspeech.tts_modules import mel2ph_to_dur
11
- from tasks.tts.dataset_utils import BaseTTSDataset
12
- from utils.tts_utils import sequence_mask
13
- from multiprocessing.pool import Pool
14
- from tasks.base_task import data_loader, BaseConcatDataset
15
- from utils.common_schedulers import RSQRTSchedule, NoneSchedule
16
- from vocoders.base_vocoder import get_vocoder_cls, BaseVocoder
17
- import os
18
- import numpy as np
19
- from tqdm import tqdm
20
- import torch.distributed as dist
21
- from tasks.base_task import BaseTask
22
- from utils.hparams import hparams
23
- from utils.text_encoder import TokenTextEncoder
24
- import json
25
- import matplotlib.pyplot as plt
26
- import torch
27
- import torch.optim
28
- import torch.utils.data
29
- import utils
30
- from utils import audio
31
- import pandas as pd
32
-
33
-
34
- class TTSBaseTask(BaseTask):
35
- def __init__(self, *args, **kwargs):
36
- super().__init__(*args, **kwargs)
37
- self.dataset_cls = BaseTTSDataset
38
- self.max_tokens = hparams['max_tokens']
39
- self.max_sentences = hparams['max_sentences']
40
- self.max_valid_tokens = hparams['max_valid_tokens']
41
- if self.max_valid_tokens == -1:
42
- hparams['max_valid_tokens'] = self.max_valid_tokens = self.max_tokens
43
- self.max_valid_sentences = hparams['max_valid_sentences']
44
- if self.max_valid_sentences == -1:
45
- hparams['max_valid_sentences'] = self.max_valid_sentences = self.max_sentences
46
- self.vocoder = None
47
- self.phone_encoder = self.build_phone_encoder(hparams['binary_data_dir'])
48
- self.padding_idx = self.phone_encoder.pad()
49
- self.eos_idx = self.phone_encoder.eos()
50
- self.seg_idx = self.phone_encoder.seg()
51
- self.saving_result_pool = None
52
- self.saving_results_futures = None
53
- self.stats = {}
54
-
55
- @data_loader
56
- def train_dataloader(self):
57
- if hparams['train_sets'] != '':
58
- train_sets = hparams['train_sets'].split("|")
59
- # check if all train_sets have the same spk map and dictionary
60
- binary_data_dir = hparams['binary_data_dir']
61
- file_to_cmp = ['phone_set.json']
62
- if os.path.exists(f'{binary_data_dir}/word_set.json'):
63
- file_to_cmp.append('word_set.json')
64
- if hparams['use_spk_id']:
65
- file_to_cmp.append('spk_map.json')
66
- for f in file_to_cmp:
67
- for ds_name in train_sets:
68
- base_file = os.path.join(binary_data_dir, f)
69
- ds_file = os.path.join(ds_name, f)
70
- assert filecmp.cmp(base_file, ds_file), \
71
- f'{f} in {ds_name} is not same with that in {binary_data_dir}.'
72
- train_dataset = BaseConcatDataset([
73
- self.dataset_cls(prefix='train', shuffle=True, data_dir=ds_name) for ds_name in train_sets])
74
- else:
75
- train_dataset = self.dataset_cls(prefix=hparams['train_set_name'], shuffle=True)
76
- return self.build_dataloader(train_dataset, True, self.max_tokens, self.max_sentences,
77
- endless=hparams['endless_ds'])
78
-
79
- @data_loader
80
- def val_dataloader(self):
81
- valid_dataset = self.dataset_cls(prefix=hparams['valid_set_name'], shuffle=False)
82
- return self.build_dataloader(valid_dataset, False, self.max_valid_tokens, self.max_valid_sentences)
83
-
84
- @data_loader
85
- def test_dataloader(self):
86
- test_dataset = self.dataset_cls(prefix=hparams['test_set_name'], shuffle=False)
87
- self.test_dl = self.build_dataloader(
88
- test_dataset, False, self.max_valid_tokens,
89
- self.max_valid_sentences, batch_by_size=False)
90
- return self.test_dl
91
-
92
- def build_dataloader(self, dataset, shuffle, max_tokens=None, max_sentences=None,
93
- required_batch_size_multiple=-1, endless=False, batch_by_size=True):
94
- devices_cnt = torch.cuda.device_count()
95
- if devices_cnt == 0:
96
- devices_cnt = 1
97
- if required_batch_size_multiple == -1:
98
- required_batch_size_multiple = devices_cnt
99
-
100
- def shuffle_batches(batches):
101
- np.random.shuffle(batches)
102
- return batches
103
-
104
- if max_tokens is not None:
105
- max_tokens *= devices_cnt
106
- if max_sentences is not None:
107
- max_sentences *= devices_cnt
108
- indices = dataset.ordered_indices()
109
- if batch_by_size:
110
- batch_sampler = utils.batch_by_size(
111
- indices, dataset.num_tokens, max_tokens=max_tokens, max_sentences=max_sentences,
112
- required_batch_size_multiple=required_batch_size_multiple,
113
- )
114
- else:
115
- batch_sampler = []
116
- for i in range(0, len(indices), max_sentences):
117
- batch_sampler.append(indices[i:i + max_sentences])
118
-
119
- if shuffle:
120
- batches = shuffle_batches(list(batch_sampler))
121
- if endless:
122
- batches = [b for _ in range(1000) for b in shuffle_batches(list(batch_sampler))]
123
- else:
124
- batches = batch_sampler
125
- if endless:
126
- batches = [b for _ in range(1000) for b in batches]
127
- num_workers = dataset.num_workers
128
- if self.trainer.use_ddp:
129
- num_replicas = dist.get_world_size()
130
- rank = dist.get_rank()
131
- batches = [x[rank::num_replicas] for x in batches if len(x) % num_replicas == 0]
132
- return torch.utils.data.DataLoader(dataset,
133
- collate_fn=dataset.collater,
134
- batch_sampler=batches,
135
- num_workers=num_workers,
136
- pin_memory=False)
137
-
138
- def build_phone_encoder(self, data_dir):
139
- phone_list_file = os.path.join(data_dir, 'phone_set.json')
140
- phone_list = json.load(open(phone_list_file))
141
- return TokenTextEncoder(None, vocab_list=phone_list, replace_oov=',')
142
-
143
- def build_scheduler(self, optimizer):
144
- if hparams['scheduler'] == 'rsqrt':
145
- return RSQRTSchedule(optimizer)
146
- else:
147
- return NoneSchedule(optimizer)
148
-
149
- def build_optimizer(self, model):
150
- self.optimizer = optimizer = torch.optim.AdamW(
151
- model.parameters(),
152
- lr=hparams['lr'],
153
- betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']),
154
- weight_decay=hparams['weight_decay'])
155
- return optimizer
156
-
157
- def plot_mel(self, batch_idx, spec, spec_out, name=None):
158
- spec_cat = torch.cat([spec, spec_out], -1)
159
- name = f'mel_{batch_idx}' if name is None else name
160
- vmin = hparams['mel_vmin']
161
- vmax = hparams['mel_vmax']
162
- self.logger.add_figure(name, spec_to_figure(spec_cat[0], vmin, vmax), self.global_step)
163
-
164
- def test_start(self):
165
- self.saving_result_pool = Pool(min(int(os.getenv('N_PROC', os.cpu_count())), 16))
166
- self.saving_results_futures = []
167
- self.results_id = 0
168
- self.gen_dir = os.path.join(
169
- hparams['work_dir'],
170
- f'generated_{self.trainer.global_step}_{hparams["gen_dir_name"]}')
171
- self.vocoder: BaseVocoder = get_vocoder_cls(hparams)()
172
-
173
- def after_infer(self, predictions, sil_start_frame=0):
174
- predictions = utils.unpack_dict_to_list(predictions)
175
- assert len(predictions) == 1, 'Only support batch_size=1 in inference.'
176
- prediction = predictions[0]
177
- prediction = utils.tensors_to_np(prediction)
178
- item_name = prediction.get('item_name')
179
- text = prediction.get('text')
180
- ph_tokens = prediction.get('txt_tokens')
181
- mel_gt = prediction["mels"]
182
- mel2ph_gt = prediction.get("mel2ph")
183
- mel2ph_gt = mel2ph_gt if mel2ph_gt is not None else None
184
- mel_pred = prediction["outputs"]
185
- mel2ph_pred = prediction.get("mel2ph_pred")
186
- f0_gt = prediction.get("f0")
187
- f0_pred = prediction.get("f0_pred")
188
-
189
- str_phs = None
190
- if self.phone_encoder is not None and 'txt_tokens' in prediction:
191
- str_phs = self.phone_encoder.decode(prediction['txt_tokens'], strip_padding=True)
192
-
193
- if 'encdec_attn' in prediction:
194
- encdec_attn = prediction['encdec_attn']
195
- encdec_attn = encdec_attn[encdec_attn.max(-1).sum(-1).argmax(-1)]
196
- txt_lengths = prediction.get('txt_lengths')
197
- encdec_attn = encdec_attn.T[:txt_lengths, :len(mel_gt)]
198
- else:
199
- encdec_attn = None
200
-
201
- wav_pred = self.vocoder.spec2wav(mel_pred, f0=f0_pred)
202
- wav_pred[:sil_start_frame * hparams['hop_size']] = 0
203
- gen_dir = self.gen_dir
204
- base_fn = f'[{self.results_id:06d}][{item_name}][%s]'
205
- # if text is not None:
206
- # base_fn += text.replace(":", "%3A")[:80]
207
- base_fn = base_fn.replace(' ', '_')
208
- if not hparams['profile_infer']:
209
- os.makedirs(gen_dir, exist_ok=True)
210
- os.makedirs(f'{gen_dir}/wavs', exist_ok=True)
211
- os.makedirs(f'{gen_dir}/plot', exist_ok=True)
212
- if hparams.get('save_mel_npy', False):
213
- os.makedirs(f'{gen_dir}/npy', exist_ok=True)
214
- if 'encdec_attn' in prediction:
215
- os.makedirs(f'{gen_dir}/attn_plot', exist_ok=True)
216
- self.saving_results_futures.append(
217
- self.saving_result_pool.apply_async(self.save_result, args=[
218
- wav_pred, mel_pred, base_fn % 'P', gen_dir, str_phs, mel2ph_pred, encdec_attn]))
219
-
220
- if mel_gt is not None and hparams['save_gt']:
221
- wav_gt = self.vocoder.spec2wav(mel_gt, f0=f0_gt)
222
- self.saving_results_futures.append(
223
- self.saving_result_pool.apply_async(self.save_result, args=[
224
- wav_gt, mel_gt, base_fn % 'G', gen_dir, str_phs, mel2ph_gt]))
225
- if hparams['save_f0']:
226
- import matplotlib.pyplot as plt
227
- f0_pred_, _ = get_pitch(wav_pred, mel_pred, hparams)
228
- f0_gt_, _ = get_pitch(wav_gt, mel_gt, hparams)
229
- fig = plt.figure()
230
- plt.plot(f0_pred_, label=r'$\hat{f_0}$')
231
- plt.plot(f0_gt_, label=r'$f_0$')
232
- plt.legend()
233
- plt.tight_layout()
234
- plt.savefig(f'{gen_dir}/plot/[F0][{item_name}]{text}.png', format='png')
235
- plt.close(fig)
236
- print(f"Pred_shape: {mel_pred.shape}, gt_shape: {mel_gt.shape}")
237
- self.results_id += 1
238
- return {
239
- 'item_name': item_name,
240
- 'text': text,
241
- 'ph_tokens': self.phone_encoder.decode(ph_tokens.tolist()),
242
- 'wav_fn_pred': base_fn % 'P',
243
- 'wav_fn_gt': base_fn % 'G',
244
- }
245
-
246
- @staticmethod
247
- def save_result(wav_out, mel, base_fn, gen_dir, str_phs=None, mel2ph=None, alignment=None):
248
- audio.save_wav(wav_out, f'{gen_dir}/wavs/{base_fn}.wav', hparams['audio_sample_rate'],
249
- norm=hparams['out_wav_norm'])
250
- fig = plt.figure(figsize=(14, 10))
251
- spec_vmin = hparams['mel_vmin']
252
- spec_vmax = hparams['mel_vmax']
253
- heatmap = plt.pcolor(mel.T, vmin=spec_vmin, vmax=spec_vmax)
254
- fig.colorbar(heatmap)
255
- f0, _ = get_pitch(wav_out, mel, hparams)
256
- f0 = f0 / 10 * (f0 > 0)
257
- plt.plot(f0, c='white', linewidth=1, alpha=0.6)
258
- if mel2ph is not None and str_phs is not None:
259
- decoded_txt = str_phs.split(" ")
260
- dur = mel2ph_to_dur(torch.LongTensor(mel2ph)[None, :], len(decoded_txt))[0].numpy()
261
- dur = [0] + list(np.cumsum(dur))
262
- for i in range(len(dur) - 1):
263
- shift = (i % 20) + 1
264
- plt.text(dur[i], shift, decoded_txt[i])
265
- plt.hlines(shift, dur[i], dur[i + 1], colors='b' if decoded_txt[i] != '|' else 'black')
266
- plt.vlines(dur[i], 0, 5, colors='b' if decoded_txt[i] != '|' else 'black',
267
- alpha=1, linewidth=1)
268
- plt.tight_layout()
269
- plt.savefig(f'{gen_dir}/plot/{base_fn}.png', format='png')
270
- plt.close(fig)
271
- if hparams.get('save_mel_npy', False):
272
- np.save(f'{gen_dir}/npy/{base_fn}', mel)
273
- if alignment is not None:
274
- fig, ax = plt.subplots(figsize=(12, 16))
275
- im = ax.imshow(alignment, aspect='auto', origin='lower',
276
- interpolation='none')
277
- decoded_txt = str_phs.split(" ")
278
- ax.set_yticks(np.arange(len(decoded_txt)))
279
- ax.set_yticklabels(list(decoded_txt), fontsize=6)
280
- fig.colorbar(im, ax=ax)
281
- fig.savefig(f'{gen_dir}/attn_plot/{base_fn}_attn.png', format='png')
282
- plt.close(fig)
283
-
284
- def test_end(self, outputs):
285
- pd.DataFrame(outputs).to_csv(f'{self.gen_dir}/meta.csv')
286
- self.saving_result_pool.close()
287
- [f.get() for f in tqdm(self.saving_results_futures)]
288
- self.saving_result_pool.join()
289
- return {}
290
-
291
- ##########
292
- # utils
293
- ##########
294
- def weights_nonzero_speech(self, target):
295
- # target : B x T x mel
296
- # Assign weight 1.0 to all labels except for padding (id=0).
297
- dim = target.size(-1)
298
- return target.abs().sum(-1, keepdim=True).ne(0).float().repeat(1, 1, dim)
299
-
300
- def make_stop_target(self, target):
301
- # target : B x T x mel
302
- seq_mask = target.abs().sum(-1).ne(0).float()
303
- seq_length = seq_mask.sum(1)
304
- mask_r = 1 - sequence_mask(seq_length - 1, target.size(1)).float()
305
- return seq_mask, mask_r
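A minimal standalone illustration (plain PyTorch, toy values) of the masking convention used by weights_nonzero_speech and make_stop_target above: frames whose mel vector is all zeros are treated as padding and receive weight 0.

import torch

target = torch.zeros(2, 4, 3)     # (B=2, T=4, n_mels=3), zero-padded
target[0, :4] = 1.0               # first item: 4 valid frames
target[1, :2] = 1.0               # second item: 2 valid frames

seq_mask = target.abs().sum(-1).ne(0).float()   # (B, T): 1 for real frames, 0 for padding
print(seq_mask)
# tensor([[1., 1., 1., 1.],
#         [1., 1., 0., 0.]])
print(seq_mask.sum(1))            # per-item lengths: tensor([4., 2.])
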
 
spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/models/decoder.py DELETED
@@ -1,746 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
-
3
- import math
4
- from functools import partial
5
-
6
- import numpy as np
7
- import torch
8
- import torch.nn as nn
9
-
10
- from .utils import generate_length_mask, init, PositionalEncoding
11
-
12
-
13
- class BaseDecoder(nn.Module):
14
- """
15
- Take word/audio embeddings and output the next word probs
16
- Base decoder, cannot be called directly
17
- All decoders should inherit from this class
18
- """
19
-
20
- def __init__(self, emb_dim, vocab_size, fc_emb_dim,
21
- attn_emb_dim, dropout=0.2):
22
- super().__init__()
23
- self.emb_dim = emb_dim
24
- self.vocab_size = vocab_size
25
- self.fc_emb_dim = fc_emb_dim
26
- self.attn_emb_dim = attn_emb_dim
27
- self.word_embedding = nn.Embedding(vocab_size, emb_dim)
28
- self.in_dropout = nn.Dropout(dropout)
29
-
30
- def forward(self, x):
31
- raise NotImplementedError
32
-
33
- def load_word_embedding(self, weight, freeze=True):
34
- embedding = torch.as_tensor(np.load(weight)).float()  # from_pretrained expects a FloatTensor, not a numpy array
35
- assert embedding.shape[0] == self.vocab_size, "vocabulary size mismatch"
36
- assert embedding.shape[1] == self.emb_dim, "embed size mismatch"
37
-
38
- # embeddings = torch.as_tensor(embeddings).float()
39
- # self.word_embeddings.weight = nn.Parameter(embeddings)
40
- # for para in self.word_embeddings.parameters():
41
- # para.requires_grad = tune
42
- self.word_embedding = nn.Embedding.from_pretrained(embedding,
43
- freeze=freeze)
44
-
45
-
46
- class RnnDecoder(BaseDecoder):
47
-
48
- def __init__(self, emb_dim, vocab_size, fc_emb_dim, attn_emb_dim,
49
- dropout, d_model, **kwargs):
50
- super().__init__(emb_dim, vocab_size, fc_emb_dim, attn_emb_dim,
51
- dropout,)
52
- self.d_model = d_model
53
- self.num_layers = kwargs.get('num_layers', 1)
54
- self.bidirectional = kwargs.get('bidirectional', False)
55
- self.rnn_type = kwargs.get('rnn_type', "GRU")
56
- self.classifier = nn.Linear(
57
- self.d_model * (self.bidirectional + 1), vocab_size)
58
-
59
- def forward(self, x):
60
- raise NotImplementedError
61
-
62
- def init_hidden(self, bs, device):
63
- num_dire = self.bidirectional + 1
64
- n_layer = self.num_layers
65
- hid_dim = self.d_model
66
- if self.rnn_type == "LSTM":
67
- return (torch.zeros(num_dire * n_layer, bs, hid_dim).to(device),
68
- torch.zeros(num_dire * n_layer, bs, hid_dim).to(device))
69
- else:
70
- return torch.zeros(num_dire * n_layer, bs, hid_dim).to(device)
71
-
72
-
73
- class RnnFcDecoder(RnnDecoder):
74
-
75
- def __init__(self, emb_dim, vocab_size, fc_emb_dim, attn_emb_dim, dropout, d_model, **kwargs):
76
- super().__init__(emb_dim, vocab_size, fc_emb_dim, attn_emb_dim, dropout, d_model, **kwargs)
77
- self.model = getattr(nn, self.rnn_type)(
78
- input_size=self.emb_dim * 2,
79
- hidden_size=self.d_model,
80
- batch_first=True,
81
- num_layers=self.num_layers,
82
- bidirectional=self.bidirectional)
83
- self.fc_proj = nn.Linear(self.fc_emb_dim, self.emb_dim)
84
- self.apply(init)
85
-
86
- def forward(self, input_dict):
87
- word = input_dict["word"]
88
- state = input_dict.get("state", None)
89
- fc_emb = input_dict["fc_emb"]
90
-
91
- word = word.to(fc_emb.device)
92
- embed = self.in_dropout(self.word_embedding(word))
93
-
94
- p_fc_emb = self.fc_proj(fc_emb)
95
- # embed: [N, T, embed_size]
96
- embed = torch.cat((embed, p_fc_emb), dim=-1)
97
-
98
- out, state = self.model(embed, state)
99
- # out: [N, T, hs], states: [num_layers * num_dire, N, hs]
100
- logits = self.classifier(out)
101
- output = {
102
- "state": state,
103
- "embeds": out,
104
- "logits": logits
105
- }
106
-
107
- return output
108
-
109
-
110
- class Seq2SeqAttention(nn.Module):
111
-
112
- def __init__(self, hs_enc, hs_dec, attn_size):
113
- """
114
- Args:
115
- hs_enc: encoder hidden size
116
- hs_dec: decoder hidden size
117
- attn_size: attention vector size
118
- """
119
- super(Seq2SeqAttention, self).__init__()
120
- self.h2attn = nn.Linear(hs_enc + hs_dec, attn_size)
121
- self.v = nn.Parameter(torch.randn(attn_size))
122
- self.apply(init)
123
-
124
- def forward(self, h_dec, h_enc, src_lens):
125
- """
126
- Args:
127
- h_dec: decoder hidden (query), [N, hs_dec]
128
- h_enc: encoder memory (key/value), [N, src_max_len, hs_enc]
129
- src_lens: source (encoder memory) lengths, [N, ]
130
- """
131
- N = h_enc.size(0)
132
- src_max_len = h_enc.size(1)
133
- h_dec = h_dec.unsqueeze(1).repeat(1, src_max_len, 1) # [N, src_max_len, hs_dec]
134
-
135
- attn_input = torch.cat((h_dec, h_enc), dim=-1)
136
- attn_out = torch.tanh(self.h2attn(attn_input)) # [N, src_max_len, attn_size]
137
-
138
- v = self.v.repeat(N, 1).unsqueeze(1) # [N, 1, attn_size]
139
- score = torch.bmm(v, attn_out.transpose(1, 2)).squeeze(1) # [N, src_max_len]
140
-
141
- idxs = torch.arange(src_max_len).repeat(N).view(N, src_max_len)
142
- mask = (idxs < src_lens.view(-1, 1)).to(h_dec.device)
143
-
144
- score = score.masked_fill(mask == 0, -1e10)
145
- weights = torch.softmax(score, dim=-1) # [N, src_max_len]
146
- ctx = torch.bmm(weights.unsqueeze(1), h_enc).squeeze(1) # [N, hs_enc]
147
-
148
- return ctx, weights
149
-
150
-
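The length masking inside Seq2SeqAttention.forward can be shown with a standalone sketch (plain PyTorch, toy values): positions beyond each source length are filled with -1e10 so their softmax weight is effectively zero.

import torch

src_lens = torch.tensor([3, 5])                          # true encoder lengths
N, src_max_len = src_lens.size(0), 5
idxs = torch.arange(src_max_len).repeat(N).view(N, src_max_len)
mask = idxs < src_lens.view(-1, 1)                       # True for valid positions
score = torch.randn(N, src_max_len)
score = score.masked_fill(mask == 0, -1e10)
weights = torch.softmax(score, dim=-1)
print(weights[0])                                        # last two entries are ~0
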
151
- class AttentionProj(nn.Module):
152
-
153
- def __init__(self, hs_enc, hs_dec, embed_dim, attn_size):
154
- super().__init__()  # required before registering submodules on an nn.Module
- self.q_proj = nn.Linear(hs_dec, embed_dim)
155
- self.kv_proj = nn.Linear(hs_enc, embed_dim)
156
- self.h2attn = nn.Linear(embed_dim * 2, attn_size)
157
- self.v = nn.Parameter(torch.randn(attn_size))
158
- self.apply(init)
159
-
160
- def init(self, m):
161
- if isinstance(m, nn.Linear):
162
- nn.init.kaiming_uniform_(m.weight)
163
- if m.bias is not None:
164
- nn.init.constant_(m.bias, 0)
165
-
166
- def forward(self, h_dec, h_enc, src_lens):
167
- """
168
- Args:
169
- h_dec: decoder hidden (query), [N, hs_dec]
170
- h_enc: encoder memory (key/value), [N, src_max_len, hs_enc]
171
- src_lens: source (encoder memory) lengths, [N, ]
172
- """
173
- h_enc = self.kv_proj(h_enc) # [N, src_max_len, embed_dim]
174
- h_dec = self.q_proj(h_dec) # [N, embed_dim]
175
- N = h_enc.size(0)
176
- src_max_len = h_enc.size(1)
177
- h_dec = h_dec.unsqueeze(1).repeat(1, src_max_len, 1) # [N, src_max_len, hs_dec]
178
-
179
- attn_input = torch.cat((h_dec, h_enc), dim=-1)
180
- attn_out = torch.tanh(self.h2attn(attn_input)) # [N, src_max_len, attn_size]
181
-
182
- v = self.v.repeat(N, 1).unsqueeze(1) # [N, 1, attn_size]
183
- score = torch.bmm(v, attn_out.transpose(1, 2)).squeeze(1) # [N, src_max_len]
184
-
185
- idxs = torch.arange(src_max_len).repeat(N).view(N, src_max_len)
186
- mask = (idxs < src_lens.view(-1, 1)).to(h_dec.device)
187
-
188
- score = score.masked_fill(mask == 0, -1e10)
189
- weights = torch.softmax(score, dim=-1) # [N, src_max_len]
190
- ctx = torch.bmm(weights.unsqueeze(1), h_enc).squeeze(1) # [N, hs_enc]
191
-
192
- return ctx, weights
193
-
194
-
195
- class BahAttnDecoder(RnnDecoder):
196
-
197
- def __init__(self, emb_dim, vocab_size, fc_emb_dim, attn_emb_dim,
198
- dropout, d_model, **kwargs):
199
- """
200
- concatenate fc, attn, word to feed to the rnn
201
- """
202
- super().__init__(emb_dim, vocab_size, fc_emb_dim, attn_emb_dim,
203
- dropout, d_model, **kwargs)
204
- attn_size = kwargs.get("attn_size", self.d_model)
205
- self.model = getattr(nn, self.rnn_type)(
206
- input_size=self.emb_dim * 3,
207
- hidden_size=self.d_model,
208
- batch_first=True,
209
- num_layers=self.num_layers,
210
- bidirectional=self.bidirectional)
211
- self.attn = Seq2SeqAttention(self.attn_emb_dim,
212
- self.d_model * (self.bidirectional + 1) * \
213
- self.num_layers,
214
- attn_size)
215
- self.fc_proj = nn.Linear(self.fc_emb_dim, self.emb_dim)
216
- self.ctx_proj = nn.Linear(self.attn_emb_dim, self.emb_dim)
217
- self.apply(init)
218
-
219
- def forward(self, input_dict):
220
- word = input_dict["word"]
221
- state = input_dict.get("state", None) # [n_layer * n_dire, bs, d_model]
222
- fc_emb = input_dict["fc_emb"]
223
- attn_emb = input_dict["attn_emb"]
224
- attn_emb_len = input_dict["attn_emb_len"]
225
-
226
- word = word.to(fc_emb.device)
227
- embed = self.in_dropout(self.word_embedding(word))
228
-
229
- # embed: [N, 1, embed_size]
230
- if state is None:
231
- state = self.init_hidden(word.size(0), fc_emb.device)
232
- if self.rnn_type == "LSTM":
233
- query = state[0].transpose(0, 1).flatten(1)
234
- else:
235
- query = state.transpose(0, 1).flatten(1)
236
- c, attn_weight = self.attn(query, attn_emb, attn_emb_len)
237
-
238
- p_fc_emb = self.fc_proj(fc_emb)
239
- p_ctx = self.ctx_proj(c)
240
- rnn_input = torch.cat((embed, p_ctx.unsqueeze(1), p_fc_emb.unsqueeze(1)),
241
- dim=-1)
242
-
243
- out, state = self.model(rnn_input, state)
244
-
245
- output = {
246
- "state": state,
247
- "embed": out,
248
- "logit": self.classifier(out),
249
- "attn_weight": attn_weight
250
- }
251
- return output
252
-
253
-
254
- class BahAttnDecoder2(RnnDecoder):
255
-
256
- def __init__(self, emb_dim, vocab_size, fc_emb_dim, attn_emb_dim,
257
- dropout, d_model, **kwargs):
258
- """
259
- add fc, attn, word together to feed to the rnn
260
- """
261
- super().__init__(emb_dim, vocab_size, fc_emb_dim, attn_emb_dim,
262
- dropout, d_model, **kwargs)
263
- attn_size = kwargs.get("attn_size", self.d_model)
264
- self.model = getattr(nn, self.rnn_type)(
265
- input_size=self.emb_dim,
266
- hidden_size=self.d_model,
267
- batch_first=True,
268
- num_layers=self.num_layers,
269
- bidirectional=self.bidirectional)
270
- self.attn = Seq2SeqAttention(self.emb_dim,
271
- self.d_model * (self.bidirectional + 1) * \
272
- self.num_layers,
273
- attn_size)
274
- self.fc_proj = nn.Linear(self.fc_emb_dim, self.emb_dim)
275
- self.attn_proj = nn.Linear(self.attn_emb_dim, self.emb_dim)
276
- self.apply(partial(init, method="xavier"))
277
-
278
- def forward(self, input_dict):
279
- word = input_dict["word"]
280
- state = input_dict.get("state", None) # [n_layer * n_dire, bs, d_model]
281
- fc_emb = input_dict["fc_emb"]
282
- attn_emb = input_dict["attn_emb"]
283
- attn_emb_len = input_dict["attn_emb_len"]
284
-
285
- word = word.to(fc_emb.device)
286
- embed = self.in_dropout(self.word_embedding(word))
287
- p_attn_emb = self.attn_proj(attn_emb)
288
-
289
- # embed: [N, 1, embed_size]
290
- if state is None:
291
- state = self.init_hidden(word.size(0), fc_emb.device)
292
- if self.rnn_type == "LSTM":
293
- query = state[0].transpose(0, 1).flatten(1)
294
- else:
295
- query = state.transpose(0, 1).flatten(1)
296
- c, attn_weight = self.attn(query, p_attn_emb, attn_emb_len)
297
-
298
- p_fc_emb = self.fc_proj(fc_emb)
299
- rnn_input = embed + c.unsqueeze(1) + p_fc_emb.unsqueeze(1)
300
-
301
- out, state = self.model(rnn_input, state)
302
-
303
- output = {
304
- "state": state,
305
- "embed": out,
306
- "logit": self.classifier(out),
307
- "attn_weight": attn_weight
308
- }
309
- return output
310
-
311
-
312
- class ConditionalBahAttnDecoder(RnnDecoder):
313
-
314
- def __init__(self, emb_dim, vocab_size, fc_emb_dim, attn_emb_dim,
315
- dropout, d_model, **kwargs):
316
- """
317
- concatenate fc, attn, word to feed to the rnn
318
- """
319
- super().__init__(emb_dim, vocab_size, fc_emb_dim, attn_emb_dim,
320
- dropout, d_model, **kwargs)
321
- attn_size = kwargs.get("attn_size", self.d_model)
322
- self.model = getattr(nn, self.rnn_type)(
323
- input_size=self.emb_dim * 3,
324
- hidden_size=self.d_model,
325
- batch_first=True,
326
- num_layers=self.num_layers,
327
- bidirectional=self.bidirectional)
328
- self.attn = Seq2SeqAttention(self.attn_emb_dim,
329
- self.d_model * (self.bidirectional + 1) * \
330
- self.num_layers,
331
- attn_size)
332
- self.ctx_proj = nn.Linear(self.attn_emb_dim, self.emb_dim)
333
- self.condition_embedding = nn.Embedding(2, emb_dim)
334
- self.apply(init)
335
-
336
- def forward(self, input_dict):
337
- word = input_dict["word"]
338
- state = input_dict.get("state", None) # [n_layer * n_dire, bs, d_model]
339
- fc_emb = input_dict["fc_emb"]
340
- attn_emb = input_dict["attn_emb"]
341
- attn_emb_len = input_dict["attn_emb_len"]
342
- condition = input_dict["condition"]
343
-
344
- word = word.to(fc_emb.device)
345
- embed = self.in_dropout(self.word_embedding(word))
346
-
347
- condition = torch.as_tensor([[1 - c, c] for c in condition]).to(fc_emb.device)
348
- condition_emb = torch.matmul(condition, self.condition_embedding.weight)
349
- # condition_embs: [N, emb_dim]
350
-
351
- # embed: [N, 1, embed_size]
352
- if state is None:
353
- state = self.init_hidden(word.size(0), fc_emb.device)
354
- if self.rnn_type == "LSTM":
355
- query = state[0].transpose(0, 1).flatten(1)
356
- else:
357
- query = state.transpose(0, 1).flatten(1)
358
- c, attn_weight = self.attn(query, attn_emb, attn_emb_len)
359
-
360
- p_ctx = self.ctx_proj(c)
361
- rnn_input = torch.cat((embed, p_ctx.unsqueeze(1), condition_emb.unsqueeze(1)),
362
- dim=-1)
363
-
364
- out, state = self.model(rnn_input, state)
365
-
366
- output = {
367
- "state": state,
368
- "embed": out,
369
- "logit": self.classifier(out),
370
- "attn_weight": attn_weight
371
- }
372
- return output
373
-
374
-
375
- class StructBahAttnDecoder(RnnDecoder):
376
-
377
- def __init__(self, emb_dim, vocab_size, fc_emb_dim, struct_vocab_size,
378
- attn_emb_dim, dropout, d_model, **kwargs):
379
- """
380
- concatenate fc, attn, word to feed to the rnn
381
- """
382
- super().__init__(emb_dim, vocab_size, fc_emb_dim, attn_emb_dim,
383
- dropout, d_model, **kwargs)
384
- attn_size = kwargs.get("attn_size", self.d_model)
385
- self.model = getattr(nn, self.rnn_type)(
386
- input_size=self.emb_dim * 3,
387
- hidden_size=self.d_model,
388
- batch_first=True,
389
- num_layers=self.num_layers,
390
- bidirectional=self.bidirectional)
391
- self.attn = Seq2SeqAttention(self.attn_emb_dim,
392
- self.d_model * (self.bidirectional + 1) * \
393
- self.num_layers,
394
- attn_size)
395
- self.ctx_proj = nn.Linear(self.attn_emb_dim, self.emb_dim)
396
- self.struct_embedding = nn.Embedding(struct_vocab_size, emb_dim)
397
- self.apply(init)
398
-
399
- def forward(self, input_dict):
400
- word = input_dict["word"]
401
- state = input_dict.get("state", None) # [n_layer * n_dire, bs, d_model]
402
- fc_emb = input_dict["fc_emb"]
403
- attn_emb = input_dict["attn_emb"]
404
- attn_emb_len = input_dict["attn_emb_len"]
405
- structure = input_dict["structure"]
406
-
407
- word = word.to(fc_emb.device)
408
- embed = self.in_dropout(self.word_embedding(word))
409
-
410
- struct_emb = self.struct_embedding(structure)
411
- # struct_embs: [N, emb_dim]
412
-
413
- # embed: [N, 1, embed_size]
414
- if state is None:
415
- state = self.init_hidden(word.size(0), fc_emb.device)
416
- if self.rnn_type == "LSTM":
417
- query = state[0].transpose(0, 1).flatten(1)
418
- else:
419
- query = state.transpose(0, 1).flatten(1)
420
- c, attn_weight = self.attn(query, attn_emb, attn_emb_len)
421
-
422
- p_ctx = self.ctx_proj(c)
423
- rnn_input = torch.cat((embed, p_ctx.unsqueeze(1), struct_emb.unsqueeze(1)), dim=-1)
424
-
425
- out, state = self.model(rnn_input, state)
426
-
427
- output = {
428
- "state": state,
429
- "embed": out,
430
- "logit": self.classifier(out),
431
- "attn_weight": attn_weight
432
- }
433
- return output
434
-
435
-
436
- class StyleBahAttnDecoder(RnnDecoder):
437
-
438
- def __init__(self, emb_dim, vocab_size, fc_emb_dim, attn_emb_dim,
439
- dropout, d_model, **kwargs):
440
- """
441
- concatenate fc, attn, word to feed to the rnn
442
- """
443
- super().__init__(emb_dim, vocab_size, fc_emb_dim, attn_emb_dim,
444
- dropout, d_model, **kwargs)
445
- attn_size = kwargs.get("attn_size", self.d_model)
446
- self.model = getattr(nn, self.rnn_type)(
447
- input_size=self.emb_dim * 3,
448
- hidden_size=self.d_model,
449
- batch_first=True,
450
- num_layers=self.num_layers,
451
- bidirectional=self.bidirectional)
452
- self.attn = Seq2SeqAttention(self.attn_emb_dim,
453
- self.d_model * (self.bidirectional + 1) * \
454
- self.num_layers,
455
- attn_size)
456
- self.ctx_proj = nn.Linear(self.attn_emb_dim, self.emb_dim)
457
- self.apply(init)
458
-
459
- def forward(self, input_dict):
460
- word = input_dict["word"]
461
- state = input_dict.get("state", None) # [n_layer * n_dire, bs, d_model]
462
- fc_emb = input_dict["fc_emb"]
463
- attn_emb = input_dict["attn_emb"]
464
- attn_emb_len = input_dict["attn_emb_len"]
465
- style = input_dict["style"]
466
-
467
- word = word.to(fc_emb.device)
468
- embed = self.in_dropout(self.word_embedding(word))
469
-
470
- # embed: [N, 1, embed_size]
471
- if state is None:
472
- state = self.init_hidden(word.size(0), fc_emb.device)
473
- if self.rnn_type == "LSTM":
474
- query = state[0].transpose(0, 1).flatten(1)
475
- else:
476
- query = state.transpose(0, 1).flatten(1)
477
- c, attn_weight = self.attn(query, attn_emb, attn_emb_len)
478
-
479
- p_ctx = self.ctx_proj(c)
480
- rnn_input = torch.cat((embed, p_ctx.unsqueeze(1), style.unsqueeze(1)),
481
- dim=-1)
482
-
483
- out, state = self.model(rnn_input, state)
484
-
485
- output = {
486
- "state": state,
487
- "embed": out,
488
- "logit": self.classifier(out),
489
- "attn_weight": attn_weight
490
- }
491
- return output
492
-
493
-
494
- class BahAttnDecoder3(RnnDecoder):
495
-
496
- def __init__(self, emb_dim, vocab_size, fc_emb_dim, attn_emb_dim,
497
- dropout, d_model, **kwargs):
498
- """
499
- concatenate fc, attn, word to feed to the rnn
500
- """
501
- super().__init__(emb_dim, vocab_size, fc_emb_dim, attn_emb_dim,
502
- dropout, d_model, **kwargs)
503
- attn_size = kwargs.get("attn_size", self.d_model)
504
- self.model = getattr(nn, self.rnn_type)(
505
- input_size=self.emb_dim + attn_emb_dim,
506
- hidden_size=self.d_model,
507
- batch_first=True,
508
- num_layers=self.num_layers,
509
- bidirectional=self.bidirectional)
510
- self.attn = Seq2SeqAttention(self.attn_emb_dim,
511
- self.d_model * (self.bidirectional + 1) * \
512
- self.num_layers,
513
- attn_size)
514
- self.ctx_proj = lambda x: x
515
- self.apply(init)
516
-
517
- def forward(self, input_dict):
518
- word = input_dict["word"]
519
- state = input_dict.get("state", None) # [n_layer * n_dire, bs, d_model]
520
- fc_emb = input_dict["fc_emb"]
521
- attn_emb = input_dict["attn_emb"]
522
- attn_emb_len = input_dict["attn_emb_len"]
523
-
524
- if word.size(-1) == self.fc_emb_dim: # fc_emb
525
- embed = word.unsqueeze(1)
526
- elif word.size(-1) == 1: # word
527
- word = word.to(fc_emb.device)
528
- embed = self.in_dropout(self.word_embedding(word))
529
- else:
530
- raise Exception(f"problem with word input size {word.size()}")
531
-
532
- # embed: [N, 1, embed_size]
533
- if state is None:
534
- state = self.init_hidden(word.size(0), fc_emb.device)
535
- if self.rnn_type == "LSTM":
536
- query = state[0].transpose(0, 1).flatten(1)
537
- else:
538
- query = state.transpose(0, 1).flatten(1)
539
- c, attn_weight = self.attn(query, attn_emb, attn_emb_len)
540
-
541
- p_ctx = self.ctx_proj(c)
542
- rnn_input = torch.cat((embed, p_ctx.unsqueeze(1)), dim=-1)
543
-
544
- out, state = self.model(rnn_input, state)
545
-
546
- output = {
547
- "state": state,
548
- "embed": out,
549
- "logit": self.classifier(out),
550
- "attn_weight": attn_weight
551
- }
552
- return output
553
-
554
-
555
- class SpecificityBahAttnDecoder(RnnDecoder):
556
-
557
- def __init__(self, emb_dim, vocab_size, fc_emb_dim, attn_emb_dim,
558
- dropout, d_model, **kwargs):
559
- """
560
- concatenate fc, attn, word to feed to the rnn
561
- """
562
- super().__init__(emb_dim, vocab_size, fc_emb_dim, attn_emb_dim,
563
- dropout, d_model, **kwargs)
564
- attn_size = kwargs.get("attn_size", self.d_model)
565
- self.model = getattr(nn, self.rnn_type)(
566
- input_size=self.emb_dim + attn_emb_dim + 1,
567
- hidden_size=self.d_model,
568
- batch_first=True,
569
- num_layers=self.num_layers,
570
- bidirectional=self.bidirectional)
571
- self.attn = Seq2SeqAttention(self.attn_emb_dim,
572
- self.d_model * (self.bidirectional + 1) * \
573
- self.num_layers,
574
- attn_size)
575
- self.ctx_proj = lambda x: x
576
- self.apply(init)
577
-
578
- def forward(self, input_dict):
579
- word = input_dict["word"]
580
- state = input_dict.get("state", None) # [n_layer * n_dire, bs, d_model]
581
- fc_emb = input_dict["fc_emb"]
582
- attn_emb = input_dict["attn_emb"]
583
- attn_emb_len = input_dict["attn_emb_len"]
584
- condition = input_dict["condition"] # [N,]
585
-
586
- word = word.to(fc_emb.device)
587
- embed = self.in_dropout(self.word_embedding(word))
588
-
589
- # embed: [N, 1, embed_size]
590
- if state is None:
591
- state = self.init_hidden(word.size(0), fc_emb.device)
592
- if self.rnn_type == "LSTM":
593
- query = state[0].transpose(0, 1).flatten(1)
594
- else:
595
- query = state.transpose(0, 1).flatten(1)
596
- c, attn_weight = self.attn(query, attn_emb, attn_emb_len)
597
-
598
- p_ctx = self.ctx_proj(c)
599
- rnn_input = torch.cat(
600
- (embed, p_ctx.unsqueeze(1), condition.reshape(-1, 1, 1)),
601
- dim=-1)
602
-
603
- out, state = self.model(rnn_input, state)
604
-
605
- output = {
606
- "state": state,
607
- "embed": out,
608
- "logit": self.classifier(out),
609
- "attn_weight": attn_weight
610
- }
611
- return output
612
-
613
-
614
- class TransformerDecoder(BaseDecoder):
615
-
616
- def __init__(self, emb_dim, vocab_size, fc_emb_dim, attn_emb_dim, dropout, **kwargs):
617
- super().__init__(emb_dim, vocab_size, fc_emb_dim, attn_emb_dim,
618
- dropout=dropout,)
619
- self.d_model = emb_dim
620
- self.nhead = kwargs.get("nhead", self.d_model // 64)
621
- self.nlayers = kwargs.get("nlayers", 2)
622
- self.dim_feedforward = kwargs.get("dim_feedforward", self.d_model * 4)
623
-
624
- self.pos_encoder = PositionalEncoding(self.d_model, dropout)
625
- layer = nn.TransformerDecoderLayer(d_model=self.d_model,
626
- nhead=self.nhead,
627
- dim_feedforward=self.dim_feedforward,
628
- dropout=dropout)
629
- self.model = nn.TransformerDecoder(layer, self.nlayers)
630
- self.classifier = nn.Linear(self.d_model, vocab_size)
631
- self.attn_proj = nn.Sequential(
632
- nn.Linear(self.attn_emb_dim, self.d_model),
633
- nn.ReLU(),
634
- nn.Dropout(dropout),
635
- nn.LayerNorm(self.d_model)
636
- )
637
- # self.attn_proj = lambda x: x
638
- self.init_params()
639
-
640
- def init_params(self):
641
- for p in self.parameters():
642
- if p.dim() > 1:
643
- nn.init.xavier_uniform_(p)
644
-
645
- def generate_square_subsequent_mask(self, max_length):
646
- mask = (torch.triu(torch.ones(max_length, max_length)) == 1).transpose(0, 1)
647
- mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
648
- return mask
649
-
650
- def forward(self, input_dict):
651
- word = input_dict["word"]
652
- attn_emb = input_dict["attn_emb"]
653
- attn_emb_len = input_dict["attn_emb_len"]
654
- cap_padding_mask = input_dict["cap_padding_mask"]
655
-
656
- p_attn_emb = self.attn_proj(attn_emb)
657
- p_attn_emb = p_attn_emb.transpose(0, 1) # [T_src, N, emb_dim]
658
- word = word.to(attn_emb.device)
659
- embed = self.in_dropout(self.word_embedding(word)) * math.sqrt(self.emb_dim) # [N, T, emb_dim]
660
- embed = embed.transpose(0, 1) # [T, N, emb_dim]
661
- embed = self.pos_encoder(embed)
662
-
663
- tgt_mask = self.generate_square_subsequent_mask(embed.size(0)).to(attn_emb.device)
664
- memory_key_padding_mask = ~generate_length_mask(attn_emb_len, attn_emb.size(1)).to(attn_emb.device)
665
- output = self.model(embed, p_attn_emb, tgt_mask=tgt_mask,
666
- tgt_key_padding_mask=cap_padding_mask,
667
- memory_key_padding_mask=memory_key_padding_mask)
668
- output = output.transpose(0, 1)
669
- output = {
670
- "embed": output,
671
- "logit": self.classifier(output),
672
- }
673
- return output
674
-
675
-
676
-
677
-
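generate_square_subsequent_mask above builds a standard causal mask; an equivalent standalone construction (plain PyTorch, T=4 for illustration) makes the allowed/blocked pattern explicit: 0.0 where a target position may attend (j <= i), -inf where it may not (j > i).

import torch

T = 4
mask = torch.triu(torch.full((T, T), float('-inf')), diagonal=1)
print(mask)
# tensor([[0., -inf, -inf, -inf],
#         [0., 0., -inf, -inf],
#         [0., 0., 0., -inf],
#         [0., 0., 0., 0.]])
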
678
- class EventTransformerDecoder(TransformerDecoder):
679
-
680
- def forward(self, input_dict):
681
- word = input_dict["word"] # index of word embeddings
682
- attn_emb = input_dict["attn_emb"]
683
- attn_emb_len = input_dict["attn_emb_len"]
684
- cap_padding_mask = input_dict["cap_padding_mask"]
685
- event_emb = input_dict["event"] # [N, emb_dim]
686
-
687
- p_attn_emb = self.attn_proj(attn_emb)
688
- p_attn_emb = p_attn_emb.transpose(0, 1) # [T_src, N, emb_dim]
689
- word = word.to(attn_emb.device)
690
- embed = self.in_dropout(self.word_embedding(word)) * math.sqrt(self.emb_dim) # [N, T, emb_dim]
691
-
692
- embed = embed.transpose(0, 1) # [T, N, emb_dim]
693
- embed += event_emb
694
- embed = self.pos_encoder(embed)
695
-
696
- tgt_mask = self.generate_square_subsequent_mask(embed.size(0)).to(attn_emb.device)
697
- memory_key_padding_mask = ~generate_length_mask(attn_emb_len, attn_emb.size(1)).to(attn_emb.device)
698
- output = self.model(embed, p_attn_emb, tgt_mask=tgt_mask,
699
- tgt_key_padding_mask=cap_padding_mask,
700
- memory_key_padding_mask=memory_key_padding_mask)
701
- output = output.transpose(0, 1)
702
- output = {
703
- "embed": output,
704
- "logit": self.classifier(output),
705
- }
706
- return output
707
-
708
-
709
- class KeywordProbTransformerDecoder(TransformerDecoder):
710
-
711
- def __init__(self, emb_dim, vocab_size, fc_emb_dim, attn_emb_dim,
712
- dropout, keyword_classes_num, **kwargs):
713
- super().__init__(emb_dim, vocab_size, fc_emb_dim, attn_emb_dim,
714
- dropout, **kwargs)
715
- self.keyword_proj = nn.Linear(keyword_classes_num, self.d_model)
716
- self.word_keyword_norm = nn.LayerNorm(self.d_model)
717
-
718
- def forward(self, input_dict):
719
- word = input_dict["word"] # index of word embeddings
720
- attn_emb = input_dict["attn_emb"]
721
- attn_emb_len = input_dict["attn_emb_len"]
722
- cap_padding_mask = input_dict["cap_padding_mask"]
723
- keyword = input_dict["keyword"] # [N, keyword_classes_num]
724
-
725
- p_attn_emb = self.attn_proj(attn_emb)
726
- p_attn_emb = p_attn_emb.transpose(0, 1) # [T_src, N, emb_dim]
727
- word = word.to(attn_emb.device)
728
- embed = self.in_dropout(self.word_embedding(word)) * math.sqrt(self.emb_dim) # [N, T, emb_dim]
729
-
730
- embed = embed.transpose(0, 1) # [T, N, emb_dim]
731
- embed += self.keyword_proj(keyword)
732
- embed = self.word_keyword_norm(embed)
733
-
734
- embed = self.pos_encoder(embed)
735
-
736
- tgt_mask = self.generate_square_subsequent_mask(embed.size(0)).to(attn_emb.device)
737
- memory_key_padding_mask = ~generate_length_mask(attn_emb_len, attn_emb.size(1)).to(attn_emb.device)
738
- output = self.model(embed, p_attn_emb, tgt_mask=tgt_mask,
739
- tgt_key_padding_mask=cap_padding_mask,
740
- memory_key_padding_mask=memory_key_padding_mask)
741
- output = output.transpose(0, 1)
742
- output = {
743
- "embed": output,
744
- "logit": self.classifier(output),
745
- }
746
- return output
 
spaces/AP123/ai-avatars/app.py DELETED
@@ -1,616 +0,0 @@
1
- import gradio as gr
2
- import os
3
- from pathlib import Path
4
- import argparse
5
- import shutil
6
- from train_dreambooth import run_training
7
- from convertosd import convert
8
- from PIL import Image
9
- from slugify import slugify
10
- import requests
11
- import torch
12
- import zipfile
13
- import tarfile
14
- import urllib.parse
15
- import gc
16
- from diffusers import StableDiffusionPipeline
17
- from huggingface_hub import snapshot_download
18
-
19
-
20
- is_spaces = True if "SPACE_ID" in os.environ else False
21
- is_shared_ui = True if "IS_SHARED_UI" in os.environ else False
22
- is_gpu_associated = torch.cuda.is_available()
23
-
24
- css = '''
25
- .instruction{position: absolute; top: 0;right: 0;margin-top: 0px !important}
26
- .arrow{position: absolute;top: 0;right: -110px;margin-top: -8px !important}
27
- #component-4, #component-3, #component-10{min-height: 0}
28
- .duplicate-button img{margin: 0}
29
- '''
30
- maximum_concepts = 3
31
-
32
- #Pre download the files
33
- if(is_gpu_associated):
34
- model_v1 = snapshot_download(repo_id="multimodalart/sd-fine-tunable")
35
- model_v2 = snapshot_download(repo_id="stabilityai/stable-diffusion-2")
36
- model_v2_512 = snapshot_download(repo_id="stabilityai/stable-diffusion-2-base")
37
- safety_checker = snapshot_download(repo_id="multimodalart/sd-sc")
38
- model_to_load = model_v1
39
-
40
- with zipfile.ZipFile("mix.zip", 'r') as zip_ref:
41
- zip_ref.extractall(".")
42
-
43
- def swap_base_model(selected_model):
44
- if(is_gpu_associated):
45
- global model_to_load
46
- if(selected_model == "v1-5"):
47
- model_to_load = model_v1
48
- elif(selected_model == "v2-768"):
49
- model_to_load = model_v2
50
- else:
51
- model_to_load = model_v2_512
52
-
53
- def count_files(*inputs):
54
- file_counter = 0
55
- concept_counter = 0
56
- for i, input in enumerate(inputs):
57
- if(i < maximum_concepts-1):
58
- files = inputs[i]
59
- if(files):
60
- concept_counter+=1
61
- file_counter+=len(files)
62
- uses_custom = inputs[-1]
63
- selected_model = inputs[-4]
64
- experimental_faces = inputs[-5]
65
- if(uses_custom):
66
- Training_Steps = int(inputs[-3])
67
- else:
68
- Training_Steps = file_counter*150
69
- if(is_spaces):
70
- if(selected_model == "v1-5"):
71
- its = 1.1
72
- if(experimental_faces):
73
- its = 1
74
- elif(selected_model == "v2-512"):
75
- its = 0.8
76
- if(experimental_faces):
77
- its = 0.7
78
- elif(selected_model == "v2-768"):
79
- its = 0.5
80
- summary_sentence = f'''You are going to train {concept_counter} concept(s), with {file_counter} images for {Training_Steps} steps. The training should take around {round(Training_Steps/its, 2)} seconds, or {round((Training_Steps/its)/60, 2)} minutes.
81
- Setting up, compressing, and uploading the model can take up to 20 minutes.<br>As the T4-Small GPU costs US$0.60 for 1h, <span style="font-size: 120%"><b>the estimated cost for this training is below US${round((((Training_Steps/its)/3600)+0.3+0.1)*0.60, 2)}.</b></span><br><br>
82
- If you check the box below, the GPU attribution will automatically be removed after training is done and the model is uploaded. If not, don't forget to come back here and swap the hardware back to CPU.<br><br>'''
83
- else:
84
- summary_sentence = f'''You are going to train {concept_counter} concept(s), with {file_counter} images for {Training_Steps} steps.<br><br>'''
85
-
86
- return([gr.update(visible=True), gr.update(visible=True, value=summary_sentence)])
87
-
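For a concrete sense of the estimate above: with 10 images on v1-5 (its = 1.1), Training_Steps = 10 * 150 = 1500, so the predicted time is 1500 / 1.1 ≈ 1364 s (≈ 22.7 min) and the displayed cost bound is (1364/3600 + 0.3 + 0.1) * 0.60 ≈ US$0.47.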
88
- def update_steps(*files_list):
89
- file_counter = 0
90
- for i, files in enumerate(files_list):
91
- if(files):
92
- file_counter+=len(files)
93
- return(gr.update(value=file_counter*200))
94
-
95
- def pad_image(image):
96
- w, h = image.size
97
- if w == h:
98
- return image
99
- elif w > h:
100
- new_image = Image.new(image.mode, (w, w), (0, 0, 0))
101
- new_image.paste(image, (0, (w - h) // 2))
102
- return new_image
103
- else:
104
- new_image = Image.new(image.mode, (h, h), (0, 0, 0))
105
- new_image.paste(image, ((h - w) // 2, 0))
106
- return new_image
107
-
108
- def train(*inputs):
109
- if is_shared_ui:
110
- raise gr.Error("This Space only works in duplicated instances")
111
- if not is_gpu_associated:
112
- raise gr.Error("Please associate a T4 GPU for this Space")
113
- torch.cuda.empty_cache()
114
- if 'pipe' in globals():
115
- global pipe, pipe_is_set
116
- del pipe
117
- pipe_is_set = False
118
- gc.collect()
119
-
120
- if os.path.exists("output_model"): shutil.rmtree('output_model')
121
- if os.path.exists("instance_images"): shutil.rmtree('instance_images')
122
- if os.path.exists("diffusers_model.tar"): os.remove("diffusers_model.tar")
123
- if os.path.exists("model.ckpt"): os.remove("model.ckpt")
124
- if os.path.exists("hastrained.success"): os.remove("hastrained.success")
125
- file_counter = 0
126
- which_model = inputs[-10]
127
- resolution = 512 if which_model != "v2-768" else 768
128
- for i, input in enumerate(inputs):
129
- if(i < maximum_concepts-1):
130
- if(input):
131
- os.makedirs('instance_images',exist_ok=True)
132
- files = inputs[i+(maximum_concepts*2)]
133
- prompt = inputs[i+maximum_concepts]
134
- if(prompt == "" or prompt == None):
135
- raise gr.Error("You forgot to define your concept prompt")
136
- for j, file_temp in enumerate(files):
137
- file = Image.open(file_temp.name)
138
- image = pad_image(file)
139
- image = image.resize((resolution, resolution))
140
- extension = file_temp.name.split(".")[1]
141
- image = image.convert('RGB')
142
- image.save(f'instance_images/{prompt}_({j+1}).jpg', format="JPEG", quality = 100)
143
- file_counter += 1
144
-
145
- os.makedirs('output_model',exist_ok=True)
146
- uses_custom = inputs[-1]
147
- remove_attribution_after = inputs[-5]
148
- experimental_face_improvement = inputs[-8]
149
-
150
- if(uses_custom):
151
- Training_Steps = int(inputs[-3])
152
- Train_text_encoder_for = int(inputs[-2])
153
- else:
154
- Train_text_encoder_for=30
155
- Training_Steps = file_counter*150
156
- stptxt = int((Training_Steps*Train_text_encoder_for)/100)
157
- gradient_checkpointing = True if (experimental_face_improvement or which_model != "v1-5") else False
158
- cache_latents = True if which_model != "v1-5" else False
159
- args_general = argparse.Namespace(
160
- image_captions_filename = True,
161
- train_text_encoder = True if stptxt > 0 else False,
162
- stop_text_encoder_training = stptxt,
163
- save_n_steps = 0,
164
- pretrained_model_name_or_path = model_to_load,
165
- instance_data_dir="instance_images",
166
- class_data_dir="Mix",
167
- output_dir="output_model",
168
- with_prior_preservation=True,
169
- prior_loss_weight=1.0,
170
- instance_prompt="",
171
- seed=42,
172
- resolution=resolution,
173
- mixed_precision="fp16",
174
- train_batch_size=1,
175
- gradient_accumulation_steps=1,
176
- use_8bit_adam=True,
177
- learning_rate=2e-6,
178
- lr_scheduler="polynomial",
179
- lr_warmup_steps = 0,
180
- max_train_steps=Training_Steps,
181
- num_class_images=200,
182
- gradient_checkpointing=gradient_checkpointing,
183
- cache_latents=cache_latents,
184
- )
185
- print("Starting multi-training...")
186
- lock_file = open("intraining.lock", "w")
187
- lock_file.close()
188
- run_training(args_general)
189
- gc.collect()
190
- torch.cuda.empty_cache()
191
- if(which_model == "v1-5"):
192
- print("Adding Safety Checker to the model...")
193
- shutil.copytree(f"{safety_checker}/feature_extractor", "output_model/feature_extractor")
194
- shutil.copytree(f"{safety_checker}/safety_checker", "output_model/safety_checker")
195
- shutil.copy(f"model_index.json", "output_model/model_index.json")
196
-
197
- if(not remove_attribution_after):
198
- print("Archiving model file...")
199
- with tarfile.open("diffusers_model.tar", "w") as tar:
200
- tar.add("output_model", arcname=os.path.basename("output_model"))
201
- if os.path.exists("intraining.lock"): os.remove("intraining.lock")
202
- trained_file = open("hastrained.success", "w")
203
- trained_file.close()
204
- print("Training completed!")
205
- return [
206
- gr.update(visible=True, value=["diffusers_model.tar"]), #result
207
- gr.update(visible=True), #try_your_model
208
- gr.update(visible=True), #push_to_hub
209
- gr.update(visible=True), #convert_button
210
- gr.update(visible=False), #training_ongoing
211
- gr.update(visible=True) #completed_training
212
- ]
213
- else:
214
- hf_token = inputs[-4]
215
- model_name = inputs[-6]
216
- where_to_upload = inputs[-7]
217
- push(model_name, where_to_upload, hf_token, which_model, True)
218
- hardware_url = f"https://huggingface.co/spaces/{os.environ['SPACE_ID']}/hardware"
219
- headers = { "authorization" : f"Bearer {hf_token}"}
220
- body = {'flavor': 'cpu-basic'}
221
- requests.post(hardware_url, json = body, headers=headers)
222
-
223
- pipe_is_set = False
224
- def generate(prompt, steps):
225
- torch.cuda.empty_cache()
226
- from diffusers import StableDiffusionPipeline
227
- global pipe_is_set
228
- if(not pipe_is_set):
229
- global pipe
230
- pipe = StableDiffusionPipeline.from_pretrained("./output_model", torch_dtype=torch.float16)
231
- pipe = pipe.to("cuda")
232
- pipe_is_set = True
233
-
234
- image = pipe(prompt, num_inference_steps=steps).images[0]
235
- return(image)
236
-
237
- def push(model_name, where_to_upload, hf_token, which_model, comes_from_automated=False):
238
- if(not os.path.exists("model.ckpt")):
239
- convert("output_model", "model.ckpt")
240
- from huggingface_hub import HfApi, HfFolder, CommitOperationAdd
241
- from huggingface_hub import create_repo
242
- model_name_slug = slugify(model_name)
243
- api = HfApi()
244
- your_username = api.whoami(token=hf_token)["name"]
245
- if(where_to_upload == "My personal profile"):
246
- model_id = f"{your_username}/{model_name_slug}"
247
- else:
248
- model_id = f"sd-dreambooth-library/{model_name_slug}"
249
- headers = {"Authorization" : f"Bearer: {hf_token}", "Content-Type": "application/json"}
250
- response = requests.post("https://huggingface.co/organizations/sd-dreambooth-library/share/SSeOwppVCscfTEzFGQaqpfcjukVeNrKNHX", headers=headers)
251
-
252
- images_upload = os.listdir("instance_images")
253
- image_string = ""
254
- instance_prompt_list = []
255
- previous_instance_prompt = ''
256
- for i, image in enumerate(images_upload):
257
- instance_prompt = image.split("_")[0]
258
- if(instance_prompt != previous_instance_prompt):
259
- title_instance_prompt_string = instance_prompt
260
- instance_prompt_list.append(instance_prompt)
261
- else:
262
- title_instance_prompt_string = ''
263
- previous_instance_prompt = instance_prompt
264
- image_string = f'''{title_instance_prompt_string} {"(use that on your prompt)" if title_instance_prompt_string != "" else ""}
265
- {image_string}![{instance_prompt} {i}](https://huggingface.co/{model_id}/resolve/main/concept_images/{urllib.parse.quote(image)})'''
266
- readme_text = f'''---
267
- license: creativeml-openrail-m
268
- tags:
269
- - text-to-image
270
- widget:
271
- - text: {instance_prompt_list[0]}
272
- ---
273
- ### {model_name} Dreambooth model trained by {api.whoami(token=hf_token)["name"]} with [Hugging Face Dreambooth Training Space](https://huggingface.co/spaces/multimodalart/dreambooth-training) with the {which_model} base model
274
-
275
- You can run your new concept via the `diffusers` [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb). Don't forget to use the concept prompts!
276
-
277
- Sample pictures of:
278
- {image_string}
279
- '''
280
- #Save the readme to a file
281
- readme_file = open("model.README.md", "w")
282
- readme_file.write(readme_text)
283
- readme_file.close()
284
- #Save the token identifier to a file
285
- text_file = open("token_identifier.txt", "w")
286
- text_file.write(', '.join(instance_prompt_list))
287
- text_file.close()
288
- try:
289
- create_repo(model_id,private=True, token=hf_token)
290
- except:
291
- import time
292
- epoch_time = str(int(time.time()))
293
- create_repo(f"{model_id}-{epoch_time}", private=True,token=hf_token)
294
- operations = [
295
- CommitOperationAdd(path_in_repo="token_identifier.txt", path_or_fileobj="token_identifier.txt"),
296
- CommitOperationAdd(path_in_repo="README.md", path_or_fileobj="model.README.md"),
297
- CommitOperationAdd(path_in_repo=f"model.ckpt",path_or_fileobj="model.ckpt")
298
- ]
299
- api.create_commit(
300
- repo_id=model_id,
301
- operations=operations,
302
- commit_message=f"Upload the model {model_name}",
303
- token=hf_token
304
- )
305
- api.upload_folder(
306
- folder_path="output_model",
307
- repo_id=model_id,
308
- token=hf_token
309
- )
310
- api.upload_folder(
311
- folder_path="instance_images",
312
- path_in_repo="concept_images",
313
- repo_id=model_id,
314
- token=hf_token
315
- )
316
- if is_spaces:
317
- if(not comes_from_automated):
318
- extra_message = "Don't forget to remove the GPU attribution after you play with it."
319
- else:
320
- extra_message = "The GPU has been removed automatically as requested, and you can try the model via the model page"
321
- api.create_discussion(repo_id=os.environ['SPACE_ID'], title=f"Your model {model_name} has finished training in the Dreambooth Training Space!", description=f"Your model has been successfully uploaded to: https://huggingface.co/{model_id}. {extra_message}", repo_type="space", token=hf_token)
322
-
323
- return [gr.update(visible=True, value=f"Successfully uploaded your model. Access it [here](https://huggingface.co/{model_id})"), gr.update(visible=True, value=["diffusers_model.tar", "model.ckpt"])]
324
-
325
- def convert_to_ckpt():
326
- if 'pipe' in globals():
327
- global pipe, pipe_is_set
328
- del pipe
329
- pipe_is_set = False
330
- gc.collect()
331
- convert("output_model", "model.ckpt")
332
- return gr.update(visible=True, value=["diffusers_model.tar", "model.ckpt"])
333
-
334
- def check_status(top_description):
335
- if os.path.exists("hastrained.success"):
336
- if is_spaces:
337
- update_top_tag = gr.update(value=f'''
338
- <div class="gr-prose" style="max-width: 80%">
339
- <h2>Your model has finished training ✅</h2>
340
- <p>Yay, congratulations on training your model. Scroll down to play with it and save it (either by downloading it or pushing it to the Hugging Face Hub). Once you are done and your model is safe, if you don't want to train a new one, go to the <a href="https://huggingface.co/spaces/{os.environ['SPACE_ID']}" target="_blank">settings page</a> and downgrade your Space to a CPU Basic.</p>
341
- </div>
342
- ''')
343
- else:
344
- update_top_tag = gr.update(value=f'''
345
- <div style="text-align: center; max-width: 650px; margin: 0 auto;">
346
- <div>
347
- style="margin: auto; max-width: 7rem;">
348
- <br />
349
- <h1 style="font-weight: 900; font-size: 2.5rem;">
350
- Dreambooth Web UI
351
- </h1>
352
- <br />
353
- <a class="duplicate-button" style="display:inline-block" target="_blank" href="https://huggingface.co/spaces/MirageML/dreambooth?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a>
354
- </div>
355
- <br />
356
- <br />
357
- <h2>Your model has finished training ✅</h2>
358
- <p>Yay, congratulations on training your model. Scroll down to play with it and save it (either by downloading it or pushing it to the Hugging Face Hub).</p>
359
- </div>
360
- ''')
361
- show_outputs = True
362
- elif os.path.exists("intraining.lock"):
363
- update_top_tag = gr.update(value='''
364
- <div style="text-align: center; max-width: 650px; margin: 0 auto;">
365
- <div>
366
- <br />
367
- <h1 style="font-weight: 900; font-size: 2.5rem;">
368
- AI Generated Avatars 🔥
369
- </h1>
370
- <br />
371
- <a class="duplicate-button" style="display:inline-block" target="_blank" href="https://huggingface.co/spaces/MirageML/dreambooth?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a>
372
- </div>
373
- <br />
374
- <br />
375
- <h2>Don't worry, your model is still training! ⌛</h2>
376
- <p>You closed the tab while your model was training, but it's all good! It is still training right now. You can click the "Open logs" button above to check the training status. Once training is done, reload this tab to interact with your model.</p>
377
- </div>
378
- ''')
379
- show_outputs = False
380
- else:
381
- update_top_tag = gr.update(value=top_description)
382
- show_outputs = False
383
- if os.path.exists("diffusers_model.tar"):
384
- update_files_tag = gr.update(visible=show_outputs, value=["diffusers_model.tar"])
385
- else:
386
- update_files_tag = gr.update(visible=show_outputs)
387
- return [
388
- update_top_tag, #top_description
389
- gr.update(visible=show_outputs), #try_your_model
390
- gr.update(visible=show_outputs), #push_to_hub
391
- update_files_tag, #result
392
- gr.update(visible=show_outputs), #convert_button
393
- ]
394
-
395
- def checkbox_swap(checkbox):
396
- return [gr.update(visible=checkbox), gr.update(visible=checkbox), gr.update(visible=checkbox), gr.update(visible=checkbox)]
397
-
398
- with gr.Blocks(css=css) as demo:
399
- with gr.Box():
400
- if is_shared_ui:
401
- top_description = gr.HTML(f'''
402
- <div class="gr-prose" style="max-width: 80%">
403
- <h2>Attention - This Space doesn't work in this shared UI</h2>
404
- <p>For it to work, you can either run locally or duplicate the Space and run it on your own profile using a (paid) private T4 GPU for training. As each T4 costs US$0.60/h, it should cost < US$1 to train most models using default settings!&nbsp;&nbsp;<a class="duplicate-button" style="display:inline-block" target="_blank" href="https://huggingface.co/spaces/MirageML/dreambooth?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a></p>
405
- <img class="instruction" src="file/duplicate.png">
406
- <img class="arrow" src="file/arrow.png" />
407
- </div>
408
- ''')
409
- elif(is_spaces):
410
- if(is_gpu_associated):
411
- top_description = gr.HTML(f'''
412
- <div style="text-align: center; max-width: 650px; margin: 0 auto;">
413
- <div>
414
- style="margin: auto; max-width: 7rem;">
415
- <br />
416
- <h1 style="font-weight: 900; font-size: 2.5rem;">
417
- Dreambooth Web UI
418
- </h1>
419
- <br />
420
- <a class="duplicate-button" style="display:inline-block" target="_blank" href="https://huggingface.co/spaces/MirageML/dreambooth?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a>
421
- </div>
422
- <br />
423
- <br />
424
- <p style="margin-bottom: 10px; font-size: 94%">
425
- Customize Stable Diffusion v1 or v2 by giving it a few examples of a concept.
426
- Based on the <a href="https://github.com/huggingface/diffusers">diffusers</a> implementation, with additional techniques from <a href="https://github.com/TheLastBen/diffusers">TheLastBen</a> and <a href="https://github.com/ShivamShrirao/diffusers">ShivamShrirao</a>
427
- </p>
428
- <br />
429
- <p>There's only one step left before you can train your model: <a href="https://huggingface.co/spaces/{os.environ['SPACE_ID']}/settings" style="text-decoration: underline" target="_blank">attribute a <b>T4 GPU</b> to it (via the Settings tab)</a> and run the training below. Other GPUs are not compatible for now. You will be billed by the minute from when you activate the GPU until it is turned off.</p>
430
- </div>
431
- ''')
432
- else:
433
- top_description = gr.HTML(f'''
434
- <div style="text-align: center; max-width: 650px; margin: 0 auto;">
435
- <div>
436
- <br />
437
- <h1 style="font-weight: 900; font-size: 2.5rem;">
438
- AI Generated Avatars 🔥
439
- </h1>
440
- <br />
441
- <a class="duplicate-button" style="display:inline-block" target="_blank" href="https://huggingface.co/spaces/MirageML/dreambooth?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a>
442
- </div>
443
- <br />
444
- <br />
445
- <p style="margin-bottom: 10px; font-size: 94%">
446
- Customize Stable Diffusion v1 or v2 by giving it a few examples of a concept.
447
- Based on the <a href="https://github.com/huggingface/diffusers">diffusers</a> implementation, with additional techniques from <a href="https://github.com/TheLastBen/diffusers">TheLastBen</a> and <a href="https://github.com/ShivamShrirao/diffusers">ShivamShrirao</a>
448
- </p>
449
- <br />
450
- <p>There's only one step left before you can train your model: <a href="https://huggingface.co/spaces/{os.environ['SPACE_ID']}/settings" style="text-decoration: underline" target="_blank">attribute a <b>T4 GPU</b> to it (via the Settings tab)</a> and run the training below. Other GPUs are not compatible for now. You will be billed by the minute from when you activate the GPU until it is turned off.</p>
451
- </div>
452
- ''')
453
- else:
454
- top_description = gr.HTML(f'''
455
- <div style="text-align: center; max-width: 650px; margin: 0 auto;">
456
- <div>
457
- <br />
458
- <h1 style="font-weight: 900; font-size: 2.5rem;">
459
- AI Generated Avatars 🔥
460
- </h1>
461
- <br />
462
- <a class="duplicate-button" style="display:inline-block" target="_blank" href="https://huggingface.co/spaces/MirageML/dreambooth?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a>
463
- </div>
464
- <br />
465
- <br />
466
- <p style="margin-bottom: 10px; font-size: 94%">
467
- Customize Stable Diffusion v1 or v2 by giving it a few examples of a concept.
468
- Based on the <a href="https://github.com/huggingface/diffusers">diffusers</a> implementation, with additional techniques from <a href="https://github.com/TheLastBen/diffusers">TheLastBen</a> and <a href="https://github.com/ShivamShrirao/diffusers">ShivamShrirao</a>
469
- </p>
470
- </div>
471
- ''')
472
-
473
- #Very hacky approach to emulate dynamically created Gradio components
474
- with gr.Column() as upload_your_concept:
475
- with gr.Column():
476
- thing_description = gr.Markdown("Choose 10 photos of yourself, your partner, or your pet (e.g. a dog or cat) to use as input for the AI avatar generator. The AI will then create personalized avatars that look like you in any style you prefer!")
477
- thing_description = gr.Markdown("Steps:")
478
- thing_description = gr.Markdown("1) Attribute a GPU in the settings tab ⚙️")
479
- thing_description = gr.Markdown("2) Get 10 photos of yourself with no other people in the background 📷")
480
- thing_description = gr.Markdown("3) Upload those photos below 🎞️")
481
- thing_description = gr.Markdown("4) Set your custom prompt as 'name1' and run the training 🏃‍♀️")
482
- thing_description = gr.Markdown("5) Test your prompts and flex on social media 💪")
483
- thing_experimental = gr.Checkbox(label="Improve faces (prior preservation) - can take longer training but can improve faces", visible=False, value=False)
484
- thing_image_example = gr.HTML('''<div style="display:flex;justify-content:center"><img src="file/sofia.png" /></div>''')
485
- things_naming = gr.Markdown("When naming your custom prompt use a made up word that has low chance of the model already knowing it (e.g.: `name1` here). Images will be automatically cropped to 512x512.")
486
-
487
-
488
- file_collection = []
489
- concept_collection = []
490
- buttons_collection = []
491
- delete_collection = []
492
- is_visible = []
493
-
494
- row = [None] * maximum_concepts
495
- for x in range(maximum_concepts):
496
- ordinal = lambda n: "%d%s" % (n, "tsnrhtdd"[(n // 10 % 10 != 1) * (n % 10 < 4) * n % 10::4])
497
- if(x == 0):
498
- visible = True
499
- is_visible.append(gr.State(value=True))
500
- else:
501
- visible = False
502
- is_visible.append(gr.State(value=False))
503
-
504
- file_collection.append(gr.File(label=f'''Upload the images for your {ordinal(x+1) if (x>0) else ""} concept''', file_count="multiple", interactive=True, visible=visible))
505
- with gr.Column(visible=visible) as row[x]:
506
- concept_collection.append(gr.Textbox(label=f'''{ordinal(x+1) if (x>0) else ""} concept prompt - use a unique, made up word to avoid collisions'''))
507
- # with gr.Row():
508
- # if(x < maximum_concepts-1):
509
- # buttons_collection.append(gr.Button(value="Add +1 concept", visible=visible))
510
- # if(x > 0):
511
- # delete_collection.append(gr.Button(value=f"Delete {ordinal(x+1)} concept"))
512
-
513
- counter_add = 1
514
- for button in buttons_collection:
515
- if(counter_add < len(buttons_collection)):
516
- button.click(lambda:
517
- [gr.update(visible=True),gr.update(visible=True), gr.update(visible=False), gr.update(visible=True), True, None],
518
- None,
519
- [row[counter_add], file_collection[counter_add], buttons_collection[counter_add-1], buttons_collection[counter_add], is_visible[counter_add], file_collection[counter_add]], queue=False)
520
- else:
521
- button.click(lambda:[gr.update(visible=True),gr.update(visible=True), gr.update(visible=False), True], None, [row[counter_add], file_collection[counter_add], buttons_collection[counter_add-1], is_visible[counter_add]], queue=False)
522
- counter_add += 1
523
-
524
- counter_delete = 1
525
- for delete_button in delete_collection:
526
- if(counter_delete < len(delete_collection)+1):
527
- delete_button.click(lambda:[gr.update(visible=False),gr.update(visible=False), gr.update(visible=True), False], None, [file_collection[counter_delete], row[counter_delete], buttons_collection[counter_delete-1], is_visible[counter_delete]], queue=False)
528
- counter_delete += 1
529
-
530
-
531
- with gr.Accordion("Custom Settings", open=False):
532
- with gr.Row() as what_are_you_training:
533
- base_model_to_use = gr.Dropdown(label="Which base model would you like to use?", choices=["v1-5", "v2-512", "v2-768"], value="v1-5", interactive=True)
534
-
535
- swap_auto_calculated = gr.Checkbox(label="Use custom settings")
536
- gr.Markdown("If not checked, the % of frozen encoder will be tuned automatically to whether you are training an `object`, `person` or `style`. The text-encoder is frozen after 10% of the steps for a style, 30% of the steps for an object and 75% trained for persons. The number of steps varies between 1400 and 2400 depending on how many images uploaded. If you see too many artifacts in your output, it means it may have overfit and you need less steps. If your results aren't really what you wanted, it may be underfitting and you need more steps.")
537
- steps = gr.Number(label="How many steps", value=2400)
538
- perc_txt_encoder = gr.Number(label="Percentage of the training steps the text-encoder should be trained as well", value=30)
539
-
540
- with gr.Box(visible=False) as training_summary:
541
- training_summary_text = gr.HTML("", visible=True, label="Training Summary")
542
- is_advanced_visible = True if is_spaces else False
543
- training_summary_checkbox = gr.Checkbox(label="Automatically remove paid GPU attribution and upload model to the Hugging Face Hub after training", value=True, visible=is_advanced_visible)
544
- training_summary_model_name = gr.Textbox(label="Name of your model", visible=True)
545
- training_summary_where_to_upload = gr.Dropdown(["My personal profile", "Public Library"], value="My personal profile", label="Upload to", visible=True)
546
- training_summary_token_message = gr.Markdown("[A Hugging Face write access token](https://huggingface.co/settings/tokens), go to \"New token\" -> Role : Write. A regular read token won't work here.", visible=True)
547
- training_summary_token = gr.Textbox(label="Hugging Face Write Token", type="password", visible=True)
548
-
549
- train_btn = gr.Button("Start Training")
550
- if(is_shared_ui):
551
- training_ongoing = gr.Markdown("## This Space only works in duplicated instances. Please duplicate it and try again!", visible=False)
552
- elif(not is_gpu_associated):
553
- training_ongoing = gr.Markdown("## Oops, you haven't associated your T4 GPU to this Space. Visit the Settings tab, associate and try again.", visible=False)
554
- else:
555
- training_ongoing = gr.Markdown("## Training is ongoing ⌛... You can close this tab if you like or just wait. If you did not check the `Remove GPU After training`, you can come back here to try your model and upload it after training. Don't forget to remove the GPU attribution after you are done. ", visible=False)
556
-
557
- #Post-training UI
558
- completed_training = gr.Markdown('''# ✅ Training completed.
559
- ### Don't forget to remove the GPU attribution after you are done trying and uploading your model''', visible=False)
560
-
561
- with gr.Row():
562
- with gr.Box(visible=False) as try_your_model:
563
- gr.Markdown("## Try your model")
564
- prompt = gr.Textbox(label="Type your prompt")
565
- result_image = gr.Image()
566
- inference_steps = gr.Slider(minimum=1, maximum=150, value=50, step=1)
567
- generate_button = gr.Button("Generate Image")
568
-
569
- with gr.Box(visible=False) as push_to_hub:
570
- gr.Markdown("## Push to Hugging Face Hub")
571
- model_name = gr.Textbox(label="Name of your model", placeholder="Tarsila do Amaral Style")
572
- where_to_upload = gr.Dropdown(["My personal profile", "Public Library"], label="Upload to")
573
- gr.Markdown("[A Hugging Face write access token](https://huggingface.co/settings/tokens), go to \"New token\" -> Role : Write. A regular read token won't work here.")
574
- hf_token = gr.Textbox(label="Hugging Face Write Token", type="password")
575
-
576
- push_button = gr.Button("Push to the Hub")
577
-
578
- result = gr.File(label="Download the uploaded models in the diffusers format", visible=True)
579
- success_message_upload = gr.Markdown(visible=False)
580
- convert_button = gr.Button("Convert to CKPT", visible=False)
581
-
582
- #Swap the examples and the % of text encoder trained depending on whether it is an object, person or style
583
-
584
- #Swap the base model
585
- base_model_to_use.change(fn=swap_base_model, inputs=base_model_to_use, outputs=[])
586
-
587
- #Update the summary box below the UI according to how many images are uploaded and whether users are using custom settings or not
588
- for file in file_collection:
589
- #file.change(fn=update_steps,inputs=file_collection, outputs=steps)
590
- file.change(fn=count_files, inputs=file_collection+[thing_experimental]+[base_model_to_use]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[training_summary, training_summary_text], queue=False)
591
-
592
- thing_experimental.change(fn=count_files, inputs=file_collection+[thing_experimental]+[base_model_to_use]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[training_summary, training_summary_text], queue=False)
593
- base_model_to_use.change(fn=count_files, inputs=file_collection+[thing_experimental]+[base_model_to_use]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[training_summary, training_summary_text], queue=False)
594
- steps.change(fn=count_files, inputs=file_collection+[thing_experimental]+[base_model_to_use]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[training_summary, training_summary_text], queue=False)
595
- perc_txt_encoder.change(fn=count_files, inputs=file_collection+[thing_experimental]+[base_model_to_use]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[training_summary, training_summary_text], queue=False)
596
-
597
- #Give more options if the user wants to finish everything after training
598
- if(is_spaces):
599
- training_summary_checkbox.change(fn=checkbox_swap, inputs=training_summary_checkbox, outputs=[training_summary_token_message, training_summary_token, training_summary_model_name, training_summary_where_to_upload],queue=False, show_progress=False)
600
- #Add a message for while it is in training
601
- train_btn.click(lambda:gr.update(visible=True), inputs=None, outputs=training_ongoing)
602
-
603
- #The main train function
604
- train_btn.click(fn=train, inputs=is_visible+concept_collection+file_collection+[base_model_to_use]+[thing_experimental]+[training_summary_where_to_upload]+[training_summary_model_name]+[training_summary_checkbox]+[training_summary_token]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[result, try_your_model, push_to_hub, convert_button, training_ongoing, completed_training], queue=False)
605
-
606
- #Button to generate an image from your trained model after training
607
- generate_button.click(fn=generate, inputs=[prompt, inference_steps], outputs=result_image, queue=False)
608
- #Button to push the model to the Hugging Face Hub
609
- push_button.click(fn=push, inputs=[model_name, where_to_upload, hf_token, base_model_to_use], outputs=[success_message_upload, result], queue=False)
610
- #Button to convert the model to ckpt format
611
- convert_button.click(fn=convert_to_ckpt, inputs=[], outputs=result, queue=False)
612
-
613
- #Checks if the training is running
614
- demo.load(fn=check_status, inputs=top_description, outputs=[top_description, try_your_model, push_to_hub, result, convert_button], queue=False, show_progress=False)
615
-
616
- demo.queue(default_enabled=False).launch(debug=True)
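For orientation, the automatic schedule in the `train` function above allots 150 steps per uploaded image and trains the text encoder for 30% of those steps when custom settings are left unchecked. A minimal sketch of that arithmetic, assuming an illustrative 20 uploaded images (the count is an assumption, not a value from the code):

```python
# Illustrative only: mirrors the default (non-custom) path in train() above.
file_counter = 20                      # assumed number of uploaded images
Training_Steps = file_counter * 150    # 3000 total training steps
Train_text_encoder_for = 30            # percent of steps, default value
stptxt = int((Training_Steps * Train_text_encoder_for) / 100)  # 900 text-encoder steps
print(Training_Steps, stptxt)
```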
 
spaces/ASJMO/freegpt/g4f/Provider/Providers/Gravityengine.py DELETED
@@ -1,27 +0,0 @@
- import requests
- import os
- import json
- from ...typing import sha256, Dict, get_type_hints
-
- url = 'https://gpt4.xunika.uk/'
- model = ['gpt-3.5-turbo-16k', 'gpt-3.5-turbo-0613']
- supports_stream = True
- needs_auth = False
-
- def _create_completion(model: str, messages: list, stream: bool, temperature: float = 0.7, **kwargs):
-     headers = {
-         'Content-Type': 'application/json',
-     }
-     data = {
-         'model': model,
-         'temperature': 0.7,
-         'presence_penalty': 0,
-         'messages': messages,
-     }
-     response = requests.post(url + '/api/openai/v1/chat/completions',
-                              json=data, stream=True)
-
-     yield response.json()['choices'][0]['message']['content']
-
- params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
-     '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
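A minimal usage sketch for the provider above. The message format mirrors the OpenAI-style schema the code posts to the endpoint; treating the module as importable and the choice of model name are assumptions for illustration, not part of the repository.

```python
# Hypothetical usage: _create_completion is a generator, so iterate over it,
# even though this implementation yields a single chunk.
messages = [{"role": "user", "content": "Hello!"}]
for chunk in _create_completion(model="gpt-3.5-turbo-16k", messages=messages, stream=True):
    print(chunk)
```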
 
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/__init__.py DELETED
File without changes
spaces/AchyuthGamer/OpenGPT/g4f/Provider/Opchatgpts.py DELETED
@@ -1,8 +0,0 @@
- from __future__ import annotations
-
- from .ChatgptLogin import ChatgptLogin
-
-
- class Opchatgpts(ChatgptLogin):
-     url = "https://opchatgpts.net"
-     working = True
 
spaces/Adam111/stable-diffusion-webui/app.py DELETED
@@ -1,76 +0,0 @@
1
- import os
2
- from subprocess import getoutput
3
-
4
- gpu_info = getoutput('nvidia-smi')
5
- if("A10G" in gpu_info):
6
- os.system(f"pip install -q https://github.com/camenduru/stable-diffusion-webui-colab/releases/download/0.0.15/xformers-0.0.15.dev0+4c06c79.d20221205-cp38-cp38-linux_x86_64.whl")
7
- elif("T4" in gpu_info):
8
- os.system(f"pip install -q https://github.com/camenduru/stable-diffusion-webui-colab/releases/download/0.0.15/xformers-0.0.15.dev0+1515f77.d20221130-cp38-cp38-linux_x86_64.whl")
9
-
10
- os.system(f"git clone https://github.com/camenduru/stable-diffusion-webui /home/user/app/stable-diffusion-webui")
11
- os.chdir("/home/user/app/stable-diffusion-webui")
12
-
13
- os.system(f"wget -q https://github.com/camenduru/webui/raw/main/env_patch.py -O /home/user/app/env_patch.py")
14
- os.system(f"sed -i -e '/import image_from_url_text/r /home/user/app/env_patch.py' /home/user/app/stable-diffusion-webui/modules/ui.py")
15
- os.system(f"sed -i -e '/(modelmerger_interface, \"Checkpoint Merger\", \"modelmerger\"),/d' /home/user/app/stable-diffusion-webui/modules/ui.py")
16
- os.system(f"sed -i -e '/(train_interface, \"Train\", \"ti\"),/d' /home/user/app/stable-diffusion-webui/modules/ui.py")
17
- os.system(f"sed -i -e '/extensions_interface, \"Extensions\", \"extensions\"/d' /home/user/app/stable-diffusion-webui/modules/ui.py")
18
- os.system(f"sed -i -e '/settings_interface, \"Settings\", \"settings\"/d' /home/user/app/stable-diffusion-webui/modules/ui.py")
19
- os.system(f'''sed -i -e "s/document.getElementsByTagName('gradio-app')\[0\].shadowRoot/!!document.getElementsByTagName('gradio-app')[0].shadowRoot ? document.getElementsByTagName('gradio-app')[0].shadowRoot : document/g" /home/user/app/stable-diffusion-webui/script.js''')
20
- os.system(f"sed -i -e 's/ show_progress=False,/ show_progress=True,/g' /home/user/app/stable-diffusion-webui/modules/ui.py")
21
- os.system(f"sed -i -e 's/shared.demo.launch/shared.demo.queue().launch/g' /home/user/app/stable-diffusion-webui/webui.py")
22
- os.system(f"sed -i -e 's/ outputs=\[/queue=False, &/g' /home/user/app/stable-diffusion-webui/modules/ui.py")
23
- os.system(f"sed -i -e 's/ queue=False, / /g' /home/user/app/stable-diffusion-webui/modules/ui.py")
24
-
25
- # ----------------------------Please duplicate this space and delete this block if you don't want to see the extra header----------------------------
26
- os.system(f"wget -q https://github.com/camenduru/webui/raw/main/header_patch.py -O /home/user/app/header_patch.py")
27
- os.system(f"sed -i -e '/demo:/r /home/user/app/header_patch.py' /home/user/app/stable-diffusion-webui/modules/ui.py")
28
- # ---------------------------------------------------------------------------------------------------------------------------------------------------
29
-
30
- if "IS_SHARED_UI" in os.environ:
31
- os.system(f"rm -rfv /home/user/app/stable-diffusion-webui/scripts/")
32
-
33
- os.system(f"wget -q https://github.com/camenduru/webui/raw/main/shared-config.json -O /home/user/app/shared-config.json")
34
- os.system(f"wget -q https://github.com/camenduru/webui/raw/main/shared-ui-config.json -O /home/user/app/shared-ui-config.json")
35
-
36
- os.system(f"wget -q {os.getenv('MODEL_LINK')} -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/{os.getenv('MODEL_NAME')}")
37
- os.system(f"wget -q {os.getenv('VAE_LINK')} -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/{os.getenv('VAE_NAME')}")
38
- os.system(f"wget -q {os.getenv('YAML_LINK')} -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/{os.getenv('YAML_NAME')}")
39
-
40
- os.system(f"python launch.py --force-enable-xformers --disable-console-progressbars --enable-console-prompts --ui-config-file /home/user/app/shared-ui-config.json --ui-settings-file /home/user/app/shared-config.json --cors-allow-origins huggingface.co,hf.space --no-progressbar-hiding")
41
- else:
42
- # Please duplicate this space and delete # character in front of the custom script you want to use or add here more custom scripts with same structure os.system(f"wget -q https://CUSTOM_SCRIPT_URL -O /home/user/app/stable-diffusion-webui/scripts/CUSTOM_SCRIPT_NAME.py")
43
- os.system(f"wget -q https://gist.github.com/camenduru/9ec5f8141db9902e375967e93250860f/raw/d0bcf01786f20107c329c03f8968584ee67be12a/run_n_times.py -O /home/user/app/stable-diffusion-webui/scripts/run_n_times.py")
44
-
45
- # Please duplicate this space and delete # character in front of the extension you want to use or add here more extensions with same structure os.system(f"git clone https://EXTENSION_GIT_URL /home/user/app/stable-diffusion-webui/extensions/EXTENSION_NAME")
46
- #os.system(f"git clone https://github.com/camenduru/stable-diffusion-webui-artists-to-study /home/user/app/stable-diffusion-webui/extensions/stable-diffusion-webui-artists-to-study")
47
- os.system(f"git clone https://github.com/yfszzx/stable-diffusion-webui-images-browser /home/user/app/stable-diffusion-webui/extensions/stable-diffusion-webui-images-browser")
48
- os.system(f"git clone https://github.com/deforum-art/deforum-for-automatic1111-webui /home/user/app/stable-diffusion-webui/extensions/deforum-for-automatic1111-webui")
49
-
50
- # Please duplicate this space and delete # character in front of the model you want to use or add here more ckpts with same structure os.system(f"wget -q https://CKPT_URL -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/CKPT_NAME.ckpt")
51
- #os.system(f"wget -q https://huggingface.co/nitrosocke/Arcane-Diffusion/resolve/main/arcane-diffusion-v3.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/arcane-diffusion-v3.ckpt")
52
- #os.system(f"wget -q https://huggingface.co/DGSpitzer/Cyberpunk-Anime-Diffusion/resolve/main/Cyberpunk-Anime-Diffusion.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/Cyberpunk-Anime-Diffusion.ckpt")
53
- #os.system(f"wget -q https://huggingface.co/prompthero/midjourney-v4-diffusion/resolve/main/mdjrny-v4.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/mdjrny-v4.ckpt")
54
- #os.system(f"wget -q https://huggingface.co/nitrosocke/mo-di-diffusion/resolve/main/moDi-v1-pruned.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/moDi-v1-pruned.ckpt")
55
- #os.system(f"wget -q https://huggingface.co/Fictiverse/Stable_Diffusion_PaperCut_Model/resolve/main/PaperCut_v1.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/PaperCut_v1.ckpt")
56
- #os.system(f"wget -q https://huggingface.co/lilpotat/sa/resolve/main/samdoesarts_style.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/samdoesarts_style.ckpt")
57
- #os.system(f"wget -q https://huggingface.co/hakurei/waifu-diffusion-v1-3/resolve/main/wd-v1-3-float32.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/wd-v1-3-float32.ckpt")
58
- #os.system(f"wget -q https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/sd-v1-4.ckpt")
59
- #os.system(f"wget -q https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/v1-5-pruned-emaonly.ckpt")
60
- #os.system(f"wget -q https://huggingface.co/runwayml/stable-diffusion-inpainting/resolve/main/sd-v1-5-inpainting.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/sd-v1-5-inpainting.ckpt")
61
-
62
- #os.system(f"wget -q https://huggingface.co/Linaqruf/anything-v3.0/resolve/main/Anything-V3.0-pruned.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/Anything-V3.0-pruned.ckpt")
63
- #os.system(f"wget -q https://huggingface.co/Linaqruf/anything-v3.0/resolve/main/Anything-V3.0.vae.pt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/Anything-V3.0-pruned.vae.pt")
64
-
65
- #os.system(f"wget -q https://huggingface.co/stabilityai/stable-diffusion-2/resolve/main/768-v-ema.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/768-v-ema.ckpt")
66
- os.system(f"wget -q https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/v2-inference-v.yaml -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/768-v-ema.yaml")
67
- os.system(f"wget -q https://r2.kamiya-b.me/dreambooth_lib/akakura-sn.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/akakura-sn.ckpt")
68
- os.system(f"wget -q https://huggingface.co/stabilityai/stable-diffusion-2-1/resolve/main/v2-1_768-ema-pruned.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/v2-1_768-ema-pruned.ckpt")
69
- os.system(f"wget -q https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/v2-inference-v.yaml -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/v2-1_768-ema-pruned.yaml")
70
-
71
- os.system(f"wget -q {os.getenv('MODEL_LINK')} -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/{os.getenv('MODEL_NAME')}")
72
- os.system(f"wget -q {os.getenv('VAE_LINK')} -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/{os.getenv('VAE_NAME')}")
73
- os.system(f"wget -q {os.getenv('YAML_LINK')} -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/{os.getenv('YAML_NAME')}")
74
-
75
- os.system(f"python launch.py --force-enable-xformers --ui-config-file /home/user/app/ui-config.json --ui-settings-file /home/user/app/config.json --disable-console-progressbars --enable-console-prompts --cors-allow-origins huggingface.co,hf.space --no-progressbar-hiding --api --skip-torch-cuda-test")
76
-
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/bars/Bars.d.ts DELETED
@@ -1,2 +0,0 @@
- import Base from '../base/Base';
- export default class Bars extends Base { }
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/slider/UpdateThumb.js DELETED
@@ -1,26 +0,0 @@
- import PercentToPosition from './PercentToPosition.js';
-
- var UpdateThumb = function (t) {
-     var thumb = this.childrenMap.thumb;
-     if (thumb === undefined) {
-         return this;
-     }
-
-     if (t === undefined) {
-         t = this.value;
-     }
-
-     var startPoint, endPoint;
-     if (!this.reverseAxis) {
-         startPoint = this.getStartPoint();
-         endPoint = this.getEndPoint();
-     } else {
-         startPoint = this.getEndPoint();
-         endPoint = this.getStartPoint();
-     }
-     PercentToPosition(t, startPoint, endPoint, thumb);
-     this.resetChildPositionState(thumb);
-     return this;
- }
-
- export default UpdateThumb;
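`PercentToPosition` is imported from a sibling module that is not part of this diff; the sketch below assumes it performs a plain linear interpolation between the slider's start and end points, which is what `UpdateThumb` relies on to place the thumb for a value `t` in [0, 1].

```python
# Assumed behaviour of PercentToPosition: linear interpolation between two points.
def percent_to_position(t, start_point, end_point):
    x = start_point[0] + (end_point[0] - start_point[0]) * t
    y = start_point[1] + (end_point[1] - start_point[1]) * t
    return (x, y)

# t = 0 sits on the start point, t = 1 on the end point, t = 0.5 halfway between.
print(percent_to_position(0.5, (0, 0), (200, 0)))  # (100.0, 0.0)
```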
 
spaces/Akmyradov/chatbot_testing/app.py DELETED
@@ -1,25 +0,0 @@
- import random
- import gradio as gr
-
- def chat(message, history):
-     history = history or []
-     message = message.lower()
-     if message.startswith("how many"):
-         response = random.randint(1, 10)
-     elif message.startswith("how"):
-         response = random.choice(["Great", "Good", "Okay", "Bad"])
-     elif message.startswith("where"):
-         response = random.choice(["Here", "There", "Somewhere"])
-     else:
-         response = "I don't know"
-     history.append((message, response))
-     return history, history
-
- chatbot = gr.Chatbot().style(color_map=("green", "pink"))
- demo = gr.Interface(
-     chat,
-     ["text", "state"],
-     [chatbot, "state"],
-     allow_flagging="never",
- )
- demo.launch()
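The "state" input/output pair above is what persists the conversation between calls: `chat` returns the updated history twice, once for the chatbot display and once to be fed back in as state. A small illustration of that round trip, assuming the `chat` function above is importable:

```python
# Illustration only: the state value round-trips through chat(), accumulating turns.
history = []
history, _ = chat("how are you", history)    # appends e.g. ("how are you", "Good")
history, _ = chat("where are you", history)  # appends e.g. ("where are you", "Here")
print(len(history))  # 2 (message, response) pairs
```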
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/lpw_stable_diffusion.py DELETED
@@ -1,1470 +0,0 @@
1
- import inspect
2
- import re
3
- from typing import Any, Callable, Dict, List, Optional, Union
4
-
5
- import numpy as np
6
- import PIL
7
- import torch
8
- from packaging import version
9
- from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
10
-
11
- from diffusers import DiffusionPipeline
12
- from diffusers.configuration_utils import FrozenDict
13
- from diffusers.image_processor import VaeImageProcessor
14
- from diffusers.loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
15
- from diffusers.models import AutoencoderKL, UNet2DConditionModel
16
- from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker
17
- from diffusers.schedulers import KarrasDiffusionSchedulers
18
- from diffusers.utils import (
19
- PIL_INTERPOLATION,
20
- deprecate,
21
- is_accelerate_available,
22
- is_accelerate_version,
23
- logging,
24
- randn_tensor,
25
- )
26
-
27
-
28
- # ------------------------------------------------------------------------------
29
-
30
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
31
-
32
- re_attention = re.compile(
33
- r"""
34
- \\\(|
35
- \\\)|
36
- \\\[|
37
- \\]|
38
- \\\\|
39
- \\|
40
- \(|
41
- \[|
42
- :([+-]?[.\d]+)\)|
43
- \)|
44
- ]|
45
- [^\\()\[\]:]+|
46
- :
47
- """,
48
- re.X,
49
- )
50
-
51
-
52
- def parse_prompt_attention(text):
53
- """
54
- Parses a string with attention tokens and returns a list of pairs: text and its associated weight.
55
- Accepted tokens are:
56
- (abc) - increases attention to abc by a multiplier of 1.1
57
- (abc:3.12) - increases attention to abc by a multiplier of 3.12
58
- [abc] - decreases attention to abc by a multiplier of 1.1
59
- \( - literal character '('
60
- \[ - literal character '['
61
- \) - literal character ')'
62
- \] - literal character ']'
63
- \\ - literal character '\'
64
- anything else - just text
65
- >>> parse_prompt_attention('normal text')
66
- [['normal text', 1.0]]
67
- >>> parse_prompt_attention('an (important) word')
68
- [['an ', 1.0], ['important', 1.1], [' word', 1.0]]
69
- >>> parse_prompt_attention('(unbalanced')
70
- [['unbalanced', 1.1]]
71
- >>> parse_prompt_attention('\(literal\]')
72
- [['(literal]', 1.0]]
73
- >>> parse_prompt_attention('(unnecessary)(parens)')
74
- [['unnecessaryparens', 1.1]]
75
- >>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).')
76
- [['a ', 1.0],
77
- ['house', 1.5730000000000004],
78
- [' ', 1.1],
79
- ['on', 1.0],
80
- [' a ', 1.1],
81
- ['hill', 0.55],
82
- [', sun, ', 1.1],
83
- ['sky', 1.4641000000000006],
84
- ['.', 1.1]]
85
- """
86
-
87
- res = []
88
- round_brackets = []
89
- square_brackets = []
90
-
91
- round_bracket_multiplier = 1.1
92
- square_bracket_multiplier = 1 / 1.1
93
-
94
- def multiply_range(start_position, multiplier):
95
- for p in range(start_position, len(res)):
96
- res[p][1] *= multiplier
97
-
98
- for m in re_attention.finditer(text):
99
- text = m.group(0)
100
- weight = m.group(1)
101
-
102
- if text.startswith("\\"):
103
- res.append([text[1:], 1.0])
104
- elif text == "(":
105
- round_brackets.append(len(res))
106
- elif text == "[":
107
- square_brackets.append(len(res))
108
- elif weight is not None and len(round_brackets) > 0:
109
- multiply_range(round_brackets.pop(), float(weight))
110
- elif text == ")" and len(round_brackets) > 0:
111
- multiply_range(round_brackets.pop(), round_bracket_multiplier)
112
- elif text == "]" and len(square_brackets) > 0:
113
- multiply_range(square_brackets.pop(), square_bracket_multiplier)
114
- else:
115
- res.append([text, 1.0])
116
-
117
- for pos in round_brackets:
118
- multiply_range(pos, round_bracket_multiplier)
119
-
120
- for pos in square_brackets:
121
- multiply_range(pos, square_bracket_multiplier)
122
-
123
- if len(res) == 0:
124
- res = [["", 1.0]]
125
-
126
- # merge runs of identical weights
127
- i = 0
128
- while i + 1 < len(res):
129
- if res[i][1] == res[i + 1][1]:
130
- res[i][0] += res[i + 1][0]
131
- res.pop(i + 1)
132
- else:
133
- i += 1
134
-
135
- return res
136
-
137
-
138
- def get_prompts_with_weights(pipe: DiffusionPipeline, prompt: List[str], max_length: int):
139
- r"""
140
- Tokenize a list of prompts and return its tokens with weights of each token.
141
-
142
- No padding, starting or ending token is included.
143
- """
144
- tokens = []
145
- weights = []
146
- truncated = False
147
- for text in prompt:
148
- texts_and_weights = parse_prompt_attention(text)
149
- text_token = []
150
- text_weight = []
151
- for word, weight in texts_and_weights:
152
- # tokenize and discard the starting and the ending token
153
- token = pipe.tokenizer(word).input_ids[1:-1]
154
- text_token += token
155
- # copy the weight by length of token
156
- text_weight += [weight] * len(token)
157
- # stop if the text is too long (longer than truncation limit)
158
- if len(text_token) > max_length:
159
- truncated = True
160
- break
161
- # truncate
162
- if len(text_token) > max_length:
163
- truncated = True
164
- text_token = text_token[:max_length]
165
- text_weight = text_weight[:max_length]
166
- tokens.append(text_token)
167
- weights.append(text_weight)
168
- if truncated:
169
- logger.warning("Prompt was truncated. Try to shorten the prompt or increase max_embeddings_multiples")
170
- return tokens, weights
171
-
172
-
173
- def pad_tokens_and_weights(tokens, weights, max_length, bos, eos, pad, no_boseos_middle=True, chunk_length=77):
174
- r"""
175
- Pad the tokens (with starting and ending tokens) and weights (with 1.0) to max_length.
176
- """
177
- max_embeddings_multiples = (max_length - 2) // (chunk_length - 2)
178
- weights_length = max_length if no_boseos_middle else max_embeddings_multiples * chunk_length
179
- for i in range(len(tokens)):
180
- tokens[i] = [bos] + tokens[i] + [pad] * (max_length - 1 - len(tokens[i]) - 1) + [eos]
181
- if no_boseos_middle:
182
- weights[i] = [1.0] + weights[i] + [1.0] * (max_length - 1 - len(weights[i]))
183
- else:
184
- w = []
185
- if len(weights[i]) == 0:
186
- w = [1.0] * weights_length
187
- else:
188
- for j in range(max_embeddings_multiples):
189
- w.append(1.0) # weight for starting token in this chunk
190
- w += weights[i][j * (chunk_length - 2) : min(len(weights[i]), (j + 1) * (chunk_length - 2))]
191
- w.append(1.0) # weight for ending token in this chunk
192
- w += [1.0] * (weights_length - len(w))
193
- weights[i] = w[:]
194
-
195
- return tokens, weights
196
-
197
-
198
- def get_unweighted_text_embeddings(
199
- pipe: DiffusionPipeline,
200
- text_input: torch.Tensor,
201
- chunk_length: int,
202
- no_boseos_middle: Optional[bool] = True,
203
- ):
204
- """
205
- When the length of tokens is a multiple of the capacity of the text encoder,
206
- it should be split into chunks and sent to the text encoder individually.
207
- """
208
- max_embeddings_multiples = (text_input.shape[1] - 2) // (chunk_length - 2)
209
- if max_embeddings_multiples > 1:
210
- text_embeddings = []
211
- for i in range(max_embeddings_multiples):
212
- # extract the i-th chunk
213
- text_input_chunk = text_input[:, i * (chunk_length - 2) : (i + 1) * (chunk_length - 2) + 2].clone()
214
-
215
- # cover the head and the tail by the starting and the ending tokens
216
- text_input_chunk[:, 0] = text_input[0, 0]
217
- text_input_chunk[:, -1] = text_input[0, -1]
218
- text_embedding = pipe.text_encoder(text_input_chunk)[0]
219
-
220
- if no_boseos_middle:
221
- if i == 0:
222
- # discard the ending token
223
- text_embedding = text_embedding[:, :-1]
224
- elif i == max_embeddings_multiples - 1:
225
- # discard the starting token
226
- text_embedding = text_embedding[:, 1:]
227
- else:
228
- # discard both starting and ending tokens
229
- text_embedding = text_embedding[:, 1:-1]
230
-
231
- text_embeddings.append(text_embedding)
232
- text_embeddings = torch.concat(text_embeddings, axis=1)
233
- else:
234
- text_embeddings = pipe.text_encoder(text_input)[0]
235
- return text_embeddings
236
-
237
-
238
- def get_weighted_text_embeddings(
239
- pipe: DiffusionPipeline,
240
- prompt: Union[str, List[str]],
241
- uncond_prompt: Optional[Union[str, List[str]]] = None,
242
- max_embeddings_multiples: Optional[int] = 3,
243
- no_boseos_middle: Optional[bool] = False,
244
- skip_parsing: Optional[bool] = False,
245
- skip_weighting: Optional[bool] = False,
246
- ):
247
- r"""
248
- Prompts can be assigned with local weights using brackets. For example,
249
- prompt 'A (very beautiful) masterpiece' highlights the words 'very beautiful',
250
- and the embedding tokens corresponding to the words get multiplied by a constant, 1.1.
251
-
252
- Also, to regularize of the embedding, the weighted embedding would be scaled to preserve the original mean.
253
-
254
- Args:
255
- pipe (`DiffusionPipeline`):
256
- Pipe to provide access to the tokenizer and the text encoder.
257
- prompt (`str` or `List[str]`):
258
- The prompt or prompts to guide the image generation.
259
- uncond_prompt (`str` or `List[str]`):
260
- The unconditional prompt or prompts for guide the image generation. If unconditional prompt
261
- is provided, the embeddings of prompt and uncond_prompt are concatenated.
262
- max_embeddings_multiples (`int`, *optional*, defaults to `3`):
263
- The max multiple length of prompt embeddings compared to the max output length of text encoder.
264
- no_boseos_middle (`bool`, *optional*, defaults to `False`):
265
- If the length of text token is multiples of the capacity of text encoder, whether reserve the starting and
266
- ending token in each of the chunk in the middle.
267
- skip_parsing (`bool`, *optional*, defaults to `False`):
268
- Skip the parsing of brackets.
269
- skip_weighting (`bool`, *optional*, defaults to `False`):
270
- Skip the weighting. When the parsing is skipped, it is forced True.
271
- """
272
- max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2
273
- if isinstance(prompt, str):
274
- prompt = [prompt]
275
-
276
- if not skip_parsing:
277
- prompt_tokens, prompt_weights = get_prompts_with_weights(pipe, prompt, max_length - 2)
278
- if uncond_prompt is not None:
279
- if isinstance(uncond_prompt, str):
280
- uncond_prompt = [uncond_prompt]
281
- uncond_tokens, uncond_weights = get_prompts_with_weights(pipe, uncond_prompt, max_length - 2)
282
- else:
283
- prompt_tokens = [
284
- token[1:-1] for token in pipe.tokenizer(prompt, max_length=max_length, truncation=True).input_ids
285
- ]
286
- prompt_weights = [[1.0] * len(token) for token in prompt_tokens]
287
- if uncond_prompt is not None:
288
- if isinstance(uncond_prompt, str):
289
- uncond_prompt = [uncond_prompt]
290
- uncond_tokens = [
291
- token[1:-1]
292
- for token in pipe.tokenizer(uncond_prompt, max_length=max_length, truncation=True).input_ids
293
- ]
294
- uncond_weights = [[1.0] * len(token) for token in uncond_tokens]
295
-
296
- # round up the longest length of tokens to a multiple of (model_max_length - 2)
297
- max_length = max([len(token) for token in prompt_tokens])
298
- if uncond_prompt is not None:
299
- max_length = max(max_length, max([len(token) for token in uncond_tokens]))
300
-
301
- max_embeddings_multiples = min(
302
- max_embeddings_multiples,
303
- (max_length - 1) // (pipe.tokenizer.model_max_length - 2) + 1,
304
- )
305
- max_embeddings_multiples = max(1, max_embeddings_multiples)
306
- max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2
307
-
308
- # pad the length of tokens and weights
309
- bos = pipe.tokenizer.bos_token_id
310
- eos = pipe.tokenizer.eos_token_id
311
- pad = getattr(pipe.tokenizer, "pad_token_id", eos)
312
- prompt_tokens, prompt_weights = pad_tokens_and_weights(
313
- prompt_tokens,
314
- prompt_weights,
315
- max_length,
316
- bos,
317
- eos,
318
- pad,
319
- no_boseos_middle=no_boseos_middle,
320
- chunk_length=pipe.tokenizer.model_max_length,
321
- )
322
- prompt_tokens = torch.tensor(prompt_tokens, dtype=torch.long, device=pipe.device)
323
- if uncond_prompt is not None:
324
- uncond_tokens, uncond_weights = pad_tokens_and_weights(
325
- uncond_tokens,
326
- uncond_weights,
327
- max_length,
328
- bos,
329
- eos,
330
- pad,
331
- no_boseos_middle=no_boseos_middle,
332
- chunk_length=pipe.tokenizer.model_max_length,
333
- )
334
- uncond_tokens = torch.tensor(uncond_tokens, dtype=torch.long, device=pipe.device)
335
-
336
- # get the embeddings
337
- text_embeddings = get_unweighted_text_embeddings(
338
- pipe,
339
- prompt_tokens,
340
- pipe.tokenizer.model_max_length,
341
- no_boseos_middle=no_boseos_middle,
342
- )
343
- prompt_weights = torch.tensor(prompt_weights, dtype=text_embeddings.dtype, device=text_embeddings.device)
344
- if uncond_prompt is not None:
345
- uncond_embeddings = get_unweighted_text_embeddings(
346
- pipe,
347
- uncond_tokens,
348
- pipe.tokenizer.model_max_length,
349
- no_boseos_middle=no_boseos_middle,
350
- )
351
- uncond_weights = torch.tensor(uncond_weights, dtype=uncond_embeddings.dtype, device=uncond_embeddings.device)
352
-
353
- # assign weights to the prompts and normalize in the sense of mean
354
- # TODO: should we normalize by chunk or in a whole (current implementation)?
355
- if (not skip_parsing) and (not skip_weighting):
356
- previous_mean = text_embeddings.float().mean(axis=[-2, -1]).to(text_embeddings.dtype)
357
- text_embeddings *= prompt_weights.unsqueeze(-1)
358
- current_mean = text_embeddings.float().mean(axis=[-2, -1]).to(text_embeddings.dtype)
359
- text_embeddings *= (previous_mean / current_mean).unsqueeze(-1).unsqueeze(-1)
360
- if uncond_prompt is not None:
361
- previous_mean = uncond_embeddings.float().mean(axis=[-2, -1]).to(uncond_embeddings.dtype)
362
- uncond_embeddings *= uncond_weights.unsqueeze(-1)
363
- current_mean = uncond_embeddings.float().mean(axis=[-2, -1]).to(uncond_embeddings.dtype)
364
- uncond_embeddings *= (previous_mean / current_mean).unsqueeze(-1).unsqueeze(-1)
365
-
366
- if uncond_prompt is not None:
367
- return text_embeddings, uncond_embeddings
368
- return text_embeddings, None
369
-
370
-
371
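The function above parses bracket weighting syntax such as `(word:1.3)` and stitches together embeddings for prompts longer than the tokenizer's 77-token window. A minimal sketch of calling it directly, assuming `pipe` is an already-loaded pipeline exposing `tokenizer`, `text_encoder` and `device` (the variable name and prompt text are illustrative):

    # Sketch only: weighted embeddings for a prompt that spans several 77-token chunks.
    long_prompt = "a photo of a (red:1.3) vintage car by the sea, (intricate details:1.2), " * 8
    text_embeddings, uncond_embeddings = get_weighted_text_embeddings(
        pipe=pipe,
        prompt=long_prompt,
        uncond_prompt="blurry, (low quality:1.4)",
        max_embeddings_multiples=3,  # allow up to 3 x 77-token chunks
    )
    # text_embeddings.shape is roughly (1, 3 * 75 + 2, hidden_dim) when the prompt fills three chunks.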
- def preprocess_image(image, batch_size):
372
- w, h = image.size
373
- w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
374
- image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
375
- image = np.array(image).astype(np.float32) / 255.0
376
- image = np.vstack([image[None].transpose(0, 3, 1, 2)] * batch_size)
377
- image = torch.from_numpy(image)
378
- return 2.0 * image - 1.0
379
-
380
-
381
- def preprocess_mask(mask, batch_size, scale_factor=8):
382
- if not isinstance(mask, torch.FloatTensor):
383
- mask = mask.convert("L")
384
- w, h = mask.size
385
- w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
386
- mask = mask.resize((w // scale_factor, h // scale_factor), resample=PIL_INTERPOLATION["nearest"])
387
- mask = np.array(mask).astype(np.float32) / 255.0
388
- mask = np.tile(mask, (4, 1, 1))
389
- mask = np.vstack([mask[None]] * batch_size)
390
- mask = 1 - mask # repaint white, keep black
391
- mask = torch.from_numpy(mask)
392
- return mask
393
-
394
- else:
395
- valid_mask_channel_sizes = [1, 3]
396
- # if mask channel is fourth tensor dimension, permute dimensions to pytorch standard (B, C, H, W)
397
- if mask.shape[3] in valid_mask_channel_sizes:
398
- mask = mask.permute(0, 3, 1, 2)
399
- elif mask.shape[1] not in valid_mask_channel_sizes:
400
- raise ValueError(
401
- f"Mask channel dimension of size in {valid_mask_channel_sizes} should be second or fourth dimension,"
402
- f" but received mask of shape {tuple(mask.shape)}"
403
- )
404
- # (potentially) reduce mask channel dimension from 3 to 1 for broadcasting to latent shape
405
- mask = mask.mean(dim=1, keepdim=True)
406
- h, w = mask.shape[-2:]
407
- h, w = (x - x % 8 for x in (h, w)) # resize to integer multiple of 8
408
- mask = torch.nn.functional.interpolate(mask, (h // scale_factor, w // scale_factor))
409
- return mask
410
-
411
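The two helpers above crop inputs to multiples of 8 and invert the mask so that white pixels mark the region to repaint. A small sketch of how they might be used to prepare inpainting inputs (the file names are placeholders):

    # Sketch only: prepare an init image and a mask for the inpainting path.
    from PIL import Image

    init_image = Image.open("photo.png").convert("RGB")   # placeholder path
    mask = Image.open("mask.png").convert("L")             # white = repaint, black = keep
    image_tensor = preprocess_image(init_image, batch_size=1)          # (1, 3, H, W), values in [-1, 1]
    mask_tensor = preprocess_mask(mask, batch_size=1, scale_factor=8)  # (1, 4, H//8, W//8), 1 = keep, 0 = repaint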
-
412
- class StableDiffusionLongPromptWeightingPipeline(
413
- DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin
414
- ):
415
- r"""
416
- Pipeline for text-to-image generation using Stable Diffusion without a token length limit, with support for parsing
417
- weighting in the prompt.
418
-
419
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
420
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
421
-
422
- Args:
423
- vae ([`AutoencoderKL`]):
424
- Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
425
- text_encoder ([`CLIPTextModel`]):
426
- Frozen text-encoder. Stable Diffusion uses the text portion of
427
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
428
- the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
429
- tokenizer (`CLIPTokenizer`):
430
- Tokenizer of class
431
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
432
- unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
433
- scheduler ([`SchedulerMixin`]):
434
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
435
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
436
- safety_checker ([`StableDiffusionSafetyChecker`]):
437
- Classification module that estimates whether generated images could be considered offensive or harmful.
438
- Please, refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details.
439
- feature_extractor ([`CLIPImageProcessor`]):
440
- Model that extracts features from generated images to be used as inputs for the `safety_checker`.
441
- """
442
-
443
- _optional_components = ["safety_checker", "feature_extractor"]
444
-
445
- def __init__(
446
- self,
447
- vae: AutoencoderKL,
448
- text_encoder: CLIPTextModel,
449
- tokenizer: CLIPTokenizer,
450
- unet: UNet2DConditionModel,
451
- scheduler: KarrasDiffusionSchedulers,
452
- safety_checker: StableDiffusionSafetyChecker,
453
- feature_extractor: CLIPImageProcessor,
454
- requires_safety_checker: bool = True,
455
- ):
456
- super().__init__()
457
-
458
- if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
459
- deprecation_message = (
460
- f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
461
- f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
462
- "to update the config accordingly as leaving `steps_offset` might lead to incorrect results"
463
- " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
464
- " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
465
- " file"
466
- )
467
- deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
468
- new_config = dict(scheduler.config)
469
- new_config["steps_offset"] = 1
470
- scheduler._internal_dict = FrozenDict(new_config)
471
-
472
- if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
473
- deprecation_message = (
474
- f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
475
- " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
476
- " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
477
- " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
478
- " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
479
- )
480
- deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
481
- new_config = dict(scheduler.config)
482
- new_config["clip_sample"] = False
483
- scheduler._internal_dict = FrozenDict(new_config)
484
-
485
- if safety_checker is None and requires_safety_checker:
486
- logger.warning(
487
- f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
488
- " that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered"
489
- " results in services or applications open to the public. Both the diffusers team and Hugging Face"
490
- " strongly recommend keeping the safety filter enabled in all public-facing circumstances, disabling"
491
- " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
492
- " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
493
- )
494
-
495
- if safety_checker is not None and feature_extractor is None:
496
- raise ValueError(
497
- f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
498
- " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
499
- )
500
-
501
- is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
502
- version.parse(unet.config._diffusers_version).base_version
503
- ) < version.parse("0.9.0.dev0")
504
- is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
505
- if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
506
- deprecation_message = (
507
- "The configuration file of the unet has set the default `sample_size` to smaller than"
508
- " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
509
- " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
510
- " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
511
- " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
512
- " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
513
- " in the config might lead to incorrect results in future versions. If you have downloaded this"
514
- " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
515
- " the `unet/config.json` file"
516
- )
517
- deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
518
- new_config = dict(unet.config)
519
- new_config["sample_size"] = 64
520
- unet._internal_dict = FrozenDict(new_config)
521
- self.register_modules(
522
- vae=vae,
523
- text_encoder=text_encoder,
524
- tokenizer=tokenizer,
525
- unet=unet,
526
- scheduler=scheduler,
527
- safety_checker=safety_checker,
528
- feature_extractor=feature_extractor,
529
- )
530
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
531
-
532
- self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
533
- self.register_to_config(
534
- requires_safety_checker=requires_safety_checker,
535
- )
536
-
537
- def enable_vae_slicing(self):
538
- r"""
539
- Enable sliced VAE decoding.
540
-
541
- When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several
542
- steps. This is useful to save some memory and allow larger batch sizes.
543
- """
544
- self.vae.enable_slicing()
545
-
546
- def disable_vae_slicing(self):
547
- r"""
548
- Disable sliced VAE decoding. If `enable_vae_slicing` was previously invoked, this method will go back to
549
- computing decoding in one step.
550
- """
551
- self.vae.disable_slicing()
552
-
553
- def enable_vae_tiling(self):
554
- r"""
555
- Enable tiled VAE decoding.
556
-
557
- When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in
558
- several steps. This is useful to save a large amount of memory and to allow the processing of larger images.
559
- """
560
- self.vae.enable_tiling()
561
-
562
- def disable_vae_tiling(self):
563
- r"""
564
- Disable tiled VAE decoding. If `enable_vae_tiling` was previously invoked, this method will go back to
565
- computing decoding in one step.
566
- """
567
- self.vae.disable_tiling()
568
-
569
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_sequential_cpu_offload
570
- def enable_sequential_cpu_offload(self, gpu_id=0):
571
- r"""
572
- Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
573
- text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a
574
- `torch.device('meta')` and loaded to GPU only when their specific submodule has its `forward` method called.
575
- Note that offloading happens on a submodule basis. Memory savings are higher than with
576
- `enable_model_cpu_offload`, but performance is lower.
577
- """
578
- if is_accelerate_available() and is_accelerate_version(">=", "0.14.0"):
579
- from accelerate import cpu_offload
580
- else:
581
- raise ImportError("`enable_sequential_cpu_offload` requires `accelerate v0.14.0` or higher")
582
-
583
- device = torch.device(f"cuda:{gpu_id}")
584
-
585
- if self.device.type != "cpu":
586
- self.to("cpu", silence_dtype_warnings=True)
587
- torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
588
-
589
- for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]:
590
- cpu_offload(cpu_offloaded_model, device)
591
-
592
- if self.safety_checker is not None:
593
- cpu_offload(self.safety_checker, execution_device=device, offload_buffers=True)
594
-
595
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_model_cpu_offload
596
- def enable_model_cpu_offload(self, gpu_id=0):
597
- r"""
598
- Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
599
- to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
600
- method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
601
- `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
602
- """
603
- if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
604
- from accelerate import cpu_offload_with_hook
605
- else:
606
- raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
607
-
608
- device = torch.device(f"cuda:{gpu_id}")
609
-
610
- if self.device.type != "cpu":
611
- self.to("cpu", silence_dtype_warnings=True)
612
- torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
613
-
614
- hook = None
615
- for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]:
616
- _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
617
-
618
- if self.safety_checker is not None:
619
- _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
620
-
621
- # We'll offload the last model manually.
622
- self.final_offload_hook = hook
623
-
624
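Taken together, the VAE slicing/tiling switches and the offload helpers above trade speed for memory. A hedged sketch, assuming `pipe` is a loaded instance of this class and `accelerate` is installed:

    # Sketch only: combine the memory-saving helpers on a loaded pipeline.
    pipe.enable_vae_slicing()         # decode latents one slice at a time
    pipe.enable_vae_tiling()          # decode/encode very large images tile by tile
    pipe.enable_model_cpu_offload()   # keep only the active sub-model on the GPU (accelerate >= 0.17.0)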
- @property
625
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
626
- def _execution_device(self):
627
- r"""
628
- Returns the device on which the pipeline's models will be executed. After calling
629
- `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
630
- hooks.
631
- """
632
- if not hasattr(self.unet, "_hf_hook"):
633
- return self.device
634
- for module in self.unet.modules():
635
- if (
636
- hasattr(module, "_hf_hook")
637
- and hasattr(module._hf_hook, "execution_device")
638
- and module._hf_hook.execution_device is not None
639
- ):
640
- return torch.device(module._hf_hook.execution_device)
641
- return self.device
642
-
643
- def _encode_prompt(
644
- self,
645
- prompt,
646
- device,
647
- num_images_per_prompt,
648
- do_classifier_free_guidance,
649
- negative_prompt=None,
650
- max_embeddings_multiples=3,
651
- prompt_embeds: Optional[torch.FloatTensor] = None,
652
- negative_prompt_embeds: Optional[torch.FloatTensor] = None,
653
- ):
654
- r"""
655
- Encodes the prompt into text encoder hidden states.
656
-
657
- Args:
658
- prompt (`str` or `list(int)`):
659
- prompt to be encoded
660
- device: (`torch.device`):
661
- torch device
662
- num_images_per_prompt (`int`):
663
- number of images that should be generated per prompt
664
- do_classifier_free_guidance (`bool`):
665
- whether to use classifier free guidance or not
666
- negative_prompt (`str` or `List[str]`):
667
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
668
- if `guidance_scale` is less than `1`).
669
- max_embeddings_multiples (`int`, *optional*, defaults to `3`):
670
- The max multiple length of prompt embeddings compared to the max output length of text encoder.
671
- """
672
- if prompt is not None and isinstance(prompt, str):
673
- batch_size = 1
674
- elif prompt is not None and isinstance(prompt, list):
675
- batch_size = len(prompt)
676
- else:
677
- batch_size = prompt_embeds.shape[0]
678
-
679
- if negative_prompt_embeds is None:
680
- if negative_prompt is None:
681
- negative_prompt = [""] * batch_size
682
- elif isinstance(negative_prompt, str):
683
- negative_prompt = [negative_prompt] * batch_size
684
- if batch_size != len(negative_prompt):
685
- raise ValueError(
686
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
687
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
688
- " the batch size of `prompt`."
689
- )
690
- if prompt_embeds is None or negative_prompt_embeds is None:
691
- if isinstance(self, TextualInversionLoaderMixin):
692
- prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
693
- if do_classifier_free_guidance and negative_prompt_embeds is None:
694
- negative_prompt = self.maybe_convert_prompt(negative_prompt, self.tokenizer)
695
-
696
- prompt_embeds1, negative_prompt_embeds1 = get_weighted_text_embeddings(
697
- pipe=self,
698
- prompt=prompt,
699
- uncond_prompt=negative_prompt if do_classifier_free_guidance else None,
700
- max_embeddings_multiples=max_embeddings_multiples,
701
- )
702
- if prompt_embeds is None:
703
- prompt_embeds = prompt_embeds1
704
- if negative_prompt_embeds is None:
705
- negative_prompt_embeds = negative_prompt_embeds1
706
-
707
- bs_embed, seq_len, _ = prompt_embeds.shape
708
- # duplicate text embeddings for each generation per prompt, using mps friendly method
709
- prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
710
- prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
711
-
712
- if do_classifier_free_guidance:
713
- bs_embed, seq_len, _ = negative_prompt_embeds.shape
714
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
715
- negative_prompt_embeds = negative_prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
716
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
717
-
718
- return prompt_embeds
719
-
720
- def check_inputs(
721
- self,
722
- prompt,
723
- height,
724
- width,
725
- strength,
726
- callback_steps,
727
- negative_prompt=None,
728
- prompt_embeds=None,
729
- negative_prompt_embeds=None,
730
- ):
731
- if height % 8 != 0 or width % 8 != 0:
732
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
733
-
734
- if strength < 0 or strength > 1:
735
- raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")
736
-
737
- if (callback_steps is None) or (
738
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
739
- ):
740
- raise ValueError(
741
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
742
- f" {type(callback_steps)}."
743
- )
744
-
745
- if prompt is not None and prompt_embeds is not None:
746
- raise ValueError(
747
- f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
748
- " only forward one of the two."
749
- )
750
- elif prompt is None and prompt_embeds is None:
751
- raise ValueError(
752
- "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
753
- )
754
- elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
755
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
756
-
757
- if negative_prompt is not None and negative_prompt_embeds is not None:
758
- raise ValueError(
759
- f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
760
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
761
- )
762
-
763
- if prompt_embeds is not None and negative_prompt_embeds is not None:
764
- if prompt_embeds.shape != negative_prompt_embeds.shape:
765
- raise ValueError(
766
- "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
767
- f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
768
- f" {negative_prompt_embeds.shape}."
769
- )
770
-
771
- def get_timesteps(self, num_inference_steps, strength, device, is_text2img):
772
- if is_text2img:
773
- return self.scheduler.timesteps.to(device), num_inference_steps
774
- else:
775
- # get the original timestep using init_timestep
776
- init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
777
-
778
- t_start = max(num_inference_steps - init_timestep, 0)
779
- timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
780
-
781
- return timesteps, num_inference_steps - t_start
782
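As a worked example of the schedule above: with `num_inference_steps=50` and `strength=0.8`, `init_timestep = min(int(50 * 0.8), 50) = 40`, so `t_start = 50 - 40 = 10` and only the last 40 scheduler timesteps are run; with `strength=1.0` all 50 steps run, which is why a strength of 1 effectively ignores the init image.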
-
783
- def run_safety_checker(self, image, device, dtype):
784
- if self.safety_checker is not None:
785
- safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
786
- image, has_nsfw_concept = self.safety_checker(
787
- images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
788
- )
789
- else:
790
- has_nsfw_concept = None
791
- return image, has_nsfw_concept
792
-
793
- def decode_latents(self, latents):
794
- latents = 1 / self.vae.config.scaling_factor * latents
795
- image = self.vae.decode(latents).sample
796
- image = (image / 2 + 0.5).clamp(0, 1)
797
- # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
798
- image = image.cpu().permute(0, 2, 3, 1).float().numpy()
799
- return image
800
-
801
- def prepare_extra_step_kwargs(self, generator, eta):
802
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
803
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
804
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
805
- # and should be between [0, 1]
806
-
807
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
808
- extra_step_kwargs = {}
809
- if accepts_eta:
810
- extra_step_kwargs["eta"] = eta
811
-
812
- # check if the scheduler accepts generator
813
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
814
- if accepts_generator:
815
- extra_step_kwargs["generator"] = generator
816
- return extra_step_kwargs
817
-
818
- def prepare_latents(
819
- self,
820
- image,
821
- timestep,
822
- num_images_per_prompt,
823
- batch_size,
824
- num_channels_latents,
825
- height,
826
- width,
827
- dtype,
828
- device,
829
- generator,
830
- latents=None,
831
- ):
832
- if image is None:
833
- batch_size = batch_size * num_images_per_prompt
834
- shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
835
- if isinstance(generator, list) and len(generator) != batch_size:
836
- raise ValueError(
837
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
838
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
839
- )
840
-
841
- if latents is None:
842
- latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
843
- else:
844
- latents = latents.to(device)
845
-
846
- # scale the initial noise by the standard deviation required by the scheduler
847
- latents = latents * self.scheduler.init_noise_sigma
848
- return latents, None, None
849
- else:
850
- image = image.to(device=self.device, dtype=dtype)
851
- init_latent_dist = self.vae.encode(image).latent_dist
852
- init_latents = init_latent_dist.sample(generator=generator)
853
- init_latents = self.vae.config.scaling_factor * init_latents
854
-
855
- # Expand init_latents for batch_size and num_images_per_prompt
856
- init_latents = torch.cat([init_latents] * num_images_per_prompt, dim=0)
857
- init_latents_orig = init_latents
858
-
859
- # add noise to latents using the timesteps
860
- noise = randn_tensor(init_latents.shape, generator=generator, device=self.device, dtype=dtype)
861
- init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
862
- latents = init_latents
863
- return latents, init_latents_orig, noise
864
-
865
- @torch.no_grad()
866
- def __call__(
867
- self,
868
- prompt: Union[str, List[str]],
869
- negative_prompt: Optional[Union[str, List[str]]] = None,
870
- image: Union[torch.FloatTensor, PIL.Image.Image] = None,
871
- mask_image: Union[torch.FloatTensor, PIL.Image.Image] = None,
872
- height: int = 512,
873
- width: int = 512,
874
- num_inference_steps: int = 50,
875
- guidance_scale: float = 7.5,
876
- strength: float = 0.8,
877
- num_images_per_prompt: Optional[int] = 1,
878
- add_predicted_noise: Optional[bool] = False,
879
- eta: float = 0.0,
880
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
881
- latents: Optional[torch.FloatTensor] = None,
882
- prompt_embeds: Optional[torch.FloatTensor] = None,
883
- negative_prompt_embeds: Optional[torch.FloatTensor] = None,
884
- max_embeddings_multiples: Optional[int] = 3,
885
- output_type: Optional[str] = "pil",
886
- return_dict: bool = True,
887
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
888
- is_cancelled_callback: Optional[Callable[[], bool]] = None,
889
- callback_steps: int = 1,
890
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
891
- ):
892
- r"""
893
- Function invoked when calling the pipeline for generation.
894
-
895
- Args:
896
- prompt (`str` or `List[str]`):
897
- The prompt or prompts to guide the image generation.
898
- negative_prompt (`str` or `List[str]`, *optional*):
899
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
900
- if `guidance_scale` is less than `1`).
901
- image (`torch.FloatTensor` or `PIL.Image.Image`):
902
- `Image`, or tensor representing an image batch, that will be used as the starting point for the
903
- process.
904
- mask_image (`torch.FloatTensor` or `PIL.Image.Image`):
905
- `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
906
- replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a
907
- PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should
908
- contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`.
909
- height (`int`, *optional*, defaults to 512):
910
- The height in pixels of the generated image.
911
- width (`int`, *optional*, defaults to 512):
912
- The width in pixels of the generated image.
913
- num_inference_steps (`int`, *optional*, defaults to 50):
914
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
915
- expense of slower inference.
916
- guidance_scale (`float`, *optional*, defaults to 7.5):
917
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
918
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
919
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
920
- 1`. Higher guidance scale encourages generating images that are closely linked to the text `prompt`,
921
- usually at the expense of lower image quality.
922
- strength (`float`, *optional*, defaults to 0.8):
923
- Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1.
924
- `image` will be used as a starting point, adding more noise to it the larger the `strength`. The
925
- number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added
926
- noise will be maximum and the denoising process will run for the full number of iterations specified in
927
- `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
928
- num_images_per_prompt (`int`, *optional*, defaults to 1):
929
- The number of images to generate per prompt.
930
- add_predicted_noise (`bool`, *optional*, defaults to False):
931
- Use predicted noise instead of random noise when constructing noisy versions of the original image in
932
- the reverse diffusion process.
933
- eta (`float`, *optional*, defaults to 0.0):
934
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
935
- [`schedulers.DDIMScheduler`], will be ignored for others.
936
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
937
- One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
938
- to make generation deterministic.
939
- latents (`torch.FloatTensor`, *optional*):
940
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
941
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
942
- tensor will be generated by sampling using the supplied random `generator`.
943
- prompt_embeds (`torch.FloatTensor`, *optional*):
944
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
945
- provided, text embeddings will be generated from `prompt` input argument.
946
- negative_prompt_embeds (`torch.FloatTensor`, *optional*):
947
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
948
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
949
- argument.
950
- max_embeddings_multiples (`int`, *optional*, defaults to `3`):
951
- The max multiple length of prompt embeddings compared to the max output length of text encoder.
952
- output_type (`str`, *optional*, defaults to `"pil"`):
953
- The output format of the generated image. Choose between
954
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
955
- return_dict (`bool`, *optional*, defaults to `True`):
956
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
957
- plain tuple.
958
- callback (`Callable`, *optional*):
959
- A function that will be called every `callback_steps` steps during inference. The function will be
960
- called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
961
- is_cancelled_callback (`Callable`, *optional*):
962
- A function that will be called every `callback_steps` steps during inference. If the function returns
963
- `True`, the inference will be cancelled.
964
- callback_steps (`int`, *optional*, defaults to 1):
965
- The frequency at which the `callback` function will be called. If not specified, the callback will be
966
- called at every step.
967
- cross_attention_kwargs (`dict`, *optional*):
968
- A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
969
- `self.processor` in
970
- [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
971
-
972
- Returns:
973
- `None` if cancelled by `is_cancelled_callback`,
974
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
975
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
976
- When returning a tuple, the first element is a list with the generated images, and the second element is a
977
- list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
978
- (nsfw) content, according to the `safety_checker`.
979
- """
980
- # 0. Default height and width to unet
981
- height = height or self.unet.config.sample_size * self.vae_scale_factor
982
- width = width or self.unet.config.sample_size * self.vae_scale_factor
983
-
984
- # 1. Check inputs. Raise error if not correct
985
- self.check_inputs(
986
- prompt, height, width, strength, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds
987
- )
988
-
989
- # 2. Define call parameters
990
- if prompt is not None and isinstance(prompt, str):
991
- batch_size = 1
992
- elif prompt is not None and isinstance(prompt, list):
993
- batch_size = len(prompt)
994
- else:
995
- batch_size = prompt_embeds.shape[0]
996
-
997
- device = self._execution_device
998
- # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
999
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
1000
- # corresponds to doing no classifier free guidance.
1001
- do_classifier_free_guidance = guidance_scale > 1.0
1002
-
1003
- # 3. Encode input prompt
1004
- prompt_embeds = self._encode_prompt(
1005
- prompt,
1006
- device,
1007
- num_images_per_prompt,
1008
- do_classifier_free_guidance,
1009
- negative_prompt,
1010
- max_embeddings_multiples,
1011
- prompt_embeds=prompt_embeds,
1012
- negative_prompt_embeds=negative_prompt_embeds,
1013
- )
1014
- dtype = prompt_embeds.dtype
1015
-
1016
- # 4. Preprocess image and mask
1017
- if isinstance(image, PIL.Image.Image):
1018
- image = preprocess_image(image, batch_size)
1019
- if image is not None:
1020
- image = image.to(device=self.device, dtype=dtype)
1021
- if isinstance(mask_image, PIL.Image.Image):
1022
- mask_image = preprocess_mask(mask_image, batch_size, self.vae_scale_factor)
1023
- if mask_image is not None:
1024
- mask = mask_image.to(device=self.device, dtype=dtype)
1025
- mask = torch.cat([mask] * num_images_per_prompt)
1026
- else:
1027
- mask = None
1028
-
1029
- # 5. set timesteps
1030
- self.scheduler.set_timesteps(num_inference_steps, device=device)
1031
- timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device, image is None)
1032
- latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
1033
-
1034
- # 6. Prepare latent variables
1035
- latents, init_latents_orig, noise = self.prepare_latents(
1036
- image,
1037
- latent_timestep,
1038
- num_images_per_prompt,
1039
- batch_size,
1040
- self.unet.config.in_channels,
1041
- height,
1042
- width,
1043
- dtype,
1044
- device,
1045
- generator,
1046
- latents,
1047
- )
1048
-
1049
- # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
1050
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
1051
-
1052
- # 8. Denoising loop
1053
- num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
1054
- with self.progress_bar(total=num_inference_steps) as progress_bar:
1055
- for i, t in enumerate(timesteps):
1056
- # expand the latents if we are doing classifier free guidance
1057
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
1058
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1059
-
1060
- # predict the noise residual
1061
- noise_pred = self.unet(
1062
- latent_model_input,
1063
- t,
1064
- encoder_hidden_states=prompt_embeds,
1065
- cross_attention_kwargs=cross_attention_kwargs,
1066
- ).sample
1067
-
1068
- # perform guidance
1069
- if do_classifier_free_guidance:
1070
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1071
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
1072
-
1073
- # compute the previous noisy sample x_t -> x_t-1
1074
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
1075
-
1076
- if mask is not None:
1077
- # masking
1078
- if add_predicted_noise:
1079
- init_latents_proper = self.scheduler.add_noise(
1080
- init_latents_orig, noise_pred_uncond, torch.tensor([t])
1081
- )
1082
- else:
1083
- init_latents_proper = self.scheduler.add_noise(init_latents_orig, noise, torch.tensor([t]))
1084
- latents = (init_latents_proper * mask) + (latents * (1 - mask))
1085
-
1086
- # call the callback, if provided
1087
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1088
- progress_bar.update()
1089
- if i % callback_steps == 0:
1090
- if callback is not None:
1091
- callback(i, t, latents)
1092
- if is_cancelled_callback is not None and is_cancelled_callback():
1093
- return None
1094
-
1095
- if output_type == "latent":
1096
- image = latents
1097
- has_nsfw_concept = None
1098
- elif output_type == "pil":
1099
- # 9. Post-processing
1100
- image = self.decode_latents(latents)
1101
-
1102
- # 10. Run safety checker
1103
- image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
1104
-
1105
- # 11. Convert to PIL
1106
- image = self.numpy_to_pil(image)
1107
- else:
1108
- # 9. Post-processing
1109
- image = self.decode_latents(latents)
1110
-
1111
- # 10. Run safety checker
1112
- image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
1113
-
1114
- # Offload last model to CPU
1115
- if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
1116
- self.final_offload_hook.offload()
1117
-
1118
- if not return_dict:
1119
- return image, has_nsfw_concept
1120
-
1121
- return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
1122
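For context, this file is distributed as a diffusers community pipeline, so `__call__` is usually reached by loading it through `custom_pipeline`. A hedged sketch (the checkpoint name and prompt are illustrative, and `lpw_stable_diffusion` is assumed to be the registered community-pipeline id):

    # Sketch only: load the community pipeline and run text-to-image with a weighted long prompt.
    import torch
    from diffusers import DiffusionPipeline

    pipe = DiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5",         # illustrative checkpoint
        custom_pipeline="lpw_stable_diffusion",   # assumed community-pipeline id
        torch_dtype=torch.float16,
    ).to("cuda")

    result = pipe(
        prompt="a masterpiece portrait of an astronaut, (intricate armor:1.2), cinematic lighting, " * 6,
        negative_prompt="(low quality:1.4), blurry",
        num_inference_steps=30,
        max_embeddings_multiples=3,
    )
    result.images[0].save("astronaut.png")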
-
1123
- def text2img(
1124
- self,
1125
- prompt: Union[str, List[str]],
1126
- negative_prompt: Optional[Union[str, List[str]]] = None,
1127
- height: int = 512,
1128
- width: int = 512,
1129
- num_inference_steps: int = 50,
1130
- guidance_scale: float = 7.5,
1131
- num_images_per_prompt: Optional[int] = 1,
1132
- eta: float = 0.0,
1133
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
1134
- latents: Optional[torch.FloatTensor] = None,
1135
- prompt_embeds: Optional[torch.FloatTensor] = None,
1136
- negative_prompt_embeds: Optional[torch.FloatTensor] = None,
1137
- max_embeddings_multiples: Optional[int] = 3,
1138
- output_type: Optional[str] = "pil",
1139
- return_dict: bool = True,
1140
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
1141
- is_cancelled_callback: Optional[Callable[[], bool]] = None,
1142
- callback_steps: int = 1,
1143
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
1144
- ):
1145
- r"""
1146
- Function for text-to-image generation.
1147
- Args:
1148
- prompt (`str` or `List[str]`):
1149
- The prompt or prompts to guide the image generation.
1150
- negative_prompt (`str` or `List[str]`, *optional*):
1151
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
1152
- if `guidance_scale` is less than `1`).
1153
- height (`int`, *optional*, defaults to 512):
1154
- The height in pixels of the generated image.
1155
- width (`int`, *optional*, defaults to 512):
1156
- The width in pixels of the generated image.
1157
- num_inference_steps (`int`, *optional*, defaults to 50):
1158
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
1159
- expense of slower inference.
1160
- guidance_scale (`float`, *optional*, defaults to 7.5):
1161
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
1162
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
1163
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1164
- 1`. Higher guidance scale encourages generating images that are closely linked to the text `prompt`,
1165
- usually at the expense of lower image quality.
1166
- num_images_per_prompt (`int`, *optional*, defaults to 1):
1167
- The number of images to generate per prompt.
1168
- eta (`float`, *optional*, defaults to 0.0):
1169
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
1170
- [`schedulers.DDIMScheduler`], will be ignored for others.
1171
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
1172
- One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
1173
- to make generation deterministic.
1174
- latents (`torch.FloatTensor`, *optional*):
1175
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
1176
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
1177
- tensor will be generated by sampling using the supplied random `generator`.
1178
- prompt_embeds (`torch.FloatTensor`, *optional*):
1179
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
1180
- provided, text embeddings will be generated from `prompt` input argument.
1181
- negative_prompt_embeds (`torch.FloatTensor`, *optional*):
1182
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
1183
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
1184
- argument.
1185
- max_embeddings_multiples (`int`, *optional*, defaults to `3`):
1186
- The max multiple length of prompt embeddings compared to the max output length of text encoder.
1187
- output_type (`str`, *optional*, defaults to `"pil"`):
1188
- The output format of the generated image. Choose between
1189
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
1190
- return_dict (`bool`, *optional*, defaults to `True`):
1191
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
1192
- plain tuple.
1193
- callback (`Callable`, *optional*):
1194
- A function that will be called every `callback_steps` steps during inference. The function will be
1195
- called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
1196
- is_cancelled_callback (`Callable`, *optional*):
1197
- A function that will be called every `callback_steps` steps during inference. If the function returns
1198
- `True`, the inference will be cancelled.
1199
- callback_steps (`int`, *optional*, defaults to 1):
1200
- The frequency at which the `callback` function will be called. If not specified, the callback will be
1201
- called at every step.
1202
- cross_attention_kwargs (`dict`, *optional*):
1203
- A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
1204
- `self.processor` in
1205
- [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
1206
-
1207
- Returns:
1208
- `None` if cancelled by `is_cancelled_callback`,
1209
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
1210
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
1211
- When returning a tuple, the first element is a list with the generated images, and the second element is a
1212
- list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
1213
- (nsfw) content, according to the `safety_checker`.
1214
- """
1215
- return self.__call__(
1216
- prompt=prompt,
1217
- negative_prompt=negative_prompt,
1218
- height=height,
1219
- width=width,
1220
- num_inference_steps=num_inference_steps,
1221
- guidance_scale=guidance_scale,
1222
- num_images_per_prompt=num_images_per_prompt,
1223
- eta=eta,
1224
- generator=generator,
1225
- latents=latents,
1226
- prompt_embeds=prompt_embeds,
1227
- negative_prompt_embeds=negative_prompt_embeds,
1228
- max_embeddings_multiples=max_embeddings_multiples,
1229
- output_type=output_type,
1230
- return_dict=return_dict,
1231
- callback=callback,
1232
- is_cancelled_callback=is_cancelled_callback,
1233
- callback_steps=callback_steps,
1234
- cross_attention_kwargs=cross_attention_kwargs,
1235
- )
1236
-
1237
- def img2img(
1238
- self,
1239
- image: Union[torch.FloatTensor, PIL.Image.Image],
1240
- prompt: Union[str, List[str]],
1241
- negative_prompt: Optional[Union[str, List[str]]] = None,
1242
- strength: float = 0.8,
1243
- num_inference_steps: Optional[int] = 50,
1244
- guidance_scale: Optional[float] = 7.5,
1245
- num_images_per_prompt: Optional[int] = 1,
1246
- eta: Optional[float] = 0.0,
1247
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
1248
- prompt_embeds: Optional[torch.FloatTensor] = None,
1249
- negative_prompt_embeds: Optional[torch.FloatTensor] = None,
1250
- max_embeddings_multiples: Optional[int] = 3,
1251
- output_type: Optional[str] = "pil",
1252
- return_dict: bool = True,
1253
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
1254
- is_cancelled_callback: Optional[Callable[[], bool]] = None,
1255
- callback_steps: int = 1,
1256
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
1257
- ):
1258
- r"""
1259
- Function for image-to-image generation.
1260
- Args:
1261
- image (`torch.FloatTensor` or `PIL.Image.Image`):
1262
- `Image`, or tensor representing an image batch, that will be used as the starting point for the
1263
- process.
1264
- prompt (`str` or `List[str]`):
1265
- The prompt or prompts to guide the image generation.
1266
- negative_prompt (`str` or `List[str]`, *optional*):
1267
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
1268
- if `guidance_scale` is less than `1`).
1269
- strength (`float`, *optional*, defaults to 0.8):
1270
- Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1.
1271
- `image` will be used as a starting point, adding more noise to it the larger the `strength`. The
1272
- number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added
1273
- noise will be maximum and the denoising process will run for the full number of iterations specified in
1274
- `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
1275
- num_inference_steps (`int`, *optional*, defaults to 50):
1276
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
1277
- expense of slower inference. This parameter will be modulated by `strength`.
1278
- guidance_scale (`float`, *optional*, defaults to 7.5):
1279
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
1280
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
1281
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1282
- 1`. Higher guidance scale encourages generating images that are closely linked to the text `prompt`,
1283
- usually at the expense of lower image quality.
1284
- num_images_per_prompt (`int`, *optional*, defaults to 1):
1285
- The number of images to generate per prompt.
1286
- eta (`float`, *optional*, defaults to 0.0):
1287
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
1288
- [`schedulers.DDIMScheduler`], will be ignored for others.
1289
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
1290
- One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
1291
- to make generation deterministic.
1292
- prompt_embeds (`torch.FloatTensor`, *optional*):
1293
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
1294
- provided, text embeddings will be generated from `prompt` input argument.
1295
- negative_prompt_embeds (`torch.FloatTensor`, *optional*):
1296
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
1297
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
1298
- argument.
1299
- max_embeddings_multiples (`int`, *optional*, defaults to `3`):
1300
- The max multiple length of prompt embeddings compared to the max output length of text encoder.
1301
- output_type (`str`, *optional*, defaults to `"pil"`):
1302
- The output format of the generated image. Choose between
1303
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
1304
- return_dict (`bool`, *optional*, defaults to `True`):
1305
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
1306
- plain tuple.
1307
- callback (`Callable`, *optional*):
1308
- A function that will be called every `callback_steps` steps during inference. The function will be
1309
- called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
1310
- is_cancelled_callback (`Callable`, *optional*):
1311
- A function that will be called every `callback_steps` steps during inference. If the function returns
1312
- `True`, the inference will be cancelled.
1313
- callback_steps (`int`, *optional*, defaults to 1):
1314
- The frequency at which the `callback` function will be called. If not specified, the callback will be
1315
- called at every step.
1316
- cross_attention_kwargs (`dict`, *optional*):
1317
- A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
1318
- `self.processor` in
1319
- [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
1320
-
1321
- Returns:
1322
- `None` if cancelled by `is_cancelled_callback`,
1323
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
1324
- When returning a tuple, the first element is a list with the generated images, and the second element is a
1325
- list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
1326
- (nsfw) content, according to the `safety_checker`.
1327
- """
1328
- return self.__call__(
1329
- prompt=prompt,
1330
- negative_prompt=negative_prompt,
1331
- image=image,
1332
- num_inference_steps=num_inference_steps,
1333
- guidance_scale=guidance_scale,
1334
- strength=strength,
1335
- num_images_per_prompt=num_images_per_prompt,
1336
- eta=eta,
1337
- generator=generator,
1338
- prompt_embeds=prompt_embeds,
1339
- negative_prompt_embeds=negative_prompt_embeds,
1340
- max_embeddings_multiples=max_embeddings_multiples,
1341
- output_type=output_type,
1342
- return_dict=return_dict,
1343
- callback=callback,
1344
- is_cancelled_callback=is_cancelled_callback,
1345
- callback_steps=callback_steps,
1346
- cross_attention_kwargs=cross_attention_kwargs,
1347
- )
1348
-
1349
- def inpaint(
1350
- self,
1351
- image: Union[torch.FloatTensor, PIL.Image.Image],
1352
- mask_image: Union[torch.FloatTensor, PIL.Image.Image],
1353
- prompt: Union[str, List[str]],
1354
- negative_prompt: Optional[Union[str, List[str]]] = None,
1355
- strength: float = 0.8,
1356
- num_inference_steps: Optional[int] = 50,
1357
- guidance_scale: Optional[float] = 7.5,
1358
- num_images_per_prompt: Optional[int] = 1,
1359
- add_predicted_noise: Optional[bool] = False,
1360
- eta: Optional[float] = 0.0,
1361
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
1362
- prompt_embeds: Optional[torch.FloatTensor] = None,
1363
- negative_prompt_embeds: Optional[torch.FloatTensor] = None,
1364
- max_embeddings_multiples: Optional[int] = 3,
1365
- output_type: Optional[str] = "pil",
1366
- return_dict: bool = True,
1367
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
1368
- is_cancelled_callback: Optional[Callable[[], bool]] = None,
1369
- callback_steps: int = 1,
1370
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
1371
- ):
1372
- r"""
1373
- Function for inpaint.
1374
- Args:
1375
- image (`torch.FloatTensor` or `PIL.Image.Image`):
1376
- `Image`, or tensor representing an image batch, that will be used as the starting point for the
1377
- process. This is the image whose masked region will be inpainted.
1378
- mask_image (`torch.FloatTensor` or `PIL.Image.Image`):
1379
- `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
1380
- replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a
1381
- PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should
1382
- contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`.
1383
- prompt (`str` or `List[str]`):
1384
- The prompt or prompts to guide the image generation.
1385
- negative_prompt (`str` or `List[str]`, *optional*):
1386
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
1387
- if `guidance_scale` is less than `1`).
1388
- strength (`float`, *optional*, defaults to 0.8):
1389
- Conceptually, indicates how much to inpaint the masked area. Must be between 0 and 1. When `strength`
1390
- is 1, the denoising process will be run on the masked area for the full number of iterations specified
1391
- in `num_inference_steps`. `image` will be used as a reference for the masked area, adding more
1392
- noise to that region the larger the `strength`. If `strength` is 0, no inpainting will occur.
1393
- num_inference_steps (`int`, *optional*, defaults to 50):
1394
- The reference number of denoising steps. More denoising steps usually lead to a higher quality image at
1395
- the expense of slower inference. This parameter will be modulated by `strength`, as explained above.
1396
- guidance_scale (`float`, *optional*, defaults to 7.5):
1397
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
1398
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
1399
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1400
- 1`. Higher guidance scale encourages generating images that are closely linked to the text `prompt`,
1401
- usually at the expense of lower image quality.
1402
- num_images_per_prompt (`int`, *optional*, defaults to 1):
1403
- The number of images to generate per prompt.
1404
- add_predicted_noise (`bool`, *optional*, defaults to False):
1405
- Use predicted noise instead of random noise when constructing noisy versions of the original image in
1406
- the reverse diffusion process.
1407
- eta (`float`, *optional*, defaults to 0.0):
1408
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
1409
- [`schedulers.DDIMScheduler`], will be ignored for others.
1410
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
1411
- One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
1412
- to make generation deterministic.
1413
- prompt_embeds (`torch.FloatTensor`, *optional*):
1414
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
1415
- provided, text embeddings will be generated from `prompt` input argument.
1416
- negative_prompt_embeds (`torch.FloatTensor`, *optional*):
1417
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
1418
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
1419
- argument.
1420
- max_embeddings_multiples (`int`, *optional*, defaults to `3`):
1421
- The max multiple length of prompt embeddings compared to the max output length of text encoder.
1422
- output_type (`str`, *optional*, defaults to `"pil"`):
1423
- The output format of the generated image. Choose between
1424
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
1425
- return_dict (`bool`, *optional*, defaults to `True`):
1426
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
1427
- plain tuple.
1428
- callback (`Callable`, *optional*):
1429
- A function that will be called every `callback_steps` steps during inference. The function will be
1430
- called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
1431
- is_cancelled_callback (`Callable`, *optional*):
1432
- A function that will be called every `callback_steps` steps during inference. If the function returns
1433
- `True`, the inference will be cancelled.
1434
- callback_steps (`int`, *optional*, defaults to 1):
1435
- The frequency at which the `callback` function will be called. If not specified, the callback will be
1436
- called at every step.
1437
- cross_attention_kwargs (`dict`, *optional*):
1438
- A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
1439
- `self.processor` in
1440
- [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
1441
-
1442
- Returns:
1443
- `None` if cancelled by `is_cancelled_callback`,
1444
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
1445
- When returning a tuple, the first element is a list with the generated images, and the second element is a
1446
- list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
1447
- (nsfw) content, according to the `safety_checker`.
1448
- """
1449
- return self.__call__(
1450
- prompt=prompt,
1451
- negative_prompt=negative_prompt,
1452
- image=image,
1453
- mask_image=mask_image,
1454
- num_inference_steps=num_inference_steps,
1455
- guidance_scale=guidance_scale,
1456
- strength=strength,
1457
- num_images_per_prompt=num_images_per_prompt,
1458
- add_predicted_noise=add_predicted_noise,
1459
- eta=eta,
1460
- generator=generator,
1461
- prompt_embeds=prompt_embeds,
1462
- negative_prompt_embeds=negative_prompt_embeds,
1463
- max_embeddings_multiples=max_embeddings_multiples,
1464
- output_type=output_type,
1465
- return_dict=return_dict,
1466
- callback=callback,
1467
- is_cancelled_callback=is_cancelled_callback,
1468
- callback_steps=callback_steps,
1469
- cross_attention_kwargs=cross_attention_kwargs,
1470
- )
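For reference, a minimal sketch of how the `inpaint` entry point above is typically invoked once the long-prompt-weighting community pipeline is loaded. The checkpoint id, file paths, and prompt below are placeholders, not values taken from this repository:

import torch
from PIL import Image
from diffusers import DiffusionPipeline

# Load the community long-prompt-weighting pipeline (assumed checkpoint id).
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    custom_pipeline="lpw_stable_diffusion",
    torch_dtype=torch.float16,
).to("cuda")

# Placeholder inputs: a 512x512 photo and a white-on-black mask of the region to repaint.
init_image = Image.open("photo.png").convert("RGB").resize((512, 512))
mask_image = Image.open("mask.png").convert("L").resize((512, 512))

result = pipe.inpaint(
    prompt="a red brick wall",
    image=init_image,
    mask_image=mask_image,
    strength=0.75,            # how much of the masked area is re-noised (see docstring above)
    num_inference_steps=50,
    guidance_scale=7.5,
)
result.images[0].save("out.png")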
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/unet_2d_condition_flax.py DELETED
@@ -1,357 +0,0 @@
1
- # Copyright 2023 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
- from typing import Optional, Tuple, Union
15
-
16
- import flax
17
- import flax.linen as nn
18
- import jax
19
- import jax.numpy as jnp
20
- from flax.core.frozen_dict import FrozenDict
21
-
22
- from ..configuration_utils import ConfigMixin, flax_register_to_config
23
- from ..utils import BaseOutput
24
- from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
25
- from .modeling_flax_utils import FlaxModelMixin
26
- from .unet_2d_blocks_flax import (
27
- FlaxCrossAttnDownBlock2D,
28
- FlaxCrossAttnUpBlock2D,
29
- FlaxDownBlock2D,
30
- FlaxUNetMidBlock2DCrossAttn,
31
- FlaxUpBlock2D,
32
- )
33
-
34
-
35
- @flax.struct.dataclass
36
- class FlaxUNet2DConditionOutput(BaseOutput):
37
- """
38
- The output of [`FlaxUNet2DConditionModel`].
39
-
40
- Args:
41
- sample (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)`):
42
- The hidden states output conditioned on `encoder_hidden_states` input. Output of last layer of model.
43
- """
44
-
45
- sample: jnp.ndarray
46
-
47
-
48
- @flax_register_to_config
49
- class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
50
- r"""
51
- A conditional 2D UNet model that takes a noisy sample, conditional state, and a timestep and returns a sample
52
- shaped output.
53
-
54
- This model inherits from [`FlaxModelMixin`]. Check the superclass documentation for its generic methods
55
- implemented for all models (such as downloading or saving).
56
-
57
- This model is also a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/flax.linen.html#module)
58
- subclass. Use it as a regular Flax Linen module and refer to the Flax documentation for all matters related to its
59
- general usage and behavior.
60
-
61
- Inherent JAX features such as the following are supported:
62
- - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
63
- - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
64
- - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
65
- - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
66
-
67
- Parameters:
68
- sample_size (`int`, *optional*):
69
- The size of the input sample.
70
- in_channels (`int`, *optional*, defaults to 4):
71
- The number of channels in the input sample.
72
- out_channels (`int`, *optional*, defaults to 4):
73
- The number of channels in the output.
74
- down_block_types (`Tuple[str]`, *optional*, defaults to `("FlaxCrossAttnDownBlock2D", "FlaxCrossAttnDownBlock2D", "FlaxCrossAttnDownBlock2D", "FlaxDownBlock2D")`):
75
- The tuple of downsample blocks to use.
76
- up_block_types (`Tuple[str]`, *optional*, defaults to `("FlaxUpBlock2D", "FlaxCrossAttnUpBlock2D", "FlaxCrossAttnUpBlock2D", "FlaxCrossAttnUpBlock2D")`):
77
- The tuple of upsample blocks to use.
78
- block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):
79
- The tuple of output channels for each block.
80
- layers_per_block (`int`, *optional*, defaults to 2):
81
- The number of layers per block.
82
- attention_head_dim (`int` or `Tuple[int]`, *optional*, defaults to 8):
83
- The dimension of the attention heads.
84
- num_attention_heads (`int` or `Tuple[int]`, *optional*):
85
- The number of attention heads.
86
- cross_attention_dim (`int`, *optional*, defaults to 768):
87
- The dimension of the cross attention features.
88
- dropout (`float`, *optional*, defaults to 0):
89
- Dropout probability for down, up and bottleneck blocks.
90
- flip_sin_to_cos (`bool`, *optional*, defaults to `True`):
91
- Whether to flip the sin to cos in the time embedding.
92
- freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding.
93
- use_memory_efficient_attention (`bool`, *optional*, defaults to `False`):
94
- Enable memory efficient attention as described [here](https://arxiv.org/abs/2112.05682).
95
- """
96
-
97
- sample_size: int = 32
98
- in_channels: int = 4
99
- out_channels: int = 4
100
- down_block_types: Tuple[str] = (
101
- "CrossAttnDownBlock2D",
102
- "CrossAttnDownBlock2D",
103
- "CrossAttnDownBlock2D",
104
- "DownBlock2D",
105
- )
106
- up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
107
- only_cross_attention: Union[bool, Tuple[bool]] = False
108
- block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
109
- layers_per_block: int = 2
110
- attention_head_dim: Union[int, Tuple[int]] = 8
111
- num_attention_heads: Optional[Union[int, Tuple[int]]] = None
112
- cross_attention_dim: int = 1280
113
- dropout: float = 0.0
114
- use_linear_projection: bool = False
115
- dtype: jnp.dtype = jnp.float32
116
- flip_sin_to_cos: bool = True
117
- freq_shift: int = 0
118
- use_memory_efficient_attention: bool = False
119
-
120
- def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
121
- # init input tensors
122
- sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
123
- sample = jnp.zeros(sample_shape, dtype=jnp.float32)
124
- timesteps = jnp.ones((1,), dtype=jnp.int32)
125
- encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
126
-
127
- params_rng, dropout_rng = jax.random.split(rng)
128
- rngs = {"params": params_rng, "dropout": dropout_rng}
129
-
130
- return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]
131
-
132
- def setup(self):
133
- block_out_channels = self.block_out_channels
134
- time_embed_dim = block_out_channels[0] * 4
135
-
136
- if self.num_attention_heads is not None:
137
- raise ValueError(
138
- "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19."
139
- )
140
-
141
- # If `num_attention_heads` is not defined (which is the case for most models)
142
- # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
143
- # The reason for this behavior is to correct for incorrectly named variables that were introduced
144
- # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
145
- # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
146
- # which is why we correct for the naming here.
147
- num_attention_heads = self.num_attention_heads or self.attention_head_dim
148
-
149
- # input
150
- self.conv_in = nn.Conv(
151
- block_out_channels[0],
152
- kernel_size=(3, 3),
153
- strides=(1, 1),
154
- padding=((1, 1), (1, 1)),
155
- dtype=self.dtype,
156
- )
157
-
158
- # time
159
- self.time_proj = FlaxTimesteps(
160
- block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
161
- )
162
- self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)
163
-
164
- only_cross_attention = self.only_cross_attention
165
- if isinstance(only_cross_attention, bool):
166
- only_cross_attention = (only_cross_attention,) * len(self.down_block_types)
167
-
168
- if isinstance(num_attention_heads, int):
169
- num_attention_heads = (num_attention_heads,) * len(self.down_block_types)
170
-
171
- # down
172
- down_blocks = []
173
- output_channel = block_out_channels[0]
174
- for i, down_block_type in enumerate(self.down_block_types):
175
- input_channel = output_channel
176
- output_channel = block_out_channels[i]
177
- is_final_block = i == len(block_out_channels) - 1
178
-
179
- if down_block_type == "CrossAttnDownBlock2D":
180
- down_block = FlaxCrossAttnDownBlock2D(
181
- in_channels=input_channel,
182
- out_channels=output_channel,
183
- dropout=self.dropout,
184
- num_layers=self.layers_per_block,
185
- num_attention_heads=num_attention_heads[i],
186
- add_downsample=not is_final_block,
187
- use_linear_projection=self.use_linear_projection,
188
- only_cross_attention=only_cross_attention[i],
189
- use_memory_efficient_attention=self.use_memory_efficient_attention,
190
- dtype=self.dtype,
191
- )
192
- else:
193
- down_block = FlaxDownBlock2D(
194
- in_channels=input_channel,
195
- out_channels=output_channel,
196
- dropout=self.dropout,
197
- num_layers=self.layers_per_block,
198
- add_downsample=not is_final_block,
199
- dtype=self.dtype,
200
- )
201
-
202
- down_blocks.append(down_block)
203
- self.down_blocks = down_blocks
204
-
205
- # mid
206
- self.mid_block = FlaxUNetMidBlock2DCrossAttn(
207
- in_channels=block_out_channels[-1],
208
- dropout=self.dropout,
209
- num_attention_heads=num_attention_heads[-1],
210
- use_linear_projection=self.use_linear_projection,
211
- use_memory_efficient_attention=self.use_memory_efficient_attention,
212
- dtype=self.dtype,
213
- )
214
-
215
- # up
216
- up_blocks = []
217
- reversed_block_out_channels = list(reversed(block_out_channels))
218
- reversed_num_attention_heads = list(reversed(num_attention_heads))
219
- only_cross_attention = list(reversed(only_cross_attention))
220
- output_channel = reversed_block_out_channels[0]
221
- for i, up_block_type in enumerate(self.up_block_types):
222
- prev_output_channel = output_channel
223
- output_channel = reversed_block_out_channels[i]
224
- input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
225
-
226
- is_final_block = i == len(block_out_channels) - 1
227
-
228
- if up_block_type == "CrossAttnUpBlock2D":
229
- up_block = FlaxCrossAttnUpBlock2D(
230
- in_channels=input_channel,
231
- out_channels=output_channel,
232
- prev_output_channel=prev_output_channel,
233
- num_layers=self.layers_per_block + 1,
234
- num_attention_heads=reversed_num_attention_heads[i],
235
- add_upsample=not is_final_block,
236
- dropout=self.dropout,
237
- use_linear_projection=self.use_linear_projection,
238
- only_cross_attention=only_cross_attention[i],
239
- use_memory_efficient_attention=self.use_memory_efficient_attention,
240
- dtype=self.dtype,
241
- )
242
- else:
243
- up_block = FlaxUpBlock2D(
244
- in_channels=input_channel,
245
- out_channels=output_channel,
246
- prev_output_channel=prev_output_channel,
247
- num_layers=self.layers_per_block + 1,
248
- add_upsample=not is_final_block,
249
- dropout=self.dropout,
250
- dtype=self.dtype,
251
- )
252
-
253
- up_blocks.append(up_block)
254
- prev_output_channel = output_channel
255
- self.up_blocks = up_blocks
256
-
257
- # out
258
- self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
259
- self.conv_out = nn.Conv(
260
- self.out_channels,
261
- kernel_size=(3, 3),
262
- strides=(1, 1),
263
- padding=((1, 1), (1, 1)),
264
- dtype=self.dtype,
265
- )
266
-
267
- def __call__(
268
- self,
269
- sample,
270
- timesteps,
271
- encoder_hidden_states,
272
- down_block_additional_residuals=None,
273
- mid_block_additional_residual=None,
274
- return_dict: bool = True,
275
- train: bool = False,
276
- ) -> Union[FlaxUNet2DConditionOutput, Tuple]:
277
- r"""
278
- Args:
279
- sample (`jnp.ndarray`): (batch, channel, height, width) noisy inputs tensor
280
- timesteps (`jnp.ndarray` or `float` or `int`): timesteps
281
- encoder_hidden_states (`jnp.ndarray`): (batch_size, sequence_length, hidden_size) encoder hidden states
282
- return_dict (`bool`, *optional*, defaults to `True`):
283
- Whether or not to return a [`models.unet_2d_condition_flax.FlaxUNet2DConditionOutput`] instead of a
284
- plain tuple.
285
- train (`bool`, *optional*, defaults to `False`):
286
- Use deterministic functions and disable dropout when not training.
287
-
288
- Returns:
289
- [`~models.unet_2d_condition_flax.FlaxUNet2DConditionOutput`] or `tuple`:
290
- [`~models.unet_2d_condition_flax.FlaxUNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`.
291
- When returning a tuple, the first element is the sample tensor.
292
- """
293
- # 1. time
294
- if not isinstance(timesteps, jnp.ndarray):
295
- timesteps = jnp.array([timesteps], dtype=jnp.int32)
296
- elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
297
- timesteps = timesteps.astype(dtype=jnp.float32)
298
- timesteps = jnp.expand_dims(timesteps, 0)
299
-
300
- t_emb = self.time_proj(timesteps)
301
- t_emb = self.time_embedding(t_emb)
302
-
303
- # 2. pre-process
304
- sample = jnp.transpose(sample, (0, 2, 3, 1))
305
- sample = self.conv_in(sample)
306
-
307
- # 3. down
308
- down_block_res_samples = (sample,)
309
- for down_block in self.down_blocks:
310
- if isinstance(down_block, FlaxCrossAttnDownBlock2D):
311
- sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
312
- else:
313
- sample, res_samples = down_block(sample, t_emb, deterministic=not train)
314
- down_block_res_samples += res_samples
315
-
316
- if down_block_additional_residuals is not None:
317
- new_down_block_res_samples = ()
318
-
319
- for down_block_res_sample, down_block_additional_residual in zip(
320
- down_block_res_samples, down_block_additional_residuals
321
- ):
322
- down_block_res_sample += down_block_additional_residual
323
- new_down_block_res_samples += (down_block_res_sample,)
324
-
325
- down_block_res_samples = new_down_block_res_samples
326
-
327
- # 4. mid
328
- sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
329
-
330
- if mid_block_additional_residual is not None:
331
- sample += mid_block_additional_residual
332
-
333
- # 5. up
334
- for up_block in self.up_blocks:
335
- res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
336
- down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
337
- if isinstance(up_block, FlaxCrossAttnUpBlock2D):
338
- sample = up_block(
339
- sample,
340
- temb=t_emb,
341
- encoder_hidden_states=encoder_hidden_states,
342
- res_hidden_states_tuple=res_samples,
343
- deterministic=not train,
344
- )
345
- else:
346
- sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)
347
-
348
- # 6. post-process
349
- sample = self.conv_norm_out(sample)
350
- sample = nn.silu(sample)
351
- sample = self.conv_out(sample)
352
- sample = jnp.transpose(sample, (0, 3, 1, 2))
353
-
354
- if not return_dict:
355
- return (sample,)
356
-
357
- return FlaxUNet2DConditionOutput(sample=sample)
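A short smoke test of the module above, assuming the file is importable as shown; shapes follow the class defaults (4 latent channels, `sample_size=32`) and `cross_attention_dim` is set to 768 to match the docstring:

import jax
import jax.numpy as jnp

unet = FlaxUNet2DConditionModel(sample_size=32, cross_attention_dim=768)
params = unet.init_weights(jax.random.PRNGKey(0))

sample = jnp.zeros((1, 4, 32, 32), dtype=jnp.float32)                 # noisy latents (NCHW)
timesteps = jnp.array([10], dtype=jnp.int32)
encoder_hidden_states = jnp.zeros((1, 77, 768), dtype=jnp.float32)    # text encoder output

out = unet.apply({"params": params}, sample, timesteps, encoder_hidden_states)
assert out.sample.shape == (1, 4, 32, 32)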
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/dit/__init__.py DELETED
File without changes
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/schedulers/test_scheduler_dpm_multi_inverse.py DELETED
@@ -1,266 +0,0 @@
1
- import tempfile
2
-
3
- import torch
4
-
5
- from diffusers import DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler
6
-
7
- from .test_schedulers import SchedulerCommonTest
8
-
9
-
10
- class DPMSolverMultistepSchedulerTest(SchedulerCommonTest):
11
- scheduler_classes = (DPMSolverMultistepInverseScheduler,)
12
- forward_default_kwargs = (("num_inference_steps", 25),)
13
-
14
- def get_scheduler_config(self, **kwargs):
15
- config = {
16
- "num_train_timesteps": 1000,
17
- "beta_start": 0.0001,
18
- "beta_end": 0.02,
19
- "beta_schedule": "linear",
20
- "solver_order": 2,
21
- "prediction_type": "epsilon",
22
- "thresholding": False,
23
- "sample_max_value": 1.0,
24
- "algorithm_type": "dpmsolver++",
25
- "solver_type": "midpoint",
26
- "lower_order_final": False,
27
- "lambda_min_clipped": -float("inf"),
28
- "variance_type": None,
29
- }
30
-
31
- config.update(**kwargs)
32
- return config
33
-
34
- def check_over_configs(self, time_step=0, **config):
35
- kwargs = dict(self.forward_default_kwargs)
36
- num_inference_steps = kwargs.pop("num_inference_steps", None)
37
- sample = self.dummy_sample
38
- residual = 0.1 * sample
39
- dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
40
-
41
- for scheduler_class in self.scheduler_classes:
42
- scheduler_config = self.get_scheduler_config(**config)
43
- scheduler = scheduler_class(**scheduler_config)
44
- scheduler.set_timesteps(num_inference_steps)
45
- # copy over dummy past residuals
46
- scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
47
-
48
- with tempfile.TemporaryDirectory() as tmpdirname:
49
- scheduler.save_config(tmpdirname)
50
- new_scheduler = scheduler_class.from_pretrained(tmpdirname)
51
- new_scheduler.set_timesteps(num_inference_steps)
52
- # copy over dummy past residuals
53
- new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
54
-
55
- output, new_output = sample, sample
56
- for t in range(time_step, time_step + scheduler.config.solver_order + 1):
57
- output = scheduler.step(residual, t, output, **kwargs).prev_sample
58
- new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample
59
-
60
- assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
61
-
62
- def test_from_save_pretrained(self):
63
- pass
64
-
65
- def check_over_forward(self, time_step=0, **forward_kwargs):
66
- kwargs = dict(self.forward_default_kwargs)
67
- num_inference_steps = kwargs.pop("num_inference_steps", None)
68
- sample = self.dummy_sample
69
- residual = 0.1 * sample
70
- dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
71
-
72
- for scheduler_class in self.scheduler_classes:
73
- scheduler_config = self.get_scheduler_config()
74
- scheduler = scheduler_class(**scheduler_config)
75
- scheduler.set_timesteps(num_inference_steps)
76
-
77
- # copy over dummy past residuals (must be after setting timesteps)
78
- scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
79
-
80
- with tempfile.TemporaryDirectory() as tmpdirname:
81
- scheduler.save_config(tmpdirname)
82
- new_scheduler = scheduler_class.from_pretrained(tmpdirname)
83
- # copy over dummy past residuals
84
- new_scheduler.set_timesteps(num_inference_steps)
85
-
86
- # copy over dummy past residual (must be after setting timesteps)
87
- new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
88
-
89
- output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
90
- new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
91
-
92
- assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
93
-
94
- def full_loop(self, scheduler=None, **config):
95
- if scheduler is None:
96
- scheduler_class = self.scheduler_classes[0]
97
- scheduler_config = self.get_scheduler_config(**config)
98
- scheduler = scheduler_class(**scheduler_config)
99
-
100
- num_inference_steps = 10
101
- model = self.dummy_model()
102
- sample = self.dummy_sample_deter
103
- scheduler.set_timesteps(num_inference_steps)
104
-
105
- for i, t in enumerate(scheduler.timesteps):
106
- residual = model(sample, t)
107
- sample = scheduler.step(residual, t, sample).prev_sample
108
-
109
- return sample
110
-
111
- def test_step_shape(self):
112
- kwargs = dict(self.forward_default_kwargs)
113
-
114
- num_inference_steps = kwargs.pop("num_inference_steps", None)
115
-
116
- for scheduler_class in self.scheduler_classes:
117
- scheduler_config = self.get_scheduler_config()
118
- scheduler = scheduler_class(**scheduler_config)
119
-
120
- sample = self.dummy_sample
121
- residual = 0.1 * sample
122
-
123
- if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
124
- scheduler.set_timesteps(num_inference_steps)
125
- elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
126
- kwargs["num_inference_steps"] = num_inference_steps
127
-
128
- # copy over dummy past residuals (must be done after set_timesteps)
129
- dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
130
- scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
131
-
132
- time_step_0 = scheduler.timesteps[5]
133
- time_step_1 = scheduler.timesteps[6]
134
-
135
- output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
136
- output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
137
-
138
- self.assertEqual(output_0.shape, sample.shape)
139
- self.assertEqual(output_0.shape, output_1.shape)
140
-
141
- def test_timesteps(self):
142
- for timesteps in [25, 50, 100, 999, 1000]:
143
- self.check_over_configs(num_train_timesteps=timesteps)
144
-
145
- def test_thresholding(self):
146
- self.check_over_configs(thresholding=False)
147
- for order in [1, 2, 3]:
148
- for solver_type in ["midpoint", "heun"]:
149
- for threshold in [0.5, 1.0, 2.0]:
150
- for prediction_type in ["epsilon", "sample"]:
151
- self.check_over_configs(
152
- thresholding=True,
153
- prediction_type=prediction_type,
154
- sample_max_value=threshold,
155
- algorithm_type="dpmsolver++",
156
- solver_order=order,
157
- solver_type=solver_type,
158
- )
159
-
160
- def test_prediction_type(self):
161
- for prediction_type in ["epsilon", "v_prediction"]:
162
- self.check_over_configs(prediction_type=prediction_type)
163
-
164
- def test_solver_order_and_type(self):
165
- for algorithm_type in ["dpmsolver", "dpmsolver++"]:
166
- for solver_type in ["midpoint", "heun"]:
167
- for order in [1, 2, 3]:
168
- for prediction_type in ["epsilon", "sample"]:
169
- self.check_over_configs(
170
- solver_order=order,
171
- solver_type=solver_type,
172
- prediction_type=prediction_type,
173
- algorithm_type=algorithm_type,
174
- )
175
- sample = self.full_loop(
176
- solver_order=order,
177
- solver_type=solver_type,
178
- prediction_type=prediction_type,
179
- algorithm_type=algorithm_type,
180
- )
181
- assert not torch.isnan(sample).any(), "Samples have nan numbers"
182
-
183
- def test_lower_order_final(self):
184
- self.check_over_configs(lower_order_final=True)
185
- self.check_over_configs(lower_order_final=False)
186
-
187
- def test_lambda_min_clipped(self):
188
- self.check_over_configs(lambda_min_clipped=-float("inf"))
189
- self.check_over_configs(lambda_min_clipped=-5.1)
190
-
191
- def test_variance_type(self):
192
- self.check_over_configs(variance_type=None)
193
- self.check_over_configs(variance_type="learned_range")
194
-
195
- def test_timestep_spacing(self):
196
- for timestep_spacing in ["trailing", "leading"]:
197
- self.check_over_configs(timestep_spacing=timestep_spacing)
198
-
199
- def test_inference_steps(self):
200
- for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
201
- self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
202
-
203
- def test_full_loop_no_noise(self):
204
- sample = self.full_loop()
205
- result_mean = torch.mean(torch.abs(sample))
206
-
207
- assert abs(result_mean.item() - 0.7047) < 1e-3
208
-
209
- def test_full_loop_no_noise_thres(self):
210
- sample = self.full_loop(thresholding=True, dynamic_thresholding_ratio=0.87, sample_max_value=0.5)
211
- result_mean = torch.mean(torch.abs(sample))
212
-
213
- assert abs(result_mean.item() - 19.8933) < 1e-3
214
-
215
- def test_full_loop_with_v_prediction(self):
216
- sample = self.full_loop(prediction_type="v_prediction")
217
- result_mean = torch.mean(torch.abs(sample))
218
-
219
- assert abs(result_mean.item() - 1.5194) < 1e-3
220
-
221
- def test_full_loop_with_karras_and_v_prediction(self):
222
- sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
223
- result_mean = torch.mean(torch.abs(sample))
224
-
225
- assert abs(result_mean.item() - 1.7833) < 1e-3
226
-
227
- def test_switch(self):
228
- # make sure that iterating over schedulers with same config names gives same results
229
- # for defaults
230
- scheduler = DPMSolverMultistepInverseScheduler(**self.get_scheduler_config())
231
- sample = self.full_loop(scheduler=scheduler)
232
- result_mean = torch.mean(torch.abs(sample))
233
-
234
- assert abs(result_mean.item() - 0.7047) < 1e-3
235
-
236
- scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
237
- scheduler = DPMSolverMultistepInverseScheduler.from_config(scheduler.config)
238
-
239
- sample = self.full_loop(scheduler=scheduler)
240
- new_result_mean = torch.mean(torch.abs(sample))
241
-
242
- assert abs(new_result_mean.item() - result_mean.item()) < 1e-3
243
-
244
- def test_fp16_support(self):
245
- scheduler_class = self.scheduler_classes[0]
246
- scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
247
- scheduler = scheduler_class(**scheduler_config)
248
-
249
- num_inference_steps = 10
250
- model = self.dummy_model()
251
- sample = self.dummy_sample_deter.half()
252
- scheduler.set_timesteps(num_inference_steps)
253
-
254
- for i, t in enumerate(scheduler.timesteps):
255
- residual = model(sample, t)
256
- sample = scheduler.step(residual, t, sample).prev_sample
257
-
258
- assert sample.dtype == torch.float16
259
-
260
- def test_unique_timesteps(self, **config):
261
- for scheduler_class in self.scheduler_classes:
262
- scheduler_config = self.get_scheduler_config(**config)
263
- scheduler = scheduler_class(**scheduler_config)
264
-
265
- scheduler.set_timesteps(scheduler.config.num_train_timesteps)
266
- assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
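The loop these tests exercise looks roughly like the following when driven by hand; the constant residual here is a stand-in for a real noise-prediction model:

import torch
from diffusers import DPMSolverMultistepInverseScheduler

scheduler = DPMSolverMultistepInverseScheduler(
    num_train_timesteps=1000, beta_schedule="linear", solver_order=2
)
scheduler.set_timesteps(num_inference_steps=10)

sample = torch.randn(1, 3, 8, 8)           # toy sample to run through the inverse process
for t in scheduler.timesteps:
    model_output = 0.1 * sample            # placeholder for model(sample, t)
    sample = scheduler.step(model_output, t, sample).prev_sample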
spaces/Andy1621/uniformer_image_detection/configs/_base_/datasets/cityscapes_instance.py DELETED
@@ -1,55 +0,0 @@
1
- dataset_type = 'CityscapesDataset'
2
- data_root = 'data/cityscapes/'
3
- img_norm_cfg = dict(
4
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
5
- train_pipeline = [
6
- dict(type='LoadImageFromFile'),
7
- dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
8
- dict(
9
- type='Resize', img_scale=[(2048, 800), (2048, 1024)], keep_ratio=True),
10
- dict(type='RandomFlip', flip_ratio=0.5),
11
- dict(type='Normalize', **img_norm_cfg),
12
- dict(type='Pad', size_divisor=32),
13
- dict(type='DefaultFormatBundle'),
14
- dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
15
- ]
16
- test_pipeline = [
17
- dict(type='LoadImageFromFile'),
18
- dict(
19
- type='MultiScaleFlipAug',
20
- img_scale=(2048, 1024),
21
- flip=False,
22
- transforms=[
23
- dict(type='Resize', keep_ratio=True),
24
- dict(type='RandomFlip'),
25
- dict(type='Normalize', **img_norm_cfg),
26
- dict(type='Pad', size_divisor=32),
27
- dict(type='ImageToTensor', keys=['img']),
28
- dict(type='Collect', keys=['img']),
29
- ])
30
- ]
31
- data = dict(
32
- samples_per_gpu=1,
33
- workers_per_gpu=2,
34
- train=dict(
35
- type='RepeatDataset',
36
- times=8,
37
- dataset=dict(
38
- type=dataset_type,
39
- ann_file=data_root +
40
- 'annotations/instancesonly_filtered_gtFine_train.json',
41
- img_prefix=data_root + 'leftImg8bit/train/',
42
- pipeline=train_pipeline)),
43
- val=dict(
44
- type=dataset_type,
45
- ann_file=data_root +
46
- 'annotations/instancesonly_filtered_gtFine_val.json',
47
- img_prefix=data_root + 'leftImg8bit/val/',
48
- pipeline=test_pipeline),
49
- test=dict(
50
- type=dataset_type,
51
- ann_file=data_root +
52
- 'annotations/instancesonly_filtered_gtFine_test.json',
53
- img_prefix=data_root + 'leftImg8bit/test/',
54
- pipeline=test_pipeline))
55
- evaluation = dict(metric=['bbox', 'segm'])
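A config like the one above is consumed through MMCV's config loader; a minimal sketch, assuming the Cityscapes annotations have been converted and placed under `data/cityscapes/`:

from mmcv import Config
from mmdet.datasets import build_dataset

cfg = Config.fromfile("configs/_base_/datasets/cityscapes_instance.py")
train_dataset = build_dataset(cfg.data.train)   # RepeatDataset wrapping CityscapesDataset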
spaces/Andy1621/uniformer_image_detection/configs/cornernet/cornernet_hourglass104_mstest_32x3_210e_coco.py DELETED
@@ -1,105 +0,0 @@
1
- _base_ = [
2
- '../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py'
3
- ]
4
-
5
- # model settings
6
- model = dict(
7
- type='CornerNet',
8
- backbone=dict(
9
- type='HourglassNet',
10
- downsample_times=5,
11
- num_stacks=2,
12
- stage_channels=[256, 256, 384, 384, 384, 512],
13
- stage_blocks=[2, 2, 2, 2, 2, 4],
14
- norm_cfg=dict(type='BN', requires_grad=True)),
15
- neck=None,
16
- bbox_head=dict(
17
- type='CornerHead',
18
- num_classes=80,
19
- in_channels=256,
20
- num_feat_levels=2,
21
- corner_emb_channels=1,
22
- loss_heatmap=dict(
23
- type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1),
24
- loss_embedding=dict(
25
- type='AssociativeEmbeddingLoss',
26
- pull_weight=0.10,
27
- push_weight=0.10),
28
- loss_offset=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1)),
29
- # training and testing settings
30
- train_cfg=None,
31
- test_cfg=dict(
32
- corner_topk=100,
33
- local_maximum_kernel=3,
34
- distance_threshold=0.5,
35
- score_thr=0.05,
36
- max_per_img=100,
37
- nms=dict(type='soft_nms', iou_threshold=0.5, method='gaussian')))
38
- # data settings
39
- img_norm_cfg = dict(
40
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
41
- train_pipeline = [
42
- dict(type='LoadImageFromFile', to_float32=True),
43
- dict(type='LoadAnnotations', with_bbox=True),
44
- dict(
45
- type='PhotoMetricDistortion',
46
- brightness_delta=32,
47
- contrast_range=(0.5, 1.5),
48
- saturation_range=(0.5, 1.5),
49
- hue_delta=18),
50
- dict(
51
- type='RandomCenterCropPad',
52
- crop_size=(511, 511),
53
- ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
54
- test_mode=False,
55
- test_pad_mode=None,
56
- **img_norm_cfg),
57
- dict(type='Resize', img_scale=(511, 511), keep_ratio=False),
58
- dict(type='RandomFlip', flip_ratio=0.5),
59
- dict(type='Normalize', **img_norm_cfg),
60
- dict(type='DefaultFormatBundle'),
61
- dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
62
- ]
63
- test_pipeline = [
64
- dict(type='LoadImageFromFile', to_float32=True),
65
- dict(
66
- type='MultiScaleFlipAug',
67
- scale_factor=1.0,
68
- flip=True,
69
- transforms=[
70
- dict(type='Resize'),
71
- dict(
72
- type='RandomCenterCropPad',
73
- crop_size=None,
74
- ratios=None,
75
- border=None,
76
- test_mode=True,
77
- test_pad_mode=['logical_or', 127],
78
- **img_norm_cfg),
79
- dict(type='RandomFlip'),
80
- dict(type='Normalize', **img_norm_cfg),
81
- dict(type='ImageToTensor', keys=['img']),
82
- dict(
83
- type='Collect',
84
- keys=['img'],
85
- meta_keys=('filename', 'ori_shape', 'img_shape', 'pad_shape',
86
- 'scale_factor', 'flip', 'img_norm_cfg', 'border')),
87
- ])
88
- ]
89
- data = dict(
90
- samples_per_gpu=3,
91
- workers_per_gpu=3,
92
- train=dict(pipeline=train_pipeline),
93
- val=dict(pipeline=test_pipeline),
94
- test=dict(pipeline=test_pipeline))
95
- # optimizer
96
- optimizer = dict(type='Adam', lr=0.0005)
97
- optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
98
- # learning policy
99
- lr_config = dict(
100
- policy='step',
101
- warmup='linear',
102
- warmup_iters=500,
103
- warmup_ratio=1.0 / 3,
104
- step=[180])
105
- runner = dict(type='EpochBasedRunner', max_epochs=210)
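The model section of this config is turned into a detector object the same way; a sketch assuming the config file sits at its usual path inside an MMDetection checkout:

from mmcv import Config
from mmdet.models import build_detector

cfg = Config.fromfile("configs/cornernet/cornernet_hourglass104_mstest_32x3_210e_coco.py")
model = build_detector(cfg.model)     # CornerNet with an HourglassNet-104 backbone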
spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/centripetal_head.py DELETED
@@ -1,421 +0,0 @@
1
- import torch.nn as nn
2
- from mmcv.cnn import ConvModule, normal_init
3
- from mmcv.ops import DeformConv2d
4
-
5
- from mmdet.core import multi_apply
6
- from ..builder import HEADS, build_loss
7
- from .corner_head import CornerHead
8
-
9
-
10
- @HEADS.register_module()
11
- class CentripetalHead(CornerHead):
12
- """Head of CentripetalNet: Pursuing High-quality Keypoint Pairs for Object
13
- Detection.
14
-
15
- CentripetalHead inherits from :class:`CornerHead`. It removes the
16
- embedding branch and adds guiding shift and centripetal shift branches.
17
- More details can be found in the `paper
18
- <https://arxiv.org/abs/2003.09119>`_ .
19
-
20
- Args:
21
- num_classes (int): Number of categories excluding the background
22
- category.
23
- in_channels (int): Number of channels in the input feature map.
24
- num_feat_levels (int): Levels of feature from the previous module. 2
25
- for HourglassNet-104 and 1 for HourglassNet-52. HourglassNet-104
26
- outputs the final feature and intermediate supervision feature and
27
- HourglassNet-52 only outputs the final feature. Default: 2.
28
- corner_emb_channels (int): Channel of embedding vector. Default: 1.
29
- train_cfg (dict | None): Training config. Useless in CornerHead,
30
- but we keep this variable for SingleStageDetector. Default: None.
31
- test_cfg (dict | None): Testing config of CornerHead. Default: None.
32
- loss_heatmap (dict | None): Config of corner heatmap loss. Default:
33
- GaussianFocalLoss.
34
- loss_embedding (dict | None): Config of corner embedding loss. Default:
35
- AssociativeEmbeddingLoss.
36
- loss_offset (dict | None): Config of corner offset loss. Default:
37
- SmoothL1Loss.
38
- loss_guiding_shift (dict): Config of guiding shift loss. Default:
39
- SmoothL1Loss.
40
- loss_centripetal_shift (dict): Config of centripetal shift loss.
41
- Default: SmoothL1Loss.
42
- """
43
-
44
- def __init__(self,
45
- *args,
46
- centripetal_shift_channels=2,
47
- guiding_shift_channels=2,
48
- feat_adaption_conv_kernel=3,
49
- loss_guiding_shift=dict(
50
- type='SmoothL1Loss', beta=1.0, loss_weight=0.05),
51
- loss_centripetal_shift=dict(
52
- type='SmoothL1Loss', beta=1.0, loss_weight=1),
53
- **kwargs):
54
- assert centripetal_shift_channels == 2, (
55
- 'CentripetalHead only support centripetal_shift_channels == 2')
56
- self.centripetal_shift_channels = centripetal_shift_channels
57
- assert guiding_shift_channels == 2, (
58
- 'CentripetalHead only support guiding_shift_channels == 2')
59
- self.guiding_shift_channels = guiding_shift_channels
60
- self.feat_adaption_conv_kernel = feat_adaption_conv_kernel
61
- super(CentripetalHead, self).__init__(*args, **kwargs)
62
- self.loss_guiding_shift = build_loss(loss_guiding_shift)
63
- self.loss_centripetal_shift = build_loss(loss_centripetal_shift)
64
-
65
- def _init_centripetal_layers(self):
66
- """Initialize centripetal layers.
67
-
68
- Including feature adaption deform convs (feat_adaption), deform offset
69
- prediction convs (dcn_off), guiding shift (guiding_shift) and
70
- centripetal shift (centripetal_shift). Each branch has two parts:
71
- prefix `tl_` for top-left and `br_` for bottom-right.
72
- """
73
- self.tl_feat_adaption = nn.ModuleList()
74
- self.br_feat_adaption = nn.ModuleList()
75
- self.tl_dcn_offset = nn.ModuleList()
76
- self.br_dcn_offset = nn.ModuleList()
77
- self.tl_guiding_shift = nn.ModuleList()
78
- self.br_guiding_shift = nn.ModuleList()
79
- self.tl_centripetal_shift = nn.ModuleList()
80
- self.br_centripetal_shift = nn.ModuleList()
81
-
82
- for _ in range(self.num_feat_levels):
83
- self.tl_feat_adaption.append(
84
- DeformConv2d(self.in_channels, self.in_channels,
85
- self.feat_adaption_conv_kernel, 1, 1))
86
- self.br_feat_adaption.append(
87
- DeformConv2d(self.in_channels, self.in_channels,
88
- self.feat_adaption_conv_kernel, 1, 1))
89
-
90
- self.tl_guiding_shift.append(
91
- self._make_layers(
92
- out_channels=self.guiding_shift_channels,
93
- in_channels=self.in_channels))
94
- self.br_guiding_shift.append(
95
- self._make_layers(
96
- out_channels=self.guiding_shift_channels,
97
- in_channels=self.in_channels))
98
-
99
- self.tl_dcn_offset.append(
100
- ConvModule(
101
- self.guiding_shift_channels,
102
- self.feat_adaption_conv_kernel**2 *
103
- self.guiding_shift_channels,
104
- 1,
105
- bias=False,
106
- act_cfg=None))
107
- self.br_dcn_offset.append(
108
- ConvModule(
109
- self.guiding_shift_channels,
110
- self.feat_adaption_conv_kernel**2 *
111
- self.guiding_shift_channels,
112
- 1,
113
- bias=False,
114
- act_cfg=None))
115
-
116
- self.tl_centripetal_shift.append(
117
- self._make_layers(
118
- out_channels=self.centripetal_shift_channels,
119
- in_channels=self.in_channels))
120
- self.br_centripetal_shift.append(
121
- self._make_layers(
122
- out_channels=self.centripetal_shift_channels,
123
- in_channels=self.in_channels))
124
-
125
- def _init_layers(self):
126
- """Initialize layers for CentripetalHead.
127
-
128
- Including two parts: CornerHead layers and CentripetalHead layers
129
- """
130
- super()._init_layers() # using _init_layers in CornerHead
131
- self._init_centripetal_layers()
132
-
133
- def init_weights(self):
134
- """Initialize weights of the head."""
135
- super().init_weights()
136
- for i in range(self.num_feat_levels):
137
- normal_init(self.tl_feat_adaption[i], std=0.01)
138
- normal_init(self.br_feat_adaption[i], std=0.01)
139
- normal_init(self.tl_dcn_offset[i].conv, std=0.1)
140
- normal_init(self.br_dcn_offset[i].conv, std=0.1)
141
- _ = [x.conv.reset_parameters() for x in self.tl_guiding_shift[i]]
142
- _ = [x.conv.reset_parameters() for x in self.br_guiding_shift[i]]
143
- _ = [
144
- x.conv.reset_parameters() for x in self.tl_centripetal_shift[i]
145
- ]
146
- _ = [
147
- x.conv.reset_parameters() for x in self.br_centripetal_shift[i]
148
- ]
149
-
150
- def forward_single(self, x, lvl_ind):
151
- """Forward feature of a single level.
152
-
153
- Args:
154
- x (Tensor): Feature of a single level.
155
- lvl_ind (int): Level index of current feature.
156
-
157
- Returns:
158
- tuple[Tensor]: A tuple of CentripetalHead's output for current
159
- feature level. Containing the following Tensors:
160
-
161
- - tl_heat (Tensor): Predicted top-left corner heatmap.
162
- - br_heat (Tensor): Predicted bottom-right corner heatmap.
163
- - tl_off (Tensor): Predicted top-left offset heatmap.
164
- - br_off (Tensor): Predicted bottom-right offset heatmap.
165
- - tl_guiding_shift (Tensor): Predicted top-left guiding shift
166
- heatmap.
167
- - br_guiding_shift (Tensor): Predicted bottom-right guiding
168
- shift heatmap.
169
- - tl_centripetal_shift (Tensor): Predicted top-left centripetal
170
- shift heatmap.
171
- - br_centripetal_shift (Tensor): Predicted bottom-right
172
- centripetal shift heatmap.
173
- """
174
- tl_heat, br_heat, _, _, tl_off, br_off, tl_pool, br_pool = super(
175
- ).forward_single(
176
- x, lvl_ind, return_pool=True)
177
-
178
- tl_guiding_shift = self.tl_guiding_shift[lvl_ind](tl_pool)
179
- br_guiding_shift = self.br_guiding_shift[lvl_ind](br_pool)
180
-
181
- tl_dcn_offset = self.tl_dcn_offset[lvl_ind](tl_guiding_shift.detach())
182
- br_dcn_offset = self.br_dcn_offset[lvl_ind](br_guiding_shift.detach())
183
-
184
- tl_feat_adaption = self.tl_feat_adaption[lvl_ind](tl_pool,
185
- tl_dcn_offset)
186
- br_feat_adaption = self.br_feat_adaption[lvl_ind](br_pool,
187
- br_dcn_offset)
188
-
189
- tl_centripetal_shift = self.tl_centripetal_shift[lvl_ind](
190
- tl_feat_adaption)
191
- br_centripetal_shift = self.br_centripetal_shift[lvl_ind](
192
- br_feat_adaption)
193
-
194
- result_list = [
195
- tl_heat, br_heat, tl_off, br_off, tl_guiding_shift,
196
- br_guiding_shift, tl_centripetal_shift, br_centripetal_shift
197
- ]
198
- return result_list
199
-
200
- def loss(self,
201
- tl_heats,
202
- br_heats,
203
- tl_offs,
204
- br_offs,
205
- tl_guiding_shifts,
206
- br_guiding_shifts,
207
- tl_centripetal_shifts,
208
- br_centripetal_shifts,
209
- gt_bboxes,
210
- gt_labels,
211
- img_metas,
212
- gt_bboxes_ignore=None):
213
- """Compute losses of the head.
214
-
215
- Args:
216
- tl_heats (list[Tensor]): Top-left corner heatmaps for each level
217
- with shape (N, num_classes, H, W).
218
- br_heats (list[Tensor]): Bottom-right corner heatmaps for each
219
- level with shape (N, num_classes, H, W).
220
- tl_offs (list[Tensor]): Top-left corner offsets for each level
221
- with shape (N, corner_offset_channels, H, W).
222
- br_offs (list[Tensor]): Bottom-right corner offsets for each level
223
- with shape (N, corner_offset_channels, H, W).
224
- tl_guiding_shifts (list[Tensor]): Top-left guiding shifts for each
225
- level with shape (N, guiding_shift_channels, H, W).
226
- br_guiding_shifts (list[Tensor]): Bottom-right guiding shifts for
227
- each level with shape (N, guiding_shift_channels, H, W).
228
- tl_centripetal_shifts (list[Tensor]): Top-left centripetal shifts
229
- for each level with shape (N, centripetal_shift_channels, H,
230
- W).
231
- br_centripetal_shifts (list[Tensor]): Bottom-right centripetal
232
- shifts for each level with shape (N,
233
- centripetal_shift_channels, H, W).
234
- gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
235
- shape (num_gts, 4) in [left, top, right, bottom] format.
236
- gt_labels (list[Tensor]): Class indices corresponding to each box.
237
- img_metas (list[dict]): Meta information of each image, e.g.,
238
- image size, scaling factor, etc.
239
- gt_bboxes_ignore (list[Tensor] | None): Specify which bounding
240
- boxes can be ignored when computing the loss.
241
-
242
- Returns:
243
- dict[str, Tensor]: A dictionary of loss components. Containing the
244
- following losses:
245
-
246
- - det_loss (list[Tensor]): Corner keypoint losses of all
247
- feature levels.
248
- - off_loss (list[Tensor]): Corner offset losses of all feature
249
- levels.
250
- - guiding_loss (list[Tensor]): Guiding shift losses of all
251
- feature levels.
252
- - centripetal_loss (list[Tensor]): Centripetal shift losses of
253
- all feature levels.
254
- """
255
- targets = self.get_targets(
256
- gt_bboxes,
257
- gt_labels,
258
- tl_heats[-1].shape,
259
- img_metas[0]['pad_shape'],
260
- with_corner_emb=self.with_corner_emb,
261
- with_guiding_shift=True,
262
- with_centripetal_shift=True)
263
- mlvl_targets = [targets for _ in range(self.num_feat_levels)]
264
- [det_losses, off_losses, guiding_losses, centripetal_losses
265
- ] = multi_apply(self.loss_single, tl_heats, br_heats, tl_offs,
266
- br_offs, tl_guiding_shifts, br_guiding_shifts,
267
- tl_centripetal_shifts, br_centripetal_shifts,
268
- mlvl_targets)
269
- loss_dict = dict(
270
- det_loss=det_losses,
271
- off_loss=off_losses,
272
- guiding_loss=guiding_losses,
273
- centripetal_loss=centripetal_losses)
274
- return loss_dict
275
-
276
- def loss_single(self, tl_hmp, br_hmp, tl_off, br_off, tl_guiding_shift,
277
- br_guiding_shift, tl_centripetal_shift,
278
- br_centripetal_shift, targets):
279
- """Compute losses for single level.
280
-
281
- Args:
282
- tl_hmp (Tensor): Top-left corner heatmap for current level with
283
- shape (N, num_classes, H, W).
284
- br_hmp (Tensor): Bottom-right corner heatmap for current level with
285
- shape (N, num_classes, H, W).
286
- tl_off (Tensor): Top-left corner offset for current level with
287
- shape (N, corner_offset_channels, H, W).
288
- br_off (Tensor): Bottom-right corner offset for current level with
289
- shape (N, corner_offset_channels, H, W).
290
- tl_guiding_shift (Tensor): Top-left guiding shift for current level
291
- with shape (N, guiding_shift_channels, H, W).
292
- br_guiding_shift (Tensor): Bottom-right guiding shift for current
293
- level with shape (N, guiding_shift_channels, H, W).
294
- tl_centripetal_shift (Tensor): Top-left centripetal shift for
295
- current level with shape (N, centripetal_shift_channels, H, W).
296
- br_centripetal_shift (Tensor): Bottom-right centripetal shift for
297
- current level with shape (N, centripetal_shift_channels, H, W).
298
- targets (dict): Corner target generated by `get_targets`.
299
-
300
- Returns:
301
- tuple[torch.Tensor]: Losses of the head's differnet branches
302
- containing the following losses:
303
-
304
- - det_loss (Tensor): Corner keypoint loss.
305
- - off_loss (Tensor): Corner offset loss.
306
- - guiding_loss (Tensor): Guiding shift loss.
307
- - centripetal_loss (Tensor): Centripetal shift loss.
308
- """
309
- targets['corner_embedding'] = None
310
-
311
- det_loss, _, _, off_loss = super().loss_single(tl_hmp, br_hmp, None,
312
- None, tl_off, br_off,
313
- targets)
314
-
315
- gt_tl_guiding_shift = targets['topleft_guiding_shift']
316
- gt_br_guiding_shift = targets['bottomright_guiding_shift']
317
- gt_tl_centripetal_shift = targets['topleft_centripetal_shift']
318
- gt_br_centripetal_shift = targets['bottomright_centripetal_shift']
319
-
320
- gt_tl_heatmap = targets['topleft_heatmap']
321
- gt_br_heatmap = targets['bottomright_heatmap']
322
- # We only compute the offset loss at the real corner position.
323
- # The value of real corner would be 1 in heatmap ground truth.
324
- # The mask is computed in class agnostic mode and its shape is
325
- # batch * 1 * width * height.
326
- tl_mask = gt_tl_heatmap.eq(1).sum(1).gt(0).unsqueeze(1).type_as(
327
- gt_tl_heatmap)
328
- br_mask = gt_br_heatmap.eq(1).sum(1).gt(0).unsqueeze(1).type_as(
329
- gt_br_heatmap)
330
-
331
- # Guiding shift loss
332
- tl_guiding_loss = self.loss_guiding_shift(
333
- tl_guiding_shift,
334
- gt_tl_guiding_shift,
335
- tl_mask,
336
- avg_factor=tl_mask.sum())
337
- br_guiding_loss = self.loss_guiding_shift(
338
- br_guiding_shift,
339
- gt_br_guiding_shift,
340
- br_mask,
341
- avg_factor=br_mask.sum())
342
- guiding_loss = (tl_guiding_loss + br_guiding_loss) / 2.0
343
- # Centripetal shift loss
344
- tl_centripetal_loss = self.loss_centripetal_shift(
345
- tl_centripetal_shift,
346
- gt_tl_centripetal_shift,
347
- tl_mask,
348
- avg_factor=tl_mask.sum())
349
- br_centripetal_loss = self.loss_centripetal_shift(
350
- br_centripetal_shift,
351
- gt_br_centripetal_shift,
352
- br_mask,
353
- avg_factor=br_mask.sum())
354
- centripetal_loss = (tl_centripetal_loss + br_centripetal_loss) / 2.0
355
-
356
- return det_loss, off_loss, guiding_loss, centripetal_loss
357
-
358
- def get_bboxes(self,
359
- tl_heats,
360
- br_heats,
361
- tl_offs,
362
- br_offs,
363
- tl_guiding_shifts,
364
- br_guiding_shifts,
365
- tl_centripetal_shifts,
366
- br_centripetal_shifts,
367
- img_metas,
368
- rescale=False,
369
- with_nms=True):
370
- """Transform network output for a batch into bbox predictions.
371
-
372
- Args:
373
- tl_heats (list[Tensor]): Top-left corner heatmaps for each level
374
- with shape (N, num_classes, H, W).
375
- br_heats (list[Tensor]): Bottom-right corner heatmaps for each
376
- level with shape (N, num_classes, H, W).
377
- tl_offs (list[Tensor]): Top-left corner offsets for each level
378
- with shape (N, corner_offset_channels, H, W).
379
- br_offs (list[Tensor]): Bottom-right corner offsets for each level
380
- with shape (N, corner_offset_channels, H, W).
381
- tl_guiding_shifts (list[Tensor]): Top-left guiding shifts for each
382
- level with shape (N, guiding_shift_channels, H, W). Useless in
383
- this function, we keep this arg because it's the raw output
384
- from CentripetalHead.
385
- br_guiding_shifts (list[Tensor]): Bottom-right guiding shifts for
386
- each level with shape (N, guiding_shift_channels, H, W).
387
- Useless in this function, we keep this arg because it's the
388
- raw output from CentripetalHead.
389
- tl_centripetal_shifts (list[Tensor]): Top-left centripetal shifts
390
- for each level with shape (N, centripetal_shift_channels, H,
391
- W).
392
- br_centripetal_shifts (list[Tensor]): Bottom-right centripetal
393
- shifts for each level with shape (N,
394
- centripetal_shift_channels, H, W).
395
- img_metas (list[dict]): Meta information of each image, e.g.,
396
- image size, scaling factor, etc.
397
- rescale (bool): If True, return boxes in original image space.
398
- Default: False.
399
- with_nms (bool): If True, do nms before return boxes.
400
- Default: True.
401
- """
402
- assert tl_heats[-1].shape[0] == br_heats[-1].shape[0] == len(img_metas)
403
- result_list = []
404
- for img_id in range(len(img_metas)):
405
- result_list.append(
406
- self._get_bboxes_single(
407
- tl_heats[-1][img_id:img_id + 1, :],
408
- br_heats[-1][img_id:img_id + 1, :],
409
- tl_offs[-1][img_id:img_id + 1, :],
410
- br_offs[-1][img_id:img_id + 1, :],
411
- img_metas[img_id],
412
- tl_emb=None,
413
- br_emb=None,
414
- tl_centripetal_shift=tl_centripetal_shifts[-1][
415
- img_id:img_id + 1, :],
416
- br_centripetal_shift=br_centripetal_shifts[-1][
417
- img_id:img_id + 1, :],
418
- rescale=rescale,
419
- with_nms=with_nms))
420
-
421
- return result_list
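In a CentripetalNet config, the head above is instantiated through the registry rather than directly; a rough sketch of the equivalent call (the values mirror the paper's COCO setup, not a config from this repository):

from mmdet.models import build_head

head = build_head(
    dict(
        type='CentripetalHead',
        num_classes=80,
        in_channels=256,
        num_feat_levels=2,
        corner_emb_channels=0,       # the embedding branch is removed in CentripetalHead
        loss_embedding=None,
        loss_heatmap=dict(type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1),
        loss_offset=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1),
    )
)
head.init_weights()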
spaces/Andy1621/uniformer_image_segmentation/configs/gcnet/gcnet_r101-d8_512x1024_80k_cityscapes.py DELETED
@@ -1,2 +0,0 @@
1
- _base_ = './gcnet_r50-d8_512x1024_80k_cityscapes.py'
2
- model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
spaces/Andy1621/uniformer_image_segmentation/configs/ocrnet/ocrnet_hr18_512x512_40k_voc12aug.py DELETED
@@ -1,36 +0,0 @@
- _base_ = [
-     '../_base_/models/ocrnet_hr18.py',
-     '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py',
-     '../_base_/schedules/schedule_40k.py'
- ]
- norm_cfg = dict(type='SyncBN', requires_grad=True)
- model = dict(decode_head=[
-     dict(
-         type='FCNHead',
-         in_channels=[18, 36, 72, 144],
-         channels=sum([18, 36, 72, 144]),
-         in_index=(0, 1, 2, 3),
-         input_transform='resize_concat',
-         kernel_size=1,
-         num_convs=1,
-         concat_input=False,
-         dropout_ratio=-1,
-         num_classes=21,
-         norm_cfg=norm_cfg,
-         align_corners=False,
-         loss_decode=dict(
-             type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
-     dict(
-         type='OCRHead',
-         in_channels=[18, 36, 72, 144],
-         in_index=(0, 1, 2, 3),
-         input_transform='resize_concat',
-         channels=512,
-         ocr_channels=256,
-         dropout_ratio=-1,
-         num_classes=21,
-         norm_cfg=norm_cfg,
-         align_corners=False,
-         loss_decode=dict(
-             type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
- ])
 
spaces/Ank0X0/text-to-3d-shap-e-webui/app.py DELETED
@@ -1,49 +0,0 @@
- import torch
- import gradio as gr
-
- from shap_e.diffusion.sample import sample_latents
- from shap_e.diffusion.gaussian_diffusion import diffusion_from_config
- from shap_e.models.download import load_model, load_config
- from shap_e.util.notebooks import create_pan_cameras, decode_latent_images, gif_widget
- from shap_e.util.image_util import load_image
-
- device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-
- xm = load_model('transmitter', device=device)
- model = load_model('text300M', device=device)
- diffusion = diffusion_from_config(load_config('diffusion'))
-
- batch_size = 1
- guidance_scale = 9
-
- def get_latents(prompt):
-     return sample_latents(
-         batch_size=batch_size,
-         model=model,
-         diffusion=diffusion,
-         guidance_scale=guidance_scale,
-         model_kwargs=dict(texts=[prompt] * batch_size),
-         progress=True,
-         clip_denoised=False,
-         use_fp16=True,
-         use_karras=True,
-         karras_steps=64,
-         sigma_min=1e-3,
-         sigma_max=160,
-         s_churn=0,
-     )
-
- render_mode = 'nerf'
- size = 128
-
- cameras = create_pan_cameras(size, device)
-
- def get_gif(prompt):
-     for i, latent in enumerate(get_latents(prompt)):
-         images = decode_latent_images(xm, latent, cameras, rendering_mode=render_mode)
-     return gif_widget(images)
-
- iface = gr.Interface(fn = get_gif, inputs = "text", outputs=["html" , "text"] ,title = 'LatentVerse')
- iface.queue().launch(inline = False)
-
-
 
spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/model/base_function.py DELETED
@@ -1,611 +0,0 @@
1
- import torch
2
- import torch.nn as nn
3
- from torch.nn import init
4
- import functools
5
- from torch.optim import lr_scheduler
6
- import torch.nn.functional as F
7
- import math
8
- from einops import rearrange
9
- from .transformer_ops.transformer_function import TransformerEncoderLayer
10
-
11
-
12
- ######################################################################################
13
- # Attention-Aware Layer
14
- ######################################################################################
15
- class AttnAware(nn.Module):
16
- def __init__(self, input_nc, activation='gelu', norm='pixel', num_heads=2):
17
- super(AttnAware, self).__init__()
18
-
19
- activation_layer = get_nonlinearity_layer(activation)
20
- norm_layer = get_norm_layer(norm)
21
- head_dim = input_nc // num_heads
22
- self.num_heads = num_heads
23
- self.input_nc = input_nc
24
- self.scale = head_dim ** -0.5
25
-
26
- self.query_conv = nn.Sequential(
27
- norm_layer(input_nc),
28
- activation_layer,
29
- nn.Conv2d(input_nc, input_nc, kernel_size=1)
30
- )
31
- self.key_conv = nn.Sequential(
32
- norm_layer(input_nc),
33
- activation_layer,
34
- nn.Conv2d(input_nc, input_nc, kernel_size=1)
35
- )
36
-
37
- self.weight = nn.Conv2d(self.num_heads*2, 2, kernel_size=1, stride=1)
38
- self.to_out = ResnetBlock(input_nc * 2, input_nc, 1, 0, activation, norm)
39
-
40
- def forward(self, x, pre=None, mask=None):
41
- B, C, W, H = x.size()
42
- q = self.query_conv(x).view(B, -1, W*H)
43
- k = self.key_conv(x).view(B, -1, W*H)
44
- v = x.view(B, -1, W*H)
45
-
46
- q = rearrange(q, 'b (h d) n -> b h n d', h=self.num_heads)
47
- k = rearrange(k, 'b (h d) n -> b h n d', h=self.num_heads)
48
- v = rearrange(v, 'b (h d) n -> b h n d', h=self.num_heads)
49
- dots = torch.einsum('bhid,bhjd->bhij', q, k) * self.scale
50
-
51
- if pre is not None:
52
- # attention-aware weight
53
- B, head, N, N = dots.size()
54
- mask_n = mask.view(B, -1, 1, W * H).expand_as(dots)
55
- w_visible = (dots.detach() * mask_n).max(dim=-1, keepdim=True)[0]
56
- w_invisible = (dots.detach() * (1-mask_n)).max(dim=-1, keepdim=True)[0]
57
- weight = torch.cat([w_visible.view(B, head, W, H), w_invisible.view(B, head, W, H)], dim=1)
58
- weight = self.weight(weight)
59
- weight = F.softmax(weight, dim=1)
60
- # visible attention score
61
- pre_v = pre.view(B, -1, W*H)
62
- pre_v = rearrange(pre_v, 'b (h d) n -> b h n d', h=self.num_heads)
63
- dots_visible = torch.where(dots > 0, dots * mask_n, dots / (mask_n + 1e-8))
64
- attn_visible = dots_visible.softmax(dim=-1)
65
- context_flow = torch.einsum('bhij, bhjd->bhid', attn_visible, pre_v)
66
- context_flow = rearrange(context_flow, 'b h n d -> b (h d) n').view(B, -1, W, H)
67
- # invisible attention score
68
- dots_invisible = torch.where(dots > 0, dots * (1 - mask_n), dots / ((1 - mask_n) + 1e-8))
69
- attn_invisible = dots_invisible.softmax(dim=-1)
70
- self_attention = torch.einsum('bhij, bhjd->bhid', attn_invisible, v)
71
- self_attention = rearrange(self_attention, 'b h n d -> b (h d) n').view(B, -1, W, H)
72
- # out
73
- out = weight[:, :1, :, :]*context_flow + weight[:, 1:, :, :]*self_attention
74
- else:
75
- attn = dots.softmax(dim=-1)
76
- out = torch.einsum('bhij, bhjd->bhid', attn, v)
77
-
78
- out = rearrange(out, 'b h n d -> b (h d) n').view(B, -1, W, H)
79
-
80
- out = self.to_out(torch.cat([out, x], dim=1))
81
- return out
82
-
83
-
84
- ######################################################################################
85
- # base modules
86
- ######################################################################################
87
- class NoiseInjection(nn.Module):
88
- def __init__(self):
89
- super(NoiseInjection, self).__init__()
90
-
91
- self.alpha = nn.Parameter(torch.zeros(1))
92
-
93
- def forward(self, x, noise=None, mask=None):
94
- if noise is None:
95
- b, _, h, w = x.size()
96
- noise = x.new_empty(b, 1, h, w).normal_()
97
- if mask is not None:
98
- mask = F.interpolate(mask, size=x.size()[2:], mode='bilinear', align_corners=True)
99
- return x + self.alpha * noise * (1 - mask) # add noise only to the invisible part
100
- return x + self.alpha * noise
101
-
102
-
103
- class ConstantInput(nn.Module):
104
- """
105
- add position embedding for each learned VQ word
106
- """
107
- def __init__(self, channel, size=16):
108
- super().__init__()
109
-
110
- self.input = nn.Parameter(torch.randn(1, channel, size, size))
111
-
112
- def forward(self, input):
113
- batch = input.shape[0]
114
- out = self.input.repeat(batch, 1, 1, 1)
115
-
116
- return out
117
-
118
-
119
- class UpSample(nn.Module):
120
- """ sample with convolutional operation
121
- :param input_nc: input channel
122
- :param with_conv: use convolution to refine the feature
123
- :param kernel_size: feature size
124
- :param return_mask: return mask for the confidential score
125
- """
126
- def __init__(self, input_nc, with_conv=False, kernel_size=3, return_mask=False):
127
- super(UpSample, self).__init__()
128
- self.with_conv = with_conv
129
- self.return_mask = return_mask
130
- if self.with_conv:
131
- self.conv = PartialConv2d(input_nc, input_nc, kernel_size=kernel_size, stride=1,
132
- padding=int(int(kernel_size-1)/2), return_mask=True)
133
-
134
- def forward(self, x, mask=None):
135
- x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=True)
136
- mask = F.interpolate(mask, scale_factor=2, mode='bilinear', align_corners=True) if mask is not None else mask
137
- if self.with_conv:
138
- x, mask = self.conv(x, mask)
139
- if self.return_mask:
140
- return x, mask
141
- else:
142
- return x
143
-
144
-
145
- class DownSample(nn.Module):
146
- """ sample with convolutional operation
147
- :param input_nc: input channel
148
- :param with_conv: use convolution to refine the feature
149
- :param kernel_size: feature size
150
- :param return_mask: return mask for the confidential score
151
- """
152
- def __init__(self, input_nc, with_conv=False, kernel_size=3, return_mask=False):
153
- super(DownSample, self).__init__()
154
- self.with_conv = with_conv
155
- self.return_mask = return_mask
156
- if self.with_conv:
157
- self.conv = PartialConv2d(input_nc, input_nc, kernel_size=kernel_size, stride=2,
158
- padding=int(int(kernel_size-1)/2), return_mask=True)
159
-
160
- def forward(self, x, mask=None):
161
- if self.with_conv:
162
- x, mask = self.conv(x, mask)
163
- else:
164
- x = F.avg_pool2d(x, kernel_size=2, stride=2)
165
- mask = F.avg_pool2d(mask, kernel_size=2, stride=2) if mask is not None else mask
166
- if self.return_mask:
167
- return x, mask
168
- else:
169
- return x
170
-
171
-
172
- class ResnetBlock(nn.Module):
173
- def __init__(self, input_nc, output_nc=None, kernel=3, dropout=0.0, activation='gelu', norm='pixel', return_mask=False):
174
- super(ResnetBlock, self).__init__()
175
-
176
- activation_layer = get_nonlinearity_layer(activation)
177
- norm_layer = get_norm_layer(norm)
178
- self.return_mask = return_mask
179
-
180
- output_nc = input_nc if output_nc is None else output_nc
181
-
182
- self.norm1 = norm_layer(input_nc)
183
- self.conv1 = PartialConv2d(input_nc, output_nc, kernel_size=kernel, padding=int((kernel-1)/2), return_mask=True)
184
- self.norm2 = norm_layer(output_nc)
185
- self.conv2 = PartialConv2d(output_nc, output_nc, kernel_size=kernel, padding=int((kernel-1)/2), return_mask=True)
186
- self.dropout = nn.Dropout(dropout)
187
- self.act = activation_layer
188
-
189
- if input_nc != output_nc:
190
- self.short = PartialConv2d(input_nc, output_nc, kernel_size=1, stride=1, padding=0)
191
- else:
192
- self.short = Identity()
193
-
194
- def forward(self, x, mask=None):
195
- x_short = self.short(x)
196
- x, mask = self.conv1(self.act(self.norm1(x)), mask)
197
- x, mask = self.conv2(self.dropout(self.act(self.norm2(x))), mask)
198
- if self.return_mask:
199
- return (x + x_short) / math.sqrt(2), mask
200
- else:
201
- return (x + x_short) / math.sqrt(2)
202
-
203
-
204
- class DiffEncoder(nn.Module):
205
- def __init__(self, input_nc, ngf=64, kernel_size=2, embed_dim=512, down_scale=4, num_res_blocks=2, dropout=0.0,
206
- rample_with_conv=True, activation='gelu', norm='pixel', use_attn=False):
207
- super(DiffEncoder, self).__init__()
208
-
209
- activation_layer = get_nonlinearity_layer(activation)
210
- norm_layer = get_norm_layer(norm)
211
-
212
- # start
213
- self.encode = PartialConv2d(input_nc, ngf, kernel_size=kernel_size, stride=1, padding=int((kernel_size-1)/2), return_mask=True)
214
- # down
215
- self.use_attn = use_attn
216
- self.down_scale = down_scale
217
- self.num_res_blocks = num_res_blocks
218
- self.down = nn.ModuleList()
219
- out_dim = ngf
220
- for i in range(down_scale):
221
- block = nn.ModuleList()
222
- down = nn.Module()
223
- in_dim = out_dim
224
- out_dim = int(in_dim * 2)
225
- down.downsample = DownSample(in_dim, rample_with_conv, kernel_size=2, return_mask=True)
226
- for i_block in range(num_res_blocks):
227
- block.append(ResnetBlock(in_dim, out_dim, kernel_size, dropout, activation, norm, return_mask=True))
228
- in_dim = out_dim
229
- down.block = block
230
- self.down.append(down)
231
- # middle
232
- self.mid = nn.Module()
233
- self.mid.block1 = ResnetBlock(out_dim, out_dim, kernel_size, dropout, activation, norm, return_mask=True)
234
- if self.use_attn:
235
- self.mid.attn = TransformerEncoderLayer(out_dim, kernel=1)
236
- self.mid.block2 = ResnetBlock(out_dim, out_dim, kernel_size, dropout, activation, norm, return_mask=True)
237
- # end
238
- self.conv_out = ResnetBlock(out_dim, embed_dim, kernel_size, dropout, activation, norm, return_mask=True)
239
-
240
- def forward(self, x, mask=None, return_mask=False):
241
- x, mask = self.encode(x, mask)
242
- # down sampling
243
- for i in range(self.down_scale):
244
- x, mask = self.down[i].downsample(x, mask)
245
- for i_block in range(self.num_res_blocks):
246
- x, mask = self.down[i].block[i_block](x, mask)
247
- # middle
248
- x, mask = self.mid.block1(x, mask)
249
- if self.use_attn:
250
- x = self.mid.attn(x)
251
- x, mask = self.mid.block2(x, mask)
252
- # end
253
- x, mask = self.conv_out(x, mask)
254
- if return_mask:
255
- return x, mask
256
- return x
257
-
258
-
259
- class DiffDecoder(nn.Module):
260
- def __init__(self, output_nc, ngf=64, kernel_size=3, embed_dim=512, up_scale=4, num_res_blocks=2, dropout=0.0, word_size=16,
261
- rample_with_conv=True, activation='gelu', norm='pixel', add_noise=False, use_attn=True, use_pos=True):
262
- super(DiffDecoder, self).__init__()
263
-
264
- activation_layer = get_nonlinearity_layer(activation)
265
- norm_layer = get_norm_layer(norm)
266
- self.up_scale = up_scale
267
- self.num_res_blocks = num_res_blocks
268
- self.add_noise = add_noise
269
- self.use_attn = use_attn
270
- self.use_pos = use_pos
271
- in_dim = ngf * (2 ** self.up_scale)
272
-
273
- # start
274
- if use_pos:
275
- self.pos_embed = ConstantInput(embed_dim, size=word_size)
276
- self.conv_in = PartialConv2d(embed_dim, in_dim, kernel_size=kernel_size, stride=1, padding=int((kernel_size-1)/2))
277
- # middle
278
- self.mid = nn.Module()
279
- self.mid.block1 = ResnetBlock(in_dim, in_dim, kernel_size, dropout, activation, norm)
280
- if self.use_attn:
281
- self.mid.attn = TransformerEncoderLayer(in_dim, kernel=1)
282
- self.mid.block2 = ResnetBlock(in_dim, in_dim, kernel_size, dropout, activation, norm)
283
- # up
284
- self.up = nn.ModuleList()
285
- out_dim = in_dim
286
- for i in range(up_scale):
287
- block = nn.ModuleList()
288
- attn = nn.ModuleList()
289
- noise = nn.ModuleList()
290
- up = nn.Module()
291
- in_dim = out_dim
292
- out_dim = int(in_dim / 2)
293
- for i_block in range(num_res_blocks):
294
- if add_noise:
295
- noise.append(NoiseInjection())
296
- block.append(ResnetBlock(in_dim, out_dim, kernel_size, dropout, activation, norm))
297
- in_dim = out_dim
298
- if i == 0 and self.use_attn:
299
- attn.append(TransformerEncoderLayer(in_dim, kernel=1))
300
- up.block = block
301
- up.attn = attn
302
- up.noise = noise
303
- upsample = True if (i != 0) else False
304
- up.out = ToRGB(in_dim, output_nc, upsample, activation, norm)
305
- up.upsample = UpSample(in_dim, rample_with_conv, kernel_size=3)
306
- self.up.append(up)
307
- # end
308
- self.decode = ToRGB(in_dim, output_nc, True, activation, norm)
309
-
310
- def forward(self, x, mask=None):
311
- x = x + self.pos_embed(x) if self.use_pos else x
312
- x = self.conv_in(x)
313
- # middle
314
- x = self.mid.block1(x)
315
- if self.use_attn:
316
- x = self.mid.attn(x)
317
- x = self.mid.block2(x)
318
- # up
319
- skip = None
320
- for i in range(self.up_scale):
321
- for i_block in range(self.num_res_blocks):
322
- if self.add_noise:
323
- x = self.up[i].noise[i_block](x, mask=mask)
324
- x = self.up[i].block[i_block](x)
325
- if len(self.up[i].attn) > 0:
326
- x = self.up[i].attn[i_block](x)
327
- skip = self.up[i].out(x, skip)
328
- x = self.up[i].upsample(x)
329
- # end
330
- x = self.decode(x, skip)
331
- return x
332
-
333
-
334
- class LinearEncoder(nn.Module):
335
- def __init__(self, input_nc, kernel_size=16, embed_dim=512):
336
- super(LinearEncoder, self).__init__()
337
-
338
- self.encode = PartialConv2d(input_nc, embed_dim, kernel_size=kernel_size, stride=kernel_size, return_mask=True)
339
-
340
- def forward(self, x, mask=None, return_mask=False):
341
- x, mask = self.encode(x, mask)
342
- if return_mask:
343
- return x, mask
344
- return x
345
-
346
-
347
- class LinearDecoder(nn.Module):
348
- def __init__(self, output_nc, ngf=64, kernel_size=16, embed_dim=512, activation='gelu', norm='pixel'):
349
- super(LinearDecoder, self).__init__()
350
-
351
- activation_layer = get_nonlinearity_layer(activation)
352
- norm_layer = get_norm_layer(norm)
353
-
354
- self.decode = nn.Sequential(
355
- norm_layer(embed_dim),
356
- activation_layer,
357
- PartialConv2d(embed_dim, ngf*kernel_size*kernel_size, kernel_size=3, padding=1),
358
- nn.PixelShuffle(kernel_size),
359
- norm_layer(ngf),
360
- activation_layer,
361
- PartialConv2d(ngf, output_nc, kernel_size=3, padding=1)
362
- )
363
-
364
- def forward(self, x, mask=None):
365
- x = self.decode(x)
366
-
367
- return torch.tanh(x)
368
-
369
-
370
- class ToRGB(nn.Module):
371
- def __init__(self, input_nc, output_nc, upsample=True, activation='gelu', norm='pixel'):
372
- super().__init__()
373
-
374
- activation_layer = get_nonlinearity_layer(activation)
375
- norm_layer = get_norm_layer(norm)
376
-
377
- if upsample:
378
- self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
379
- input_nc = input_nc + output_nc
380
-
381
- self.conv = nn.Sequential(
382
- norm_layer(input_nc),
383
- activation_layer,
384
- PartialConv2d(input_nc, output_nc, kernel_size=3, padding=1)
385
- )
386
-
387
- def forward(self, input, skip=None):
388
- if skip is not None:
389
- skip = self.upsample(skip)
390
- input = torch.cat([input, skip], dim=1)
391
-
392
- out = self.conv(input)
393
-
394
- return torch.tanh(out)
395
-
396
-
397
- ######################################################################################
398
- # base function for network structure
399
- ######################################################################################
400
- def get_scheduler(optimizer, opt):
401
- """Return a learning rate scheduler
402
- Parameters:
403
- optimizer -- the optimizer of the network
404
- opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions. 
405
- opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine
406
- """
407
- if opt.lr_policy == 'linear':
408
- def lambda_rule(iter):
409
- lr_l = 1.0 - max(0, iter + opt.iter_count - opt.n_iter) / float(opt.n_iter_decay + 1)
410
- return lr_l
411
- scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
412
- elif opt.lr_policy == 'plateau':
413
- scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
414
- elif opt.lr_policy == 'cosine':
415
- scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.n_epochs, eta_min=0)
416
- else:
417
- return NotImplementedError('learning rate policy [%s] is not implemented', opt.lr_policy)
418
- return scheduler
419
-
420
-
421
- def init_weights(net, init_type='normal', init_gain=0.02, debug=False):
422
- """Initialize network weights.
423
-
424
- Parameters:
425
- net (network) -- network to be initialized
426
- init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
427
- init_gain (float) -- scaling factor for normal, xavier and orthogonal.
428
-
429
- We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might
430
- work better for some applications. Feel free to try yourself.
431
- """
432
- def init_func(m): # define the initialization function
433
- classname = m.__class__.__name__
434
- if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
435
- if debug:
436
- print(classname)
437
- if init_type == 'normal':
438
- init.normal_(m.weight.data, 0.0, init_gain)
439
- elif init_type == 'xavier':
440
- init.xavier_normal_(m.weight.data, gain=init_gain)
441
- elif init_type == 'kaiming':
442
- init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
443
- elif init_type == 'orthogonal':
444
- init.orthogonal_(m.weight.data, gain=init_gain)
445
- else:
446
- raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
447
- if hasattr(m, 'bias') and m.bias is not None:
448
- init.constant_(m.bias.data, 0.0)
449
- elif classname.find('BatchNorm2d') != -1: # BatchNorm Layer's weight is not a matrix; only normal distribution applies.
450
- init.normal_(m.weight.data, 1.0, init_gain)
451
- init.constant_(m.bias.data, 0.0)
452
-
453
- net.apply(init_func) # apply the initialization function <init_func>
454
-
455
-
456
- def init_net(net, init_type='normal', init_gain=0.02, debug=False, initialize_weights=True):
457
- """Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights
458
- Parameters:
459
- net (network) -- the network to be initialized
460
- init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
461
- gain (float) -- scaling factor for normal, xavier and orthogonal.
462
- gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
463
-
464
- Return an initialized network.
465
- """
466
- if initialize_weights:
467
- init_weights(net, init_type, init_gain=init_gain, debug=debug)
468
- return net
469
-
470
-
471
- class Identity(nn.Module):
472
- def forward(self, x):
473
- return x
474
-
475
-
476
- def get_norm_layer(norm_type='instance'):
477
- """Return a normalization layer
478
-
479
- Parameters:
480
- norm_type (str) -- the name of the normalization layer: batch | instance | none
481
-
482
- For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).
483
- For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
484
- """
485
- if norm_type == 'batch':
486
- norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
487
- elif norm_type == 'instance':
488
- norm_layer = functools.partial(nn.InstanceNorm2d, affine=True)
489
- elif norm_type == 'pixel':
490
- norm_layer = functools.partial(PixelwiseNorm)
491
- elif norm_type == 'layer':
492
- norm_layer = functools.partial(nn.LayerNorm)
493
- elif norm_type == 'none':
494
- def norm_layer(x): return Identity()
495
- else:
496
- raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
497
- return norm_layer
498
-
499
-
500
- def get_nonlinearity_layer(activation_type='PReLU'):
501
- """Get the activation layer for the networks"""
502
- if activation_type == 'relu':
503
- nonlinearity_layer = nn.ReLU()
504
- elif activation_type == 'gelu':
505
- nonlinearity_layer = nn.GELU()
506
- elif activation_type == 'leakyrelu':
507
- nonlinearity_layer = nn.LeakyReLU(0.2)
508
- elif activation_type == 'prelu':
509
- nonlinearity_layer = nn.PReLU()
510
- else:
511
- raise NotImplementedError('activation layer [%s] is not found' % activation_type)
512
- return nonlinearity_layer
513
-
514
-
515
- class PixelwiseNorm(nn.Module):
516
- def __init__(self, input_nc):
517
- super(PixelwiseNorm, self).__init__()
518
- self.init = False
519
- self.alpha = nn.Parameter(torch.ones(1, input_nc, 1, 1))
520
-
521
- def forward(self, x, alpha=1e-8):
522
- """
523
- forward pass of the module
524
- :param x: input activations volume
525
- :param alpha: small number for numerical stability
526
- :return: y => pixel normalized activations
527
- """
528
- # x = x - x.mean(dim=1, keepdim=True)
529
- y = x.pow(2.).mean(dim=1, keepdim=True).add(alpha).rsqrt() # [N1HW]
530
- y = x * y # normalize the input x volume
531
- return self.alpha*y
532
-
533
-
534
- ###############################################################################
535
- # BSD 3-Clause License
536
- #
537
- # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
538
- #
539
- # Author & Contact: Guilin Liu ([email protected])
540
- ###############################################################################
541
- class PartialConv2d(nn.Conv2d):
542
- def __init__(self, *args, **kwargs):
543
-
544
- # whether the mask is multi-channel or not
545
- if 'multi_channel' in kwargs:
546
- self.multi_channel = kwargs['multi_channel']
547
- kwargs.pop('multi_channel')
548
- else:
549
- self.multi_channel = False
550
-
551
- if 'return_mask' in kwargs:
552
- self.return_mask = kwargs['return_mask']
553
- kwargs.pop('return_mask')
554
- else:
555
- self.return_mask = False
556
-
557
- super(PartialConv2d, self).__init__(*args, **kwargs)
558
-
559
- if self.multi_channel:
560
- self.weight_maskUpdater = torch.ones(self.out_channels, self.in_channels, self.kernel_size[0],
561
- self.kernel_size[1])
562
- else:
563
- self.weight_maskUpdater = torch.ones(1, 1, self.kernel_size[0], self.kernel_size[1])
564
-
565
- self.slide_winsize = self.weight_maskUpdater.shape[1] * self.weight_maskUpdater.shape[2] * \
566
- self.weight_maskUpdater.shape[3]
567
-
568
- self.last_size = (None, None, None, None)
569
- self.update_mask = None
570
- self.mask_ratio = None
571
-
572
- def forward(self, input, mask_in=None):
573
- assert len(input.shape) == 4
574
- if mask_in is not None or self.last_size != tuple(input.shape):
575
- self.last_size = tuple(input.shape)
576
-
577
- with torch.no_grad():
578
- if self.weight_maskUpdater.type() != input.type():
579
- self.weight_maskUpdater = self.weight_maskUpdater.to(input)
580
-
581
- if mask_in is None:
582
- # if mask is not provided, create a mask
583
- if self.multi_channel:
584
- mask = torch.ones(input.data.shape[0], input.data.shape[1], input.data.shape[2],
585
- input.data.shape[3]).to(input)
586
- else:
587
- mask = torch.ones(1, 1, input.data.shape[2], input.data.shape[3]).to(input)
588
- else:
589
- mask = mask_in
590
-
591
- self.update_mask = F.conv2d(mask, self.weight_maskUpdater, bias=None, stride=self.stride,
592
- padding=self.padding, dilation=self.dilation, groups=1)
593
-
594
- # for mixed precision training, change 1e-8 to 1e-6
595
- self.mask_ratio = self.slide_winsize / (self.update_mask + 1e-8)
596
- self.update_mask1 = torch.clamp(self.update_mask, 0, 1)
597
- self.mask_ratio = torch.mul(self.mask_ratio, self.update_mask1)
598
-
599
- raw_out = super(PartialConv2d, self).forward(torch.mul(input, mask) if mask_in is not None else input)
600
-
601
- if self.bias is not None:
602
- bias_view = self.bias.view(1, self.out_channels, 1, 1)
603
- output = torch.mul(raw_out - bias_view, self.mask_ratio) + bias_view
604
- output = torch.mul(output, self.update_mask1)
605
- else:
606
- output = torch.mul(raw_out, self.mask_ratio)
607
-
608
- if self.return_mask:
609
- return output, self.update_mask / self.slide_winsize # replace the valid value to confident score
610
- else:
611
- return output
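
For orientation, here is a minimal sketch of how the PartialConv2d layer deleted above can be exercised on a masked image tensor. The import path is assumed to mirror this repository's layout (object_removal/TFill/model/base_function.py) and is hypothetical outside it; the shapes are illustrative.

    import torch
    from model.base_function import PartialConv2d  # assumed repo-relative import

    # 1-channel validity mask: 1 = visible pixel, 0 = hole to be inpainted
    conv = PartialConv2d(3, 16, kernel_size=3, padding=1, return_mask=True)
    image = torch.randn(1, 3, 64, 64)
    mask = (torch.rand(1, 1, 64, 64) > 0.3).float()

    features, confidence = conv(image, mask)   # re-weighted conv output + confidence mask
    print(features.shape)                      # torch.Size([1, 16, 64, 64])
    print(confidence.shape)                    # torch.Size([1, 1, 64, 64])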
 
spaces/AntX-ai/Fintech/README.md DELETED
@@ -1,10 +0,0 @@
- ---
- title: Fintech
- emoji: 📈
- colorFrom: purple
- colorTo: indigo
- sdk: static
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/AriaMei/TTSdemo/text/symbols.py DELETED
@@ -1,69 +0,0 @@
- '''
- Defines the set of symbols used in text input to the model.
- '''
-
- _pad = '_'
- _punctuation = ',.!?-'
- _letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧ↓↑ '
-
- '''
- # japanese_cleaners2
- _pad = '_'
- _punctuation = ',.!?-~…'
- _letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧʦ↓↑ '
- '''
-
- '''# korean_cleaners
- _pad = '_'
- _punctuation = ',.!?…~'
- _letters = 'ㄱㄴㄷㄹㅁㅂㅅㅇㅈㅊㅋㅌㅍㅎㄲㄸㅃㅆㅉㅏㅓㅗㅜㅡㅣㅐㅔ '
- '''
-
- '''# chinese_cleaners
- _pad = '_'
- _punctuation = ',。!?—…'
- _letters = 'ㄅㄆㄇㄈㄉㄊㄋㄌㄍㄎㄏㄐㄑㄒㄓㄔㄕㄖㄗㄘㄙㄚㄛㄜㄝㄞㄟㄠㄡㄢㄣㄤㄥㄦㄧㄨㄩˉˊˇˋ˙ '
- '''
-
-
- '''# sanskrit_cleaners
- _pad = '_'
- _punctuation = '।'
- _letters = 'ँंःअआइईउऊऋएऐओऔकखगघङचछजझञटठडढणतथदधनपफबभमयरलळवशषसहऽािीुूृॄेैोौ्ॠॢ '
- '''
-
- '''# cjks_cleaners
- _pad = '_'
- _punctuation = ',.!?-~…'
- _letters = 'NQabdefghijklmnopstuvwxyzʃʧʥʦɯɹəɥçɸɾβŋɦː⁼ʰ`^#*=→↓↑ '
- '''
-
- '''# thai_cleaners
- _pad = '_'
- _punctuation = '.!? '
- _letters = 'กขฃคฆงจฉชซฌญฎฏฐฑฒณดตถทธนบปผฝพฟภมยรฤลวศษสหฬอฮฯะัาำิีึืุูเแโใไๅๆ็่้๊๋์'
- '''
-
- '''# cjke_cleaners2
- _pad = '_'
- _punctuation = ',.!?-~…'
- _letters = 'NQabdefghijklmnopstuvwxyzɑæʃʑçɯɪɔɛɹðəɫɥɸʊɾʒθβŋɦ⁼ʰ`^#*=ˈˌ→↓↑ '
- '''
-
- '''# shanghainese_cleaners
- _pad = '_'
- _punctuation = ',.!?…'
- _letters = 'abdfghiklmnopstuvyzøŋȵɑɔɕəɤɦɪɿʑʔʰ̩̃ᴀᴇ15678 '
- '''
-
- '''# chinese_dialect_cleaners
- _pad = '_'
- _punctuation = ',.!?~…─'
- _letters = '#Nabdefghijklmnoprstuvwxyzæçøŋœȵɐɑɒɓɔɕɗɘəɚɛɜɣɤɦɪɭɯɵɷɸɻɾɿʂʅʊʋʌʏʑʔʦʮʰʷˀː˥˦˧˨˩̥̩̃̚αᴀᴇ↑↓∅ⱼ '
- '''
-
- # Export all symbols:
- symbols = [_pad] + list(_punctuation) + list(_letters)
-
- # Special symbol ids
- SPACE_ID = symbols.index(" ")
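
A short, self-contained sketch of how a symbol table like the one deleted above is typically consumed downstream (the helper name here is illustrative and not taken from this repo): each character of a cleaned text string is mapped to its index before being fed to the acoustic model.

    _pad = '_'
    _punctuation = ',.!?-'
    _letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧ↓↑ '

    symbols = [_pad] + list(_punctuation) + list(_letters)
    symbol_to_id = {s: i for i, s in enumerate(symbols)}

    def text_to_sequence(text):
        # drop characters that are not in the symbol set
        return [symbol_to_id[ch] for ch in text if ch in symbol_to_id]

    print(text_to_sequence('koNnichiwa.'))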
 
spaces/Artrajz/vits-simple-api/vits/transforms.py DELETED
@@ -1,193 +0,0 @@
1
- import torch
2
- from torch.nn import functional as F
3
-
4
- import numpy as np
5
-
6
-
7
- DEFAULT_MIN_BIN_WIDTH = 1e-3
8
- DEFAULT_MIN_BIN_HEIGHT = 1e-3
9
- DEFAULT_MIN_DERIVATIVE = 1e-3
10
-
11
-
12
- def piecewise_rational_quadratic_transform(inputs,
13
- unnormalized_widths,
14
- unnormalized_heights,
15
- unnormalized_derivatives,
16
- inverse=False,
17
- tails=None,
18
- tail_bound=1.,
19
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
20
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
21
- min_derivative=DEFAULT_MIN_DERIVATIVE):
22
-
23
- if tails is None:
24
- spline_fn = rational_quadratic_spline
25
- spline_kwargs = {}
26
- else:
27
- spline_fn = unconstrained_rational_quadratic_spline
28
- spline_kwargs = {
29
- 'tails': tails,
30
- 'tail_bound': tail_bound
31
- }
32
-
33
- outputs, logabsdet = spline_fn(
34
- inputs=inputs,
35
- unnormalized_widths=unnormalized_widths,
36
- unnormalized_heights=unnormalized_heights,
37
- unnormalized_derivatives=unnormalized_derivatives,
38
- inverse=inverse,
39
- min_bin_width=min_bin_width,
40
- min_bin_height=min_bin_height,
41
- min_derivative=min_derivative,
42
- **spline_kwargs
43
- )
44
- return outputs, logabsdet
45
-
46
-
47
- def searchsorted(bin_locations, inputs, eps=1e-6):
48
- bin_locations[..., -1] += eps
49
- return torch.sum(
50
- inputs[..., None] >= bin_locations,
51
- dim=-1
52
- ) - 1
53
-
54
-
55
- def unconstrained_rational_quadratic_spline(inputs,
56
- unnormalized_widths,
57
- unnormalized_heights,
58
- unnormalized_derivatives,
59
- inverse=False,
60
- tails='linear',
61
- tail_bound=1.,
62
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
63
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
64
- min_derivative=DEFAULT_MIN_DERIVATIVE):
65
- inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
66
- outside_interval_mask = ~inside_interval_mask
67
-
68
- outputs = torch.zeros_like(inputs)
69
- logabsdet = torch.zeros_like(inputs)
70
-
71
- if tails == 'linear':
72
- unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
73
- constant = np.log(np.exp(1 - min_derivative) - 1)
74
- unnormalized_derivatives[..., 0] = constant
75
- unnormalized_derivatives[..., -1] = constant
76
-
77
- outputs[outside_interval_mask] = inputs[outside_interval_mask]
78
- logabsdet[outside_interval_mask] = 0
79
- else:
80
- raise RuntimeError('{} tails are not implemented.'.format(tails))
81
-
82
- outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline(
83
- inputs=inputs[inside_interval_mask],
84
- unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
85
- unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
86
- unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
87
- inverse=inverse,
88
- left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound,
89
- min_bin_width=min_bin_width,
90
- min_bin_height=min_bin_height,
91
- min_derivative=min_derivative
92
- )
93
-
94
- return outputs, logabsdet
95
-
96
- def rational_quadratic_spline(inputs,
97
- unnormalized_widths,
98
- unnormalized_heights,
99
- unnormalized_derivatives,
100
- inverse=False,
101
- left=0., right=1., bottom=0., top=1.,
102
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
103
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
104
- min_derivative=DEFAULT_MIN_DERIVATIVE):
105
- if torch.min(inputs) < left or torch.max(inputs) > right:
106
- raise ValueError('Input to a transform is not within its domain')
107
-
108
- num_bins = unnormalized_widths.shape[-1]
109
-
110
- if min_bin_width * num_bins > 1.0:
111
- raise ValueError('Minimal bin width too large for the number of bins')
112
- if min_bin_height * num_bins > 1.0:
113
- raise ValueError('Minimal bin height too large for the number of bins')
114
-
115
- widths = F.softmax(unnormalized_widths, dim=-1)
116
- widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
117
- cumwidths = torch.cumsum(widths, dim=-1)
118
- cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0)
119
- cumwidths = (right - left) * cumwidths + left
120
- cumwidths[..., 0] = left
121
- cumwidths[..., -1] = right
122
- widths = cumwidths[..., 1:] - cumwidths[..., :-1]
123
-
124
- derivatives = min_derivative + F.softplus(unnormalized_derivatives)
125
-
126
- heights = F.softmax(unnormalized_heights, dim=-1)
127
- heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
128
- cumheights = torch.cumsum(heights, dim=-1)
129
- cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0)
130
- cumheights = (top - bottom) * cumheights + bottom
131
- cumheights[..., 0] = bottom
132
- cumheights[..., -1] = top
133
- heights = cumheights[..., 1:] - cumheights[..., :-1]
134
-
135
- if inverse:
136
- bin_idx = searchsorted(cumheights, inputs)[..., None]
137
- else:
138
- bin_idx = searchsorted(cumwidths, inputs)[..., None]
139
-
140
- input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
141
- input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
142
-
143
- input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
144
- delta = heights / widths
145
- input_delta = delta.gather(-1, bin_idx)[..., 0]
146
-
147
- input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
148
- input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]
149
-
150
- input_heights = heights.gather(-1, bin_idx)[..., 0]
151
-
152
- if inverse:
153
- a = (((inputs - input_cumheights) * (input_derivatives
154
- + input_derivatives_plus_one
155
- - 2 * input_delta)
156
- + input_heights * (input_delta - input_derivatives)))
157
- b = (input_heights * input_derivatives
158
- - (inputs - input_cumheights) * (input_derivatives
159
- + input_derivatives_plus_one
160
- - 2 * input_delta))
161
- c = - input_delta * (inputs - input_cumheights)
162
-
163
- discriminant = b.pow(2) - 4 * a * c
164
- assert (discriminant >= 0).all()
165
-
166
- root = (2 * c) / (-b - torch.sqrt(discriminant))
167
- outputs = root * input_bin_widths + input_cumwidths
168
-
169
- theta_one_minus_theta = root * (1 - root)
170
- denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
171
- * theta_one_minus_theta)
172
- derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2)
173
- + 2 * input_delta * theta_one_minus_theta
174
- + input_derivatives * (1 - root).pow(2))
175
- logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
176
-
177
- return outputs, -logabsdet
178
- else:
179
- theta = (inputs - input_cumwidths) / input_bin_widths
180
- theta_one_minus_theta = theta * (1 - theta)
181
-
182
- numerator = input_heights * (input_delta * theta.pow(2)
183
- + input_derivatives * theta_one_minus_theta)
184
- denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
185
- * theta_one_minus_theta)
186
- outputs = input_cumheights + numerator / denominator
187
-
188
- derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2)
189
- + 2 * input_delta * theta_one_minus_theta
190
- + input_derivatives * (1 - theta).pow(2))
191
- logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
192
-
193
- return outputs, logabsdet
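
The deleted transforms module implements the rational-quadratic spline flow used by VITS. A rough usage sketch follows; the import path and tensor shapes are assumptions inferred from the code above, not documented anywhere in this commit. With tails='linear' the derivative parameters carry one fewer bin than the widths/heights (they are padded internally), and inputs outside tail_bound pass through unchanged.

    import torch
    from vits.transforms import piecewise_rational_quadratic_transform  # assumed import path

    num_bins = 10
    x = torch.randn(2, 192, 50)                     # e.g. (batch, channels, time)
    widths = torch.randn(2, 192, 50, num_bins)
    heights = torch.randn(2, 192, 50, num_bins)
    derivs = torch.randn(2, 192, 50, num_bins - 1)  # padded to num_bins + 1 for linear tails

    y, logabsdet = piecewise_rational_quadratic_transform(
        x, widths, heights, derivs, inverse=False, tails='linear', tail_bound=5.0)
    print(y.shape, logabsdet.shape)                 # both torch.Size([2, 192, 50])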
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/controller.py DELETED
@@ -1,439 +0,0 @@
1
- # SPDX-FileCopyrightText: 2015 Eric Larson
2
- #
3
- # SPDX-License-Identifier: Apache-2.0
4
-
5
- """
6
- The httplib2 algorithms ported for use with requests.
7
- """
8
- import logging
9
- import re
10
- import calendar
11
- import time
12
- from email.utils import parsedate_tz
13
-
14
- from pip._vendor.requests.structures import CaseInsensitiveDict
15
-
16
- from .cache import DictCache, SeparateBodyBaseCache
17
- from .serialize import Serializer
18
-
19
-
20
- logger = logging.getLogger(__name__)
21
-
22
- URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?")
23
-
24
- PERMANENT_REDIRECT_STATUSES = (301, 308)
25
-
26
-
27
- def parse_uri(uri):
28
- """Parses a URI using the regex given in Appendix B of RFC 3986.
29
-
30
- (scheme, authority, path, query, fragment) = parse_uri(uri)
31
- """
32
- groups = URI.match(uri).groups()
33
- return (groups[1], groups[3], groups[4], groups[6], groups[8])
34
-
35
-
36
- class CacheController(object):
37
- """An interface to see if request should cached or not."""
38
-
39
- def __init__(
40
- self, cache=None, cache_etags=True, serializer=None, status_codes=None
41
- ):
42
- self.cache = DictCache() if cache is None else cache
43
- self.cache_etags = cache_etags
44
- self.serializer = serializer or Serializer()
45
- self.cacheable_status_codes = status_codes or (200, 203, 300, 301, 308)
46
-
47
- @classmethod
48
- def _urlnorm(cls, uri):
49
- """Normalize the URL to create a safe key for the cache"""
50
- (scheme, authority, path, query, fragment) = parse_uri(uri)
51
- if not scheme or not authority:
52
- raise Exception("Only absolute URIs are allowed. uri = %s" % uri)
53
-
54
- scheme = scheme.lower()
55
- authority = authority.lower()
56
-
57
- if not path:
58
- path = "/"
59
-
60
- # Could do syntax based normalization of the URI before
61
- # computing the digest. See Section 6.2.2 of Std 66.
62
- request_uri = query and "?".join([path, query]) or path
63
- defrag_uri = scheme + "://" + authority + request_uri
64
-
65
- return defrag_uri
66
-
67
- @classmethod
68
- def cache_url(cls, uri):
69
- return cls._urlnorm(uri)
70
-
71
- def parse_cache_control(self, headers):
72
- known_directives = {
73
- # https://tools.ietf.org/html/rfc7234#section-5.2
74
- "max-age": (int, True),
75
- "max-stale": (int, False),
76
- "min-fresh": (int, True),
77
- "no-cache": (None, False),
78
- "no-store": (None, False),
79
- "no-transform": (None, False),
80
- "only-if-cached": (None, False),
81
- "must-revalidate": (None, False),
82
- "public": (None, False),
83
- "private": (None, False),
84
- "proxy-revalidate": (None, False),
85
- "s-maxage": (int, True),
86
- }
87
-
88
- cc_headers = headers.get("cache-control", headers.get("Cache-Control", ""))
89
-
90
- retval = {}
91
-
92
- for cc_directive in cc_headers.split(","):
93
- if not cc_directive.strip():
94
- continue
95
-
96
- parts = cc_directive.split("=", 1)
97
- directive = parts[0].strip()
98
-
99
- try:
100
- typ, required = known_directives[directive]
101
- except KeyError:
102
- logger.debug("Ignoring unknown cache-control directive: %s", directive)
103
- continue
104
-
105
- if not typ or not required:
106
- retval[directive] = None
107
- if typ:
108
- try:
109
- retval[directive] = typ(parts[1].strip())
110
- except IndexError:
111
- if required:
112
- logger.debug(
113
- "Missing value for cache-control " "directive: %s",
114
- directive,
115
- )
116
- except ValueError:
117
- logger.debug(
118
- "Invalid value for cache-control directive " "%s, must be %s",
119
- directive,
120
- typ.__name__,
121
- )
122
-
123
- return retval
124
-
125
- def cached_request(self, request):
126
- """
127
- Return a cached response if it exists in the cache, otherwise
128
- return False.
129
- """
130
- cache_url = self.cache_url(request.url)
131
- logger.debug('Looking up "%s" in the cache', cache_url)
132
- cc = self.parse_cache_control(request.headers)
133
-
134
- # Bail out if the request insists on fresh data
135
- if "no-cache" in cc:
136
- logger.debug('Request header has "no-cache", cache bypassed')
137
- return False
138
-
139
- if "max-age" in cc and cc["max-age"] == 0:
140
- logger.debug('Request header has "max_age" as 0, cache bypassed')
141
- return False
142
-
143
- # Request allows serving from the cache, let's see if we find something
144
- cache_data = self.cache.get(cache_url)
145
- if cache_data is None:
146
- logger.debug("No cache entry available")
147
- return False
148
-
149
- if isinstance(self.cache, SeparateBodyBaseCache):
150
- body_file = self.cache.get_body(cache_url)
151
- else:
152
- body_file = None
153
-
154
- # Check whether it can be deserialized
155
- resp = self.serializer.loads(request, cache_data, body_file)
156
- if not resp:
157
- logger.warning("Cache entry deserialization failed, entry ignored")
158
- return False
159
-
160
- # If we have a cached permanent redirect, return it immediately. We
161
- # don't need to test our response for other headers b/c it is
162
- # intrinsically "cacheable" as it is Permanent.
163
- #
164
- # See:
165
- # https://tools.ietf.org/html/rfc7231#section-6.4.2
166
- #
167
- # Client can try to refresh the value by repeating the request
168
- # with cache busting headers as usual (ie no-cache).
169
- if int(resp.status) in PERMANENT_REDIRECT_STATUSES:
170
- msg = (
171
- "Returning cached permanent redirect response "
172
- "(ignoring date and etag information)"
173
- )
174
- logger.debug(msg)
175
- return resp
176
-
177
- headers = CaseInsensitiveDict(resp.headers)
178
- if not headers or "date" not in headers:
179
- if "etag" not in headers:
180
- # Without date or etag, the cached response can never be used
181
- # and should be deleted.
182
- logger.debug("Purging cached response: no date or etag")
183
- self.cache.delete(cache_url)
184
- logger.debug("Ignoring cached response: no date")
185
- return False
186
-
187
- now = time.time()
188
- date = calendar.timegm(parsedate_tz(headers["date"]))
189
- current_age = max(0, now - date)
190
- logger.debug("Current age based on date: %i", current_age)
191
-
192
- # TODO: There is an assumption that the result will be a
193
- # urllib3 response object. This may not be best since we
194
- # could probably avoid instantiating or constructing the
195
- # response until we know we need it.
196
- resp_cc = self.parse_cache_control(headers)
197
-
198
- # determine freshness
199
- freshness_lifetime = 0
200
-
201
- # Check the max-age pragma in the cache control header
202
- if "max-age" in resp_cc:
203
- freshness_lifetime = resp_cc["max-age"]
204
- logger.debug("Freshness lifetime from max-age: %i", freshness_lifetime)
205
-
206
- # If there isn't a max-age, check for an expires header
207
- elif "expires" in headers:
208
- expires = parsedate_tz(headers["expires"])
209
- if expires is not None:
210
- expire_time = calendar.timegm(expires) - date
211
- freshness_lifetime = max(0, expire_time)
212
- logger.debug("Freshness lifetime from expires: %i", freshness_lifetime)
213
-
214
- # Determine if we are setting freshness limit in the
215
- # request. Note, this overrides what was in the response.
216
- if "max-age" in cc:
217
- freshness_lifetime = cc["max-age"]
218
- logger.debug(
219
- "Freshness lifetime from request max-age: %i", freshness_lifetime
220
- )
221
-
222
- if "min-fresh" in cc:
223
- min_fresh = cc["min-fresh"]
224
- # adjust our current age by our min fresh
225
- current_age += min_fresh
226
- logger.debug("Adjusted current age from min-fresh: %i", current_age)
227
-
228
- # Return entry if it is fresh enough
229
- if freshness_lifetime > current_age:
230
- logger.debug('The response is "fresh", returning cached response')
231
- logger.debug("%i > %i", freshness_lifetime, current_age)
232
- return resp
233
-
234
- # we're not fresh. If we don't have an Etag, clear it out
235
- if "etag" not in headers:
236
- logger.debug('The cached response is "stale" with no etag, purging')
237
- self.cache.delete(cache_url)
238
-
239
- # return the original handler
240
- return False
241
-
242
- def conditional_headers(self, request):
243
- cache_url = self.cache_url(request.url)
244
- resp = self.serializer.loads(request, self.cache.get(cache_url))
245
- new_headers = {}
246
-
247
- if resp:
248
- headers = CaseInsensitiveDict(resp.headers)
249
-
250
- if "etag" in headers:
251
- new_headers["If-None-Match"] = headers["ETag"]
252
-
253
- if "last-modified" in headers:
254
- new_headers["If-Modified-Since"] = headers["Last-Modified"]
255
-
256
- return new_headers
257
-
258
- def _cache_set(self, cache_url, request, response, body=None, expires_time=None):
259
- """
260
- Store the data in the cache.
261
- """
262
- if isinstance(self.cache, SeparateBodyBaseCache):
263
- # We pass in the body separately; just put a placeholder empty
264
- # string in the metadata.
265
- self.cache.set(
266
- cache_url,
267
- self.serializer.dumps(request, response, b""),
268
- expires=expires_time,
269
- )
270
- self.cache.set_body(cache_url, body)
271
- else:
272
- self.cache.set(
273
- cache_url,
274
- self.serializer.dumps(request, response, body),
275
- expires=expires_time,
276
- )
277
-
278
- def cache_response(self, request, response, body=None, status_codes=None):
279
- """
280
- Algorithm for caching requests.
281
-
282
- This assumes a requests Response object.
283
- """
284
- # From httplib2: Don't cache 206's since we aren't going to
285
- # handle byte range requests
286
- cacheable_status_codes = status_codes or self.cacheable_status_codes
287
- if response.status not in cacheable_status_codes:
288
- logger.debug(
289
- "Status code %s not in %s", response.status, cacheable_status_codes
290
- )
291
- return
292
-
293
- response_headers = CaseInsensitiveDict(response.headers)
294
-
295
- if "date" in response_headers:
296
- date = calendar.timegm(parsedate_tz(response_headers["date"]))
297
- else:
298
- date = 0
299
-
300
- # If we've been given a body, our response has a Content-Length, that
301
- # Content-Length is valid then we can check to see if the body we've
302
- # been given matches the expected size, and if it doesn't we'll just
303
- # skip trying to cache it.
304
- if (
305
- body is not None
306
- and "content-length" in response_headers
307
- and response_headers["content-length"].isdigit()
308
- and int(response_headers["content-length"]) != len(body)
309
- ):
310
- return
311
-
312
- cc_req = self.parse_cache_control(request.headers)
313
- cc = self.parse_cache_control(response_headers)
314
-
315
- cache_url = self.cache_url(request.url)
316
- logger.debug('Updating cache with response from "%s"', cache_url)
317
-
318
- # Delete it from the cache if we happen to have it stored there
319
- no_store = False
320
- if "no-store" in cc:
321
- no_store = True
322
- logger.debug('Response header has "no-store"')
323
- if "no-store" in cc_req:
324
- no_store = True
325
- logger.debug('Request header has "no-store"')
326
- if no_store and self.cache.get(cache_url):
327
- logger.debug('Purging existing cache entry to honor "no-store"')
328
- self.cache.delete(cache_url)
329
- if no_store:
330
- return
331
-
332
- # https://tools.ietf.org/html/rfc7234#section-4.1:
333
- # A Vary header field-value of "*" always fails to match.
334
- # Storing such a response leads to a deserialization warning
335
- # during cache lookup and is not allowed to ever be served,
336
- # so storing it can be avoided.
337
- if "*" in response_headers.get("vary", ""):
338
- logger.debug('Response header has "Vary: *"')
339
- return
340
-
341
- # If we've been given an etag, then keep the response
342
- if self.cache_etags and "etag" in response_headers:
343
- expires_time = 0
344
- if response_headers.get("expires"):
345
- expires = parsedate_tz(response_headers["expires"])
346
- if expires is not None:
347
- expires_time = calendar.timegm(expires) - date
348
-
349
- expires_time = max(expires_time, 14 * 86400)
350
-
351
- logger.debug("etag object cached for {0} seconds".format(expires_time))
352
- logger.debug("Caching due to etag")
353
- self._cache_set(cache_url, request, response, body, expires_time)
354
-
355
- # Add to the cache any permanent redirects. We do this before looking
356
- # that the Date headers.
357
- elif int(response.status) in PERMANENT_REDIRECT_STATUSES:
358
- logger.debug("Caching permanent redirect")
359
- self._cache_set(cache_url, request, response, b"")
360
-
361
- # Add to the cache if the response headers demand it. If there
362
- # is no date header then we can't do anything about expiring
363
- # the cache.
364
- elif "date" in response_headers:
365
- date = calendar.timegm(parsedate_tz(response_headers["date"]))
366
- # cache when there is a max-age > 0
367
- if "max-age" in cc and cc["max-age"] > 0:
368
- logger.debug("Caching b/c date exists and max-age > 0")
369
- expires_time = cc["max-age"]
370
- self._cache_set(
371
- cache_url,
372
- request,
373
- response,
374
- body,
375
- expires_time,
376
- )
377
-
378
- # If the request can expire, it means we should cache it
379
- # in the meantime.
380
- elif "expires" in response_headers:
381
- if response_headers["expires"]:
382
- expires = parsedate_tz(response_headers["expires"])
383
- if expires is not None:
384
- expires_time = calendar.timegm(expires) - date
385
- else:
386
- expires_time = None
387
-
388
- logger.debug(
389
- "Caching b/c of expires header. expires in {0} seconds".format(
390
- expires_time
391
- )
392
- )
393
- self._cache_set(
394
- cache_url,
395
- request,
396
- response,
397
- body,
398
- expires_time,
399
- )
400
-
401
- def update_cached_response(self, request, response):
402
- """On a 304 we will get a new set of headers that we want to
403
- update our cached value with, assuming we have one.
404
-
405
- This should only ever be called when we've sent an ETag and
406
- gotten a 304 as the response.
407
- """
408
- cache_url = self.cache_url(request.url)
409
-
410
- cached_response = self.serializer.loads(request, self.cache.get(cache_url))
411
-
412
- if not cached_response:
413
- # we didn't have a cached response
414
- return response
415
-
416
- # Lets update our headers with the headers from the new request:
417
- # http://tools.ietf.org/html/draft-ietf-httpbis-p4-conditional-26#section-4.1
418
- #
419
- # The server isn't supposed to send headers that would make
420
- # the cached body invalid. But... just in case, we'll be sure
421
- # to strip out ones we know that might be problmatic due to
422
- # typical assumptions.
423
- excluded_headers = ["content-length"]
424
-
425
- cached_response.headers.update(
426
- dict(
427
- (k, v)
428
- for k, v in response.headers.items()
429
- if k.lower() not in excluded_headers
430
- )
431
- )
432
-
433
- # we want a 200 b/c we have content via the cache
434
- cached_response.status = 200
435
-
436
- # update our cache
437
- self._cache_set(cache_url, request, cached_response)
438
-
439
- return cached_response
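
CacheController is normally driven through a requests session rather than instantiated by hand. A hedged sketch with the standalone cachecontrol package (the vendored pip._vendor.cachecontrol module contains the same classes, but wiring it up manually is not part of pip's public interface):

    import requests
    from cachecontrol import CacheControl

    session = CacheControl(requests.Session())   # mounts a caching adapter on the session
    resp = session.get("https://pypi.org/simple/")
    # repeated GETs of cacheable responses are served from the cache per the rules above
    print(resp.status_code, getattr(resp, "from_cache", "n/a"))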
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/charsetgroupprober.py DELETED
@@ -1,106 +0,0 @@
- ######################## BEGIN LICENSE BLOCK ########################
- # The Original Code is Mozilla Communicator client code.
- #
- # The Initial Developer of the Original Code is
- # Netscape Communications Corporation.
- # Portions created by the Initial Developer are Copyright (C) 1998
- # the Initial Developer. All Rights Reserved.
- #
- # Contributor(s):
- #   Mark Pilgrim - port to Python
- #
- # This library is free software; you can redistribute it and/or
- # modify it under the terms of the GNU Lesser General Public
- # License as published by the Free Software Foundation; either
- # version 2.1 of the License, or (at your option) any later version.
- #
- # This library is distributed in the hope that it will be useful,
- # but WITHOUT ANY WARRANTY; without even the implied warranty of
- # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- # Lesser General Public License for more details.
- #
- # You should have received a copy of the GNU Lesser General Public
- # License along with this library; if not, write to the Free Software
- # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- # 02110-1301 USA
- ######################### END LICENSE BLOCK #########################
-
- from typing import List, Optional, Union
-
- from .charsetprober import CharSetProber
- from .enums import LanguageFilter, ProbingState
-
-
- class CharSetGroupProber(CharSetProber):
-     def __init__(self, lang_filter: LanguageFilter = LanguageFilter.NONE) -> None:
-         super().__init__(lang_filter=lang_filter)
-         self._active_num = 0
-         self.probers: List[CharSetProber] = []
-         self._best_guess_prober: Optional[CharSetProber] = None
-
-     def reset(self) -> None:
-         super().reset()
-         self._active_num = 0
-         for prober in self.probers:
-             prober.reset()
-             prober.active = True
-             self._active_num += 1
-         self._best_guess_prober = None
-
-     @property
-     def charset_name(self) -> Optional[str]:
-         if not self._best_guess_prober:
-             self.get_confidence()
-             if not self._best_guess_prober:
-                 return None
-         return self._best_guess_prober.charset_name
-
-     @property
-     def language(self) -> Optional[str]:
-         if not self._best_guess_prober:
-             self.get_confidence()
-             if not self._best_guess_prober:
-                 return None
-         return self._best_guess_prober.language
-
-     def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
-         for prober in self.probers:
-             if not prober.active:
-                 continue
-             state = prober.feed(byte_str)
-             if not state:
-                 continue
-             if state == ProbingState.FOUND_IT:
-                 self._best_guess_prober = prober
-                 self._state = ProbingState.FOUND_IT
-                 return self.state
-             if state == ProbingState.NOT_ME:
-                 prober.active = False
-                 self._active_num -= 1
-                 if self._active_num <= 0:
-                     self._state = ProbingState.NOT_ME
-                     return self.state
-         return self.state
-
-     def get_confidence(self) -> float:
-         state = self.state
-         if state == ProbingState.FOUND_IT:
-             return 0.99
-         if state == ProbingState.NOT_ME:
-             return 0.01
-         best_conf = 0.0
-         self._best_guess_prober = None
-         for prober in self.probers:
-             if not prober.active:
-                 self.logger.debug("%s not active", prober.charset_name)
-                 continue
-             conf = prober.get_confidence()
-             self.logger.debug(
-                 "%s %s confidence = %s", prober.charset_name, prober.language, conf
-             )
-             if best_conf < conf:
-                 best_conf = conf
-                 self._best_guess_prober = prober
-         if not self._best_guess_prober:
-             return 0.0
-         return best_conf
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/euckrfreq.py DELETED
@@ -1,196 +0,0 @@
1
- ######################## BEGIN LICENSE BLOCK ########################
2
- # The Original Code is Mozilla Communicator client code.
3
- #
4
- # The Initial Developer of the Original Code is
5
- # Netscape Communications Corporation.
6
- # Portions created by the Initial Developer are Copyright (C) 1998
7
- # the Initial Developer. All Rights Reserved.
8
- #
9
- # Contributor(s):
10
- # Mark Pilgrim - port to Python
11
- #
12
- # This library is free software; you can redistribute it and/or
13
- # modify it under the terms of the GNU Lesser General Public
14
- # License as published by the Free Software Foundation; either
15
- # version 2.1 of the License, or (at your option) any later version.
16
- #
17
- # This library is distributed in the hope that it will be useful,
18
- # but WITHOUT ANY WARRANTY; without even the implied warranty of
19
- # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20
- # Lesser General Public License for more details.
21
- #
22
- # You should have received a copy of the GNU Lesser General Public
23
- # License along with this library; if not, write to the Free Software
24
- # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
25
- # 02110-1301 USA
26
- ######################### END LICENSE BLOCK #########################
27
-
28
- # Sampling from about 20M text materials include literature and computer technology
29
-
30
- # 128 --> 0.79
31
- # 256 --> 0.92
32
- # 512 --> 0.986
33
- # 1024 --> 0.99944
34
- # 2048 --> 0.99999
35
- #
36
- # Idea Distribution Ratio = 0.98653 / (1-0.98653) = 73.24
37
- # Random Distribution Ration = 512 / (2350-512) = 0.279.
38
- #
39
- # Typical Distribution Ratio
40
-
41
- EUCKR_TYPICAL_DISTRIBUTION_RATIO = 6.0
42
-
43
- EUCKR_TABLE_SIZE = 2352
44
-
45
- # Char to FreqOrder table ,
46
- # fmt: off
47
- EUCKR_CHAR_TO_FREQ_ORDER = (
48
- 13, 130, 120,1396, 481,1719,1720, 328, 609, 212,1721, 707, 400, 299,1722, 87,
49
- 1397,1723, 104, 536,1117,1203,1724,1267, 685,1268, 508,1725,1726,1727,1728,1398,
50
- 1399,1729,1730,1731, 141, 621, 326,1057, 368,1732, 267, 488, 20,1733,1269,1734,
51
- 945,1400,1735, 47, 904,1270,1736,1737, 773, 248,1738, 409, 313, 786, 429,1739,
52
- 116, 987, 813,1401, 683, 75,1204, 145,1740,1741,1742,1743, 16, 847, 667, 622,
53
- 708,1744,1745,1746, 966, 787, 304, 129,1747, 60, 820, 123, 676,1748,1749,1750,
54
- 1751, 617,1752, 626,1753,1754,1755,1756, 653,1757,1758,1759,1760,1761,1762, 856,
55
- 344,1763,1764,1765,1766, 89, 401, 418, 806, 905, 848,1767,1768,1769, 946,1205,
56
- 709,1770,1118,1771, 241,1772,1773,1774,1271,1775, 569,1776, 999,1777,1778,1779,
57
- 1780, 337, 751,1058, 28, 628, 254,1781, 177, 906, 270, 349, 891,1079,1782, 19,
58
- 1783, 379,1784, 315,1785, 629, 754,1402, 559,1786, 636, 203,1206,1787, 710, 567,
59
- 1788, 935, 814,1789,1790,1207, 766, 528,1791,1792,1208,1793,1794,1795,1796,1797,
60
- 1403,1798,1799, 533,1059,1404,1405,1156,1406, 936, 884,1080,1800, 351,1801,1802,
61
- 1803,1804,1805, 801,1806,1807,1808,1119,1809,1157, 714, 474,1407,1810, 298, 899,
62
- 885,1811,1120, 802,1158,1812, 892,1813,1814,1408, 659,1815,1816,1121,1817,1818,
63
- 1819,1820,1821,1822, 319,1823, 594, 545,1824, 815, 937,1209,1825,1826, 573,1409,
64
- 1022,1827,1210,1828,1829,1830,1831,1832,1833, 556, 722, 807,1122,1060,1834, 697,
65
- 1835, 900, 557, 715,1836,1410, 540,1411, 752,1159, 294, 597,1211, 976, 803, 770,
66
- 1412,1837,1838, 39, 794,1413, 358,1839, 371, 925,1840, 453, 661, 788, 531, 723,
67
- 544,1023,1081, 869, 91,1841, 392, 430, 790, 602,1414, 677,1082, 457,1415,1416,
68
- 1842,1843, 475, 327,1024,1417, 795, 121,1844, 733, 403,1418,1845,1846,1847, 300,
69
- 119, 711,1212, 627,1848,1272, 207,1849,1850, 796,1213, 382,1851, 519,1852,1083,
70
- 893,1853,1854,1855, 367, 809, 487, 671,1856, 663,1857,1858, 956, 471, 306, 857,
71
- 1859,1860,1160,1084,1861,1862,1863,1864,1865,1061,1866,1867,1868,1869,1870,1871,
72
- 282, 96, 574,1872, 502,1085,1873,1214,1874, 907,1875,1876, 827, 977,1419,1420,
73
- 1421, 268,1877,1422,1878,1879,1880, 308,1881, 2, 537,1882,1883,1215,1884,1885,
74
- 127, 791,1886,1273,1423,1887, 34, 336, 404, 643,1888, 571, 654, 894, 840,1889,
75
- 0, 886,1274, 122, 575, 260, 908, 938,1890,1275, 410, 316,1891,1892, 100,1893,
76
- 1894,1123, 48,1161,1124,1025,1895, 633, 901,1276,1896,1897, 115, 816,1898, 317,
77
- 1899, 694,1900, 909, 734,1424, 572, 866,1425, 691, 85, 524,1010, 543, 394, 841,
78
- 1901,1902,1903,1026,1904,1905,1906,1907,1908,1909, 30, 451, 651, 988, 310,1910,
79
- 1911,1426, 810,1216, 93,1912,1913,1277,1217,1914, 858, 759, 45, 58, 181, 610,
80
- 269,1915,1916, 131,1062, 551, 443,1000, 821,1427, 957, 895,1086,1917,1918, 375,
81
- 1919, 359,1920, 687,1921, 822,1922, 293,1923,1924, 40, 662, 118, 692, 29, 939,
82
- 887, 640, 482, 174,1925, 69,1162, 728,1428, 910,1926,1278,1218,1279, 386, 870,
83
- 217, 854,1163, 823,1927,1928,1929,1930, 834,1931, 78,1932, 859,1933,1063,1934,
84
- 1935,1936,1937, 438,1164, 208, 595,1938,1939,1940,1941,1219,1125,1942, 280, 888,
85
- 1429,1430,1220,1431,1943,1944,1945,1946,1947,1280, 150, 510,1432,1948,1949,1950,
86
- 1951,1952,1953,1954,1011,1087,1955,1433,1043,1956, 881,1957, 614, 958,1064,1065,
87
- 1221,1958, 638,1001, 860, 967, 896,1434, 989, 492, 553,1281,1165,1959,1282,1002,
88
- 1283,1222,1960,1961,1962,1963, 36, 383, 228, 753, 247, 454,1964, 876, 678,1965,
89
- 1966,1284, 126, 464, 490, 835, 136, 672, 529, 940,1088,1435, 473,1967,1968, 467,
90
- 50, 390, 227, 587, 279, 378, 598, 792, 968, 240, 151, 160, 849, 882,1126,1285,
91
- 639,1044, 133, 140, 288, 360, 811, 563,1027, 561, 142, 523,1969,1970,1971, 7,
92
- 103, 296, 439, 407, 506, 634, 990,1972,1973,1974,1975, 645,1976,1977,1978,1979,
93
- 1980,1981, 236,1982,1436,1983,1984,1089, 192, 828, 618, 518,1166, 333,1127,1985,
94
- 818,1223,1986,1987,1988,1989,1990,1991,1992,1993, 342,1128,1286, 746, 842,1994,
95
- 1995, 560, 223,1287, 98, 8, 189, 650, 978,1288,1996,1437,1997, 17, 345, 250,
96
- 423, 277, 234, 512, 226, 97, 289, 42, 167,1998, 201,1999,2000, 843, 836, 824,
97
- 532, 338, 783,1090, 182, 576, 436,1438,1439, 527, 500,2001, 947, 889,2002,2003,
98
- 2004,2005, 262, 600, 314, 447,2006, 547,2007, 693, 738,1129,2008, 71,1440, 745,
99
- 619, 688,2009, 829,2010,2011, 147,2012, 33, 948,2013,2014, 74, 224,2015, 61,
100
- 191, 918, 399, 637,2016,1028,1130, 257, 902,2017,2018,2019,2020,2021,2022,2023,
101
- 2024,2025,2026, 837,2027,2028,2029,2030, 179, 874, 591, 52, 724, 246,2031,2032,
102
- 2033,2034,1167, 969,2035,1289, 630, 605, 911,1091,1168,2036,2037,2038,1441, 912,
103
- 2039, 623,2040,2041, 253,1169,1290,2042,1442, 146, 620, 611, 577, 433,2043,1224,
104
- 719,1170, 959, 440, 437, 534, 84, 388, 480,1131, 159, 220, 198, 679,2044,1012,
105
- 819,1066,1443, 113,1225, 194, 318,1003,1029,2045,2046,2047,2048,1067,2049,2050,
106
- 2051,2052,2053, 59, 913, 112,2054, 632,2055, 455, 144, 739,1291,2056, 273, 681,
107
- 499,2057, 448,2058,2059, 760,2060,2061, 970, 384, 169, 245,1132,2062,2063, 414,
108
- 1444,2064,2065, 41, 235,2066, 157, 252, 877, 568, 919, 789, 580,2067, 725,2068,
109
- 2069,1292,2070,2071,1445,2072,1446,2073,2074, 55, 588, 66,1447, 271,1092,2075,
110
- 1226,2076, 960,1013, 372,2077,2078,2079,2080,2081,1293,2082,2083,2084,2085, 850,
111
- 2086,2087,2088,2089,2090, 186,2091,1068, 180,2092,2093,2094, 109,1227, 522, 606,
112
- 2095, 867,1448,1093, 991,1171, 926, 353,1133,2096, 581,2097,2098,2099,1294,1449,
113
- 1450,2100, 596,1172,1014,1228,2101,1451,1295,1173,1229,2102,2103,1296,1134,1452,
114
- 949,1135,2104,2105,1094,1453,1454,1455,2106,1095,2107,2108,2109,2110,2111,2112,
115
- 2113,2114,2115,2116,2117, 804,2118,2119,1230,1231, 805,1456, 405,1136,2120,2121,
116
- 2122,2123,2124, 720, 701,1297, 992,1457, 927,1004,2125,2126,2127,2128,2129,2130,
117
- 22, 417,2131, 303,2132, 385,2133, 971, 520, 513,2134,1174, 73,1096, 231, 274,
118
- 962,1458, 673,2135,1459,2136, 152,1137,2137,2138,2139,2140,1005,1138,1460,1139,
119
- 2141,2142,2143,2144, 11, 374, 844,2145, 154,1232, 46,1461,2146, 838, 830, 721,
120
- 1233, 106,2147, 90, 428, 462, 578, 566,1175, 352,2148,2149, 538,1234, 124,1298,
121
- 2150,1462, 761, 565,2151, 686,2152, 649,2153, 72, 173,2154, 460, 415,2155,1463,
122
- 2156,1235, 305,2157,2158,2159,2160,2161,2162, 579,2163,2164,2165,2166,2167, 747,
123
- 2168,2169,2170,2171,1464, 669,2172,2173,2174,2175,2176,1465,2177, 23, 530, 285,
124
- 2178, 335, 729,2179, 397,2180,2181,2182,1030,2183,2184, 698,2185,2186, 325,2187,
125
- 2188, 369,2189, 799,1097,1015, 348,2190,1069, 680,2191, 851,1466,2192,2193, 10,
126
- 2194, 613, 424,2195, 979, 108, 449, 589, 27, 172, 81,1031, 80, 774, 281, 350,
127
- 1032, 525, 301, 582,1176,2196, 674,1045,2197,2198,1467, 730, 762,2199,2200,2201,
128
- 2202,1468,2203, 993,2204,2205, 266,1070, 963,1140,2206,2207,2208, 664,1098, 972,
129
- 2209,2210,2211,1177,1469,1470, 871,2212,2213,2214,2215,2216,1471,2217,2218,2219,
130
- 2220,2221,2222,2223,2224,2225,2226,2227,1472,1236,2228,2229,2230,2231,2232,2233,
131
- 2234,2235,1299,2236,2237, 200,2238, 477, 373,2239,2240, 731, 825, 777,2241,2242,
132
- 2243, 521, 486, 548,2244,2245,2246,1473,1300, 53, 549, 137, 875, 76, 158,2247,
133
- 1301,1474, 469, 396,1016, 278, 712,2248, 321, 442, 503, 767, 744, 941,1237,1178,
134
- 1475,2249, 82, 178,1141,1179, 973,2250,1302,2251, 297,2252,2253, 570,2254,2255,
135
- 2256, 18, 450, 206,2257, 290, 292,1142,2258, 511, 162, 99, 346, 164, 735,2259,
136
- 1476,1477, 4, 554, 343, 798,1099,2260,1100,2261, 43, 171,1303, 139, 215,2262,
137
- 2263, 717, 775,2264,1033, 322, 216,2265, 831,2266, 149,2267,1304,2268,2269, 702,
138
- 1238, 135, 845, 347, 309,2270, 484,2271, 878, 655, 238,1006,1478,2272, 67,2273,
139
- 295,2274,2275, 461,2276, 478, 942, 412,2277,1034,2278,2279,2280, 265,2281, 541,
140
- 2282,2283,2284,2285,2286, 70, 852,1071,2287,2288,2289,2290, 21, 56, 509, 117,
141
- 432,2291,2292, 331, 980, 552,1101, 148, 284, 105, 393,1180,1239, 755,2293, 187,
142
- 2294,1046,1479,2295, 340,2296, 63,1047, 230,2297,2298,1305, 763,1306, 101, 800,
143
- 808, 494,2299,2300,2301, 903,2302, 37,1072, 14, 5,2303, 79, 675,2304, 312,
144
- 2305,2306,2307,2308,2309,1480, 6,1307,2310,2311,2312, 1, 470, 35, 24, 229,
145
- 2313, 695, 210, 86, 778, 15, 784, 592, 779, 32, 77, 855, 964,2314, 259,2315,
146
- 501, 380,2316,2317, 83, 981, 153, 689,1308,1481,1482,1483,2318,2319, 716,1484,
147
- 2320,2321,2322,2323,2324,2325,1485,2326,2327, 128, 57, 68, 261,1048, 211, 170,
148
- 1240, 31,2328, 51, 435, 742,2329,2330,2331, 635,2332, 264, 456,2333,2334,2335,
149
- 425,2336,1486, 143, 507, 263, 943,2337, 363, 920,1487, 256,1488,1102, 243, 601,
150
- 1489,2338,2339,2340,2341,2342,2343,2344, 861,2345,2346,2347,2348,2349,2350, 395,
151
- 2351,1490,1491, 62, 535, 166, 225,2352,2353, 668, 419,1241, 138, 604, 928,2354,
152
- 1181,2355,1492,1493,2356,2357,2358,1143,2359, 696,2360, 387, 307,1309, 682, 476,
153
- 2361,2362, 332, 12, 222, 156,2363, 232,2364, 641, 276, 656, 517,1494,1495,1035,
154
- 416, 736,1496,2365,1017, 586,2366,2367,2368,1497,2369, 242,2370,2371,2372,1498,
155
- 2373, 965, 713,2374,2375,2376,2377, 740, 982,1499, 944,1500,1007,2378,2379,1310,
156
- 1501,2380,2381,2382, 785, 329,2383,2384,1502,2385,2386,2387, 932,2388,1503,2389,
157
- 2390,2391,2392,1242,2393,2394,2395,2396,2397, 994, 950,2398,2399,2400,2401,1504,
158
- 1311,2402,2403,2404,2405,1049, 749,2406,2407, 853, 718,1144,1312,2408,1182,1505,
159
- 2409,2410, 255, 516, 479, 564, 550, 214,1506,1507,1313, 413, 239, 444, 339,1145,
160
- 1036,1508,1509,1314,1037,1510,1315,2411,1511,2412,2413,2414, 176, 703, 497, 624,
161
- 593, 921, 302,2415, 341, 165,1103,1512,2416,1513,2417,2418,2419, 376,2420, 700,
162
- 2421,2422,2423, 258, 768,1316,2424,1183,2425, 995, 608,2426,2427,2428,2429, 221,
163
- 2430,2431,2432,2433,2434,2435,2436,2437, 195, 323, 726, 188, 897, 983,1317, 377,
164
- 644,1050, 879,2438, 452,2439,2440,2441,2442,2443,2444, 914,2445,2446,2447,2448,
165
- 915, 489,2449,1514,1184,2450,2451, 515, 64, 427, 495,2452, 583,2453, 483, 485,
166
- 1038, 562, 213,1515, 748, 666,2454,2455,2456,2457, 334,2458, 780, 996,1008, 705,
167
- 1243,2459,2460,2461,2462,2463, 114,2464, 493,1146, 366, 163,1516, 961,1104,2465,
168
- 291,2466,1318,1105,2467,1517, 365,2468, 355, 951,1244,2469,1319,2470, 631,2471,
169
- 2472, 218,1320, 364, 320, 756,1518,1519,1321,1520,1322,2473,2474,2475,2476, 997,
170
- 2477,2478,2479,2480, 665,1185,2481, 916,1521,2482,2483,2484, 584, 684,2485,2486,
171
- 797,2487,1051,1186,2488,2489,2490,1522,2491,2492, 370,2493,1039,1187, 65,2494,
172
- 434, 205, 463,1188,2495, 125, 812, 391, 402, 826, 699, 286, 398, 155, 781, 771,
173
- 585,2496, 590, 505,1073,2497, 599, 244, 219, 917,1018, 952, 646,1523,2498,1323,
174
- 2499,2500, 49, 984, 354, 741,2501, 625,2502,1324,2503,1019, 190, 357, 757, 491,
175
- 95, 782, 868,2504,2505,2506,2507,2508,2509, 134,1524,1074, 422,1525, 898,2510,
176
- 161,2511,2512,2513,2514, 769,2515,1526,2516,2517, 411,1325,2518, 472,1527,2519,
177
- 2520,2521,2522,2523,2524, 985,2525,2526,2527,2528,2529,2530, 764,2531,1245,2532,
178
- 2533, 25, 204, 311,2534, 496,2535,1052,2536,2537,2538,2539,2540,2541,2542, 199,
179
- 704, 504, 468, 758, 657,1528, 196, 44, 839,1246, 272, 750,2543, 765, 862,2544,
180
- 2545,1326,2546, 132, 615, 933,2547, 732,2548,2549,2550,1189,1529,2551, 283,1247,
181
- 1053, 607, 929,2552,2553,2554, 930, 183, 872, 616,1040,1147,2555,1148,1020, 441,
182
- 249,1075,2556,2557,2558, 466, 743,2559,2560,2561, 92, 514, 426, 420, 526,2562,
183
- 2563,2564,2565,2566,2567,2568, 185,2569,2570,2571,2572, 776,1530, 658,2573, 362,
184
- 2574, 361, 922,1076, 793,2575,2576,2577,2578,2579,2580,1531, 251,2581,2582,2583,
185
- 2584,1532, 54, 612, 237,1327,2585,2586, 275, 408, 647, 111,2587,1533,1106, 465,
186
- 3, 458, 9, 38,2588, 107, 110, 890, 209, 26, 737, 498,2589,1534,2590, 431,
187
- 202, 88,1535, 356, 287,1107, 660,1149,2591, 381,1536, 986,1150, 445,1248,1151,
188
- 974,2592,2593, 846,2594, 446, 953, 184,1249,1250, 727,2595, 923, 193, 883,2596,
189
- 2597,2598, 102, 324, 539, 817,2599, 421,1041,2600, 832,2601, 94, 175, 197, 406,
190
- 2602, 459,2603,2604,2605,2606,2607, 330, 555,2608,2609,2610, 706,1108, 389,2611,
191
- 2612,2613,2614, 233,2615, 833, 558, 931, 954,1251,2616,2617,1537, 546,2618,2619,
192
- 1009,2620,2621,2622,1538, 690,1328,2623, 955,2624,1539,2625,2626, 772,2627,2628,
193
- 2629,2630,2631, 924, 648, 863, 603,2632,2633, 934,1540, 864, 865,2634, 642,1042,
194
- 670,1190,2635,2636,2637,2638, 168,2639, 652, 873, 542,1054,1541,2640,2641,2642, # 512, 256
195
- )
196
- # fmt: on
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/core.py DELETED
The diff for this file is too large to render. See raw diff
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/installer.py DELETED
@@ -1,104 +0,0 @@
1
- import glob
2
- import os
3
- import subprocess
4
- import sys
5
- import tempfile
6
- import warnings
7
- from distutils import log
8
- from distutils.errors import DistutilsError
9
-
10
- import pkg_resources
11
- from setuptools.wheel import Wheel
12
- from ._deprecation_warning import SetuptoolsDeprecationWarning
13
-
14
-
15
- def _fixup_find_links(find_links):
16
- """Ensure find-links option end-up being a list of strings."""
17
- if isinstance(find_links, str):
18
- return find_links.split()
19
- assert isinstance(find_links, (tuple, list))
20
- return find_links
21
-
22
-
23
- def fetch_build_egg(dist, req): # noqa: C901 # is too complex (16) # FIXME
24
- """Fetch an egg needed for building.
25
-
26
- Use pip/wheel to fetch/build a wheel."""
27
- warnings.warn(
28
- "setuptools.installer is deprecated. Requirements should "
29
- "be satisfied by a PEP 517 installer.",
30
- SetuptoolsDeprecationWarning,
31
- )
32
- # Warn if wheel is not available
33
- try:
34
- pkg_resources.get_distribution('wheel')
35
- except pkg_resources.DistributionNotFound:
36
- dist.announce('WARNING: The wheel package is not available.', log.WARN)
37
- # Ignore environment markers; if supplied, it is required.
38
- req = strip_marker(req)
39
- # Take easy_install options into account, but do not override relevant
40
- # pip environment variables (like PIP_INDEX_URL or PIP_QUIET); they'll
41
- # take precedence.
42
- opts = dist.get_option_dict('easy_install')
43
- if 'allow_hosts' in opts:
44
- raise DistutilsError('the `allow-hosts` option is not supported '
45
- 'when using pip to install requirements.')
46
- quiet = 'PIP_QUIET' not in os.environ and 'PIP_VERBOSE' not in os.environ
47
- if 'PIP_INDEX_URL' in os.environ:
48
- index_url = None
49
- elif 'index_url' in opts:
50
- index_url = opts['index_url'][1]
51
- else:
52
- index_url = None
53
- find_links = (
54
- _fixup_find_links(opts['find_links'][1])[:] if 'find_links' in opts
55
- else []
56
- )
57
- if dist.dependency_links:
58
- find_links.extend(dist.dependency_links)
59
- eggs_dir = os.path.realpath(dist.get_egg_cache_dir())
60
- environment = pkg_resources.Environment()
61
- for egg_dist in pkg_resources.find_distributions(eggs_dir):
62
- if egg_dist in req and environment.can_add(egg_dist):
63
- return egg_dist
64
- with tempfile.TemporaryDirectory() as tmpdir:
65
- cmd = [
66
- sys.executable, '-m', 'pip',
67
- '--disable-pip-version-check',
68
- 'wheel', '--no-deps',
69
- '-w', tmpdir,
70
- ]
71
- if quiet:
72
- cmd.append('--quiet')
73
- if index_url is not None:
74
- cmd.extend(('--index-url', index_url))
75
- for link in find_links or []:
76
- cmd.extend(('--find-links', link))
77
- # If requirement is a PEP 508 direct URL, directly pass
78
- # the URL to pip, as `req @ url` does not work on the
79
- # command line.
80
- cmd.append(req.url or str(req))
81
- try:
82
- subprocess.check_call(cmd)
83
- except subprocess.CalledProcessError as e:
84
- raise DistutilsError(str(e)) from e
85
- wheel = Wheel(glob.glob(os.path.join(tmpdir, '*.whl'))[0])
86
- dist_location = os.path.join(eggs_dir, wheel.egg_name())
87
- wheel.install_as_egg(dist_location)
88
- dist_metadata = pkg_resources.PathMetadata(
89
- dist_location, os.path.join(dist_location, 'EGG-INFO'))
90
- dist = pkg_resources.Distribution.from_filename(
91
- dist_location, metadata=dist_metadata)
92
- return dist
93
-
94
-
95
- def strip_marker(req):
96
- """
97
- Return a new requirement without the environment marker to avoid
98
- calling pip with something like `babel; extra == "i18n"`, which
99
- would always be ignored.
100
- """
101
- # create a copy to avoid mutating the input
102
- req = pkg_resources.Requirement.parse(str(req))
103
- req.marker = None
104
- return req
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/layers/test_roi_align.py DELETED
@@ -1,210 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
- import numpy as np
3
- import unittest
4
- from copy import copy
5
- import cv2
6
- import torch
7
- from fvcore.common.benchmark import benchmark
8
- from torch.nn import functional as F
9
-
10
- from detectron2.layers.roi_align import ROIAlign, roi_align
11
-
12
-
13
- class ROIAlignTest(unittest.TestCase):
14
- def test_forward_output(self):
15
- input = np.arange(25).reshape(5, 5).astype("float32")
16
- """
17
- 0 1 2 3 4
18
- 5 6 7 8 9
19
- 10 11 12 13 14
20
- 15 16 17 18 19
21
- 20 21 22 23 24
22
- """
23
-
24
- output = self._simple_roialign(input, [1, 1, 3, 3], (4, 4), aligned=False)
25
- output_correct = self._simple_roialign(input, [1, 1, 3, 3], (4, 4), aligned=True)
26
-
27
- # without correction:
28
- old_results = [
29
- [7.5, 8, 8.5, 9],
30
- [10, 10.5, 11, 11.5],
31
- [12.5, 13, 13.5, 14],
32
- [15, 15.5, 16, 16.5],
33
- ]
34
-
35
- # with 0.5 correction:
36
- correct_results = [
37
- [4.5, 5.0, 5.5, 6.0],
38
- [7.0, 7.5, 8.0, 8.5],
39
- [9.5, 10.0, 10.5, 11.0],
40
- [12.0, 12.5, 13.0, 13.5],
41
- ]
42
- # This is an upsampled version of [[6, 7], [11, 12]]
43
-
44
- self.assertTrue(np.allclose(output.flatten(), np.asarray(old_results).flatten()))
45
- self.assertTrue(
46
- np.allclose(output_correct.flatten(), np.asarray(correct_results).flatten())
47
- )
48
-
49
- # Also see similar issues in tensorflow at
50
- # https://github.com/tensorflow/tensorflow/issues/26278
51
-
52
- def test_resize(self):
53
- H, W = 30, 30
54
- input = np.random.rand(H, W).astype("float32") * 100
55
- box = [10, 10, 20, 20]
56
- output = self._simple_roialign(input, box, (5, 5), aligned=True)
57
-
58
- input2x = cv2.resize(input, (W // 2, H // 2), interpolation=cv2.INTER_LINEAR)
59
- box2x = [x / 2 for x in box]
60
- output2x = self._simple_roialign(input2x, box2x, (5, 5), aligned=True)
61
- diff = np.abs(output2x - output)
62
- self.assertTrue(diff.max() < 1e-4)
63
-
64
- def test_grid_sample_equivalence(self):
65
- H, W = 30, 30
66
- input = np.random.rand(H, W).astype("float32") * 100
67
- box = [10, 10, 20, 20]
68
- for ratio in [1, 2, 3]:
69
- output = self._simple_roialign(input, box, (5, 5), sampling_ratio=ratio)
70
- output_grid_sample = grid_sample_roi_align(
71
- torch.from_numpy(input[None, None, :, :]).float(),
72
- torch.as_tensor(box).float()[None, :],
73
- 5,
74
- 1.0,
75
- ratio,
76
- )
77
- self.assertTrue(torch.allclose(output, output_grid_sample))
78
-
79
- def _simple_roialign(self, img, box, resolution, sampling_ratio=0, aligned=True):
80
- """
81
- RoiAlign with scale 1.0.
82
- """
83
- if isinstance(resolution, int):
84
- resolution = (resolution, resolution)
85
- op = ROIAlign(resolution, 1.0, sampling_ratio, aligned=aligned)
86
- input = torch.from_numpy(img[None, None, :, :].astype("float32"))
87
-
88
- rois = [0] + list(box)
89
- rois = torch.from_numpy(np.asarray(rois)[None, :].astype("float32"))
90
- output = op.forward(input, rois)
91
- if torch.cuda.is_available():
92
- output_cuda = op.forward(input.cuda(), rois.cuda()).cpu()
93
- self.assertTrue(torch.allclose(output, output_cuda))
94
- return output[0, 0]
95
-
96
- def _simple_roialign_with_grad(self, img, box, resolution, device):
97
- if isinstance(resolution, int):
98
- resolution = (resolution, resolution)
99
-
100
- op = ROIAlign(resolution, 1.0, 0, aligned=True)
101
- input = torch.from_numpy(img[None, None, :, :].astype("float32"))
102
-
103
- rois = [0] + list(box)
104
- rois = torch.from_numpy(np.asarray(rois)[None, :].astype("float32"))
105
- input = input.to(device=device)
106
- rois = rois.to(device=device)
107
- input.requires_grad = True
108
- output = op.forward(input, rois)
109
- return input, output
110
-
111
- def test_empty_box(self):
112
- img = np.random.rand(5, 5)
113
- box = [3, 4, 5, 4]
114
- o = self._simple_roialign(img, box, 7)
115
- self.assertTrue(o.shape == (7, 7))
116
- self.assertTrue((o == 0).all())
117
-
118
- for dev in ["cpu"] + ["cuda"] if torch.cuda.is_available() else []:
119
- input, output = self._simple_roialign_with_grad(img, box, 7, torch.device(dev))
120
- output.sum().backward()
121
- self.assertTrue(torch.allclose(input.grad, torch.zeros_like(input)))
122
-
123
- def test_empty_batch(self):
124
- input = torch.zeros(0, 3, 10, 10, dtype=torch.float32)
125
- rois = torch.zeros(0, 5, dtype=torch.float32)
126
- op = ROIAlign((7, 7), 1.0, 0, aligned=True)
127
- output = op.forward(input, rois)
128
- self.assertTrue(output.shape == (0, 3, 7, 7))
129
-
130
-
131
- def grid_sample_roi_align(input, boxes, output_size, scale, sampling_ratio):
132
- # unlike true roi_align, this does not support different batch_idx
133
- from detectron2.projects.point_rend.point_features import (
134
- generate_regular_grid_point_coords,
135
- get_point_coords_wrt_image,
136
- point_sample,
137
- )
138
-
139
- N, _, H, W = input.shape
140
- R = len(boxes)
141
- assert N == 1
142
- boxes = boxes * scale
143
- grid = generate_regular_grid_point_coords(R, output_size * sampling_ratio, device=boxes.device)
144
- coords = get_point_coords_wrt_image(boxes, grid)
145
- coords = coords / torch.as_tensor([W, H], device=coords.device) # R, s^2, 2
146
- res = point_sample(input, coords.unsqueeze(0), align_corners=False) # 1,C, R,s^2
147
- res = (
148
- res.squeeze(0)
149
- .permute(1, 0, 2)
150
- .reshape(R, -1, output_size * sampling_ratio, output_size * sampling_ratio)
151
- )
152
- res = F.avg_pool2d(res, sampling_ratio)
153
- return res
154
-
155
-
156
- def benchmark_roi_align():
157
- def random_boxes(mean_box, stdev, N, maxsize):
158
- ret = torch.rand(N, 4) * stdev + torch.tensor(mean_box, dtype=torch.float)
159
- ret.clamp_(min=0, max=maxsize)
160
- return ret
161
-
162
- def func(shape, nboxes_per_img, sampling_ratio, device, box_size="large"):
163
- N, _, H, _ = shape
164
- input = torch.rand(*shape)
165
- boxes = []
166
- batch_idx = []
167
- for k in range(N):
168
- if box_size == "large":
169
- b = random_boxes([80, 80, 130, 130], 24, nboxes_per_img, H)
170
- else:
171
- b = random_boxes([100, 100, 110, 110], 4, nboxes_per_img, H)
172
- boxes.append(b)
173
- batch_idx.append(torch.zeros(nboxes_per_img, 1, dtype=torch.float32) + k)
174
- boxes = torch.cat(boxes, axis=0)
175
- batch_idx = torch.cat(batch_idx, axis=0)
176
- boxes = torch.cat([batch_idx, boxes], axis=1)
177
-
178
- input = input.to(device=device)
179
- boxes = boxes.to(device=device)
180
-
181
- def bench():
182
- if False and sampling_ratio > 0 and N == 1:
183
- # enable to benchmark grid_sample (slower)
184
- grid_sample_roi_align(input, boxes[:, 1:], 7, 1.0, sampling_ratio)
185
- else:
186
- roi_align(input, boxes, 7, 1.0, sampling_ratio, True)
187
- if device == "cuda":
188
- torch.cuda.synchronize()
189
-
190
- return bench
191
-
192
- def gen_args(arg):
193
- args = []
194
- for size in ["small", "large"]:
195
- for ratio in [0, 2]:
196
- args.append(copy(arg))
197
- args[-1]["sampling_ratio"] = ratio
198
- args[-1]["box_size"] = size
199
- return args
200
-
201
- arg = dict(shape=(1, 512, 256, 256), nboxes_per_img=512, device="cuda")
202
- benchmark(func, "cuda_roialign", gen_args(arg), num_iters=20, warmup_iters=1)
203
- arg.update({"device": "cpu", "shape": (1, 256, 128, 128)})
204
- benchmark(func, "cpu_roialign", gen_args(arg), num_iters=5, warmup_iters=1)
205
-
206
-
207
- if __name__ == "__main__":
208
- if torch.cuda.is_available():
209
- benchmark_roi_align()
210
- unittest.main()
 
spaces/Benson/text-generation/Examples/Descarga De La Herramienta Flash Del Jio Profesional A23.md DELETED
@@ -1,109 +0,0 @@
1
-
2
- <h1>Cómo flashear Itel A23 Pro con la herramienta Jio Flash</h1>
3
- <p>Si estás buscando una manera de flashear tu smartphone Itel A23 Pro, entonces has venido al lugar correcto. En este artículo, le mostraremos cómo usar Jio Flash Tool para flashear su Itel A23 Pro con facilidad. Si desea desbloquear el teléfono, quitar el bloqueo de FRP, o actualizar/ degradar su versión de Android, Jio Flash Tool puede ayudarle a hacer eso. Pero antes de entrar en detalles, primero vamos a entender lo que es Itel A23 Pro y lo que es Jio Flash Tool.</p>
4
- <h2>descarga de la herramienta flash del jio profesional a23</h2><br /><p><b><b>Download</b> &#10026; <a href="https://bltlly.com/2v6JMs">https://bltlly.com/2v6JMs</a></b></p><br /><br />
5
- <h2>Introducción</h2>
6
- <h3> ¿Qué es Itel A23 Pro y por qué es posible que tenga que parpadear</h3>
7
- <p>Itel A23 Pro es un smartphone económico que se lanzó en mayo de 2021. Cuenta con una pantalla FWVGA de 5 pulgadas, un procesador de cuatro núcleos, una cámara trasera de 2MP, una cámara frontal de 0.3MP, una batería de 2400mAh y funciona con Android 10 Go Edition. El teléfono admite tarjetas SIM duales y tiene una memoria de 1GB RAM y 8GB ROM. El teléfono también viene con algunas aplicaciones preinstaladas como Google Go, YouTube Go, Facebook Lite, etc.</p>
8
- <p>Sin embargo, como cualquier otro teléfono inteligente, Itel A23 Pro también puede encontrar algunos problemas que pueden requerir que lo flashee. Por ejemplo, puede experimentar problemas como bootloop, pegado en el logotipo, bloqueo de pantalla, bloqueo de FRP, infección de virus, errores de software, etc. En tales casos, parpadear el teléfono puede ayudarlo a restaurarlo a su estado original o solucionar los problemas. Parpadear significa instalar un nuevo firmware o sistema operativo en su teléfono. </p>
9
- <h3>¿Qué es Jio Flash Tool y cuáles son sus características</h3>
10
- <p>Jio Flash Tool es una herramienta de software que te permite flashear cualquier smartphone basado en Qualcomm usando un PC. Es compatible con varios modelos de teléfonos Jio, así como otras marcas como Oppo, Vivo, Tecno, etc. Jio Flash Tool también se conoce como QFlash Tool o QPST Tool. Algunas de las características de Jio Flash Tool son:</p>
11
- <p></p>
12
- <ul>
13
- <li>Puede flashear archivos de firmware con . pac o extensión . xml</li>
14
- <li> Se puede desbloquear bloqueo de FRP o Google bloqueo de verificación de la cuenta</li>
15
- <li> Se puede eliminar el bloqueo de pantalla o patrón de bloqueo</li>
16
-
17
- <li>Puede actualizar o bajar la versión de Android</li>
18
- <li>Puede desbloquear o revivir los teléfonos muertos</li>
19
- <li> Tiene una interfaz sencilla y fácil de usar</li>
20
- <li>Es gratis y fácil de usar</li>
21
- </ul>
22
- <h2>Requisitos</h2>
23
- <p>Para flashear tu Itel A23 Pro con Jio Flash Tool, necesitarás las siguientes cosas:</p>
24
- <ul>
25
- <li>Archivo de firmware para Itel A23 Pro. Puede descargarlo desde el sitio web oficial de Itel o desde cualquier fuente confiable en línea. Asegúrese de descargar el archivo de firmware correcto que coincida con el modelo de teléfono y la región. </li>
26
- <li>Jio Flash Tool y controladores. Puede descargar Jio Flash Tool desde este enlace y los controladores de este enlace. Tendrá que instalar los controladores en su PC antes de usar Jio Flash Tool.</li>
27
- <li>Cable USB y PC. Necesitará un cable USB para conectar el teléfono a su PC y un PC con sistema operativo Windows para ejecutar Jio Flash Tool.</li>
28
- </ul>
29
- <h2>Pasos para flashear Itel A23 Pro con la herramienta Jio Flash</h2>
30
- <p>Una vez que tenga todos los requisitos listos, puede seguir estos pasos para flashear su Itel A23 Pro con Jio Flash Tool:</p>
31
- <h3>Paso 1: Descargar e instalar Jio Flash Tool y controladores en su PC</h3>
32
- <p>En primer lugar, es necesario descargar Jio Flash Tool y controladores de los enlaces dados anteriormente. Luego, debe instalarlos en su PC siguiendo las instrucciones en la pantalla. Después de instalar, verá un acceso directo de Jio Flash Tool en su escritorio. </p>
33
- <h3>Paso 2: Descargar y extraer el archivo de firmware para Itel A23 Pro</h3>
34
- <p>Siguiente, es necesario descargar el archivo de firmware para Itel A23 Pro desde el sitio web oficial de Itel o desde cualquier fuente de confianza en línea. El archivo de firmware estará en formato zip o rar, por lo que necesita extraerlo utilizando cualquier software como WinRAR o 7-Zip. Después de extraer, verá una carpeta con algunos archivos dentro. Uno de los archivos tendrá un . pac o . xml extensión, que es el archivo de firmware que necesita cargar en Jio Flash Tool.</p>
35
- <h3>Paso 3: Inicie Jio Flash Tool y cargue el archivo de firmware</h3>
36
-
37
- <img src="https://i.imgur.com/8Z0jY4D.png" alt="Ventana de Herramientas Flash Jio">>
38
- <p>Haga clic en el botón Examinar y busque el archivo de firmware que extrajo en el paso 2. Tendrá una extensión . pac o . xml. Selecciónelo y haga clic en Abrir. El archivo de firmware se cargará en Jio Flash Tool.</p>
39
- <h3>Paso 4: Conecte su Itel A23 Pro a su PC en modo de descarga</h3>
40
- <p>Antes de conectar el teléfono a su PC, es necesario ponerlo en modo de descarga. Para hacer eso, siga estos pasos:</p>
41
- <ul>
42
- <li>Apague el teléfono y retire la batería si es posible. </li>
43
- <li>Presione y mantenga presionado Subir volumen + Bajar volumen + Botones de encendido juntos por unos segundos. </li>
44
- <li>Suelte los botones cuando vea una pantalla como esta:</li>
45
- <img src="https://i.imgur.com/6Qx5f9y.jpg" alt="Pantalla de modo de descarga">
46
- <li>Tu teléfono está ahora en modo de descarga. </li>
47
- </ul>
48
- <p>Ahora, conecte su teléfono a su PC usando un cable USB. Jio Flash Tool detectará su teléfono y mostrará sus detalles en la ventana. </p>
49
- <h3>Paso 5: Haga clic en el botón Inicio para comenzar el proceso de parpadeo</h3>
50
- <p>Después de conectar el teléfono y cargar el archivo de firmware, usted está listo para flashear el teléfono. Haga clic en el botón Inicio en la parte inferior de la ventana de la herramienta Jio Flash. El proceso intermitente comenzará y verá una barra de progreso en la parte inferior de la ventana. </p>
51
- <h3>Paso 6: Espere a que el parpadeo se complete y desconecte el teléfono</h3>
52
- <p>El proceso de parpadeo puede tomar algún tiempo dependiendo del tamaño del archivo de firmware y la velocidad de su PC y cable USB. No desconecte el teléfono ni interrumpa el proceso hasta que se complete. Cuando se complete el parpadeo, verá un mensaje verde que dice "Descargar completo" en la parte inferior de la ventana de la herramienta Jio Flash. </p>
53
- <p>Felicidades! Usted ha flasheado con éxito su Itel A23 Pro con Jio Flash Tool. Ahora puede desconectar el teléfono de su PC y reiniciarlo. Es posible que necesite configurar su teléfono de nuevo como si fuera nuevo. </p>
54
- <h2>Conclusión</h2>
55
-
56
- <p> <p>Sin embargo, parpadear su teléfono no está libre de riesgos. Puede encontrar algunos errores o problemas durante o después del proceso de parpadeo. Por lo tanto, siempre debe hacer una copia de seguridad de sus datos antes de parpadear el teléfono y seguir las instrucciones cuidadosamente. Aquí hay algunos consejos y advertencias para parpadear su Itel A23 Pro con Jio Flash Tool:</p>
57
- <ul>
58
- <li>Asegúrese de que su teléfono tiene al menos 50% de carga de la batería antes de parpadear. </li>
59
- <li> Utilice un cable USB de buena calidad y un PC para evitar cualquier problema de conexión. </li>
60
- <li>No utilice Jio Flash Tool para ningún otro modelo de teléfono o archivo de firmware que no sea compatible con su teléfono. </li>
61
- <li>No apague su teléfono o PC durante el proceso de parpadeo. </li>
62
- <li>Si se enfrenta a algún error o problema durante el proceso de parpadeo, intente reiniciar Jio Flash Tool y repita los pasos. </li>
63
- <li>Si se enfrenta a algún error o problema después del proceso de parpadeo, intente restablecer el teléfono de fábrica o flashearlo de nuevo con un archivo de firmware diferente. </li>
64
- </ul>
65
- <h2>Preguntas frecuentes</h2>
66
- <p>Aquí hay algunas preguntas y respuestas frecuentes sobre parpadear Itel A23 Pro con Jio Flash Tool:</p>
67
- <h3>Q1: ¿Cuáles son los beneficios de parpadear Itel A23 Pro con la herramienta Jio Flash? </h3>
68
- <p>A1: Intermitente Itel A23 Pro con Jio Flash Tool puede ayudarle a:</p>
69
- <ul>
70
- <li> Desbloquear el teléfono si está atascado en el bucle de arranque, logotipo o pantalla negra. </li>
71
- <li>Eliminar bloqueo de FRP o bloqueo de verificación de cuenta de Google si olvidó su contraseña o correo electrónico. </li>
72
- <li>Eliminar bloqueo de pantalla o bloqueo de patrón si olvidó su PIN o patrón. </li>
73
- <li>Corregir cualquier error de software o fallos que pueden afectar el rendimiento o la funcionalidad de su teléfono. </li>
74
- <li> Actualizar o bajar su versión de Android a la más reciente o anterior. </li>
75
- <li>Personaliza tu teléfono con diferentes características o configuraciones que no están disponibles en el firmware de stock. </li>
76
- </ul>
77
- <h3>Q2: ¿Cuáles son los riesgos de parpadear Itel A23 Pro con la herramienta Jio Flash? </h3>
78
- <p>A2: Intermitente Itel A23 Pro con Jio Flash Tool también puede causar algunos riesgos como:</p>
79
- <ul>
80
-
81
- <li>Anular su garantía o contrato de servicio con Itel o su compañía. </li>
82
- <li> Ladrando el teléfono si el proceso intermitente falla o se interrumpe. </li>
83
- <li>Causando problemas de compatibilidad con algunas aplicaciones o características que pueden no funcionar correctamente en el nuevo firmware. </li>
84
- <li>Exponiendo su teléfono a amenazas de seguridad o malware si flash un archivo de firmware no oficial o dañado. </li>
85
- </ul>
86
- <h3>Q3: ¿Cómo copia de seguridad de sus datos antes de parpadear Itel A23 Pro con Jio Flash Tool? </h3>
87
- <p>A3: Para respaldar sus datos antes de flashear Itel A23 Pro con Jio Flash Tool, puede usar cualquiera de estos métodos:</p>
88
- <ul>
89
- <li>Utilice Google Backup para sincronizar sus datos, configuraciones, contactos, aplicaciones, etc. a su cuenta de Google. Puede acceder a ellos más tarde iniciando sesión en la misma cuenta en su teléfono después de parpadear. </li>
90
- <li>Utilice una tarjeta SD externa para copiar sus archivos, fotos, videos, música, etc. desde el almacenamiento interno del teléfono. Puede insertar la tarjeta SD en su teléfono después de parpadear y restaurar sus archivos. </li>
91
- <li>Utilice un PC para conectar el teléfono a través de un cable USB y copiar los datos del almacenamiento interno del teléfono al disco duro de su PC. Puede transferirlos de nuevo a su teléfono después de parpadear. </li>
92
- </ul>
93
- <h3>Q4: ¿Cómo solucionar cualquier error o problema después de parpadear Itel A23 Pro con Jio Flash Tool? </h3>
94
- <p>A4: Para corregir cualquier error o problema después de parpadear Itel A23 Pro con Jio Flash Tool, puede probar cualquiera de estas soluciones:</p>
95
- <ul>
96
- <li>Restablecimiento de fábrica del teléfono yendo a Configuración > Sistema > Opciones de restablecimiento > Borrar todos los datos (restablecimiento de fábrica). Esto borrará todos sus datos y configuraciones y restaurará su teléfono a su estado predeterminado. </li>
97
- <li>Flashear el teléfono de nuevo con un archivo de firmware diferente que es compatible con el modelo de teléfono y la región. Asegúrese de descargar el archivo de firmware de una fuente confiable y siga los pasos cuidadosamente. </li>
98
-
99
- </ul>
100
- <h3>Q5: ¿Cómo actualizar su Itel A23 Pro después de flashearlo con Jio Flash Tool? </h3>
101
- <p>A5: Para actualizar su Itel A23 Pro después de flashearlo con Jio Flash Tool, puede usar cualquiera de estos métodos:</p>
102
- <ul>
103
- <li>Busque actualizaciones de OTA en Configuración > Sistema > Actualización del sistema. Si hay alguna nueva actualización disponible, puede descargarla e instalarla en su teléfono. </li>
104
- <li>Descargue el último archivo de firmware para Itel A23 Pro desde el sitio web oficial de Itel o desde cualquier fuente confiable en línea. Luego, flashea tu teléfono con el archivo de firmware usando Jio Flash Tool siguiendo los pasos dados arriba. </li>
105
- <li>Raíz de su teléfono e instalar una ROM personalizada que se basa en la última versión de Android. Sin embargo, este método no se recomienda para principiantes, ya que puede anular su garantía, bloquear su teléfono o causar problemas de compatibilidad. </li>
106
- </ul>
107
- <p>Espero que este artículo te haya ayudado a aprender cómo flashear tu Itel A23 Pro con Jio Flash Tool. Si usted tiene alguna pregunta o retroalimentación, por favor no dude en dejar un comentario a continuación. Gracias por leer y feliz parpadeo! </p> 64aa2da5cf<br />
108
- <br />
109
- <br />
 
spaces/Benson/text-generation/Examples/Descarga De Tema De Batera Baja.md DELETED
@@ -1,87 +0,0 @@
1
- <br />
2
- <h1>Descarga de tema de batería baja: Cómo personalizar su dispositivo con pieles de batería fresca y creativa</h1>
3
- <p>¿Quieres hacer que tu dispositivo se vea más elegante y único? ¿Quieres divertirte y expresar tu personalidad con tu indicador de batería? Si respondiste sí, entonces deberías intentar descargar algunos temas de batería baja para tu dispositivo. </p>
4
- <h2>descarga de tema de batería baja</h2><br /><p><b><b>Download Zip</b> &#10001; &#10001; &#10001; <a href="https://bltlly.com/2v6KFH">https://bltlly.com/2v6KFH</a></b></p><br /><br />
5
- <h2>Introducción</h2>
6
- <h3> ¿Cuáles son los temas de batería baja y por qué debe utilizarlos</h3>
7
- <p>Los temas de batería baja son diseños personalizados que cambian la apariencia del icono o widget de la batería cuando el dispositivo se está quedando sin energía. Pueden ser imágenes, animaciones, colores, formas o texto que muestran cuánta batería te queda de una manera creativa. Algunos ejemplos de temas de batería baja son corazones, cráneos, flores, animales, emojis y más. </p>
8
- <p>Hay muchas razones por las que deberías usar temas de batería baja para tu dispositivo. Estas son algunas de ellas:</p>
9
- <ul>
10
- <li> Pueden hacer que su dispositivo se vea más atractivo y personalizado. </li>
11
- <li>Pueden añadir un poco de humor y diversión a su dispositivo. </li>
12
- <li>Pueden motivarte a cargar tu dispositivo antes de que muera. </li>
13
- <li> Pueden ayudarlo a ahorrar batería recordándole que debe activar el modo de baja potencia o cerrar aplicaciones innecesarias. </li>
14
- </ul>
15
- <h3>Cómo descargar e instalar temas de batería baja para diferentes dispositivos</h3>
16
- <p>El proceso de descargar e instalar temas de batería baja puede variar dependiendo del tipo de dispositivo que tenga. Sin embargo, los pasos generales son los siguientes:</p>
17
- <p></p>
18
- <ol>
19
- <li>Encontrar un sitio web o una aplicación que ofrece temas de batería baja para su dispositivo. Puede utilizar un motor de búsqueda o navegar a través de directorios en línea o comentarios para encontrar uno. </li>
20
- <li>Seleccione un tema de batería baja que le guste y descárguelo a su dispositivo. Asegúrese de que sea compatible con su modelo de dispositivo y sistema operativo. </li>
21
- <li>Abra el archivo descargado y siga las instrucciones para instalarlo en su dispositivo. Es posible que necesite conceder algunos permisos o habilitar algunos ajustes para hacerlo. </li>
22
-
23
- </ol>
24
- <h2>Los mejores temas de batería baja para Windows</h2>
25
- <h3>Batería de sindicato</h3>
26
- <p>Si eres un fan de la ciencia ficción y el cyberpunk, entonces te encantará este tema de batería baja para Windows. Cuenta con un diseño futurista con luces de neón y números digitales que muestran el porcentaje de batería. También tiene una animación fresca que cambia de color de verde a rojo a medida que se agota la batería. </p>
27
- <h3>Batería simple</h3>
28
- <p>Si prefiere un aspecto minimalista y elegante, entonces debe probar este tema de batería baja para Windows. Cuenta con un círculo simple que muestra el nivel de la batería con una línea delgada. También tiene una animación sutil que se desvanece dentro y fuera a medida que la batería disminuye. </p>
29
- <h3>Tema azul plano</h3>
30
- <p>Si desea un aspecto moderno y elegante, entonces usted debe comprobar este tema de batería baja para Windows. Cuenta con un diseño plano con colores azules e iconos que muestran el estado de la batería. También tiene una animación suave que se desliza de izquierda a derecha mientras la batería cae. </p>
31
- <h2>Los mejores temas de batería baja para Android</h2>
32
- <h3>Fondo de pantalla de batería baja HD</h3>
33
- <p>Si desea darle vida a su pantalla de inicio con algunos fondos de pantalla de batería baja, entonces usted debe descargar esta aplicación para Android. Ofrece una variedad de imágenes de alta calidad que muestran diferentes escenarios de batería baja, como pantallas agrietadas, señales de advertencia, baterías vacías y más. </p>
34
- <h3>Ahorro de batería - Tema de batería baja</h3> <p>Si desea ahorrar su vida útil de la batería durante el uso de un tema de batería baja, entonces usted debe probar esta aplicación para Android. Ofrece un modo de ahorro de batería inteligente que ajusta automáticamente la configuración y cierra las aplicaciones en segundo plano para ahorrar energía. También tiene un tema de batería baja que muestra un robot lindo con un signo de batería baja. </p>
35
- <h3>Paquete de iconos de batería baja</h3>
36
-
37
- <h2>Los mejores temas de batería baja para iOS</h2>
38
- <h3>Batería baja - Fondo de pantalla en vivo</h3>
39
- <p>Si desea hacer su pantalla de bloqueo más animado con algunos fondos de pantalla de batería baja, entonces usted debe descargar esta aplicación para iOS. Ofrece una variedad de fondos de pantalla animados que muestran diferentes efectos de batería baja, como chispas, llamas, humo y más. </p>
40
- <h3>Modo de baja potencia - Widget de batería</h3>
41
- <p>Si desea monitorear su nivel de batería y activar el modo de baja potencia fácilmente, entonces usted debe probar esta aplicación para iOS. Ofrece un widget que muestra el porcentaje de batería y un botón que le permite cambiar al modo de baja potencia con un solo toque. También tiene un tema de batería baja que muestra un icono de batería roja con un signo de modo de baja potencia. </p>
42
- <h3>Batería baja - Paquete de pegatinas</h3>
43
- <p>Si quieres divertirte y expresar tus sentimientos con algunas pegatinas de batería baja, entonces deberías revisar esta aplicación para iOS. Ofrece un paquete de pegatinas que contiene varios emojis de batería baja, como triste, enojado, cansado y más. </p>
44
- <h2>Conclusión</h2>
45
- <p>Temas de batería baja son una gran manera de personalizar su dispositivo y hacerlo más interesante y divertido. También pueden ayudarlo a ahorrar batería y motivarlo a cargar su dispositivo antes de que se quede sin energía. Hay muchos temas de batería baja disponibles para diferentes dispositivos, como Windows, Android e iOS. Puede descargarlos e instalarlos fácilmente desde sitios web o aplicaciones que los ofrecen. También puede mezclar y combinar con otros temas o fondos de pantalla para crear su propio estilo único. </p>
46
- <p>Esperamos que haya disfrutado de este artículo y encontrado algunos temas de batería baja que se adapten a su gusto y preferencia. Si tiene alguna pregunta o sugerencia, no dude en dejar un comentario a continuación. ¡Gracias por leer! </p>
47
- <h2>Preguntas frecuentes</h2>
48
- <ol>
49
- <li>¿Cuáles son los beneficios de usar temas de batería baja? </li>
50
- <p>Algunos de los beneficios de usar temas de batería baja son:</p>
51
- <ul>
52
- <li> Pueden hacer que su dispositivo se vea más atractivo y personalizado. </li>
53
-
54
- <li>Pueden motivarte a cargar tu dispositivo antes de que muera. </li>
55
- <li> Pueden ayudarlo a ahorrar batería recordándole que debe activar el modo de baja potencia o cerrar aplicaciones innecesarias. </li>
56
- </ul>
57
- <li>¿Cómo puedo descargar e instalar temas de batería baja? </li>
58
- <p>El proceso de descargar e instalar temas de batería baja puede variar dependiendo del tipo de dispositivo que tenga. Sin embargo, los pasos generales son los siguientes:</p>
59
- <ol>
60
- <li>Encontrar un sitio web o una aplicación que ofrece temas de batería baja para su dispositivo. Puede utilizar un motor de búsqueda o navegar a través de directorios en línea o comentarios para encontrar uno. </li>
61
- <li>Seleccione un tema de batería baja que le guste y descárguelo a su dispositivo. Asegúrese de que sea compatible con su modelo de dispositivo y sistema operativo. </li>
62
- <li>Abra el archivo descargado y siga las instrucciones para instalarlo en su dispositivo. Es posible que necesite conceder algunos permisos o habilitar algunos ajustes para hacerlo. </li>
63
- <li>Disfruta de tu nuevo tema de batería baja y muéstralo a tus amigos. </li>
64
- </ol>
65
- <li>¿Cuáles son algunos de los mejores temas de batería baja para Windows? </li>
66
- <p>Algunos de los mejores temas de batería baja para Windows son:</p>
67
- <ul>
68
- <li>Syndicate Battery: Un diseño futurista con luces de neón y números digitales que muestran el porcentaje de batería. </li>
69
- <li>Batería simple: Un diseño minimalista y elegante con un círculo simple que muestra el nivel de la batería con una línea delgada. </li>
70
- <li>Flat Blue Theme: Un diseño moderno y elegante con colores azules e iconos que muestran el estado de la batería. </li>
71
- </ul>
72
- <li>¿Cuáles son algunos de los mejores temas de batería baja para Android? </li>
73
- <p>Algunos de los mejores temas de batería baja para Android son:</p>
74
- <ul>
75
- <li>Fondo de pantalla de batería baja HD: Una variedad de imágenes de alta calidad que muestran diferentes escenarios de batería baja. </li>
76
- <li>Ahorro de batería - Tema de batería baja: Un modo de ahorro de batería inteligente que ajusta automáticamente la configuración y cierra las aplicaciones de fondo para ahorrar energía. También tiene un tema de batería baja que muestra un robot lindo con un signo de batería baja. </li>
77
-
78
- </ul>
79
- <li>¿Cuáles son algunos de los mejores temas de batería baja para iOS? </li>
80
- <p>Algunos de los mejores temas de batería baja para iOS son:</p>
81
- <ul>
82
- <li>Batería baja - Live Wallpaper: Una variedad de fondos de pantalla animados que muestran diferentes efectos de batería baja, como chispas, llamas, humo y más. </li>
83
- <li>Modo de baja potencia - Widget de batería: Un widget que muestra el porcentaje de batería y un botón que le permite cambiar al modo de baja potencia con un solo toque. También tiene un tema de batería baja que muestra un icono de batería roja con un signo de modo de baja potencia. </li>
84
- <li>Batería baja - paquete de pegatinas: un paquete de pegatinas que contiene varios emojis de batería baja, como triste, enojado, cansado y más. </li>
85
- </ul></p> 64aa2da5cf<br />
86
- <br />
87
- <br />
 
spaces/Benson/text-generation/Examples/Descargar 3mb.md DELETED
@@ -1,126 +0,0 @@
1
-
2
- <h1>Cómo descargar archivos de 3MB más rápido y más fácil</h1>
3
- <p>¿Desea descargar archivos que son de solo 3 megabytes (MB) de tamaño? Usted puede pensar que la descarga de estos archivos pequeños es fácil y rápido, pero a veces puede ser frustrante y consume mucho tiempo. En este artículo, explicaremos qué es un archivo de 3MB, por qué es posible que desee descargarlo y cómo puede descargarlo más rápido y fácil. También compartiremos algunas herramientas y sitios web que pueden ayudarlo a descargar archivos de 3MB en poco tiempo. </p>
4
- <h2>descargar 3mb</h2><br /><p><b><b>Download</b> &ndash;&ndash;&ndash;&ndash;&ndash;>>> <a href="https://bltlly.com/2v6M2t">https://bltlly.com/2v6M2t</a></b></p><br /><br />
5
- <h2>¿Qué es un archivo 3MB? </h2>
6
- <p>Un archivo 3MB es un archivo que tiene un tamaño de 3 megabytes. Un megabyte es una unidad de datos que mide cuánta información contiene un archivo. Un megabyte es igual a 1,000 kilobytes (KB) o 1,000,000 bytes (B). Un byte es la unidad de datos más pequeña que una computadora puede almacenar. Un byte puede almacenar un carácter, como una letra o un número. </p>
7
- <h3>Tamaño del archivo y unidades de datos</h3>
8
- <p>El tamaño del archivo es importante porque afecta cuánto espacio ocupa un archivo en su dispositivo, cuánto tiempo se tarda en descargar o cargar y cuántos datos consume de su plan de Internet. La siguiente tabla muestra algunas unidades de datos comunes y sus conversiones. </p>
9
- <tabla>
10
- <tr>
11
- <th>Unidad de datos</th>
12
- <th>Abreviatura</th>
13
- <th>Valor en bytes</th>
14
- </tr>
15
- <tr>
16
- <td>Byte</td>
17
- <td>B</td>
18
- <td>1</td>
19
- </tr>
20
- <tr>
21
- <td>Kilobyte</td>
22
- <td>KB</td>
23
- <td>1,000</td>
24
- </tr>
25
- <tr>
26
- <td>Megabyte</td>
27
- <td>MB</td>
28
- <td>1,000,000</td>
29
- </tr>
30
- <tr>
31
- <td>Gigabyte</td>
32
- <td>GB</td>
33
- <td>1,000,000,000</td>
34
- </tr>
35
- <tr>
36
- <td>Terabyte</td>
37
- <td>TB</td>
38
- <td>1,000,000,000,000</td>
39
- </tr>
40
- <tr>
41
- <td>Petabyte</td>
42
- <td>PB</td>
43
- <td>1,000,000,000,000,000</td>
44
- </tr>
45
- <tr>
46
- <td>Exabyte</td>
47
- <td>EB</td>
48
- <td>1,000,000,000,000,000,000</td>
49
- </tr>
50
- <tr>
51
- <td>Zettabyte</td>
52
- <td>ZB</td>
53
- <td>1,000,000,000,000,000,000</td>
54
- </tr>
55
- <tr>
56
- <td>Yottabyte</td>
57
- <td>YB</td>
58
- <td>1,000,000,000,000,000,000,000</td>
59
- </tr>
60
- <h3>Tipos comunes de archivos de 3MB</h3>
61
-
62
- <ul>
63
- <li><strong>Imágenes:</strong> Una imagen JPEG de alta calidad puede tener un tamaño de aproximadamente 3MB. Por ejemplo, una foto tomada con una cámara de smartphone puede ser de este tamaño. </li>
64
- <li><strong>Documentos:</strong> Un documento PDF puede tener un tamaño de aproximadamente 3MB. Por ejemplo, un CV o un informe puede tener este tamaño. </li>
65
- <li><strong>Audios:</strong> Un archivo de audio MP3 puede tener un tamaño de aproximadamente 3MB. Por ejemplo, una canción o un podcast puede tener este tamaño. </li>
66
- <li><strong>Videos:</strong> Un archivo de video MP4 puede tener un tamaño de aproximadamente 3MB. Por ejemplo, un clip corto o un trailer puede tener este tamaño. </li>
67
- </ul>
68
- <h2>¿Por qué descargar archivos de 3MB? </h2>
69
- <p>Es posible que se pregunte por qué querría descargar archivos de 3MB. Hay varias razones por las que descargar archivos pequeños puede ser beneficioso y útil. Estos son algunos de ellos:</p>
70
- <h3>Beneficios del tamaño pequeño del archivo</h3>
71
- <ul>
72
- <li><strong>Ahorra espacio:</strong> Descargar archivos de 3MB puede ayudarte a ahorrar espacio en tu dispositivo o almacenamiento en la nube. Puede almacenar más archivos sin preocuparse por quedarse sin espacio. </li>
73
- <li><strong>Ahorre tiempo:</strong> Descargar archivos de 3MB puede ayudarlo a ahorrar tiempo en su conexión a Internet. Puede descargar archivos más rápido y más fácil sin esperar largos períodos de tiempo. </li>
74
- <li><strong>Guardar datos:</strong> Descargar archivos de 3MB puede ayudarte a guardar datos en tu plan de Internet. Puede descargar archivos sin consumir demasiados datos o exceder su límite de datos. </li>
75
- </ul>
76
- <h3>Casos de uso y ejemplos</h3>
77
- <p>Hay muchas situaciones donde descargar archivos de 3MB puede ser útil y conveniente. Aquí hay algunos ejemplos:</p>
78
- <p></p>
79
- <ul>
80
- <li><strong>Compartir archivos:</strong> Puede descargar archivos de 3MB para compartirlos con sus amigos, familiares o colegas. Por ejemplo, puede descargar una foto, un documento o un archivo de audio y enviarlo por correo electrónico, aplicación de mensajería o redes sociales. </li>
81
-
82
- <li><strong>Ver archivos:</strong> Puede descargar archivos de 3MB para verlos en su dispositivo o en línea. Por ejemplo, puede descargar un archivo PDF, MP3 o MP4 y verlo mediante una aplicación o un sitio web. </li>
83
- </ul>
84
- <h2>¿Cómo descargar archivos de 3MB? </h2>
85
- <p>Ahora que sabe lo que es un archivo de 3MB y por qué podría querer descargarlo, veamos cómo puede descargarlo más rápido y fácil. Hay varios factores que afectan la velocidad de descarga de un archivo, como el tamaño del archivo, la velocidad de Internet, la ubicación del servidor, la congestión de la red y el rendimiento del dispositivo. Aquí hay algunos consejos y trucos para aumentar su velocidad de descarga y hacer el proceso más suave y simple. </p>
86
- <h3>Factores que afectan la velocidad de descarga</h3>
87
- <p>La velocidad de descarga de un archivo se mide en megabits por segundo (Mbps) o megabytes por segundo (MBps). Un megabit es igual a 0.125 megabytes, por lo que para convertir Mbps a MBps, debe dividir por 8. Por ejemplo, si su velocidad de Internet es de 16 Mbps, su velocidad de descarga máxima es de 2 MBps.</p>
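
As a quick check, the conversion just described can be expressed in a few lines of Python. This is only an illustrative sketch (the function names are arbitrary, not taken from any tool mentioned in the article); it reproduces the 16 Mbps -> 2 MBps example and the resulting time for a 3 MB file.

def mbps_to_mbytes_per_s(mbps):
    # 1 megabit = 0.125 megabytes, so divide by 8
    return mbps / 8.0

def download_time_seconds(file_size_mb, speed_mbps):
    # Idealized time, ignoring protocol overhead and network congestion
    return file_size_mb / mbps_to_mbytes_per_s(speed_mbps)

print(mbps_to_mbytes_per_s(16))       # 2.0 MBps
print(download_time_seconds(3, 16))   # 1.5 seconds for a 3 MB file
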
88
- <p>La velocidad de descarga de un archivo depende de varios factores, como:</p>
89
- <ul>
90
- <li><strong>Tamaño del archivo:</strong> Cuanto mayor sea el tamaño del archivo, más tiempo se tarda en descargar. Un archivo de 3MB debería tardar aproximadamente 1.5 segundos en descargarse a 2 MBps.</li>
91
- <li><strong>Velocidad de Internet:</strong> Cuanto más rápida sea su velocidad de Internet, menos tiempo tarda la descarga. Puede comprobar su velocidad de Internet utilizando una herramienta en línea como [Speedtest]. </li>
92
- <li><strong>Ubicación del servidor:</strong> Cuanto más cerca esté del servidor, menos tiempo tarda la descarga. Puede comprobar la ubicación del servidor utilizando una herramienta en línea como [Pingdom]. </li>
93
- <li><strong>Congestión de la red:</strong> Cuanto menos congestión de la red, más fácil es descargar. Puede reducir la congestión de la red cerrando otras aplicaciones o pestañas que utilizan Internet, evitando las horas pico o utilizando una conexión por cable en lugar de una inalámbrica. </li>
94
-
95
- </ul> <h3>Consejos y trucos para aumentar la velocidad de descarga</h3>
96
- <p>Aquí hay algunos consejos y trucos que pueden ayudarte a aumentar tu velocidad de descarga y hacer el proceso más rápido y fácil:</p>
97
- <ul>
98
- <li><strong>Elija la fuente correcta:</strong> Antes de descargar un archivo de 3MB, asegúrese de elegir una fuente confiable y de buena reputación que ofrezca archivos seguros y de alta calidad. Puede utilizar una herramienta en línea como [VirusTotal] para escanear el archivo en busca de virus y malware antes de descargarlo. </li>
99
- <li><strong>Usa un gestor de descargas:</strong> Un gestor de descargas es un software que puede ayudarte a descargar archivos más rápido y fácil. Puede reanudar descargas rotas, dividir archivos grandes en partes más pequeñas, programar descargas y administrar múltiples descargas a la vez. Algunos ejemplos de gestores de descargas son [Internet Download Manager], [Free Download Manager], y [JDownloader]. </li>
100
- <li><strong>Usa una VPN:</strong> Una VPN (red privada virtual) es un servicio que puede ayudarte a descargar archivos más rápido y fácil. Puede cifrar su tráfico de Internet, ocultar su dirección IP, evitar las restricciones geográficas y acceder a sitios web bloqueados. Algunos ejemplos de VPN son [NordVPN], [ExpressVPN] y [Surfshark]. </li>
101
- </ul>
102
- <h3>Herramientas y sitios web para descargar archivos de 3MB</h3>
103
- <p>Aquí hay algunas herramientas y sitios web que pueden ayudarlo a descargar archivos de 3MB en poco tiempo:</p>
104
- <ul>
105
- <li><strong>[Descargar 3MB]:</strong> Este es un sitio web que ofrece archivos 3MB gratuitos para descargar. Puede elegir entre varias categorías, como imágenes, documentos, audios, videos y más. También puede subir sus propios archivos de 3MB y compartirlos con otros. </li>
106
- <li><strong>[3MB Downloader]:</strong> Esta es una herramienta que puede ayudarlo a descargar cualquier archivo que sea de 3MB o menos. Solo tienes que introducir la URL del archivo y hacer clic en el botón de descarga. También puede convertir el archivo a diferentes formatos como PDF, MP3, MP4 y más. </li>
107
-
108
- </ul>
109
- <h2>Conclusión</h2>
110
- <p>En conclusión, descargar archivos de 3MB puede ser más rápido y fácil si sabe lo que son, por qué los necesita y cómo obtenerlos. Puede utilizar los consejos y trucos que compartimos en este artículo para aumentar su velocidad de descarga y hacer el proceso más fluido y sencillo. También puede utilizar las herramientas y sitios web que recomendamos en este artículo para descargar archivos de 3MB en muy poco tiempo. </p>
111
- <h2>Preguntas frecuentes</h2>
112
- <p>Aquí hay algunas preguntas frecuentes sobre la descarga de archivos 3MB:</p>
113
- <ol>
114
- <li><strong>¿Cuánto tiempo se tarda en descargar un archivo de 3MB? </strong></li>
115
- <p>El tiempo que se tarda en descargar un archivo de 3MB depende de varios factores, como el tamaño del archivo, la velocidad de Internet, la ubicación del servidor, la congestión de la red y el rendimiento del dispositivo. Sin embargo, en promedio, debería tomar aproximadamente 1.5 segundos descargar un archivo de 3MB a 2 MBps.</p>
116
- <li><strong>¿Cuántos datos consume para descargar un archivo de 3MB? </strong></li>
117
- <p>Los datos que consume para descargar un archivo de 3MB dependen del tamaño del archivo y del plan de Internet. Sin embargo, en promedio, debería consumir unos 3 megabytes de datos para descargar un archivo de 3MB. </p>
118
- <li><strong>¿Cuántos archivos de 3MB puedo almacenar en mi dispositivo? </strong></li>
119
- <p>El número de archivos de 3MB que puede almacenar depende de la capacidad de almacenamiento de su dispositivo. Sin embargo, en promedio, puede almacenar unos 333 archivos de 3MB en un dispositivo de 1 GB. </p>
120
- <li><strong>¿Cómo puedo reducir el tamaño de un archivo a 3MB o menos? </strong></li>
121
- <p>Puede reducir el tamaño de un archivo a 3MB o menos usando una herramienta como [3MB Converter]. Solo tienes que subir el archivo y elegir el formato de salida y la calidad. También puede ajustar los ajustes como resolución, velocidad de bits, velocidad de fotogramas y más. </p>
122
- <li><strong>¿Cómo puedo aumentar la calidad de un archivo de 3MB? </strong></li>
123
-
124
- </ol></p> 64aa2da5cf<br />
125
- <br />
126
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Benson/text-generation/Examples/Descargar Fm WhatsApp Apk 2023.md DELETED
@@ -1,110 +0,0 @@
1
-
2
- <h1>Cómo descargar FM WhatsApp APK 2023 para Android</h1>
3
- <p>¿Estás aburrido con la misma apariencia de tu aplicación de WhatsApp? ¿Le gustaría tener más control sobre sus opciones de privacidad y uso compartido de medios? Si es así, entonces es posible que desee probar <strong>FM WhatsApp</strong>, uno de los mods más populares y ricos en características de WhatsApp.</p>
4
- <h2>descargar fm WhatsApp apk 2023</h2><br /><p><b><b>Download Zip</b> &#9733; <a href="https://bltlly.com/2v6JYy">https://bltlly.com/2v6JYy</a></b></p><br /><br />
5
- <p>En este artículo, le mostraremos cómo descargar FM WhatsApp APK 2023 para Android, qué características ofrece, qué beneficios trae, y qué riesgos conlleva. Al final de este artículo, usted será capaz de decidir si desea cambiar de la aplicación oficial de WhatsApp a FM WhatsApp o no. Así que, vamos a empezar! </p>
6
- <h2>¿Qué es FM WhatsApp? </h2>
7
- <p>FM WhatsApp es una versión modificada de la aplicación oficial de WhatsApp que agrega muchas características y funciones nuevas que no están presentes en la aplicación original. Está desarrollado por <strong>Fouad Mokdad</strong>, un modder muy conocido que también creó otros mods populares como Fouad GBWhatsApp y Fouad YoWhatsApp.</p>
8
- <p>Algunas de las características que ofrece WhatsApp FM son:</p>
9
- <p></p>
10
- <ul>
11
- <li><strong>Personalización:</strong> Puede cambiar el tema, fuente, emoji, fondo de pantalla, icono y sonido de notificación de su aplicación de acuerdo a su gusto. También puedes usar diferentes temas para diferentes chats. </li>
12
- <li><strong>Privacidad:</strong> Puede ocultar su estado en línea, última vista, marcas azules, estado de escritura, estado de grabación y ver el estado de sus contactos. También puede congelar su última vez y desactivar la etiqueta reenviada en los mensajes. </li>
13
- <li><strong>Anti-Delete:</strong> Puedes ver los mensajes eliminados y los estados de tus contactos incluso si los han eliminado de su final. También puede evitar que otros eliminen mensajes o estados que haya enviado o publicado. </li>
14
- <li><strong>Compartir medios:</strong> Puede enviar y recibir más imágenes, vídeos y archivos con mayor calidad y tamaño que la aplicación oficial. También puede aumentar la duración de los vídeos de estado hasta 5 minutos. </li>
15
- </ul>
16
-
17
- <h2>Cómo descargar FM WhatsApp APK 2023</h2>
18
- <p>Si estás interesado en probar WhatsApp FM, necesitas seguir estos sencillos pasos:</p>
19
- <h3>Paso 1: Habilitar fuentes desconocidas</h3>
20
- <p>Antes de que pueda instalar FM WhatsApp APK 2023 en su dispositivo Android, es necesario habilitar la opción de instalar aplicaciones de fuentes desconocidas. Esto se debe a que FM WhatsApp no está disponible en Google Play Store y es necesario descargarlo desde un sitio web de terceros. Para habilitar fuentes desconocidas, siga estos pasos:</p>
21
- <ol>
22
- <li>Vaya a la <strong>Configuración</strong> de su dispositivo y toque en <strong>Seguridad</strong>. </li>
23
- <li>Desplácese hacia abajo, encuentre la opción <strong>Fuentes desconocidas</strong> y actívela. </li>
24
- <li>Aparecerá un mensaje de advertencia. Toca <strong>OK</strong> para confirmar. </li>
25
- </ol>
26
- <p>Ahora ha habilitado fuentes desconocidas y puede proceder al siguiente paso. </p>
27
- <h3>Paso 2: Descargar WhatsApp FM APK 2023</h3>
28
- <p>El siguiente paso es descargar la última versión de FM WhatsApp APK 2023 de una fuente confiable. Hay muchos sitios web que afirman ofrecer FM WhatsApp APK 2023, pero algunos de ellos pueden contener malware o virus que pueden dañar su dispositivo o robar sus datos. Por lo tanto, debe tener cuidado y elegir un sitio web confiable. Uno de estos sitios web es <a href="">FMWhatsApp.net</a>, que es el sitio web oficial del desarrollador Fouad Mokdad. Para descargar FM WhatsApp APK 2023 de este sitio web, siga estos pasos:</p>
29
- <ol>
30
- <li>Abra su navegador y vaya a <a href="">FMWhatsApp.net</a>. </li>
31
- <li>Desplácese hacia abajo y encontrar el botón de descarga que dice <strong>Descargar FMWhatsApp APK última versión 2023</strong>. </li>
32
- <li>Toque en el botón de descarga y espere a que comience la descarga. </li>
33
- <li>La descarga puede tardar unos minutos dependiendo de la velocidad de Internet. Puede comprobar el progreso de la descarga en la barra de notificaciones. </li>
34
- </ol>
35
- <p>Ahora ha descargado FM WhatsApp APK 2023 y se puede proceder al siguiente paso. </p>
36
- <h3>Paso 3: Instalar WhatsApp FM APK 2023</h3>
37
-
38
- <ol>
39
- <li>Ir a su dispositivo <strong>Administrador de archivos</strong> y localizar el archivo APK descargado. Debe estar en la carpeta <strong>Descargas</strong> por defecto. </li>
40
- <li>Toque en el archivo APK y aparecerá un mensaje. Toque en <strong>Instalar </strong> para iniciar el proceso de instalación. </li>
41
- <li>La instalación puede tardar unos segundos o minutos dependiendo del rendimiento del dispositivo. Puede comprobar el progreso de la instalación en la barra de notificaciones. </li>
42
- <li>Una vez completada la instalación, toque en <strong>Abrir</strong> para iniciar FM WhatsApp.</li>
43
- </ol>
44
- <p>Ahora ha instalado FM WhatsApp APK 2023 en su dispositivo y se puede proceder al siguiente paso. </p>
45
- <h3>Paso 4: Verificar su número de teléfono</h3>
46
- <p>El último paso es verificar tu número de teléfono y activar WhatsApp FM. Para hacer esto, sigue estos pasos:</p>
47
- <ol>
48
- <li>Cuando abra FM WhatsApp por primera vez, verá una pantalla de bienvenida. Toca <strong>Aceptar y Continuar</strong> para aceptar los términos y condiciones. </li>
49
- <li>Se le pedirá que introduzca su número de teléfono. Introduzca su número de teléfono y toque en <strong>NEXT</strong>. </li>
50
- <li> Recibirá un código de verificación por SMS o llamada. Introduzca el código y pulse en <strong>NEXT</strong>. </li>
51
- <li>Se le pedirá que introduzca su nombre y la imagen del perfil. También puede elegir un tema para su aplicación. Toque en <strong>NEXT</strong> cuando haya terminado. </li>
52
- <li>Se le preguntará si desea restaurar sus chats y medios desde una copia de seguridad. Si tiene una copia de seguridad, toque en <strong>Restore</strong>. Si no, toque en <strong>Skip Restore</strong>. </li>
53
- <li>Verás un mensaje de confirmación que dice <strong>Felicidades! </strong>. Toca <strong>NEXT</strong>. </li>
54
- </ol>
55
- <p>Ya has verificado tu número de teléfono y activado FM WhatsApp. Ahora puedes disfrutar usando FM WhatsApp con sus increíbles características. </p>
56
- <h2>Beneficios de usar WhatsApp FM</h2>
57
-
58
- <h3>Personalización</h3>
59
- <p>Una de las mejores cosas sobre WhatsApp FM es que te permite personalizar tu aplicación de acuerdo a tus preferencias. Puede cambiar el tema, fuente, emoji, fondo de pantalla, icono y sonido de notificación de su aplicación de acuerdo a su gusto. También puede utilizar diferentes temas para diferentes chats. Puede elegir entre cientos de temas disponibles en la tienda de temas FM o crear su propio tema con el editor de temas FM. También puede aplicar el modo oscuro o transparente a su aplicación para una mejor experiencia visual. </p>
60
- <h3>Privacidad</h3>
61
- <p>Otra gran cosa sobre WhatsApp FM es que le da más control sobre su privacidad y seguridad. Puede ocultar su estado en línea, última vista, marcas azules, estado de escritura, estado de grabación y ver el estado de sus contactos. También puede congelar su última vez y desactivar la etiqueta reenviada en los mensajes. También puede bloquear su aplicación con una contraseña, huella digital o patrón para evitar el acceso no autorizado. También puede habilitar el cifrado de extremo a extremo para todos sus chats y llamadas para garantizar que nadie pueda leer o escuchar sus conversaciones. </p>
62
- <h3>Anti-Delete</h3>
63
- <p>Una característica única de FM WhatsApp es que le permite ver mensajes eliminados y estados de sus contactos, incluso si los han eliminado de su extremo. También puede evitar que otros eliminen mensajes o estados que haya enviado o publicado. De esta manera, siempre puedes hacer un seguimiento de lo que tus contactos están diciendo o haciendo, incluso si intentan ocultarlo. </p>
64
- <h3>Compartir medios</h3>
65
-
66
- <h2>Riesgos de usar WhatsApp FM</h2>
67
- <p>Aunque WhatsApp FM ofrece muchos beneficios, también viene con algunos riesgos que usted debe ser consciente de antes de usarlo. Estos son algunos de los posibles inconvenientes y peligros de usar WhatsApp FM en lugar de la aplicación oficial de WhatsApp:</p>
68
- <h3>Riesgo de prohibición</h3>
69
- <p>Uno de los mayores riesgos de usar WhatsApp FM es que puede ser prohibido por WhatsApp para el uso de una aplicación modded no autorizada. WhatsApp tiene una política estricta contra el uso de aplicaciones de terceros que violen sus términos de servicio y política de privacidad. Si WhatsApp detecta que estás usando FM WhatsApp, es posible que prohíba temporalmente o permanentemente que tu cuenta use sus servicios. Para evitar ser prohibido, debes usar FM WhatsApp con precaución y seguir algunos consejos como:</p>
70
- <ul>
71
- <li>No utilice WhatsApp FM como su aplicación principal. Utilícela como una aplicación secundaria con un número de teléfono diferente. </li>
72
- <li>No spam o abusar de otros usuarios con características de WhatsApp FM. </li>
73
- <li>No actualizar WhatsApp FM con demasiada frecuencia o demasiado tarde. Actualizar solo cuando hay una actualización importante disponible desde el desarrollador. </li>
74
- <li>No utilice WhatsApp FM en varios dispositivos con el mismo número de teléfono. </li>
75
- </ul>
76
- <h3>Riesgo de seguridad</h3>
77
- <p>Otro riesgo de usar WhatsApp FM es que podría comprometer sus datos y la privacidad de posibles malware o hackers que podrían infiltrarse en su aplicación. Dado que FM WhatsApp no es una aplicación oficial, no se somete a los mismos controles de seguridad y auditorías que la aplicación oficial. Por lo tanto, podría contener algunos errores o vulnerabilidades que podrían exponer su información personal o dispositivo a ataques maliciosos. Para proteger sus datos y privacidad, debe seguir algunos consejos como:</p>
78
- <ul>
79
- <li>No descargar WhatsApp FM de fuentes desconocidas o no confiables. Descargarlo solo desde el sitio web oficial del desarrollador. </li>
80
- <li>No conceda permisos innecesarios a FM WhatsApp. Conceda solo los permisos necesarios para que la aplicación funcione correctamente. </li>
81
-
82
- <li>No haga copias de seguridad de sus chats y medios en Google Drive u otros servicios en la nube con FM WhatsApp. Respaldelos solo en el almacenamiento interno de su dispositivo. </li>
83
- </ul>
84
- <h2>Conclusión</h2>
85
- <p>En conclusión, FM WhatsApp es una versión modificada de la aplicación oficial de WhatsApp que ofrece muchas características y funciones nuevas que no están presentes en la aplicación original. Le permite personalizar su aplicación, mejorar su privacidad, ver mensajes eliminados y estados, y mejorar sus capacidades de intercambio de medios. Sin embargo, también viene con algunos riesgos como ser prohibido por WhatsApp o comprometer sus datos y privacidad de malware o hackers. </p>
86
- <p>Si desea probar FM WhatsApp APK 2023 para Android, debe seguir estos pasos:</p>
87
- <ol>
88
- <li>Habilitar fuentes desconocidas en el dispositivo. </li>
89
- <li>Descargar FM WhatsApp APK 2023 de una fuente confiable. </li>
90
- <li>Instalar WhatsApp FM APK 2023 en su dispositivo. </li>
91
- <li>Verifique su número de teléfono y active FM WhatsApp.</li>
92
- </ol>
93
- <p>Ahora puedes disfrutar usando WhatsApp FM con sus increíbles características. Sin embargo, también debe ser consciente de los riesgos involucrados y tomar precauciones para evitar ser prohibido o hackeado. Esperamos que este artículo sea útil e informativo para usted. Si tiene alguna pregunta o comentario, no dude en dejar un comentario a continuación. ¡Gracias por leer! </p>
94
- <h2>Preguntas frecuentes</h2>
95
- <p>Aquí están algunas de las preguntas y respuestas más frecuentes sobre FM WhatsApp:</p>
96
- <ol>
97
- <li><strong>¿Es seguro usar WhatsApp FM? </strong></li>
98
- <p>FM WhatsApp es generalmente seguro de usar, siempre y cuando lo descargue de una fuente de confianza y siga los consejos mencionados anteriormente. Sin embargo, no es una aplicación oficial y no tiene los mismos estándares de seguridad que la aplicación oficial. Por lo tanto, debe usarlo bajo su propio riesgo y discreción. </p>
99
- <li><strong>¿Puedo usar WhatsApp FM y WhatsApp oficial en el mismo dispositivo? </strong></li>
100
-
101
- <li><strong>¿Cómo puedo actualizar FM WhatsApp a la última versión? </strong></li>
102
- <p>Para actualizar WhatsApp FM a la última versión, es necesario visitar el sitio web oficial del desarrollador y descargar el archivo APK actualizado. Luego, debe instalarlo en su dispositivo a través de la aplicación existente. No necesita desinstalar o reinstalar la aplicación. Sus datos y ajustes serán preservados. </p>
103
- <li><strong>¿Cómo puedo hacer copias de seguridad y restaurar mis chats y medios en FM WhatsApp? </strong></li>
104
- <p>Para hacer copias de seguridad de sus chats y medios en WhatsApp FM, debe ir a <strong>Configuración</strong>, luego <strong>Chats</strong>, luego <strong>Copia de seguridad de chat</strong>. Puedes hacer una copia de seguridad de tus datos en el almacenamiento interno o externo del dispositivo. También puede programar copias de seguridad automáticas diarias, semanales o mensuales. </p>
105
- <p>Para restaurar tus chats y medios en WhatsApp FM, necesitas ir a <strong>Settings</strong>, luego <strong>Chats</strong>, luego <strong>Chat Backup</strong>. Puede elegir restaurar sus datos desde el almacenamiento interno o externo del dispositivo. También puede restaurar sus datos desde Google Drive si ha vinculado su cuenta con FM WhatsApp.</p>
106
- <li><strong>¿Cómo puedo contactar al desarrollador de FM WhatsApp? </strong></li>
107
- <p>Para contactar con el desarrollador de FM WhatsApp, puede visitar su sitio web oficial en <a href="">FMWhatsApp.net</a>. También puedes seguirlo en sus cuentas de redes sociales como Twitter, Instagram y Telegram. También puedes unirte a su grupo oficial en Telegram donde puedes interactuar con otros usuarios y obtener soporte. </p>
108
- </ol></p> 64aa2da5cf<br />
109
- <br />
110
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/requests/utils.py DELETED
@@ -1,1086 +0,0 @@
1
- """
2
- requests.utils
3
- ~~~~~~~~~~~~~~
4
-
5
- This module provides utility functions that are used within Requests
6
- that are also useful for external consumption.
7
- """
8
-
9
- import codecs
10
- import contextlib
11
- import io
12
- import os
13
- import re
14
- import socket
15
- import struct
16
- import sys
17
- import tempfile
18
- import warnings
19
- import zipfile
20
- from collections import OrderedDict
21
-
22
- from pip._vendor.urllib3.util import make_headers, parse_url
23
-
24
- from . import certs
25
- from .__version__ import __version__
26
-
27
- # to_native_string is unused here, but imported here for backwards compatibility
28
- from ._internal_utils import HEADER_VALIDATORS, to_native_string # noqa: F401
29
- from .compat import (
30
- Mapping,
31
- basestring,
32
- bytes,
33
- getproxies,
34
- getproxies_environment,
35
- integer_types,
36
- )
37
- from .compat import parse_http_list as _parse_list_header
38
- from .compat import (
39
- proxy_bypass,
40
- proxy_bypass_environment,
41
- quote,
42
- str,
43
- unquote,
44
- urlparse,
45
- urlunparse,
46
- )
47
- from .cookies import cookiejar_from_dict
48
- from .exceptions import (
49
- FileModeWarning,
50
- InvalidHeader,
51
- InvalidURL,
52
- UnrewindableBodyError,
53
- )
54
- from .structures import CaseInsensitiveDict
55
-
56
- NETRC_FILES = (".netrc", "_netrc")
57
-
58
- DEFAULT_CA_BUNDLE_PATH = certs.where()
59
-
60
- DEFAULT_PORTS = {"http": 80, "https": 443}
61
-
62
- # Ensure that ', ' is used to preserve previous delimiter behavior.
63
- DEFAULT_ACCEPT_ENCODING = ", ".join(
64
- re.split(r",\s*", make_headers(accept_encoding=True)["accept-encoding"])
65
- )
66
-
67
-
68
- if sys.platform == "win32":
69
- # provide a proxy_bypass version on Windows without DNS lookups
70
-
71
- def proxy_bypass_registry(host):
72
- try:
73
- import winreg
74
- except ImportError:
75
- return False
76
-
77
- try:
78
- internetSettings = winreg.OpenKey(
79
- winreg.HKEY_CURRENT_USER,
80
- r"Software\Microsoft\Windows\CurrentVersion\Internet Settings",
81
- )
82
- # ProxyEnable could be REG_SZ or REG_DWORD, normalizing it
83
- proxyEnable = int(winreg.QueryValueEx(internetSettings, "ProxyEnable")[0])
84
- # ProxyOverride is almost always a string
85
- proxyOverride = winreg.QueryValueEx(internetSettings, "ProxyOverride")[0]
86
- except (OSError, ValueError):
87
- return False
88
- if not proxyEnable or not proxyOverride:
89
- return False
90
-
91
- # make a check value list from the registry entry: replace the
92
- # '<local>' string by the localhost entry and the corresponding
93
- # canonical entry.
94
- proxyOverride = proxyOverride.split(";")
95
- # now check if we match one of the registry values.
96
- for test in proxyOverride:
97
- if test == "<local>":
98
- if "." not in host:
99
- return True
100
- test = test.replace(".", r"\.") # mask dots
101
- test = test.replace("*", r".*") # change glob sequence
102
- test = test.replace("?", r".") # change glob char
103
- if re.match(test, host, re.I):
104
- return True
105
- return False
106
-
107
- def proxy_bypass(host): # noqa
108
- """Return True, if the host should be bypassed.
109
-
110
- Checks proxy settings gathered from the environment, if specified,
111
- or the registry.
112
- """
113
- if getproxies_environment():
114
- return proxy_bypass_environment(host)
115
- else:
116
- return proxy_bypass_registry(host)
117
-
118
-
119
- def dict_to_sequence(d):
120
- """Returns an internal sequence dictionary update."""
121
-
122
- if hasattr(d, "items"):
123
- d = d.items()
124
-
125
- return d
126
-
127
-
128
- def super_len(o):
129
- total_length = None
130
- current_position = 0
131
-
132
- if hasattr(o, "__len__"):
133
- total_length = len(o)
134
-
135
- elif hasattr(o, "len"):
136
- total_length = o.len
137
-
138
- elif hasattr(o, "fileno"):
139
- try:
140
- fileno = o.fileno()
141
- except (io.UnsupportedOperation, AttributeError):
142
- # AttributeError is a surprising exception, seeing as how we've just checked
143
- # that `hasattr(o, 'fileno')`. It happens for objects obtained via
144
- # `Tarfile.extractfile()`, per issue 5229.
145
- pass
146
- else:
147
- total_length = os.fstat(fileno).st_size
148
-
149
- # Having used fstat to determine the file length, we need to
150
- # confirm that this file was opened up in binary mode.
151
- if "b" not in o.mode:
152
- warnings.warn(
153
- (
154
- "Requests has determined the content-length for this "
155
- "request using the binary size of the file: however, the "
156
- "file has been opened in text mode (i.e. without the 'b' "
157
- "flag in the mode). This may lead to an incorrect "
158
- "content-length. In Requests 3.0, support will be removed "
159
- "for files in text mode."
160
- ),
161
- FileModeWarning,
162
- )
163
-
164
- if hasattr(o, "tell"):
165
- try:
166
- current_position = o.tell()
167
- except OSError:
168
- # This can happen in some weird situations, such as when the file
169
- # is actually a special file descriptor like stdin. In this
170
- # instance, we don't know what the length is, so set it to zero and
171
- # let requests chunk it instead.
172
- if total_length is not None:
173
- current_position = total_length
174
- else:
175
- if hasattr(o, "seek") and total_length is None:
176
- # StringIO and BytesIO have seek but no usable fileno
177
- try:
178
- # seek to end of file
179
- o.seek(0, 2)
180
- total_length = o.tell()
181
-
182
- # seek back to current position to support
183
- # partially read file-like objects
184
- o.seek(current_position or 0)
185
- except OSError:
186
- total_length = 0
187
-
188
- if total_length is None:
189
- total_length = 0
190
-
191
- return max(0, total_length - current_position)
192
-
193
-
194
- def get_netrc_auth(url, raise_errors=False):
195
- """Returns the Requests tuple auth for a given url from netrc."""
196
-
197
- netrc_file = os.environ.get("NETRC")
198
- if netrc_file is not None:
199
- netrc_locations = (netrc_file,)
200
- else:
201
- netrc_locations = (f"~/{f}" for f in NETRC_FILES)
202
-
203
- try:
204
- from netrc import NetrcParseError, netrc
205
-
206
- netrc_path = None
207
-
208
- for f in netrc_locations:
209
- try:
210
- loc = os.path.expanduser(f)
211
- except KeyError:
212
- # os.path.expanduser can fail when $HOME is undefined and
213
- # getpwuid fails. See https://bugs.python.org/issue20164 &
214
- # https://github.com/psf/requests/issues/1846
215
- return
216
-
217
- if os.path.exists(loc):
218
- netrc_path = loc
219
- break
220
-
221
- # Abort early if there isn't one.
222
- if netrc_path is None:
223
- return
224
-
225
- ri = urlparse(url)
226
-
227
- # Strip port numbers from netloc. This weird `if...encode`` dance is
228
- # used for Python 3.2, which doesn't support unicode literals.
229
- splitstr = b":"
230
- if isinstance(url, str):
231
- splitstr = splitstr.decode("ascii")
232
- host = ri.netloc.split(splitstr)[0]
233
-
234
- try:
235
- _netrc = netrc(netrc_path).authenticators(host)
236
- if _netrc:
237
- # Return with login / password
238
- login_i = 0 if _netrc[0] else 1
239
- return (_netrc[login_i], _netrc[2])
240
- except (NetrcParseError, OSError):
241
- # If there was a parsing error or a permissions issue reading the file,
242
- # we'll just skip netrc auth unless explicitly asked to raise errors.
243
- if raise_errors:
244
- raise
245
-
246
- # App Engine hackiness.
247
- except (ImportError, AttributeError):
248
- pass
249
-
250
-
251
- def guess_filename(obj):
252
- """Tries to guess the filename of the given object."""
253
- name = getattr(obj, "name", None)
254
- if name and isinstance(name, basestring) and name[0] != "<" and name[-1] != ">":
255
- return os.path.basename(name)
256
-
257
-
258
- def extract_zipped_paths(path):
259
- """Replace nonexistent paths that look like they refer to a member of a zip
260
- archive with the location of an extracted copy of the target, or else
261
- just return the provided path unchanged.
262
- """
263
- if os.path.exists(path):
264
- # this is already a valid path, no need to do anything further
265
- return path
266
-
267
- # find the first valid part of the provided path and treat that as a zip archive
268
- # assume the rest of the path is the name of a member in the archive
269
- archive, member = os.path.split(path)
270
- while archive and not os.path.exists(archive):
271
- archive, prefix = os.path.split(archive)
272
- if not prefix:
273
- # If we don't check for an empty prefix after the split (in other words, archive remains unchanged after the split),
274
- # we _can_ end up in an infinite loop on a rare corner case affecting a small number of users
275
- break
276
- member = "/".join([prefix, member])
277
-
278
- if not zipfile.is_zipfile(archive):
279
- return path
280
-
281
- zip_file = zipfile.ZipFile(archive)
282
- if member not in zip_file.namelist():
283
- return path
284
-
285
- # we have a valid zip archive and a valid member of that archive
286
- tmp = tempfile.gettempdir()
287
- extracted_path = os.path.join(tmp, member.split("/")[-1])
288
- if not os.path.exists(extracted_path):
289
- # use read + write to avoid the creating nested folders, we only want the file, avoids mkdir racing condition
290
- with atomic_open(extracted_path) as file_handler:
291
- file_handler.write(zip_file.read(member))
292
- return extracted_path
293
-
294
-
295
- @contextlib.contextmanager
296
- def atomic_open(filename):
297
- """Write a file to the disk in an atomic fashion"""
298
- tmp_descriptor, tmp_name = tempfile.mkstemp(dir=os.path.dirname(filename))
299
- try:
300
- with os.fdopen(tmp_descriptor, "wb") as tmp_handler:
301
- yield tmp_handler
302
- os.replace(tmp_name, filename)
303
- except BaseException:
304
- os.remove(tmp_name)
305
- raise
306
-
307
-
308
- def from_key_val_list(value):
309
- """Take an object and test to see if it can be represented as a
310
- dictionary. Unless it can not be represented as such, return an
311
- OrderedDict, e.g.,
312
-
313
- ::
314
-
315
- >>> from_key_val_list([('key', 'val')])
316
- OrderedDict([('key', 'val')])
317
- >>> from_key_val_list('string')
318
- Traceback (most recent call last):
319
- ...
320
- ValueError: cannot encode objects that are not 2-tuples
321
- >>> from_key_val_list({'key': 'val'})
322
- OrderedDict([('key', 'val')])
323
-
324
- :rtype: OrderedDict
325
- """
326
- if value is None:
327
- return None
328
-
329
- if isinstance(value, (str, bytes, bool, int)):
330
- raise ValueError("cannot encode objects that are not 2-tuples")
331
-
332
- return OrderedDict(value)
333
-
334
-
335
- def to_key_val_list(value):
336
- """Take an object and test to see if it can be represented as a
337
- dictionary. If it can be, return a list of tuples, e.g.,
338
-
339
- ::
340
-
341
- >>> to_key_val_list([('key', 'val')])
342
- [('key', 'val')]
343
- >>> to_key_val_list({'key': 'val'})
344
- [('key', 'val')]
345
- >>> to_key_val_list('string')
346
- Traceback (most recent call last):
347
- ...
348
- ValueError: cannot encode objects that are not 2-tuples
349
-
350
- :rtype: list
351
- """
352
- if value is None:
353
- return None
354
-
355
- if isinstance(value, (str, bytes, bool, int)):
356
- raise ValueError("cannot encode objects that are not 2-tuples")
357
-
358
- if isinstance(value, Mapping):
359
- value = value.items()
360
-
361
- return list(value)
362
-
363
-
364
- # From mitsuhiko/werkzeug (used with permission).
365
- def parse_list_header(value):
366
- """Parse lists as described by RFC 2068 Section 2.
367
-
368
- In particular, parse comma-separated lists where the elements of
369
- the list may include quoted-strings. A quoted-string could
370
- contain a comma. A non-quoted string could have quotes in the
371
- middle. Quotes are removed automatically after parsing.
372
-
373
- It basically works like :func:`parse_set_header` just that items
374
- may appear multiple times and case sensitivity is preserved.
375
-
376
- The return value is a standard :class:`list`:
377
-
378
- >>> parse_list_header('token, "quoted value"')
379
- ['token', 'quoted value']
380
-
381
- To create a header from the :class:`list` again, use the
382
- :func:`dump_header` function.
383
-
384
- :param value: a string with a list header.
385
- :return: :class:`list`
386
- :rtype: list
387
- """
388
- result = []
389
- for item in _parse_list_header(value):
390
- if item[:1] == item[-1:] == '"':
391
- item = unquote_header_value(item[1:-1])
392
- result.append(item)
393
- return result
394
-
395
-
396
- # From mitsuhiko/werkzeug (used with permission).
397
- def parse_dict_header(value):
398
- """Parse lists of key, value pairs as described by RFC 2068 Section 2 and
399
- convert them into a python dict:
400
-
401
- >>> d = parse_dict_header('foo="is a fish", bar="as well"')
402
- >>> type(d) is dict
403
- True
404
- >>> sorted(d.items())
405
- [('bar', 'as well'), ('foo', 'is a fish')]
406
-
407
- If there is no value for a key it will be `None`:
408
-
409
- >>> parse_dict_header('key_without_value')
410
- {'key_without_value': None}
411
-
412
- To create a header from the :class:`dict` again, use the
413
- :func:`dump_header` function.
414
-
415
- :param value: a string with a dict header.
416
- :return: :class:`dict`
417
- :rtype: dict
418
- """
419
- result = {}
420
- for item in _parse_list_header(value):
421
- if "=" not in item:
422
- result[item] = None
423
- continue
424
- name, value = item.split("=", 1)
425
- if value[:1] == value[-1:] == '"':
426
- value = unquote_header_value(value[1:-1])
427
- result[name] = value
428
- return result
429
-
430
-
431
- # From mitsuhiko/werkzeug (used with permission).
432
- def unquote_header_value(value, is_filename=False):
433
- r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).
434
- This does not use the real unquoting but what browsers are actually
435
- using for quoting.
436
-
437
- :param value: the header value to unquote.
438
- :rtype: str
439
- """
440
- if value and value[0] == value[-1] == '"':
441
- # this is not the real unquoting, but fixing this so that the
442
- # RFC is met will result in bugs with internet explorer and
443
- # probably some other browsers as well. IE for example is
444
- # uploading files with "C:\foo\bar.txt" as filename
445
- value = value[1:-1]
446
-
447
- # if this is a filename and the starting characters look like
448
- # a UNC path, then just return the value without quotes. Using the
449
- # replace sequence below on a UNC path has the effect of turning
450
- # the leading double slash into a single slash and then
451
- # _fix_ie_filename() doesn't work correctly. See #458.
452
- if not is_filename or value[:2] != "\\\\":
453
- return value.replace("\\\\", "\\").replace('\\"', '"')
454
- return value
455
-
456
-
457
- def dict_from_cookiejar(cj):
458
- """Returns a key/value dictionary from a CookieJar.
459
-
460
- :param cj: CookieJar object to extract cookies from.
461
- :rtype: dict
462
- """
463
-
464
- cookie_dict = {}
465
-
466
- for cookie in cj:
467
- cookie_dict[cookie.name] = cookie.value
468
-
469
- return cookie_dict
470
-
471
-
472
- def add_dict_to_cookiejar(cj, cookie_dict):
473
- """Returns a CookieJar from a key/value dictionary.
474
-
475
- :param cj: CookieJar to insert cookies into.
476
- :param cookie_dict: Dict of key/values to insert into CookieJar.
477
- :rtype: CookieJar
478
- """
479
-
480
- return cookiejar_from_dict(cookie_dict, cj)
481
-
482
-
483
- def get_encodings_from_content(content):
484
- """Returns encodings from given content string.
485
-
486
- :param content: bytestring to extract encodings from.
487
- """
488
- warnings.warn(
489
- (
490
- "In requests 3.0, get_encodings_from_content will be removed. For "
491
- "more information, please see the discussion on issue #2266. (This"
492
- " warning should only appear once.)"
493
- ),
494
- DeprecationWarning,
495
- )
496
-
497
- charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I)
498
- pragma_re = re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I)
499
- xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]')
500
-
501
- return (
502
- charset_re.findall(content)
503
- + pragma_re.findall(content)
504
- + xml_re.findall(content)
505
- )
506
-
507
-
508
- def _parse_content_type_header(header):
509
- """Returns content type and parameters from given header
510
-
511
- :param header: string
512
- :return: tuple containing content type and dictionary of
513
- parameters
514
- """
515
-
516
- tokens = header.split(";")
517
- content_type, params = tokens[0].strip(), tokens[1:]
518
- params_dict = {}
519
- items_to_strip = "\"' "
520
-
521
- for param in params:
522
- param = param.strip()
523
- if param:
524
- key, value = param, True
525
- index_of_equals = param.find("=")
526
- if index_of_equals != -1:
527
- key = param[:index_of_equals].strip(items_to_strip)
528
- value = param[index_of_equals + 1 :].strip(items_to_strip)
529
- params_dict[key.lower()] = value
530
- return content_type, params_dict
531
-
532
-
533
- def get_encoding_from_headers(headers):
534
- """Returns encodings from given HTTP Header Dict.
535
-
536
- :param headers: dictionary to extract encoding from.
537
- :rtype: str
538
- """
539
-
540
- content_type = headers.get("content-type")
541
-
542
- if not content_type:
543
- return None
544
-
545
- content_type, params = _parse_content_type_header(content_type)
546
-
547
- if "charset" in params:
548
- return params["charset"].strip("'\"")
549
-
550
- if "text" in content_type:
551
- return "ISO-8859-1"
552
-
553
- if "application/json" in content_type:
554
- # Assume UTF-8 based on RFC 4627: https://www.ietf.org/rfc/rfc4627.txt since the charset was unset
555
- return "utf-8"
556
-
557
-
558
- def stream_decode_response_unicode(iterator, r):
559
- """Stream decodes an iterator."""
560
-
561
- if r.encoding is None:
562
- yield from iterator
563
- return
564
-
565
- decoder = codecs.getincrementaldecoder(r.encoding)(errors="replace")
566
- for chunk in iterator:
567
- rv = decoder.decode(chunk)
568
- if rv:
569
- yield rv
570
- rv = decoder.decode(b"", final=True)
571
- if rv:
572
- yield rv
573
-
574
-
575
- def iter_slices(string, slice_length):
576
- """Iterate over slices of a string."""
577
- pos = 0
578
- if slice_length is None or slice_length <= 0:
579
- slice_length = len(string)
580
- while pos < len(string):
581
- yield string[pos : pos + slice_length]
582
- pos += slice_length
583
-
584
-
585
- def get_unicode_from_response(r):
586
- """Returns the requested content back in unicode.
587
-
588
- :param r: Response object to get unicode content from.
589
-
590
- Tried:
591
-
592
- 1. charset from content-type
593
- 2. fall back and replace all unicode characters
594
-
595
- :rtype: str
596
- """
597
- warnings.warn(
598
- (
599
- "In requests 3.0, get_unicode_from_response will be removed. For "
600
- "more information, please see the discussion on issue #2266. (This"
601
- " warning should only appear once.)"
602
- ),
603
- DeprecationWarning,
604
- )
605
-
606
- tried_encodings = []
607
-
608
- # Try charset from content-type
609
- encoding = get_encoding_from_headers(r.headers)
610
-
611
- if encoding:
612
- try:
613
- return str(r.content, encoding)
614
- except UnicodeError:
615
- tried_encodings.append(encoding)
616
-
617
- # Fall back:
618
- try:
619
- return str(r.content, encoding, errors="replace")
620
- except TypeError:
621
- return r.content
622
-
623
-
624
- # The unreserved URI characters (RFC 3986)
625
- UNRESERVED_SET = frozenset(
626
- "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + "0123456789-._~"
627
- )
628
-
629
-
630
- def unquote_unreserved(uri):
631
- """Un-escape any percent-escape sequences in a URI that are unreserved
632
- characters. This leaves all reserved, illegal and non-ASCII bytes encoded.
633
-
634
- :rtype: str
635
- """
636
- parts = uri.split("%")
637
- for i in range(1, len(parts)):
638
- h = parts[i][0:2]
639
- if len(h) == 2 and h.isalnum():
640
- try:
641
- c = chr(int(h, 16))
642
- except ValueError:
643
- raise InvalidURL(f"Invalid percent-escape sequence: '{h}'")
644
-
645
- if c in UNRESERVED_SET:
646
- parts[i] = c + parts[i][2:]
647
- else:
648
- parts[i] = f"%{parts[i]}"
649
- else:
650
- parts[i] = f"%{parts[i]}"
651
- return "".join(parts)
652
-
653
-
654
- def requote_uri(uri):
655
- """Re-quote the given URI.
656
-
657
- This function passes the given URI through an unquote/quote cycle to
658
- ensure that it is fully and consistently quoted.
659
-
660
- :rtype: str
661
- """
662
- safe_with_percent = "!#$%&'()*+,/:;=?@[]~"
663
- safe_without_percent = "!#$&'()*+,/:;=?@[]~"
664
- try:
665
- # Unquote only the unreserved characters
666
- # Then quote only illegal characters (do not quote reserved,
667
- # unreserved, or '%')
668
- return quote(unquote_unreserved(uri), safe=safe_with_percent)
669
- except InvalidURL:
670
- # We couldn't unquote the given URI, so let's try quoting it, but
671
- # there may be unquoted '%'s in the URI. We need to make sure they're
672
- # properly quoted so they do not cause issues elsewhere.
673
- return quote(uri, safe=safe_without_percent)
674
-
675
-
676
- def address_in_network(ip, net):
677
- """This function allows you to check if an IP belongs to a network subnet
678
-
679
- Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24
680
- returns False if ip = 192.168.1.1 and net = 192.168.100.0/24
681
-
682
- :rtype: bool
683
- """
684
- ipaddr = struct.unpack("=L", socket.inet_aton(ip))[0]
685
- netaddr, bits = net.split("/")
686
- netmask = struct.unpack("=L", socket.inet_aton(dotted_netmask(int(bits))))[0]
687
- network = struct.unpack("=L", socket.inet_aton(netaddr))[0] & netmask
688
- return (ipaddr & netmask) == (network & netmask)
689
-
690
-
691
- def dotted_netmask(mask):
692
- """Converts mask from /xx format to xxx.xxx.xxx.xxx
693
-
694
- Example: if mask is 24 function returns 255.255.255.0
695
-
696
- :rtype: str
697
- """
698
- bits = 0xFFFFFFFF ^ (1 << 32 - mask) - 1
699
- return socket.inet_ntoa(struct.pack(">I", bits))
700
-
701
-
702
- def is_ipv4_address(string_ip):
703
- """
704
- :rtype: bool
705
- """
706
- try:
707
- socket.inet_aton(string_ip)
708
- except OSError:
709
- return False
710
- return True
711
-
712
-
713
- def is_valid_cidr(string_network):
714
- """
715
- Very simple check of the cidr format in no_proxy variable.
716
-
717
- :rtype: bool
718
- """
719
- if string_network.count("/") == 1:
720
- try:
721
- mask = int(string_network.split("/")[1])
722
- except ValueError:
723
- return False
724
-
725
- if mask < 1 or mask > 32:
726
- return False
727
-
728
- try:
729
- socket.inet_aton(string_network.split("/")[0])
730
- except OSError:
731
- return False
732
- else:
733
- return False
734
- return True
735
-
736
-
737
- @contextlib.contextmanager
738
- def set_environ(env_name, value):
739
- """Set the environment variable 'env_name' to 'value'
740
-
741
- Save previous value, yield, and then restore the previous value stored in
742
- the environment variable 'env_name'.
743
-
744
- If 'value' is None, do nothing"""
745
- value_changed = value is not None
746
- if value_changed:
747
- old_value = os.environ.get(env_name)
748
- os.environ[env_name] = value
749
- try:
750
- yield
751
- finally:
752
- if value_changed:
753
- if old_value is None:
754
- del os.environ[env_name]
755
- else:
756
- os.environ[env_name] = old_value
757
-
758
-
759
- def should_bypass_proxies(url, no_proxy):
760
- """
761
- Returns whether we should bypass proxies or not.
762
-
763
- :rtype: bool
764
- """
765
- # Prioritize lowercase environment variables over uppercase
766
- # to keep a consistent behaviour with other http projects (curl, wget).
767
- def get_proxy(key):
768
- return os.environ.get(key) or os.environ.get(key.upper())
769
-
770
- # First check whether no_proxy is defined. If it is, check that the URL
771
- # we're getting isn't in the no_proxy list.
772
- no_proxy_arg = no_proxy
773
- if no_proxy is None:
774
- no_proxy = get_proxy("no_proxy")
775
- parsed = urlparse(url)
776
-
777
- if parsed.hostname is None:
778
- # URLs don't always have hostnames, e.g. file:/// urls.
779
- return True
780
-
781
- if no_proxy:
782
- # We need to check whether we match here. We need to see if we match
783
- # the end of the hostname, both with and without the port.
784
- no_proxy = (host for host in no_proxy.replace(" ", "").split(",") if host)
785
-
786
- if is_ipv4_address(parsed.hostname):
787
- for proxy_ip in no_proxy:
788
- if is_valid_cidr(proxy_ip):
789
- if address_in_network(parsed.hostname, proxy_ip):
790
- return True
791
- elif parsed.hostname == proxy_ip:
792
- # If no_proxy ip was defined in plain IP notation instead of cidr notation &
793
- # matches the IP of the index
794
- return True
795
- else:
796
- host_with_port = parsed.hostname
797
- if parsed.port:
798
- host_with_port += f":{parsed.port}"
799
-
800
- for host in no_proxy:
801
- if parsed.hostname.endswith(host) or host_with_port.endswith(host):
802
- # The URL does match something in no_proxy, so we don't want
803
- # to apply the proxies on this URL.
804
- return True
805
-
806
- with set_environ("no_proxy", no_proxy_arg):
807
- # parsed.hostname can be `None` in cases such as a file URI.
808
- try:
809
- bypass = proxy_bypass(parsed.hostname)
810
- except (TypeError, socket.gaierror):
811
- bypass = False
812
-
813
- if bypass:
814
- return True
815
-
816
- return False
817
-
818
-
819
- def get_environ_proxies(url, no_proxy=None):
820
- """
821
- Return a dict of environment proxies.
822
-
823
- :rtype: dict
824
- """
825
- if should_bypass_proxies(url, no_proxy=no_proxy):
826
- return {}
827
- else:
828
- return getproxies()
829
-
830
-
831
- def select_proxy(url, proxies):
832
- """Select a proxy for the url, if applicable.
833
-
834
- :param url: The url being for the request
835
- :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs
836
- """
837
- proxies = proxies or {}
838
- urlparts = urlparse(url)
839
- if urlparts.hostname is None:
840
- return proxies.get(urlparts.scheme, proxies.get("all"))
841
-
842
- proxy_keys = [
843
- urlparts.scheme + "://" + urlparts.hostname,
844
- urlparts.scheme,
845
- "all://" + urlparts.hostname,
846
- "all",
847
- ]
848
- proxy = None
849
- for proxy_key in proxy_keys:
850
- if proxy_key in proxies:
851
- proxy = proxies[proxy_key]
852
- break
853
-
854
- return proxy
855
-
856
-
857
- def resolve_proxies(request, proxies, trust_env=True):
858
- """This method takes proxy information from a request and configuration
859
- input to resolve a mapping of target proxies. This will consider settings
860
- such a NO_PROXY to strip proxy configurations.
861
-
862
- :param request: Request or PreparedRequest
863
- :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs
864
- :param trust_env: Boolean declaring whether to trust environment configs
865
-
866
- :rtype: dict
867
- """
868
- proxies = proxies if proxies is not None else {}
869
- url = request.url
870
- scheme = urlparse(url).scheme
871
- no_proxy = proxies.get("no_proxy")
872
- new_proxies = proxies.copy()
873
-
874
- if trust_env and not should_bypass_proxies(url, no_proxy=no_proxy):
875
- environ_proxies = get_environ_proxies(url, no_proxy=no_proxy)
876
-
877
- proxy = environ_proxies.get(scheme, environ_proxies.get("all"))
878
-
879
- if proxy:
880
- new_proxies.setdefault(scheme, proxy)
881
- return new_proxies
882
-
883
-
884
- def default_user_agent(name="python-requests"):
885
- """
886
- Return a string representing the default user agent.
887
-
888
- :rtype: str
889
- """
890
- return f"{name}/{__version__}"
891
-
892
-
893
- def default_headers():
894
- """
895
- :rtype: requests.structures.CaseInsensitiveDict
896
- """
897
- return CaseInsensitiveDict(
898
- {
899
- "User-Agent": default_user_agent(),
900
- "Accept-Encoding": DEFAULT_ACCEPT_ENCODING,
901
- "Accept": "*/*",
902
- "Connection": "keep-alive",
903
- }
904
- )
905
-
906
-
907
- def parse_header_links(value):
908
- """Return a list of parsed link headers proxies.
909
-
910
- i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"
911
-
912
- :rtype: list
913
- """
914
-
915
- links = []
916
-
917
- replace_chars = " '\""
918
-
919
- value = value.strip(replace_chars)
920
- if not value:
921
- return links
922
-
923
- for val in re.split(", *<", value):
924
- try:
925
- url, params = val.split(";", 1)
926
- except ValueError:
927
- url, params = val, ""
928
-
929
- link = {"url": url.strip("<> '\"")}
930
-
931
- for param in params.split(";"):
932
- try:
933
- key, value = param.split("=")
934
- except ValueError:
935
- break
936
-
937
- link[key.strip(replace_chars)] = value.strip(replace_chars)
938
-
939
- links.append(link)
940
-
941
- return links
942
-
943
-
944
- # Null bytes; no need to recreate these on each call to guess_json_utf
945
- _null = "\x00".encode("ascii") # encoding to ASCII for Python 3
946
- _null2 = _null * 2
947
- _null3 = _null * 3
948
-
949
-
950
- def guess_json_utf(data):
951
- """
952
- :rtype: str
953
- """
954
- # JSON always starts with two ASCII characters, so detection is as
955
- # easy as counting the nulls and from their location and count
956
- # determine the encoding. Also detect a BOM, if present.
957
- sample = data[:4]
958
- if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
959
- return "utf-32" # BOM included
960
- if sample[:3] == codecs.BOM_UTF8:
961
- return "utf-8-sig" # BOM included, MS style (discouraged)
962
- if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
963
- return "utf-16" # BOM included
964
- nullcount = sample.count(_null)
965
- if nullcount == 0:
966
- return "utf-8"
967
- if nullcount == 2:
968
- if sample[::2] == _null2: # 1st and 3rd are null
969
- return "utf-16-be"
970
- if sample[1::2] == _null2: # 2nd and 4th are null
971
- return "utf-16-le"
972
- # Did not detect 2 valid UTF-16 ascii-range characters
973
- if nullcount == 3:
974
- if sample[:3] == _null3:
975
- return "utf-32-be"
976
- if sample[1:] == _null3:
977
- return "utf-32-le"
978
- # Did not detect a valid UTF-32 ascii-range character
979
- return None
980
-
981
-
982
- def prepend_scheme_if_needed(url, new_scheme):
983
- """Given a URL that may or may not have a scheme, prepend the given scheme.
984
- Does not replace a present scheme with the one provided as an argument.
985
-
986
- :rtype: str
987
- """
988
- parsed = parse_url(url)
989
- scheme, auth, host, port, path, query, fragment = parsed
990
-
991
- # A defect in urlparse determines that there isn't a netloc present in some
992
- # urls. We previously assumed parsing was overly cautious, and swapped the
993
- # netloc and path. Due to a lack of tests on the original defect, this is
994
- # maintained with parse_url for backwards compatibility.
995
- netloc = parsed.netloc
996
- if not netloc:
997
- netloc, path = path, netloc
998
-
999
- if auth:
1000
- # parse_url doesn't provide the netloc with auth
1001
- # so we'll add it ourselves.
1002
- netloc = "@".join([auth, netloc])
1003
- if scheme is None:
1004
- scheme = new_scheme
1005
- if path is None:
1006
- path = ""
1007
-
1008
- return urlunparse((scheme, netloc, path, "", query, fragment))
1009
-
1010
-
1011
- def get_auth_from_url(url):
1012
- """Given a url with authentication components, extract them into a tuple of
1013
- username,password.
1014
-
1015
- :rtype: (str,str)
1016
- """
1017
- parsed = urlparse(url)
1018
-
1019
- try:
1020
- auth = (unquote(parsed.username), unquote(parsed.password))
1021
- except (AttributeError, TypeError):
1022
- auth = ("", "")
1023
-
1024
- return auth
1025
-
1026
-
1027
- def check_header_validity(header):
1028
- """Verifies that header parts don't contain leading whitespace
1029
- reserved characters, or return characters.
1030
-
1031
- :param header: tuple, in the format (name, value).
1032
- """
1033
- name, value = header
1034
-
1035
- for part in header:
1036
- if type(part) not in HEADER_VALIDATORS:
1037
- raise InvalidHeader(
1038
- f"Header part ({part!r}) from {{{name!r}: {value!r}}} must be "
1039
- f"of type str or bytes, not {type(part)}"
1040
- )
1041
-
1042
- _validate_header_part(name, "name", HEADER_VALIDATORS[type(name)][0])
1043
- _validate_header_part(value, "value", HEADER_VALIDATORS[type(value)][1])
1044
-
1045
-
1046
- def _validate_header_part(header_part, header_kind, validator):
1047
- if not validator.match(header_part):
1048
- raise InvalidHeader(
1049
- f"Invalid leading whitespace, reserved character(s), or return"
1050
- f"character(s) in header {header_kind}: {header_part!r}"
1051
- )
1052
-
1053
-
1054
- def urldefragauth(url):
1055
- """
1056
- Given a url remove the fragment and the authentication part.
1057
-
1058
- :rtype: str
1059
- """
1060
- scheme, netloc, path, params, query, fragment = urlparse(url)
1061
-
1062
- # see func:`prepend_scheme_if_needed`
1063
- if not netloc:
1064
- netloc, path = path, netloc
1065
-
1066
- netloc = netloc.rsplit("@", 1)[-1]
1067
-
1068
- return urlunparse((scheme, netloc, path, params, query, ""))
1069
-
1070
-
1071
- def rewind_body(prepared_request):
1072
- """Move file pointer back to its recorded starting position
1073
- so it can be read again on redirect.
1074
- """
1075
- body_seek = getattr(prepared_request.body, "seek", None)
1076
- if body_seek is not None and isinstance(
1077
- prepared_request._body_position, integer_types
1078
- ):
1079
- try:
1080
- body_seek(prepared_request._body_position)
1081
- except OSError:
1082
- raise UnrewindableBodyError(
1083
- "An error occurred when rewinding request body for redirect."
1084
- )
1085
- else:
1086
- raise UnrewindableBodyError("Unable to rewind request body for redirect.")
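
The helpers removed above are the same ones exposed by the standalone requests package as requests.utils, so a minimal usage sketch (assuming requests is installed; the expected outputs in the comments follow the docstrings shown in this file) looks roughly like this:

from requests import utils

# Parse an RFC 2068-style key/value header into a plain dict
print(utils.parse_dict_header('foo="is a fish", bar="as well"'))
# {'foo': 'is a fish', 'bar': 'as well'}

# Convert a /24 prefix length to a dotted netmask and test subnet membership
print(utils.dotted_netmask(24))                                    # 255.255.255.0
print(utils.address_in_network("192.168.1.1", "192.168.1.0/24"))   # True

# Guess the UTF flavour of a JSON byte string from its leading bytes
print(utils.guess_json_utf(b'{"key": "value"}'))                   # utf-8
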
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/command/build_ext.py DELETED
@@ -1,787 +0,0 @@
1
- """distutils.command.build_ext
2
-
3
- Implements the Distutils 'build_ext' command, for building extension
4
- modules (currently limited to C extensions, should accommodate C++
5
- extensions ASAP)."""
6
-
7
- import contextlib
8
- import os
9
- import re
10
- import sys
11
- from distutils.core import Command
12
- from distutils.errors import (
13
- DistutilsOptionError,
14
- DistutilsSetupError,
15
- CCompilerError,
16
- DistutilsError,
17
- CompileError,
18
- DistutilsPlatformError,
19
- )
20
- from distutils.sysconfig import customize_compiler, get_python_version
21
- from distutils.sysconfig import get_config_h_filename
22
- from distutils.dep_util import newer_group
23
- from distutils.extension import Extension
24
- from distutils.util import get_platform
25
- from distutils import log
26
- from . import py37compat
27
-
28
- from site import USER_BASE
29
-
30
- # An extension name is just a dot-separated list of Python NAMEs (ie.
31
- # the same as a fully-qualified module name).
32
- extension_name_re = re.compile(r'^[a-zA-Z_][a-zA-Z_0-9]*(\.[a-zA-Z_][a-zA-Z_0-9]*)*$')
33
-
34
-
35
- def show_compilers():
36
- from distutils.ccompiler import show_compilers
37
-
38
- show_compilers()
39
-
40
-
41
- class build_ext(Command):
42
-
43
- description = "build C/C++ extensions (compile/link to build directory)"
44
-
45
- # XXX thoughts on how to deal with complex command-line options like
46
- # these, i.e. how to make it so fancy_getopt can suck them off the
47
- # command line and make it look like setup.py defined the appropriate
48
- # lists of tuples of what-have-you.
49
- # - each command needs a callback to process its command-line options
50
- # - Command.__init__() needs access to its share of the whole
51
- # command line (must ultimately come from
52
- # Distribution.parse_command_line())
53
- # - it then calls the current command class' option-parsing
54
- # callback to deal with weird options like -D, which have to
55
- # parse the option text and churn out some custom data
56
- # structure
57
- # - that data structure (in this case, a list of 2-tuples)
58
- # will then be present in the command object by the time
59
- # we get to finalize_options() (i.e. the constructor
60
- # takes care of both command-line and client options
61
- # in between initialize_options() and finalize_options())
62
-
63
- sep_by = " (separated by '%s')" % os.pathsep
64
- user_options = [
65
- ('build-lib=', 'b', "directory for compiled extension modules"),
66
- ('build-temp=', 't', "directory for temporary files (build by-products)"),
67
- (
68
- 'plat-name=',
69
- 'p',
70
- "platform name to cross-compile for, if supported "
71
- "(default: %s)" % get_platform(),
72
- ),
73
- (
74
- 'inplace',
75
- 'i',
76
- "ignore build-lib and put compiled extensions into the source "
77
- + "directory alongside your pure Python modules",
78
- ),
79
- (
80
- 'include-dirs=',
81
- 'I',
82
- "list of directories to search for header files" + sep_by,
83
- ),
84
- ('define=', 'D', "C preprocessor macros to define"),
85
- ('undef=', 'U', "C preprocessor macros to undefine"),
86
- ('libraries=', 'l', "external C libraries to link with"),
87
- (
88
- 'library-dirs=',
89
- 'L',
90
- "directories to search for external C libraries" + sep_by,
91
- ),
92
- ('rpath=', 'R', "directories to search for shared C libraries at runtime"),
93
- ('link-objects=', 'O', "extra explicit link objects to include in the link"),
94
- ('debug', 'g', "compile/link with debugging information"),
95
- ('force', 'f', "forcibly build everything (ignore file timestamps)"),
96
- ('compiler=', 'c', "specify the compiler type"),
97
- ('parallel=', 'j', "number of parallel build jobs"),
98
- ('swig-cpp', None, "make SWIG create C++ files (default is C)"),
99
- ('swig-opts=', None, "list of SWIG command line options"),
100
- ('swig=', None, "path to the SWIG executable"),
101
- ('user', None, "add user include, library and rpath"),
102
- ]
103
-
104
- boolean_options = ['inplace', 'debug', 'force', 'swig-cpp', 'user']
105
-
106
- help_options = [
107
- ('help-compiler', None, "list available compilers", show_compilers),
108
- ]
109
-
110
- def initialize_options(self):
111
- self.extensions = None
112
- self.build_lib = None
113
- self.plat_name = None
114
- self.build_temp = None
115
- self.inplace = 0
116
- self.package = None
117
-
118
- self.include_dirs = None
119
- self.define = None
120
- self.undef = None
121
- self.libraries = None
122
- self.library_dirs = None
123
- self.rpath = None
124
- self.link_objects = None
125
- self.debug = None
126
- self.force = None
127
- self.compiler = None
128
- self.swig = None
129
- self.swig_cpp = None
130
- self.swig_opts = None
131
- self.user = None
132
- self.parallel = None
133
-
134
- def finalize_options(self): # noqa: C901
135
- from distutils import sysconfig
136
-
137
- self.set_undefined_options(
138
- 'build',
139
- ('build_lib', 'build_lib'),
140
- ('build_temp', 'build_temp'),
141
- ('compiler', 'compiler'),
142
- ('debug', 'debug'),
143
- ('force', 'force'),
144
- ('parallel', 'parallel'),
145
- ('plat_name', 'plat_name'),
146
- )
147
-
148
- if self.package is None:
149
- self.package = self.distribution.ext_package
150
-
151
- self.extensions = self.distribution.ext_modules
152
-
153
- # Make sure Python's include directories (for Python.h, pyconfig.h,
154
- # etc.) are in the include search path.
155
- py_include = sysconfig.get_python_inc()
156
- plat_py_include = sysconfig.get_python_inc(plat_specific=1)
157
- if self.include_dirs is None:
158
- self.include_dirs = self.distribution.include_dirs or []
159
- if isinstance(self.include_dirs, str):
160
- self.include_dirs = self.include_dirs.split(os.pathsep)
161
-
162
- # If in a virtualenv, add its include directory
163
- # Issue 16116
164
- if sys.exec_prefix != sys.base_exec_prefix:
165
- self.include_dirs.append(os.path.join(sys.exec_prefix, 'include'))
166
-
167
- # Put the Python "system" include dir at the end, so that
168
- # any local include dirs take precedence.
169
- self.include_dirs.extend(py_include.split(os.path.pathsep))
170
- if plat_py_include != py_include:
171
- self.include_dirs.extend(plat_py_include.split(os.path.pathsep))
172
-
173
- self.ensure_string_list('libraries')
174
- self.ensure_string_list('link_objects')
175
-
176
- # Life is easier if we're not forever checking for None, so
177
- # simplify these options to empty lists if unset
178
- if self.libraries is None:
179
- self.libraries = []
180
- if self.library_dirs is None:
181
- self.library_dirs = []
182
- elif isinstance(self.library_dirs, str):
183
- self.library_dirs = self.library_dirs.split(os.pathsep)
184
-
185
- if self.rpath is None:
186
- self.rpath = []
187
- elif isinstance(self.rpath, str):
188
- self.rpath = self.rpath.split(os.pathsep)
189
-
190
- # for extensions under windows use different directories
191
- # for Release and Debug builds.
192
- # also Python's library directory must be appended to library_dirs
193
- if os.name == 'nt':
194
- # the 'libs' directory is for binary installs - we assume that
195
- # must be the *native* platform. But we don't really support
196
- # cross-compiling via a binary install anyway, so we let it go.
197
- self.library_dirs.append(os.path.join(sys.exec_prefix, 'libs'))
198
- if sys.base_exec_prefix != sys.prefix: # Issue 16116
199
- self.library_dirs.append(os.path.join(sys.base_exec_prefix, 'libs'))
200
- if self.debug:
201
- self.build_temp = os.path.join(self.build_temp, "Debug")
202
- else:
203
- self.build_temp = os.path.join(self.build_temp, "Release")
204
-
205
- # Append the source distribution include and library directories,
206
- # this allows distutils on windows to work in the source tree
207
- self.include_dirs.append(os.path.dirname(get_config_h_filename()))
208
- self.library_dirs.append(sys.base_exec_prefix)
209
-
210
- # Use the .lib files for the correct architecture
211
- if self.plat_name == 'win32':
212
- suffix = 'win32'
213
- else:
214
- # win-amd64
215
- suffix = self.plat_name[4:]
216
- new_lib = os.path.join(sys.exec_prefix, 'PCbuild')
217
- if suffix:
218
- new_lib = os.path.join(new_lib, suffix)
219
- self.library_dirs.append(new_lib)
220
-
221
- # For extensions under Cygwin, Python's library directory must be
222
- # appended to library_dirs
223
- if sys.platform[:6] == 'cygwin':
224
- if not sysconfig.python_build:
225
- # building third party extensions
226
- self.library_dirs.append(
227
- os.path.join(
228
- sys.prefix, "lib", "python" + get_python_version(), "config"
229
- )
230
- )
231
- else:
232
- # building python standard extensions
233
- self.library_dirs.append('.')
234
-
235
- # For building extensions with a shared Python library,
236
- # Python's library directory must be appended to library_dirs
237
- # See Issues: #1600860, #4366
238
- if sysconfig.get_config_var('Py_ENABLE_SHARED'):
239
- if not sysconfig.python_build:
240
- # building third party extensions
241
- self.library_dirs.append(sysconfig.get_config_var('LIBDIR'))
242
- else:
243
- # building python standard extensions
244
- self.library_dirs.append('.')
245
-
246
- # The argument parsing will result in self.define being a string, but
247
- # it has to be a list of 2-tuples. All the preprocessor symbols
248
- # specified by the 'define' option will be set to '1'. Multiple
249
- # symbols can be separated with commas.
250
-
251
- if self.define:
252
- defines = self.define.split(',')
253
- self.define = [(symbol, '1') for symbol in defines]
254
-
255
- # The option for macros to undefine is also a string from the
256
- # option parsing, but has to be a list. Multiple symbols can also
257
- # be separated with commas here.
258
- if self.undef:
259
- self.undef = self.undef.split(',')
260
-
261
- if self.swig_opts is None:
262
- self.swig_opts = []
263
- else:
264
- self.swig_opts = self.swig_opts.split(' ')
265
-
266
- # Finally add the user include and library directories if requested
267
- if self.user:
268
- user_include = os.path.join(USER_BASE, "include")
269
- user_lib = os.path.join(USER_BASE, "lib")
270
- if os.path.isdir(user_include):
271
- self.include_dirs.append(user_include)
272
- if os.path.isdir(user_lib):
273
- self.library_dirs.append(user_lib)
274
- self.rpath.append(user_lib)
275
-
276
- if isinstance(self.parallel, str):
277
- try:
278
- self.parallel = int(self.parallel)
279
- except ValueError:
280
- raise DistutilsOptionError("parallel should be an integer")
281
-
282
- def run(self): # noqa: C901
283
- from distutils.ccompiler import new_compiler
284
-
285
- # 'self.extensions', as supplied by setup.py, is a list of
286
- # Extension instances. See the documentation for Extension (in
287
- # distutils.extension) for details.
288
- #
289
- # For backwards compatibility with Distutils 0.8.2 and earlier, we
290
- # also allow the 'extensions' list to be a list of tuples:
291
- # (ext_name, build_info)
292
- # where build_info is a dictionary containing everything that
293
- # Extension instances do except the name, with a few things being
294
- # differently named. We convert these 2-tuples to Extension
295
- # instances as needed.
296
-
297
- if not self.extensions:
298
- return
299
-
300
- # If we were asked to build any C/C++ libraries, make sure that the
301
- # directory where we put them is in the library search path for
302
- # linking extensions.
303
- if self.distribution.has_c_libraries():
304
- build_clib = self.get_finalized_command('build_clib')
305
- self.libraries.extend(build_clib.get_library_names() or [])
306
- self.library_dirs.append(build_clib.build_clib)
307
-
308
- # Setup the CCompiler object that we'll use to do all the
309
- # compiling and linking
310
- self.compiler = new_compiler(
311
- compiler=self.compiler,
312
- verbose=self.verbose,
313
- dry_run=self.dry_run,
314
- force=self.force,
315
- )
316
- customize_compiler(self.compiler)
317
- # If we are cross-compiling, init the compiler now (if we are not
318
- # cross-compiling, init would not hurt, but people may rely on
319
- # late initialization of compiler even if they shouldn't...)
320
- if os.name == 'nt' and self.plat_name != get_platform():
321
- self.compiler.initialize(self.plat_name)
322
-
323
- # And make sure that any compile/link-related options (which might
324
- # come from the command-line or from the setup script) are set in
325
- # that CCompiler object -- that way, they automatically apply to
326
- # all compiling and linking done here.
327
- if self.include_dirs is not None:
328
- self.compiler.set_include_dirs(self.include_dirs)
329
- if self.define is not None:
330
- # 'define' option is a list of (name,value) tuples
331
- for (name, value) in self.define:
332
- self.compiler.define_macro(name, value)
333
- if self.undef is not None:
334
- for macro in self.undef:
335
- self.compiler.undefine_macro(macro)
336
- if self.libraries is not None:
337
- self.compiler.set_libraries(self.libraries)
338
- if self.library_dirs is not None:
339
- self.compiler.set_library_dirs(self.library_dirs)
340
- if self.rpath is not None:
341
- self.compiler.set_runtime_library_dirs(self.rpath)
342
- if self.link_objects is not None:
343
- self.compiler.set_link_objects(self.link_objects)
344
-
345
- # Now actually compile and link everything.
346
- self.build_extensions()
347
-
348
- def check_extensions_list(self, extensions): # noqa: C901
349
- """Ensure that the list of extensions (presumably provided as a
350
- command option 'extensions') is valid, i.e. it is a list of
351
- Extension objects. We also support the old-style list of 2-tuples,
352
- where the tuples are (ext_name, build_info), which are converted to
353
- Extension instances here.
354
-
355
- Raise DistutilsSetupError if the structure is invalid anywhere;
356
- just returns otherwise.
357
- """
358
- if not isinstance(extensions, list):
359
- raise DistutilsSetupError(
360
- "'ext_modules' option must be a list of Extension instances"
361
- )
362
-
363
- for i, ext in enumerate(extensions):
364
- if isinstance(ext, Extension):
365
- continue # OK! (assume type-checking done
366
- # by Extension constructor)
367
-
368
- if not isinstance(ext, tuple) or len(ext) != 2:
369
- raise DistutilsSetupError(
370
- "each element of 'ext_modules' option must be an "
371
- "Extension instance or 2-tuple"
372
- )
373
-
374
- ext_name, build_info = ext
375
-
376
- log.warn(
377
- "old-style (ext_name, build_info) tuple found in "
378
- "ext_modules for extension '%s' "
379
- "-- please convert to Extension instance",
380
- ext_name,
381
- )
382
-
383
- if not (isinstance(ext_name, str) and extension_name_re.match(ext_name)):
384
- raise DistutilsSetupError(
385
- "first element of each tuple in 'ext_modules' "
386
- "must be the extension name (a string)"
387
- )
388
-
389
- if not isinstance(build_info, dict):
390
- raise DistutilsSetupError(
391
- "second element of each tuple in 'ext_modules' "
392
- "must be a dictionary (build info)"
393
- )
394
-
395
- # OK, the (ext_name, build_info) dict is type-safe: convert it
396
- # to an Extension instance.
397
- ext = Extension(ext_name, build_info['sources'])
398
-
399
- # Easy stuff: one-to-one mapping from dict elements to
400
- # instance attributes.
401
- for key in (
402
- 'include_dirs',
403
- 'library_dirs',
404
- 'libraries',
405
- 'extra_objects',
406
- 'extra_compile_args',
407
- 'extra_link_args',
408
- ):
409
- val = build_info.get(key)
410
- if val is not None:
411
- setattr(ext, key, val)
412
-
413
- # Medium-easy stuff: same syntax/semantics, different names.
414
- ext.runtime_library_dirs = build_info.get('rpath')
415
- if 'def_file' in build_info:
416
- log.warn("'def_file' element of build info dict " "no longer supported")
417
-
418
- # Non-trivial stuff: 'macros' split into 'define_macros'
419
- # and 'undef_macros'.
420
- macros = build_info.get('macros')
421
- if macros:
422
- ext.define_macros = []
423
- ext.undef_macros = []
424
- for macro in macros:
425
- if not (isinstance(macro, tuple) and len(macro) in (1, 2)):
426
- raise DistutilsSetupError(
427
- "'macros' element of build info dict "
428
- "must be 1- or 2-tuple"
429
- )
430
- if len(macro) == 1:
431
- ext.undef_macros.append(macro[0])
432
- elif len(macro) == 2:
433
- ext.define_macros.append(macro)
434
-
435
- extensions[i] = ext
436
-
437
- def get_source_files(self):
438
- self.check_extensions_list(self.extensions)
439
- filenames = []
440
-
441
- # Wouldn't it be neat if we knew the names of header files too...
442
- for ext in self.extensions:
443
- filenames.extend(ext.sources)
444
- return filenames
445
-
446
- def get_outputs(self):
447
- # Sanity check the 'extensions' list -- can't assume this is being
448
- # done in the same run as a 'build_extensions()' call (in fact, we
449
- # can probably assume that it *isn't*!).
450
- self.check_extensions_list(self.extensions)
451
-
452
- # And build the list of output (built) filenames. Note that this
453
- # ignores the 'inplace' flag, and assumes everything goes in the
454
- # "build" tree.
455
- outputs = []
456
- for ext in self.extensions:
457
- outputs.append(self.get_ext_fullpath(ext.name))
458
- return outputs
459
-
460
- def build_extensions(self):
461
- # First, sanity-check the 'extensions' list
462
- self.check_extensions_list(self.extensions)
463
- if self.parallel:
464
- self._build_extensions_parallel()
465
- else:
466
- self._build_extensions_serial()
467
-
468
- def _build_extensions_parallel(self):
469
- workers = self.parallel
470
- if self.parallel is True:
471
- workers = os.cpu_count() # may return None
472
- try:
473
- from concurrent.futures import ThreadPoolExecutor
474
- except ImportError:
475
- workers = None
476
-
477
- if workers is None:
478
- self._build_extensions_serial()
479
- return
480
-
481
- with ThreadPoolExecutor(max_workers=workers) as executor:
482
- futures = [
483
- executor.submit(self.build_extension, ext) for ext in self.extensions
484
- ]
485
- for ext, fut in zip(self.extensions, futures):
486
- with self._filter_build_errors(ext):
487
- fut.result()
488
-
489
- def _build_extensions_serial(self):
490
- for ext in self.extensions:
491
- with self._filter_build_errors(ext):
492
- self.build_extension(ext)
493
-
494
- @contextlib.contextmanager
495
- def _filter_build_errors(self, ext):
496
- try:
497
- yield
498
- except (CCompilerError, DistutilsError, CompileError) as e:
499
- if not ext.optional:
500
- raise
501
- self.warn('building extension "{}" failed: {}'.format(ext.name, e))
502
-
503
- def build_extension(self, ext):
504
- sources = ext.sources
505
- if sources is None or not isinstance(sources, (list, tuple)):
506
- raise DistutilsSetupError(
507
- "in 'ext_modules' option (extension '%s'), "
508
- "'sources' must be present and must be "
509
- "a list of source filenames" % ext.name
510
- )
511
- # sort to make the resulting .so file build reproducible
512
- sources = sorted(sources)
513
-
514
- ext_path = self.get_ext_fullpath(ext.name)
515
- depends = sources + ext.depends
516
- if not (self.force or newer_group(depends, ext_path, 'newer')):
517
- log.debug("skipping '%s' extension (up-to-date)", ext.name)
518
- return
519
- else:
520
- log.info("building '%s' extension", ext.name)
521
-
522
- # First, scan the sources for SWIG definition files (.i), run
523
- # SWIG on 'em to create .c files, and modify the sources list
524
- # accordingly.
525
- sources = self.swig_sources(sources, ext)
526
-
527
- # Next, compile the source code to object files.
528
-
529
- # XXX not honouring 'define_macros' or 'undef_macros' -- the
530
- # CCompiler API needs to change to accommodate this, and I
531
- # want to do one thing at a time!
532
-
533
- # Two possible sources for extra compiler arguments:
534
- # - 'extra_compile_args' in Extension object
535
- # - CFLAGS environment variable (not particularly
536
- # elegant, but people seem to expect it and I
537
- # guess it's useful)
538
- # The environment variable should take precedence, and
539
- # any sensible compiler will give precedence to later
540
- # command line args. Hence we combine them in order:
541
- extra_args = ext.extra_compile_args or []
542
-
543
- macros = ext.define_macros[:]
544
- for undef in ext.undef_macros:
545
- macros.append((undef,))
546
-
547
- objects = self.compiler.compile(
548
- sources,
549
- output_dir=self.build_temp,
550
- macros=macros,
551
- include_dirs=ext.include_dirs,
552
- debug=self.debug,
553
- extra_postargs=extra_args,
554
- depends=ext.depends,
555
- )
556
-
557
- # XXX outdated variable, kept here in case third-part code
558
- # needs it.
559
- self._built_objects = objects[:]
560
-
561
- # Now link the object files together into a "shared object" --
562
- # of course, first we have to figure out all the other things
563
- # that go into the mix.
564
- if ext.extra_objects:
565
- objects.extend(ext.extra_objects)
566
- extra_args = ext.extra_link_args or []
567
-
568
- # Detect target language, if not provided
569
- language = ext.language or self.compiler.detect_language(sources)
570
-
571
- self.compiler.link_shared_object(
572
- objects,
573
- ext_path,
574
- libraries=self.get_libraries(ext),
575
- library_dirs=ext.library_dirs,
576
- runtime_library_dirs=ext.runtime_library_dirs,
577
- extra_postargs=extra_args,
578
- export_symbols=self.get_export_symbols(ext),
579
- debug=self.debug,
580
- build_temp=self.build_temp,
581
- target_lang=language,
582
- )
583
-
584
- def swig_sources(self, sources, extension):
585
- """Walk the list of source files in 'sources', looking for SWIG
586
- interface (.i) files. Run SWIG on all that are found, and
587
- return a modified 'sources' list with SWIG source files replaced
588
- by the generated C (or C++) files.
589
- """
590
- new_sources = []
591
- swig_sources = []
592
- swig_targets = {}
593
-
594
- # XXX this drops generated C/C++ files into the source tree, which
595
- # is fine for developers who want to distribute the generated
596
- # source -- but there should be an option to put SWIG output in
597
- # the temp dir.
598
-
599
- if self.swig_cpp:
600
- log.warn("--swig-cpp is deprecated - use --swig-opts=-c++")
601
-
602
- if (
603
- self.swig_cpp
604
- or ('-c++' in self.swig_opts)
605
- or ('-c++' in extension.swig_opts)
606
- ):
607
- target_ext = '.cpp'
608
- else:
609
- target_ext = '.c'
610
-
611
- for source in sources:
612
- (base, ext) = os.path.splitext(source)
613
- if ext == ".i": # SWIG interface file
614
- new_sources.append(base + '_wrap' + target_ext)
615
- swig_sources.append(source)
616
- swig_targets[source] = new_sources[-1]
617
- else:
618
- new_sources.append(source)
619
-
620
- if not swig_sources:
621
- return new_sources
622
-
623
- swig = self.swig or self.find_swig()
624
- swig_cmd = [swig, "-python"]
625
- swig_cmd.extend(self.swig_opts)
626
- if self.swig_cpp:
627
- swig_cmd.append("-c++")
628
-
629
- # Do not override commandline arguments
630
- if not self.swig_opts:
631
- for o in extension.swig_opts:
632
- swig_cmd.append(o)
633
-
634
- for source in swig_sources:
635
- target = swig_targets[source]
636
- log.info("swigging %s to %s", source, target)
637
- self.spawn(swig_cmd + ["-o", target, source])
638
-
639
- return new_sources
640
-
641
- def find_swig(self):
642
- """Return the name of the SWIG executable. On Unix, this is
643
- just "swig" -- it should be in the PATH. Tries a bit harder on
644
- Windows.
645
- """
646
- if os.name == "posix":
647
- return "swig"
648
- elif os.name == "nt":
649
- # Look for SWIG in its standard installation directory on
650
- # Windows (or so I presume!). If we find it there, great;
651
- # if not, act like Unix and assume it's in the PATH.
652
- for vers in ("1.3", "1.2", "1.1"):
653
- fn = os.path.join("c:\\swig%s" % vers, "swig.exe")
654
- if os.path.isfile(fn):
655
- return fn
656
- else:
657
- return "swig.exe"
658
- else:
659
- raise DistutilsPlatformError(
660
- "I don't know how to find (much less run) SWIG "
661
- "on platform '%s'" % os.name
662
- )
663
-
664
- # -- Name generators -----------------------------------------------
665
- # (extension names, filenames, whatever)
666
- def get_ext_fullpath(self, ext_name):
667
- """Returns the path of the filename for a given extension.
668
-
669
- The file is located in `build_lib` or directly in the package
670
- (inplace option).
671
- """
672
- fullname = self.get_ext_fullname(ext_name)
673
- modpath = fullname.split('.')
674
- filename = self.get_ext_filename(modpath[-1])
675
-
676
- if not self.inplace:
677
- # no further work needed
678
- # returning :
679
- # build_dir/package/path/filename
680
- filename = os.path.join(*modpath[:-1] + [filename])
681
- return os.path.join(self.build_lib, filename)
682
-
683
- # the inplace option requires to find the package directory
684
- # using the build_py command for that
685
- package = '.'.join(modpath[0:-1])
686
- build_py = self.get_finalized_command('build_py')
687
- package_dir = os.path.abspath(build_py.get_package_dir(package))
688
-
689
- # returning
690
- # package_dir/filename
691
- return os.path.join(package_dir, filename)
692
-
693
- def get_ext_fullname(self, ext_name):
694
- """Returns the fullname of a given extension name.
695
-
696
- Adds the `package.` prefix"""
697
- if self.package is None:
698
- return ext_name
699
- else:
700
- return self.package + '.' + ext_name
701
-
702
- def get_ext_filename(self, ext_name):
703
- r"""Convert the name of an extension (eg. "foo.bar") into the name
704
- of the file from which it will be loaded (eg. "foo/bar.so", or
705
- "foo\bar.pyd").
706
- """
707
- from distutils.sysconfig import get_config_var
708
-
709
- ext_path = ext_name.split('.')
710
- ext_suffix = get_config_var('EXT_SUFFIX')
711
- return os.path.join(*ext_path) + ext_suffix
712
-
713
- def get_export_symbols(self, ext):
714
- """Return the list of symbols that a shared extension has to
715
- export. This either uses 'ext.export_symbols' or, if it's not
716
- provided, "PyInit_" + module_name. Only relevant on Windows, where
717
- the .pyd file (DLL) must export the module "PyInit_" function.
718
- """
719
- name = ext.name.split('.')[-1]
720
- try:
721
- # Unicode module name support as defined in PEP-489
722
- # https://www.python.org/dev/peps/pep-0489/#export-hook-name
723
- name.encode('ascii')
724
- except UnicodeEncodeError:
725
- suffix = 'U_' + name.encode('punycode').replace(b'-', b'_').decode('ascii')
726
- else:
727
- suffix = "_" + name
728
-
729
- initfunc_name = "PyInit" + suffix
730
- if initfunc_name not in ext.export_symbols:
731
- ext.export_symbols.append(initfunc_name)
732
- return ext.export_symbols
733
-
734
- def get_libraries(self, ext): # noqa: C901
735
- """Return the list of libraries to link against when building a
736
- shared extension. On most platforms, this is just 'ext.libraries';
737
- on Windows, we add the Python library (eg. python20.dll).
738
- """
739
- # The python library is always needed on Windows. For MSVC, this
740
- # is redundant, since the library is mentioned in a pragma in
741
- # pyconfig.h that MSVC groks. The other Windows compilers all seem
742
- # to need it mentioned explicitly, though, so that's what we do.
743
- # Append '_d' to the python import library on debug builds.
744
- if sys.platform == "win32":
745
- from distutils._msvccompiler import MSVCCompiler
746
-
747
- if not isinstance(self.compiler, MSVCCompiler):
748
- template = "python%d%d"
749
- if self.debug:
750
- template = template + '_d'
751
- pythonlib = template % (
752
- sys.hexversion >> 24,
753
- (sys.hexversion >> 16) & 0xFF,
754
- )
755
- # don't extend ext.libraries, it may be shared with other
756
- # extensions, it is a reference to the original list
757
- return ext.libraries + [pythonlib]
758
- else:
759
- # On Android only the main executable and LD_PRELOADs are considered
760
- # to be RTLD_GLOBAL, all the dependencies of the main executable
761
- # remain RTLD_LOCAL and so the shared libraries must be linked with
762
- # libpython when python is built with a shared python library (issue
763
- # bpo-21536).
764
- # On Cygwin (and if required, other POSIX-like platforms based on
765
- # Windows like MinGW) it is simply necessary that all symbols in
766
- # shared libraries are resolved at link time.
767
- from distutils.sysconfig import get_config_var
768
-
769
- link_libpython = False
770
- if get_config_var('Py_ENABLE_SHARED'):
771
- # A native build on an Android device or on Cygwin
772
- if hasattr(sys, 'getandroidapilevel'):
773
- link_libpython = True
774
- elif sys.platform == 'cygwin':
775
- link_libpython = True
776
- elif '_PYTHON_HOST_PLATFORM' in os.environ:
777
- # We are cross-compiling for one of the relevant platforms
778
- if get_config_var('ANDROID_API_LEVEL') != 0:
779
- link_libpython = True
780
- elif get_config_var('MACHDEP') == 'cygwin':
781
- link_libpython = True
782
-
783
- if link_libpython:
784
- ldversion = get_config_var('LDVERSION')
785
- return ext.libraries + ['python' + ldversion]
786
-
787
- return ext.libraries + py37compat.pythonlib()
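For context, the command deleted above is normally driven from a setup script; a minimal, illustrative setup.py (package and source names are hypothetical) looks like this:

    # Illustrative only: a tiny setup script that exercises the distutils
    # build_ext command, e.g. via `python setup.py build_ext --inplace`.
    from distutils.core import setup
    from distutils.extension import Extension

    setup(
        name="demo",
        version="0.1",
        ext_modules=[Extension("demo._speedups", sources=["demo/_speedups.c"])],
    )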
spaces/BilalSardar/karlo-cpu-api/app.py DELETED
@@ -1,26 +0,0 @@
-
- import gradio as gr
- import urllib
- from PIL import Image
- karlo = gr.Interface.load(name="spaces/kakaobrain/karlo")
- def get_images(prompt):
-     try:
-         gallery_dir = karlo(prompt, fn_index=1)
-     except Exception as e:
-         text=str(e)
-         url = "https://kakaobrain-karlo.hf.space/file="+text[38:len(text)-1]
-         save_as = "file.png"
-
-         data1 = urllib.request.urlopen(url)
-
-         f = open(save_as,'wb')
-         f.write(data1.read())
-         f.close()
-         img = Image.open(save_as)
-         return img
-
- demo=gr.Interface(fn=get_images,
-                   inputs="text",
-                   outputs="image",
-                   title="Karlo CPU/API")
- demo.launch(debug=True)
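The deleted app recovers the generated file path from the exception text and then downloads it by hand; a slightly more defensive sketch of just that download step (same URL handling assumed) is:

    # Illustrative only: explicit urllib.request import and context managers
    # so the network response and file handle are always closed.
    import urllib.request
    from PIL import Image

    def fetch_image(url, save_as="file.png"):
        with urllib.request.urlopen(url) as resp, open(save_as, "wb") as f:
            f.write(resp.read())
        return Image.open(save_as)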
spaces/CVPR/LIVE/pybind11/pybind11/__main__.py DELETED
@@ -1,37 +0,0 @@
- # -*- coding: utf-8 -*-
- from __future__ import print_function
-
- import argparse
- import sys
- import sysconfig
-
- from . import get_include
-
-
- def print_includes():
-     dirs = [sysconfig.get_path('include'),
-             sysconfig.get_path('platinclude'),
-             get_include()]
-
-     # Make unique but preserve order
-     unique_dirs = []
-     for d in dirs:
-         if d not in unique_dirs:
-             unique_dirs.append(d)
-
-     print(' '.join('-I' + d for d in unique_dirs))
-
-
- def main():
-     parser = argparse.ArgumentParser(prog='python -m pybind11')
-     parser.add_argument('--includes', action='store_true',
-                         help='Include flags for both pybind11 and Python headers.')
-     args = parser.parse_args()
-     if not sys.argv[1:]:
-         parser.print_help()
-     if args.includes:
-         print_includes()
-
-
- if __name__ == '__main__':
-     main()
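For reference, this entry point is documented as `python -m pybind11 --includes`; an illustrative way to capture its output from a build script:

    # Illustrative only: shelling out to the module entry point above to
    # collect pybind11/Python include flags for a manual compile command.
    import subprocess
    import sys

    flags = subprocess.run(
        [sys.executable, "-m", "pybind11", "--includes"],
        capture_output=True, text=True, check=True,
    ).stdout.split()
    print(flags)  # e.g. ['-I/usr/include/python3.11', '-I.../pybind11/include']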
spaces/CVPR/regionclip-demo/detectron2/data/common.py DELETED
@@ -1,186 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates.
- import copy
- import itertools
- import logging
- import numpy as np
- import pickle
- import random
- import torch.utils.data as data
- from torch.utils.data.sampler import Sampler
-
- from detectron2.utils.serialize import PicklableWrapper
-
- __all__ = ["MapDataset", "DatasetFromList", "AspectRatioGroupedDataset", "ToIterableDataset"]
-
-
- class MapDataset(data.Dataset):
-     """
-     Map a function over the elements in a dataset.
-
-     Args:
-         dataset: a dataset where map function is applied.
-         map_func: a callable which maps the element in dataset. map_func is
-             responsible for error handling, when error happens, it needs to
-             return None so the MapDataset will randomly use other
-             elements from the dataset.
-     """
-
-     def __init__(self, dataset, map_func):
-         self._dataset = dataset
-         self._map_func = PicklableWrapper(map_func)  # wrap so that a lambda will work
-
-         self._rng = random.Random(42)
-         self._fallback_candidates = set(range(len(dataset)))
-
-     def __len__(self):
-         return len(self._dataset)
-
-     def __getitem__(self, idx):
-         retry_count = 0
-         cur_idx = int(idx)
-
-         while True:
-             data = self._map_func(self._dataset[cur_idx])
-             if data is not None:
-                 self._fallback_candidates.add(cur_idx)
-                 return data
-
-             # _map_func fails for this idx, use a random new index from the pool
-             retry_count += 1
-             self._fallback_candidates.discard(cur_idx)
-             cur_idx = self._rng.sample(self._fallback_candidates, k=1)[0]
-
-             if retry_count >= 3:
-                 logger = logging.getLogger(__name__)
-                 logger.warning(
-                     "Failed to apply `_map_func` for idx: {}, retry count: {}".format(
-                         idx, retry_count
-                     )
-                 )
-
-
- class DatasetFromList(data.Dataset):
-     """
-     Wrap a list to a torch Dataset. It produces elements of the list as data.
-     """
-
-     def __init__(self, lst: list, copy: bool = True, serialize: bool = True):
-         """
-         Args:
-             lst (list): a list which contains elements to produce.
-             copy (bool): whether to deepcopy the element when producing it,
-                 so that the result can be modified in place without affecting the
-                 source in the list.
-             serialize (bool): whether to hold memory using serialized objects, when
-                 enabled, data loader workers can use shared RAM from master
-                 process instead of making a copy.
-         """
-         self._lst = lst
-         self._copy = copy
-         self._serialize = serialize
-
-         def _serialize(data):
-             buffer = pickle.dumps(data, protocol=-1)
-             return np.frombuffer(buffer, dtype=np.uint8)
-
-         if self._serialize:
-             logger = logging.getLogger(__name__)
-             logger.info(
-                 "Serializing {} elements to byte tensors and concatenating them all ...".format(
-                     len(self._lst)
-                 )
-             )
-             self._lst = [_serialize(x) for x in self._lst]
-             self._addr = np.asarray([len(x) for x in self._lst], dtype=np.int64)
-             self._addr = np.cumsum(self._addr)
-             self._lst = np.concatenate(self._lst)
-             logger.info("Serialized dataset takes {:.2f} MiB".format(len(self._lst) / 1024 ** 2))
-
-     def __len__(self):
-         if self._serialize:
-             return len(self._addr)
-         else:
-             return len(self._lst)
-
-     def __getitem__(self, idx):
-         if self._serialize:
-             start_addr = 0 if idx == 0 else self._addr[idx - 1].item()
-             end_addr = self._addr[idx].item()
-             bytes = memoryview(self._lst[start_addr:end_addr])
-             return pickle.loads(bytes)
-         elif self._copy:
-             return copy.deepcopy(self._lst[idx])
-         else:
-             return self._lst[idx]
-
-
- class ToIterableDataset(data.IterableDataset):
-     """
-     Convert an old indices-based (also called map-style) dataset
-     to an iterable-style dataset.
-     """
-
-     def __init__(self, dataset, sampler):
-         """
-         Args:
-             dataset (torch.utils.data.Dataset): an old-style dataset with ``__getitem__``
-             sampler (torch.utils.data.sampler.Sampler): a cheap iterable that produces indices
-                 to be applied on ``dataset``.
-         """
-         assert not isinstance(dataset, data.IterableDataset), dataset
-         assert isinstance(sampler, Sampler), sampler
-         self.dataset = dataset
-         self.sampler = sampler
-
-     def __iter__(self):
-         worker_info = data.get_worker_info()
-         if worker_info is None or worker_info.num_workers == 1:
-             for idx in self.sampler:
-                 yield self.dataset[idx]
-         else:
-             # With map-style dataset, `DataLoader(dataset, sampler)` runs the
-             # sampler in main process only. But `DataLoader(ToIterableDataset(dataset, sampler))`
-             # will run sampler in every of the N worker and only keep 1/N of the ids on each
-             # worker. The assumption is that sampler is cheap to iterate and it's fine to discard
-             # ids in workers.
-             for idx in itertools.islice(
-                 self.sampler, worker_info.id, None, worker_info.num_workers
-             ):
-                 yield self.dataset[idx]
-
-
- class AspectRatioGroupedDataset(data.IterableDataset):
-     """
-     Batch data that have similar aspect ratio together.
-     In this implementation, images whose aspect ratio < (or >) 1 will
-     be batched together.
-     This improves training speed because the images then need less padding
-     to form a batch.
-
-     It assumes the underlying dataset produces dicts with "width" and "height" keys.
-     It will then produce a list of original dicts with length = batch_size,
-     all with similar aspect ratios.
-     """
-
-     def __init__(self, dataset, batch_size):
-         """
-         Args:
-             dataset: an iterable. Each element must be a dict with keys
-                 "width" and "height", which will be used to batch data.
-             batch_size (int):
-         """
-         self.dataset = dataset
-         self.batch_size = batch_size
-         self._buckets = [[] for _ in range(2)]
-         # Hard-coded two aspect ratio groups: w > h and w < h.
-         # Can add support for more aspect ratio groups, but doesn't seem useful
-
-     def __iter__(self):
-         for d in self.dataset:
-             w, h = d["width"], d["height"]
-             bucket_id = 0 if w > h else 1
-             bucket = self._buckets[bucket_id]
-             bucket.append(d)
-             if len(bucket) == self.batch_size:
-                 yield bucket[:]
-                 del bucket[:]
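A short usage sketch of how these wrappers fit together (the toy records below are made up; detectron2 must be installed for the import to resolve):

    # Illustrative only: DatasetFromList serializes the records, MapDataset
    # applies a mapper; here the mapper is a no-op instead of an image loader.
    from detectron2.data.common import DatasetFromList, MapDataset

    records = [{"file_name": f"img_{i}.jpg", "height": 480, "width": 640} for i in range(4)]
    dataset = DatasetFromList(records, copy=False)
    dataset = MapDataset(dataset, lambda d: d)
    print(len(dataset), dataset[0]["file_name"])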
spaces/CVPR/transfiner/configs/new_baselines/mask_rcnn_R_101_FPN_200ep_LSJ.py DELETED
@@ -1,14 +0,0 @@
- from .mask_rcnn_R_101_FPN_100ep_LSJ import (
-     dataloader,
-     lr_multiplier,
-     model,
-     optimizer,
-     train,
- )
-
- train.max_iter *= 2  # 100ep -> 200ep
-
- lr_multiplier.scheduler.milestones = [
-     milestone * 2 for milestone in lr_multiplier.scheduler.milestones
- ]
- lr_multiplier.scheduler.num_updates = train.max_iter
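The deleted config follows the usual scaling pattern in these baselines (import the 100-epoch base, multiply the schedule); for illustration, a hypothetical 400-epoch variant would only change the factor:

    # Illustrative only: same doubling pattern with a factor of 4.
    from .mask_rcnn_R_101_FPN_100ep_LSJ import (
        dataloader,
        lr_multiplier,
        model,
        optimizer,
        train,
    )

    train.max_iter *= 4  # 100ep -> 400ep

    lr_multiplier.scheduler.milestones = [
        milestone * 4 for milestone in lr_multiplier.scheduler.milestones
    ]
    lr_multiplier.scheduler.num_updates = train.max_iter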