Commit bc8e748
Parent(s): 08d4f02

Update parquet files (step 73 of 249)

This view is limited to 50 files because it contains too many changes; see the raw diff for the complete change set.
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Acrobat Distiller 9 Full Version Free Download The Ultimate Guide to PDF Creation and Conversion.md +0 -133
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Clonedvd-7-0-0-10-ultimate-crack How to Backup Edit and Enjoy Your DVD Collection.md +0 -150
- spaces/1pelhydcardo/ChatGPT-prompt-generator/Tagalog-Christian-Songs-Lyrics-And-Chords-Pdf-Download-HOT.md +0 -108
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Extreme Car Driving Simulator Mod APK Latest Version for Free.md +0 -120
- spaces/1phancelerku/anime-remove-background/Animal Kingdom MOD APK Everything You Need to Know About this Amazing Game.md +0 -110
- spaces/1phancelerku/anime-remove-background/Experience the Thrill of Hill Climb Racing on Your PC for Free.md +0 -108
- spaces/2ndelement/voicevox/voicevox_engine/acoustic_feature_extractor.py +0 -332
- spaces/4Taps/SadTalker/src/face3d/models/bfm.py +0 -331
- spaces/801artistry/RVC801/julius/bands.py +0 -119
- spaces/A666sxr/Genshin_TTS/text/cleaners.py +0 -188
- spaces/AIFILMS/ControlNet-Video/share_btn.py +0 -86
- spaces/AIGText/GlyphControl/ldm/models/autoencoder.py +0 -278
- spaces/AchyuthGamer/ImMagician/style.css +0 -97
- spaces/AgentVerse/agentVerse/ui/dist/assets/tilemaps/tiles/tileset.tsx +0 -4
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/spinner/Factory.js +0 -13
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorinput/methods/Methods.js +0 -13
- spaces/Akmyradov/TurkmenTTSweSTT/uroman/lib/JSON.pm +0 -2317
- spaces/Alfaxad/BioGalacticModels/README.md +0 -13
- spaces/Alpaca233/SadTalker/src/audio2pose_models/discriminator.py +0 -76
- spaces/Ame42/UBTH/README.md +0 -14
- spaces/Amon1/ChatGPTForAcadamic/crazy_functions/test_project/cpp/libJPG/jpgd.h +0 -316
- spaces/Amrrs/DragGan-Inversion/PTI/torch_utils/ops/conv2d_resample.py +0 -156
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/training/lora.md +0 -405
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/models/test_models_vae_flax.py +0 -39
- spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/guided_anchor_head.py +0 -860
- spaces/Andy1621/uniformer_image_detection/tools/misc/browse_dataset.py +0 -96
- spaces/Andy1621/uniformer_image_segmentation/configs/fp16/pspnet_r101-d8_512x1024_80k_fp16_cityscapes.py +0 -5
- spaces/AnnasBlackHat/Image-Similarity/src/model/similarity_interface.py +0 -3
- spaces/Anonymous-sub/Rerender/ControlNet/ldm/models/diffusion/plms.py +0 -244
- spaces/AriaMei/TTSdemo/text/japanese.py +0 -153
- spaces/Ataturk-Chatbot/HuggingFaceChat/app.py +0 -119
- spaces/Awesimo/jojogan/e4e/configs/transforms_config.py +0 -62
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/transforms/transform.py +0 -351
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/structures/__init__.py +0 -0
- spaces/BatuhanYilmaz/Whisper-Auto-Subtitled-Video-Generator/01_🎥_Input_YouTube_Link.py +0 -258
- spaces/Benson/text-generation/Examples/8 Bola Piscina Apk Gua.md +0 -55
- spaces/Benson/text-generation/Examples/Can I Download There Is Day.md +0 -65
- spaces/Benson/text-generation/Examples/Descargar Gratis Fuego Avance Servidor Versi Terbaru.md +0 -58
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/colorama/ansi.py +0 -102
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/idna/idnadata.py +0 -2151
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/__init__.py +0 -102
- spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/packaging/__init__.py +0 -25
- spaces/Boadiwaa/Recipes/openai/api_resources/abstract/updateable_api_resource.py +0 -10
- spaces/CVPR/LIVE/thrust/thrust/adjacent_difference.h +0 -246
- spaces/CVPR/LIVE/thrust/thrust/mr/polymorphic_adaptor.h +0 -56
- spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/unique_by_key.h +0 -44
- spaces/CVPR/TokenCut/app.py +0 -22
- spaces/CVPR/lama-example/saicinpainting/evaluation/masks/countless/countless2d.py +0 -529
- spaces/CikeyQI/meme-api/meme_generator/memes/can_can_need/__init__.py +0 -20
- spaces/CikeyQI/meme-api/meme_generator/memes/my_friend/__init__.py +0 -79
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Acrobat Distiller 9 Full Version Free Download The Ultimate Guide to PDF Creation and Conversion.md
DELETED
@@ -1,133 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Acrobat Distiller 9: What Is It and How to Download It for Free</h1>
|
3 |
-
<p>If you are looking for a reliable and easy way to create high-quality PDF files from any application, you might want to consider using Acrobat Distiller 9. This software is a part of Adobe Acrobat 9, which is a comprehensive solution for creating, editing, and sharing PDF documents. In this article, we will explain what Acrobat Distiller 9 is, what are its main features and benefits, and how you can download it for free. We will also show you how to use Acrobat Distiller 9 to convert PostScript files to PDFs, manage the conversion queue, and customize the Adobe PDF settings.</p>
|
4 |
-
<h2>What Is Acrobat Distiller 9?</h2>
|
5 |
-
<p>Acrobat Distiller 9 is a software that allows you to convert PostScript files (PS) or Encapsulated PostScript files (EPS) to Portable Document Format files (PDF). PostScript files are created by applications that can print, such as word processors, spreadsheets, or graphics programs. They contain instructions for printers on how to render the document on paper. PDF files are universal files that can be viewed, printed, or shared on any device or platform. They preserve the layout, fonts, colors, and graphics of the original document.</p>
|
6 |
-
<h2>acrobat distiller 9 full version free download</h2><br /><p><b><b>DOWNLOAD</b> ———>>> <a href="https://byltly.com/2uKzqW">https://byltly.com/2uKzqW</a></b></p><br /><br />
|
7 |
-
<h3>The Main Features of Acrobat Distiller 9</h3>
|
8 |
-
<p>Acrobat Distiller 9 has several features that make it a powerful tool for creating PDF files. Some of these features are:</p>
|
9 |
-
<ul>
|
10 |
-
<li>It supports various standards and specifications for PDF creation, such as PDF/X, PDF/A, PDF/E, and PDF/VT.</li>
|
11 |
-
<li>It allows you to choose from different Adobe PDF settings that control the quality and size of the output PDF file. You can also create your own custom settings or edit the existing ones.</li>
|
12 |
-
<li>It lets you apply security options to your PDF files, such as encryption, password protection, digital signatures, and permissions.</li>
|
13 |
-
<li>It enables you to embed fonts in your PDF files, which ensures that your text will be displayed correctly on any device.</li>
|
14 |
-
<li>It provides you with a user-friendly interface that lets you monitor the conversion process, manage the conversion queue, and view the converted PDF files.</li>
|
15 |
-
</ul>
|
16 |
-
<h3>The Benefits of Using Acrobat Distiller 9</h3>
|
17 |
-
<p>Acrobat Distiller 9 offers many benefits for users who need to create PDF files from various applications. Some of these benefits are:</p>
|
18 |
-
<ul>
|
19 |
-
<li>It helps you save time and resources by automating the conversion process. You can set up a watched folder in Acrobat Distiller 9 that will automatically convert any PostScript files that you place in it.</li>
|
20 |
-
<li>It ensures that your PDF files are compatible with any device or platform that supports PDF viewing. You can also optimize your PDF files for different purposes, such as web publishing, printing, or archiving.</li>
|
21 |
-
<li>It enhances the security and integrity of your PDF files by applying encryption and digital signatures. You can also restrict access and editing of your PDF files by setting passwords and permissions.</li>
|
22 |
-
<li>It improves the quality and appearance of your PDF files by embedding fonts and using high-resolution images. You can also adjust the color management and compression settings of your PDF files.</li>
|
23 |
-
</ul>
|
24 |
-
<h2>How to Download Acrobat Distiller 9 for Free?</h2>
|
25 |
-
<p>If you want to download Acrobat Distiller 9 for free, you have two options: the official way and the alternative way. Let's see how they work.</p>
|
26 |
-
<h3>The Official Way to Download Acrobat Distiller 9</h3>
|
27 |
-
<p>The official way to download Acrobat Distiller 9 is to download Adobe Acrobat 9 Pro Extended trial version from Adobe's website. This trial version includes Acrobat Distiller 9 as well as other features of Adobe Acrobat 9 Pro Extended, such as Adobe Presenter, Adobe LiveCycle Designer ES, Adobe 3D Reviewer, and more. You can use this trial version for free for up to 30 days.</p>
|
28 |
-
<p>To download Adobe Acrobat 9 Pro Extended trial version, follow these steps:</p>
|
29 |
-
<ol>
|
30 |
-
<li>Go to <a href="https://www.adobe.com/downloads/other-downloads.html">https://www.adobe.com/downloads/other-downloads.html</a> and scroll down to find Adobe Acrobat Pro Extended (Windows only).</li>
|
31 |
-
<li>Click on Try Now button and sign in with your Adobe ID or create one if you don't have one.</li>
|
32 |
-
<li>Select your language and click on Download Now button. A file named ADBEPHSPCS4_LS1.exe will be downloaded.</li>
|
33 |
-
<li>Double-click on the downloaded file and follow the instructions on the screen to install Adobe Acrobat Pro Extended trial version on your computer.</li>
|
34 |
-
<li>Launch Adobe Acrobat Pro Extended from your desktop or start menu and enjoy using it for free for up to 30 days.</li>
|
35 |
-
</ol>
|
36 |
-
<h3>The Alternative Way to Download Acrobat Distiller 9</h3>
|
37 |
-
<p>The alternative way to download Acrobat Distiller 9 is to use a third-party website that offers free downloads of software. However, this method is not recommended because it may expose your computer to viruses, malware, or other security risks. Moreover, it may violate the terms and conditions of Adobe's license agreement. Therefore, we advise you to use this method at your own risk and discretion.</p>
|
38 |
-
<p>To download Acrobat Distiller 9 from a third-party website, follow these steps:</p>
|
39 |
-
<ol>
|
40 |
-
<li>Go to <a href="https://en.softonic.com/download/adobe-acrobat-distiller/windows/post-download">https://en.softonic.com/download/adobe-acrobat-distiller/windows/post-download</a> and click on Free Download button.</li>
|
41 |
-
<li>A file named adobe-acrobat-distiller-4-0.exe will be downloaded. Double-click on it and follow the instructions on the screen to install Acrobat Distiller 9 on your computer.</li>
|
42 |
-
<li>Launch Acrobat Distiller 9 from your desktop or start menu and use it as long as you want.</li>
|
43 |
-
</ol>
|
44 |
-
<h2>How to Use Acrobat Distiller 9 to Create PDFs?</h2>
|
45 |
-
<p>Now that you have downloaded and installed Acrobat Distiller 9 on your computer, you can start using it to create PDFs from any application that can print. Here are some tips on how to use Acrobat Distiller 9 effectively:</p>
|
46 |
-
<p>How to get acrobat distiller 9 for free<br />
|
47 |
-
Acrobat distiller 9 crack download<br />
|
48 |
-
Acrobat distiller 9 serial key generator<br />
|
49 |
-
Acrobat distiller 9 license key free<br />
|
50 |
-
Acrobat distiller 9 activation code online<br />
|
51 |
-
Acrobat distiller 9 offline installer download<br />
|
52 |
-
Acrobat distiller 9 portable version download<br />
|
53 |
-
Acrobat distiller 9 full setup file download<br />
|
54 |
-
Acrobat distiller 9 latest update download<br />
|
55 |
-
Acrobat distiller 9 patch file download<br />
|
56 |
-
Acrobat distiller 9 torrent download link<br />
|
57 |
-
Acrobat distiller 9 direct download link<br />
|
58 |
-
Acrobat distiller 9 alternative software free<br />
|
59 |
-
Acrobat distiller 9 compatible windows versions<br />
|
60 |
-
Acrobat distiller 9 system requirements<br />
|
61 |
-
Acrobat distiller 9 features and benefits<br />
|
62 |
-
Acrobat distiller 9 user guide pdf download<br />
|
63 |
-
Acrobat distiller 9 tutorial videos online<br />
|
64 |
-
Acrobat distiller 9 tips and tricks<br />
|
65 |
-
Acrobat distiller 9 best practices and recommendations<br />
|
66 |
-
Acrobat distiller 9 reviews and ratings<br />
|
67 |
-
Acrobat distiller 9 customer testimonials and feedback<br />
|
68 |
-
Acrobat distiller 9 comparison with other pdf tools<br />
|
69 |
-
Acrobat distiller 9 pros and cons<br />
|
70 |
-
Acrobat distiller 9 advantages and disadvantages<br />
|
71 |
-
Acrobat distiller 9 price and discounts<br />
|
72 |
-
Acrobat distiller 9 coupon codes and offers<br />
|
73 |
-
Acrobat distiller 9 free trial period and duration<br />
|
74 |
-
Acrobat distiller 9 refund policy and guarantee<br />
|
75 |
-
Acrobat distiller 9 customer support and service<br />
|
76 |
-
Acrobat distiller 9 technical issues and solutions<br />
|
77 |
-
Acrobat distiller 9 error messages and fixes<br />
|
78 |
-
Acrobat distiller 9 troubleshooting steps and guides<br />
|
79 |
-
Acrobat distiller 9 frequently asked questions and answers<br />
|
80 |
-
Acrobat distiller 9 forum and community online<br />
|
81 |
-
Acrobat distiller 9 blog and news updates<br />
|
82 |
-
Acrobat distiller 9 webinar and training sessions online<br />
|
83 |
-
Acrobat distiller 9 case studies and success stories online<br />
|
84 |
-
Acrobat distiller 9 awards and recognition online<br />
|
85 |
-
Acrobat distiller 9 legal and ethical issues online<br />
|
86 |
-
How to uninstall acrobat distiller 9 from windows pc <br />
|
87 |
-
How to upgrade acrobat distiller 9 to latest version <br />
|
88 |
-
How to downgrade acrobat distiller 9 to previous version <br />
|
89 |
-
How to backup acrobat distiller 9 settings and files <br />
|
90 |
-
How to restore acrobat distiller 9 settings and files <br />
|
91 |
-
How to customize acrobat distiller 9 preferences and options <br />
|
92 |
-
How to optimize acrobat distiller 9 performance and speed <br />
|
93 |
-
How to secure acrobat distiller 9 from malware and viruses <br />
|
94 |
-
How to integrate acrobat distiller 9 with other applications <br />
|
95 |
-
How to convert pdf files using acrobat distiller 9</p>
|
96 |
-
<h3>How to Convert PostScript Files to PDFs with Acrobat Distiller 9</h3>
|
97 |
-
<p>To convert PostScript files (PS) or Encapsulated PostScript files (EPS) to PDFs with Acrobat Distiller 9, follow these steps:</p>
|
98 |
-
<ol>
|
99 |
-
<li>In your application that can print, choose File > Print and select Adobe PDF as the printer name.</li>
|
100 |
-
<li>In the Print dialog box, click on Properties button and select an Adobe PDF setting from the Default Settings drop-down menu. You can also click on Edit button to modify or create your own custom setting.</li>
|
101 |
-
<li>In the same dialog box, click on OK button and then click on Print button. A Save As dialog box will appear where you can choose a name and location for your PostScript file.</li>
|
102 |
-
<li>Open Acrobat Distiller 9 from your desktop or start menu and drag-and-drop your PostScript file into its window. Alternatively, you can choose File > Open in Acrobat Distiller 9 and browse for your PostScript file.</li>
|
103 |
-
<li>The conversion process will start automatically and a progress bar will show you its status. When it is done, a new PDF file will be created in the same folder as your PostScript file.</li>
|
104 |
-
<li>You can double-click on the new PDF file to open it in Adobe Reader or any other PDF viewer application.</li>
|
105 |
-
</ol>
|
106 |
-
<h3>How to Manage the Conversion Queue in Acrobat Distiller 9</h3>
|
107 |
-
<h3>How to Customize the Adobe PDF Settings in Acrobat Distiller 9</h3>
|
108 |
-
<p>Acrobat Distiller 9 allows you to customize the Adobe PDF settings that control the quality and size of the output PDF file. You can edit the existing settings or create your own custom settings. To customize the Adobe PDF settings in Acrobat Distiller 9, follow these steps:</p>
|
109 |
-
<ol>
|
110 |
-
<li>In Acrobat Distiller 9, choose Settings > Edit Adobe PDF Settings. A dialog box will appear where you can see and modify the settings for the selected Adobe PDF setting.</li>
|
111 |
-
<li>In the General tab, you can change the description, compatibility, resolution, and other options for your PDF file.</li>
|
112 |
-
<li>In the Images tab, you can change the compression, downsampling, and color conversion options for your images.</li>
|
113 |
-
<li>In the Fonts tab, you can change the embedding and subsetting options for your fonts.</li>
|
114 |
-
<li>In the Color tab, you can change the color management and conversion options for your colors.</li>
|
115 |
-
<li>In the Advanced tab, you can change the transparency flattening, optimization, and security options for your PDF file.</li>
|
116 |
-
<li>In the Standards tab, you can change the standards compliance and reporting options for your PDF file.</li>
|
117 |
-
<li>When you are done with your changes, click on Save As button and give a name to your custom Adobe PDF setting. You can also click on OK button to overwrite the existing setting.</li>
|
118 |
-
<li>You can now use your custom Adobe PDF setting to convert your PostScript files to PDFs with Acrobat Distiller 9.</li>
|
119 |
-
</ol>
|
120 |
-
<h2>Conclusion</h2>
|
121 |
-
<p>Acrobat Distiller 9 is a useful software that lets you create high-quality PDF files from any application that can print. It has many features and benefits that make it a powerful tool for PDF creation. You can download it for free either from Adobe's website or from a third-party website. However, we recommend using the official way to avoid any security risks or license violations. You can also use Acrobat Distiller 9 to convert PostScript files to PDFs, manage the conversion queue, and customize the Adobe PDF settings. We hope this article has helped you understand what Acrobat Distiller 9 is and how to download it and use it for free.</p>
|
122 |
-
<h2>FAQs</h2>
|
123 |
-
<p>Here are some frequently asked questions about Acrobat Distiller 9:</p>
|
124 |
-
<ul>
|
125 |
-
<li><b>What is the difference between Acrobat Distiller 9 and Adobe Acrobat 9?</b><br>Acrobat Distiller 9 is a part of Adobe Acrobat 9 Pro Extended, which is a comprehensive solution for creating, editing, and sharing PDF documents. Acrobat Distiller 9 is mainly used for converting PostScript files to PDFs, while Adobe Acrobat 9 is used for viewing, editing, annotating, signing, and securing PDF files.</li>
|
126 |
-
<li><b>Can I use Acrobat Distiller 9 without Adobe Acrobat 9?</b><br>Yes, you can use Acrobat Distiller 9 as a standalone application without Adobe Acrobat 9. However, you will need Adobe Reader or any other PDF viewer application to open and view the converted PDF files.</li>
|
127 |
-
<li><b>Is Acrobat Distiller 9 compatible with Windows 10?</b><br>No, Acrobat Distiller 9 is not compatible with Windows 10. The latest version of Acrobat Distiller that is compatible with Windows 10 is Acrobat Distiller DC, which is a part of Adobe Acrobat DC.</li>
|
128 |
-
<li><b>How can I update Acrobat Distiller 9?</b><br>You can update Acrobat Distiller 9 by downloading and installing the latest updates from Adobe's website. You can also check for updates by choosing Help > Check For Updates in Acrobat Distiller 9.</li>
|
129 |
-
<li><b>How can I uninstall Acrobat Distiller 9?</b><br>You can uninstall Acrobat Distiller 9 by using the Windows Control Panel or by using the Adobe Creative Suite Cleaner Tool. You can also uninstall Adobe Acrobat Pro Extended trial version by using the same methods.</li>
|
130 |
-
</ul>
|
131 |
-
</p> 0a6ba089eb<br />
|
132 |
-
<br />
|
133 |
-
<br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Clonedvd-7-0-0-10-ultimate-crack How to Backup Edit and Enjoy Your DVD Collection.md
DELETED
@@ -1,150 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>CloneDVD 7 Ultimate 7.0.0.10 Crack: A Complete Guide</h1>
|
3 |
-
<p>If you are looking for a powerful and easy-to-use DVD copying and converting software, you may have heard of CloneDVD 7 Ultimate. This software is designed to meet your various DVD needs, such as cloning, ripping, creating, and converting DVDs. But what exactly is CloneDVD 7 Ultimate and how can you get it for free? In this article, we will give you a complete guide on CloneDVD 7 Ultimate 7.0.0.10 crack, including its features, system requirements, download and installation steps, and usage tips.</p>
|
4 |
-
<h2>Clonedvd-7-0-0-10-ultimate-crack</h2><br /><p><b><b>DOWNLOAD</b> ->>->>->> <a href="https://byltly.com/2uKyic">https://byltly.com/2uKyic</a></b></p><br /><br />
|
5 |
-
<h2>What is CloneDVD 7 Ultimate?</h2>
|
6 |
-
<p>CloneDVD 7 Ultimate is a comprehensive DVD solution that allows you to clone, copy, backup, rip, create, and convert any DVD disc or video file. It supports all popular video formats and devices, such as AVI, MP4, MPG, WMV, MOV, iPhone, iPad, Android phones, etc. It also enables you to edit and customize your DVDs and videos with various effects and settings.</p>
|
7 |
-
<p>CloneDVD 7 Ultimate has four main modules: DVD Copy, DVD Ripper, DVD Creator, and Video Converter. Each module has its own functions and features that we will introduce in the next section.</p>
|
8 |
-
<h3>Features of CloneDVD 7 Ultimate</h3>
|
9 |
-
<h4>DVD Copy</h4>
|
10 |
-
<p>The DVD Copy module allows you to clone, copy, and backup any DVD disc or folder with high quality and fast speed. You can choose from multiple copy modes and languages according to your needs. You can also compress or split a DVD9 disc into two DVD5 discs.</p>
|
11 |
-
<h4>DVD Ripper</h4>
|
12 |
-
<p>The DVD Ripper module allows you to rip any DVD disc or folder into other video formats that can be played on various devices. You can choose from a wide range of output formats and profiles according to your device type and preference. You can also edit and adjust the video parameters such as resolution, bitrate, frame rate, aspect ratio, etc.</p>
|
13 |
-
<h4>DVD Creator</h4>
|
14 |
-
<p>The DVD Creator module allows you to create your own DVD masterpieces from your collected videos or movies. You can drag and drop any video file into the program and burn it to a blank DVD disc or save it as an ISO file or a DVD folder. You can also customize your DVD menu with different templates, backgrounds, music, etc.</p>
|
15 |
-
<h4>Video Converter</h4>
|
16 |
-
<p>The Video Converter module allows you to convert any video file between different formats with high quality and fast speed. You can choose from a large number of output formats and profiles according to your device type and preference. You can also edit and enhance your videos with various effects and settings.</p>
|
17 |
-
<h3>System Requirements for CloneDVD 7 Ultimate</h3>
|
18 |
-
<p>Before you download and install CloneDVD 7 Ultimate 7.0.0.10 crack, you need to make sure that your computer meets the following system requirements:</p>
|
19 |
-
<p>Clone DVD 7 Ultimate Multilingual Incl Crack<br />
|
20 |
-
CloneDVD 7 Ultimate Crack with License Code<br />
|
21 |
-
CloneDVD 7 Ultimate Serial Number<br />
|
22 |
-
CloneDVD 7 Ultimate Full Version Free Download<br />
|
23 |
-
CloneDVD 7 Ultimate Keygen<br />
|
24 |
-
CloneDVD 7 Ultimate Patch<br />
|
25 |
-
CloneDVD 7 Ultimate Activation Code<br />
|
26 |
-
CloneDVD 7 Ultimate Registration Code<br />
|
27 |
-
CloneDVD 7 Ultimate Portable<br />
|
28 |
-
CloneDVD 7 Ultimate Review<br />
|
29 |
-
CloneDVD 7 Ultimate Tutorial<br />
|
30 |
-
CloneDVD 7 Ultimate Features<br />
|
31 |
-
CloneDVD 7 Ultimate System Requirements<br />
|
32 |
-
CloneDVD 7 Ultimate DVD Copy Software<br />
|
33 |
-
CloneDVD 7 Ultimate DVD Ripper Software<br />
|
34 |
-
CloneDVD 7 Ultimate DVD Creator Software<br />
|
35 |
-
CloneDVD 7 Ultimate Video Converter Software<br />
|
36 |
-
CloneDVD 7 Ultimate DVD Cloner<br />
|
37 |
-
CloneDVD 7 Ultimate DVD Backup<br />
|
38 |
-
CloneDVD 7 Ultimate DVD Burner<br />
|
39 |
-
CloneDVD 7 Ultimate DVD Editor<br />
|
40 |
-
CloneDVD 7 Ultimate DVD Maker<br />
|
41 |
-
CloneDVD 7 Ultimate DVD Slideshow<br />
|
42 |
-
CloneDVD 7 Ultimate Video Editor<br />
|
43 |
-
CloneDVD 7 Ultimate Video Maker<br />
|
44 |
-
CloneDVD 7 Ultimate Video Slideshow<br />
|
45 |
-
CloneDVD 7 Ultimate Convert DVD to MP4<br />
|
46 |
-
CloneDVD 7 Ultimate Convert DVD to AVI<br />
|
47 |
-
CloneDVD 7 Ultimate Convert DVD to WMV<br />
|
48 |
-
CloneDVD 7 Ultimate Convert DVD to MOV<br />
|
49 |
-
CloneDVD 7 Ultimate Convert DVD to MKV<br />
|
50 |
-
CloneDVD 7 Ultimate Convert DVD to FLV<br />
|
51 |
-
CloneDVD 7 Ultimate Convert DVD to MP3<br />
|
52 |
-
CloneDVD 7 Ultimate Convert DVD to iPhone<br />
|
53 |
-
CloneDVD 7 Ultimate Convert DVD to iPad<br />
|
54 |
-
CloneDVD 7 Ultimate Convert DVD to Android<br />
|
55 |
-
CloneDVD 7 Ultimate Rip DVD to MP4<br />
|
56 |
-
CloneDVD 7 Ultimate Rip DVD to AVI<br />
|
57 |
-
CloneDVD 7 Ultimate Rip DVD to WMV<br />
|
58 |
-
CloneDVD 7 Ultimate Rip DVD to MOV<br />
|
59 |
-
CloneDVD 7 Ultimate Rip DVD to MKV<br />
|
60 |
-
CloneDVD 7 Ultimate Rip DVD to FLV<br />
|
61 |
-
CloneDVD 7 Ultimate Rip DVD to MP3<br />
|
62 |
-
CloneDVD 7 Ultimate Rip DVD to iPhone<br />
|
63 |
-
CloneDVD 7 Ultimate Rip DVD to iPad<br />
|
64 |
-
CloneDVD 7 Ultimate Rip DVD to Android</p>
|
65 |
-
<ul>
|
66 |
-
<li>OS: Microsoft Windows XP (SP2 or later), Windows Vista, Windows 7, Windows 8, Windows 10</li>
|
67 |
-
<li>Processor: 1GHz Intel/AMD processor or above</li>
|
68 |
-
<li>RAM: 256MB RAM (512MB or above recommended)</li>
|
69 |
-
<li>Free Hard Disk: 100MB space for installation</li>
|
70 |
-
<li>Devices: All DVD-ROM drives</li>
|
71 |
-
</ul>
|
72 |
-
<h2>How to Download and Install CloneDVD 7 Ultimate 7.0.0.10 Crack?</h2>
|
73 |
-
<p>If you want to enjoy the full features of CloneDVD 7 Ultimate without paying for it, you need to download and install its cracked version from a reliable source. Here are the steps you need to follow:</p>
|
74 |
-
<h3>Step 1: Download the Setup File and Crack File</h3>
|
75 |
-
<p>The first step is to download the setup file and the crack file of CloneDVD 7 Ultimate from a trusted website. For example, you can download them from <a href="https://archive.org/details/CloneDVD7Ultimate7.0.0.13MultilingualInclCrack_20180924_0728">this link</a>. The setup file is named <code>CloneDVDSetup.exe</code> and the crack file is named <code>CloneDVDCrack.exe</code>. Save them in a folder on your computer.</p>
|
76 |
-
<h3>Step 2: Run the Setup File and Follow the Instructions</h3>
|
77 |
-
<p>The second step is to run the setup file and follow the instructions on the screen to install CloneDVD 7 Ultimate on your computer. Choose a destination folder for the installation and agree to the terms and conditions.</p>
|
78 |
-
<h3>Step 3: Copy and Paste the Crack File into the Installation Folder</h3>
|
79 |
-
<p>The third step is to copy and paste the crack file into the installation folder of CloneDVD 7 Ultimate on your computer. The installation folder is usually located at <code>C:\Program Files (x86)\CloneDVDCrack\CloneDVDCrack.exe</code>. Replace the original file with the crack file.</p>
|
80 |
-
<h3>Step 4: Enjoy the Full Version of CloneDVD 7 Ultimate</h3>
|
81 |
-
<p>The fourth step is to enjoy the full version of CloneDVD 7 Ultimate without any limitations or restrictions. You can launch the program from your desktop shortcut or start menu.</p>
|
82 |
-
<h2>How to Use CloneDVD 7 Ultimate 7.0.0.10 Crack?</h2>
|
83 |
-
<p>Now that you have installed CloneDVD 7 Ultimate crack successfully on your computer, you may wonder how to use it effectively for your various DVD needs. Here are some tips on how to use each module of CloneDVD 7 Ultimate:</p>
|
84 |
-
<h3>How to Copy a DVD with CloneDVD 7 Ultimate?</h3>
|
85 |
-
<ol>
|
86 |
-
<li>Launch CloneDVD 7 Ultimate and select "Clone DVD" from the main interface.</li>
|
87 |
-
<li>Insert the source DVD disc into your DVD drive or choose a DVD folder from your computer.</li>
|
88 |
-
<li>Select an output target from "Copy as" option: ISO Image File (to save as an ISO file), DVD Folder (to save as a folder), or Writer Device (to burn directly).</li>
|
89 |
-
<li>Select a copy mode from "Copy Mode" option: Entire Disc (to copy all contents), Main Movie (to copy only main movie), Customize (to select specific titles), Split Disc (to split a large disc into two smaller ones).</li>
|
90 |
-
<li>Select an output quality from "Output Quality" option: High Quality (to keep original quality), Standard Quality (to reduce size slightly), Compress Quality (to reduce size significantly).</li>
|
91 |
-
<li>Select an output language from "Audio" option: Auto (to keep original language), English (to change audio language), Other Languages (to select other languages).</li>
|
92 |
-
<li>Select an output subtitle from "Subtitle" option: Auto (to keep original subtitle), English (to change subtitle language), Other Languages (to select other languages), None (to remove subtitle).</li>
|
93 |
-
<li>Click "Start" button to begin copying process.</li>
|
94 |
-
</ol>
|
95 |
-
<h3>How to Rip a DVD with CloneDVD 7 Ultimate?</h3>
|
96 |
-
<ol>
|
97 |
-
<li>Launch CloneDVD 7 Ultimate and select "Rip DVD" from the main interface.</li>
|
98 |
-
<li>Insert the source DVD disc into your DVD drive or choose a DVD folder from your computer.</li>
|
99 |
-
<li>Select an output format from "Output Format" option according to your device type or preference.</li>
|
100 |
-
```html to save the ripped files.</li>
|
101 |
-
<li>Click "Start" button to begin ripping process.</li>
|
102 |
-
</ol>
|
103 |
-
<h3>How to Create a DVD with CloneDVD 7 Ultimate?</h3>
|
104 |
-
<ol>
|
105 |
-
<li>Launch CloneDVD 7 Ultimate and select "Create DVD" from the main interface.</li>
|
106 |
-
<li>Drag and drop any video file into the program or click "Add File" button to browse and select video files from your computer.</li>
|
107 |
-
<li>Select a DVD menu template from "Menu Template" option according to your preference. You can also customize the menu with different backgrounds, music, buttons, etc.</li>
|
108 |
-
<li>Select a DVD disc type from "DVD Type" option: DVD-5 (4.7GB) or DVD-9 (8.5GB).</li>
|
109 |
-
<li>Select an output target from "Output Target" option: ISO Image File (to save as an ISO file), DVD Folder (to save as a folder), or Writer Device (to burn directly).</li>
|
110 |
-
<li>Click "Start" button to begin creating process.</li>
|
111 |
-
</ol>
|
112 |
-
<h3>How to Convert a Video with CloneDVD 7 Ultimate?</h3>
|
113 |
-
<ol>
|
114 |
-
<li>Launch CloneDVD 7 Ultimate and select "Video Converter" from the main interface.</li>
|
115 |
-
<li>Drag and drop any video file into the program or click "Add File" button to browse and select video files from your computer.</li>
|
116 |
-
<li>Select an output format from "Output Format" option according to your device type or preference.</li>
|
117 |
-
<li>Select an output folder from "Output Folder" option where you want to save the converted files.</li>
|
118 |
-
<li>Click "Start" button to begin converting process.</li>
|
119 |
-
</ol>
|
120 |
-
<h2>Conclusion</h2>
|
121 |
-
<p>In conclusion, CloneDVD 7 Ultimate 7.0.0.10 crack is a comprehensive DVD solution that allows you to clone, copy, backup, rip, create, and convert any DVD disc or video file. It supports all popular video formats and devices, such as AVI, MP4, MPG, WMV, MOV, iPhone, iPad, Android phones, etc. It also enables you to edit and customize your DVDs and videos with various effects and settings. You can download and install CloneDVD 7 Ultimate crack for free from a reliable source and enjoy its full features without any limitations or restrictions. We hope this article has given you a complete guide on CloneDVD 7 Ultimate crack and how to use it effectively for your various DVD needs.</p>
|
122 |
-
<h2>FAQs</h2>
|
123 |
-
<ul>
|
124 |
-
<li><b>Q: Is CloneDVD 7 Ultimate safe to use?</b></li>
|
125 |
-
<li>A: Yes, CloneDVD 7 Ultimate is safe to use as long as you download it from a trusted website and scan it with an antivirus program before installing it on your computer.</li>
|
126 |
-
<li><b>Q: Is CloneDVD 7 Ultimate legal to use?</b></li>
|
127 |
-
<li>A: No, CloneDVD 7 Ultimate is not legal to use as it violates the copyright laws and terms of service of the original software. You may face legal consequences if you use CloneDVD 7 Ultimate for commercial purposes or distribute it to others.</li>
|
128 |
-
<li><b>Q: What are the advantages of using CloneDVD 7 Ultimate over other DVD software?</b></li>
|
129 |
-
<li>A: Some of the advantages of using CloneDVD 7 Ultimate over other DVD software are:</li>
|
130 |
-
<ul>
|
131 |
-
<li>It has four modules that cover all your DVD needs in one program.</li>
|
132 |
-
<li>It has a user-friendly interface that is easy to navigate and operate.</li>
|
133 |
-
<li>It has a high compatibility that supports all popular video formats and devices.</li>
|
134 |
-
<li>It has a fast speed that saves your time and energy.</li>
|
135 |
-
<li>It has a high quality that preserves the original quality of your DVDs and videos.</li>
|
136 |
-
</ul>
|
137 |
-
<li><b>Q: What are the disadvantages of using CloneDVD 7 Ultimate over other DVD software?</b></li>
|
138 |
-
<li>A: Some of the disadvantages of using CloneDVD 7 Ultimate over other DVD software are:</li>
|
139 |
-
<ul>
|
140 |
-
<li>It is not legal to use as it infringes the copyright laws and terms of service of the original software.</li>
|
141 |
-
<li>It may contain viruses or malware that may harm your computer or data.</li>
|
142 |
-
<li>It may not work properly or crash frequently due to bugs or errors.</li>
|
143 |
-
<li>It may not support the latest updates or features of the original software.</li>
|
144 |
-
</ul>
|
145 |
-
<li><b>Q: How can I get technical support for CloneDVD 7 Ultimate?</b></li>
|
146 |
-
<li>A: You can get technical support for CloneDVD 7 Ultimate by visiting its official website or contacting its customer service via email or phone. However, you may not get satisfactory answers or solutions as you are using a cracked version of the software.</li>
|
147 |
-
</ul>
|
148 |
-
</p> 0a6ba089eb<br />
|
149 |
-
<br />
|
150 |
-
<br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/Tagalog-Christian-Songs-Lyrics-And-Chords-Pdf-Download-HOT.md
DELETED
@@ -1,108 +0,0 @@
|
|
1 |
-
## Tagalog Christian Songs Lyrics And Chords Pdf Download
|
2 |
-
|
3 |
-
|
4 |
-
|
5 |
-
|
6 |
-
|
7 |
-
|
8 |
-
|
9 |
-
|
10 |
-
|
11 |
-
**Click Here >>>>> [https://lodystiri.blogspot.com/?file=2txPB5](https://lodystiri.blogspot.com/?file=2txPB5)**
|
12 |
-
|
13 |
-
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
# How to Download Tagalog Christian Songs Lyrics and Chords in PDF Format
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
If you are looking for Tagalog Christian songs lyrics and chords in PDF format, you might have a hard time finding them online. Most of the websites that offer Tagalog worship songs only provide the lyrics or the chords, but not both. And if they do, they might not be in a printable or downloadable format.
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
But don't worry, because we have a solution for you. In this article, we will show you how to download Tagalog Christian songs lyrics and chords in PDF format using a simple and free tool. You will be able to access hundreds of Tagalog worship songs with lyrics and chords that you can print or save on your device.
|
34 |
-
|
35 |
-
|
36 |
-
|
37 |
-
## What is PDF Format?
|
38 |
-
|
39 |
-
|
40 |
-
|
41 |
-
PDF stands for Portable Document Format, which is a file format that preserves the layout and formatting of a document across different platforms and devices. PDF files can be viewed, printed, or edited using various software applications, such as Adobe Acrobat Reader, Microsoft Word, or Google Docs.
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
PDF files are ideal for sharing documents that contain text, images, graphics, or other elements that need to maintain their appearance and quality. For example, PDF files are commonly used for e-books, reports, flyers, resumes, contracts, and more.
|
46 |
-
|
47 |
-
|
48 |
-
|
49 |
-
## Why Download Tagalog Christian Songs Lyrics and Chords in PDF Format?
|
50 |
-
|
51 |
-
|
52 |
-
|
53 |
-
There are many benefits of downloading Tagalog Christian songs lyrics and chords in PDF format. Here are some of them:
|
54 |
-
|
55 |
-
|
56 |
-
|
57 |
-
- You can easily print them out and use them for your personal or group worship sessions.
|
58 |
-
|
59 |
-
- You can save them on your computer, tablet, smartphone, or other devices and access them anytime and anywhere.
|
60 |
-
|
61 |
-
- You can share them with your friends, family, church members, or anyone who loves Tagalog worship songs.
|
62 |
-
|
63 |
-
- You can edit them if you want to change the font size, color, style, or add notes.
|
64 |
-
|
65 |
-
- You can enjoy high-quality lyrics and chords that are clear and accurate.
|
66 |
-
|
67 |
-
|
68 |
-
|
69 |
-
## How to Download Tagalog Christian Songs Lyrics and Chords in PDF Format?
|
70 |
-
|
71 |
-
|
72 |
-
|
73 |
-
The tool that we will use to download Tagalog Christian songs lyrics and chords in PDF format is called [Kaps Worship](https://www.kapsworship.com/tagalog-christian-song-lyrics/). Kaps Worship is a website that offers a huge collection of Tagalog worship songs with lyrics and chords. You can browse through their categories or search for your favorite songs by title or artist.
|
74 |
-
|
75 |
-
|
76 |
-
|
77 |
-
Kaps Worship also provides a feature that allows you to download any song as a PDF file. Here are the steps to do it:
|
78 |
-
|
79 |
-
|
80 |
-
|
81 |
-
1. Go to [Kaps Worship](https://www.kapsworship.com/tagalog-christian-song-lyrics/) website and find the song that you want to download.
|
82 |
-
|
83 |
-
2. Click on the song title to open the song page.
|
84 |
-
|
85 |
-
3. On the song page, you will see the lyrics and chords of the song. You will also see a button that says "Download as PDF".
|
86 |
-
|
87 |
-
4. Click on the button and wait for a few seconds. A new tab will open with the PDF file of the song.
|
88 |
-
|
89 |
-
5. You can now view, print, save, or share the PDF file as you wish.
|
90 |
-
|
91 |
-
|
92 |
-
|
93 |
-
## Conclusion
|
94 |
-
|
95 |
-
|
96 |
-
|
97 |
-
Downloading Tagalog Christian songs lyrics and chords in PDF format is easy and convenient with Kaps Worship. You can access hundreds of Tagalog worship songs with lyrics and chords that you can use for your personal or group worship sessions. You can also print them out or save them on your device for offline access. You can also share them with others who love Tagalog worship songs.
|
98 |
-
|
99 |
-
|
100 |
-
|
101 |
-
We hope this article has helped you learn how to download Tagalog Christian songs lyrics and chords in PDF format using Kaps Worship. If you have any questions or feedback, please feel free to leave a comment below. God bless you!
|
102 |
-
|
103 |
-
dfd1c89656
|
104 |
-
|
105 |
-
|
106 |
-
|
107 |
-
|
108 |
-
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Extreme Car Driving Simulator Mod APK Latest Version for Free.md
DELETED
@@ -1,120 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Extreme Car Driving Simulator Latest Mod APK Download</h1>
|
3 |
-
<p>Do you love driving fast cars and performing stunts in an open world environment? If yes, then you should try Extreme Car Driving Simulator, one of the most popular car games on Android. In this game, you can drive, drift, and feel a racing sports car without any limits. You can also customize your cars and play different game modes.</p>
|
4 |
-
<h2>extreme car driving simulator latest mod apk download</h2><br /><p><b><b>DOWNLOAD</b> ✓ <a href="https://urlin.us/2uSU5v">https://urlin.us/2uSU5v</a></b></p><br /><br />
|
5 |
-
<p>But what if you want to enjoy the game without any restrictions or ads? Well, you can do that by downloading the latest mod apk version of Extreme Car Driving Simulator. In this article, we will tell you what is Extreme Car Driving Simulator, why you should download the mod apk version, and how to do it. We will also share some tips and tricks for playing the game.</p>
|
6 |
-
<h2>What is Extreme Car Driving Simulator?</h2>
|
7 |
-
<p>Extreme Car Driving Simulator is an open world car simulator game developed by AxesInMotion Racing. It was released in 2014 and has over 500 million downloads on Google Play Store. It is one of the best car games for Android thanks to its advanced real physics engine and realistic graphics.</p>
|
8 |
-
<h3>Features of the game</h3>
|
9 |
-
<p>Some of the features of Extreme Car Driving Simulator are:</p>
|
10 |
-
<ul>
|
11 |
-
<li>Mini game checkpoint mode</li>
|
12 |
-
<li>Drive with traffic</li>
|
13 |
-
<li>Full real HUD including revs, gear, and speed</li>
|
14 |
-
<li>ABS, TC, and ESP simulation. You can also turn them off!</li>
|
15 |
-
<li>Explore a detailed open world environment</li>
|
16 |
-
<li>Realistic car damage. Crash your car!</li>
|
17 |
-
<li>Accurate physics</li>
|
18 |
-
<li>Control your car with a steering wheel, accelerometer, or arrows</li>
|
19 |
-
<li>Several different cameras</li>
|
20 |
-
<li>Gamepad support</li>
|
21 |
-
</ul>
|
22 |
-
<h3>How to play the game</h3>
|
23 |
-
<p>To play Extreme Car Driving Simulator, you need to choose your vehicle and enter a free-roaming 3D world with various driving tasks to complete. You can also drive freely around the city and perform illegal stunt actions without worrying about the police chasing you. You can drift fast and do burnouts on the asphalt of this open world city.</p>
|
24 |
-
<p>You can also switch between different game modes, such as traffic mode, checkpoint mode, free mode, or airport mode. Each mode has its own challenges and objectives. You can also complete achievements and unlock new cars with different features and performance.</p>
|
25 |
-
<h2>Why download the mod apk version?</h2>
|
26 |
-
<p>If you want to enjoy Extreme Car Driving Simulator without any limitations or interruptions, you should download the mod apk version of the game. The mod apk version is a modified version of the original game that gives you some extra benefits and features that are not available in the official version.</p>
|
27 |
-
<h3>Benefits of the mod apk</h3>
|
28 |
-
<p>Some of the benefits of downloading the mod apk version of Extreme Car Driving Simulator are:</p>
|
29 |
-
<ul>
|
30 |
-
<li>Unlimited money: You can get unlimited money to buy and upgrade any car you want.</li>
|
31 |
-
<li>No ads: You can play the game without any annoying ads popping up on your screen.</li>
|
32 |
-
<li>All cars unlocked: You can access all the cars in the game without completing any achievements or missions.</li>
|
33 |
-
<li>All features unlocked: You can use all the features of the game, such as ABS, TC, ESP, nitro, etc., without any restrictions.</li>
|
34 |
-
</ul>
|
35 |
-
<h3>How to download and install the mod apk</h3>
|
36 |
-
<p>To download and install the mod apk version of Extreme Car Driving Simulator, you need to follow these steps:</p>
|
37 |
-
<ol>
|
38 |
-
<li>Go to [1](https://apkdone.com/extreme-car-driving-simulator/) and click on "Download APK".</li>
|
39 |
-
<li>Wait for the download to finish and then open the file.</li> <li>Allow installation from unknown sources if prompted by your device.</li>
|
40 |
-
<li>Follow the instructions on the screen to install the mod apk.</li>
|
41 |
-
<li>Launch the game and enjoy the mod features.</li>
|
42 |
-
</ol>
|
43 |
-
<p>Note: You may need to uninstall the original version of the game before installing the mod apk. Also, make sure you download the mod apk from a trusted source and scan it for viruses before installing it.</p>
|
44 |
-
<p>extreme car driving simulator mod apk unlimited money<br />
|
45 |
-
extreme car driving simulator hack apk download<br />
|
46 |
-
extreme car driving simulator mod apk android 1<br />
|
47 |
-
extreme car driving simulator mod apk revdl<br />
|
48 |
-
extreme car driving simulator mod apk happymod<br />
|
49 |
-
extreme car driving simulator mod apk rexdl<br />
|
50 |
-
extreme car driving simulator mod apk all cars unlocked<br />
|
51 |
-
extreme car driving simulator mod apk latest version 2023<br />
|
52 |
-
extreme car driving simulator mod apk free shopping<br />
|
53 |
-
extreme car driving simulator mod apk no ads<br />
|
54 |
-
extreme car driving simulator mod apk offline<br />
|
55 |
-
extreme car driving simulator mod apk 6.74.9<br />
|
56 |
-
extreme car driving simulator mod apk 6.75.0<br />
|
57 |
-
extreme car driving simulator mod apk 6.74.8<br />
|
58 |
-
extreme car driving simulator mod apk 6.74.7<br />
|
59 |
-
extreme car driving simulator mod apk 6.74.6<br />
|
60 |
-
extreme car driving simulator mod apk 6.74.5<br />
|
61 |
-
extreme car driving simulator mod apk 6.74.4<br />
|
62 |
-
extreme car driving simulator mod apk 6.74.3<br />
|
63 |
-
extreme car driving simulator mod apk 6.74.2<br />
|
64 |
-
extreme car driving simulator mod apk 6.74.1<br />
|
65 |
-
extreme car driving simulator mod apk 6.74.0<br />
|
66 |
-
extreme car driving simulator mod apk 6.73.9<br />
|
67 |
-
extreme car driving simulator mod apk 6.73.8<br />
|
68 |
-
extreme car driving simulator mod apk 6.73.7<br />
|
69 |
-
extreme car driving simulator mod apk unlimited nitro<br />
|
70 |
-
extreme car driving simulator mod apk unlimited coins and gems<br />
|
71 |
-
extreme car driving simulator mod apk unlimited everything<br />
|
72 |
-
extreme car driving simulator mod apk unlimited fuel and damage<br />
|
73 |
-
extreme car driving simulator mod apk unlimited gold and diamonds<br />
|
74 |
-
extreme car driving simulator mod apk unlimited keys and cash<br />
|
75 |
-
extreme car driving simulator mod apk unlimited stars and xp<br />
|
76 |
-
extreme car driving simulator mod apk unlimited tokens and credits<br />
|
77 |
-
extreme car driving simulator mod apk unlimited money and cars download for android<br />
|
78 |
-
extreme car driving simulator hack version download for android<br />
|
79 |
-
how to download extreme car driving simulator mod apk on android phone or tablet<br />
|
80 |
-
how to install and play extreme car driving simulator mod apk on pc or laptop using bluestacks emulator or other software <br />
|
81 |
-
how to update or upgrade extreme car driving simulator mod apk to the latest version available online or offline <br />
|
82 |
-
how to uninstall or remove extreme car driving simulator mod apk from your device without losing any data or progress <br />
|
83 |
-
how to fix or solve any errors or issues with extreme car driving simulator mod apk such as crashing, freezing, lagging, not working, not opening, etc.</p>
|
84 |
-
<h2>Tips and tricks for playing Extreme Car Driving Simulator</h2>
|
85 |
-
<p>Now that you have downloaded and installed the mod apk version of Extreme Car Driving Simulator, you may want to know some tips and tricks to play the game better. Here are some of them:</p>
|
86 |
-
<h3>Use drift mode and nitro</h3>
|
87 |
-
<p>One of the most fun aspects of Extreme Car Driving Simulator is drifting. You can drift by pressing the brake button while turning. This will make your car slide sideways and create smoke trails. Drifting is not only cool, but also useful for avoiding obstacles and taking sharp turns. You can also use nitro to boost your speed and perform longer drifts. Nitro is activated by pressing the N button on the screen. You can refill your nitro by driving fast or drifting.</p>
|
88 |
-
<h3>Explore different game modes and environments</h3>
|
89 |
-
<p>Extreme Car Driving Simulator has several game modes and environments to choose from. You can switch between them by tapping the map icon on the screen. Some of the game modes are:</p>
|
90 |
-
<ul>
|
91 |
-
<li>Traffic mode: In this mode, you have to drive with traffic and avoid collisions with other vehicles.</li>
|
92 |
-
<li>Checkpoint mode: In this mode, you have to reach checkpoints within a time limit.</li>
|
93 |
-
<li>Free mode: In this mode, you can drive freely without any rules or objectives.</li>
|
94 |
-
<li>Airport mode: In this mode, you can drive on a runway and take off with a plane.</li>
|
95 |
-
</ul>
|
96 |
-
<p>Some of the environments are:</p>
|
97 |
-
<ul>
|
98 |
-
<li>City: This is the default environment where you can drive in a urban area with buildings, roads, bridges, etc.</li>
|
99 |
-
<li>Offroad: This is an environment where you can drive in a rural area with dirt roads, hills, trees, etc.</li>
|
100 |
-
<li>Desert: This is an environment where you can drive in a sandy area with dunes, rocks, cacti, etc.</li>
|
101 |
-
<li>Snowy: This is an environment where you can drive in a snowy area with ice, snowmen, igloos, etc.</li>
|
102 |
-
</ul>
|
103 |
-
<h3>Collect rewards and unlock new cars</h3>
|
104 |
-
<p>As you play Extreme Car Driving Simulator, you can collect rewards and unlock new cars. You can collect rewards by completing achievements, missions, or daily tasks. You can also find coins and gems scattered around the map. You can use these currencies to buy and upgrade new cars. There are over 20 cars to choose from, each with different features and performance. You can also customize your cars by changing their color, wheels, spoilers, etc.</p>
|
105 |
-
<h2>Conclusion</h2>
|
106 |
-
<p>Extreme Car Driving Simulator is a fun and realistic car simulator game that lets you drive fast cars and perform stunts in an open world environment. You can also download the mod apk version of the game to enjoy unlimited money, no ads, all cars unlocked, and all features unlocked. To download and install the mod apk version of Extreme Car Driving Simulator, follow the steps mentioned above. Also, don't forget to check out some tips and tricks for playing the game better.</p>
|
107 |
-
<h2>FAQs</h2>
|
108 |
-
<p>Here are some frequently asked questions about Extreme Car Driving Simulator:</p>
|
109 |
-
<h4>Q: Is Extreme Car Driving Simulator free to play?</h4>
|
110 |
-
<p>A: Yes, Extreme Car Driving Simulator is free to play on Android devices. However, it contains ads and in-app purchases that may affect your gaming experience. You can download the mod apk version of the game to remove ads and get unlimited money.</p>
|
111 |
-
<h4>Q: Is Extreme Car Driving Simulator safe to download?</h4>
|
112 |
-
<p>A: Yes, Extreme Car Driving Simulator is safe to download from Google Play Store or other trusted sources. However, be careful when downloading the mod apk version of the game from unknown sources as they may contain viruses or malware that may harm your device.</p>
|
113 |
-
<h4>Q: How do I update Extreme Car Driving Simulator?</h4>
|
114 |
-
<p>A: You can update Extreme Car Driving Simulator by visiting Google Play Store or other sources where you downloaded the game from. However, if you are using the mod apk version of the game, you may need to uninstall it and download the latest version from [1](https://apkdone.com/extreme-car-driving-simulator/).</p>
|
115 |
-
<h4>Q: How do I contact the developers of Extreme Car Driving Simulator <h4>Q: How do I contact the developers of Extreme Car Driving Simulator?</h4>
|
116 |
-
<p>A: You can contact the developers of Extreme Car Driving Simulator by visiting their website [2](https://www.axesinmotion.com/) or their Facebook page [3](https://www.facebook.com/AxesInMotion/). You can also send them an email at [email protected].</p>
|
117 |
-
<h4>Q: Can I play Extreme Car Driving Simulator offline?</h4>
|
118 |
-
<p>A: Yes, you can play Extreme Car Driving Simulator offline without an internet connection. However, some features of the game may not work properly or may require an update. You can also play the game online with other players and compete in leaderboards and rankings.</p> 197e85843d<br />
|
119 |
-
<br />
|
120 |
-
<br />
spaces/1phancelerku/anime-remove-background/Animal Kingdom MOD APK Everything You Need to Know About this Amazing Game.md
DELETED
@@ -1,110 +0,0 @@
<h1>Animal Kingdom APK Mod: A Fun and Addictive Adventure Game</h1>
<p>Do you love animals and adventure games? If yes, then you should try Animal Kingdom APK Mod, a game that lets you build your own animal kingdom, raid other players' islands, and collect treasure island coins. In this article, we will tell you everything you need to know about this game, including its features, how to download and install it, how to play it, and its pros and cons.</p>
<h2>animal kingdom apk mod</h2><br /><p><b><b>Download File</b> ✅ <a href="https://jinyurl.com/2uNLzR">https://jinyurl.com/2uNLzR</a></b></p><br /><br />
<h2>What is Animal Kingdom APK Mod?</h2>
<p>Animal Kingdom APK Mod is a modified version of Animal Kingdom, a popular game developed by Playrix. It is an adventure game that allows you to play with millions of players around the globe in this addictive animal adventure game. You can build islands and bridges, raid lands, and collect treasure island coins. You can also explore the animal island, steal coins, and build your kingdom to become the ultimate raid master.</p>
<h3>Features of Animal Kingdom APK Mod</h3>
<p>Animal Kingdom APK Mod has many features that make it more fun and enjoyable than the original game. Here are some of them:</p>
<h4>- Unlocked islands and animals</h4>
<p>With Animal Kingdom APK Mod, you can access all the islands and animals in the game without spending any money or waiting for hours. You can choose from a variety of animals, such as lions, tigers, bears, pandas, elephants, monkeys, and more. You can also customize your islands with different themes, such as jungle, desert, ice, candy, and more.</p>
<p>animal kingdom mod apk latest version<br /> animal kingdom mod apk unlimited coins<br /> animal kingdom mod apk download for android<br /> animal kingdom mod apk free shopping<br /> animal kingdom mod apk offline<br /> animal kingdom mod apk no ads<br /> animal kingdom mod apk hack<br /> animal kingdom mod apk revdl<br /> animal kingdom mod apk rexdl<br /> animal kingdom mod apk happymod<br /> animal kingdom mod apk 2023<br /> animal kingdom mod apk android 1<br /> animal kingdom mod apk 12.8.3<br /> animal kingdom mod apk 12.7.2<br /> animal kingdom mod apk 12.6.1<br /> animal kingdom adventure game mod apk<br /> animal kingdom battle simulator 3d mod apk<br /> animal kingdom online mod apk<br /> animal kingdom wild lands mod apk<br /> animal kingdom wildlife park mod apk<br /> animal kingdom zoo tycoon mod apk<br /> animal kingdom zoo simulator mod apk<br /> animal kingdom zoo craft mod apk<br /> animal kingdom zoo builder mod apk<br /> animal kingdom zoo world mod apk<br /> animal kingdom survival simulator mod apk<br /> animal kingdom safari craft mod apk<br /> animal kingdom safari hunting 3d mod apk<br /> animal kingdom safari shooter 3d mod apk<br /> animal kingdom safari sniper hunter 3d mod apk<br /> animal kingdom dinosaur hunter 3d mod apk<br /> animal kingdom dinosaur world 3d mod apk<br /> animal kingdom dinosaur simulator 3d mod apk<br /> animal kingdom dinosaur rampage 3d mod apk<br /> animal kingdom dinosaur attack 3d mod apk<br /> animal kingdom farm simulator 3d mod apk<br /> animal kingdom farm frenzy 3d mod apk<br /> animal kingdom farm story 3d mod apk<br /> animal kingdom farm village 3d mod apk<br /> animal kingdom farm rescue 3d mod apk</p>
<h4>- Unlimited coins and gems</h4>
<p>Coins and gems are the main currencies in Animal Kingdom. You need them to buy new animals, upgrade your islands, spin the wheel of fortune, and more. With Animal Kingdom APK Mod, you can get unlimited coins and gems for free. You can use them to buy anything you want in the game without worrying about running out.</p>
<h4>- No ads and root required</h4>
<p>Animal Kingdom APK Mod is free from annoying ads that interrupt your gameplay. You can enjoy the game without any distractions or interruptions. Moreover, you don't need to root your device to install Animal Kingdom APK Mod. You can simply download the APK file and install it on your device without any hassle.</p>
<h2>How to download and install Animal Kingdom APK Mod?</h2>
<p>If you want to download and install Animal Kingdom APK Mod on your device, you need to follow these simple steps:</p>
<h3>Step 1: Download the APK file from a trusted source</h3>
<p>You can download the APK file of Animal Kingdom APK Mod from a trusted source like [Moddroid]. Make sure you download the latest version of the game that is compatible with your device.</p>
<h3>Step 2: Enable unknown sources on your device</h3>
<p>Before you install the APK file, you need to enable unknown sources on your device. This will allow you to install apps from sources other than the Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on.</p>
<h3>Step 3: Install the APK file and launch the game</h3>
<p>After you download the APK file, locate it on your device and tap on it to install it. Follow the instructions on the screen to complete the installation. Once the installation is done, launch the game and enjoy Animal Kingdom APK Mod.</p>
<h2>How to play Animal Kingdom APK Mod?</h2>
<p>Playing Animal Kingdom APK Mod is easy and fun. Here are some tips on how to play the game:</p>
<h3>Build your own animal kingdom</h3>
<p>The main goal of Animal Kingdom APK Mod is to build your own animal kingdom. You can do this by buying new animals, upgrading your islands, and decorating them with various items. You can also unlock new islands and bridges as you progress in the game. To buy new animals, you need to spin the wheel of fortune, which costs coins. You can also get animals from treasure chests, which cost gems. To upgrade your islands, you need to spend coins and gems as well. You can also earn coins and gems by completing quests and achievements.</p>
<h3>Raid other players' islands</h3>
<p>Another fun aspect of Animal Kingdom APK Mod is raiding other players' islands. You can do this by tapping on the map icon and choosing an island to attack. You can also use the search function to find a specific player or a random one. Once you select an island, you can use your animals to raid it and steal coins from it. You can also destroy buildings and decorations to get more coins. However, be careful, as other players can also raid your island and take your coins. You can protect your island by using shields, which cost gems.</p>
<h3>Collect treasure island coins and gems</h3>
<p>Treasure island coins and gems are special currencies that you can use to buy exclusive items in the game. You can get treasure island coins and gems by playing the treasure island mode, which is unlocked after you reach level 10. In this mode, you can explore different islands and collect treasure chests that contain coins and gems. You can also find hidden items and secrets that give you more rewards. However, you need to be quick, as the treasure island mode has a time limit.</p>
<h2>Pros and cons of Animal Kingdom APK Mod</h2>
<p>Animal Kingdom APK Mod is a great game that offers many benefits, but it also has some drawbacks. Here are some of them:</p>
<h3>Pros</h3>
<h4>- Fun and engaging gameplay</h4>
<p>Animal Kingdom APK Mod is a fun and engaging game that will keep you entertained for hours. You can build your own animal kingdom, raid other players' islands, collect treasure island coins and gems, and more. You can also play with millions of players around the world and chat with them in the game.</p>
<h4>- Beautiful graphics and sound effects</h4>
<p>Animal Kingdom APK Mod has beautiful graphics and sound effects that make the game more realistic and immersive. You can enjoy the colorful and detailed graphics of the animals, islands, buildings, and items in the game. You can also listen to the soothing and cheerful sound effects of the animals, coins, chests, and more.</p>
<h4>- Variety of animals and islands to explore</h4>
<p>Animal Kingdom APK Mod has a variety of animals and islands to explore in the game. You can choose from a wide range of animals, such as lions, tigers, bears, pandas, elephants, monkeys, and more. You can also customize your islands with different themes, such as jungle, desert, ice, candy, and more. You can also unlock new islands and bridges as you progress in the game.</p>
<h3>Cons</h3>
<h4>- Some bugs and glitches may occur</h4>
<p>Animal Kingdom APK Mod is not a perfect game, and it may have some bugs and glitches that may affect your gameplay. For example, some users have reported that the game crashes or freezes sometimes, or that some features do not work properly. If you encounter any problems with the game, you can try to update it or reinstall it.</p>
<h4>- Requires internet connection to play online mode</h4>
<p>Animal Kingdom APK Mod requires an internet connection to play online mode, which is where you can play with other players and raid their islands. If you don't have a stable internet connection or if you want to play offline mode, you may not be able to enjoy all the features of the game.</p>
<h2>Conclusion</h2>
<p>Animal Kingdom APK Mod is a fun and addictive adventure game that lets you build your own animal kingdom, raid other players' islands, and collect treasure island coins. It has many features that make it more enjoyable than the original game, such as unlocked islands and animals, unlimited coins and gems, no ads and root required. However, it also has some drawbacks, such as some bugs and glitches, and the need for an internet connection to play online mode. If you are looking for a fun and addictive adventure game that lets you play with animals and islands, you should give Animal Kingdom APK Mod a try. You can download it from a trusted source like [Moddroid] and install it on your device easily. You can also check out the official website of Animal Kingdom for more information and updates.</p>
<p>Here are some FAQs that you may have about Animal Kingdom APK Mod:</p>
<h4>Q: Is Animal Kingdom APK Mod safe to use?</h4>
<p>A: Yes, Animal Kingdom APK Mod is safe to use, as long as you download it from a trusted source like [Moddroid]. However, you should always be careful when downloading and installing any modded apps, as they may contain viruses or malware that can harm your device. You should also scan the APK file with an antivirus app before installing it.</p>
<h4>Q: Can I play Animal Kingdom APK Mod with my friends?</h4>
<p>A: Yes, you can play Animal Kingdom APK Mod with your friends, as long as they also have the same version of the game installed on their devices. You can add them as friends in the game and chat with them. You can also raid their islands and steal their coins, or help them defend their islands from other players.</p>
<h4>Q: How can I get more coins and gems in Animal Kingdom APK Mod?</h4>
<p>A: There are many ways to get more coins and gems in Animal Kingdom APK Mod. You can get unlimited coins and gems for free by using the modded features of the game. You can also earn coins and gems by completing quests and achievements, spinning the wheel of fortune, opening treasure chests, raiding other players' islands, and playing the treasure island mode. You can also buy coins and gems with real money if you want to support the developers of the game.</p>
<h4>Q: What are the minimum requirements to play Animal Kingdom APK Mod?</h4>
<p>A: The minimum requirements to play Animal Kingdom APK Mod are as follows:</p>
<table>
<tr><td>Operating system</td><td>Android 5.0 or higher</td></tr>
<tr><td>RAM</td><td>2 GB or higher</td></tr>
<tr><td>Storage space</td><td>100 MB or higher</td></tr>
<tr><td>Internet connection</td><td>Required for online mode</td></tr>
</table>
<h4>Q: What are some alternatives to Animal Kingdom APK Mod?</h4>
<p>A: If you like Animal Kingdom APK Mod, you may also like some other adventure games that involve animals and islands, such as:</p>
<ul>
<li>[Animal Crossing: Pocket Camp]: A game that lets you create your own campsite, interact with animal friends, and explore various locations.</li>
<li>[ZooCraft: Animal Family]: A game that lets you build your own zoo, breed new animals, and discover new species.</li>
<li>[Island King]: A game that lets you spin the wheel of fortune, attack other players' islands, and build your own island paradise.</li>
</ul>
spaces/1phancelerku/anime-remove-background/Experience the Thrill of Hill Climb Racing on Your PC for Free.md
DELETED
@@ -1,108 +0,0 @@
<h1>How to Download Hill Climb Racing for PC Free</h1>
<p>Hill Climb Racing is one of the most addictive and entertaining physics-based driving games ever made. It features a variety of vehicles, stages, challenges, and upgrades that will keep you hooked for hours. You can race your way uphill in different environments, perform stunts, collect coins, and unlock new cars and parts. Hill Climb Racing is available for free on Android and iOS devices, but did you know that you can also play it on your PC? In this article, we will show you how to download hill climb racing for pc free using three different methods. Whether you want to use the Microsoft Store, direct download, or Steam, we have you covered. Follow these simple steps and enjoy this fun and exciting game on your computer.</p>
<h2>download hill climb racing for pc free</h2><br /><p><b><b>DOWNLOAD</b> ○○○ <a href="https://jinyurl.com/2uNKbk">https://jinyurl.com/2uNKbk</a></b></p><br /><br />
<h2>Method 1: Microsoft Store</h2>
<p>The Microsoft Store is a convenient way to get games for your PC. It offers a variety of free and paid games that you can download directly from the store. You don't need any additional software or accounts to use this method. Here's how to download hill climb racing for pc free from the Microsoft Store:</p>
<ol>
<li>Open the Microsoft Store. You can find it on your Start menu or by pressing Windows Key + S and typing "Microsoft Store".</li>
<li>Click Gaming in the sidebar. It has a video game controller icon.</li>
<li>Select Hill Climb Racing from the list of games. You can also use the search bar to find it faster.</li>
<li>Purchase the game (if needed). Hill Climb Racing is free to play, but it has some optional in-app purchases that you can buy if you want. Click the Get button on the game's info page to start the download. If you want to buy any in-app purchases, click the Buy button instead.</li>
<li>Install the game. The download should start automatically after you click Get or Buy. You can check the progress on your Downloads & Updates page. Once it's done, you can launch the game from your Start menu or by clicking Play on the game's info page.</li>
<li>Play the game. Enjoy racing uphill in this physics-based driving game. You can use your keyboard or mouse to control your car, or connect a controller if you prefer. You can also adjust the graphics settings, sound effects, music, and language from the options menu.</li>
</ol>
<h2>Method 2: Direct Download</h2>
<p>If you don't want to use the Microsoft Store, you can also download hill climb racing for pc free directly from the official website of the game. This method requires you to have an internet browser and a file extractor program like WinRAR or 7-Zip. Here's how to do it:</p>
<ol>
<li>Search for "hill climb racing official website" in Google or any other search engine. The first result should be <a href="https://fingersoft.com/games/hill-climb-racing/">https://fingersoft.com/games/hill-climb-racing/</a>, which is the official website of Fingersoft, the developer of Hill Climb Racing.</li>
<li>Click the Download for Windows button on the website. It will take you to another page where you can download the game as a ZIP file. Click the Download Now button and save the file to your preferred location.</li>
<li>Extract the ZIP file. You will need a file extractor program like WinRAR or 7-Zip to do this. Right-click on the ZIP file and select Extract Here or Extract to Hill Climb Racing (depending on your program). It will create a folder with the same name as the ZIP file.</li>
<li>Install the game. Open the folder and double-click on the Hill Climb Racing.exe file. It will launch the game installer. Follow the instructions on the screen to install the game on your PC. You can choose where to install it and create a desktop shortcut if you want.</li>
<li>Play the game. Once the installation is complete, you can launch the game from your Start menu or desktop shortcut. You can also open the folder where you installed it and double-click on the Hill Climb Racing.exe file. Enjoy racing uphill in this physics-based driving game. You can use your keyboard or mouse to control your car, or connect a controller if you prefer. You can also adjust the graphics settings, sound effects, music, and language from the options menu.</li>
</ol>
<h2>Method 3: Steam</h2>
<p>Steam is a popular platform for gaming on PC. It offers a huge library of games that you can buy, download, and play online. You can also access various features like achievements, leaderboards, chat, and more. To use this method, you will need to download and install Steam on your PC, create an account, and log in to Steam. Here's how to download hill climb racing for pc free from Steam:</p>
<ol>
<li>Download and install Steam on your PC. You can get it from <a href="https://store.steampowered.com/about/">https://store.steampowered.com/about/</a>, which is the official website of Steam. Click the Install Steam button and save the file to your preferred location. Run the file and follow the instructions on the screen to install Steam on your PC.</li>
<li>Create an account and log in to Steam. You will need an email address and a password to create an account. You can also use your Facebook or Google account to sign up. Once you have an account, log in to Steam with your username and password.</li>
<li>Find and purchase (if needed) Hill Climb Racing on Steam. You can use the search bar at the top of the Steam window to find it faster. Alternatively, you can browse through the categories and genres in the sidebar. Hill Climb Racing is under Casual, Indie, Racing, Simulation, and Sports. Click on the game's name or image to go to its info page.</li>
<li>Download and install Hill Climb Racing from Steam. Hill Climb Racing is free to play, but it has some optional in-app purchases that you can buy if you want. Click the Play Game button on the game's info page to start the download. You can check the progress on your Library page. Once it's done, you can launch the game from your Library or by clicking Play Game on the game's info page.</li>
<li>Play Hill Climb Racing from Steam. Enjoy racing uphill in this physics-based driving game. You can use your keyboard or mouse to control your car, or connect a controller if you prefer. You can also adjust the graphics settings, sound effects, music, and language from the options menu. You can also access various features like achievements, leaderboards, chat, and more from Steam.</li>
</ol>
<h2>Conclusion</h2>
<p>Hill Climb Racing is a fun and addictive physics-based driving game that you can play on your PC for free using different methods. Whether you use the Microsoft Store, direct download, or Steam, you can enjoy this game on your computer with ease. Here are some tips and tricks for playing hill climb racing on PC:</p>
<p>How to download hill climb racing game for pc without bluestacks<br /> Hill climb racing free download for windows 10 laptop<br /> Best settings for hill climb racing on pc with emulator<br /> Hill climb racing pc version online play<br /> Download hill climb racing mod apk for pc unlimited money<br /> Hill climb racing 2 download for pc windows 7<br /> Hill climb racing cheats and hacks for pc<br /> Hill climb racing offline installer for pc<br /> Hill climb racing pc gameplay and review<br /> Hill climb racing tips and tricks for pc beginners<br /> Hill climb racing latest update download for pc<br /> Hill climb racing system requirements for pc<br /> Hill climb racing alternatives and similar games for pc<br /> Hill climb racing multiplayer mode on pc<br /> Hill climb racing achievements and leaderboards for pc<br /> Hill climb racing best vehicles and upgrades for pc<br /> Hill climb racing custom maps and levels for pc<br /> Hill climb racing fan art and wallpapers for pc<br /> Hill climb racing bugs and glitches fix for pc<br /> Hill climb racing developer contact and support for pc<br /> Download hill climb racing old version for pc<br /> Hill climb racing backup and restore data on pc<br /> Hill climb racing keyboard controls and shortcuts for pc<br /> Hill climb racing hidden features and secrets for pc<br /> Hill climb racing fun facts and trivia for pc<br /> Download hill climb racing 3d for pc<br /> Hill climb racing soundtracks and music for pc<br /> Hill climb racing memes and jokes for pc<br /> Hill climb racing merchandise and gifts for pc fans<br /> Hill climb racing community and forums for pc players<br /> Download hill climb racing hd graphics for pc<br /> Hill climb racing challenges and competitions for pc<br /> Hill climb racing new cars and stages for pc<br /> Hill climb racing easter eggs and references for pc<br /> Hill climb racing ratings and reviews for pc<br /> Download hill climb racing windows store edition for pc<br /> Hill climb racing no ads and in-app purchases for pc<br /> Hill climb racing languages and subtitles for pc<br /> Hill climb racing parental guide and age rating for pc<br /> Hill climb racing awards and nominations for pc<br /> Download hill climb racing mac version for free<br /> Hill climb racing vr mode and headset compatibility for pc<br /> Hill climb racing speedrun and world record for pc<br /> Hill climb racing wiki and guide for pc<br /> Download hill climb racing cracked version for free on pc<br /> Hill climb racing mod menu and codes for pc<br /> Hill climb racing best moments and highlights for pc<br /> Download hill climb racing from official website for free on pc<br /> Hill climb racing comparison and difference between mobile and pc versions</p>
<ul>
<li>Upgrade your car regularly. You can use the coins that you collect during each race to upgrade your car's engine, suspension, tires, and fuel capacity. This will help you improve your performance and reach higher distances.</li>
<li>Balance your car carefully. You need to use your brake and gas pedals wisely to avoid flipping over or running out of fuel. Try to keep your car stable and avoid hitting obstacles or falling into gaps.</li>
<li>Explore different stages and vehicles. Hill Climb Racing has a variety of stages and vehicles that you can unlock as you progress in the game. Each stage has its own terrain, obstacles, and challenges that require different strategies and skills. Each vehicle has its own characteristics, advantages, and disadvantages that affect how it handles and performs.</li>
</ul>
<p>We hope this article helped you learn how to download hill climb racing for pc free using different methods. If you have any questions or feedback, please feel free to share them with us in the comments section below. We would love to hear from you and help you with any issues you may have. Happy racing!</p>
<h2>FAQs</h2>
<p>Here are some frequently asked questions about hill climb racing and how to download it for pc free:</p>
<h3>What are the minimum requirements for playing hill climb racing on PC?</h3>
<p>The minimum requirements for playing hill climb racing on PC are as follows:</p>
<table>
<tr><th>OS</th><th>Processor</th><th>Memory</th><th>Graphics</th><th>Storage</th></tr>
<tr><td>Windows 7 or higher</td><td>1 GHz or faster</td><td>1 GB RAM</td><td>DirectX 9 compatible</td><td>100 MB available space</td></tr>
</table>
<p>Note that these are the minimum requirements and your performance may vary depending on your system configuration and settings.</p>
<h3>Is hill climb racing compatible with Windows 10?</h3>
<p>Yes, hill climb racing is compatible with Windows 10. You can download it from the Microsoft Store, direct download, or Steam without any issues. However, you may need to update your drivers and software to ensure optimal performance and compatibility.</p>
<h3>Can I play hill climb racing offline?</h3>
<p>Yes, you can play hill climb racing offline. You don't need an internet connection to play the game once you have downloaded and installed it on your PC. However, you may need an internet connection to access some features like in-app purchases, leaderboards, achievements, and updates.</p>
<h3>Can I use a controller or a steering wheel to play hill climb racing on PC?</h3>
<p>Yes, you can use a controller or a steering wheel to play hill climb racing on PC. The game supports various input devices and you can customize the controls from the options menu. You can also use your keyboard or mouse if you prefer.</p>
<h3>Can I play hill climb racing with my friends online?</h3>
<p>No, hill climb racing does not have an online multiplayer mode. You can only play the game solo and compete with yourself or other players on the leaderboards. However, you can share your screenshots and videos of your gameplay with your friends on social media or chat platforms.</p>
spaces/2ndelement/voicevox/voicevox_engine/acoustic_feature_extractor.py
DELETED
@@ -1,332 +0,0 @@
from abc import abstractmethod
from enum import Enum
from pathlib import Path
from typing import List, Sequence

import numpy


class BasePhoneme(object):
    """
    音素の応用クラス群の抽象基底クラス

    Attributes
    ----------
    phoneme_list : Sequence[str]
        音素のリスト
    num_phoneme : int
        音素リストの要素数
    space_phoneme : str
        読点に値する音素
    """

    phoneme_list: Sequence[str]
    num_phoneme: int
    space_phoneme: str

    def __init__(
        self,
        phoneme: str,
        start: float,
        end: float,
    ):
        self.phoneme = phoneme
        self.start = numpy.round(start, decimals=2)
        self.end = numpy.round(end, decimals=2)

    def __repr__(self):
        return f"Phoneme(phoneme='{self.phoneme}', start={self.start}, end={self.end})"

    def __eq__(self, o: object):
        return isinstance(o, BasePhoneme) and (
            self.phoneme == o.phoneme and self.start == o.start and self.end == o.end
        )

    def verify(self):
        """
        音素クラスとして、データが正しいかassertする
        """
        assert self.phoneme in self.phoneme_list, f"{self.phoneme} is not defined."

    @property
    def phoneme_id(self):
        """
        phoneme_id (phoneme list内でのindex)を取得する
        Returns
        -------
        id : int
            phoneme_idを返す
        """
        return self.phoneme_list.index(self.phoneme)

    @property
    def duration(self):
        """
        音素継続期間を取得する
        Returns
        -------
        duration : int
            音素継続期間を返す
        """
        return self.end - self.start

    @property
    def onehot(self):
        """
        phoneme listの長さ分の0埋め配列のうち、phoneme id番目がTrue(1)の配列を返す
        Returns
        -------
        onehot : numpy.ndarray
            関数内で変更された配列を返す
        """
        array = numpy.zeros(self.num_phoneme, dtype=bool)
        array[self.phoneme_id] = True
        return array

    @classmethod
    def parse(cls, s: str):
        """
        文字列をパースして音素クラスを作る
        Parameters
        ----------
        s : str
            パースしたい文字列

        Returns
        -------
        phoneme : BasePhoneme
            パース結果を用いた音素クラスを返す

        Examples
        --------
        >>> BasePhoneme.parse('1.7425000 1.9125000 o:')
        Phoneme(phoneme='o:', start=1.74, end=1.91)
        """
        words = s.split()
        return cls(
            start=float(words[0]),
            end=float(words[1]),
            phoneme=words[2],
        )

    @classmethod
    @abstractmethod
    def convert(cls, phonemes: List["BasePhoneme"]) -> List["BasePhoneme"]:
        raise NotImplementedError

    @classmethod
    def load_lab_list(cls, path: Path):
        """
        labファイルを読み込む
        Parameters
        ----------
        path : Path
            読み込みたいlabファイルのパス

        Returns
        -------
        phonemes : List[BasePhoneme]
            パース結果を用いた音素クラスを返す
        """
        phonemes = [cls.parse(s) for s in path.read_text().split("\n") if len(s) > 0]
        phonemes = cls.convert(phonemes)

        for phoneme in phonemes:
            phoneme.verify()
        return phonemes

    @classmethod
    def save_lab_list(cls, phonemes: List["BasePhoneme"], path: Path):
        """
        音素クラスのリストをlabファイル形式で保存する
        Parameters
        ----------
        phonemes : List[BasePhoneme]
            保存したい音素クラスのリスト
        path : Path
            labファイルの保存先パス
        """
        text = "\n".join(
            [
                f"{numpy.round(p.start, decimals=2):.2f}\t"
                f"{numpy.round(p.end, decimals=2):.2f}\t"
                f"{p.phoneme}"
                for p in phonemes
            ]
        )
        path.write_text(text)


class JvsPhoneme(BasePhoneme):
    """
    JVS(Japanese versatile speech)コーパスに含まれる音素群クラス

    Attributes
    ----------
    phoneme_list : Sequence[str]
        音素のリスト
    num_phoneme : int
        音素リストの要素数
    space_phoneme : str
        読点に値する音素
    """

    phoneme_list = (
        "pau", "I", "N", "U", "a", "b", "by", "ch", "cl", "d", "dy", "e", "f",
        "g", "gy", "h", "hy", "i", "j", "k", "ky", "m", "my", "n", "ny", "o",
        "p", "py", "r", "ry", "s", "sh", "t", "ts", "u", "v", "w", "y", "z",
    )
    num_phoneme = len(phoneme_list)
    space_phoneme = "pau"

    @classmethod
    def convert(cls, phonemes: List["JvsPhoneme"]) -> List["JvsPhoneme"]:
        """
        最初と最後のsil(silent)をspace_phoneme(pau)に置き換え(変換)する
        Parameters
        ----------
        phonemes : List[JvsPhoneme]
            変換したいphonemeのリスト

        Returns
        -------
        phonemes : List[JvsPhoneme]
            変換されたphonemeのリスト
        """
        if "sil" in phonemes[0].phoneme:
            phonemes[0].phoneme = cls.space_phoneme
        if "sil" in phonemes[-1].phoneme:
            phonemes[-1].phoneme = cls.space_phoneme
        return phonemes


class OjtPhoneme(BasePhoneme):
    """
    OpenJTalkに含まれる音素群クラス

    Attributes
    ----------
    phoneme_list : Sequence[str]
        音素のリスト
    num_phoneme : int
        音素リストの要素数
    space_phoneme : str
        読点に値する音素
    """

    phoneme_list = (
        "pau", "A", "E", "I", "N", "O", "U", "a", "b", "by", "ch", "cl", "d",
        "dy", "e", "f", "g", "gw", "gy", "h", "hy", "i", "j", "k", "kw", "ky",
        "m", "my", "n", "ny", "o", "p", "py", "r", "ry", "s", "sh", "t", "ts",
        "ty", "u", "v", "w", "y", "z",
    )
    num_phoneme = len(phoneme_list)
    space_phoneme = "pau"

    @classmethod
    def convert(cls, phonemes: List["OjtPhoneme"]):
        """
        最初と最後のsil(silent)をspace_phoneme(pau)に置き換え(変換)する
        Parameters
        ----------
        phonemes : List[OjtPhoneme]
            変換したいphonemeのリスト

        Returns
        -------
        phonemes : List[OjtPhoneme]
            変換されたphonemeのリスト
        """
        if "sil" in phonemes[0].phoneme:
            phonemes[0].phoneme = cls.space_phoneme
        if "sil" in phonemes[-1].phoneme:
            phonemes[-1].phoneme = cls.space_phoneme
        return phonemes


class PhonemeType(str, Enum):
    jvs = "jvs"
    openjtalk = "openjtalk"


phoneme_type_to_class = {
    PhonemeType.jvs: JvsPhoneme,
    PhonemeType.openjtalk: OjtPhoneme,
}
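For context, here is a minimal usage sketch of the phoneme classes above. The import path is an assumption based on the file's location (voicevox_engine/acoustic_feature_extractor.py), and the .lab contents are made up for illustration:

from pathlib import Path

from voicevox_engine.acoustic_feature_extractor import OjtPhoneme  # assumed import path

# A tiny, made-up .lab file: one "<start> <end> <phoneme>" entry per line.
lab_path = Path("example.lab")
lab_path.write_text("0.00 0.30 sil\n0.30 0.45 k\n0.45 0.60 o\n0.60 0.90 sil\n")

# load_lab_list parses each line, converts the leading/trailing "sil" to "pau",
# and verifies every phoneme against OjtPhoneme.phoneme_list.
phonemes = OjtPhoneme.load_lab_list(lab_path)
for p in phonemes:
    print(p.phoneme, p.phoneme_id, p.duration)

# save_lab_list writes the phonemes back out as tab-separated "<start>\t<end>\t<phoneme>" lines.
OjtPhoneme.save_lab_list(phonemes, Path("example_out.lab"))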
spaces/4Taps/SadTalker/src/face3d/models/bfm.py
DELETED
@@ -1,331 +0,0 @@
"""This script defines the parametric 3d face model for Deep3DFaceRecon_pytorch
"""

import numpy as np
import torch
import torch.nn.functional as F
from scipy.io import loadmat
from src.face3d.util.load_mats import transferBFM09
import os

def perspective_projection(focal, center):
    # return p.T (N, 3) @ (3, 3)
    return np.array([
        focal, 0, center,
        0, focal, center,
        0, 0, 1
    ]).reshape([3, 3]).astype(np.float32).transpose()

class SH:
    def __init__(self):
        self.a = [np.pi, 2 * np.pi / np.sqrt(3.), 2 * np.pi / np.sqrt(8.)]
        self.c = [1/np.sqrt(4 * np.pi), np.sqrt(3.) / np.sqrt(4 * np.pi), 3 * np.sqrt(5.) / np.sqrt(12 * np.pi)]


class ParametricFaceModel:
    def __init__(self,
                 bfm_folder='./BFM',
                 recenter=True,
                 camera_distance=10.,
                 init_lit=np.array([
                     0.8, 0, 0, 0, 0, 0, 0, 0, 0
                 ]),
                 focal=1015.,
                 center=112.,
                 is_train=True,
                 default_name='BFM_model_front.mat'):

        if not os.path.isfile(os.path.join(bfm_folder, default_name)):
            transferBFM09(bfm_folder)

        model = loadmat(os.path.join(bfm_folder, default_name))
        # mean face shape. [3*N,1]
        self.mean_shape = model['meanshape'].astype(np.float32)
        # identity basis. [3*N,80]
        self.id_base = model['idBase'].astype(np.float32)
        # expression basis. [3*N,64]
        self.exp_base = model['exBase'].astype(np.float32)
        # mean face texture. [3*N,1] (0-255)
        self.mean_tex = model['meantex'].astype(np.float32)
        # texture basis. [3*N,80]
        self.tex_base = model['texBase'].astype(np.float32)
        # face indices for each vertex that lies in. starts from 0. [N,8]
        self.point_buf = model['point_buf'].astype(np.int64) - 1
        # vertex indices for each face. starts from 0. [F,3]
        self.face_buf = model['tri'].astype(np.int64) - 1
        # vertex indices for 68 landmarks. starts from 0. [68,1]
        self.keypoints = np.squeeze(model['keypoints']).astype(np.int64) - 1

        if is_train:
            # vertex indices for small face region to compute photometric error. starts from 0.
            self.front_mask = np.squeeze(model['frontmask2_idx']).astype(np.int64) - 1
            # vertex indices for each face from small face region. starts from 0. [f,3]
            self.front_face_buf = model['tri_mask2'].astype(np.int64) - 1
            # vertex indices for pre-defined skin region to compute reflectance loss
            self.skin_mask = np.squeeze(model['skinmask'])

        if recenter:
            mean_shape = self.mean_shape.reshape([-1, 3])
            mean_shape = mean_shape - np.mean(mean_shape, axis=0, keepdims=True)
            self.mean_shape = mean_shape.reshape([-1, 1])

        self.persc_proj = perspective_projection(focal, center)
        self.device = 'cpu'
        self.camera_distance = camera_distance
        self.SH = SH()
        self.init_lit = init_lit.reshape([1, 1, -1]).astype(np.float32)

    def to(self, device):
        self.device = device
        for key, value in self.__dict__.items():
            if type(value).__module__ == np.__name__:
                setattr(self, key, torch.tensor(value).to(device))

    def compute_shape(self, id_coeff, exp_coeff):
        """
        Return:
            face_shape       -- torch.tensor, size (B, N, 3)

        Parameters:
            id_coeff         -- torch.tensor, size (B, 80), identity coeffs
            exp_coeff        -- torch.tensor, size (B, 64), expression coeffs
        """
        batch_size = id_coeff.shape[0]
        id_part = torch.einsum('ij,aj->ai', self.id_base, id_coeff)
        exp_part = torch.einsum('ij,aj->ai', self.exp_base, exp_coeff)
        face_shape = id_part + exp_part + self.mean_shape.reshape([1, -1])
        return face_shape.reshape([batch_size, -1, 3])

    def compute_texture(self, tex_coeff, normalize=True):
        """
        Return:
            face_texture     -- torch.tensor, size (B, N, 3), in RGB order, range (0, 1.)

        Parameters:
            tex_coeff        -- torch.tensor, size (B, 80)
        """
        batch_size = tex_coeff.shape[0]
        face_texture = torch.einsum('ij,aj->ai', self.tex_base, tex_coeff) + self.mean_tex
        if normalize:
            face_texture = face_texture / 255.
        return face_texture.reshape([batch_size, -1, 3])

    def compute_norm(self, face_shape):
        """
        Return:
            vertex_norm      -- torch.tensor, size (B, N, 3)

        Parameters:
            face_shape       -- torch.tensor, size (B, N, 3)
        """

        v1 = face_shape[:, self.face_buf[:, 0]]
        v2 = face_shape[:, self.face_buf[:, 1]]
        v3 = face_shape[:, self.face_buf[:, 2]]
        e1 = v1 - v2
        e2 = v2 - v3
        face_norm = torch.cross(e1, e2, dim=-1)
        face_norm = F.normalize(face_norm, dim=-1, p=2)
        face_norm = torch.cat([face_norm, torch.zeros(face_norm.shape[0], 1, 3).to(self.device)], dim=1)

        vertex_norm = torch.sum(face_norm[:, self.point_buf], dim=2)
        vertex_norm = F.normalize(vertex_norm, dim=-1, p=2)
        return vertex_norm

    def compute_color(self, face_texture, face_norm, gamma):
        """
        Return:
            face_color       -- torch.tensor, size (B, N, 3), range (0, 1.)

        Parameters:
            face_texture     -- torch.tensor, size (B, N, 3), from texture model, range (0, 1.)
            face_norm        -- torch.tensor, size (B, N, 3), rotated face normal
            gamma            -- torch.tensor, size (B, 27), SH coeffs
        """
        batch_size = gamma.shape[0]
        v_num = face_texture.shape[1]
        a, c = self.SH.a, self.SH.c
        gamma = gamma.reshape([batch_size, 3, 9])
        gamma = gamma + self.init_lit
        gamma = gamma.permute(0, 2, 1)
        Y = torch.cat([
            a[0] * c[0] * torch.ones_like(face_norm[..., :1]).to(self.device),
            -a[1] * c[1] * face_norm[..., 1:2],
            a[1] * c[1] * face_norm[..., 2:],
            -a[1] * c[1] * face_norm[..., :1],
            a[2] * c[2] * face_norm[..., :1] * face_norm[..., 1:2],
            -a[2] * c[2] * face_norm[..., 1:2] * face_norm[..., 2:],
            0.5 * a[2] * c[2] / np.sqrt(3.) * (3 * face_norm[..., 2:] ** 2 - 1),
            -a[2] * c[2] * face_norm[..., :1] * face_norm[..., 2:],
            0.5 * a[2] * c[2] * (face_norm[..., :1] ** 2 - face_norm[..., 1:2] ** 2)
        ], dim=-1)
        r = Y @ gamma[..., :1]
        g = Y @ gamma[..., 1:2]
        b = Y @ gamma[..., 2:]
        face_color = torch.cat([r, g, b], dim=-1) * face_texture
        return face_color

    def compute_rotation(self, angles):
        """
        Return:
            rot              -- torch.tensor, size (B, 3, 3) pts @ trans_mat

        Parameters:
            angles           -- torch.tensor, size (B, 3), radian
        """

        batch_size = angles.shape[0]
        ones = torch.ones([batch_size, 1]).to(self.device)
        zeros = torch.zeros([batch_size, 1]).to(self.device)
        x, y, z = angles[:, :1], angles[:, 1:2], angles[:, 2:],

        rot_x = torch.cat([
            ones, zeros, zeros,
            zeros, torch.cos(x), -torch.sin(x),
            zeros, torch.sin(x), torch.cos(x)
        ], dim=1).reshape([batch_size, 3, 3])

        rot_y = torch.cat([
            torch.cos(y), zeros, torch.sin(y),
            zeros, ones, zeros,
            -torch.sin(y), zeros, torch.cos(y)
        ], dim=1).reshape([batch_size, 3, 3])

        rot_z = torch.cat([
            torch.cos(z), -torch.sin(z), zeros,
            torch.sin(z), torch.cos(z), zeros,
            zeros, zeros, ones
        ], dim=1).reshape([batch_size, 3, 3])

        rot = rot_z @ rot_y @ rot_x
        return rot.permute(0, 2, 1)

    def to_camera(self, face_shape):
        face_shape[..., -1] = self.camera_distance - face_shape[..., -1]
        return face_shape

    def to_image(self, face_shape):
        """
        Return:
            face_proj        -- torch.tensor, size (B, N, 2), y direction is opposite to v direction

        Parameters:
            face_shape       -- torch.tensor, size (B, N, 3)
        """
        # to image_plane
        face_proj = face_shape @ self.persc_proj
        face_proj = face_proj[..., :2] / face_proj[..., 2:]

        return face_proj

    def transform(self, face_shape, rot, trans):
        """
        Return:
            face_shape       -- torch.tensor, size (B, N, 3) pts @ rot + trans

        Parameters:
            face_shape       -- torch.tensor, size (B, N, 3)
            rot              -- torch.tensor, size (B, 3, 3)
            trans            -- torch.tensor, size (B, 3)
        """
        return face_shape @ rot + trans.unsqueeze(1)

    def get_landmarks(self, face_proj):
        """
        Return:
            face_lms         -- torch.tensor, size (B, 68, 2)

        Parameters:
            face_proj        -- torch.tensor, size (B, N, 2)
        """
        return face_proj[:, self.keypoints]

    def split_coeff(self, coeffs):
        """
        Return:
            coeffs_dict      -- a dict of torch.tensors

        Parameters:
            coeffs           -- torch.tensor, size (B, 256)
        """
        id_coeffs = coeffs[:, :80]
        exp_coeffs = coeffs[:, 80: 144]
        tex_coeffs = coeffs[:, 144: 224]
        angles = coeffs[:, 224: 227]
        gammas = coeffs[:, 227: 254]
        translations = coeffs[:, 254:]
        return {
            'id': id_coeffs,
            'exp': exp_coeffs,
            'tex': tex_coeffs,
            'angle': angles,
            'gamma': gammas,
            'trans': translations
        }

    def compute_for_render(self, coeffs):
        """
        Return:
            face_vertex      -- torch.tensor, size (B, N, 3), in camera coordinate
            face_color       -- torch.tensor, size (B, N, 3), in RGB order
            landmark         -- torch.tensor, size (B, 68, 2), y direction is opposite to v direction
        Parameters:
            coeffs           -- torch.tensor, size (B, 257)
        """
        coef_dict = self.split_coeff(coeffs)
        face_shape = self.compute_shape(coef_dict['id'], coef_dict['exp'])
        rotation = self.compute_rotation(coef_dict['angle'])

        face_shape_transformed = self.transform(face_shape, rotation, coef_dict['trans'])
        face_vertex = self.to_camera(face_shape_transformed)

        face_proj = self.to_image(face_vertex)
        landmark = self.get_landmarks(face_proj)

        face_texture = self.compute_texture(coef_dict['tex'])
        face_norm = self.compute_norm(face_shape)
        face_norm_roted = face_norm @ rotation
        face_color = self.compute_color(face_texture, face_norm_roted, coef_dict['gamma'])

        return face_vertex, face_texture, face_color, landmark

    def compute_for_render_woRotation(self, coeffs):
        """
        Return:
            face_vertex      -- torch.tensor, size (B, N, 3), in camera coordinate
            face_color       -- torch.tensor, size (B, N, 3), in RGB order
            landmark         -- torch.tensor, size (B, 68, 2), y direction is opposite to v direction
        Parameters:
            coeffs           -- torch.tensor, size (B, 257)
        """
        coef_dict = self.split_coeff(coeffs)
        face_shape = self.compute_shape(coef_dict['id'], coef_dict['exp'])
        # rotation = self.compute_rotation(coef_dict['angle'])

        # face_shape_transformed = self.transform(face_shape, rotation, coef_dict['trans'])
        face_vertex = self.to_camera(face_shape)

        face_proj = self.to_image(face_vertex)
        landmark = self.get_landmarks(face_proj)

        face_texture = self.compute_texture(coef_dict['tex'])
        face_norm = self.compute_norm(face_shape)
        face_norm_roted = face_norm  # @ rotation
        face_color = self.compute_color(face_texture, face_norm_roted, coef_dict['gamma'])

        return face_vertex, face_texture, face_color, landmark


if __name__ == '__main__':
    transferBFM09()
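For context, a minimal sketch of how ParametricFaceModel above might be driven. It assumes the BFM_model_front.mat data is available under ./BFM (the model cannot be constructed without it) and that the module is importable as src.face3d.models.bfm; the coefficients here are random, only to show the expected shapes:

import torch

from src.face3d.models.bfm import ParametricFaceModel  # assumed import path

face_model = ParametricFaceModel(bfm_folder='./BFM', is_train=False)
face_model.to('cpu')  # converts the numpy buffers (bases, triangles, ...) into torch tensors

# 80 id + 64 exp + 80 tex + 3 angle + 27 gamma + 3 trans = 257 coefficients per sample.
coeffs = 0.1 * torch.randn(2, 257)
face_vertex, face_texture, face_color, landmark = face_model.compute_for_render(coeffs)
print(face_vertex.shape, face_color.shape, landmark.shape)  # (2, N, 3), (2, N, 3), (2, 68, 2)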
spaces/801artistry/RVC801/julius/bands.py
DELETED
@@ -1,119 +0,0 @@
# File under the MIT license, see https://github.com/adefossez/julius/LICENSE for details.
# Author: adefossez, 2020
"""
Decomposition of a signal over frequency bands in the waveform domain.
"""
from typing import Optional, Sequence
import torch

from .core import mel_frequencies
from .lowpass import LowPassFilters
from .utils import simple_repr


class SplitBands(torch.nn.Module):
    """
    Decomposes a signal over the given frequency bands in the waveform domain using
    a cascade of low pass filters as implemented by `julius.lowpass.LowPassFilters`.
    You can either specify explicitely the frequency cutoffs, or just the number of bands,
    in which case the frequency cutoffs will be spread out evenly in mel scale.

    Args:
        sample_rate (float): Sample rate of the input signal in Hz.
        n_bands (int or None): number of bands, when not giving them explictely with `cutoffs`.
            In that case, the cutoff frequencies will be evenly spaced in mel-space.
        cutoffs (list[float] or None): list of frequency cutoffs in Hz.
        pad (bool): if True, appropriately pad the input with zero over the edge. If `stride=1`,
            the output will have the same length as the input.
        zeros (float): Number of zero crossings to keep. See `LowPassFilters` for more informations.
        fft (bool or None): See `LowPassFilters` for more info.

    ..note::
        The sum of all the bands will always be the input signal.

    ..warning::
        Unlike `julius.lowpass.LowPassFilters`, the cutoffs frequencies must be provided in Hz along
        with the sample rate.

    Shape:

        - Input: `[*, T]`
        - Output: `[B, *, T']`, with `T'=T` if `pad` is True.
            If `n_bands` was provided, `B = n_bands` otherwise `B = len(cutoffs) + 1`

    >>> bands = SplitBands(sample_rate=128, n_bands=10)
    >>> x = torch.randn(6, 4, 1024)
    >>> list(bands(x).shape)
    [10, 6, 4, 1024]
    """

    def __init__(self, sample_rate: float, n_bands: Optional[int] = None,
                 cutoffs: Optional[Sequence[float]] = None, pad: bool = True,
                 zeros: float = 8, fft: Optional[bool] = None):
        super().__init__()
        if (cutoffs is None) + (n_bands is None) != 1:
            raise ValueError("You must provide either n_bands, or cutoffs, but not boths.")

        self.sample_rate = sample_rate
        self.n_bands = n_bands
        self._cutoffs = list(cutoffs) if cutoffs is not None else None
        self.pad = pad
        self.zeros = zeros
        self.fft = fft

        if cutoffs is None:
            if n_bands is None:
                raise ValueError("You must provide one of n_bands or cutoffs.")
            if not n_bands >= 1:
                raise ValueError(f"n_bands must be greater than one (got {n_bands})")
            cutoffs = mel_frequencies(n_bands + 1, 0, sample_rate / 2)[1:-1]
        else:
            if max(cutoffs) > 0.5 * sample_rate:
                raise ValueError("A cutoff above sample_rate/2 does not make sense.")
        if len(cutoffs) > 0:
            self.lowpass = LowPassFilters(
                [c / sample_rate for c in cutoffs], pad=pad, zeros=zeros, fft=fft)
        else:
            # Here I cannot make both TorchScript and MyPy happy.
            # I miss the good old times, before all this madness was created.
            self.lowpass = None  # type: ignore

    def forward(self, input):
        if self.lowpass is None:
            return input[None]
        lows = self.lowpass(input)
        low = lows[0]
        bands = [low]
        for low_and_band in lows[1:]:
            # Get a bandpass filter by substracting lowpasses
            band = low_and_band - low
            bands.append(band)
            low = low_and_band
        # Last band is whatever is left in the signal
        bands.append(input - low)
        return torch.stack(bands)

    @property
    def cutoffs(self):
        if self._cutoffs is not None:
            return self._cutoffs
        elif self.lowpass is not None:
            return [c * self.sample_rate for c in self.lowpass.cutoffs]
        else:
            return []

    def __repr__(self):
        return simple_repr(self, overrides={"cutoffs": self._cutoffs})


def split_bands(signal: torch.Tensor, sample_rate: float, n_bands: Optional[int] = None,
                cutoffs: Optional[Sequence[float]] = None, pad: bool = True,
                zeros: float = 8, fft: Optional[bool] = None):
    """
    Functional version of `SplitBands`, refer to this class for more information.

    >>> x = torch.randn(6, 4, 1024)
    >>> list(split_bands(x, sample_rate=64, cutoffs=[12, 24]).shape)
    [3, 6, 4, 1024]
    """
    return SplitBands(sample_rate, n_bands, cutoffs, pad, zeros, fft).to(signal)(signal)
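For context, a minimal usage sketch of SplitBands / split_bands above; it assumes torch and the julius package are installed, and the input is just random noise:

import torch

from julius.bands import SplitBands, split_bands

x = torch.randn(4, 1, 16000)                      # a batch of 4 mono signals at 16 kHz
bands = split_bands(x, sample_rate=16000, n_bands=8)
print(list(bands.shape))                          # [8, 4, 1, 16000]

# The decomposition is lossless: summing the bands recovers the input (up to float error).
print(torch.allclose(bands.sum(dim=0), x, atol=1e-5))

# Module form with explicit cutoffs in Hz: low / mid / high -> 3 bands.
module = SplitBands(sample_rate=16000, cutoffs=[300.0, 3000.0])
print(list(module(x).shape))                      # [3, 4, 1, 16000]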

spaces/A666sxr/Genshin_TTS/text/cleaners.py
DELETED
@@ -1,188 +0,0 @@
import re
#from text.japanese import japanese_to_romaji_with_accent, japanese_to_ipa, japanese_to_ipa2, japanese_to_ipa3
from text.mandarin import number_to_chinese, chinese_to_bopomofo, latin_to_bopomofo, chinese_to_romaji, chinese_to_lazy_ipa, chinese_to_ipa, chinese_to_ipa2
# from text.sanskrit import devanagari_to_ipa
from text.english import english_to_lazy_ipa, english_to_ipa2
# from text.thai import num_to_thai, latin_to_thai
# from text.shanghainese import shanghainese_to_ipa
# from text.cantonese import cantonese_to_ipa
# from text.ngu_dialect import ngu_dialect_to_ipa


def japanese_cleaners(text):
    text = japanese_to_romaji_with_accent(text)
    if re.match('[A-Za-z]', text[-1]):
        text += '.'
    return text


def japanese_cleaners2(text):
    return japanese_cleaners(text).replace('ts', 'ʦ').replace('...', '…')


def korean_cleaners(text):
    '''Pipeline for Korean text'''
    text = latin_to_hangul(text)
    text = number_to_hangul(text)
    text = divide_hangul(text)
    if re.match('[\u3131-\u3163]', text[-1]):
        text += '.'
    return text


def chinese_cleaners(text):
    '''Pipeline for Chinese text'''
    text = number_to_chinese(text)
    text = chinese_to_bopomofo(text)
    text = latin_to_bopomofo(text)
    if re.match('[ˉˊˇˋ˙]', text[-1]):
        text += '。'
    return text


def zh_ja_mixture_cleaners(text):
    chinese_texts = re.findall(r'\[ZH\].*?\[ZH\]', text)
    japanese_texts = re.findall(r'\[JA\].*?\[JA\]', text)
    for chinese_text in chinese_texts:
        cleaned_text = chinese_to_romaji(chinese_text[4:-4])
        text = text.replace(chinese_text, cleaned_text+' ', 1)
    for japanese_text in japanese_texts:
        cleaned_text = japanese_to_romaji_with_accent(
            japanese_text[4:-4]).replace('ts', 'ʦ').replace('u', 'ɯ').replace('...', '…')
        text = text.replace(japanese_text, cleaned_text+' ', 1)
    text = text[:-1]
    if re.match('[A-Za-zɯɹəɥ→↓↑]', text[-1]):
        text += '.'
    return text


def sanskrit_cleaners(text):
    text = text.replace('॥', '।').replace('ॐ', 'ओम्')
    if text[-1] != '।':
        text += ' ।'
    return text

def zh_en_cleaners(text):
    chinese_texts = re.findall(r'\[ZH\].*?\[ZH\]', text)
    english_texts = re.findall(r'\[EN\].*?\[EN\]', text)
    for chinese_text in chinese_texts:
        cleaned_text = chinese_to_lazy_ipa(chinese_text[4:-4])
        text = text.replace(chinese_text, cleaned_text+' ', 1)
    for english_text in english_texts:
        cleaned_text = english_to_lazy_ipa(english_text[4:-4])
        text = text.replace(english_text, cleaned_text+' ', 1)
    text = text[:-1]
    if re.match(r'[^\.,!\?\-…~]', text[-1]):
        text += '.'
    return text

def cjks_cleaners(text):
    chinese_texts = re.findall(r'\[ZH\].*?\[ZH\]', text)
    japanese_texts = re.findall(r'\[JA\].*?\[JA\]', text)
    korean_texts = re.findall(r'\[KO\].*?\[KO\]', text)
    sanskrit_texts = re.findall(r'\[SA\].*?\[SA\]', text)
    english_texts = re.findall(r'\[EN\].*?\[EN\]', text)
    for chinese_text in chinese_texts:
        cleaned_text = chinese_to_lazy_ipa(chinese_text[4:-4])
        text = text.replace(chinese_text, cleaned_text+' ', 1)
    for japanese_text in japanese_texts:
        cleaned_text = japanese_to_ipa(japanese_text[4:-4])
        text = text.replace(japanese_text, cleaned_text+' ', 1)
    for korean_text in korean_texts:
        cleaned_text = korean_to_lazy_ipa(korean_text[4:-4])
        text = text.replace(korean_text, cleaned_text+' ', 1)
    for sanskrit_text in sanskrit_texts:
        cleaned_text = devanagari_to_ipa(sanskrit_text[4:-4])
        text = text.replace(sanskrit_text, cleaned_text+' ', 1)
    for english_text in english_texts:
        cleaned_text = english_to_lazy_ipa(english_text[4:-4])
        text = text.replace(english_text, cleaned_text+' ', 1)
    text = text[:-1]
    if re.match(r'[^\.,!\?\-…~]', text[-1]):
        text += '.'
    return text


def cjke_cleaners(text):
    chinese_texts = re.findall(r'\[ZH\].*?\[ZH\]', text)
    japanese_texts = re.findall(r'\[JA\].*?\[JA\]', text)
    korean_texts = re.findall(r'\[KO\].*?\[KO\]', text)
    english_texts = re.findall(r'\[EN\].*?\[EN\]', text)
    for chinese_text in chinese_texts:
        cleaned_text = chinese_to_lazy_ipa(chinese_text[4:-4])
        cleaned_text = cleaned_text.replace(
            'ʧ', 'tʃ').replace('ʦ', 'ts').replace('ɥan', 'ɥæn')
        text = text.replace(chinese_text, cleaned_text+' ', 1)
    for japanese_text in japanese_texts:
        cleaned_text = japanese_to_ipa(japanese_text[4:-4])
        cleaned_text = cleaned_text.replace('ʧ', 'tʃ').replace(
            'ʦ', 'ts').replace('ɥan', 'ɥæn').replace('ʥ', 'dz')
        text = text.replace(japanese_text, cleaned_text+' ', 1)
    for korean_text in korean_texts:
        cleaned_text = korean_to_ipa(korean_text[4:-4])
        text = text.replace(korean_text, cleaned_text+' ', 1)
    for english_text in english_texts:
        cleaned_text = english_to_ipa2(english_text[4:-4])
        cleaned_text = cleaned_text.replace('ɑ', 'a').replace(
            'ɔ', 'o').replace('ɛ', 'e').replace('ɪ', 'i').replace('ʊ', 'u')
        text = text.replace(english_text, cleaned_text+' ', 1)
    text = text[:-1]
    if re.match(r'[^\.,!\?\-…~]', text[-1]):
        text += '.'
    return text


def cjke_cleaners2(text):
    chinese_texts = re.findall(r'\[ZH\].*?\[ZH\]', text)
    japanese_texts = re.findall(r'\[JA\].*?\[JA\]', text)
    korean_texts = re.findall(r'\[KO\].*?\[KO\]', text)
    english_texts = re.findall(r'\[EN\].*?\[EN\]', text)
    for chinese_text in chinese_texts:
        cleaned_text = chinese_to_ipa(chinese_text[4:-4])
        text = text.replace(chinese_text, cleaned_text+' ', 1)
    for japanese_text in japanese_texts:
        cleaned_text = japanese_to_ipa2(japanese_text[4:-4])
        text = text.replace(japanese_text, cleaned_text+' ', 1)
    for korean_text in korean_texts:
        cleaned_text = korean_to_ipa(korean_text[4:-4])
        text = text.replace(korean_text, cleaned_text+' ', 1)
    for english_text in english_texts:
        cleaned_text = english_to_ipa2(english_text[4:-4])
        text = text.replace(english_text, cleaned_text+' ', 1)
    text = text[:-1]
    if re.match(r'[^\.,!\?\-…~]', text[-1]):
        text += '.'
    return text


def thai_cleaners(text):
    text = num_to_thai(text)
    text = latin_to_thai(text)
    return text


def shanghainese_cleaners(text):
    text = shanghainese_to_ipa(text)
    if re.match(r'[^\.,!\?\-…~]', text[-1]):
        text += '.'
    return text


def chinese_dialect_cleaners(text):
    text = re.sub(r'\[MD\](.*?)\[MD\]',
                  lambda x: chinese_to_ipa2(x.group(1))+' ', text)
    text = re.sub(r'\[TW\](.*?)\[TW\]',
                  lambda x: chinese_to_ipa2(x.group(1), True)+' ', text)
    text = re.sub(r'\[JA\](.*?)\[JA\]',
                  lambda x: japanese_to_ipa3(x.group(1)).replace('Q', 'ʔ')+' ', text)
    text = re.sub(r'\[SH\](.*?)\[SH\]', lambda x: shanghainese_to_ipa(x.group(1)).replace('1', '˥˧').replace('5',
                  '˧˧˦').replace('6', '˩˩˧').replace('7', '˥').replace('8', '˩˨').replace('ᴀ', 'ɐ').replace('ᴇ', 'e')+' ', text)
    text = re.sub(r'\[GD\](.*?)\[GD\]',
                  lambda x: cantonese_to_ipa(x.group(1))+' ', text)
    text = re.sub(r'\[EN\](.*?)\[EN\]',
                  lambda x: english_to_lazy_ipa2(x.group(1))+' ', text)
    text = re.sub(r'\[([A-Z]{2})\](.*?)\[\1\]', lambda x: ngu_dialect_to_ipa(x.group(2), x.group(
        1)).replace('ʣ', 'dz').replace('ʥ', 'dʑ').replace('ʦ', 'ts').replace('ʨ', 'tɕ')+' ', text)
    text = re.sub(r'\s+$', '', text)
    text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
    return text
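The mixed-language cleaners above all rely on one convention: each segment is wrapped in a doubled language tag such as [ZH]...[ZH] or [EN]...[EN], matched with a non-greedy regex, and the 4-character tags are stripped with segment[4:-4] before the per-language converter runs. A small standalone illustration of just that step (the sample string is hypothetical, not taken from the deleted file):

    import re

    text = '[ZH]你好[ZH] [EN]hello[EN]'
    for tag in ('ZH', 'EN'):
        for segment in re.findall(rf'\[{tag}\].*?\[{tag}\]', text):
            payload = segment[4:-4]   # strip the leading and trailing 4-character tag
            print(tag, '->', payload)
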

spaces/AIFILMS/ControlNet-Video/share_btn.py
DELETED
@@ -1,86 +0,0 @@
community_icon_html = """<svg id="share-btn-share-icon" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32">
<path d="M20.6081 3C21.7684 3 22.8053 3.49196 23.5284 4.38415C23.9756 4.93678 24.4428 5.82749 24.4808 7.16133C24.9674 7.01707 25.4353 6.93643 25.8725 6.93643C26.9833 6.93643 27.9865 7.37587 28.696 8.17411C29.6075 9.19872 30.0124 10.4579 29.8361 11.7177C29.7523 12.3177 29.5581 12.8555 29.2678 13.3534C29.8798 13.8646 30.3306 14.5763 30.5485 15.4322C30.719 16.1032 30.8939 17.5006 29.9808 18.9403C30.0389 19.0342 30.0934 19.1319 30.1442 19.2318C30.6932 20.3074 30.7283 21.5229 30.2439 22.6548C29.5093 24.3704 27.6841 25.7219 24.1397 27.1727C21.9347 28.0753 19.9174 28.6523 19.8994 28.6575C16.9842 29.4379 14.3477 29.8345 12.0653 29.8345C7.87017 29.8345 4.8668 28.508 3.13831 25.8921C0.356375 21.6797 0.754104 17.8269 4.35369 14.1131C6.34591 12.058 7.67023 9.02782 7.94613 8.36275C8.50224 6.39343 9.97271 4.20438 12.4172 4.20438H12.4179C12.6236 4.20438 12.8314 4.2214 13.0364 4.25468C14.107 4.42854 15.0428 5.06476 15.7115 6.02205C16.4331 5.09583 17.134 4.359 17.7682 3.94323C18.7242 3.31737 19.6794 3 20.6081 3ZM20.6081 5.95917C20.2427 5.95917 19.7963 6.1197 19.3039 6.44225C17.7754 7.44319 14.8258 12.6772 13.7458 14.7131C13.3839 15.3952 12.7655 15.6837 12.2086 15.6837C11.1036 15.6837 10.2408 14.5497 12.1076 13.1085C14.9146 10.9402 13.9299 7.39584 12.5898 7.1776C12.5311 7.16799 12.4731 7.16355 12.4172 7.16355C11.1989 7.16355 10.6615 9.33114 10.6615 9.33114C10.6615 9.33114 9.0863 13.4148 6.38031 16.206C3.67434 18.998 3.5346 21.2388 5.50675 24.2246C6.85185 26.2606 9.42666 26.8753 12.0653 26.8753C14.8021 26.8753 17.6077 26.2139 19.1799 25.793C19.2574 25.7723 28.8193 22.984 27.6081 20.6107C27.4046 20.212 27.0693 20.0522 26.6471 20.0522C24.9416 20.0522 21.8393 22.6726 20.5057 22.6726C20.2076 22.6726 19.9976 22.5416 19.9116 22.222C19.3433 20.1173 28.552 19.2325 27.7758 16.1839C27.639 15.6445 27.2677 15.4256 26.746 15.4263C24.4923 15.4263 19.4358 19.5181 18.3759 19.5181C18.2949 19.5181 18.2368 19.4937 18.2053 19.4419C17.6743 18.557 17.9653 17.9394 21.7082 15.6009C25.4511 13.2617 28.0783 11.8545 26.5841 10.1752C26.4121 9.98141 26.1684 9.8956 25.8725 9.8956C23.6001 9.89634 18.2311 14.9403 18.2311 14.9403C18.2311 14.9403 16.7821 16.496 15.9057 16.496C15.7043 16.496 15.533 16.4139 15.4169 16.2112C14.7956 15.1296 21.1879 10.1286 21.5484 8.06535C21.7928 6.66715 21.3771 5.95917 20.6081 5.95917Z" fill="#FF9D00"></path>
<path d="M5.50686 24.2246C3.53472 21.2387 3.67446 18.9979 6.38043 16.206C9.08641 13.4147 10.6615 9.33111 10.6615 9.33111C10.6615 9.33111 11.2499 6.95933 12.59 7.17757C13.93 7.39581 14.9139 10.9401 12.1069 13.1084C9.29997 15.276 12.6659 16.7489 13.7459 14.713C14.8258 12.6772 17.7747 7.44316 19.304 6.44221C20.8326 5.44128 21.9089 6.00204 21.5484 8.06532C21.188 10.1286 14.795 15.1295 15.4171 16.2118C16.0391 17.2934 18.2312 14.9402 18.2312 14.9402C18.2312 14.9402 25.0907 8.49588 26.5842 10.1752C28.0776 11.8545 25.4512 13.2616 21.7082 15.6008C17.9646 17.9393 17.6744 18.557 18.2054 19.4418C18.7372 20.3266 26.9998 13.1351 27.7759 16.1838C28.5513 19.2324 19.3434 20.1173 19.9117 22.2219C20.48 24.3274 26.3979 18.2382 27.6082 20.6107C28.8193 22.9839 19.2574 25.7722 19.18 25.7929C16.0914 26.62 8.24723 28.3726 5.50686 24.2246Z" fill="#FFD21E"></path>
</svg>"""

loading_icon_html = """<svg id="share-btn-loading-icon" style="display:none;" class="animate-spin"
   style="color: #ffffff;
"
   xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" fill="none" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><circle style="opacity: 0.25;" cx="12" cy="12" r="10" stroke="white" stroke-width="4"></circle><path style="opacity: 0.75;" fill="white" d="M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4zm2 5.291A7.962 7.962 0 014 12H0c0 3.042 1.135 5.824 3 7.938l3-2.647z"></path></svg>"""

share_js = """async () => {
    async function uploadFile(file){
        const UPLOAD_URL = 'https://huggingface.co/uploads';
        const response = await fetch(UPLOAD_URL, {
            method: 'POST',
            headers: {
                'Content-Type': file.type,
                'X-Requested-With': 'XMLHttpRequest',
            },
            body: file, /// <- File inherits from Blob
        });
        const url = await response.text();
        return url;
    }

    async function getVideoBlobFile(videoEL){
        const res = await fetch(videoEL.src);
        const blob = await res.blob();
        const videoId = Date.now() % 200;
        const fileName = `vid-pix2pix-${{videoId}}.wav`;
        const videoBlob = new File([blob], fileName, { type: 'video/mp4' });
        console.log(videoBlob);
        return videoBlob;
    }

    const gradioEl = document.querySelector("gradio-app").shadowRoot || document.querySelector('body > gradio-app');
    const captionTxt = gradioEl.querySelector('#prompt-in textarea').value;
    const controlTask = gradioEl.querySelector('#controltask-in select').value;
    const seedValue = gradioEl.querySelector('#seed-in input').value;
    const inputVidEl = gradioEl.querySelector('#input-vid video');
    const outputVideo = gradioEl.querySelector('#video-output video');
    const outputPrepVideo = gradioEl.querySelector('#prep-video-output video');

    const shareBtnEl = gradioEl.querySelector('#share-btn');
    const shareIconEl = gradioEl.querySelector('#share-btn-share-icon');
    const loadingIconEl = gradioEl.querySelector('#share-btn-loading-icon');
    if(!outputVideo){
        return;
    };
    shareBtnEl.style.pointerEvents = 'none';
    shareIconEl.style.display = 'none';
    loadingIconEl.style.removeProperty('display');

    const inputFile = await getVideoBlobFile(inputVidEl);
    const urlInputVid = await uploadFile(inputFile);

    const prepVideoOutFile = await getVideoBlobFile(outputPrepVideo);
    const dataOutputPrepVid = await uploadFile(prepVideoOutFile);

    const videoOutFile = await getVideoBlobFile(outputVideo);
    const dataOutputVid = await uploadFile(videoOutFile);

    const descriptionMd = `
#### Settings
Prompt: ${captionTxt}
Control Task: ${controlTask} • Seed: ${seedValue}

#### Video input:
${urlInputVid}

#### Preprcessor output:
${dataOutputPrepVid}

#### ControlNet result:
${dataOutputVid}
`;
    const params = new URLSearchParams({
        title: captionTxt,
        description: descriptionMd,
    });
    const paramsStr = params.toString();
    window.open(`https://huggingface.co/spaces/fffiloni/ControlNet-Video/discussions/new?${paramsStr}`, '_blank');
    shareBtnEl.style.removeProperty('pointer-events');
    shareIconEl.style.removeProperty('display');
    loadingIconEl.style.display = 'none';
}"""

spaces/AIGText/GlyphControl/ldm/models/autoencoder.py
DELETED
@@ -1,278 +0,0 @@
import torch
import pytorch_lightning as pl
import torch.nn.functional as F
from contextlib import contextmanager

from ldm.modules.diffusionmodules.model import Encoder, Decoder
from ldm.modules.distributions.distributions import DiagonalGaussianDistribution

from ldm.util import instantiate_from_config
from ldm.modules.ema import LitEma


class AutoencoderKL(pl.LightningModule):
    def __init__(self,
                 ddconfig,
                 lossconfig,
                 embed_dim,
                 ckpt_path=None,
                 ignore_keys=[],
                 image_key="image",
                 colorize_nlabels=None,
                 monitor=None,
                 ema_decay=None,
                 learn_logvar=False,
                 keep_keys = [],
                 ):
        super().__init__()
        self.learn_logvar = learn_logvar
        self.image_key = image_key
        self.encoder = Encoder(**ddconfig)
        self.decoder = Decoder(**ddconfig)
        self.loss = instantiate_from_config(lossconfig)
        assert ddconfig["double_z"]
        self.quant_conv = torch.nn.Conv2d(2*ddconfig["z_channels"], 2*embed_dim, 1)
        self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
        self.embed_dim = embed_dim
        if colorize_nlabels is not None:
            assert type(colorize_nlabels)==int
            self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1))
        if monitor is not None:
            self.monitor = monitor

        self.use_ema = ema_decay is not None
        if self.use_ema:
            self.ema_decay = ema_decay
            assert 0. < ema_decay < 1.
            self.model_ema = LitEma(self, decay=ema_decay)
            print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")

        if ckpt_path is not None:
            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, keep_keys=keep_keys)

    def init_from_ckpt(self, path, ignore_keys=list(), keep_keys=list()):
        # if path.endswith(".ckpt"):
        #     sd = torch.load(path, map_location="cpu")["state_dict"]
        # elif path.endswith(".bin"):
        #     sd = torch.load(path, map_location="cpu")
        # else:
        #     raise ValueError
        sd = torch.load(path, map_location="cpu")
        if "state_dict" in list(sd.keys()):
            sd = sd["state_dict"]
        keys = list(sd.keys())
        for k in keys:
            for ik in ignore_keys:
                if k.startswith(ik):
                    print("Deleting key {} from state_dict.".format(k))
                    del sd[k]
        if len(keep_keys):
            sd_new = {}
            for k in list(sd.keys()):
                for kk in keep_keys:
                    if k.startswith(kk):
                        if kk == "first_stage_model":
                            k_new = k.split(kk + ".")[1]
                        else:
                            k_new = k
                        sd_new[k_new] = sd[k]
        else:
            sd_new = sd
            # new_k = k
            # if ".mid_block." in k:
            #     new_k = new_k.replace(".mid_block.", ".mid.")
            #     if "attentions.0." in k:
            #         new_k = new_k.replace("attentions.0.", "attn_1.")
            #     if ".resnets.0" in k:
            #         new_k = new_k.replace(".resnets.0", ".block_1")
            #     if ".resnets.1" in k:
            #         new_k = new_k.replace(".resnets.1", ".block_2")
            # else:
            #     if ".up_blocks." in k:
            #         new_k = new_k.replace(".up_blocks.", ".up.")
            #         # sd[k.replace(".up_blocks.", ".up.")] = sd[k]
            #         # del sd[k]
            #     if ".down_blocks." in k:
            #         new_k = new_k.replace(".down_blocks.", ".down.")
            #     if ".resnets." in k:
            #         new_k = new_k.replace(".resnets.", ".block.")
            #     if "samplers.0." in k:
            #         new_k = new_k.replace("samplers.0.", "sample.")
            #     # sd[k.replace(".down_blocks.", ".down.")] = sd[k]
            #     # del sd[k]

            #     # sd[k.replace(".mid_block.", ".mid.")] = sd[k]
            #     # del sd[k]
            # if ".conv_norm_out." in k:
            #     new_k = new_k.replace(".conv_norm_out.", ".norm_out.")
            #     # sd[k.replace(".conv_norm_out.", ".norm_out.")] = sd[k]
            # if new_k != k:
            #     sd[new_k] = sd[k]
            #     del sd[k]

        # self.load_state_dict(sd, strict=True)
        missing, unexpected = self.load_state_dict(sd_new, strict=False)
        print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
        if len(missing) > 0:
            print(f"Missing Keys:\n {missing}")
        if len(unexpected) > 0:
            print(f"\nUnexpected Keys:\n {unexpected}")
        # print(f"Restored from {path}")

    @contextmanager
    def ema_scope(self, context=None):
        if self.use_ema:
            self.model_ema.store(self.parameters())
            self.model_ema.copy_to(self)
            if context is not None:
                print(f"{context}: Switched to EMA weights")
        try:
            yield None
        finally:
            if self.use_ema:
                self.model_ema.restore(self.parameters())
                if context is not None:
                    print(f"{context}: Restored training weights")

    def on_train_batch_end(self, *args, **kwargs):
        if self.use_ema:
            self.model_ema(self)

    def encode(self, x):
        h = self.encoder(x)
        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)
        return posterior

    def decode(self, z):
        z = self.post_quant_conv(z)
        dec = self.decoder(z)
        return dec

    def forward(self, input, sample_posterior=True):
        posterior = self.encode(input)
        if sample_posterior:
            z = posterior.sample()
        else:
            z = posterior.mode()
        dec = self.decode(z)
        return dec, posterior

    def get_input(self, batch, k):
        x = batch[k]
        if len(x.shape) == 3:
            x = x[..., None]
        x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()
        return x

    def training_step(self, batch, batch_idx, optimizer_idx):
        inputs = self.get_input(batch, self.image_key)
        reconstructions, posterior = self(inputs)

        if optimizer_idx == 0:
            # train encoder+decoder+logvar
            aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,
                                            last_layer=self.get_last_layer(), split="train")
            self.log("aeloss", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
            self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)
            return aeloss

        if optimizer_idx == 1:
            # train the discriminator
            discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,
                                                last_layer=self.get_last_layer(), split="train")

            self.log("discloss", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
            self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)
            return discloss

    def validation_step(self, batch, batch_idx):
        log_dict = self._validation_step(batch, batch_idx)
        with self.ema_scope():
            log_dict_ema = self._validation_step(batch, batch_idx, postfix="_ema")
        return log_dict

    def _validation_step(self, batch, batch_idx, postfix=""):
        inputs = self.get_input(batch, self.image_key)
        reconstructions, posterior = self(inputs)
        aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,
                                        last_layer=self.get_last_layer(), split="val"+postfix)

        discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,
                                            last_layer=self.get_last_layer(), split="val"+postfix)

        self.log(f"val{postfix}/rec_loss", log_dict_ae[f"val{postfix}/rec_loss"])
        self.log_dict(log_dict_ae)
        self.log_dict(log_dict_disc)
        return self.log_dict

    def configure_optimizers(self):
        lr = self.learning_rate
        ae_params_list = list(self.encoder.parameters()) + list(self.decoder.parameters()) + list(
            self.quant_conv.parameters()) + list(self.post_quant_conv.parameters())
        if self.learn_logvar:
            print(f"{self.__class__.__name__}: Learning logvar")
            ae_params_list.append(self.loss.logvar)
        opt_ae = torch.optim.Adam(ae_params_list,
                                  lr=lr, betas=(0.5, 0.9))
        opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),
                                    lr=lr, betas=(0.5, 0.9))
        return [opt_ae, opt_disc], []

    def get_last_layer(self):
        return self.decoder.conv_out.weight

    @torch.no_grad()
    def log_images(self, batch, only_inputs=False, log_ema=False, **kwargs):
        log = dict()
        x = self.get_input(batch, self.image_key)
        x = x.to(self.device)
        if not only_inputs:
            xrec, posterior = self(x)
            if x.shape[1] > 3:
                # colorize with random projection
                assert xrec.shape[1] > 3
                x = self.to_rgb(x)
                xrec = self.to_rgb(xrec)
            log["samples"] = self.decode(torch.randn_like(posterior.sample()))
            log["reconstructions"] = xrec
            if log_ema or self.use_ema:
                with self.ema_scope():
                    xrec_ema, posterior_ema = self(x)
                    if x.shape[1] > 3:
                        # colorize with random projection
                        assert xrec_ema.shape[1] > 3
                        xrec_ema = self.to_rgb(xrec_ema)
                    log["samples_ema"] = self.decode(torch.randn_like(posterior_ema.sample()))
                    log["reconstructions_ema"] = xrec_ema
        log["inputs"] = x
        return log

    def to_rgb(self, x):
        assert self.image_key == "segmentation"
        if not hasattr(self, "colorize"):
            self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x))
        x = F.conv2d(x, weight=self.colorize)
        x = 2.*(x-x.min())/(x.max()-x.min()) - 1.
        return x


class IdentityFirstStage(torch.nn.Module):
    def __init__(self, *args, vq_interface=False, **kwargs):
        self.vq_interface = vq_interface
        super().__init__()

    def encode(self, x, *args, **kwargs):
        return x

    def decode(self, x, *args, **kwargs):
        return x

    def quantize(self, x, *args, **kwargs):
        if self.vq_interface:
            return x, None, [None, None, None]
        return x

    def forward(self, x, *args, **kwargs):
        return x
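For context on the encode() path above: quant_conv maps the encoder features to 2*embed_dim channels, and DiagonalGaussianDistribution splits them into a mean and a log-variance and samples with the reparameterisation trick. A rough standalone sketch of that step (shapes and the clamp range are assumptions based on the usual latent-diffusion implementation, not taken from this repository):

    import torch

    embed_dim = 4
    moments = torch.randn(1, 2 * embed_dim, 32, 32)   # stand-in for quant_conv(encoder(x))
    mean, logvar = torch.chunk(moments, 2, dim=1)
    logvar = torch.clamp(logvar, -30.0, 20.0)         # keeps exp() numerically sane
    std = torch.exp(0.5 * logvar)
    z = mean + std * torch.randn_like(mean)           # what posterior.sample() returns
    print(z.shape)                                    # torch.Size([1, 4, 32, 32])
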

spaces/AchyuthGamer/ImMagician/style.css
DELETED
@@ -1,97 +0,0 @@
#col-container {color: white;
  max-width: 1200px;
  margin-left: auto;
  margin-right: auto;
}
a {
  color: inherit;
  text-decoration: underline;
}
.gradio-container {
  color: #ffaa66;
  background-color: #005566;
  font-family: 'IBM Plex Sans', sans-serif;
}
.gr-button {
  color: #ffffff !important;
  text-shadow: 1px 1px 0 rgba(0, 0, 0, 1) !important;
  background-image: linear-gradient(#76635a, #d2a489) !important;
  border-radius: 24px !important;
  border: solid 1px !important;
  border-top-color: #ffc99f !important;
  border-right-color: #000000 !important;
  border-bottom-color: #000000 !important;
  border-left-color: #ffc99f !important;
  padding: 6px 30px;
}
input[type='range'] {
  accent-color: #9d66e5;
}
.dark input[type='range'] {
  accent-color: #dfdfdf;
}
.container {
  color: #ffaa66;
  max-width: 1200px;
  margin: auto;
  padding-top: 1.5rem;
}
#gallery {
  color: #ffaa66;
  min-height: 22rem;
  margin-bottom: 15px;
  margin-left: auto;
  margin-right: auto;
  border-bottom-right-radius: .5rem !important;
  border-bottom-left-radius: .5rem !important;
}
#gallery>div>.h-full {
  color: #ffaa66;
  min-height: 20rem;
}
.details:hover {
  text-decoration: underline;
}
.gr-button:focus {
  border-color: rgb(255 160 0 / var(--tw-border-opacity));
  outline: none;
  box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
  --tw-border-opacity: 1;
  --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
  --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px var(--tw-ring-offset-width)) var(--tw-ring-color);
  --tw-ring-color: rgb(0 0 0 / var(--tw-ring-opacity));
  --tw-ring-opacity: .5;
}
#advanced-options {
  color: #ffaa66;
  margin-bottom: 20px;
}
.footer {
  color: #ffaa66;
  margin-bottom: 45px;
  margin-top: 35px;
  text-align: center;
  border-bottom: 1px solid #e5e5e5;
}
.footer>p {
  color: #ffaa66;
  font-size: .8rem;
  display: inline-block;
  padding: 0 10px;
  transform: translateY(10px);
  background: white;
}
.dark .logo{ filter: invert(1); }
.dark .footer {
  border-color: #303030;
}
.dark .footer>p {
  background: #0b0f19;
}
.acknowledgments h4{
  color: #ffaa66;
  margin: 1.25em 0 .25em 0;
  font-weight: bold;
  font-size: 115%;
}

spaces/AgentVerse/agentVerse/ui/dist/assets/tilemaps/tiles/tileset.tsx
DELETED
@@ -1,4 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<tileset version="1.10" tiledversion="1.10.1" name="tileset" tilewidth="16" tileheight="16" tilecount="7984" columns="8">
 <image source="tileset.png" width="128" height="15968"/>
</tileset>

spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/spinner/Factory.js
DELETED
@@ -1,13 +0,0 @@
import Spinner from './Spinner.js';
import ObjectFactory from '../ObjectFactory.js';
import SetValue from '../../../plugins/utils/object/SetValue.js';

ObjectFactory.register('spinner', function (config) {
    var gameObject = new Spinner(this.scene, config);
    this.scene.add.existing(gameObject);
    return gameObject;
});

SetValue(window, 'RexPlugins.Spinner.Spinner', Spinner);

export default Spinner;

spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorinput/methods/Methods.js
DELETED
@@ -1,13 +0,0 @@
import ConfigurationMethods from './ConfigurationMethods.js'
import OpenColorPicker from './OpenColorPicker.js';

var methods = {
    openColorPicker: OpenColorPicker
}

Object.assign(
    methods,
    ConfigurationMethods,
);

export default methods;
spaces/Akmyradov/TurkmenTTSweSTT/uroman/lib/JSON.pm
DELETED
@@ -1,2317 +0,0 @@
|
|
1 |
-
package JSON;
|
2 |
-
|
3 |
-
|
4 |
-
use strict;
|
5 |
-
use Carp ();
|
6 |
-
use base qw(Exporter);
|
7 |
-
@JSON::EXPORT = qw(from_json to_json jsonToObj objToJson encode_json decode_json);
|
8 |
-
|
9 |
-
BEGIN {
|
10 |
-
$JSON::VERSION = '2.90';
|
11 |
-
$JSON::DEBUG = 0 unless (defined $JSON::DEBUG);
|
12 |
-
$JSON::DEBUG = $ENV{ PERL_JSON_DEBUG } if exists $ENV{ PERL_JSON_DEBUG };
|
13 |
-
}
|
14 |
-
|
15 |
-
my $Module_XS = 'JSON::XS';
|
16 |
-
my $Module_PP = 'JSON::PP';
|
17 |
-
my $Module_bp = 'JSON::backportPP'; # included in JSON distribution
|
18 |
-
my $PP_Version = '2.27203';
|
19 |
-
my $XS_Version = '2.34';
|
20 |
-
|
21 |
-
|
22 |
-
# XS and PP common methods
|
23 |
-
|
24 |
-
my @PublicMethods = qw/
|
25 |
-
ascii latin1 utf8 pretty indent space_before space_after relaxed canonical allow_nonref
|
26 |
-
allow_blessed convert_blessed filter_json_object filter_json_single_key_object
|
27 |
-
shrink max_depth max_size encode decode decode_prefix allow_unknown
|
28 |
-
/;
|
29 |
-
|
30 |
-
my @Properties = qw/
|
31 |
-
ascii latin1 utf8 indent space_before space_after relaxed canonical allow_nonref
|
32 |
-
allow_blessed convert_blessed shrink max_depth max_size allow_unknown
|
33 |
-
/;
|
34 |
-
|
35 |
-
my @XSOnlyMethods = qw/allow_tags/; # Currently nothing
|
36 |
-
|
37 |
-
my @PPOnlyMethods = qw/
|
38 |
-
indent_length sort_by
|
39 |
-
allow_singlequote allow_bignum loose allow_barekey escape_slash as_nonblessed
|
40 |
-
/; # JSON::PP specific
|
41 |
-
|
42 |
-
|
43 |
-
# used in _load_xs and _load_pp ($INSTALL_ONLY is not used currently)
|
44 |
-
my $_INSTALL_DONT_DIE = 1; # When _load_xs fails to load XS, don't die.
|
45 |
-
my $_INSTALL_ONLY = 2; # Don't call _set_methods()
|
46 |
-
my $_ALLOW_UNSUPPORTED = 0;
|
47 |
-
my $_UNIV_CONV_BLESSED = 0;
|
48 |
-
my $_USSING_bpPP = 0;
|
49 |
-
|
50 |
-
|
51 |
-
# Check the environment variable to decide worker module.
|
52 |
-
|
53 |
-
unless ($JSON::Backend) {
|
54 |
-
$JSON::DEBUG and Carp::carp("Check used worker module...");
|
55 |
-
|
56 |
-
my $backend = exists $ENV{PERL_JSON_BACKEND} ? $ENV{PERL_JSON_BACKEND} : 1;
|
57 |
-
|
58 |
-
if ($backend eq '1' or $backend =~ /JSON::XS\s*,\s*JSON::PP/) {
|
59 |
-
_load_xs($_INSTALL_DONT_DIE) or _load_pp();
|
60 |
-
}
|
61 |
-
elsif ($backend eq '0' or $backend eq 'JSON::PP') {
|
62 |
-
_load_pp();
|
63 |
-
}
|
64 |
-
elsif ($backend eq '2' or $backend eq 'JSON::XS') {
|
65 |
-
_load_xs();
|
66 |
-
}
|
67 |
-
elsif ($backend eq 'JSON::backportPP') {
|
68 |
-
$_USSING_bpPP = 1;
|
69 |
-
_load_pp();
|
70 |
-
}
|
71 |
-
else {
|
72 |
-
Carp::croak "The value of environmental variable 'PERL_JSON_BACKEND' is invalid.";
|
73 |
-
}
|
74 |
-
}
|
75 |
-
|
76 |
-
|
77 |
-
sub import {
|
78 |
-
my $pkg = shift;
|
79 |
-
my @what_to_export;
|
80 |
-
my $no_export;
|
81 |
-
|
82 |
-
for my $tag (@_) {
|
83 |
-
if ($tag eq '-support_by_pp') {
|
84 |
-
if (!$_ALLOW_UNSUPPORTED++) {
|
85 |
-
JSON::Backend::XS
|
86 |
-
->support_by_pp(@PPOnlyMethods) if ($JSON::Backend eq $Module_XS);
|
87 |
-
}
|
88 |
-
next;
|
89 |
-
}
|
90 |
-
elsif ($tag eq '-no_export') {
|
91 |
-
$no_export++, next;
|
92 |
-
}
|
93 |
-
elsif ( $tag eq '-convert_blessed_universally' ) {
|
94 |
-
eval q|
|
95 |
-
require B;
|
96 |
-
*UNIVERSAL::TO_JSON = sub {
|
97 |
-
my $b_obj = B::svref_2object( $_[0] );
|
98 |
-
return $b_obj->isa('B::HV') ? { %{ $_[0] } }
|
99 |
-
: $b_obj->isa('B::AV') ? [ @{ $_[0] } ]
|
100 |
-
: undef
|
101 |
-
;
|
102 |
-
}
|
103 |
-
| if ( !$_UNIV_CONV_BLESSED++ );
|
104 |
-
next;
|
105 |
-
}
|
106 |
-
push @what_to_export, $tag;
|
107 |
-
}
|
108 |
-
|
109 |
-
return if ($no_export);
|
110 |
-
|
111 |
-
__PACKAGE__->export_to_level(1, $pkg, @what_to_export);
|
112 |
-
}
|
113 |
-
|
114 |
-
|
115 |
-
# OBSOLETED
|
116 |
-
|
117 |
-
sub jsonToObj {
|
118 |
-
my $alternative = 'from_json';
|
119 |
-
if (defined $_[0] and UNIVERSAL::isa($_[0], 'JSON')) {
|
120 |
-
shift @_; $alternative = 'decode';
|
121 |
-
}
|
122 |
-
Carp::carp "'jsonToObj' will be obsoleted. Please use '$alternative' instead.";
|
123 |
-
return JSON::from_json(@_);
|
124 |
-
};
|
125 |
-
|
126 |
-
sub objToJson {
|
127 |
-
my $alternative = 'to_json';
|
128 |
-
if (defined $_[0] and UNIVERSAL::isa($_[0], 'JSON')) {
|
129 |
-
shift @_; $alternative = 'encode';
|
130 |
-
}
|
131 |
-
Carp::carp "'objToJson' will be obsoleted. Please use '$alternative' instead.";
|
132 |
-
JSON::to_json(@_);
|
133 |
-
};
|
134 |
-
|
135 |
-
|
136 |
-
# INTERFACES
|
137 |
-
|
138 |
-
sub to_json ($@) {
|
139 |
-
if (
|
140 |
-
ref($_[0]) eq 'JSON'
|
141 |
-
or (@_ > 2 and $_[0] eq 'JSON')
|
142 |
-
) {
|
143 |
-
Carp::croak "to_json should not be called as a method.";
|
144 |
-
}
|
145 |
-
my $json = JSON->new;
|
146 |
-
|
147 |
-
if (@_ == 2 and ref $_[1] eq 'HASH') {
|
148 |
-
my $opt = $_[1];
|
149 |
-
for my $method (keys %$opt) {
|
150 |
-
$json->$method( $opt->{$method} );
|
151 |
-
}
|
152 |
-
}
|
153 |
-
|
154 |
-
$json->encode($_[0]);
|
155 |
-
}
|
156 |
-
|
157 |
-
|
158 |
-
sub from_json ($@) {
|
159 |
-
if ( ref($_[0]) eq 'JSON' or $_[0] eq 'JSON' ) {
|
160 |
-
Carp::croak "from_json should not be called as a method.";
|
161 |
-
}
|
162 |
-
my $json = JSON->new;
|
163 |
-
|
164 |
-
if (@_ == 2 and ref $_[1] eq 'HASH') {
|
165 |
-
my $opt = $_[1];
|
166 |
-
for my $method (keys %$opt) {
|
167 |
-
$json->$method( $opt->{$method} );
|
168 |
-
}
|
169 |
-
}
|
170 |
-
|
171 |
-
return $json->decode( $_[0] );
|
172 |
-
}
|
173 |
-
|
174 |
-
|
175 |
-
|
176 |
-
sub true { $JSON::true }
|
177 |
-
|
178 |
-
sub false { $JSON::false }
|
179 |
-
|
180 |
-
sub null { undef; }
|
181 |
-
|
182 |
-
|
183 |
-
sub require_xs_version { $XS_Version; }
|
184 |
-
|
185 |
-
sub backend {
|
186 |
-
my $proto = shift;
|
187 |
-
$JSON::Backend;
|
188 |
-
}
|
189 |
-
|
190 |
-
#*module = *backend;
|
191 |
-
|
192 |
-
|
193 |
-
sub is_xs {
|
194 |
-
return $_[0]->backend eq $Module_XS;
|
195 |
-
}
|
196 |
-
|
197 |
-
|
198 |
-
sub is_pp {
|
199 |
-
return not $_[0]->is_xs;
|
200 |
-
}
|
201 |
-
|
202 |
-
|
203 |
-
sub pureperl_only_methods { @PPOnlyMethods; }
|
204 |
-
|
205 |
-
|
206 |
-
sub property {
|
207 |
-
my ($self, $name, $value) = @_;
|
208 |
-
|
209 |
-
if (@_ == 1) {
|
210 |
-
my %props;
|
211 |
-
for $name (@Properties) {
|
212 |
-
my $method = 'get_' . $name;
|
213 |
-
if ($name eq 'max_size') {
|
214 |
-
my $value = $self->$method();
|
215 |
-
$props{$name} = $value == 1 ? 0 : $value;
|
216 |
-
next;
|
217 |
-
}
|
218 |
-
$props{$name} = $self->$method();
|
219 |
-
}
|
220 |
-
return \%props;
|
221 |
-
}
|
222 |
-
elsif (@_ > 3) {
|
223 |
-
Carp::croak('property() can take only the option within 2 arguments.');
|
224 |
-
}
|
225 |
-
elsif (@_ == 2) {
|
226 |
-
if ( my $method = $self->can('get_' . $name) ) {
|
227 |
-
if ($name eq 'max_size') {
|
228 |
-
my $value = $self->$method();
|
229 |
-
return $value == 1 ? 0 : $value;
|
230 |
-
}
|
231 |
-
$self->$method();
|
232 |
-
}
|
233 |
-
}
|
234 |
-
else {
|
235 |
-
$self->$name($value);
|
236 |
-
}
|
237 |
-
|
238 |
-
}
|
239 |
-
|
240 |
-
|
241 |
-
|
242 |
-
# INTERNAL
|
243 |
-
|
244 |
-
sub _load_xs {
|
245 |
-
my $opt = shift;
|
246 |
-
|
247 |
-
$JSON::DEBUG and Carp::carp "Load $Module_XS.";
|
248 |
-
|
249 |
-
# if called after install module, overload is disable.... why?
|
250 |
-
JSON::Boolean::_overrride_overload($Module_XS);
|
251 |
-
JSON::Boolean::_overrride_overload($Module_PP);
|
252 |
-
|
253 |
-
eval qq|
|
254 |
-
use $Module_XS $XS_Version ();
|
255 |
-
|;
|
256 |
-
|
257 |
-
if ($@) {
|
258 |
-
if (defined $opt and $opt & $_INSTALL_DONT_DIE) {
|
259 |
-
$JSON::DEBUG and Carp::carp "Can't load $Module_XS...($@)";
|
260 |
-
return 0;
|
261 |
-
}
|
262 |
-
Carp::croak $@;
|
263 |
-
}
|
264 |
-
|
265 |
-
unless (defined $opt and $opt & $_INSTALL_ONLY) {
|
266 |
-
_set_module( $JSON::Backend = $Module_XS );
|
267 |
-
my $data = join("", <DATA>); # this code is from Jcode 2.xx.
|
268 |
-
close(DATA);
|
269 |
-
eval $data;
|
270 |
-
JSON::Backend::XS->init;
|
271 |
-
}
|
272 |
-
|
273 |
-
return 1;
|
274 |
-
};
|
275 |
-
|
276 |
-
|
277 |
-
sub _load_pp {
|
278 |
-
my $opt = shift;
|
279 |
-
my $backend = $_USSING_bpPP ? $Module_bp : $Module_PP;
|
280 |
-
|
281 |
-
$JSON::DEBUG and Carp::carp "Load $backend.";
|
282 |
-
|
283 |
-
# if called after install module, overload is disable.... why?
|
284 |
-
JSON::Boolean::_overrride_overload($Module_XS);
|
285 |
-
JSON::Boolean::_overrride_overload($backend);
|
286 |
-
|
287 |
-
if ( $_USSING_bpPP ) {
|
288 |
-
eval qq| require $backend |;
|
289 |
-
}
|
290 |
-
else {
|
291 |
-
eval qq| use $backend $PP_Version () |;
|
292 |
-
}
|
293 |
-
|
294 |
-
if ($@) {
|
295 |
-
if ( $backend eq $Module_PP ) {
|
296 |
-
$JSON::DEBUG and Carp::carp "Can't load $Module_PP ($@), so try to load $Module_bp";
|
297 |
-
$_USSING_bpPP++;
|
298 |
-
$backend = $Module_bp;
|
299 |
-
JSON::Boolean::_overrride_overload($backend);
|
300 |
-
local $^W; # if PP installed but invalid version, backportPP redefines methods.
|
301 |
-
eval qq| require $Module_bp |;
|
302 |
-
}
|
303 |
-
Carp::croak $@ if $@;
|
304 |
-
}
|
305 |
-
|
306 |
-
unless (defined $opt and $opt & $_INSTALL_ONLY) {
|
307 |
-
_set_module( $JSON::Backend = $Module_PP ); # even if backportPP, set $Backend with 'JSON::PP'
|
308 |
-
JSON::Backend::PP->init;
|
309 |
-
}
|
310 |
-
};
|
311 |
-
|
312 |
-
|
313 |
-
sub _set_module {
|
314 |
-
return if defined $JSON::true;
|
315 |
-
|
316 |
-
my $module = shift;
|
317 |
-
|
318 |
-
local $^W;
|
319 |
-
no strict qw(refs);
|
320 |
-
|
321 |
-
$JSON::true = ${"$module\::true"};
|
322 |
-
$JSON::false = ${"$module\::false"};
|
323 |
-
|
324 |
-
push @JSON::ISA, $module;
|
325 |
-
if ( JSON->is_xs and JSON->backend->VERSION < 3 ) {
|
326 |
-
eval 'package JSON::PP::Boolean';
|
327 |
-
push @{"$module\::Boolean::ISA"}, qw(JSON::PP::Boolean);
|
328 |
-
}
|
329 |
-
|
330 |
-
*{"JSON::is_bool"} = \&{"$module\::is_bool"};
|
331 |
-
|
332 |
-
for my $method ($module eq $Module_XS ? @PPOnlyMethods : @XSOnlyMethods) {
|
333 |
-
*{"JSON::$method"} = sub {
|
334 |
-
Carp::carp("$method is not supported in $module.");
|
335 |
-
$_[0];
|
336 |
-
};
|
337 |
-
}
|
338 |
-
|
339 |
-
return 1;
|
340 |
-
}
|
341 |
-
|
342 |
-
|
343 |
-
|
344 |
-
#
|
345 |
-
# JSON Boolean
|
346 |
-
#
|
347 |
-
|
348 |
-
package JSON::Boolean;
|
349 |
-
|
350 |
-
my %Installed;
|
351 |
-
|
352 |
-
sub _overrride_overload {
|
353 |
-
return; # this function is currently disable.
|
354 |
-
return if ($Installed{ $_[0] }++);
|
355 |
-
|
356 |
-
my $boolean = $_[0] . '::Boolean';
|
357 |
-
|
358 |
-
eval sprintf(q|
|
359 |
-
package %s;
|
360 |
-
use overload (
|
361 |
-
'""' => sub { ${$_[0]} == 1 ? 'true' : 'false' },
|
362 |
-
'eq' => sub {
|
363 |
-
my ($obj, $op) = ref ($_[0]) ? ($_[0], $_[1]) : ($_[1], $_[0]);
|
364 |
-
if ($op eq 'true' or $op eq 'false') {
|
365 |
-
return "$obj" eq 'true' ? 'true' eq $op : 'false' eq $op;
|
366 |
-
}
|
367 |
-
else {
|
368 |
-
return $obj ? 1 == $op : 0 == $op;
|
369 |
-
}
|
370 |
-
},
|
371 |
-
);
|
372 |
-
|, $boolean);
|
373 |
-
|
374 |
-
if ($@) { Carp::croak $@; }
|
375 |
-
|
376 |
-
if ( exists $INC{'JSON/XS.pm'} and $boolean eq 'JSON::XS::Boolean' ) {
|
377 |
-
local $^W;
|
378 |
-
my $true = do { bless \(my $dummy = 1), $boolean };
|
379 |
-
my $false = do { bless \(my $dummy = 0), $boolean };
|
380 |
-
*JSON::XS::true = sub () { $true };
|
381 |
-
*JSON::XS::false = sub () { $false };
|
382 |
-
}
|
383 |
-
elsif ( exists $INC{'JSON/PP.pm'} and $boolean eq 'JSON::PP::Boolean' ) {
|
384 |
-
local $^W;
|
385 |
-
my $true = do { bless \(my $dummy = 1), $boolean };
|
386 |
-
my $false = do { bless \(my $dummy = 0), $boolean };
|
387 |
-
*JSON::PP::true = sub { $true };
|
388 |
-
*JSON::PP::false = sub { $false };
|
389 |
-
}
|
390 |
-
|
391 |
-
return 1;
|
392 |
-
}
|
393 |
-
|
394 |
-
|
395 |
-
#
|
396 |
-
# Helper classes for Backend Module (PP)
|
397 |
-
#
|
398 |
-
|
399 |
-
package JSON::Backend::PP;
|
400 |
-
|
401 |
-
sub init {
|
402 |
-
local $^W;
|
403 |
-
no strict qw(refs); # this routine may be called after JSON::Backend::XS init was called.
|
404 |
-
*{"JSON::decode_json"} = \&{"JSON::PP::decode_json"};
|
405 |
-
*{"JSON::encode_json"} = \&{"JSON::PP::encode_json"};
|
406 |
-
*{"JSON::PP::is_xs"} = sub { 0 };
|
407 |
-
*{"JSON::PP::is_pp"} = sub { 1 };
|
408 |
-
return 1;
|
409 |
-
}
|
410 |
-
|
411 |
-
#
|
412 |
-
# To save memory, the below lines are read only when XS backend is used.
|
413 |
-
#
|
414 |
-
|
415 |
-
package JSON;
|
416 |
-
|
417 |
-
1;
|
418 |
-
__DATA__
|
419 |
-
|
420 |
-
|
421 |
-
#
|
422 |
-
# Helper classes for Backend Module (XS)
|
423 |
-
#
|
424 |
-
|
425 |
-
package JSON::Backend::XS;
|
426 |
-
|
427 |
-
use constant INDENT_LENGTH_FLAG => 15 << 12;
|
428 |
-
|
429 |
-
use constant UNSUPPORTED_ENCODE_FLAG => {
|
430 |
-
ESCAPE_SLASH => 0x00000010,
|
431 |
-
ALLOW_BIGNUM => 0x00000020,
|
432 |
-
AS_NONBLESSED => 0x00000040,
|
433 |
-
EXPANDED => 0x10000000, # for developer's
|
434 |
-
};
|
435 |
-
|
436 |
-
use constant UNSUPPORTED_DECODE_FLAG => {
|
437 |
-
LOOSE => 0x00000001,
|
438 |
-
ALLOW_BIGNUM => 0x00000002,
|
439 |
-
ALLOW_BAREKEY => 0x00000004,
|
440 |
-
ALLOW_SINGLEQUOTE => 0x00000008,
|
441 |
-
EXPANDED => 0x20000000, # for developer's
|
442 |
-
};
|
443 |
-
|
444 |
-
|
445 |
-
sub init {
|
446 |
-
local $^W;
|
447 |
-
no strict qw(refs);
|
448 |
-
*{"JSON::decode_json"} = \&{"JSON::XS::decode_json"};
|
449 |
-
*{"JSON::encode_json"} = \&{"JSON::XS::encode_json"};
|
450 |
-
*{"JSON::XS::is_xs"} = sub { 1 };
|
451 |
-
*{"JSON::XS::is_pp"} = sub { 0 };
|
452 |
-
return 1;
|
453 |
-
}
|
454 |
-
|
455 |
-
|
456 |
-
sub support_by_pp {
|
457 |
-
my ($class, @methods) = @_;
|
458 |
-
|
459 |
-
local $^W;
|
460 |
-
no strict qw(refs);
|
461 |
-
|
462 |
-
my $JSON_XS_encode_orignal = \&JSON::XS::encode;
|
463 |
-
my $JSON_XS_decode_orignal = \&JSON::XS::decode;
|
464 |
-
my $JSON_XS_incr_parse_orignal = \&JSON::XS::incr_parse;
|
465 |
-
|
466 |
-
*JSON::XS::decode = \&JSON::Backend::XS::Supportable::_decode;
|
467 |
-
*JSON::XS::encode = \&JSON::Backend::XS::Supportable::_encode;
|
468 |
-
*JSON::XS::incr_parse = \&JSON::Backend::XS::Supportable::_incr_parse;
|
469 |
-
|
470 |
-
*{JSON::XS::_original_decode} = $JSON_XS_decode_orignal;
|
471 |
-
*{JSON::XS::_original_encode} = $JSON_XS_encode_orignal;
|
472 |
-
*{JSON::XS::_original_incr_parse} = $JSON_XS_incr_parse_orignal;
|
473 |
-
|
474 |
-
push @JSON::Backend::XS::Supportable::ISA, 'JSON';
|
475 |
-
|
476 |
-
my $pkg = 'JSON::Backend::XS::Supportable';
|
477 |
-
|
478 |
-
*{JSON::new} = sub {
|
479 |
-
my $proto = JSON::XS->new; $$proto = 0;
|
480 |
-
bless $proto, $pkg;
|
481 |
-
};
|
482 |
-
|
483 |
-
|
484 |
-
for my $method (@methods) {
|
485 |
-
my $flag = uc($method);
|
486 |
-
my $type |= (UNSUPPORTED_ENCODE_FLAG->{$flag} || 0);
|
487 |
-
$type |= (UNSUPPORTED_DECODE_FLAG->{$flag} || 0);
|
488 |
-
|
489 |
-
next unless($type);
|
490 |
-
|
491 |
-
$pkg->_make_unsupported_method($method => $type);
|
492 |
-
}
|
493 |
-
|
494 |
-
# push @{"JSON::XS::Boolean::ISA"}, qw(JSON::PP::Boolean);
|
495 |
-
# push @{"JSON::PP::Boolean::ISA"}, qw(JSON::Boolean);
|
496 |
-
|
497 |
-
$JSON::DEBUG and Carp::carp("set -support_by_pp mode.");
|
498 |
-
|
499 |
-
return 1;
|
500 |
-
}
|
501 |
-
|
502 |
-
|
503 |
-
|
504 |
-
|
505 |
-
#
|
506 |
-
# Helper classes for XS
|
507 |
-
#
|
508 |
-
|
509 |
-
package JSON::Backend::XS::Supportable;
|
510 |
-
|
511 |
-
$Carp::Internal{'JSON::Backend::XS::Supportable'} = 1;
|
512 |
-
|
513 |
-
sub _make_unsupported_method {
|
514 |
-
my ($pkg, $method, $type) = @_;
|
515 |
-
|
516 |
-
local $^W;
|
517 |
-
no strict qw(refs);
|
518 |
-
|
519 |
-
*{"$pkg\::$method"} = sub {
|
520 |
-
local $^W;
|
521 |
-
if (defined $_[1] ? $_[1] : 1) {
|
522 |
-
${$_[0]} |= $type;
|
523 |
-
}
|
524 |
-
else {
|
525 |
-
${$_[0]} &= ~$type;
|
526 |
-
}
|
527 |
-
$_[0];
|
528 |
-
};
|
529 |
-
|
530 |
-
*{"$pkg\::get_$method"} = sub {
|
531 |
-
${$_[0]} & $type ? 1 : '';
|
532 |
-
};
|
533 |
-
|
534 |
-
}
|
535 |
-
|
536 |
-
|
537 |
-
sub _set_for_pp {
|
538 |
-
JSON::_load_pp( $_INSTALL_ONLY );
|
539 |
-
|
540 |
-
my $type = shift;
|
541 |
-
my $pp = JSON::PP->new;
|
542 |
-
my $prop = $_[0]->property;
|
543 |
-
|
544 |
-
for my $name (keys %$prop) {
|
545 |
-
$pp->$name( $prop->{$name} ? $prop->{$name} : 0 );
|
546 |
-
}
|
547 |
-
|
548 |
-
my $unsupported = $type eq 'encode' ? JSON::Backend::XS::UNSUPPORTED_ENCODE_FLAG
|
549 |
-
: JSON::Backend::XS::UNSUPPORTED_DECODE_FLAG;
|
550 |
-
my $flags = ${$_[0]} || 0;
|
551 |
-
|
552 |
-
for my $name (keys %$unsupported) {
|
553 |
-
next if ($name eq 'EXPANDED'); # for developer's
|
554 |
-
my $enable = ($flags & $unsupported->{$name}) ? 1 : 0;
|
555 |
-
my $method = lc $name;
|
556 |
-
$pp->$method($enable);
|
557 |
-
}
|
558 |
-
|
559 |
-
$pp->indent_length( $_[0]->get_indent_length );
|
560 |
-
|
561 |
-
return $pp;
|
562 |
-
}
|
563 |
-
|
564 |
-
sub _encode { # using with PP encode
|
565 |
-
if (${$_[0]}) {
|
566 |
-
_set_for_pp('encode' => @_)->encode($_[1]);
|
567 |
-
}
|
568 |
-
else {
|
569 |
-
$_[0]->_original_encode( $_[1] );
|
570 |
-
}
|
571 |
-
}
|
572 |
-
|
573 |
-
|
574 |
-
sub _decode { # if unsupported-flag is set, use PP
|
575 |
-
if (${$_[0]}) {
|
576 |
-
_set_for_pp('decode' => @_)->decode($_[1]);
|
577 |
-
}
|
578 |
-
else {
|
579 |
-
$_[0]->_original_decode( $_[1] );
|
580 |
-
}
|
581 |
-
}
|
582 |
-
|
583 |
-
|
584 |
-
sub decode_prefix { # if unsupported-flag is set, use PP
|
585 |
-
_set_for_pp('decode' => @_)->decode_prefix($_[1]);
|
586 |
-
}
|
587 |
-
|
588 |
-
|
589 |
-
sub _incr_parse {
|
590 |
-
if (${$_[0]}) {
|
591 |
-
_set_for_pp('decode' => @_)->incr_parse($_[1]);
|
592 |
-
}
|
593 |
-
else {
|
594 |
-
$_[0]->_original_incr_parse( $_[1] );
|
595 |
-
}
|
596 |
-
}
|
597 |
-
|
598 |
-
|
599 |
-
sub get_indent_length {
|
600 |
-
${$_[0]} << 4 >> 16;
|
601 |
-
}
|
602 |
-
|
603 |
-
|
604 |
-
sub indent_length {
|
605 |
-
my $length = $_[1];
|
606 |
-
|
607 |
-
if (!defined $length or $length > 15 or $length < 0) {
|
608 |
-
Carp::carp "The acceptable range of indent_length() is 0 to 15.";
|
609 |
-
}
|
610 |
-
else {
|
611 |
-
local $^W;
|
612 |
-
$length <<= 12;
|
613 |
-
${$_[0]} &= ~ JSON::Backend::XS::INDENT_LENGTH_FLAG;
|
614 |
-
${$_[0]} |= $length;
|
615 |
-
*JSON::XS::encode = \&JSON::Backend::XS::Supportable::_encode;
|
616 |
-
}
|
617 |
-
|
618 |
-
$_[0];
|
619 |
-
}
|
620 |
-
|
621 |
-
|
622 |
-
1;
|
623 |
-
__END__
|
624 |
-
|
625 |
-
=head1 NAME
|
626 |
-
|
627 |
-
JSON - JSON (JavaScript Object Notation) encoder/decoder
|
628 |
-
|
629 |
-
=head1 SYNOPSIS
|
630 |
-
|
631 |
-
use JSON; # imports encode_json, decode_json, to_json and from_json.
|
632 |
-
|
633 |
-
# simple and fast interfaces (expect/generate UTF-8)
|
634 |
-
|
635 |
-
$utf8_encoded_json_text = encode_json $perl_hash_or_arrayref;
|
636 |
-
$perl_hash_or_arrayref = decode_json $utf8_encoded_json_text;
|
637 |
-
|
638 |
-
# OO-interface
|
639 |
-
|
640 |
-
$json = JSON->new->allow_nonref;
|
641 |
-
|
642 |
-
$json_text = $json->encode( $perl_scalar );
|
643 |
-
$perl_scalar = $json->decode( $json_text );
|
644 |
-
|
645 |
-
$pretty_printed = $json->pretty->encode( $perl_scalar ); # pretty-printing
|
646 |
-
|
647 |
-
# If you want to use PP only support features, call with '-support_by_pp'
|
648 |
-
# When XS unsupported feature is enable, using PP (de|en)code instead of XS ones.
|
649 |
-
|
650 |
-
use JSON -support_by_pp;
|
651 |
-
|
652 |
-
# option-acceptable interfaces (expect/generate UNICODE by default)
|
653 |
-
|
654 |
-
$json_text = to_json( $perl_scalar, { ascii => 1, pretty => 1 } );
|
655 |
-
$perl_scalar = from_json( $json_text, { utf8 => 1 } );
|
656 |
-
|
657 |
-
# Between (en|de)code_json and (to|from)_json, if you want to write
|
658 |
-
# a code which communicates to an outer world (encoded in UTF-8),
|
659 |
-
# recommend to use (en|de)code_json.
|
660 |
-
|
661 |
-
=head1 VERSION
|
662 |
-
|
663 |
-
2.90
|
664 |
-
|
665 |
-
This version is compatible with JSON::XS B<2.34> and later.
|
666 |
-
(Not yet compatble to JSON::XS B<3.0x>.)
|
667 |
-
|
668 |
-
|
669 |
-
=head1 NOTE
|
670 |
-
|
671 |
-
JSON::PP was earlier included in the C<JSON> distribution, but
|
672 |
-
has since Perl 5.14 been a core module. For this reason,
|
673 |
-
L<JSON::PP> was removed from the JSON distribution and can now
|
674 |
-
be found also in the Perl5 repository at
|
675 |
-
|
676 |
-
=over
|
677 |
-
|
678 |
-
=item * L<http://perl5.git.perl.org/perl.git>
|
679 |
-
|
680 |
-
=back
|
681 |
-
|
682 |
-
(The newest JSON::PP version still exists in CPAN.)
|
683 |
-
|
684 |
-
Instead, the C<JSON> distribution will include JSON::backportPP
|
685 |
-
for backwards computability. JSON.pm should thus work as it did
|
686 |
-
before.
|
687 |
-
|
688 |
-
=head1 DESCRIPTION
|
689 |
-
|
690 |
-
*************************** CAUTION **************************************
|
691 |
-
* *
|
692 |
-
* INCOMPATIBLE CHANGE (JSON::XS version 2.90) *
|
693 |
-
* *
|
694 |
-
* JSON.pm had patched JSON::XS::Boolean and JSON::PP::Boolean internally *
|
695 |
-
* on loading time for making these modules inherit JSON::Boolean. *
|
696 |
-
* But since JSON::XS v3.0 it use Types::Serialiser as boolean class. *
|
697 |
-
* Then now JSON.pm breaks boolean classe overload features and *
|
698 |
-
* -support_by_pp if JSON::XS v3.0 or later is installed. *
|
699 |
-
* *
|
700 |
-
* JSON::true and JSON::false returned JSON::Boolean objects. *
|
701 |
-
* For workaround, they return JSON::PP::Boolean objects in this version. *
|
702 |
-
* *
|
703 |
-
* isa_ok(JSON::true, 'JSON::PP::Boolean'); *
|
704 |
-
* *
|
705 |
-
* And it discards a feature: *
|
706 |
-
* *
|
707 |
-
* ok(JSON::true eq 'true'); *
|
708 |
-
* *
|
709 |
-
* In other word, JSON::PP::Boolean overload numeric only. *
|
710 |
-
* *
|
711 |
-
* ok( JSON::true == 1 ); *
|
712 |
-
* *
|
713 |
-
**************************************************************************
|
714 |
-
|
715 |
-
************************** CAUTION ********************************
|
716 |
-
* This is 'JSON module version 2' and there are many differences *
|
717 |
-
* to version 1.xx *
|
718 |
-
* Please check your applications using old version. *
|
719 |
-
* See to 'INCOMPATIBLE CHANGES TO OLD VERSION' *
|
720 |
-
*******************************************************************
|
721 |
-
|
722 |
-
JSON (JavaScript Object Notation) is a simple data format.
|
723 |
-
See to L<http://www.json.org/> and C<RFC4627>(L<http://www.ietf.org/rfc/rfc4627.txt>).
|
724 |
-
|
725 |
-
This module converts Perl data structures to JSON and vice versa using either
|
726 |
-
L<JSON::XS> or L<JSON::PP>.
|
727 |
-
|
728 |
-
JSON::XS is the fastest and most proper JSON module on CPAN which must be
|
729 |
-
compiled and installed in your environment.
|
730 |
-
JSON::PP is a pure-Perl module which is bundled in this distribution and
|
731 |
-
has a strong compatibility to JSON::XS.
|
732 |
-
|
733 |
-
This module try to use JSON::XS by default and fail to it, use JSON::PP instead.
|
734 |
-
So its features completely depend on JSON::XS or JSON::PP.
|
735 |
-
|
736 |
-
See to L<BACKEND MODULE DECISION>.
|
737 |
-
|
738 |
-
To distinguish the module name 'JSON' and the format type JSON,
|
739 |
-
the former is quoted by CE<lt>E<gt> (its results vary with your using media),
|
740 |
-
and the latter is left just as it is.
|
741 |
-
|
742 |
-
Module name : C<JSON>
|
743 |
-
|
744 |
-
Format type : JSON
|
745 |
-
|
746 |
-
=head2 FEATURES
|
747 |
-
|
748 |
-
=over
|
749 |
-
|
750 |
-
=item * correct unicode handling
|
751 |
-
|
752 |
-
This module (i.e. backend modules) knows how to handle Unicode, documents
|
753 |
-
how and when it does so, and even documents what "correct" means.
|
754 |
-
|
755 |
-
Even though there are limitations, this feature is available since Perl version 5.6.

JSON::XS requires Perl 5.8.2 (but works correctly on 5.8.8 or later), so on older
perls C<JSON> falls back to JSON::PP as the backend, which can be used since Perl 5.005.

JSON::PP works with Perl 5.8.x, but on 5.8.0 through 5.8.2 it is slower because of a
problem on the Perl side. On 5.005 the Unicode handling is not available at all.
See L<JSON::PP/UNICODE HANDLING ON PERLS> for more information.

See also L<JSON::XS/A FEW NOTES ON UNICODE AND PERL>
and L<JSON::XS/ENCODING/CODESET_FLAG_NOTES>.
|
766 |
-
|
767 |
-
|
768 |
-
=item * round-trip integrity
|
769 |
-
|
770 |
-
When you serialise a perl data structure using only data types supported
|
771 |
-
by JSON and Perl, the deserialised data structure is identical on the Perl
|
772 |
-
level. (e.g. the string "2.0" doesn't suddenly become "2" just because
|
773 |
-
it looks like a number). There I<are> minor exceptions to this, read the
|
774 |
-
L</MAPPING> section below to learn about those.
|
775 |
-
|
776 |
-
|
777 |
-
=item * strict checking of JSON correctness
|
778 |
-
|
779 |
-
There is no guessing, no generating of illegal JSON texts by default,
|
780 |
-
and only JSON is accepted as input by default (the latter is a security
|
781 |
-
feature).
|
782 |
-
|
783 |
-
See to L<JSON::XS/FEATURES> and L<JSON::PP/FEATURES>.
|
784 |
-
|
785 |
-
=item * fast
|
786 |
-
|
787 |
-
If JSON::XS is available, this module returns a JSON::XS object itself.
Compared to other JSON modules and other serialisers such as Storable,
JSON::XS usually compares favorably in terms of speed, too.

If JSON::XS is not available, C<JSON> returns a JSON::PP object instead,
which, being pure Perl, is much slower.

=item * simple to use

This module has both a simple functional interface as well as an
object oriented interface.
|
798 |
-
|
799 |
-
=item * reasonably versatile output formats
|
800 |
-
|
801 |
-
You can choose between the most compact guaranteed-single-line format possible
|
802 |
-
(nice for simple line-based protocols), a pure-ASCII format (for when your transport
|
803 |
-
is not 8-bit clean, still supports the whole Unicode range), or a pretty-printed
|
804 |
-
format (for when you want to read that stuff). Or you can combine those features
|
805 |
-
in whatever way you like.
|
806 |
-
|
807 |
-
=back
|
808 |
-
|
809 |
-
=head1 FUNCTIONAL INTERFACE
|
810 |
-
|
811 |
-
Parts of this section are copied and modified from L<JSON::XS/FUNCTIONAL INTERFACE>.
C<to_json> and C<from_json> are additional functions.
|
813 |
-
|
814 |
-
=head2 encode_json
|
815 |
-
|
816 |
-
$json_text = encode_json $perl_scalar
|
817 |
-
|
818 |
-
Converts the given Perl data structure to a UTF-8 encoded, binary string.
|
819 |
-
|
820 |
-
This function call is functionally identical to:
|
821 |
-
|
822 |
-
$json_text = JSON->new->utf8->encode($perl_scalar)
|
823 |
-
|
824 |
-
=head2 decode_json
|
825 |
-
|
826 |
-
$perl_scalar = decode_json $json_text
|
827 |
-
|
828 |
-
The opposite of C<encode_json>: expects an UTF-8 (binary) string and tries
|
829 |
-
to parse that as an UTF-8 encoded JSON text, returning the resulting
|
830 |
-
reference.
|
831 |
-
|
832 |
-
This function call is functionally identical to:
|
833 |
-
|
834 |
-
$perl_scalar = JSON->new->utf8->decode($json_text)
|
835 |
-
|
836 |
-
|
837 |
-
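As a minimal round-trip sketch of the two functions above (the data structure is invented for the example):

    use strict;
    use warnings;
    use JSON;    # exports encode_json and decode_json by default

    # A plain Perl structure (hashes, arrays, scalars only).
    my $perl_scalar = {
        name   => "Alice",
        scores => [ 90, 95.5 ],
        active => JSON::true,
    };

    # encode_json produces a UTF-8 encoded, binary JSON string ...
    my $json_text = encode_json($perl_scalar);

    # ... and decode_json parses such a string back into a Perl structure.
    my $decoded = decode_json($json_text);
    print $decoded->{name}, "\n";          # "Alice"
    print $decoded->{scores}[1], "\n";     # 95.5
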
=head2 to_json
|
838 |
-
|
839 |
-
$json_text = to_json($perl_scalar)
|
840 |
-
|
841 |
-
Converts the given Perl data structure to a json string.
|
842 |
-
|
843 |
-
This function call is functionally identical to:
|
844 |
-
|
845 |
-
$json_text = JSON->new->encode($perl_scalar)
|
846 |
-
|
847 |
-
It can take a hash reference as its second argument.

    $json_text = to_json($perl_scalar, $flag_hashref)

So,

    $json_text = to_json($perl_scalar, {utf8 => 1, pretty => 1})

is equivalent to:

    $json_text = JSON->new->utf8(1)->pretty(1)->encode($perl_scalar)

If you want to write modern Perl code that communicates with the outside world,
you should use C<encode_json> instead (assuming the JSON data is encoded in UTF-8).
|
861 |
-
|
862 |
-
=head2 from_json
|
863 |
-
|
864 |
-
$perl_scalar = from_json($json_text)
|
865 |
-
|
866 |
-
The opposite of C<to_json>: expects a json string and tries
|
867 |
-
to parse it, returning the resulting reference.
|
868 |
-
|
869 |
-
This function call is functionally identical to:
|
870 |
-
|
871 |
-
    $perl_scalar = JSON->new->decode($json_text)

It can take a hash reference as its second argument.

    $perl_scalar = from_json($json_text, $flag_hashref)

So,

    $perl_scalar = from_json($json_text, {utf8 => 1})

is equivalent to:

    $perl_scalar = JSON->new->utf8(1)->decode($json_text)

If you want to write modern Perl code that communicates with the outside world,
you should use C<decode_json> instead (assuming the JSON data is encoded in UTF-8).
|
887 |
-
|
888 |
-
=head2 JSON::is_bool
|
889 |
-
|
890 |
-
$is_boolean = JSON::is_bool($scalar)
|
891 |
-
|
892 |
-
Returns true if the passed scalar represents either JSON::true or
|
893 |
-
JSON::false, two constants that act like C<1> and C<0> respectively
|
894 |
-
and are also used to represent JSON C<true> and C<false> in Perl strings.
|
895 |
-
|
896 |
-
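A small sketch of how this is typically used after decoding (the JSON text is invented):

    use JSON;

    # JSON booleans survive decoding as special objects, and
    # JSON::is_bool tells them apart from ordinary 0/1 scalars.
    my $data = JSON->new->decode('{"ok":true,"count":1}');

    print "ok is a JSON boolean\n"   if JSON::is_bool($data->{ok});
    print "count is a plain number\n" unless JSON::is_bool($data->{count});

    print "ok is true\n" if $data->{ok};    # behaves like 1 in boolean context
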
=head2 JSON::true
|
897 |
-
|
898 |
-
Returns JSON true value which is blessed object.
|
899 |
-
It C<isa> JSON::Boolean object.
|
900 |
-
|
901 |
-
=head2 JSON::false
|
902 |
-
|
903 |
-
Returns JSON false value which is blessed object.
|
904 |
-
It C<isa> JSON::Boolean object.
|
905 |
-
|
906 |
-
=head2 JSON::null
|
907 |
-
|
908 |
-
Returns C<undef>.
|
909 |
-
|
910 |
-
See L<MAPPING>, below, for more information on how JSON values are mapped to
|
911 |
-
Perl.
|
912 |
-
|
913 |
-
=head1 HOW DO I DECODE A DATA FROM OUTER AND ENCODE TO OUTER
|
914 |
-
|
915 |
-
This section assumes that your perl is version 5.8 or later.

If you know that a JSON text coming from the outside world - a network, a file,
and so on - is encoded in UTF-8, you should use C<decode_json> or a C<JSON>
object with C<utf8> enabled. The decoded result will then contain Unicode characters.

    # from network
    my $json        = JSON->new->utf8;
    my $json_text   = CGI->new->param( 'json_data' );
    my $perl_scalar = $json->decode( $json_text );

    # from file content
    local $/;
    open( my $fh, '<', 'json.data' );
    $json_text   = <$fh>;
    $perl_scalar = decode_json( $json_text );

If the outside data is not encoded in UTF-8, you should C<decode> it first.

    use Encode;
    local $/;
    open( my $fh, '<', 'json.data' );
    my $encoding = 'cp932';
    my $unicode_json_text = decode( $encoding, <$fh> ); # UNICODE

    # or you can write it like this:
    #
    # open( my $fh, "<:encoding($encoding)", 'json.data' );
    # $unicode_json_text = <$fh>;

In this case, C<$unicode_json_text> is of course a Unicode string.
So you B<cannot> use C<decode_json> or a C<JSON> object with C<utf8> enabled.
Instead, use a C<JSON> object with C<utf8> disabled, or C<from_json>.

    $perl_scalar = $json->utf8(0)->decode( $unicode_json_text );
    # or
    $perl_scalar = from_json( $unicode_json_text );

Or C<encode 'utf8'> and then C<decode_json>:

    $perl_scalar = decode_json( encode( 'utf8', $unicode_json_text ) );
    # this way is not efficient.

Now suppose you want to convert your C<$perl_scalar> into JSON data and
send it to the outside world - a network, a file, and so on.

If your data contains Unicode strings and you want the converted data to be
encoded in UTF-8, use C<encode_json> or a C<JSON> object with C<utf8> enabled.

    print encode_json( $perl_scalar ); # to a network? file? or display?
    # or
    print $json->utf8->encode( $perl_scalar );

If C<$perl_scalar> does not contain Unicode but C<$encoding>-encoded strings
for some reason, then perl regards those characters as B<latin1>
(because perl knows nothing about your $encoding).
In that case you B<cannot> use C<encode_json> or a C<JSON> object with C<utf8> enabled.
Instead, use a C<JSON> object with C<utf8> disabled, or C<to_json>.
Note that the resulting text is a Unicode string, but it can be printed without problems.

    # $perl_scalar contains $encoding encoded string values
    $unicode_json_text = $json->utf8(0)->encode( $perl_scalar );
    # or
    $unicode_json_text = to_json( $perl_scalar );
    # $unicode_json_text consists of characters less than 0x100
    print $unicode_json_text;

Or C<decode $encoding> all the string values and then C<encode_json>:

    $perl_scalar->{ foo } = decode( $encoding, $perl_scalar->{ foo } );
    # ... do this for each string value, then encode_json
    $json_text = encode_json( $perl_scalar );

This is the proper way, but it is probably not efficient.

See L<Encode> and L<perluniintro>.
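
Putting the pieces above together, here is a small end-to-end sketch; the file names and the cp932 encoding are only assumptions for the example:

    use strict;
    use warnings;
    use JSON;

    # Read a JSON file stored in a legacy encoding (assumed cp932 here).
    my $encoding = 'cp932';
    open( my $in, "<:encoding($encoding)", 'legacy.json' ) or die $!;
    my $unicode_json_text = do { local $/; <$in> };
    close $in;

    # utf8 is disabled because the text is already a Unicode string.
    my $perl_scalar = JSON->new->utf8(0)->decode($unicode_json_text);

    # Re-encode the same data as UTF-8 JSON for the outside world.
    open( my $out, '>:raw', 'utf8.json' ) or die $!;
    print {$out} JSON->new->utf8->encode($perl_scalar);
    close $out;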
|
991 |
-
|
992 |
-
|
993 |
-
=head1 COMMON OBJECT-ORIENTED INTERFACE
|
994 |
-
|
995 |
-
=head2 new
|
996 |
-
|
997 |
-
$json = JSON->new
|
998 |
-
|
999 |
-
Returns a new C<JSON> object inherited from either JSON::XS or JSON::PP
|
1000 |
-
that can be used to de/encode JSON strings.
|
1001 |
-
|
1002 |
-
All boolean flags described below are by default I<disabled>.
|
1003 |
-
|
1004 |
-
The mutators for flags all return the JSON object again and thus calls can
|
1005 |
-
be chained:
|
1006 |
-
|
1007 |
-
my $json = JSON->new->utf8->space_after->encode({a => [1,2]})
|
1008 |
-
=> {"a": [1, 2]}
|
1009 |
-
|
1010 |
-
=head2 ascii
|
1011 |
-
|
1012 |
-
$json = $json->ascii([$enable])
|
1013 |
-
|
1014 |
-
$enabled = $json->get_ascii
|
1015 |
-
|
1016 |
-
If $enable is true (or missing), then the encode method will not generate characters outside
|
1017 |
-
the code range 0..127. Any Unicode characters outside that range will be escaped using either
|
1018 |
-
a single \uXXXX or a double \uHHHH\uLLLL escape sequence, as per RFC4627.
|
1019 |
-
|
1020 |
-
If $enable is false, then the encode method will not escape Unicode characters unless
|
1021 |
-
required by the JSON syntax or other flags. This results in a faster and more compact format.
|
1022 |
-
|
1023 |
-
This feature depends on the used Perl version and environment.
|
1024 |
-
|
1025 |
-
See to L<JSON::PP/UNICODE HANDLING ON PERLS> if the backend is PP.
|
1026 |
-
|
1027 |
-
JSON->new->ascii(1)->encode([chr 0x10401])
|
1028 |
-
=> ["\ud801\udc01"]
|
1029 |
-
|
1030 |
-
=head2 latin1
|
1031 |
-
|
1032 |
-
$json = $json->latin1([$enable])
|
1033 |
-
|
1034 |
-
$enabled = $json->get_latin1
|
1035 |
-
|
1036 |
-
If $enable is true (or missing), then the encode method will encode the resulting JSON
|
1037 |
-
text as latin1 (or iso-8859-1), escaping any characters outside the code range 0..255.
|
1038 |
-
|
1039 |
-
If $enable is false, then the encode method will not escape Unicode characters
|
1040 |
-
unless required by the JSON syntax or other flags.
|
1041 |
-
|
1042 |
-
JSON->new->latin1->encode (["\x{89}\x{abc}"]
|
1043 |
-
=> ["\x{89}\\u0abc"] # (perl syntax, U+abc escaped, U+89 not)
|
1044 |
-
|
1045 |
-
=head2 utf8
|
1046 |
-
|
1047 |
-
$json = $json->utf8([$enable])
|
1048 |
-
|
1049 |
-
$enabled = $json->get_utf8
|
1050 |
-
|
1051 |
-
If $enable is true (or missing), then the encode method will encode the JSON result
|
1052 |
-
into UTF-8, as required by many protocols, while the decode method expects to be handed
|
1053 |
-
an UTF-8-encoded string. Please note that UTF-8-encoded strings do not contain any
|
1054 |
-
characters outside the range 0..255, they are thus useful for bytewise/binary I/O.
|
1055 |
-
|
1056 |
-
In future versions, enabling this option might enable autodetection of the UTF-16 and UTF-32
|
1057 |
-
encoding families, as described in RFC4627.
|
1058 |
-
|
1059 |
-
If $enable is false, then the encode method will return the JSON string as a (non-encoded)
|
1060 |
-
Unicode string, while decode expects thus a Unicode string. Any decoding or encoding
|
1061 |
-
(e.g. to UTF-8 or UTF-16) needs to be done yourself, e.g. using the Encode module.
|
1062 |
-
|
1063 |
-
|
1064 |
-
Example, output UTF-16BE-encoded JSON:
|
1065 |
-
|
1066 |
-
use Encode;
|
1067 |
-
$jsontext = encode "UTF-16BE", JSON::XS->new->encode ($object);
|
1068 |
-
|
1069 |
-
Example, decode UTF-32LE-encoded JSON:
|
1070 |
-
|
1071 |
-
use Encode;
|
1072 |
-
$object = JSON::XS->new->decode (decode "UTF-32LE", $jsontext);
|
1073 |
-
|
1074 |
-
See to L<JSON::PP/UNICODE HANDLING ON PERLS> if the backend is PP.
|
1075 |
-
|
1076 |
-
|
1077 |
-
=head2 pretty
|
1078 |
-
|
1079 |
-
$json = $json->pretty([$enable])
|
1080 |
-
|
1081 |
-
This enables (or disables) all of the C<indent>, C<space_before> and
|
1082 |
-
C<space_after> (and in the future possibly more) flags in one call to
|
1083 |
-
generate the most readable (or most compact) form possible.
|
1084 |
-
|
1085 |
-
Equivalent to:
|
1086 |
-
|
1087 |
-
$json->indent->space_before->space_after
|
1088 |
-
|
1089 |
-
The indent space length is three and JSON::XS cannot change the indent
|
1090 |
-
space length.
|
1091 |
-
|
1092 |
-
=head2 indent
|
1093 |
-
|
1094 |
-
$json = $json->indent([$enable])
|
1095 |
-
|
1096 |
-
$enabled = $json->get_indent
|
1097 |
-
|
1098 |
-
If C<$enable> is true (or missing), then the C<encode> method will use a multiline
|
1099 |
-
format as output, putting every array member or object/hash key-value pair
|
1100 |
-
into its own line, indenting them properly.
|
1101 |
-
|
1102 |
-
If C<$enable> is false, no newlines or indenting will be produced, and the
|
1103 |
-
resulting JSON text is guaranteed not to contain any C<newlines>.
|
1104 |
-
|
1105 |
-
This setting has no effect when decoding JSON texts.
|
1106 |
-
|
1107 |
-
The indent space length is three.
|
1108 |
-
With JSON::PP, you can also access C<indent_length> to change indent space length.
|
1109 |
-
|
1110 |
-
|
1111 |
-
=head2 space_before
|
1112 |
-
|
1113 |
-
$json = $json->space_before([$enable])
|
1114 |
-
|
1115 |
-
$enabled = $json->get_space_before
|
1116 |
-
|
1117 |
-
If C<$enable> is true (or missing), then the C<encode> method will add an extra
|
1118 |
-
optional space before the C<:> separating keys from values in JSON objects.
|
1119 |
-
|
1120 |
-
If C<$enable> is false, then the C<encode> method will not add any extra
|
1121 |
-
space at those places.
|
1122 |
-
|
1123 |
-
This setting has no effect when decoding JSON texts.
|
1124 |
-
|
1125 |
-
Example, space_before enabled, space_after and indent disabled:
|
1126 |
-
|
1127 |
-
{"key" :"value"}
|
1128 |
-
|
1129 |
-
|
1130 |
-
=head2 space_after
|
1131 |
-
|
1132 |
-
$json = $json->space_after([$enable])
|
1133 |
-
|
1134 |
-
$enabled = $json->get_space_after
|
1135 |
-
|
1136 |
-
If C<$enable> is true (or missing), then the C<encode> method will add an extra
|
1137 |
-
optional space after the C<:> separating keys from values in JSON objects
|
1138 |
-
and extra whitespace after the C<,> separating key-value pairs and array
|
1139 |
-
members.
|
1140 |
-
|
1141 |
-
If C<$enable> is false, then the C<encode> method will not add any extra
|
1142 |
-
space at those places.
|
1143 |
-
|
1144 |
-
This setting has no effect when decoding JSON texts.
|
1145 |
-
|
1146 |
-
Example, space_before and indent disabled, space_after enabled:
|
1147 |
-
|
1148 |
-
{"key": "value"}
|
1149 |
-
|
1150 |
-
|
1151 |
-
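As a quick illustration of how these whitespace flags combine, here is a sketch comparing compact output with C<pretty> output (the data is invented):

    use JSON;

    my $data = { key => "value", list => [ 1, 2 ] };

    # Compact, single-line output (all whitespace flags disabled).
    print JSON->new->canonical->encode($data), "\n";
    # {"key":"value","list":[1,2]}

    # pretty = indent + space_before + space_after, multi-line output.
    print JSON->new->canonical->pretty->encode($data), "\n";
    # {
    #    "key" : "value",
    #    "list" : [
    #       1,
    #       2
    #    ]
    # }
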
=head2 relaxed
|
1152 |
-
|
1153 |
-
$json = $json->relaxed([$enable])
|
1154 |
-
|
1155 |
-
$enabled = $json->get_relaxed
|
1156 |
-
|
1157 |
-
If C<$enable> is true (or missing), then C<decode> will accept some
|
1158 |
-
extensions to normal JSON syntax (see below). C<encode> will not be
|
1159 |
-
affected in anyway. I<Be aware that this option makes you accept invalid
|
1160 |
-
JSON texts as if they were valid!>. I suggest only to use this option to
|
1161 |
-
parse application-specific files written by humans (configuration files,
|
1162 |
-
resource files etc.)
|
1163 |
-
|
1164 |
-
If C<$enable> is false (the default), then C<decode> will only accept
|
1165 |
-
valid JSON texts.
|
1166 |
-
|
1167 |
-
Currently accepted extensions are:
|
1168 |
-
|
1169 |
-
=over 4
|
1170 |
-
|
1171 |
-
=item * list items can have an end-comma
|
1172 |
-
|
1173 |
-
JSON I<separates> array elements and key-value pairs with commas. This
|
1174 |
-
can be annoying if you write JSON texts manually and want to be able to
|
1175 |
-
quickly append elements, so this extension accepts comma at the end of
|
1176 |
-
such items not just between them:
|
1177 |
-
|
1178 |
-
[
|
1179 |
-
1,
|
1180 |
-
2, <- this comma not normally allowed
|
1181 |
-
]
|
1182 |
-
{
|
1183 |
-
"k1": "v1",
|
1184 |
-
"k2": "v2", <- this comma not normally allowed
|
1185 |
-
}
|
1186 |
-
|
1187 |
-
=item * shell-style '#'-comments
|
1188 |
-
|
1189 |
-
Whenever JSON allows whitespace, shell-style comments are additionally
|
1190 |
-
allowed. They are terminated by the first carriage-return or line-feed
|
1191 |
-
character, after which more white-space and comments are allowed.
|
1192 |
-
|
1193 |
-
[
|
1194 |
-
1, # this comment not allowed in JSON
|
1195 |
-
# neither this one...
|
1196 |
-
]
|
1197 |
-
|
1198 |
-
=back
|
1199 |
-
|
1200 |
-
|
1201 |
-
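For instance, a sketch of decoding a hand-written, relaxed-syntax snippet (the data is invented for the example):

    use JSON;

    # Trailing commas and '#' comments are accepted only with relaxed enabled.
    my $relaxed_text = <<'END';
    {
        "host": "localhost",   # where to connect
        "port": 8080,
    }
    END

    my $config = JSON->new->relaxed->decode($relaxed_text);
    print "$config->{host}:$config->{port}\n";   # localhost:8080
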
=head2 canonical
|
1202 |
-
|
1203 |
-
$json = $json->canonical([$enable])
|
1204 |
-
|
1205 |
-
$enabled = $json->get_canonical
|
1206 |
-
|
1207 |
-
If C<$enable> is true (or missing), then the C<encode> method will output JSON objects
|
1208 |
-
by sorting their keys. This is adding a comparatively high overhead.
|
1209 |
-
|
1210 |
-
If C<$enable> is false, then the C<encode> method will output key-value
|
1211 |
-
pairs in the order Perl stores them (which will likely change between runs
|
1212 |
-
of the same script).
|
1213 |
-
|
1214 |
-
This option is useful if you want the same data structure to be encoded as
|
1215 |
-
the same JSON text (given the same overall settings). If it is disabled,
|
1216 |
-
the same hash might be encoded differently even if it contains the same data,
|
1217 |
-
as key-value pairs have no inherent ordering in Perl.
|
1218 |
-
|
1219 |
-
This setting has no effect when decoding JSON texts.
|
1220 |
-
|
1221 |
-
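A short sketch of why this matters when comparing encoded output (hypothetical data):

    use JSON;

    my $json = JSON->new->canonical;

    my %a = ( x => 1, y => 2, z => 3 );
    my %b = ( z => 3, y => 2, x => 1 );

    # With canonical enabled, equal hashes always stringify identically,
    # so the encoded texts can be compared directly.
    print "same\n" if $json->encode(\%a) eq $json->encode(\%b);
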
=head2 allow_nonref
|
1222 |
-
|
1223 |
-
$json = $json->allow_nonref([$enable])
|
1224 |
-
|
1225 |
-
$enabled = $json->get_allow_nonref
|
1226 |
-
|
1227 |
-
If C<$enable> is true (or missing), then the C<encode> method can convert a
|
1228 |
-
non-reference into its corresponding string, number or null JSON value,
|
1229 |
-
which is an extension to RFC4627. Likewise, C<decode> will accept those JSON
|
1230 |
-
values instead of croaking.
|
1231 |
-
|
1232 |
-
If C<$enable> is false, then the C<encode> method will croak if it isn't
|
1233 |
-
passed an arrayref or hashref, as JSON texts must either be an object
|
1234 |
-
or array. Likewise, C<decode> will croak if given something that is not a
|
1235 |
-
JSON object or array.
|
1236 |
-
|
1237 |
-
JSON->new->allow_nonref->encode ("Hello, World!")
|
1238 |
-
=> "Hello, World!"
|
1239 |
-
|
1240 |
-
=head2 allow_unknown
|
1241 |
-
|
1242 |
-
$json = $json->allow_unknown ([$enable])
|
1243 |
-
|
1244 |
-
$enabled = $json->get_allow_unknown
|
1245 |
-
|
1246 |
-
If $enable is true (or missing), then "encode" will *not* throw an
|
1247 |
-
exception when it encounters values it cannot represent in JSON (for
|
1248 |
-
example, filehandles) but instead will encode a JSON "null" value.
|
1249 |
-
Note that blessed objects are not included here and are handled
|
1250 |
-
separately by c<allow_nonref>.
|
1251 |
-
|
1252 |
-
If $enable is false (the default), then "encode" will throw an
|
1253 |
-
exception when it encounters anything it cannot encode as JSON.
|
1254 |
-
|
1255 |
-
This option does not affect "decode" in any way, and it is
|
1256 |
-
recommended to leave it off unless you know your communications
|
1257 |
-
partner.
|
1258 |
-
|
1259 |
-
=head2 allow_blessed
|
1260 |
-
|
1261 |
-
$json = $json->allow_blessed([$enable])
|
1262 |
-
|
1263 |
-
$enabled = $json->get_allow_blessed
|
1264 |
-
|
1265 |
-
If C<$enable> is true (or missing), then the C<encode> method will not
|
1266 |
-
barf when it encounters a blessed reference. Instead, the value of the
|
1267 |
-
B<convert_blessed> option will decide whether C<null> (C<convert_blessed>
|
1268 |
-
disabled or no C<TO_JSON> method found) or a representation of the
|
1269 |
-
object (C<convert_blessed> enabled and C<TO_JSON> method found) is being
|
1270 |
-
encoded. Has no effect on C<decode>.
|
1271 |
-
|
1272 |
-
If C<$enable> is false (the default), then C<encode> will throw an
|
1273 |
-
exception when it encounters a blessed object.
|
1274 |
-
|
1275 |
-
|
1276 |
-
=head2 convert_blessed
|
1277 |
-
|
1278 |
-
$json = $json->convert_blessed([$enable])
|
1279 |
-
|
1280 |
-
$enabled = $json->get_convert_blessed
|
1281 |
-
|
1282 |
-
If C<$enable> is true (or missing), then C<encode>, upon encountering a
|
1283 |
-
blessed object, will check for the availability of the C<TO_JSON> method
|
1284 |
-
on the object's class. If found, it will be called in scalar context
|
1285 |
-
and the resulting scalar will be encoded instead of the object. If no
|
1286 |
-
C<TO_JSON> method is found, the value of C<allow_blessed> will decide what
|
1287 |
-
to do.
|
1288 |
-
|
1289 |
-
The C<TO_JSON> method may safely call die if it wants. If C<TO_JSON>
|
1290 |
-
returns other blessed objects, those will be handled in the same
|
1291 |
-
way. C<TO_JSON> must take care of not causing an endless recursion cycle
|
1292 |
-
(== crash) in this case. The name of C<TO_JSON> was chosen because other
|
1293 |
-
methods called by the Perl core (== not by the user of the object) are
|
1294 |
-
usually in upper case letters and to avoid collisions with the C<to_json>
|
1295 |
-
function or method.
|
1296 |
-
|
1297 |
-
This setting does not yet influence C<decode> in any way.
|
1298 |
-
|
1299 |
-
If C<$enable> is false, then the C<allow_blessed> setting will decide what
|
1300 |
-
to do when a blessed object is found.
|
1301 |
-
|
1302 |
-
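To make this concrete, here is a minimal sketch of a class that provides its own C<TO_JSON> method; the class name and fields are invented for the example:

    use strict;
    use warnings;
    use JSON;

    package My::Point;

    sub new { my ($class, %args) = @_; bless { %args }, $class }

    # Called by encode() when convert_blessed is enabled; must return
    # a plain (unblessed) structure or scalar.
    sub TO_JSON {
        my ($self) = @_;
        return { x => $self->{x}, y => $self->{y} };
    }

    package main;

    my $point = My::Point->new( x => 1, y => 2 );
    print JSON->new->convert_blessed->canonical->encode( [ $point ] ), "\n";
    # [{"x":1,"y":2}]
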
=over
|
1303 |
-
|
1304 |
-
=item convert_blessed_universally mode
|
1305 |
-
|
1306 |
-
If you use C<JSON> with C<-convert_blessed_universally>, the C<UNIVERSAL::TO_JSON>
subroutine is defined as the following code:
|
1308 |
-
|
1309 |
-
    *UNIVERSAL::TO_JSON = sub {
        my $b_obj = B::svref_2object( $_[0] );
        return    $b_obj->isa('B::HV') ? { %{ $_[0] } }
                : $b_obj->isa('B::AV') ? [ @{ $_[0] } ]
                : undef
                ;
    }

This causes the C<encode> method to convert simple blessed objects into
JSON objects as if they were non-blessed.

    use JSON -convert_blessed_universally;
    $json->allow_blessed->convert_blessed->encode( $blessed_object )
|
1322 |
-
|
1323 |
-
This feature is experimental and may be removed in the future.
|
1324 |
-
|
1325 |
-
=back
|
1326 |
-
|
1327 |
-
=head2 filter_json_object
|
1328 |
-
|
1329 |
-
$json = $json->filter_json_object([$coderef])
|
1330 |
-
|
1331 |
-
When C<$coderef> is specified, it will be called from C<decode> each
|
1332 |
-
time it decodes a JSON object. The only argument passed to the coderef
|
1333 |
-
is a reference to the newly-created hash. If the code reference returns
|
1334 |
-
a single scalar (which need not be a reference), this value
|
1335 |
-
(i.e. a copy of that scalar to avoid aliasing) is inserted into the
|
1336 |
-
deserialised data structure. If it returns an empty list
|
1337 |
-
(NOTE: I<not> C<undef>, which is a valid scalar), the original deserialised
|
1338 |
-
hash will be inserted. This setting can slow down decoding considerably.
|
1339 |
-
|
1340 |
-
When C<$coderef> is omitted or undefined, any existing callback will
|
1341 |
-
be removed and C<decode> will not change the deserialised hash in any
|
1342 |
-
way.
|
1343 |
-
|
1344 |
-
Example, convert all JSON objects into the integer 5:
|
1345 |
-
|
1346 |
-
    my $js = JSON->new->filter_json_object (sub { 5 });

    # returns [5]
    $js->decode ('[{}]'); # the given subroutine takes a hash reference.

    # throws an exception because allow_nonref is not enabled,
    # so a lone 5 is not allowed.
    $js->decode ('{"a":1, "b":2}');
|
1352 |
-
|
1353 |
-
|
1354 |
-
=head2 filter_json_single_key_object
|
1355 |
-
|
1356 |
-
$json = $json->filter_json_single_key_object($key [=> $coderef])
|
1357 |
-
|
1358 |
-
Works remotely similar to C<filter_json_object>, but is only called for
|
1359 |
-
JSON objects having a single key named C<$key>.
|
1360 |
-
|
1361 |
-
This C<$coderef> is called before the one specified via
|
1362 |
-
C<filter_json_object>, if any. It gets passed the single value in the JSON
|
1363 |
-
object. If it returns a single value, it will be inserted into the data
|
1364 |
-
structure. If it returns nothing (not even C<undef> but the empty list),
|
1365 |
-
the callback from C<filter_json_object> will be called next, as if no
|
1366 |
-
single-key callback were specified.
|
1367 |
-
|
1368 |
-
If C<$coderef> is omitted or undefined, the corresponding callback will be
|
1369 |
-
disabled. There can only ever be one callback for a given key.
|
1370 |
-
|
1371 |
-
As this callback gets called less often than the C<filter_json_object>
|
1372 |
-
one, decoding speed will not usually suffer as much. Therefore, single-key
|
1373 |
-
objects make excellent targets to serialise Perl objects into, especially
|
1374 |
-
as single-key JSON objects are as close to the type-tagged value concept
|
1375 |
-
as JSON gets (it's basically an ID/VALUE tuple). Of course, JSON does not
|
1376 |
-
support this in any way, so you need to make sure your data never looks
|
1377 |
-
like a serialised Perl hash.
|
1378 |
-
|
1379 |
-
Typical names for the single object key are C<__class_whatever__>, or
|
1380 |
-
C<$__dollars_are_rarely_used__$> or C<}ugly_brace_placement>, or even
|
1381 |
-
things like C<__class_md5sum(classname)__>, to reduce the risk of clashing
|
1382 |
-
with real hashes.
|
1383 |
-
|
1384 |
-
Example, decode JSON objects of the form C<< { "__widget__" => <id> } >>
|
1385 |
-
into the corresponding C<< $WIDGET{<id>} >> object:
|
1386 |
-
|
1387 |
-
# return whatever is in $WIDGET{5}:
|
1388 |
-
JSON
|
1389 |
-
->new
|
1390 |
-
->filter_json_single_key_object (__widget__ => sub {
|
1391 |
-
$WIDGET{ $_[0] }
|
1392 |
-
})
|
1393 |
-
->decode ('{"__widget__": 5')
|
1394 |
-
|
1395 |
-
# this can be used with a TO_JSON method in some "widget" class
|
1396 |
-
# for serialisation to json:
|
1397 |
-
sub WidgetBase::TO_JSON {
|
1398 |
-
my ($self) = @_;
|
1399 |
-
|
1400 |
-
unless ($self->{id}) {
|
1401 |
-
$self->{id} = ..get..some..id..;
|
1402 |
-
$WIDGET{$self->{id}} = $self;
|
1403 |
-
}
|
1404 |
-
|
1405 |
-
{ __widget__ => $self->{id} }
|
1406 |
-
}
|
1407 |
-
|
1408 |
-
|
1409 |
-
=head2 shrink
|
1410 |
-
|
1411 |
-
$json = $json->shrink([$enable])
|
1412 |
-
|
1413 |
-
$enabled = $json->get_shrink
|
1414 |
-
|
1415 |
-
With JSON::XS, this flag resizes strings generated by either
|
1416 |
-
C<encode> or C<decode> to their minimum size possible. This can save
|
1417 |
-
memory when your JSON texts are either very very long or you have many
|
1418 |
-
short strings. It will also try to downgrade any strings to octet-form
|
1419 |
-
if possible: perl stores strings internally either in an encoding called
|
1420 |
-
UTF-X or in octet-form. The latter cannot store everything but uses less
|
1421 |
-
space in general (and some buggy Perl or C code might even rely on that
|
1422 |
-
internal representation being used).
|
1423 |
-
|
1424 |
-
With JSON::PP, this is a no-op as far as resizing strings goes, but it tries
C<utf8::downgrade> on the string returned by C<encode>. See L<utf8>.
|
1426 |
-
|
1427 |
-
See to L<JSON::XS/OBJECT-ORIENTED INTERFACE> and L<JSON::PP/METHODS>.
|
1428 |
-
|
1429 |
-
=head2 max_depth
|
1430 |
-
|
1431 |
-
$json = $json->max_depth([$maximum_nesting_depth])
|
1432 |
-
|
1433 |
-
$max_depth = $json->get_max_depth
|
1434 |
-
|
1435 |
-
Sets the maximum nesting level (default C<512>) accepted while encoding
|
1436 |
-
or decoding. If a higher nesting level is detected in JSON text or a Perl
|
1437 |
-
data structure, then the encoder and decoder will stop and croak at that
|
1438 |
-
point.
|
1439 |
-
|
1440 |
-
Nesting level is defined by number of hash- or arrayrefs that the encoder
|
1441 |
-
needs to traverse to reach a given point or the number of C<{> or C<[>
|
1442 |
-
characters without their matching closing parenthesis crossed to reach a
|
1443 |
-
given character in a string.
|
1444 |
-
|
1445 |
-
If no argument is given, the highest possible setting will be used, which
|
1446 |
-
is rarely useful.
|
1447 |
-
|
1448 |
-
Note that nesting is implemented by recursion in C. The default value has
|
1449 |
-
been chosen to be as large as typical operating systems allow without
|
1450 |
-
crashing. (JSON::XS)
|
1451 |
-
|
1452 |
-
With JSON::PP as the backend, if a large value (100 or more) is set and a deeply
nested object/text is encoded or decoded, perl may raise a
'Deep recursion on subroutine' warning at runtime.
|
1455 |
-
|
1456 |
-
See L<JSON::XS/SECURITY CONSIDERATIONS> for more info on why this is useful.
|
1457 |
-
|
1458 |
-
=head2 max_size
|
1459 |
-
|
1460 |
-
$json = $json->max_size([$maximum_string_size])
|
1461 |
-
|
1462 |
-
$max_size = $json->get_max_size
|
1463 |
-
|
1464 |
-
Set the maximum length a JSON text may have (in bytes) where decoding is
|
1465 |
-
being attempted. The default is C<0>, meaning no limit. When C<decode>
|
1466 |
-
is called on a string that is longer than this many bytes, it will not
|
1467 |
-
attempt to decode the string but throw an exception. This setting has no
|
1468 |
-
effect on C<encode> (yet).
|
1469 |
-
|
1470 |
-
If no argument is given, the limit check will be deactivated (same as when
|
1471 |
-
C<0> is specified).
|
1472 |
-
|
1473 |
-
See L<JSON::XS/SECURITY CONSIDERATIONS> for more info on why this is useful.
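
A sketch of using these limits defensively when decoding untrusted input (the limit values are arbitrary):

    use JSON;

    my $json = JSON->new->utf8->max_depth(32)->max_size(1024 * 1024);

    # 64 nested arrays - deeper than the limit set above.
    my $untrusted = ('[' x 64) . (']' x 64);

    my $data = eval { $json->decode($untrusted) };
    warn "rejected input: $@" if $@;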
|
1474 |
-
|
1475 |
-
=head2 encode
|
1476 |
-
|
1477 |
-
$json_text = $json->encode($perl_scalar)
|
1478 |
-
|
1479 |
-
Converts the given Perl data structure (a simple scalar or a reference
|
1480 |
-
to a hash or array) to its JSON representation. Simple scalars will be
|
1481 |
-
converted into JSON string or number sequences, while references to arrays
|
1482 |
-
become JSON arrays and references to hashes become JSON objects. Undefined
|
1483 |
-
Perl values (e.g. C<undef>) become JSON C<null> values.
|
1484 |
-
References to the integers C<0> and C<1> are converted into C<true> and C<false>.
|
1485 |
-
|
1486 |
-
=head2 decode
|
1487 |
-
|
1488 |
-
$perl_scalar = $json->decode($json_text)
|
1489 |
-
|
1490 |
-
The opposite of C<encode>: expects a JSON text and tries to parse it,
|
1491 |
-
returning the resulting simple scalar or reference. Croaks on error.
|
1492 |
-
|
1493 |
-
JSON numbers and strings become simple Perl scalars. JSON arrays become
|
1494 |
-
Perl arrayrefs and JSON objects become Perl hashrefs. C<true> becomes
|
1495 |
-
C<1> (C<JSON::true>), C<false> becomes C<0> (C<JSON::false>) and
|
1496 |
-
C<null> becomes C<undef>.
|
1497 |
-
|
1498 |
-
=head2 decode_prefix
|
1499 |
-
|
1500 |
-
($perl_scalar, $characters) = $json->decode_prefix($json_text)
|
1501 |
-
|
1502 |
-
This works like the C<decode> method, but instead of raising an exception
|
1503 |
-
when there is trailing garbage after the first JSON object, it will
|
1504 |
-
silently stop parsing there and return the number of characters consumed
|
1505 |
-
so far.
|
1506 |
-
|
1507 |
-
JSON->new->decode_prefix ("[1] the tail")
|
1508 |
-
=> ([], 3)
|
1509 |
-
|
1510 |
-
See to L<JSON::XS/OBJECT-ORIENTED INTERFACE>
|
1511 |
-
|
1512 |
-
=head2 property
|
1513 |
-
|
1514 |
-
$boolean = $json->property($property_name)
|
1515 |
-
|
1516 |
-
Returns the boolean value of one of the properties listed below.
|
1517 |
-
|
1518 |
-
The available properties are C<ascii>, C<latin1>, C<utf8>,
|
1519 |
-
C<indent>,C<space_before>, C<space_after>, C<relaxed>, C<canonical>,
|
1520 |
-
C<allow_nonref>, C<allow_unknown>, C<allow_blessed>, C<convert_blessed>,
|
1521 |
-
C<shrink>, C<max_depth> and C<max_size>.
|
1522 |
-
|
1523 |
-
$boolean = $json->property('utf8');
|
1524 |
-
=> 0
|
1525 |
-
$json->utf8;
|
1526 |
-
$boolean = $json->property('utf8');
|
1527 |
-
=> 1
|
1528 |
-
|
1529 |
-
Sets the property with a given boolean value.
|
1530 |
-
|
1531 |
-
$json = $json->property($property_name => $boolean);
|
1532 |
-
|
1533 |
-
With no argument, it returns all the above properties as a hash reference.
|
1534 |
-
|
1535 |
-
$flag_hashref = $json->property();
|
1536 |
-
|
1537 |
-
=head1 INCREMENTAL PARSING
|
1538 |
-
|
1539 |
-
Most of this section is copied and modified from L<JSON::XS/INCREMENTAL PARSING>.
|
1540 |
-
|
1541 |
-
In some cases, there is the need for incremental parsing of JSON texts.
|
1542 |
-
This module does allow you to parse a JSON stream incrementally.
|
1543 |
-
It does so by accumulating text until it has a full JSON object, which
|
1544 |
-
it then can decode. This process is similar to using C<decode_prefix>
|
1545 |
-
to see if a full JSON object is available, but is much more efficient
|
1546 |
-
(and can be implemented with a minimum of method calls).
|
1547 |
-
|
1548 |
-
The backend module will only attempt to parse the JSON text once it is sure it
|
1549 |
-
has enough text to get a decisive result, using a very simple but
|
1550 |
-
truly incremental parser. This means that it sometimes won't stop as
|
1551 |
-
early as the full parser, for example, it doesn't detect parenthesis
|
1552 |
-
mismatches. The only thing it guarantees is that it starts decoding as
|
1553 |
-
soon as a syntactically valid JSON text has been seen. This means you need
|
1554 |
-
to set resource limits (e.g. C<max_size>) to ensure the parser will stop
|
1555 |
-
parsing in the presence of syntax errors.
|
1556 |
-
|
1557 |
-
The following methods implement this incremental parser.
|
1558 |
-
|
1559 |
-
=head2 incr_parse
|
1560 |
-
|
1561 |
-
$json->incr_parse( [$string] ) # void context
|
1562 |
-
|
1563 |
-
$obj_or_undef = $json->incr_parse( [$string] ) # scalar context
|
1564 |
-
|
1565 |
-
@obj_or_empty = $json->incr_parse( [$string] ) # list context
|
1566 |
-
|
1567 |
-
This is the central parsing function. It can both append new text and
|
1568 |
-
extract objects from the stream accumulated so far (both of these
|
1569 |
-
functions are optional).
|
1570 |
-
|
1571 |
-
If C<$string> is given, then this string is appended to the already
|
1572 |
-
existing JSON fragment stored in the C<$json> object.
|
1573 |
-
|
1574 |
-
After that, if the function is called in void context, it will simply
|
1575 |
-
return without doing anything further. This can be used to add more text
|
1576 |
-
in as many chunks as you want.
|
1577 |
-
|
1578 |
-
If the method is called in scalar context, then it will try to extract
|
1579 |
-
exactly I<one> JSON object. If that is successful, it will return this
|
1580 |
-
object, otherwise it will return C<undef>. If there is a parse error,
|
1581 |
-
this method will croak just as C<decode> would do (one can then use
|
1582 |
-
C<incr_skip> to skip the erroneous part). This is the most common way of
|
1583 |
-
using the method.
|
1584 |
-
|
1585 |
-
And finally, in list context, it will try to extract as many objects
|
1586 |
-
from the stream as it can find and return them, or the empty list
|
1587 |
-
otherwise. For this to work, there must be no separators between the JSON
|
1588 |
-
objects or arrays, instead they must be concatenated back-to-back. If
|
1589 |
-
an error occurs, an exception will be raised as in the scalar context
|
1590 |
-
case. Note that in this case, any previously-parsed JSON texts will be
|
1591 |
-
lost.
|
1592 |
-
|
1593 |
-
Example: Parse some JSON arrays/objects in a given string and return them.
|
1594 |
-
|
1595 |
-
my @objs = JSON->new->incr_parse ("[5][7][1,2]");
|
1596 |
-
|
1597 |
-
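As a further sketch, incremental parsing is typically driven by a read loop; the in-memory chunk list below merely stands in for a socket or file handle:

    use JSON;

    my $json = JSON->new;

    # Pretend these chunks arrived one at a time from a socket.
    my @chunks = ( '{"a":', '1}{"b"', ':2}' );

    for my $chunk (@chunks) {
        $json->incr_parse($chunk);          # void context: just accumulate text

        # Pull out every object that is complete so far.
        while ( my $obj = $json->incr_parse ) {
            print join( ',', %$obj ), "\n";
        }
    }
    # prints "a,1" and then "b,2"
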
=head2 incr_text
|
1598 |
-
|
1599 |
-
$lvalue_string = $json->incr_text
|
1600 |
-
|
1601 |
-
This method returns the currently stored JSON fragment as an lvalue, that
|
1602 |
-
is, you can manipulate it. This I<only> works when a preceding call to
|
1603 |
-
C<incr_parse> in I<scalar context> successfully returned an object. Under
|
1604 |
-
all other circumstances you must not call this function (I mean it.
|
1605 |
-
although in simple tests it might actually work, it I<will> fail under
|
1606 |
-
real world conditions). As a special exception, you can also call this
|
1607 |
-
method before having parsed anything.
|
1608 |
-
|
1609 |
-
This function is useful in two cases: a) finding the trailing text after a
|
1610 |
-
JSON object or b) parsing multiple JSON objects separated by non-JSON text
|
1611 |
-
(such as commas).
|
1612 |
-
|
1613 |
-
$json->incr_text =~ s/\s*,\s*//;
|
1614 |
-
|
1615 |
-
In Perl 5.005, the C<lvalue> attribute is not available,
so you must write code like this:
|
1617 |
-
|
1618 |
-
$string = $json->incr_text;
|
1619 |
-
$string =~ s/\s*,\s*//;
|
1620 |
-
$json->incr_text( $string );
|
1621 |
-
|
1622 |
-
=head2 incr_skip
|
1623 |
-
|
1624 |
-
$json->incr_skip
|
1625 |
-
|
1626 |
-
This will reset the state of the incremental parser and will remove the
|
1627 |
-
parsed text from the input buffer. This is useful after C<incr_parse>
|
1628 |
-
died, in which case the input buffer and incremental parser state is left
|
1629 |
-
unchanged, to skip the text parsed so far and to reset the parse state.
|
1630 |
-
|
1631 |
-
=head2 incr_reset
|
1632 |
-
|
1633 |
-
$json->incr_reset
|
1634 |
-
|
1635 |
-
This completely resets the incremental parser, that is, after this call,
|
1636 |
-
it will be as if the parser had never parsed anything.
|
1637 |
-
|
1638 |
-
This is useful if you want to repeatedly parse JSON objects and want to
|
1639 |
-
ignore any trailing data, which means you have to reset the parser after
|
1640 |
-
each successful decode.
|
1641 |
-
|
1642 |
-
See L<JSON::XS/INCREMENTAL PARSING> for examples.
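
A minimal sketch of recovering from a parse error with C<incr_skip> (the broken fragment is invented):

    use JSON;

    my $json = JSON->new;

    # Feed a chunk that turns out to be invalid JSON.
    my $obj = eval { $json->incr_parse('{"a": ]') };

    if ($@) {
        warn "bad fragment: $@";
        $json->incr_skip;        # drop the erroneous text, keep the parser usable
        # or: $json->incr_reset; # forget everything accumulated so far
    }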
|
1643 |
-
|
1644 |
-
|
1645 |
-
=head1 JSON::PP SUPPORT METHODS
|
1646 |
-
|
1647 |
-
The methods below are JSON::PP's own methods, so they are available when C<JSON>
works with JSON::PP (i.e. the created object is a JSON::PP object).
See L<JSON::PP/JSON::PP OWN METHODS> for details.

If you use C<JSON> with the additional C<-support_by_pp> option, some of these methods
are available even with JSON::XS. See L<USE PP FEATURES EVEN THOUGH XS BACKEND>.

    BEGIN { $ENV{PERL_JSON_BACKEND} = 'JSON::XS' }

    use JSON -support_by_pp;

    my $json = JSON->new;
    $json->allow_nonref->escape_slash->encode("/");

    # functional interfaces too.
    print to_json(["/"], {escape_slash => 1});
    print from_json('["foo"]', {utf8 => 1});

If you want C<-support_by_pp> but none of the exported functions,
use C<-no_export>.

    use JSON -support_by_pp, -no_export;
    # functional interfaces are not exported.
|
1670 |
-
|
1671 |
-
=head2 allow_singlequote
|
1672 |
-
|
1673 |
-
$json = $json->allow_singlequote([$enable])
|
1674 |
-
|
1675 |
-
If C<$enable> is true (or missing), then C<decode> will accept
JSON strings quoted with single quotes, which is invalid JSON.

    $json->allow_singlequote->decode(q|{"foo":'bar'}|);
    $json->allow_singlequote->decode(q|{'foo':"bar"}|);
    $json->allow_singlequote->decode(q|{'foo':'bar'}|);

As with the C<relaxed> option, this option may be used to parse
application-specific files written by humans.
|
1685 |
-
|
1686 |
-
=head2 allow_barekey
|
1687 |
-
|
1688 |
-
$json = $json->allow_barekey([$enable])
|
1689 |
-
|
1690 |
-
If C<$enable> is true (or missing), then C<decode> will accept
bare (unquoted) keys in JSON objects, which is invalid JSON.

As with the C<relaxed> option, this option may be used to parse
application-specific files written by humans.

    $json->allow_barekey->decode('{foo:"bar"}');
|
1697 |
-
|
1698 |
-
=head2 allow_bignum
|
1699 |
-
|
1700 |
-
$json = $json->allow_bignum([$enable])
|
1701 |
-
|
1702 |
-
If C<$enable> is true (or missing), then C<decode> will convert
big integers that Perl cannot handle as integers into L<Math::BigInt>
objects and convert any floating-point number into a L<Math::BigFloat> object.

Conversely, C<encode> converts C<Math::BigInt> and C<Math::BigFloat>
objects into JSON numbers when C<allow_blessed> is enabled.

    $json->allow_nonref->allow_blessed->allow_bignum;
    $bigfloat = $json->decode('2.000000000000000000000000001');
    print $json->encode($bigfloat);
    # => 2.000000000000000000000000001

See L<MAPPING> about the conversion of JSON numbers.
|
1715 |
-
|
1716 |
-
=head2 loose
|
1717 |
-
|
1718 |
-
$json = $json->loose([$enable])
|
1719 |
-
|
1720 |
-
Unescaped characters in the range [\x00-\x1f\x22\x2f\x5c] are invalid inside JSON
strings, and by default the module does not C<decode> them (except for \x2f).
If C<$enable> is true (or missing), then C<decode> will accept such
unescaped characters.

    $json->loose->decode(qq|["abc
    def"]|);

See L<JSON::PP/JSON::PP OWN METHODS>.
|
1729 |
-
|
1730 |
-
=head2 escape_slash
|
1731 |
-
|
1732 |
-
$json = $json->escape_slash([$enable])
|
1733 |
-
|
1734 |
-
According to JSON Grammar, I<slash> (U+002F) is escaped. But by default
|
1735 |
-
JSON backend modules encode strings without escaping slash.
|
1736 |
-
|
1737 |
-
If C<$enable> is true (or missing), then C<encode> will escape slashes.
|
1738 |
-
|
1739 |
-
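A small sketch of the effect (assuming the JSON::PP backend or C<-support_by_pp>):

    use JSON -support_by_pp;

    my $json = JSON->new->allow_nonref;

    print $json->encode("a/b"), "\n";                  # "a/b"
    print $json->escape_slash->encode("a/b"), "\n";    # "a\/b"
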
=head2 indent_length
|
1740 |
-
|
1741 |
-
$json = $json->indent_length($length)
|
1742 |
-
|
1743 |
-
With JSON::XS, the indent space length is 3 and cannot be changed.
|
1744 |
-
With JSON::PP, it sets the indent space length with the given $length.
|
1745 |
-
The default is 3. The acceptable range is 0 to 15.
|
1746 |
-
|
1747 |
-
=head2 sort_by
|
1748 |
-
|
1749 |
-
$json = $json->sort_by($function_name)
|
1750 |
-
$json = $json->sort_by($subroutine_ref)
|
1751 |
-
|
1752 |
-
If $function_name or $subroutine_ref is given, that sort routine is used.
|
1753 |
-
|
1754 |
-
$js = $pc->sort_by(sub { $JSON::PP::a cmp $JSON::PP::b })->encode($obj);
|
1755 |
-
# is($js, q|{"a":1,"b":2,"c":3,"d":4,"e":5,"f":6,"g":7,"h":8,"i":9}|);
|
1756 |
-
|
1757 |
-
$js = $pc->sort_by('own_sort')->encode($obj);
|
1758 |
-
# is($js, q|{"a":1,"b":2,"c":3,"d":4,"e":5,"f":6,"g":7,"h":8,"i":9}|);
|
1759 |
-
|
1760 |
-
sub JSON::PP::own_sort { $JSON::PP::a cmp $JSON::PP::b }
|
1761 |
-
|
1762 |
-
As the sorting routine runs in the JSON::PP scope, the given
|
1763 |
-
subroutine name and the special variables C<$a>, C<$b> will begin
|
1764 |
-
with 'JSON::PP::'.
|
1765 |
-
|
1766 |
-
If an integer (e.g. C<1>) is given instead, the effect is the same as enabling C<canonical>.
|
1767 |
-
|
1768 |
-
See to L<JSON::PP/JSON::PP OWN METHODS>.
|
1769 |
-
|
1770 |
-
=head1 MAPPING
|
1771 |
-
|
1772 |
-
This section is copied from JSON::XS and modified for C<JSON>.
|
1773 |
-
JSON::XS and JSON::PP mapping mechanisms are almost equivalent.
|
1774 |
-
|
1775 |
-
See to L<JSON::XS/MAPPING>.
|
1776 |
-
|
1777 |
-
=head2 JSON -> PERL
|
1778 |
-
|
1779 |
-
=over 4
|
1780 |
-
|
1781 |
-
=item object
|
1782 |
-
|
1783 |
-
A JSON object becomes a reference to a hash in Perl. No ordering of object
|
1784 |
-
keys is preserved (JSON does not preserve object key ordering itself).
|
1785 |
-
|
1786 |
-
=item array
|
1787 |
-
|
1788 |
-
A JSON array becomes a reference to an array in Perl.
|
1789 |
-
|
1790 |
-
=item string
|
1791 |
-
|
1792 |
-
A JSON string becomes a string scalar in Perl - Unicode codepoints in JSON
|
1793 |
-
are represented by the same codepoints in the Perl string, so no manual
|
1794 |
-
decoding is necessary.
|
1795 |
-
|
1796 |
-
=item number
|
1797 |
-
|
1798 |
-
A JSON number becomes either an integer, numeric (floating point) or
|
1799 |
-
string scalar in perl, depending on its range and any fractional parts. On
|
1800 |
-
the Perl level, there is no difference between those as Perl handles all
|
1801 |
-
the conversion details, but an integer may take slightly less memory and
|
1802 |
-
might represent more values exactly than floating point numbers.
|
1803 |
-
|
1804 |
-
If the number consists of digits only, C<JSON> will try to represent
|
1805 |
-
it as an integer value. If that fails, it will try to represent it as
|
1806 |
-
a numeric (floating point) value if that is possible without loss of
|
1807 |
-
precision. Otherwise it will preserve the number as a string value (in
|
1808 |
-
which case you lose roundtripping ability, as the JSON number will be
|
1809 |
-
re-encoded to a JSON string).
|
1810 |
-
|
1811 |
-
Numbers containing a fractional or exponential part will always be
|
1812 |
-
represented as numeric (floating point) values, possibly at a loss of
|
1813 |
-
precision (in which case you might lose perfect roundtripping ability, but
|
1814 |
-
the JSON number will still be re-encoded as a JSON number).
|
1815 |
-
|
1816 |
-
Note that precision is not accuracy - binary floating point values cannot
|
1817 |
-
represent most decimal fractions exactly, and when converting from and to
|
1818 |
-
floating point, C<JSON> only guarantees precision up to but not including
|
1819 |
-
the least significant bit.
|
1820 |
-
|
1821 |
-
If the backend is JSON::PP and C<allow_bignum> is enabled, the big integers
|
1822 |
-
and the numeric can be optionally converted into L<Math::BigInt> and
|
1823 |
-
L<Math::BigFloat> objects.
|
1824 |
-
|
1825 |
-
=item true, false
|
1826 |
-
|
1827 |
-
These JSON atoms become C<JSON::true> and C<JSON::false>,
|
1828 |
-
respectively. They are overloaded to act almost exactly like the numbers
|
1829 |
-
C<1> and C<0>. You can check whether a scalar is a JSON boolean by using
|
1830 |
-
the C<JSON::is_bool> function.
|
1831 |
-
|
1832 |
-
print JSON::true + 1;
|
1833 |
-
=> 1
|
1834 |
-
|
1835 |
-
ok(JSON::true eq '1');
|
1836 |
-
ok(JSON::true == 1);
|
1837 |
-
|
1838 |
-
C<JSON> will install these missing overloading features to the backend modules.
|
1839 |
-
|
1840 |
-
|
1841 |
-
=item null
|
1842 |
-
|
1843 |
-
A JSON null atom becomes C<undef> in Perl.
|
1844 |
-
|
1845 |
-
C<JSON::null> returns C<undef>.
|
1846 |
-
|
1847 |
-
=back
|
1848 |
-
|
1849 |
-
|
1850 |
-
=head2 PERL -> JSON
|
1851 |
-
|
1852 |
-
The mapping from Perl to JSON is slightly more difficult, as Perl is a
|
1853 |
-
truly typeless language, so we can only guess which JSON type is meant by
|
1854 |
-
a Perl value.
|
1855 |
-
|
1856 |
-
=over 4
|
1857 |
-
|
1858 |
-
=item hash references
|
1859 |
-
|
1860 |
-
Perl hash references become JSON objects. As there is no inherent ordering
|
1861 |
-
in hash keys (or JSON objects), they will usually be encoded in a
|
1862 |
-
pseudo-random order that can change between runs of the same program but
|
1863 |
-
stays generally the same within a single run of a program. C<JSON>
|
1864 |
-
optionally sorts the hash keys (determined by the I<canonical> flag), so
|
1865 |
-
the same data structure will serialise to the same JSON text (given same
|
1866 |
-
settings and version of JSON::XS), but this incurs a runtime overhead
|
1867 |
-
and is only rarely useful, e.g. when you want to compare some JSON text
|
1868 |
-
against another for equality.
|
1869 |
-
|
1870 |
-
In the future, an ordered-object feature may be added to JSON::PP using the C<tie> mechanism.
|
1871 |
-
|
1872 |
-
|
1873 |
-
=item array references
|
1874 |
-
|
1875 |
-
Perl array references become JSON arrays.
|
1876 |
-
|
1877 |
-
=item other references
|
1878 |
-
|
1879 |
-
Other unblessed references are generally not allowed and will cause an
|
1880 |
-
exception to be thrown, except for references to the integers C<0> and
|
1881 |
-
C<1>, which get turned into C<false> and C<true> atoms in JSON. You can
|
1882 |
-
also use C<JSON::false> and C<JSON::true> to improve readability.
|
1883 |
-
|
1884 |
-
to_json [\0,JSON::true] # yields [false,true]
|
1885 |
-
|
1886 |
-
=item JSON::true, JSON::false, JSON::null
|
1887 |
-
|
1888 |
-
These special values become JSON true and JSON false values,
|
1889 |
-
respectively. You can also use C<\1> and C<\0> directly if you want.
|
1890 |
-
|
1891 |
-
JSON::null returns C<undef>.
|
1892 |
-
|
1893 |
-
=item blessed objects
|
1894 |
-
|
1895 |
-
Blessed objects are not directly representable in JSON. See the
|
1896 |
-
C<allow_blessed> and C<convert_blessed> methods on various options on
|
1897 |
-
how to deal with this: basically, you can choose between throwing an
|
1898 |
-
exception, encoding the reference as if it weren't blessed, or provide
|
1899 |
-
your own serialiser method.
|
1900 |
-
|
1901 |
-
With C<convert_blessed_universally> mode, C<encode> converts blessed
hash references or blessed array references (including any blessed references
they contain) into JSON objects and arrays.
|
1904 |
-
|
1905 |
-
use JSON -convert_blessed_universally;
|
1906 |
-
JSON->new->allow_blessed->convert_blessed->encode( $blessed_object );
|
1907 |
-
|
1908 |
-
See to L<convert_blessed>.
|
1909 |
-
|
1910 |
-
=item simple scalars
|
1911 |
-
|
1912 |
-
Simple Perl scalars (any scalar that is not a reference) are the most
|
1913 |
-
difficult objects to encode: JSON::XS and JSON::PP will encode undefined scalars as
|
1914 |
-
JSON C<null> values, scalars that have last been used in a string context
|
1915 |
-
before encoding as JSON strings, and anything else as number value:
|
1916 |
-
|
1917 |
-
# dump as number
|
1918 |
-
encode_json [2] # yields [2]
|
1919 |
-
encode_json [-3.0e17] # yields [-3e+17]
|
1920 |
-
my $value = 5; encode_json [$value] # yields [5]
|
1921 |
-
|
1922 |
-
# used as string, so dump as string
|
1923 |
-
print $value;
|
1924 |
-
encode_json [$value] # yields ["5"]
|
1925 |
-
|
1926 |
-
# undef becomes null
|
1927 |
-
encode_json [undef] # yields [null]
|
1928 |
-
|
1929 |
-
You can force the type to be a string by stringifying it:
|
1930 |
-
|
1931 |
-
my $x = 3.1; # some variable containing a number
|
1932 |
-
"$x"; # stringified
|
1933 |
-
$x .= ""; # another, more awkward way to stringify
|
1934 |
-
print $x; # perl does it for you, too, quite often
|
1935 |
-
|
1936 |
-
You can force the type to be a number by numifying it:
|
1937 |
-
|
1938 |
-
my $x = "3"; # some variable containing a string
|
1939 |
-
$x += 0; # numify it, ensuring it will be dumped as a number
|
1940 |
-
$x *= 1; # same thing, the choice is yours.
|
1941 |
-
|
1942 |
-
You can not currently force the type in other, less obscure, ways.
|
1943 |
-
|
1944 |
-
Note that numerical precision has the same meaning as under Perl (so
|
1945 |
-
binary to decimal conversion follows the same rules as in Perl, which
|
1946 |
-
can differ to other languages). Also, your perl interpreter might expose
|
1947 |
-
extensions to the floating point numbers of your platform, such as
|
1948 |
-
infinities or NaN's - these cannot be represented in JSON, and it is an
|
1949 |
-
error to pass those in.
|
1950 |
-
|
1951 |
-
=item Big Number
|
1952 |
-
|
1953 |
-
If the backend is JSON::PP and C<allow_bignum> is enabled,
|
1954 |
-
C<encode> converts C<Math::BigInt> objects and C<Math::BigFloat>
|
1955 |
-
objects into JSON numbers.
|
1956 |
-
|
1957 |
-
|
1958 |
-
=back
|
1959 |
-
|
1960 |
-
=head1 JSON and ECMAscript
|
1961 |
-
|
1962 |
-
See to L<JSON::XS/JSON and ECMAscript>.
|
1963 |
-
|
1964 |
-
=head1 JSON and YAML
|
1965 |
-
|
1966 |
-
JSON is not a subset of YAML.
|
1967 |
-
See to L<JSON::XS/JSON and YAML>.
|
1968 |
-
|
1969 |
-
|
1970 |
-
=head1 BACKEND MODULE DECISION
|
1971 |
-
|
1972 |
-
When you use C<JSON>, C<JSON> tries to C<use> JSON::XS. If that fails, it
C<use>s JSON::PP instead. The required JSON::XS version is I<2.2> or later.
|
1974 |
-
|
1975 |
-
The C<JSON> constructor method returns an object that inherits from the backend
module; a JSON::XS object is a blessed scalar reference, while a JSON::PP object
is a blessed hash reference.
|
1978 |
-
|
1979 |
-
So your program should not depend on the backend module; in particular,
the returned objects should not be modified.
|
1981 |
-
|
1982 |
-
my $json = JSON->new; # XS or PP?
|
1983 |
-
$json->{stash} = 'this is xs object'; # this code may raise an error!
|
1984 |
-
|
1985 |
-
To check the backend module, there are some methods - C<backend>, C<is_pp> and C<is_xs>.
|
1986 |
-
|
1987 |
-
JSON->backend; # 'JSON::XS' or 'JSON::PP'
|
1988 |
-
|
1989 |
-
JSON->backend->is_pp; # 0 or 1
|
1990 |
-
|
1991 |
-
JSON->backend->is_xs; # 1 or 0
|
1992 |
-
|
1993 |
-
$json->is_xs; # 1 or 0
|
1994 |
-
|
1995 |
-
$json->is_pp; # 0 or 1
|
1996 |
-
|
1997 |
-
|
1998 |
-
If you set the environment variable C<PERL_JSON_BACKEND>, the backend selection behaviour changes as follows.
|
1999 |
-
|
2000 |
-
=over
|
2001 |
-
|
2002 |
-
=item PERL_JSON_BACKEND = 0 or PERL_JSON_BACKEND = 'JSON::PP'
|
2003 |
-
|
2004 |
-
Always use JSON::PP
|
2005 |
-
|
2006 |
-
=item PERL_JSON_BACKEND = 1 or PERL_JSON_BACKEND = 'JSON::XS,JSON::PP'
|
2007 |
-
|
2008 |
-
(The default) Use compiled JSON::XS if it is properly compiled & installed,
|
2009 |
-
otherwise use JSON::PP.
|
2010 |
-
|
2011 |
-
=item PERL_JSON_BACKEND = 2 or PERL_JSON_BACKEND = 'JSON::XS'
|
2012 |
-
|
2013 |
-
Always use compiled JSON::XS, die if it isn't properly compiled & installed.
|
2014 |
-
|
2015 |
-
=item PERL_JSON_BACKEND = 'JSON::backportPP'
|
2016 |
-
|
2017 |
-
Always use JSON::backportPP.
|
2018 |
-
JSON::backportPP is JSON::PP back port module.
|
2019 |
-
C<JSON> includes JSON::backportPP instead of JSON::PP.
|
2020 |
-
|
2021 |
-
=back
|
2022 |
-
|
2023 |
-
These ideas come from L<DBI::PurePerl> mechanism.
|
2024 |
-
|
2025 |
-
example:
|
2026 |
-
|
2027 |
-
BEGIN { $ENV{PERL_JSON_BACKEND} = 'JSON::PP' }
|
2028 |
-
use JSON; # always uses JSON::PP
|
2029 |
-
|
2030 |
-
In future, it may be able to specify another module.
|
2031 |
-
|
2032 |
-
=head1 USE PP FEATURES EVEN THOUGH XS BACKEND
|
2033 |
-
|
2034 |
-
Many methods are available with either JSON::XS or JSON::PP.
When the backend module is JSON::XS and a JSON::PP-specific (i.e. JSON::XS-unsupported)
method is called, it will C<warn> and be a no-op.
|
2037 |
-
|
2038 |
-
But if you C<use> C<JSON> with the optional string C<-support_by_pp>,
some of those unsupported methods become available.
This feature is achieved by using JSON::PP for C<de/encode>.
|
2041 |
-
|
2042 |
-
BEGIN { $ENV{PERL_JSON_BACKEND} = 2 } # with JSON::XS
|
2043 |
-
use JSON -support_by_pp;
|
2044 |
-
my $json = JSON->new;
|
2045 |
-
$json->allow_nonref->escape_slash->encode("/");
|
2046 |
-
|
2047 |
-
In this case, the returned object is a C<JSON::Backend::XS::Supportable>
object (a re-blessed XS object). By checking the JSON::XS-unsupported flags
during de/encoding, it can support some otherwise unsupported methods - C<loose>,
C<allow_bignum>, C<allow_barekey>, C<allow_singlequote>, C<escape_slash> and C<indent_length>.
|
2051 |
-
|
2052 |
-
When none of the unsupported methods are enabled, C<XS de/encode> is
used as is. The switch is achieved by changing the symbol tables.
|
2054 |
-
|
2055 |
-
C<-support_by_pp> is effective only when the backend module is JSON::XS,
and it slows down de/encoding a bit.
|
2057 |
-
|
2058 |
-
See to L<JSON::PP SUPPORT METHODS>.
|
2059 |
-
|
2060 |
-
=head1 INCOMPATIBLE CHANGES TO OLD VERSION
|
2061 |
-
|
2062 |
-
There are big incompatibilities between the new version (2.00) and the old (1.xx).
If your code uses the old C<JSON> 1.xx, please check it.
|
2064 |
-
|
2065 |
-
See to L<Transition ways from 1.xx to 2.xx.>
|
2066 |
-
|
2067 |
-
=over
|
2068 |
-
|
2069 |
-
=item jsonToObj and objToJson are obsoleted.
|
2070 |
-
|
2071 |
-
The non-Perl-style names C<jsonToObj> and C<objToJson> are obsolete
(but not yet deleted from the source).
|
2073 |
-
If you use these functions in your code, please replace them
|
2074 |
-
with C<from_json> and C<to_json>.
|
2075 |
-
|
2076 |
-
|
2077 |
-
=item Global variables are no longer available.
|
2078 |
-
|
2079 |
-
C<JSON> class variables - C<$JSON::AUTOCONVERT>, C<$JSON::BareKey>, etc...
|
2080 |
-
- are not available any longer.
|
2081 |
-
Instead, various features can be used through object methods.
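For instance, a sketch of the object-method style that replaces the old class variables (C<allow_barekey> and C<allow_singlequote> need the JSON::PP backend or C<-support_by_pp>; see the transition list below):

    my $json = JSON->new->allow_barekey->allow_singlequote;   # was $JSON::BareKey / $JSON::QuotApos
    my $perl_scalar = $json->decode($json_text);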
|
2082 |
-
|
2083 |
-
|
2084 |
-
=item Package JSON::Converter and JSON::Parser are deleted.
|
2085 |
-
|
2086 |
-
Now C<JSON> bundles JSON::PP, which handles JSON more properly than they did.
|
2087 |
-
|
2088 |
-
=item Package JSON::NotString is deleted.
|
2089 |
-
|
2090 |
-
There was a C<JSON::NotString> class which represented the JSON values C<true>, C<false>, C<null>
|
2091 |
-
and numbers. It has been deleted and replaced by C<JSON::Boolean>.
|
2092 |
-
|
2093 |
-
C<JSON::Boolean> represents C<true> and C<false>.
|
2094 |
-
|
2095 |
-
C<JSON::Boolean> does not represent C<null>.
|
2096 |
-
|
2097 |
-
C<JSON::null> returns C<undef>.
|
2098 |
-
|
2099 |
-
C<JSON> gives L<JSON::XS::Boolean> and L<JSON::PP::Boolean> an is-a relation
|
2100 |
-
to L<JSON::Boolean>.
|
2101 |
-
|
2102 |
-
=item function JSON::Number is obsoleted.
|
2103 |
-
|
2104 |
-
C<JSON::Number> is no longer needed because JSON::XS and JSON::PP have
|
2105 |
-
round-trip integrity.
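For instance, a sketch of what round-trip integrity means here - numbers stay numbers and strings stay strings across a decode/encode cycle:

    my $json = JSON->new->canonical;   # canonical gives a stable key order
    my $perl = $json->decode('{"count":42,"label":"42"}');
    print $json->encode($perl);        # {"count":42,"label":"42"}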
|
2106 |
-
|
2107 |
-
=item JSONRPC modules are deleted.
|
2108 |
-
|
2109 |
-
The Perl implementations of the JSON-RPC protocol - C<JSONRPC>, C<JSONRPC::Transport::HTTP>
|
2110 |
-
and C<Apache::JSONRPC> - have been deleted from this distribution.
|
2111 |
-
Instead, there is L<JSON::RPC>, which supports JSON-RPC protocol version 1.1.
|
2112 |
-
|
2113 |
-
=back
|
2114 |
-
|
2115 |
-
=head2 Transition ways from 1.xx to 2.xx.
|
2116 |
-
|
2117 |
-
You should enable C<-support_by_pp> mode first, because
|
2118 |
-
then the code below always works, even with JSON::XS.
|
2119 |
-
|
2120 |
-
use JSON -support_by_pp;
|
2121 |
-
|
2122 |
-
=over
|
2123 |
-
|
2124 |
-
=item Exported jsonToObj (simple)
|
2125 |
-
|
2126 |
-
from_json($json_text);
|
2127 |
-
|
2128 |
-
=item Exported objToJson (simple)
|
2129 |
-
|
2130 |
-
to_json($perl_scalar);
|
2131 |
-
|
2132 |
-
=item Exported jsonToObj (advanced)
|
2133 |
-
|
2134 |
-
$flags = {allow_barekey => 1, allow_singlequote => 1};
|
2135 |
-
from_json($json_text, $flags);
|
2136 |
-
|
2137 |
-
equivalent to:
|
2138 |
-
|
2139 |
-
$JSON::BareKey = 1;
|
2140 |
-
$JSON::QuotApos = 1;
|
2141 |
-
jsonToObj($json_text);
|
2142 |
-
|
2143 |
-
=item Exported objToJson (advanced)
|
2144 |
-
|
2145 |
-
$flags = {allow_blessed => 1, allow_barekey => 1};
|
2146 |
-
to_json($perl_scalar, $flags);
|
2147 |
-
|
2148 |
-
equivalent to:
|
2149 |
-
|
2150 |
-
$JSON::BareKey = 1;
|
2151 |
-
objToJson($perl_scalar);
|
2152 |
-
|
2153 |
-
=item jsonToObj as object method
|
2154 |
-
|
2155 |
-
$json->decode($json_text);
|
2156 |
-
|
2157 |
-
=item objToJson as object method
|
2158 |
-
|
2159 |
-
$json->encode($perl_scalar);
|
2160 |
-
|
2161 |
-
=item new method with parameters
|
2162 |
-
|
2163 |
-
The C<new> method in 2.x no longer takes any parameters.
|
2164 |
-
You can set parameters on the object instead:
|
2165 |
-
|
2166 |
-
$json = JSON->new->pretty;
|
2167 |
-
|
2168 |
-
=item $JSON::Pretty, $JSON::Indent, $JSON::Delimiter
|
2169 |
-
|
2170 |
-
If C<indent> is enabled, that corresponds to the C<$JSON::Pretty> flag being set. And
|
2171 |
-
C<$JSON::Delimiter> has been replaced by C<space_before> and C<space_after>.
|
2172 |
-
In conclusion:
|
2173 |
-
|
2174 |
-
$json->indent->space_before->space_after;
|
2175 |
-
|
2176 |
-
Equivalent to:
|
2177 |
-
|
2178 |
-
$json->pretty;
|
2179 |
-
|
2180 |
-
To change the indent length, use C<indent_length>.
|
2181 |
-
|
2182 |
-
(Only with JSON::PP, if C<-support_by_pp> is not used.)
|
2183 |
-
|
2184 |
-
$json->pretty->indent_length(2)->encode($perl_scalar);
|
2185 |
-
|
2186 |
-
=item $JSON::BareKey
|
2187 |
-
|
2188 |
-
(Only with JSON::PP, if C<-support_by_pp> is not used.)
|
2189 |
-
|
2190 |
-
$json->allow_barekey->decode($json_text)
|
2191 |
-
|
2192 |
-
=item $JSON::ConvBlessed
|
2193 |
-
|
2194 |
-
Use C<-convert_blessed_universally>. See L<convert_blessed>.
|
2195 |
-
|
2196 |
-
=item $JSON::QuotApos
|
2197 |
-
|
2198 |
-
(Only with JSON::PP, if C<-support_by_pp> is not used.)
|
2199 |
-
|
2200 |
-
$json->allow_singlequote->decode($json_text)
|
2201 |
-
|
2202 |
-
=item $JSON::SingleQuote
|
2203 |
-
|
2204 |
-
Disabled. C<JSON> no longer produces such an invalid JSON string.
|
2205 |
-
|
2206 |
-
=item $JSON::KeySort
|
2207 |
-
|
2208 |
-
$json->canonical->encode($perl_scalar)
|
2209 |
-
|
2210 |
-
This is an ASCII sort.
|
2211 |
-
|
2212 |
-
If you want to use your own sort routine, check the C<sort_by> method.
|
2213 |
-
|
2214 |
-
(Currently only with JSON::PP, even if C<-support_by_pp> is used.)
|
2215 |
-
|
2216 |
-
$json->sort_by($sort_routine_ref)->encode($perl_scalar)
|
2217 |
-
|
2218 |
-
$json->sort_by(sub { $JSON::PP::a <=> $JSON::PP::b })->encode($perl_scalar)
|
2219 |
-
|
2220 |
-
You cannot access C<$a> and C<$b>; use C<$JSON::PP::a> and C<$JSON::PP::b> instead.
|
2221 |
-
|
2222 |
-
=item $JSON::SkipInvalid
|
2223 |
-
|
2224 |
-
$json->allow_unknown
|
2225 |
-
|
2226 |
-
=item $JSON::AUTOCONVERT
|
2227 |
-
|
2228 |
-
No longer needed. The C<JSON> backend modules have round-trip integrity.
|
2229 |
-
|
2230 |
-
=item $JSON::UTF8
|
2231 |
-
|
2232 |
-
No longer needed, because C<JSON> (JSON::XS/JSON::PP) sets
|
2233 |
-
the UTF8 flag properly.
|
2234 |
-
|
2235 |
-
# With UTF8-flagged strings
|
2236 |
-
|
2237 |
-
$json->allow_nonref;
|
2238 |
-
$str = chr(1000); # UTF8-flagged
|
2239 |
-
|
2240 |
-
$json_text = $json->utf8(0)->encode($str);
|
2241 |
-
utf8::is_utf8($json_text);
|
2242 |
-
# true
|
2243 |
-
$json_text = $json->utf8(1)->encode($str);
|
2244 |
-
utf8::is_utf8($json_text);
|
2245 |
-
# false
|
2246 |
-
|
2247 |
-
$str = '"' . chr(1000) . '"'; # UTF8-flagged
|
2248 |
-
|
2249 |
-
$perl_scalar = $json->utf8(0)->decode($str);
|
2250 |
-
utf8::is_utf8($perl_scalar);
|
2251 |
-
# true
|
2252 |
-
$perl_scalar = $json->utf8(1)->decode($str);
|
2253 |
-
# died because of 'Wide character in subroutine'
|
2254 |
-
|
2255 |
-
See L<JSON::XS/A FEW NOTES ON UNICODE AND PERL>.
|
2256 |
-
|
2257 |
-
=item $JSON::UnMapping
|
2258 |
-
|
2259 |
-
Disabled. See L<MAPPING>.
|
2260 |
-
|
2261 |
-
=item $JSON::SelfConvert
|
2262 |
-
|
2263 |
-
This option was deleted.
|
2264 |
-
Instead, if a given blessed object has a C<TO_JSON> method,
|
2265 |
-
C<TO_JSON> will be called when C<convert_blessed> is enabled.
|
2266 |
-
|
2267 |
-
$json->convert_blessed->encode($blessed_hashref_or_arrayref)
|
2268 |
-
# if need, call allow_blessed
|
2269 |
-
|
2270 |
-
Note that the method was named C<toJson> in the old version; it is now C<TO_JSON>, not C<toJson>.
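As an illustration, a minimal sketch of a class providing C<TO_JSON> (the package name C<My::Point> is hypothetical):

    package My::Point;
    sub new     { my ($class, %args) = @_; return bless { %args }, $class }
    sub TO_JSON { my ($self) = @_; return { %$self } }   # return plain data to encode

    package main;
    my $json = JSON->new->convert_blessed;
    print $json->encode( My::Point->new(x => 1, y => 2) );   # e.g. {"x":1,"y":2}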
|
2271 |
-
|
2272 |
-
=back
|
2273 |
-
|
2274 |
-
=head1 TODO
|
2275 |
-
|
2276 |
-
=over
|
2277 |
-
|
2278 |
-
=item example programs
|
2279 |
-
|
2280 |
-
=back
|
2281 |
-
|
2282 |
-
=head1 THREADS
|
2283 |
-
|
2284 |
-
Not tested with JSON::PP. If you use JSON::XS, see L<JSON::XS/THREADS>.
|
2285 |
-
|
2286 |
-
|
2287 |
-
=head1 BUGS
|
2288 |
-
|
2289 |
-
Please report bugs relevant to C<JSON> to E<lt>makamaka[at]cpan.orgE<gt>.
|
2290 |
-
|
2291 |
-
|
2292 |
-
=head1 SEE ALSO
|
2293 |
-
|
2294 |
-
Most of this document was copied and modified from the JSON::XS documentation.
|
2295 |
-
|
2296 |
-
L<JSON::XS>, L<JSON::PP>
|
2297 |
-
|
2298 |
-
C<RFC4627>(L<http://www.ietf.org/rfc/rfc4627.txt>)
|
2299 |
-
|
2300 |
-
=head1 AUTHOR
|
2301 |
-
|
2302 |
-
Makamaka Hannyaharamitu, E<lt>makamaka[at]cpan.orgE<gt>
|
2303 |
-
|
2304 |
-
JSON::XS was written by Marc Lehmann <schmorp[at]schmorp.de>
|
2305 |
-
|
2306 |
-
The release of this new version owes much to the courtesy of Marc Lehmann.
|
2307 |
-
|
2308 |
-
|
2309 |
-
=head1 COPYRIGHT AND LICENSE
|
2310 |
-
|
2311 |
-
Copyright 2005-2013 by Makamaka Hannyaharamitu
|
2312 |
-
|
2313 |
-
This library is free software; you can redistribute it and/or modify
|
2314 |
-
it under the same terms as Perl itself.
|
2315 |
-
|
2316 |
-
=cut
|
2317 |
-
|
spaces/Alfaxad/BioGalacticModels/README.md
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Explore Biology & Biochem Foundation Models
|
3 |
-
emoji: 🧬
|
4 |
-
colorFrom: blue
|
5 |
-
colorTo: purple
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.19.1
|
8 |
-
app_file: app.py
|
9 |
-
pinned: true
|
10 |
-
duplicated_from: hf-ml4h/biomedical-language-models
|
11 |
-
---
|
12 |
-
|
13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
spaces/Alpaca233/SadTalker/src/audio2pose_models/discriminator.py
DELETED
@@ -1,76 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
import torch.nn.functional as F
|
3 |
-
from torch import nn
|
4 |
-
|
5 |
-
class ConvNormRelu(nn.Module):
|
6 |
-
def __init__(self, conv_type='1d', in_channels=3, out_channels=64, downsample=False,
|
7 |
-
kernel_size=None, stride=None, padding=None, norm='BN', leaky=False):
|
8 |
-
super().__init__()
|
9 |
-
if kernel_size is None:
|
10 |
-
if downsample:
|
11 |
-
kernel_size, stride, padding = 4, 2, 1
|
12 |
-
else:
|
13 |
-
kernel_size, stride, padding = 3, 1, 1
|
14 |
-
|
15 |
-
if conv_type == '2d':
|
16 |
-
self.conv = nn.Conv2d(
|
17 |
-
in_channels,
|
18 |
-
out_channels,
|
19 |
-
kernel_size,
|
20 |
-
stride,
|
21 |
-
padding,
|
22 |
-
bias=False,
|
23 |
-
)
|
24 |
-
if norm == 'BN':
|
25 |
-
self.norm = nn.BatchNorm2d(out_channels)
|
26 |
-
elif norm == 'IN':
|
27 |
-
self.norm = nn.InstanceNorm2d(out_channels)
|
28 |
-
else:
|
29 |
-
raise NotImplementedError
|
30 |
-
elif conv_type == '1d':
|
31 |
-
self.conv = nn.Conv1d(
|
32 |
-
in_channels,
|
33 |
-
out_channels,
|
34 |
-
kernel_size,
|
35 |
-
stride,
|
36 |
-
padding,
|
37 |
-
bias=False,
|
38 |
-
)
|
39 |
-
if norm == 'BN':
|
40 |
-
self.norm = nn.BatchNorm1d(out_channels)
|
41 |
-
elif norm == 'IN':
|
42 |
-
self.norm = nn.InstanceNorm1d(out_channels)
|
43 |
-
else:
|
44 |
-
raise NotImplementedError
|
45 |
-
nn.init.kaiming_normal_(self.conv.weight)
|
46 |
-
|
47 |
-
self.act = nn.LeakyReLU(negative_slope=0.2, inplace=False) if leaky else nn.ReLU(inplace=True)
|
48 |
-
|
49 |
-
def forward(self, x):
|
50 |
-
x = self.conv(x)
|
51 |
-
if isinstance(self.norm, nn.InstanceNorm1d):
|
52 |
-
x = self.norm(x.permute((0, 2, 1))).permute((0, 2, 1)) # normalize on [C]
|
53 |
-
else:
|
54 |
-
x = self.norm(x)
|
55 |
-
x = self.act(x)
|
56 |
-
return x
|
57 |
-
|
58 |
-
|
59 |
-
class PoseSequenceDiscriminator(nn.Module):
|
60 |
-
def __init__(self, cfg):
|
61 |
-
super().__init__()
|
62 |
-
self.cfg = cfg
|
63 |
-
leaky = self.cfg.MODEL.DISCRIMINATOR.LEAKY_RELU
|
64 |
-
|
65 |
-
self.seq = nn.Sequential(
|
66 |
-
ConvNormRelu('1d', cfg.MODEL.DISCRIMINATOR.INPUT_CHANNELS, 256, downsample=True, leaky=leaky), # B, 256, 64
|
67 |
-
ConvNormRelu('1d', 256, 512, downsample=True, leaky=leaky), # B, 512, 32
|
68 |
-
ConvNormRelu('1d', 512, 1024, kernel_size=3, stride=1, padding=1, leaky=leaky), # B, 1024, 16
|
69 |
-
nn.Conv1d(1024, 1, kernel_size=3, stride=1, padding=1, bias=True) # B, 1, 16
|
70 |
-
)
|
71 |
-
|
72 |
-
def forward(self, x):
|
73 |
-
x = x.reshape(x.size(0), x.size(1), -1).transpose(1, 2)
|
74 |
-
x = self.seq(x)
|
75 |
-
x = x.squeeze(1)
|
76 |
-
return x
|
spaces/Ame42/UBTH/README.md
DELETED
@@ -1,14 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: UBTH
|
3 |
-
emoji: 📚
|
4 |
-
colorFrom: indigo
|
5 |
-
colorTo: gray
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.42.0
|
8 |
-
python_version: 3.11
|
9 |
-
app_file: app.py
|
10 |
-
pinned: false
|
11 |
-
license: other
|
12 |
-
---
|
13 |
-
|
14 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
spaces/Amon1/ChatGPTForAcadamic/crazy_functions/test_project/cpp/libJPG/jpgd.h
DELETED
@@ -1,316 +0,0 @@
|
|
1 |
-
// jpgd.h - C++ class for JPEG decompression.
|
2 |
-
// Public domain, Rich Geldreich <[email protected]>
|
3 |
-
#ifndef JPEG_DECODER_H
|
4 |
-
#define JPEG_DECODER_H
|
5 |
-
|
6 |
-
#include <stdlib.h>
|
7 |
-
#include <stdio.h>
|
8 |
-
#include <setjmp.h>
|
9 |
-
|
10 |
-
namespace jpgd
|
11 |
-
{
|
12 |
-
typedef unsigned char uint8;
|
13 |
-
typedef signed short int16;
|
14 |
-
typedef unsigned short uint16;
|
15 |
-
typedef unsigned int uint;
|
16 |
-
typedef signed int int32;
|
17 |
-
|
18 |
-
// Loads a JPEG image from a memory buffer or a file.
|
19 |
-
// req_comps can be 1 (grayscale), 3 (RGB), or 4 (RGBA).
|
20 |
-
// On return, width/height will be set to the image's dimensions, and actual_comps will be set to either 1 (grayscale) or 3 (RGB).
|
21 |
-
// Notes: For more control over where and how the source data is read, see the decompress_jpeg_image_from_stream() function below, or call the jpeg_decoder class directly.
|
22 |
-
// Requesting an 8 or 32bpp image is currently a little faster than 24bpp because the jpeg_decoder class itself currently always unpacks to either 8 or 32bpp.
|
23 |
-
// BEGIN EPIC MOD
|
24 |
-
//unsigned char *decompress_jpeg_image_from_memory(const unsigned char *pSrc_data, int src_data_size, int *width, int *height, int *actual_comps, int req_comps);
|
25 |
-
unsigned char *decompress_jpeg_image_from_memory(const unsigned char *pSrc_data, int src_data_size, int *width, int *height, int *actual_comps, int req_comps, int format);
|
26 |
-
// END EPIC MOD
|
27 |
-
unsigned char *decompress_jpeg_image_from_file(const char *pSrc_filename, int *width, int *height, int *actual_comps, int req_comps);
|
28 |
-
|
29 |
-
// Success/failure error codes.
|
30 |
-
enum jpgd_status
|
31 |
-
{
|
32 |
-
JPGD_SUCCESS = 0, JPGD_FAILED = -1, JPGD_DONE = 1,
|
33 |
-
JPGD_BAD_DHT_COUNTS = -256, JPGD_BAD_DHT_INDEX, JPGD_BAD_DHT_MARKER, JPGD_BAD_DQT_MARKER, JPGD_BAD_DQT_TABLE,
|
34 |
-
JPGD_BAD_PRECISION, JPGD_BAD_HEIGHT, JPGD_BAD_WIDTH, JPGD_TOO_MANY_COMPONENTS,
|
35 |
-
JPGD_BAD_SOF_LENGTH, JPGD_BAD_VARIABLE_MARKER, JPGD_BAD_DRI_LENGTH, JPGD_BAD_SOS_LENGTH,
|
36 |
-
JPGD_BAD_SOS_COMP_ID, JPGD_W_EXTRA_BYTES_BEFORE_MARKER, JPGD_NO_ARITHMITIC_SUPPORT, JPGD_UNEXPECTED_MARKER,
|
37 |
-
JPGD_NOT_JPEG, JPGD_UNSUPPORTED_MARKER, JPGD_BAD_DQT_LENGTH, JPGD_TOO_MANY_BLOCKS,
|
38 |
-
JPGD_UNDEFINED_QUANT_TABLE, JPGD_UNDEFINED_HUFF_TABLE, JPGD_NOT_SINGLE_SCAN, JPGD_UNSUPPORTED_COLORSPACE,
|
39 |
-
JPGD_UNSUPPORTED_SAMP_FACTORS, JPGD_DECODE_ERROR, JPGD_BAD_RESTART_MARKER, JPGD_ASSERTION_ERROR,
|
40 |
-
JPGD_BAD_SOS_SPECTRAL, JPGD_BAD_SOS_SUCCESSIVE, JPGD_STREAM_READ, JPGD_NOTENOUGHMEM
|
41 |
-
};
|
42 |
-
|
43 |
-
// Input stream interface.
|
44 |
-
// Derive from this class to read input data from sources other than files or memory. Set m_eof_flag to true when no more data is available.
|
45 |
-
// The decoder is rather greedy: it will keep on calling this method until its internal input buffer is full, or until the EOF flag is set.
|
46 |
-
// If the input stream contains data after the JPEG stream's EOI (end of image) marker, it will probably be pulled into the internal buffer.
|
47 |
-
// Call the get_total_bytes_read() method to determine the actual size of the JPEG stream after successful decoding.
|
48 |
-
class jpeg_decoder_stream
|
49 |
-
{
|
50 |
-
public:
|
51 |
-
jpeg_decoder_stream() { }
|
52 |
-
virtual ~jpeg_decoder_stream() { }
|
53 |
-
|
54 |
-
// The read() method is called when the internal input buffer is empty.
|
55 |
-
// Parameters:
|
56 |
-
// pBuf - input buffer
|
57 |
-
// max_bytes_to_read - maximum bytes that can be written to pBuf
|
58 |
-
// pEOF_flag - set this to true if at end of stream (no more bytes remaining)
|
59 |
-
// Returns -1 on error, otherwise return the number of bytes actually written to the buffer (which may be 0).
|
60 |
-
// Notes: This method will be called in a loop until you set *pEOF_flag to true or the internal buffer is full.
|
61 |
-
virtual int read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag) = 0;
|
62 |
-
};
|
63 |
-
|
64 |
-
// stdio FILE stream class.
|
65 |
-
class jpeg_decoder_file_stream : public jpeg_decoder_stream
|
66 |
-
{
|
67 |
-
jpeg_decoder_file_stream(const jpeg_decoder_file_stream &);
|
68 |
-
jpeg_decoder_file_stream &operator =(const jpeg_decoder_file_stream &);
|
69 |
-
|
70 |
-
FILE *m_pFile;
|
71 |
-
bool m_eof_flag, m_error_flag;
|
72 |
-
|
73 |
-
public:
|
74 |
-
jpeg_decoder_file_stream();
|
75 |
-
virtual ~jpeg_decoder_file_stream();
|
76 |
-
|
77 |
-
bool open(const char *Pfilename);
|
78 |
-
void close();
|
79 |
-
|
80 |
-
virtual int read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag);
|
81 |
-
};
|
82 |
-
|
83 |
-
// Memory stream class.
|
84 |
-
class jpeg_decoder_mem_stream : public jpeg_decoder_stream
|
85 |
-
{
|
86 |
-
const uint8 *m_pSrc_data;
|
87 |
-
uint m_ofs, m_size;
|
88 |
-
|
89 |
-
public:
|
90 |
-
jpeg_decoder_mem_stream() : m_pSrc_data(NULL), m_ofs(0), m_size(0) { }
|
91 |
-
jpeg_decoder_mem_stream(const uint8 *pSrc_data, uint size) : m_pSrc_data(pSrc_data), m_ofs(0), m_size(size) { }
|
92 |
-
|
93 |
-
virtual ~jpeg_decoder_mem_stream() { }
|
94 |
-
|
95 |
-
bool open(const uint8 *pSrc_data, uint size);
|
96 |
-
void close() { m_pSrc_data = NULL; m_ofs = 0; m_size = 0; }
|
97 |
-
|
98 |
-
virtual int read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag);
|
99 |
-
};
|
100 |
-
|
101 |
-
// Loads JPEG file from a jpeg_decoder_stream.
|
102 |
-
unsigned char *decompress_jpeg_image_from_stream(jpeg_decoder_stream *pStream, int *width, int *height, int *actual_comps, int req_comps);
|
103 |
-
|
104 |
-
enum
|
105 |
-
{
|
106 |
-
JPGD_IN_BUF_SIZE = 8192, JPGD_MAX_BLOCKS_PER_MCU = 10, JPGD_MAX_HUFF_TABLES = 8, JPGD_MAX_QUANT_TABLES = 4,
|
107 |
-
JPGD_MAX_COMPONENTS = 4, JPGD_MAX_COMPS_IN_SCAN = 4, JPGD_MAX_BLOCKS_PER_ROW = 8192, JPGD_MAX_HEIGHT = 16384, JPGD_MAX_WIDTH = 16384
|
108 |
-
};
|
109 |
-
|
110 |
-
typedef int16 jpgd_quant_t;
|
111 |
-
typedef int16 jpgd_block_t;
|
112 |
-
|
113 |
-
class jpeg_decoder
|
114 |
-
{
|
115 |
-
public:
|
116 |
-
// Call get_error_code() after constructing to determine if the stream is valid or not. You may call the get_width(), get_height(), etc.
|
117 |
-
// methods after the constructor is called. You may then either destruct the object, or begin decoding the image by calling begin_decoding(), then decode() on each scanline.
|
118 |
-
jpeg_decoder(jpeg_decoder_stream *pStream);
|
119 |
-
|
120 |
-
~jpeg_decoder();
|
121 |
-
|
122 |
-
// Call this method after constructing the object to begin decompression.
|
123 |
-
// If JPGD_SUCCESS is returned you may then call decode() on each scanline.
|
124 |
-
int begin_decoding();
|
125 |
-
|
126 |
-
// Returns the next scan line.
|
127 |
-
// For grayscale images, pScan_line will point to a buffer containing 8-bit pixels (get_bytes_per_pixel() will return 1).
|
128 |
-
// Otherwise, it will always point to a buffer containing 32-bit RGBA pixels (A will always be 255, and get_bytes_per_pixel() will return 4).
|
129 |
-
// Returns JPGD_SUCCESS if a scan line has been returned.
|
130 |
-
// Returns JPGD_DONE if all scan lines have been returned.
|
131 |
-
// Returns JPGD_FAILED if an error occurred. Call get_error_code() for a more info.
|
132 |
-
int decode(const void** pScan_line, uint* pScan_line_len);
|
133 |
-
|
134 |
-
inline jpgd_status get_error_code() const { return m_error_code; }
|
135 |
-
|
136 |
-
inline int get_width() const { return m_image_x_size; }
|
137 |
-
inline int get_height() const { return m_image_y_size; }
|
138 |
-
|
139 |
-
inline int get_num_components() const { return m_comps_in_frame; }
|
140 |
-
|
141 |
-
inline int get_bytes_per_pixel() const { return m_dest_bytes_per_pixel; }
|
142 |
-
inline int get_bytes_per_scan_line() const { return m_image_x_size * get_bytes_per_pixel(); }
|
143 |
-
|
144 |
-
// Returns the total number of bytes actually consumed by the decoder (which should equal the actual size of the JPEG file).
|
145 |
-
inline int get_total_bytes_read() const { return m_total_bytes_read; }
|
146 |
-
|
147 |
-
private:
|
148 |
-
jpeg_decoder(const jpeg_decoder &);
|
149 |
-
jpeg_decoder &operator =(const jpeg_decoder &);
|
150 |
-
|
151 |
-
typedef void (*pDecode_block_func)(jpeg_decoder *, int, int, int);
|
152 |
-
|
153 |
-
struct huff_tables
|
154 |
-
{
|
155 |
-
bool ac_table;
|
156 |
-
uint look_up[256];
|
157 |
-
uint look_up2[256];
|
158 |
-
uint8 code_size[256];
|
159 |
-
uint tree[512];
|
160 |
-
};
|
161 |
-
|
162 |
-
struct coeff_buf
|
163 |
-
{
|
164 |
-
uint8 *pData;
|
165 |
-
int block_num_x, block_num_y;
|
166 |
-
int block_len_x, block_len_y;
|
167 |
-
int block_size;
|
168 |
-
};
|
169 |
-
|
170 |
-
struct mem_block
|
171 |
-
{
|
172 |
-
mem_block *m_pNext;
|
173 |
-
size_t m_used_count;
|
174 |
-
size_t m_size;
|
175 |
-
char m_data[1];
|
176 |
-
};
|
177 |
-
|
178 |
-
jmp_buf m_jmp_state;
|
179 |
-
mem_block *m_pMem_blocks;
|
180 |
-
int m_image_x_size;
|
181 |
-
int m_image_y_size;
|
182 |
-
jpeg_decoder_stream *m_pStream;
|
183 |
-
int m_progressive_flag;
|
184 |
-
uint8 m_huff_ac[JPGD_MAX_HUFF_TABLES];
|
185 |
-
uint8* m_huff_num[JPGD_MAX_HUFF_TABLES]; // pointer to number of Huffman codes per bit size
|
186 |
-
uint8* m_huff_val[JPGD_MAX_HUFF_TABLES]; // pointer to Huffman codes per bit size
|
187 |
-
jpgd_quant_t* m_quant[JPGD_MAX_QUANT_TABLES]; // pointer to quantization tables
|
188 |
-
int m_scan_type; // Gray, Yh1v1, Yh1v2, Yh2v1, Yh2v2 (CMYK111, CMYK4114 no longer supported)
|
189 |
-
int m_comps_in_frame; // # of components in frame
|
190 |
-
int m_comp_h_samp[JPGD_MAX_COMPONENTS]; // component's horizontal sampling factor
|
191 |
-
int m_comp_v_samp[JPGD_MAX_COMPONENTS]; // component's vertical sampling factor
|
192 |
-
int m_comp_quant[JPGD_MAX_COMPONENTS]; // component's quantization table selector
|
193 |
-
int m_comp_ident[JPGD_MAX_COMPONENTS]; // component's ID
|
194 |
-
int m_comp_h_blocks[JPGD_MAX_COMPONENTS];
|
195 |
-
int m_comp_v_blocks[JPGD_MAX_COMPONENTS];
|
196 |
-
int m_comps_in_scan; // # of components in scan
|
197 |
-
int m_comp_list[JPGD_MAX_COMPS_IN_SCAN]; // components in this scan
|
198 |
-
int m_comp_dc_tab[JPGD_MAX_COMPONENTS]; // component's DC Huffman coding table selector
|
199 |
-
int m_comp_ac_tab[JPGD_MAX_COMPONENTS]; // component's AC Huffman coding table selector
|
200 |
-
int m_spectral_start; // spectral selection start
|
201 |
-
int m_spectral_end; // spectral selection end
|
202 |
-
int m_successive_low; // successive approximation low
|
203 |
-
int m_successive_high; // successive approximation high
|
204 |
-
int m_max_mcu_x_size; // MCU's max. X size in pixels
|
205 |
-
int m_max_mcu_y_size; // MCU's max. Y size in pixels
|
206 |
-
int m_blocks_per_mcu;
|
207 |
-
int m_max_blocks_per_row;
|
208 |
-
int m_mcus_per_row, m_mcus_per_col;
|
209 |
-
int m_mcu_org[JPGD_MAX_BLOCKS_PER_MCU];
|
210 |
-
int m_total_lines_left; // total # lines left in image
|
211 |
-
int m_mcu_lines_left; // total # lines left in this MCU
|
212 |
-
int m_real_dest_bytes_per_scan_line;
|
213 |
-
int m_dest_bytes_per_scan_line; // rounded up
|
214 |
-
int m_dest_bytes_per_pixel; // 4 (RGB) or 1 (Y)
|
215 |
-
huff_tables* m_pHuff_tabs[JPGD_MAX_HUFF_TABLES];
|
216 |
-
coeff_buf* m_dc_coeffs[JPGD_MAX_COMPONENTS];
|
217 |
-
coeff_buf* m_ac_coeffs[JPGD_MAX_COMPONENTS];
|
218 |
-
int m_eob_run;
|
219 |
-
int m_block_y_mcu[JPGD_MAX_COMPONENTS];
|
220 |
-
uint8* m_pIn_buf_ofs;
|
221 |
-
int m_in_buf_left;
|
222 |
-
int m_tem_flag;
|
223 |
-
bool m_eof_flag;
|
224 |
-
uint8 m_in_buf_pad_start[128];
|
225 |
-
uint8 m_in_buf[JPGD_IN_BUF_SIZE + 128];
|
226 |
-
uint8 m_in_buf_pad_end[128];
|
227 |
-
int m_bits_left;
|
228 |
-
uint m_bit_buf;
|
229 |
-
int m_restart_interval;
|
230 |
-
int m_restarts_left;
|
231 |
-
int m_next_restart_num;
|
232 |
-
int m_max_mcus_per_row;
|
233 |
-
int m_max_blocks_per_mcu;
|
234 |
-
int m_expanded_blocks_per_mcu;
|
235 |
-
int m_expanded_blocks_per_row;
|
236 |
-
int m_expanded_blocks_per_component;
|
237 |
-
bool m_freq_domain_chroma_upsample;
|
238 |
-
int m_max_mcus_per_col;
|
239 |
-
uint m_last_dc_val[JPGD_MAX_COMPONENTS];
|
240 |
-
jpgd_block_t* m_pMCU_coefficients;
|
241 |
-
int m_mcu_block_max_zag[JPGD_MAX_BLOCKS_PER_MCU];
|
242 |
-
uint8* m_pSample_buf;
|
243 |
-
int m_crr[256];
|
244 |
-
int m_cbb[256];
|
245 |
-
int m_crg[256];
|
246 |
-
int m_cbg[256];
|
247 |
-
uint8* m_pScan_line_0;
|
248 |
-
uint8* m_pScan_line_1;
|
249 |
-
jpgd_status m_error_code;
|
250 |
-
bool m_ready_flag;
|
251 |
-
int m_total_bytes_read;
|
252 |
-
|
253 |
-
void free_all_blocks();
|
254 |
-
// BEGIN EPIC MOD
|
255 |
-
UE_NORETURN void stop_decoding(jpgd_status status);
|
256 |
-
// END EPIC MOD
|
257 |
-
void *alloc(size_t n, bool zero = false);
|
258 |
-
void word_clear(void *p, uint16 c, uint n);
|
259 |
-
void prep_in_buffer();
|
260 |
-
void read_dht_marker();
|
261 |
-
void read_dqt_marker();
|
262 |
-
void read_sof_marker();
|
263 |
-
void skip_variable_marker();
|
264 |
-
void read_dri_marker();
|
265 |
-
void read_sos_marker();
|
266 |
-
int next_marker();
|
267 |
-
int process_markers();
|
268 |
-
void locate_soi_marker();
|
269 |
-
void locate_sof_marker();
|
270 |
-
int locate_sos_marker();
|
271 |
-
void init(jpeg_decoder_stream * pStream);
|
272 |
-
void create_look_ups();
|
273 |
-
void fix_in_buffer();
|
274 |
-
void transform_mcu(int mcu_row);
|
275 |
-
void transform_mcu_expand(int mcu_row);
|
276 |
-
coeff_buf* coeff_buf_open(int block_num_x, int block_num_y, int block_len_x, int block_len_y);
|
277 |
-
inline jpgd_block_t *coeff_buf_getp(coeff_buf *cb, int block_x, int block_y);
|
278 |
-
void load_next_row();
|
279 |
-
void decode_next_row();
|
280 |
-
void make_huff_table(int index, huff_tables *pH);
|
281 |
-
void check_quant_tables();
|
282 |
-
void check_huff_tables();
|
283 |
-
void calc_mcu_block_order();
|
284 |
-
int init_scan();
|
285 |
-
void init_frame();
|
286 |
-
void process_restart();
|
287 |
-
void decode_scan(pDecode_block_func decode_block_func);
|
288 |
-
void init_progressive();
|
289 |
-
void init_sequential();
|
290 |
-
void decode_start();
|
291 |
-
void decode_init(jpeg_decoder_stream * pStream);
|
292 |
-
void H2V2Convert();
|
293 |
-
void H2V1Convert();
|
294 |
-
void H1V2Convert();
|
295 |
-
void H1V1Convert();
|
296 |
-
void gray_convert();
|
297 |
-
void expanded_convert();
|
298 |
-
void find_eoi();
|
299 |
-
inline uint get_char();
|
300 |
-
inline uint get_char(bool *pPadding_flag);
|
301 |
-
inline void stuff_char(uint8 q);
|
302 |
-
inline uint8 get_octet();
|
303 |
-
inline uint get_bits(int num_bits);
|
304 |
-
inline uint get_bits_no_markers(int numbits);
|
305 |
-
inline int huff_decode(huff_tables *pH);
|
306 |
-
inline int huff_decode(huff_tables *pH, int& extrabits);
|
307 |
-
static inline uint8 clamp(int i);
|
308 |
-
static void decode_block_dc_first(jpeg_decoder *pD, int component_id, int block_x, int block_y);
|
309 |
-
static void decode_block_dc_refine(jpeg_decoder *pD, int component_id, int block_x, int block_y);
|
310 |
-
static void decode_block_ac_first(jpeg_decoder *pD, int component_id, int block_x, int block_y);
|
311 |
-
static void decode_block_ac_refine(jpeg_decoder *pD, int component_id, int block_x, int block_y);
|
312 |
-
};
|
313 |
-
|
314 |
-
} // namespace jpgd
|
315 |
-
|
316 |
-
#endif // JPEG_DECODER_H
|
spaces/Amrrs/DragGan-Inversion/PTI/torch_utils/ops/conv2d_resample.py
DELETED
@@ -1,156 +0,0 @@
|
|
1 |
-
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
|
2 |
-
#
|
3 |
-
# NVIDIA CORPORATION and its licensors retain all intellectual property
|
4 |
-
# and proprietary rights in and to this software, related documentation
|
5 |
-
# and any modifications thereto. Any use, reproduction, disclosure or
|
6 |
-
# distribution of this software and related documentation without an express
|
7 |
-
# license agreement from NVIDIA CORPORATION is strictly prohibited.
|
8 |
-
|
9 |
-
"""2D convolution with optional up/downsampling."""
|
10 |
-
|
11 |
-
import torch
|
12 |
-
|
13 |
-
from .. import misc
|
14 |
-
from . import conv2d_gradfix
|
15 |
-
from . import upfirdn2d
|
16 |
-
from .upfirdn2d import _parse_padding
|
17 |
-
from .upfirdn2d import _get_filter_size
|
18 |
-
|
19 |
-
#----------------------------------------------------------------------------
|
20 |
-
|
21 |
-
def _get_weight_shape(w):
|
22 |
-
with misc.suppress_tracer_warnings(): # this value will be treated as a constant
|
23 |
-
shape = [int(sz) for sz in w.shape]
|
24 |
-
misc.assert_shape(w, shape)
|
25 |
-
return shape
|
26 |
-
|
27 |
-
#----------------------------------------------------------------------------
|
28 |
-
|
29 |
-
def _conv2d_wrapper(x, w, stride=1, padding=0, groups=1, transpose=False, flip_weight=True):
|
30 |
-
"""Wrapper for the underlying `conv2d()` and `conv_transpose2d()` implementations.
|
31 |
-
"""
|
32 |
-
out_channels, in_channels_per_group, kh, kw = _get_weight_shape(w)
|
33 |
-
|
34 |
-
# Flip weight if requested.
|
35 |
-
if not flip_weight: # conv2d() actually performs correlation (flip_weight=True) not convolution (flip_weight=False).
|
36 |
-
w = w.flip([2, 3])
|
37 |
-
|
38 |
-
# Workaround performance pitfall in cuDNN 8.0.5, triggered when using
|
39 |
-
# 1x1 kernel + memory_format=channels_last + less than 64 channels.
|
40 |
-
if kw == 1 and kh == 1 and stride == 1 and padding in [0, [0, 0], (0, 0)] and not transpose:
|
41 |
-
if x.stride()[1] == 1 and min(out_channels, in_channels_per_group) < 64:
|
42 |
-
if out_channels <= 4 and groups == 1:
|
43 |
-
in_shape = x.shape
|
44 |
-
x = w.squeeze(3).squeeze(2) @ x.reshape([in_shape[0], in_channels_per_group, -1])
|
45 |
-
x = x.reshape([in_shape[0], out_channels, in_shape[2], in_shape[3]])
|
46 |
-
else:
|
47 |
-
x = x.to(memory_format=torch.contiguous_format)
|
48 |
-
w = w.to(memory_format=torch.contiguous_format)
|
49 |
-
x = conv2d_gradfix.conv2d(x, w, groups=groups)
|
50 |
-
return x.to(memory_format=torch.channels_last)
|
51 |
-
|
52 |
-
# Otherwise => execute using conv2d_gradfix.
|
53 |
-
op = conv2d_gradfix.conv_transpose2d if transpose else conv2d_gradfix.conv2d
|
54 |
-
return op(x, w, stride=stride, padding=padding, groups=groups)
|
55 |
-
|
56 |
-
#----------------------------------------------------------------------------
|
57 |
-
|
58 |
-
@misc.profiled_function
|
59 |
-
def conv2d_resample(x, w, f=None, up=1, down=1, padding=0, groups=1, flip_weight=True, flip_filter=False):
|
60 |
-
r"""2D convolution with optional up/downsampling.
|
61 |
-
|
62 |
-
Padding is performed only once at the beginning, not between the operations.
|
63 |
-
|
64 |
-
Args:
|
65 |
-
x: Input tensor of shape
|
66 |
-
`[batch_size, in_channels, in_height, in_width]`.
|
67 |
-
w: Weight tensor of shape
|
68 |
-
`[out_channels, in_channels//groups, kernel_height, kernel_width]`.
|
69 |
-
f: Low-pass filter for up/downsampling. Must be prepared beforehand by
|
70 |
-
calling upfirdn2d.setup_filter(). None = identity (default).
|
71 |
-
up: Integer upsampling factor (default: 1).
|
72 |
-
down: Integer downsampling factor (default: 1).
|
73 |
-
padding: Padding with respect to the upsampled image. Can be a single number
|
74 |
-
or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`
|
75 |
-
(default: 0).
|
76 |
-
groups: Split input channels into N groups (default: 1).
|
77 |
-
flip_weight: False = convolution, True = correlation (default: True).
|
78 |
-
flip_filter: False = convolution, True = correlation (default: False).
|
79 |
-
|
80 |
-
Returns:
|
81 |
-
Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.
|
82 |
-
"""
|
83 |
-
# Validate arguments.
|
84 |
-
assert isinstance(x, torch.Tensor) and (x.ndim == 4)
|
85 |
-
assert isinstance(w, torch.Tensor) and (w.ndim == 4) and (w.dtype == x.dtype)
|
86 |
-
assert f is None or (isinstance(f, torch.Tensor) and f.ndim in [1, 2] and f.dtype == torch.float32)
|
87 |
-
assert isinstance(up, int) and (up >= 1)
|
88 |
-
assert isinstance(down, int) and (down >= 1)
|
89 |
-
assert isinstance(groups, int) and (groups >= 1)
|
90 |
-
out_channels, in_channels_per_group, kh, kw = _get_weight_shape(w)
|
91 |
-
fw, fh = _get_filter_size(f)
|
92 |
-
px0, px1, py0, py1 = _parse_padding(padding)
|
93 |
-
|
94 |
-
# Adjust padding to account for up/downsampling.
|
95 |
-
if up > 1:
|
96 |
-
px0 += (fw + up - 1) // 2
|
97 |
-
px1 += (fw - up) // 2
|
98 |
-
py0 += (fh + up - 1) // 2
|
99 |
-
py1 += (fh - up) // 2
|
100 |
-
if down > 1:
|
101 |
-
px0 += (fw - down + 1) // 2
|
102 |
-
px1 += (fw - down) // 2
|
103 |
-
py0 += (fh - down + 1) // 2
|
104 |
-
py1 += (fh - down) // 2
|
105 |
-
|
106 |
-
# Fast path: 1x1 convolution with downsampling only => downsample first, then convolve.
|
107 |
-
if kw == 1 and kh == 1 and (down > 1 and up == 1):
|
108 |
-
x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, padding=[px0,px1,py0,py1], flip_filter=flip_filter)
|
109 |
-
x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
|
110 |
-
return x
|
111 |
-
|
112 |
-
# Fast path: 1x1 convolution with upsampling only => convolve first, then upsample.
|
113 |
-
if kw == 1 and kh == 1 and (up > 1 and down == 1):
|
114 |
-
x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
|
115 |
-
x = upfirdn2d.upfirdn2d(x=x, f=f, up=up, padding=[px0,px1,py0,py1], gain=up**2, flip_filter=flip_filter)
|
116 |
-
return x
|
117 |
-
|
118 |
-
# Fast path: downsampling only => use strided convolution.
|
119 |
-
if down > 1 and up == 1:
|
120 |
-
x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0,px1,py0,py1], flip_filter=flip_filter)
|
121 |
-
x = _conv2d_wrapper(x=x, w=w, stride=down, groups=groups, flip_weight=flip_weight)
|
122 |
-
return x
|
123 |
-
|
124 |
-
# Fast path: upsampling with optional downsampling => use transpose strided convolution.
|
125 |
-
if up > 1:
|
126 |
-
if groups == 1:
|
127 |
-
w = w.transpose(0, 1)
|
128 |
-
else:
|
129 |
-
w = w.reshape(groups, out_channels // groups, in_channels_per_group, kh, kw)
|
130 |
-
w = w.transpose(1, 2)
|
131 |
-
w = w.reshape(groups * in_channels_per_group, out_channels // groups, kh, kw)
|
132 |
-
px0 -= kw - 1
|
133 |
-
px1 -= kw - up
|
134 |
-
py0 -= kh - 1
|
135 |
-
py1 -= kh - up
|
136 |
-
pxt = max(min(-px0, -px1), 0)
|
137 |
-
pyt = max(min(-py0, -py1), 0)
|
138 |
-
x = _conv2d_wrapper(x=x, w=w, stride=up, padding=[pyt,pxt], groups=groups, transpose=True, flip_weight=(not flip_weight))
|
139 |
-
x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0+pxt,px1+pxt,py0+pyt,py1+pyt], gain=up**2, flip_filter=flip_filter)
|
140 |
-
if down > 1:
|
141 |
-
x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter)
|
142 |
-
return x
|
143 |
-
|
144 |
-
# Fast path: no up/downsampling, padding supported by the underlying implementation => use plain conv2d.
|
145 |
-
if up == 1 and down == 1:
|
146 |
-
if px0 == px1 and py0 == py1 and px0 >= 0 and py0 >= 0:
|
147 |
-
return _conv2d_wrapper(x=x, w=w, padding=[py0,px0], groups=groups, flip_weight=flip_weight)
|
148 |
-
|
149 |
-
# Fallback: Generic reference implementation.
|
150 |
-
x = upfirdn2d.upfirdn2d(x=x, f=(f if up > 1 else None), up=up, padding=[px0,px1,py0,py1], gain=up**2, flip_filter=flip_filter)
|
151 |
-
x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
|
152 |
-
if down > 1:
|
153 |
-
x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter)
|
154 |
-
return x
|
155 |
-
|
156 |
-
#----------------------------------------------------------------------------
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/training/lora.md
DELETED
@@ -1,405 +0,0 @@
|
|
1 |
-
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
2 |
-
|
3 |
-
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
4 |
-
the License. You may obtain a copy of the License at
|
5 |
-
|
6 |
-
http://www.apache.org/licenses/LICENSE-2.0
|
7 |
-
|
8 |
-
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
9 |
-
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
10 |
-
specific language governing permissions and limitations under the License.
|
11 |
-
-->
|
12 |
-
|
13 |
-
# Low-Rank Adaptation of Large Language Models (LoRA)
|
14 |
-
|
15 |
-
<Tip warning={true}>
|
16 |
-
|
17 |
-
This is an experimental feature. Its APIs can change in the future.
|
18 |
-
|
19 |
-
</Tip>
|
20 |
-
|
21 |
-
[Low-Rank Adaptation of Large Language Models (LoRA)](https://arxiv.org/abs/2106.09685) is a training method that accelerates the training of large models while consuming less memory. It adds pairs of rank-decomposition weight matrices (called **update matrices**) to existing weights, and **only** trains those newly added weights. This has a couple of advantages:
|
22 |
-
|
23 |
-
- Previous pretrained weights are kept frozen so the model is not as prone to [catastrophic forgetting](https://www.pnas.org/doi/10.1073/pnas.1611835114).
|
24 |
-
- Rank-decomposition matrices have significantly fewer parameters than the original model, which means that trained LoRA weights are easily portable.
|
25 |
-
- LoRA matrices are generally added to the attention layers of the original model. 🧨 Diffusers provides the [`~diffusers.loaders.UNet2DConditionLoadersMixin.load_attn_procs`] method to load the LoRA weights into a model's attention layers. You can control the extent to which the model is adapted toward new training images via a `scale` parameter.
|
26 |
-
- The greater memory-efficiency allows you to run fine-tuning on consumer GPUs like the Tesla T4, RTX 3080 or even the RTX 2080 Ti! GPUs like the T4 are free and readily accessible in Kaggle or Google Colab notebooks.
|
27 |
-
|
28 |
-
<Tip>
|
29 |
-
|
30 |
-
💡 LoRA is not limited to attention layers. The authors found that amending
|
31 |
-
the attention layers of a language model is sufficient to obtain good downstream performance with great efficiency. This is why it's common to just add the LoRA weights to the attention layers of a model. Check out the [Using LoRA for efficient Stable Diffusion fine-tuning](https://huggingface.co/blog/lora) blog for more information about how LoRA works!
|
32 |
-
|
33 |
-
</Tip>
|
34 |
-
|
35 |
-
[cloneofsimo](https://github.com/cloneofsimo) was the first to try out LoRA training for Stable Diffusion in the popular [lora](https://github.com/cloneofsimo/lora) GitHub repository. 🧨 Diffusers now supports finetuning with LoRA for [text-to-image generation](https://github.com/huggingface/diffusers/tree/main/examples/text_to_image#training-with-lora) and [DreamBooth](https://github.com/huggingface/diffusers/tree/main/examples/dreambooth#training-with-low-rank-adaptation-of-large-language-models-lora). This guide will show you how to do both.
|
36 |
-
|
37 |
-
If you'd like to store or share your model with the community, login to your Hugging Face account (create [one](hf.co/join) if you don't have one already):
|
38 |
-
|
39 |
-
```bash
|
40 |
-
huggingface-cli login
|
41 |
-
```
|
42 |
-
|
43 |
-
## Text-to-image
|
44 |
-
|
45 |
-
Finetuning a model like Stable Diffusion, which has billions of parameters, can be slow and difficult. With LoRA, it is much easier and faster to finetune a diffusion model. It can run on hardware with as little as 11GB of GPU RAM without resorting to tricks such as 8-bit optimizers.
|
46 |
-
|
47 |
-
### Training[[text-to-image-training]]
|
48 |
-
|
49 |
-
Let's finetune [`stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) on the [Pokémon BLIP captions](https://huggingface.co/datasets/lambdalabs/pokemon-blip-captions) dataset to generate your own Pokémon.
|
50 |
-
|
51 |
-
Specify the `MODEL_NAME` environment variable (either a Hub model repository id or a path to the directory containing the model weights) and pass it to the [`pretrained_model_name_or_path`](https://huggingface.co/docs/diffusers/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.from_pretrained.pretrained_model_name_or_path) argument. You'll also need to set the `DATASET_NAME` environment variable to the name of the dataset you want to train on. To use your own dataset, take a look at the [Create a dataset for training](create_dataset) guide.
|
52 |
-
|
53 |
-
The `OUTPUT_DIR` and `HUB_MODEL_ID` variables are optional; they specify where to save the model locally and what to name the repository on the Hub:
|
54 |
-
|
55 |
-
```bash
|
56 |
-
export MODEL_NAME="runwayml/stable-diffusion-v1-5"
|
57 |
-
export OUTPUT_DIR="/sddata/finetune/lora/pokemon"
|
58 |
-
export HUB_MODEL_ID="pokemon-lora"
|
59 |
-
export DATASET_NAME="lambdalabs/pokemon-blip-captions"
|
60 |
-
```
|
61 |
-
|
62 |
-
There are some flags to be aware of before you start training:
|
63 |
-
|
64 |
-
* `--push_to_hub` stores the trained LoRA embeddings on the Hub.
|
65 |
-
* `--report_to=wandb` reports and logs the training results to your Weights & Biases dashboard (as an example, take a look at this [report](https://wandb.ai/pcuenq/text2image-fine-tune/runs/b4k1w0tn?workspace=user-pcuenq)).
|
66 |
-
* `--learning_rate=1e-04`, you can afford to use a higher learning rate than you normally would with LoRA.
|
67 |
-
|
68 |
-
Now you're ready to launch the training (you can find the full training script [here](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image_lora.py)). Training takes about 5 hours on a 2080 Ti GPU with 11GB of RAM, and it'll create and save model checkpoints and the `pytorch_lora_weights` in your repository.
|
69 |
-
|
70 |
-
```bash
|
71 |
-
accelerate launch --mixed_precision="fp16" train_text_to_image_lora.py \
|
72 |
-
--pretrained_model_name_or_path=$MODEL_NAME \
|
73 |
-
--dataset_name=$DATASET_NAME \
|
74 |
-
--dataloader_num_workers=8 \
|
75 |
-
--resolution=512 --center_crop --random_flip \
|
76 |
-
--train_batch_size=1 \
|
77 |
-
--gradient_accumulation_steps=4 \
|
78 |
-
--max_train_steps=15000 \
|
79 |
-
--learning_rate=1e-04 \
|
80 |
-
--max_grad_norm=1 \
|
81 |
-
  --lr_scheduler="cosine" --lr_warmup_steps=0 \
  --output_dir=${OUTPUT_DIR} \
  --push_to_hub \
  --hub_model_id=${HUB_MODEL_ID} \
  --report_to=wandb \
  --checkpointing_steps=500 \
  --validation_prompt="A pokemon with blue eyes." \
  --seed=1337
```

### Inference[[text-to-image-inference]]

Now you can use the model for inference by loading the base model in the [`StableDiffusionPipeline`] and then the [`DPMSolverMultistepScheduler`]:

```py
>>> import torch
>>> from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler

>>> model_base = "runwayml/stable-diffusion-v1-5"

>>> pipe = StableDiffusionPipeline.from_pretrained(model_base, torch_dtype=torch.float16)
>>> pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
```

Load the LoRA weights from your finetuned model *on top of the base model weights*, and then move the pipeline to a GPU for faster inference. When you merge the LoRA weights with the frozen pretrained model weights, you can optionally adjust how much of the weights to merge with the `scale` parameter:

<Tip>

💡 A `scale` value of `0` is the same as not using your LoRA weights and you're only using the base model weights, and a `scale` value of `1` means you're only using the fully finetuned LoRA weights. Values between `0` and `1` interpolate between the two sets of weights.

</Tip>

```py
>>> pipe.unet.load_attn_procs(lora_model_path)
>>> pipe.to("cuda")
# use half the weights from the LoRA finetuned model and half the weights from the base model
>>> image = pipe(
...     "A pokemon with blue eyes.", num_inference_steps=25, guidance_scale=7.5, cross_attention_kwargs={"scale": 0.5}
... ).images[0]
# use the weights from the fully finetuned LoRA model
>>> image = pipe("A pokemon with blue eyes.", num_inference_steps=25, guidance_scale=7.5).images[0]
>>> image.save("blue_pokemon.png")
```

<Tip>

If you are loading the LoRA parameters from the Hub and the Hub repository has
a `base_model` tag (such as [this](https://huggingface.co/sayakpaul/sd-model-finetuned-lora-t4/blob/main/README.md?code=true#L4)), then
you can do:

```py
from huggingface_hub.repocard import RepoCard

lora_model_id = "sayakpaul/sd-model-finetuned-lora-t4"
card = RepoCard.load(lora_model_id)
base_model_id = card.data.to_dict()["base_model"]

pipe = StableDiffusionPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16)
...
```

</Tip>

## DreamBooth

[DreamBooth](https://arxiv.org/abs/2208.12242) is a finetuning technique for personalizing a text-to-image model like Stable Diffusion to generate photorealistic images of a subject in different contexts, given a few images of the subject. However, DreamBooth is very sensitive to hyperparameters and it is easy to overfit. Some important hyperparameters to consider include those that affect the training time (learning rate, number of training steps) and inference time (number of steps, scheduler type).

<Tip>

💡 Take a look at the [Training Stable Diffusion with DreamBooth using 🧨 Diffusers](https://huggingface.co/blog/dreambooth) blog for an in-depth analysis of DreamBooth experiments and recommended settings.

</Tip>

### Training[[dreambooth-training]]

Let's finetune [`stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) with DreamBooth and LoRA with some 🐶 [dog images](https://drive.google.com/drive/folders/1BO_dyz-p65qhBRRMRA4TbZ8qW4rB99JZ). Download and save these images to a directory. To use your own dataset, take a look at the [Create a dataset for training](create_dataset) guide.

To start, specify the `MODEL_NAME` environment variable (either a Hub model repository id or a path to the directory containing the model weights) and pass it to the [`pretrained_model_name_or_path`](https://huggingface.co/docs/diffusers/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.from_pretrained.pretrained_model_name_or_path) argument. You'll also need to set `INSTANCE_DIR` to the path of the directory containing the images.

The `OUTPUT_DIR` variable is optional and specifies where to save the model on the Hub:

```bash
export MODEL_NAME="runwayml/stable-diffusion-v1-5"
export INSTANCE_DIR="path-to-instance-images"
export OUTPUT_DIR="path-to-save-model"
```

There are some flags to be aware of before you start training:

* `--push_to_hub` stores the trained LoRA embeddings on the Hub.
* `--report_to=wandb` reports and logs the training results to your Weights & Biases dashboard (as an example, take a look at this [report](https://wandb.ai/pcuenq/text2image-fine-tune/runs/b4k1w0tn?workspace=user-pcuenq)).
* `--learning_rate=1e-04`, you can afford to use a higher learning rate than you normally would with LoRA.

Now you're ready to launch the training (you can find the full training script [here](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth_lora.py)). The script creates and saves model checkpoints and the `pytorch_lora_weights.bin` file in your repository.

It's also possible to additionally fine-tune the text encoder with LoRA. In most cases, this leads to better results with a slight increase in compute. To fine-tune the text encoder with LoRA, specify the `--train_text_encoder` flag when launching the `train_dreambooth_lora.py` script.

```bash
accelerate launch train_dreambooth_lora.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir=$INSTANCE_DIR \
  --output_dir=$OUTPUT_DIR \
  --instance_prompt="a photo of sks dog" \
  --resolution=512 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=1 \
  --checkpointing_steps=100 \
  --learning_rate=1e-4 \
  --report_to="wandb" \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --max_train_steps=500 \
  --validation_prompt="A photo of sks dog in a bucket" \
  --validation_epochs=50 \
  --seed="0" \
  --push_to_hub
```

### Inference[[dreambooth-inference]]

Now you can use the model for inference by loading the base model in the [`StableDiffusionPipeline`]:

```py
>>> import torch
>>> from diffusers import StableDiffusionPipeline

>>> model_base = "runwayml/stable-diffusion-v1-5"

>>> pipe = StableDiffusionPipeline.from_pretrained(model_base, torch_dtype=torch.float16)
```

Load the LoRA weights from your finetuned DreamBooth model *on top of the base model weights*, and then move the pipeline to a GPU for faster inference. When you merge the LoRA weights with the frozen pretrained model weights, you can optionally adjust how much of the weights to merge with the `scale` parameter:

<Tip>

💡 A `scale` value of `0` is the same as not using your LoRA weights and you're only using the base model weights, and a `scale` value of `1` means you're only using the fully finetuned LoRA weights. Values between `0` and `1` interpolate between the two sets of weights.

</Tip>

```py
>>> pipe.unet.load_attn_procs(lora_model_path)
>>> pipe.to("cuda")
# use half the weights from the LoRA finetuned model and half the weights from the base model
>>> image = pipe(
...     "A picture of a sks dog in a bucket.",
...     num_inference_steps=25,
...     guidance_scale=7.5,
...     cross_attention_kwargs={"scale": 0.5},
... ).images[0]
# use the weights from the fully finetuned LoRA model
>>> image = pipe("A picture of a sks dog in a bucket.", num_inference_steps=25, guidance_scale=7.5).images[0]
>>> image.save("bucket-dog.png")
```

If you used `--train_text_encoder` during training, then use `pipe.load_lora_weights()` to load the LoRA weights. For example:

```python
from huggingface_hub.repocard import RepoCard
from diffusers import StableDiffusionPipeline
import torch

lora_model_id = "sayakpaul/dreambooth-text-encoder-test"
card = RepoCard.load(lora_model_id)
base_model_id = card.data.to_dict()["base_model"]

pipe = StableDiffusionPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16)
pipe = pipe.to("cuda")
pipe.load_lora_weights(lora_model_id)
image = pipe("A picture of a sks dog in a bucket", num_inference_steps=25).images[0]
```

<Tip>

If your LoRA parameters involve the UNet as well as the Text Encoder, then passing
`cross_attention_kwargs={"scale": 0.5}` will apply the `scale` value to both the UNet
and the Text Encoder.

</Tip>

Note that [`~diffusers.loaders.LoraLoaderMixin.load_lora_weights`] is preferred to [`~diffusers.loaders.UNet2DConditionLoadersMixin.load_attn_procs`] for loading LoRA parameters. This is because
[`~diffusers.loaders.LoraLoaderMixin.load_lora_weights`] can handle the following situations:

* LoRA parameters that don't have separate identifiers for the UNet and the text encoder (such as [`"patrickvonplaten/lora_dreambooth_dog_example"`](https://huggingface.co/patrickvonplaten/lora_dreambooth_dog_example)). So, you can just do:

  ```py
  pipe.load_lora_weights(lora_model_path)
  ```

* LoRA parameters that have separate identifiers for the UNet and the text encoder, such as [`"sayakpaul/dreambooth"`](https://huggingface.co/sayakpaul/dreambooth).

**Note** that it is possible to provide a local directory path to [`~diffusers.loaders.LoraLoaderMixin.load_lora_weights`] as well as [`~diffusers.loaders.UNet2DConditionLoadersMixin.load_attn_procs`]. To know about the supported inputs, refer to the respective docstrings.
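As a minimal sketch of the local-directory case (the `./my_lora` folder name is purely hypothetical and stands in for any directory that holds weights saved by one of the training scripts above):

```py
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# "./my_lora" is a hypothetical local directory containing the trained LoRA weights
# (e.g. a pytorch_lora_weights.bin file)
pipe.load_lora_weights("./my_lora")

image = pipe("A pokemon with blue eyes.", num_inference_steps=25).images[0]
```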
## Unloading LoRA parameters

You can call [`~diffusers.loaders.LoraLoaderMixin.unload_lora_weights`] on a pipeline to unload the LoRA parameters.
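For example, a minimal sketch that loads LoRA parameters, generates an image, and then restores the plain base model weights (reusing the `pipe` and `lora_model_id` from the DreamBooth example above):

```py
pipe.load_lora_weights(lora_model_id)
image_with_lora = pipe("A picture of a sks dog in a bucket", num_inference_steps=25).images[0]

# drop the LoRA parameters and fall back to the original base model weights
pipe.unload_lora_weights()
image_without_lora = pipe("A picture of a sks dog in a bucket", num_inference_steps=25).images[0]
```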
## Supporting A1111 themed LoRA checkpoints from Diffusers

This support was made possible because of our amazing contributors: [@takuma104](https://github.com/takuma104) and [@isidentical](https://github.com/isidentical).

To provide seamless interoperability with A1111 to our users, we support loading A1111 formatted LoRA checkpoints using [`~diffusers.loaders.LoraLoaderMixin.load_lora_weights`] in a limited capacity. In this section, we explain how to load an A1111 formatted LoRA checkpoint from [CivitAI](https://civitai.com/) in Diffusers and perform inference with it.

First, download a checkpoint. We'll use [this one](https://civitai.com/models/13239/light-and-shadow) for demonstration purposes.

```bash
wget https://civitai.com/api/download/models/15603 -O light_and_shadow.safetensors
```

Next, we initialize a [`~DiffusionPipeline`]:

```python
import torch

from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler

pipeline = StableDiffusionPipeline.from_pretrained(
    "gsdf/Counterfeit-V2.5", torch_dtype=torch.float16, safety_checker=None
).to("cuda")
pipeline.scheduler = DPMSolverMultistepScheduler.from_config(
    pipeline.scheduler.config, use_karras_sigmas=True
)
```

We then load the checkpoint downloaded from CivitAI:

```python
pipeline.load_lora_weights(".", weight_name="light_and_shadow.safetensors")
```

<Tip warning={true}>

If you're loading a checkpoint in the `safetensors` format, please ensure you have `safetensors` installed.

</Tip>

Then it's time to run inference:

```python
prompt = "masterpiece, best quality, 1girl, at dusk"
negative_prompt = ("(low quality, worst quality:1.4), (bad anatomy), (inaccurate limb:1.2), "
                   "bad composition, inaccurate eyes, extra digit, fewer digits, (extra arms:1.2), large breasts")

images = pipeline(prompt=prompt,
    negative_prompt=negative_prompt,
    width=512,
    height=768,
    num_inference_steps=15,
    num_images_per_prompt=4,
    generator=torch.manual_seed(0)
).images
```

Below is a comparison between the LoRA and the non-LoRA results:



If you have a similar checkpoint stored on the Hugging Face Hub, you can load it directly with [`~diffusers.loaders.LoraLoaderMixin.load_lora_weights`] like so:

```python
lora_model_id = "sayakpaul/civitai-light-shadow-lora"
lora_filename = "light_and_shadow.safetensors"
pipeline.load_lora_weights(lora_model_id, weight_name=lora_filename)
```

### Supporting Stable Diffusion XL LoRAs trained using the Kohya-trainer

With this [PR](https://github.com/huggingface/diffusers/pull/4287), there should now be better support for loading Kohya-style LoRAs trained on Stable Diffusion XL (SDXL).

Here are some example checkpoints we tried out:

* SDXL 0.9:
  * https://civitai.com/models/22279?modelVersionId=118556
  * https://civitai.com/models/104515/sdxlor30costumesrevue-starlight-saijoclaudine-lora
  * https://civitai.com/models/108448/daiton-sdxl-test
  * https://filebin.net/2ntfqqnapiu9q3zx/pixelbuildings128-v1.safetensors
* SDXL 1.0:
  * https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_offset_example-lora_1.0.safetensors

Here is an example of how to perform inference with these checkpoints in `diffusers`:

```python
from diffusers import DiffusionPipeline
import torch

base_model_id = "stabilityai/stable-diffusion-xl-base-0.9"
pipeline = DiffusionPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16).to("cuda")
pipeline.load_lora_weights(".", weight_name="Kamepan.safetensors")

prompt = "anime screencap, glint, drawing, best quality, light smile, shy, a full body of a girl wearing wedding dress in the middle of the forest beneath the trees, fireflies, big eyes, 2d, cute, anime girl, waifu, cel shading, magical girl, vivid colors, (outline:1.1), manga anime artstyle, masterpiece, offical wallpaper, glint <lora:kame_sdxl_v2:1>"
negative_prompt = "(deformed, bad quality, sketch, depth of field, blurry:1.1), grainy, bad anatomy, bad perspective, old, ugly, realistic, cartoon, disney, bad propotions"
generator = torch.manual_seed(2947883060)
num_inference_steps = 30
guidance_scale = 7

image = pipeline(
    prompt=prompt, negative_prompt=negative_prompt, num_inference_steps=num_inference_steps,
    generator=generator, guidance_scale=guidance_scale
).images[0]
image.save("Kamepan.png")
```

`Kamepan.safetensors` comes from https://civitai.com/models/22279?modelVersionId=118556.

As you can see, the inference UX is exactly the same as what we presented in the sections above.

Thanks to [@isidentical](https://github.com/isidentical) for helping us integrate this feature.

### Known limitations specific to the Kohya-styled LoRAs

* SDXL LoRAs that have both text encoders currently lead to weird results. We're actively investigating the issue.
* When images don't look similar to those from other UIs, such as ComfyUI, it can be because of multiple reasons, as explained [here](https://github.com/huggingface/diffusers/pull/4287/#issuecomment-1655110736).
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/models/test_models_vae_flax.py
DELETED
@@ -1,39 +0,0 @@
import unittest

from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax

from .test_modeling_common_flax import FlaxModelTesterMixin


if is_flax_available():
    import jax


@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/guided_anchor_head.py
DELETED
@@ -1,860 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
import torch.nn as nn
|
3 |
-
from mmcv.cnn import bias_init_with_prob, normal_init
|
4 |
-
from mmcv.ops import DeformConv2d, MaskedConv2d
|
5 |
-
from mmcv.runner import force_fp32
|
6 |
-
|
7 |
-
from mmdet.core import (anchor_inside_flags, build_anchor_generator,
|
8 |
-
build_assigner, build_bbox_coder, build_sampler,
|
9 |
-
calc_region, images_to_levels, multi_apply,
|
10 |
-
multiclass_nms, unmap)
|
11 |
-
from ..builder import HEADS, build_loss
|
12 |
-
from .anchor_head import AnchorHead
|
13 |
-
|
14 |
-
|
15 |
-
class FeatureAdaption(nn.Module):
|
16 |
-
"""Feature Adaption Module.
|
17 |
-
|
18 |
-
Feature Adaption Module is implemented based on DCN v1.
|
19 |
-
It uses anchor shape prediction rather than feature map to
|
20 |
-
predict offsets of deform conv layer.
|
21 |
-
|
22 |
-
Args:
|
23 |
-
in_channels (int): Number of channels in the input feature map.
|
24 |
-
out_channels (int): Number of channels in the output feature map.
|
25 |
-
kernel_size (int): Deformable conv kernel size.
|
26 |
-
deform_groups (int): Deformable conv group size.
|
27 |
-
"""
|
28 |
-
|
29 |
-
def __init__(self,
|
30 |
-
in_channels,
|
31 |
-
out_channels,
|
32 |
-
kernel_size=3,
|
33 |
-
deform_groups=4):
|
34 |
-
super(FeatureAdaption, self).__init__()
|
35 |
-
offset_channels = kernel_size * kernel_size * 2
|
36 |
-
self.conv_offset = nn.Conv2d(
|
37 |
-
2, deform_groups * offset_channels, 1, bias=False)
|
38 |
-
self.conv_adaption = DeformConv2d(
|
39 |
-
in_channels,
|
40 |
-
out_channels,
|
41 |
-
kernel_size=kernel_size,
|
42 |
-
padding=(kernel_size - 1) // 2,
|
43 |
-
deform_groups=deform_groups)
|
44 |
-
self.relu = nn.ReLU(inplace=True)
|
45 |
-
|
46 |
-
def init_weights(self):
|
47 |
-
normal_init(self.conv_offset, std=0.1)
|
48 |
-
normal_init(self.conv_adaption, std=0.01)
|
49 |
-
|
50 |
-
def forward(self, x, shape):
|
51 |
-
offset = self.conv_offset(shape.detach())
|
52 |
-
x = self.relu(self.conv_adaption(x, offset))
|
53 |
-
return x
|
54 |
-
|
55 |
-
|
56 |
-
@HEADS.register_module()
|
57 |
-
class GuidedAnchorHead(AnchorHead):
|
58 |
-
"""Guided-Anchor-based head (GA-RPN, GA-RetinaNet, etc.).
|
59 |
-
|
60 |
-
This GuidedAnchorHead will predict high-quality feature guided
|
61 |
-
anchors and locations where anchors will be kept in inference.
|
62 |
-
There are mainly 3 categories of bounding-boxes.
|
63 |
-
|
64 |
-
- Sampled 9 pairs for target assignment. (approxes)
|
65 |
-
- The square boxes where the predicted anchors are based on. (squares)
|
66 |
-
- Guided anchors.
|
67 |
-
|
68 |
-
Please refer to https://arxiv.org/abs/1901.03278 for more details.
|
69 |
-
|
70 |
-
Args:
|
71 |
-
num_classes (int): Number of classes.
|
72 |
-
in_channels (int): Number of channels in the input feature map.
|
73 |
-
feat_channels (int): Number of hidden channels.
|
74 |
-
approx_anchor_generator (dict): Config dict for approx generator
|
75 |
-
square_anchor_generator (dict): Config dict for square generator
|
76 |
-
anchor_coder (dict): Config dict for anchor coder
|
77 |
-
bbox_coder (dict): Config dict for bbox coder
|
78 |
-
reg_decoded_bbox (bool): If true, the regression loss would be
|
79 |
-
applied directly on decoded bounding boxes, converting both
|
80 |
-
the predicted boxes and regression targets to absolute
|
81 |
-
coordinates format. Default False. It should be `True` when
|
82 |
-
using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head.
|
83 |
-
deform_groups: (int): Group number of DCN in
|
84 |
-
FeatureAdaption module.
|
85 |
-
loc_filter_thr (float): Threshold to filter out unconcerned regions.
|
86 |
-
loss_loc (dict): Config of location loss.
|
87 |
-
loss_shape (dict): Config of anchor shape loss.
|
88 |
-
loss_cls (dict): Config of classification loss.
|
89 |
-
loss_bbox (dict): Config of bbox regression loss.
|
90 |
-
"""
|
91 |
-
|
92 |
-
def __init__(
|
93 |
-
self,
|
94 |
-
num_classes,
|
95 |
-
in_channels,
|
96 |
-
feat_channels=256,
|
97 |
-
approx_anchor_generator=dict(
|
98 |
-
type='AnchorGenerator',
|
99 |
-
octave_base_scale=8,
|
100 |
-
scales_per_octave=3,
|
101 |
-
ratios=[0.5, 1.0, 2.0],
|
102 |
-
strides=[4, 8, 16, 32, 64]),
|
103 |
-
square_anchor_generator=dict(
|
104 |
-
type='AnchorGenerator',
|
105 |
-
ratios=[1.0],
|
106 |
-
scales=[8],
|
107 |
-
strides=[4, 8, 16, 32, 64]),
|
108 |
-
anchor_coder=dict(
|
109 |
-
type='DeltaXYWHBBoxCoder',
|
110 |
-
target_means=[.0, .0, .0, .0],
|
111 |
-
target_stds=[1.0, 1.0, 1.0, 1.0]
|
112 |
-
),
|
113 |
-
bbox_coder=dict(
|
114 |
-
type='DeltaXYWHBBoxCoder',
|
115 |
-
target_means=[.0, .0, .0, .0],
|
116 |
-
target_stds=[1.0, 1.0, 1.0, 1.0]
|
117 |
-
),
|
118 |
-
reg_decoded_bbox=False,
|
119 |
-
deform_groups=4,
|
120 |
-
loc_filter_thr=0.01,
|
121 |
-
train_cfg=None,
|
122 |
-
test_cfg=None,
|
123 |
-
loss_loc=dict(
|
124 |
-
type='FocalLoss',
|
125 |
-
use_sigmoid=True,
|
126 |
-
gamma=2.0,
|
127 |
-
alpha=0.25,
|
128 |
-
loss_weight=1.0),
|
129 |
-
loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
|
130 |
-
loss_cls=dict(
|
131 |
-
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
|
132 |
-
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
|
133 |
-
loss_weight=1.0)): # yapf: disable
|
134 |
-
super(AnchorHead, self).__init__()
|
135 |
-
self.in_channels = in_channels
|
136 |
-
self.num_classes = num_classes
|
137 |
-
self.feat_channels = feat_channels
|
138 |
-
self.deform_groups = deform_groups
|
139 |
-
self.loc_filter_thr = loc_filter_thr
|
140 |
-
|
141 |
-
# build approx_anchor_generator and square_anchor_generator
|
142 |
-
assert (approx_anchor_generator['octave_base_scale'] ==
|
143 |
-
square_anchor_generator['scales'][0])
|
144 |
-
assert (approx_anchor_generator['strides'] ==
|
145 |
-
square_anchor_generator['strides'])
|
146 |
-
self.approx_anchor_generator = build_anchor_generator(
|
147 |
-
approx_anchor_generator)
|
148 |
-
self.square_anchor_generator = build_anchor_generator(
|
149 |
-
square_anchor_generator)
|
150 |
-
self.approxs_per_octave = self.approx_anchor_generator \
|
151 |
-
.num_base_anchors[0]
|
152 |
-
|
153 |
-
self.reg_decoded_bbox = reg_decoded_bbox
|
154 |
-
|
155 |
-
# one anchor per location
|
156 |
-
self.num_anchors = 1
|
157 |
-
self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
|
158 |
-
self.loc_focal_loss = loss_loc['type'] in ['FocalLoss']
|
159 |
-
self.sampling = loss_cls['type'] not in ['FocalLoss']
|
160 |
-
self.ga_sampling = train_cfg is not None and hasattr(
|
161 |
-
train_cfg, 'ga_sampler')
|
162 |
-
if self.use_sigmoid_cls:
|
163 |
-
self.cls_out_channels = self.num_classes
|
164 |
-
else:
|
165 |
-
self.cls_out_channels = self.num_classes + 1
|
166 |
-
|
167 |
-
# build bbox_coder
|
168 |
-
self.anchor_coder = build_bbox_coder(anchor_coder)
|
169 |
-
self.bbox_coder = build_bbox_coder(bbox_coder)
|
170 |
-
|
171 |
-
# build losses
|
172 |
-
self.loss_loc = build_loss(loss_loc)
|
173 |
-
self.loss_shape = build_loss(loss_shape)
|
174 |
-
self.loss_cls = build_loss(loss_cls)
|
175 |
-
self.loss_bbox = build_loss(loss_bbox)
|
176 |
-
|
177 |
-
self.train_cfg = train_cfg
|
178 |
-
self.test_cfg = test_cfg
|
179 |
-
|
180 |
-
if self.train_cfg:
|
181 |
-
self.assigner = build_assigner(self.train_cfg.assigner)
|
182 |
-
# use PseudoSampler when sampling is False
|
183 |
-
if self.sampling and hasattr(self.train_cfg, 'sampler'):
|
184 |
-
sampler_cfg = self.train_cfg.sampler
|
185 |
-
else:
|
186 |
-
sampler_cfg = dict(type='PseudoSampler')
|
187 |
-
self.sampler = build_sampler(sampler_cfg, context=self)
|
188 |
-
|
189 |
-
self.ga_assigner = build_assigner(self.train_cfg.ga_assigner)
|
190 |
-
if self.ga_sampling:
|
191 |
-
ga_sampler_cfg = self.train_cfg.ga_sampler
|
192 |
-
else:
|
193 |
-
ga_sampler_cfg = dict(type='PseudoSampler')
|
194 |
-
self.ga_sampler = build_sampler(ga_sampler_cfg, context=self)
|
195 |
-
|
196 |
-
self.fp16_enabled = False
|
197 |
-
|
198 |
-
self._init_layers()
|
199 |
-
|
200 |
-
def _init_layers(self):
|
201 |
-
self.relu = nn.ReLU(inplace=True)
|
202 |
-
self.conv_loc = nn.Conv2d(self.in_channels, 1, 1)
|
203 |
-
self.conv_shape = nn.Conv2d(self.in_channels, self.num_anchors * 2, 1)
|
204 |
-
self.feature_adaption = FeatureAdaption(
|
205 |
-
self.in_channels,
|
206 |
-
self.feat_channels,
|
207 |
-
kernel_size=3,
|
208 |
-
deform_groups=self.deform_groups)
|
209 |
-
self.conv_cls = MaskedConv2d(self.feat_channels,
|
210 |
-
self.num_anchors * self.cls_out_channels,
|
211 |
-
1)
|
212 |
-
self.conv_reg = MaskedConv2d(self.feat_channels, self.num_anchors * 4,
|
213 |
-
1)
|
214 |
-
|
215 |
-
def init_weights(self):
|
216 |
-
normal_init(self.conv_cls, std=0.01)
|
217 |
-
normal_init(self.conv_reg, std=0.01)
|
218 |
-
|
219 |
-
bias_cls = bias_init_with_prob(0.01)
|
220 |
-
normal_init(self.conv_loc, std=0.01, bias=bias_cls)
|
221 |
-
normal_init(self.conv_shape, std=0.01)
|
222 |
-
|
223 |
-
self.feature_adaption.init_weights()
|
224 |
-
|
225 |
-
def forward_single(self, x):
|
226 |
-
loc_pred = self.conv_loc(x)
|
227 |
-
shape_pred = self.conv_shape(x)
|
228 |
-
x = self.feature_adaption(x, shape_pred)
|
229 |
-
# masked conv is only used during inference for speed-up
|
230 |
-
if not self.training:
|
231 |
-
mask = loc_pred.sigmoid()[0] >= self.loc_filter_thr
|
232 |
-
else:
|
233 |
-
mask = None
|
234 |
-
cls_score = self.conv_cls(x, mask)
|
235 |
-
bbox_pred = self.conv_reg(x, mask)
|
236 |
-
return cls_score, bbox_pred, shape_pred, loc_pred
|
237 |
-
|
238 |
-
def forward(self, feats):
|
239 |
-
return multi_apply(self.forward_single, feats)
|
240 |
-
|
241 |
-
def get_sampled_approxs(self, featmap_sizes, img_metas, device='cuda'):
|
242 |
-
"""Get sampled approxs and inside flags according to feature map sizes.
|
243 |
-
|
244 |
-
Args:
|
245 |
-
featmap_sizes (list[tuple]): Multi-level feature map sizes.
|
246 |
-
img_metas (list[dict]): Image meta info.
|
247 |
-
device (torch.device | str): device for returned tensors
|
248 |
-
|
249 |
-
Returns:
|
250 |
-
tuple: approxes of each image, inside flags of each image
|
251 |
-
"""
|
252 |
-
num_imgs = len(img_metas)
|
253 |
-
|
254 |
-
# since feature map sizes of all images are the same, we only compute
|
255 |
-
# approxes for one time
|
256 |
-
multi_level_approxs = self.approx_anchor_generator.grid_anchors(
|
257 |
-
featmap_sizes, device=device)
|
258 |
-
approxs_list = [multi_level_approxs for _ in range(num_imgs)]
|
259 |
-
|
260 |
-
# for each image, we compute inside flags of multi level approxes
|
261 |
-
inside_flag_list = []
|
262 |
-
for img_id, img_meta in enumerate(img_metas):
|
263 |
-
multi_level_flags = []
|
264 |
-
multi_level_approxs = approxs_list[img_id]
|
265 |
-
|
266 |
-
# obtain valid flags for each approx first
|
267 |
-
multi_level_approx_flags = self.approx_anchor_generator \
|
268 |
-
.valid_flags(featmap_sizes,
|
269 |
-
img_meta['pad_shape'],
|
270 |
-
device=device)
|
271 |
-
|
272 |
-
for i, flags in enumerate(multi_level_approx_flags):
|
273 |
-
approxs = multi_level_approxs[i]
|
274 |
-
inside_flags_list = []
|
275 |
-
for i in range(self.approxs_per_octave):
|
276 |
-
split_valid_flags = flags[i::self.approxs_per_octave]
|
277 |
-
split_approxs = approxs[i::self.approxs_per_octave, :]
|
278 |
-
inside_flags = anchor_inside_flags(
|
279 |
-
split_approxs, split_valid_flags,
|
280 |
-
img_meta['img_shape'][:2],
|
281 |
-
self.train_cfg.allowed_border)
|
282 |
-
inside_flags_list.append(inside_flags)
|
283 |
-
# inside_flag for a position is true if any anchor in this
|
284 |
-
# position is true
|
285 |
-
inside_flags = (
|
286 |
-
torch.stack(inside_flags_list, 0).sum(dim=0) > 0)
|
287 |
-
multi_level_flags.append(inside_flags)
|
288 |
-
inside_flag_list.append(multi_level_flags)
|
289 |
-
return approxs_list, inside_flag_list
|
290 |
-
|
291 |
-
def get_anchors(self,
|
292 |
-
featmap_sizes,
|
293 |
-
shape_preds,
|
294 |
-
loc_preds,
|
295 |
-
img_metas,
|
296 |
-
use_loc_filter=False,
|
297 |
-
device='cuda'):
|
298 |
-
"""Get squares according to feature map sizes and guided anchors.
|
299 |
-
|
300 |
-
Args:
|
301 |
-
featmap_sizes (list[tuple]): Multi-level feature map sizes.
|
302 |
-
shape_preds (list[tensor]): Multi-level shape predictions.
|
303 |
-
loc_preds (list[tensor]): Multi-level location predictions.
|
304 |
-
img_metas (list[dict]): Image meta info.
|
305 |
-
use_loc_filter (bool): Use loc filter or not.
|
306 |
-
device (torch.device | str): device for returned tensors
|
307 |
-
|
308 |
-
Returns:
|
309 |
-
tuple: square approxs of each image, guided anchors of each image,
|
310 |
-
loc masks of each image
|
311 |
-
"""
|
312 |
-
num_imgs = len(img_metas)
|
313 |
-
num_levels = len(featmap_sizes)
|
314 |
-
|
315 |
-
# since feature map sizes of all images are the same, we only compute
|
316 |
-
# squares for one time
|
317 |
-
multi_level_squares = self.square_anchor_generator.grid_anchors(
|
318 |
-
featmap_sizes, device=device)
|
319 |
-
squares_list = [multi_level_squares for _ in range(num_imgs)]
|
320 |
-
|
321 |
-
# for each image, we compute multi level guided anchors
|
322 |
-
guided_anchors_list = []
|
323 |
-
loc_mask_list = []
|
324 |
-
for img_id, img_meta in enumerate(img_metas):
|
325 |
-
multi_level_guided_anchors = []
|
326 |
-
multi_level_loc_mask = []
|
327 |
-
for i in range(num_levels):
|
328 |
-
squares = squares_list[img_id][i]
|
329 |
-
shape_pred = shape_preds[i][img_id]
|
330 |
-
loc_pred = loc_preds[i][img_id]
|
331 |
-
guided_anchors, loc_mask = self._get_guided_anchors_single(
|
332 |
-
squares,
|
333 |
-
shape_pred,
|
334 |
-
loc_pred,
|
335 |
-
use_loc_filter=use_loc_filter)
|
336 |
-
multi_level_guided_anchors.append(guided_anchors)
|
337 |
-
multi_level_loc_mask.append(loc_mask)
|
338 |
-
guided_anchors_list.append(multi_level_guided_anchors)
|
339 |
-
loc_mask_list.append(multi_level_loc_mask)
|
340 |
-
return squares_list, guided_anchors_list, loc_mask_list
|
341 |
-
|
342 |
-
def _get_guided_anchors_single(self,
|
343 |
-
squares,
|
344 |
-
shape_pred,
|
345 |
-
loc_pred,
|
346 |
-
use_loc_filter=False):
|
347 |
-
"""Get guided anchors and loc masks for a single level.
|
348 |
-
|
349 |
-
Args:
|
350 |
-
square (tensor): Squares of a single level.
|
351 |
-
shape_pred (tensor): Shape predections of a single level.
|
352 |
-
loc_pred (tensor): Loc predections of a single level.
|
353 |
-
use_loc_filter (list[tensor]): Use loc filter or not.
|
354 |
-
|
355 |
-
Returns:
|
356 |
-
tuple: guided anchors, location masks
|
357 |
-
"""
|
358 |
-
# calculate location filtering mask
|
359 |
-
loc_pred = loc_pred.sigmoid().detach()
|
360 |
-
if use_loc_filter:
|
361 |
-
loc_mask = loc_pred >= self.loc_filter_thr
|
362 |
-
else:
|
363 |
-
loc_mask = loc_pred >= 0.0
|
364 |
-
mask = loc_mask.permute(1, 2, 0).expand(-1, -1, self.num_anchors)
|
365 |
-
mask = mask.contiguous().view(-1)
|
366 |
-
# calculate guided anchors
|
367 |
-
squares = squares[mask]
|
368 |
-
anchor_deltas = shape_pred.permute(1, 2, 0).contiguous().view(
|
369 |
-
-1, 2).detach()[mask]
|
370 |
-
bbox_deltas = anchor_deltas.new_full(squares.size(), 0)
|
371 |
-
bbox_deltas[:, 2:] = anchor_deltas
|
372 |
-
guided_anchors = self.anchor_coder.decode(
|
373 |
-
squares, bbox_deltas, wh_ratio_clip=1e-6)
|
374 |
-
return guided_anchors, mask
|
375 |
-
|
376 |
-
def ga_loc_targets(self, gt_bboxes_list, featmap_sizes):
|
377 |
-
"""Compute location targets for guided anchoring.
|
378 |
-
|
379 |
-
Each feature map is divided into positive, negative and ignore regions.
|
380 |
-
- positive regions: target 1, weight 1
|
381 |
-
- ignore regions: target 0, weight 0
|
382 |
-
- negative regions: target 0, weight 0.1
|
383 |
-
|
384 |
-
Args:
|
385 |
-
gt_bboxes_list (list[Tensor]): Gt bboxes of each image.
|
386 |
-
featmap_sizes (list[tuple]): Multi level sizes of each feature
|
387 |
-
maps.
|
388 |
-
|
389 |
-
Returns:
|
390 |
-
tuple
|
391 |
-
"""
|
392 |
-
anchor_scale = self.approx_anchor_generator.octave_base_scale
|
393 |
-
anchor_strides = self.approx_anchor_generator.strides
|
394 |
-
# Currently only supports same stride in x and y direction.
|
395 |
-
for stride in anchor_strides:
|
396 |
-
assert (stride[0] == stride[1])
|
397 |
-
anchor_strides = [stride[0] for stride in anchor_strides]
|
398 |
-
|
399 |
-
center_ratio = self.train_cfg.center_ratio
|
400 |
-
ignore_ratio = self.train_cfg.ignore_ratio
|
401 |
-
img_per_gpu = len(gt_bboxes_list)
|
402 |
-
num_lvls = len(featmap_sizes)
|
403 |
-
r1 = (1 - center_ratio) / 2
|
404 |
-
r2 = (1 - ignore_ratio) / 2
|
405 |
-
all_loc_targets = []
|
406 |
-
all_loc_weights = []
|
407 |
-
all_ignore_map = []
|
408 |
-
for lvl_id in range(num_lvls):
|
409 |
-
h, w = featmap_sizes[lvl_id]
|
410 |
-
loc_targets = torch.zeros(
|
411 |
-
img_per_gpu,
|
412 |
-
1,
|
413 |
-
h,
|
414 |
-
w,
|
415 |
-
device=gt_bboxes_list[0].device,
|
416 |
-
dtype=torch.float32)
|
417 |
-
loc_weights = torch.full_like(loc_targets, -1)
|
418 |
-
ignore_map = torch.zeros_like(loc_targets)
|
419 |
-
all_loc_targets.append(loc_targets)
|
420 |
-
all_loc_weights.append(loc_weights)
|
421 |
-
all_ignore_map.append(ignore_map)
|
422 |
-
for img_id in range(img_per_gpu):
|
423 |
-
gt_bboxes = gt_bboxes_list[img_id]
|
424 |
-
scale = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0]) *
|
425 |
-
(gt_bboxes[:, 3] - gt_bboxes[:, 1]))
|
426 |
-
min_anchor_size = scale.new_full(
|
427 |
-
(1, ), float(anchor_scale * anchor_strides[0]))
|
428 |
-
# assign gt bboxes to different feature levels w.r.t. their scales
|
429 |
-
target_lvls = torch.floor(
|
430 |
-
torch.log2(scale) - torch.log2(min_anchor_size) + 0.5)
|
431 |
-
target_lvls = target_lvls.clamp(min=0, max=num_lvls - 1).long()
|
432 |
-
for gt_id in range(gt_bboxes.size(0)):
|
433 |
-
lvl = target_lvls[gt_id].item()
|
434 |
-
# rescaled to corresponding feature map
|
435 |
-
gt_ = gt_bboxes[gt_id, :4] / anchor_strides[lvl]
|
436 |
-
# calculate ignore regions
|
437 |
-
ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region(
|
438 |
-
gt_, r2, featmap_sizes[lvl])
|
439 |
-
# calculate positive (center) regions
|
440 |
-
ctr_x1, ctr_y1, ctr_x2, ctr_y2 = calc_region(
|
441 |
-
gt_, r1, featmap_sizes[lvl])
|
442 |
-
all_loc_targets[lvl][img_id, 0, ctr_y1:ctr_y2 + 1,
|
443 |
-
ctr_x1:ctr_x2 + 1] = 1
|
444 |
-
all_loc_weights[lvl][img_id, 0, ignore_y1:ignore_y2 + 1,
|
445 |
-
ignore_x1:ignore_x2 + 1] = 0
|
446 |
-
all_loc_weights[lvl][img_id, 0, ctr_y1:ctr_y2 + 1,
|
447 |
-
ctr_x1:ctr_x2 + 1] = 1
|
448 |
-
# calculate ignore map on nearby low level feature
|
449 |
-
if lvl > 0:
|
450 |
-
d_lvl = lvl - 1
|
451 |
-
# rescaled to corresponding feature map
|
452 |
-
gt_ = gt_bboxes[gt_id, :4] / anchor_strides[d_lvl]
|
453 |
-
ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region(
|
454 |
-
gt_, r2, featmap_sizes[d_lvl])
|
455 |
-
all_ignore_map[d_lvl][img_id, 0, ignore_y1:ignore_y2 + 1,
|
456 |
-
ignore_x1:ignore_x2 + 1] = 1
|
457 |
-
# calculate ignore map on nearby high level feature
|
458 |
-
if lvl < num_lvls - 1:
|
459 |
-
u_lvl = lvl + 1
|
460 |
-
# rescaled to corresponding feature map
|
461 |
-
gt_ = gt_bboxes[gt_id, :4] / anchor_strides[u_lvl]
|
462 |
-
ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region(
|
463 |
-
gt_, r2, featmap_sizes[u_lvl])
|
464 |
-
all_ignore_map[u_lvl][img_id, 0, ignore_y1:ignore_y2 + 1,
|
465 |
-
ignore_x1:ignore_x2 + 1] = 1
|
466 |
-
for lvl_id in range(num_lvls):
|
467 |
-
# ignore negative regions w.r.t. ignore map
|
468 |
-
all_loc_weights[lvl_id][(all_loc_weights[lvl_id] < 0)
|
469 |
-
& (all_ignore_map[lvl_id] > 0)] = 0
|
470 |
-
# set negative regions with weight 0.1
|
471 |
-
all_loc_weights[lvl_id][all_loc_weights[lvl_id] < 0] = 0.1
|
472 |
-
# loc average factor to balance loss
|
473 |
-
loc_avg_factor = sum(
|
474 |
-
[t.size(0) * t.size(-1) * t.size(-2)
|
475 |
-
for t in all_loc_targets]) / 200
|
476 |
-
return all_loc_targets, all_loc_weights, loc_avg_factor
|
477 |
-
|
478 |
-
def _ga_shape_target_single(self,
|
479 |
-
flat_approxs,
|
480 |
-
inside_flags,
|
481 |
-
flat_squares,
|
482 |
-
gt_bboxes,
|
483 |
-
gt_bboxes_ignore,
|
484 |
-
img_meta,
|
485 |
-
unmap_outputs=True):
|
486 |
-
"""Compute guided anchoring targets.
|
487 |
-
|
488 |
-
This function returns sampled anchors and gt bboxes directly
|
489 |
-
rather than calculates regression targets.
|
490 |
-
|
491 |
-
Args:
|
492 |
-
flat_approxs (Tensor): flat approxs of a single image,
|
493 |
-
shape (n, 4)
|
494 |
-
inside_flags (Tensor): inside flags of a single image,
|
495 |
-
shape (n, ).
|
496 |
-
flat_squares (Tensor): flat squares of a single image,
|
497 |
-
shape (approxs_per_octave * n, 4)
|
498 |
-
gt_bboxes (Tensor): Ground truth bboxes of a single image.
|
499 |
-
img_meta (dict): Meta info of a single image.
|
500 |
-
approxs_per_octave (int): number of approxs per octave
|
501 |
-
cfg (dict): RPN train configs.
|
502 |
-
unmap_outputs (bool): unmap outputs or not.
|
503 |
-
|
504 |
-
Returns:
|
505 |
-
tuple
|
506 |
-
"""
|
507 |
-
if not inside_flags.any():
|
508 |
-
return (None, ) * 5
|
509 |
-
# assign gt and sample anchors
|
510 |
-
expand_inside_flags = inside_flags[:, None].expand(
|
511 |
-
-1, self.approxs_per_octave).reshape(-1)
|
512 |
-
approxs = flat_approxs[expand_inside_flags, :]
|
513 |
-
squares = flat_squares[inside_flags, :]
|
514 |
-
|
515 |
-
assign_result = self.ga_assigner.assign(approxs, squares,
|
516 |
-
self.approxs_per_octave,
|
517 |
-
gt_bboxes, gt_bboxes_ignore)
|
518 |
-
sampling_result = self.ga_sampler.sample(assign_result, squares,
|
519 |
-
gt_bboxes)
|
520 |
-
|
521 |
-
bbox_anchors = torch.zeros_like(squares)
|
522 |
-
bbox_gts = torch.zeros_like(squares)
|
523 |
-
bbox_weights = torch.zeros_like(squares)
|
524 |
-
|
525 |
-
pos_inds = sampling_result.pos_inds
|
526 |
-
neg_inds = sampling_result.neg_inds
|
527 |
-
if len(pos_inds) > 0:
|
528 |
-
bbox_anchors[pos_inds, :] = sampling_result.pos_bboxes
|
529 |
-
bbox_gts[pos_inds, :] = sampling_result.pos_gt_bboxes
|
530 |
-
bbox_weights[pos_inds, :] = 1.0
|
531 |
-
|
532 |
-
# map up to original set of anchors
|
533 |
-
if unmap_outputs:
|
534 |
-
num_total_anchors = flat_squares.size(0)
|
535 |
-
bbox_anchors = unmap(bbox_anchors, num_total_anchors, inside_flags)
|
536 |
-
bbox_gts = unmap(bbox_gts, num_total_anchors, inside_flags)
|
537 |
-
bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)
|
538 |
-
|
539 |
-
return (bbox_anchors, bbox_gts, bbox_weights, pos_inds, neg_inds)
|
540 |
-
|
541 |
-
def ga_shape_targets(self,
|
542 |
-
approx_list,
|
543 |
-
inside_flag_list,
|
544 |
-
square_list,
|
545 |
-
gt_bboxes_list,
|
546 |
-
img_metas,
|
547 |
-
gt_bboxes_ignore_list=None,
|
548 |
-
unmap_outputs=True):
|
549 |
-
"""Compute guided anchoring targets.
|
550 |
-
|
551 |
-
Args:
|
552 |
-
approx_list (list[list]): Multi level approxs of each image.
|
553 |
-
inside_flag_list (list[list]): Multi level inside flags of each
|
554 |
-
image.
|
555 |
-
square_list (list[list]): Multi level squares of each image.
|
556 |
-
gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.
|
557 |
-
img_metas (list[dict]): Meta info of each image.
|
558 |
-
gt_bboxes_ignore_list (list[Tensor]): ignore list of gt bboxes.
|
559 |
-
unmap_outputs (bool): unmap outputs or not.
|
560 |
-
|
561 |
-
Returns:
|
562 |
-
tuple
|
563 |
-
"""
|
564 |
-
num_imgs = len(img_metas)
|
565 |
-
assert len(approx_list) == len(inside_flag_list) == len(
|
566 |
-
square_list) == num_imgs
|
567 |
-
# anchor number of multi levels
|
568 |
-
num_level_squares = [squares.size(0) for squares in square_list[0]]
|
569 |
-
# concat all level anchors and flags to a single tensor
|
570 |
-
inside_flag_flat_list = []
|
571 |
-
approx_flat_list = []
|
572 |
-
square_flat_list = []
|
573 |
-
for i in range(num_imgs):
|
574 |
-
assert len(square_list[i]) == len(inside_flag_list[i])
|
575 |
-
inside_flag_flat_list.append(torch.cat(inside_flag_list[i]))
|
576 |
-
approx_flat_list.append(torch.cat(approx_list[i]))
|
577 |
-
square_flat_list.append(torch.cat(square_list[i]))
|
578 |
-
|
579 |
-
# compute targets for each image
|
580 |
-
if gt_bboxes_ignore_list is None:
|
581 |
-
gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
|
582 |
-
(all_bbox_anchors, all_bbox_gts, all_bbox_weights, pos_inds_list,
|
583 |
-
neg_inds_list) = multi_apply(
|
584 |
-
self._ga_shape_target_single,
|
585 |
-
approx_flat_list,
|
586 |
-
inside_flag_flat_list,
|
587 |
-
square_flat_list,
|
588 |
-
gt_bboxes_list,
|
589 |
-
gt_bboxes_ignore_list,
|
590 |
-
img_metas,
|
591 |
-
unmap_outputs=unmap_outputs)
|
592 |
-
# no valid anchors
|
593 |
-
if any([bbox_anchors is None for bbox_anchors in all_bbox_anchors]):
|
594 |
-
return None
|
595 |
-
# sampled anchors of all images
|
596 |
-
num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
|
597 |
-
num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
|
598 |
-
# split targets to a list w.r.t. multiple levels
|
599 |
-
bbox_anchors_list = images_to_levels(all_bbox_anchors,
|
600 |
-
num_level_squares)
|
601 |
-
bbox_gts_list = images_to_levels(all_bbox_gts, num_level_squares)
|
602 |
-
bbox_weights_list = images_to_levels(all_bbox_weights,
|
603 |
-
num_level_squares)
|
604 |
-
return (bbox_anchors_list, bbox_gts_list, bbox_weights_list,
|
605 |
-
num_total_pos, num_total_neg)
|
606 |
-
|
607 |
-
def loss_shape_single(self, shape_pred, bbox_anchors, bbox_gts,
|
608 |
-
anchor_weights, anchor_total_num):
|
609 |
-
shape_pred = shape_pred.permute(0, 2, 3, 1).contiguous().view(-1, 2)
|
610 |
-
bbox_anchors = bbox_anchors.contiguous().view(-1, 4)
|
611 |
-
bbox_gts = bbox_gts.contiguous().view(-1, 4)
|
612 |
-
anchor_weights = anchor_weights.contiguous().view(-1, 4)
|
613 |
-
bbox_deltas = bbox_anchors.new_full(bbox_anchors.size(), 0)
|
614 |
-
bbox_deltas[:, 2:] += shape_pred
|
615 |
-
# filter out negative samples to speed-up weighted_bounded_iou_loss
|
616 |
-
inds = torch.nonzero(
|
617 |
-
anchor_weights[:, 0] > 0, as_tuple=False).squeeze(1)
|
618 |
-
bbox_deltas_ = bbox_deltas[inds]
|
619 |
-
bbox_anchors_ = bbox_anchors[inds]
|
620 |
-
bbox_gts_ = bbox_gts[inds]
|
621 |
-
anchor_weights_ = anchor_weights[inds]
|
622 |
-
pred_anchors_ = self.anchor_coder.decode(
|
623 |
-
bbox_anchors_, bbox_deltas_, wh_ratio_clip=1e-6)
|
624 |
-
loss_shape = self.loss_shape(
|
625 |
-
pred_anchors_,
|
626 |
-
bbox_gts_,
|
627 |
-
anchor_weights_,
|
628 |
-
avg_factor=anchor_total_num)
|
629 |
-
return loss_shape
|
630 |
-
|
631 |
-
def loss_loc_single(self, loc_pred, loc_target, loc_weight,
|
632 |
-
loc_avg_factor):
|
633 |
-
loss_loc = self.loss_loc(
|
634 |
-
loc_pred.reshape(-1, 1),
|
635 |
-
loc_target.reshape(-1).long(),
|
636 |
-
loc_weight.reshape(-1),
|
637 |
-
avg_factor=loc_avg_factor)
|
638 |
-
return loss_loc
|
639 |
-
|
640 |
-
@force_fp32(
|
641 |
-
apply_to=('cls_scores', 'bbox_preds', 'shape_preds', 'loc_preds'))
|
642 |
-
def loss(self,
|
643 |
-
cls_scores,
|
644 |
-
bbox_preds,
|
645 |
-
shape_preds,
|
646 |
-
loc_preds,
|
647 |
-
gt_bboxes,
|
648 |
-
gt_labels,
|
649 |
-
img_metas,
|
650 |
-
gt_bboxes_ignore=None):
|
651 |
-
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
|
652 |
-
assert len(featmap_sizes) == self.approx_anchor_generator.num_levels
|
653 |
-
|
654 |
-
device = cls_scores[0].device
|
655 |
-
|
656 |
-
# get loc targets
|
657 |
-
loc_targets, loc_weights, loc_avg_factor = self.ga_loc_targets(
|
658 |
-
gt_bboxes, featmap_sizes)
|
659 |
-
|
660 |
-
# get sampled approxes
|
661 |
-
approxs_list, inside_flag_list = self.get_sampled_approxs(
|
662 |
-
featmap_sizes, img_metas, device=device)
|
663 |
-
# get squares and guided anchors
|
664 |
-
squares_list, guided_anchors_list, _ = self.get_anchors(
|
665 |
-
featmap_sizes, shape_preds, loc_preds, img_metas, device=device)
|
666 |
-
|
667 |
-
# get shape targets
|
668 |
-
shape_targets = self.ga_shape_targets(approxs_list, inside_flag_list,
|
669 |
-
squares_list, gt_bboxes,
|
670 |
-
img_metas)
|
671 |
-
if shape_targets is None:
|
672 |
-
return None
|
673 |
-
(bbox_anchors_list, bbox_gts_list, anchor_weights_list, anchor_fg_num,
|
674 |
-
anchor_bg_num) = shape_targets
|
675 |
-
anchor_total_num = (
|
676 |
-
anchor_fg_num if not self.ga_sampling else anchor_fg_num +
|
677 |
-
anchor_bg_num)
|
678 |
-
|
679 |
-
# get anchor targets
|
680 |
-
label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
|
681 |
-
cls_reg_targets = self.get_targets(
|
682 |
-
guided_anchors_list,
|
683 |
-
inside_flag_list,
|
684 |
-
gt_bboxes,
|
685 |
-
img_metas,
|
686 |
-
gt_bboxes_ignore_list=gt_bboxes_ignore,
|
687 |
-
gt_labels_list=gt_labels,
|
688 |
-
label_channels=label_channels)
|
689 |
-
if cls_reg_targets is None:
|
690 |
-
return None
|
691 |
-
(labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
|
692 |
-
num_total_pos, num_total_neg) = cls_reg_targets
|
693 |
-
num_total_samples = (
|
694 |
-
num_total_pos + num_total_neg if self.sampling else num_total_pos)
|
695 |
-
|
696 |
-
# anchor number of multi levels
|
697 |
-
num_level_anchors = [
|
698 |
-
anchors.size(0) for anchors in guided_anchors_list[0]
|
699 |
-
]
|
700 |
-
# concat all level anchors to a single tensor
|
701 |
-
concat_anchor_list = []
|
702 |
-
for i in range(len(guided_anchors_list)):
|
703 |
-
concat_anchor_list.append(torch.cat(guided_anchors_list[i]))
|
704 |
-
all_anchor_list = images_to_levels(concat_anchor_list,
|
705 |
-
num_level_anchors)
|
706 |
-
|
707 |
-
# get classification and bbox regression losses
|
708 |
-
losses_cls, losses_bbox = multi_apply(
|
709 |
-
self.loss_single,
|
710 |
-
cls_scores,
|
711 |
-
bbox_preds,
|
712 |
-
all_anchor_list,
|
713 |
-
labels_list,
|
714 |
-
label_weights_list,
|
715 |
-
bbox_targets_list,
|
716 |
-
bbox_weights_list,
|
717 |
-
num_total_samples=num_total_samples)
|
718 |
-
|
719 |
-
# get anchor location loss
|
720 |
-
losses_loc = []
|
721 |
-
for i in range(len(loc_preds)):
|
722 |
-
loss_loc = self.loss_loc_single(
|
723 |
-
loc_preds[i],
|
724 |
-
loc_targets[i],
|
725 |
-
loc_weights[i],
|
726 |
-
loc_avg_factor=loc_avg_factor)
|
727 |
-
losses_loc.append(loss_loc)
|
728 |
-
|
729 |
-
# get anchor shape loss
|
730 |
-
losses_shape = []
|
731 |
-
for i in range(len(shape_preds)):
|
732 |
-
loss_shape = self.loss_shape_single(
|
733 |
-
shape_preds[i],
|
734 |
-
bbox_anchors_list[i],
|
735 |
-
bbox_gts_list[i],
|
736 |
-
anchor_weights_list[i],
|
737 |
-
anchor_total_num=anchor_total_num)
|
738 |
-
losses_shape.append(loss_shape)
|
739 |
-
|
740 |
-
return dict(
|
741 |
-
loss_cls=losses_cls,
|
742 |
-
loss_bbox=losses_bbox,
|
743 |
-
loss_shape=losses_shape,
|
744 |
-
loss_loc=losses_loc)
|
745 |
-
|
746 |
-
@force_fp32(
|
747 |
-
apply_to=('cls_scores', 'bbox_preds', 'shape_preds', 'loc_preds'))
|
748 |
-
def get_bboxes(self,
|
749 |
-
cls_scores,
|
750 |
-
bbox_preds,
|
751 |
-
shape_preds,
|
752 |
-
loc_preds,
|
753 |
-
img_metas,
|
754 |
-
cfg=None,
|
755 |
-
rescale=False):
|
756 |
-
assert len(cls_scores) == len(bbox_preds) == len(shape_preds) == len(
|
757 |
-
loc_preds)
|
758 |
-
num_levels = len(cls_scores)
|
759 |
-
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
|
760 |
-
device = cls_scores[0].device
|
761 |
-
# get guided anchors
|
762 |
-
_, guided_anchors, loc_masks = self.get_anchors(
|
763 |
-
featmap_sizes,
|
764 |
-
shape_preds,
|
765 |
-
loc_preds,
|
766 |
-
img_metas,
|
767 |
-
use_loc_filter=not self.training,
|
768 |
-
device=device)
|
769 |
-
result_list = []
|
770 |
-
for img_id in range(len(img_metas)):
|
771 |
-
cls_score_list = [
|
772 |
-
cls_scores[i][img_id].detach() for i in range(num_levels)
|
773 |
-
]
|
774 |
-
bbox_pred_list = [
|
775 |
-
bbox_preds[i][img_id].detach() for i in range(num_levels)
|
776 |
-
]
|
777 |
-
guided_anchor_list = [
|
778 |
-
guided_anchors[img_id][i].detach() for i in range(num_levels)
|
779 |
-
]
|
780 |
-
loc_mask_list = [
|
781 |
-
loc_masks[img_id][i].detach() for i in range(num_levels)
|
782 |
-
]
|
783 |
-
img_shape = img_metas[img_id]['img_shape']
|
784 |
-
scale_factor = img_metas[img_id]['scale_factor']
|
785 |
-
proposals = self._get_bboxes_single(cls_score_list, bbox_pred_list,
|
786 |
-
guided_anchor_list,
|
787 |
-
loc_mask_list, img_shape,
|
788 |
-
scale_factor, cfg, rescale)
|
789 |
-
result_list.append(proposals)
|
790 |
-
return result_list
|
791 |
-
|
792 |
-
def _get_bboxes_single(self,
|
793 |
-
cls_scores,
|
794 |
-
bbox_preds,
|
795 |
-
mlvl_anchors,
|
796 |
-
mlvl_masks,
|
797 |
-
img_shape,
|
798 |
-
scale_factor,
|
799 |
-
cfg,
|
800 |
-
rescale=False):
|
801 |
-
cfg = self.test_cfg if cfg is None else cfg
|
802 |
-
assert len(cls_scores) == len(bbox_preds) == len(mlvl_anchors)
|
803 |
-
mlvl_bboxes = []
|
804 |
-
mlvl_scores = []
|
805 |
-
for cls_score, bbox_pred, anchors, mask in zip(cls_scores, bbox_preds,
|
806 |
-
mlvl_anchors,
|
807 |
-
mlvl_masks):
|
808 |
-
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
|
809 |
-
# if no location is kept, end.
|
810 |
-
if mask.sum() == 0:
|
811 |
-
continue
|
812 |
-
# reshape scores and bbox_pred
|
813 |
-
cls_score = cls_score.permute(1, 2,
|
814 |
-
0).reshape(-1, self.cls_out_channels)
|
815 |
-
if self.use_sigmoid_cls:
|
816 |
-
scores = cls_score.sigmoid()
|
817 |
-
else:
|
818 |
-
scores = cls_score.softmax(-1)
|
819 |
-
bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
|
820 |
-
# filter scores, bbox_pred w.r.t. mask.
|
821 |
-
# anchors are filtered in get_anchors() beforehand.
|
822 |
-
scores = scores[mask, :]
|
823 |
-
bbox_pred = bbox_pred[mask, :]
|
824 |
-
if scores.dim() == 0:
|
825 |
-
anchors = anchors.unsqueeze(0)
|
826 |
-
scores = scores.unsqueeze(0)
|
827 |
-
bbox_pred = bbox_pred.unsqueeze(0)
|
828 |
-
# filter anchors, bbox_pred, scores w.r.t. scores
|
829 |
-
nms_pre = cfg.get('nms_pre', -1)
|
830 |
-
if nms_pre > 0 and scores.shape[0] > nms_pre:
|
831 |
-
if self.use_sigmoid_cls:
|
832 |
-
max_scores, _ = scores.max(dim=1)
|
833 |
-
else:
|
834 |
-
# remind that we set FG labels to [0, num_class-1]
|
835 |
-
# since mmdet v2.0
|
836 |
-
# BG cat_id: num_class
|
837 |
-
max_scores, _ = scores[:, :-1].max(dim=1)
|
838 |
-
_, topk_inds = max_scores.topk(nms_pre)
|
839 |
-
anchors = anchors[topk_inds, :]
|
840 |
-
bbox_pred = bbox_pred[topk_inds, :]
|
841 |
-
scores = scores[topk_inds, :]
|
842 |
-
bboxes = self.bbox_coder.decode(
|
843 |
-
anchors, bbox_pred, max_shape=img_shape)
|
844 |
-
mlvl_bboxes.append(bboxes)
|
845 |
-
mlvl_scores.append(scores)
|
846 |
-
mlvl_bboxes = torch.cat(mlvl_bboxes)
|
847 |
-
if rescale:
|
848 |
-
mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
|
849 |
-
mlvl_scores = torch.cat(mlvl_scores)
|
850 |
-
if self.use_sigmoid_cls:
|
851 |
-
# Add a dummy background class to the backend when using sigmoid
|
852 |
-
# remind that we set FG labels to [0, num_class-1] since mmdet v2.0
|
853 |
-
# BG cat_id: num_class
|
854 |
-
padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
|
855 |
-
mlvl_scores = torch.cat([mlvl_scores, padding], dim=1)
|
856 |
-
# multi class NMS
|
857 |
-
det_bboxes, det_labels = multiclass_nms(mlvl_bboxes, mlvl_scores,
|
858 |
-
cfg.score_thr, cfg.nms,
|
859 |
-
cfg.max_per_img)
|
860 |
-
return det_bboxes, det_labels
|
spaces/Andy1621/uniformer_image_detection/tools/misc/browse_dataset.py
DELETED
@@ -1,96 +0,0 @@
-import argparse
-import os
-from pathlib import Path
-
-import mmcv
-from mmcv import Config, DictAction
-
-from mmdet.core.utils import mask2ndarray
-from mmdet.core.visualization import imshow_det_bboxes
-from mmdet.datasets.builder import build_dataset
-
-
-def parse_args():
-    parser = argparse.ArgumentParser(description='Browse a dataset')
-    parser.add_argument('config', help='train config file path')
-    parser.add_argument(
-        '--skip-type',
-        type=str,
-        nargs='+',
-        default=['DefaultFormatBundle', 'Normalize', 'Collect'],
-        help='skip some useless pipeline')
-    parser.add_argument(
-        '--output-dir',
-        default=None,
-        type=str,
-        help='If there is no display interface, you can save it')
-    parser.add_argument('--not-show', default=False, action='store_true')
-    parser.add_argument(
-        '--show-interval',
-        type=float,
-        default=2,
-        help='the interval of show (s)')
-    parser.add_argument(
-        '--cfg-options',
-        nargs='+',
-        action=DictAction,
-        help='override some settings in the used config, the key-value pair '
-        'in xxx=yyy format will be merged into config file. If the value to '
-        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
-        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
-        'Note that the quotation marks are necessary and that no white space '
-        'is allowed.')
-    args = parser.parse_args()
-    return args
-
-
-def retrieve_data_cfg(config_path, skip_type, cfg_options):
-    cfg = Config.fromfile(config_path)
-    if cfg_options is not None:
-        cfg.merge_from_dict(cfg_options)
-    # import modules from string list.
-    if cfg.get('custom_imports', None):
-        from mmcv.utils import import_modules_from_strings
-        import_modules_from_strings(**cfg['custom_imports'])
-    train_data_cfg = cfg.data.train
-    train_data_cfg['pipeline'] = [
-        x for x in train_data_cfg.pipeline if x['type'] not in skip_type
-    ]
-
-    return cfg
-
-
-def main():
-    args = parse_args()
-    cfg = retrieve_data_cfg(args.config, args.skip_type, args.cfg_options)
-
-    dataset = build_dataset(cfg.data.train)
-
-    progress_bar = mmcv.ProgressBar(len(dataset))
-
-    for item in dataset:
-        filename = os.path.join(args.output_dir,
-                                Path(item['filename']).name
-                                ) if args.output_dir is not None else None
-
-        gt_masks = item.get('gt_masks', None)
-        if gt_masks is not None:
-            gt_masks = mask2ndarray(gt_masks)
-
-        imshow_det_bboxes(
-            item['img'],
-            item['gt_bboxes'],
-            item['gt_labels'],
-            gt_masks,
-            class_names=dataset.CLASSES,
-            show=not args.not_show,
-            wait_time=args.show_interval,
-            out_file=filename,
-            bbox_color=(255, 102, 61),
-            text_color=(255, 102, 61))
-
-        progress_bar.update()
-
-
-if __name__ == '__main__':
-    main()

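The core of the deleted tool is the pipeline filtering in retrieve_data_cfg(): formatting-only steps are dropped so raw images and annotations can be visualised. A minimal, dependency-free sketch of that idea follows; the pipeline list is invented for illustration and is not taken from any real config.

    # Drop formatting-only pipeline steps before visualisation, mirroring the
    # list comprehension in the deleted retrieve_data_cfg().
    skip_type = ['DefaultFormatBundle', 'Normalize', 'Collect']
    pipeline = [
        {'type': 'LoadImageFromFile'},
        {'type': 'LoadAnnotations', 'with_bbox': True},
        {'type': 'Resize', 'img_scale': (1333, 800), 'keep_ratio': True},
        {'type': 'Normalize'},
        {'type': 'DefaultFormatBundle'},
        {'type': 'Collect'},
    ]
    visual_pipeline = [step for step in pipeline if step['type'] not in skip_type]
    print([step['type'] for step in visual_pipeline])
    # ['LoadImageFromFile', 'LoadAnnotations', 'Resize']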
spaces/Andy1621/uniformer_image_segmentation/configs/fp16/pspnet_r101-d8_512x1024_80k_fp16_cityscapes.py
DELETED
@@ -1,5 +0,0 @@
-_base_ = '../pspnet/pspnet_r101-d8_512x1024_80k_cityscapes.py'
-# fp16 settings
-optimizer_config = dict(type='Fp16OptimizerHook', loss_scale=512.)
-# fp16 placeholder
-fp16 = dict()

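This config only requests a static loss scale of 512; the scaling itself is handled by the framework's Fp16OptimizerHook. A rough plain-PyTorch sketch of what a static loss scale does during one training step is shown below; the tiny linear model is purely illustrative and is not part of the deleted config.

    import torch

    model = torch.nn.Linear(4, 1)
    opt = torch.optim.SGD(model.parameters(), lr=0.1)
    loss_scale = 512.0

    loss = model(torch.randn(8, 4)).pow(2).mean()
    (loss * loss_scale).backward()       # scale the loss before backward
    for p in model.parameters():
        p.grad /= loss_scale             # unscale gradients before the update
    opt.step()
    opt.zero_grad()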
spaces/AnnasBlackHat/Image-Similarity/src/model/similarity_interface.py
DELETED
@@ -1,3 +0,0 @@
-class SimilarityInterface:
-    def extract_feature(img):
-        return []

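The deleted interface only fixes the extract_feature signature; concrete models in the space are expected to subclass it. A hypothetical subclass is sketched below: the grayscale-histogram feature is purely illustrative, and self is added to the method, which the original interface omits.

    import numpy as np

    class SimilarityInterface:                 # same shape as the deleted interface
        def extract_feature(self, img):
            return []

    class GrayHistogramModel(SimilarityInterface):   # hypothetical implementation
        def extract_feature(self, img):
            hist, _ = np.histogram(np.asarray(img, dtype=np.uint8), bins=32, range=(0, 255))
            total = hist.sum()
            return (hist / total).tolist() if total else hist.tolist()

    print(len(GrayHistogramModel().extract_feature(np.zeros((8, 8)))))   # 32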
spaces/Anonymous-sub/Rerender/ControlNet/ldm/models/diffusion/plms.py
DELETED
@@ -1,244 +0,0 @@
-"""SAMPLING ONLY."""
-
-import torch
-import numpy as np
-from tqdm import tqdm
-from functools import partial
-
-from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like
-from ldm.models.diffusion.sampling_util import norm_thresholding
-
-
-class PLMSSampler(object):
-    def __init__(self, model, schedule="linear", **kwargs):
-        super().__init__()
-        self.model = model
-        self.ddpm_num_timesteps = model.num_timesteps
-        self.schedule = schedule
-
-    def register_buffer(self, name, attr):
-        if type(attr) == torch.Tensor:
-            if attr.device != torch.device("cuda"):
-                attr = attr.to(torch.device("cuda"))
-        setattr(self, name, attr)
-
-    def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
-        if ddim_eta != 0:
-            raise ValueError('ddim_eta must be 0 for PLMS')
-        self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
-                                                  num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)
-        alphas_cumprod = self.model.alphas_cumprod
-        assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'
-        to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)
-
-        self.register_buffer('betas', to_torch(self.model.betas))
-        self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
-        self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))
-
-        # calculations for diffusion q(x_t | x_{t-1}) and others
-        self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))
-        self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))
-        self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))
-        self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))
-        self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))
-
-        # ddim sampling parameters
-        ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),
-                                                                                   ddim_timesteps=self.ddim_timesteps,
-                                                                                   eta=ddim_eta,verbose=verbose)
-        self.register_buffer('ddim_sigmas', ddim_sigmas)
-        self.register_buffer('ddim_alphas', ddim_alphas)
-        self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
-        self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))
-        sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(
-            (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (
-                1 - self.alphas_cumprod / self.alphas_cumprod_prev))
-        self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)
-
-    @torch.no_grad()
-    def sample(self,
-               S,
-               batch_size,
-               shape,
-               conditioning=None,
-               callback=None,
-               normals_sequence=None,
-               img_callback=None,
-               quantize_x0=False,
-               eta=0.,
-               mask=None,
-               x0=None,
-               temperature=1.,
-               noise_dropout=0.,
-               score_corrector=None,
-               corrector_kwargs=None,
-               verbose=True,
-               x_T=None,
-               log_every_t=100,
-               unconditional_guidance_scale=1.,
-               unconditional_conditioning=None,
-               # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
-               dynamic_threshold=None,
-               **kwargs
-               ):
-        if conditioning is not None:
-            if isinstance(conditioning, dict):
-                cbs = conditioning[list(conditioning.keys())[0]].shape[0]
-                if cbs != batch_size:
-                    print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
-            else:
-                if conditioning.shape[0] != batch_size:
-                    print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")
-
-        self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
-        # sampling
-        C, H, W = shape
-        size = (batch_size, C, H, W)
-        print(f'Data shape for PLMS sampling is {size}')
-
-        samples, intermediates = self.plms_sampling(conditioning, size,
-                                                    callback=callback,
-                                                    img_callback=img_callback,
-                                                    quantize_denoised=quantize_x0,
-                                                    mask=mask, x0=x0,
-                                                    ddim_use_original_steps=False,
-                                                    noise_dropout=noise_dropout,
-                                                    temperature=temperature,
-                                                    score_corrector=score_corrector,
-                                                    corrector_kwargs=corrector_kwargs,
-                                                    x_T=x_T,
-                                                    log_every_t=log_every_t,
-                                                    unconditional_guidance_scale=unconditional_guidance_scale,
-                                                    unconditional_conditioning=unconditional_conditioning,
-                                                    dynamic_threshold=dynamic_threshold,
-                                                    )
-        return samples, intermediates
-
-    @torch.no_grad()
-    def plms_sampling(self, cond, shape,
-                      x_T=None, ddim_use_original_steps=False,
-                      callback=None, timesteps=None, quantize_denoised=False,
-                      mask=None, x0=None, img_callback=None, log_every_t=100,
-                      temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
-                      unconditional_guidance_scale=1., unconditional_conditioning=None,
-                      dynamic_threshold=None):
-        device = self.model.betas.device
-        b = shape[0]
-        if x_T is None:
-            img = torch.randn(shape, device=device)
-        else:
-            img = x_T
-
-        if timesteps is None:
-            timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps
-        elif timesteps is not None and not ddim_use_original_steps:
-            subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1
-            timesteps = self.ddim_timesteps[:subset_end]
-
-        intermediates = {'x_inter': [img], 'pred_x0': [img]}
-        time_range = list(reversed(range(0,timesteps))) if ddim_use_original_steps else np.flip(timesteps)
-        total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]
-        print(f"Running PLMS Sampling with {total_steps} timesteps")
-
-        iterator = tqdm(time_range, desc='PLMS Sampler', total=total_steps)
-        old_eps = []
-
-        for i, step in enumerate(iterator):
-            index = total_steps - i - 1
-            ts = torch.full((b,), step, device=device, dtype=torch.long)
-            ts_next = torch.full((b,), time_range[min(i + 1, len(time_range) - 1)], device=device, dtype=torch.long)
-
-            if mask is not None:
-                assert x0 is not None
-                img_orig = self.model.q_sample(x0, ts)  # TODO: deterministic forward pass?
-                img = img_orig * mask + (1. - mask) * img
-
-            outs = self.p_sample_plms(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,
-                                      quantize_denoised=quantize_denoised, temperature=temperature,
-                                      noise_dropout=noise_dropout, score_corrector=score_corrector,
-                                      corrector_kwargs=corrector_kwargs,
-                                      unconditional_guidance_scale=unconditional_guidance_scale,
-                                      unconditional_conditioning=unconditional_conditioning,
-                                      old_eps=old_eps, t_next=ts_next,
-                                      dynamic_threshold=dynamic_threshold)
-            img, pred_x0, e_t = outs
-            old_eps.append(e_t)
-            if len(old_eps) >= 4:
-                old_eps.pop(0)
-            if callback: callback(i)
-            if img_callback: img_callback(pred_x0, i)
-
-            if index % log_every_t == 0 or index == total_steps - 1:
-                intermediates['x_inter'].append(img)
-                intermediates['pred_x0'].append(pred_x0)
-
-        return img, intermediates
-
-    @torch.no_grad()
-    def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
-                      temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
-                      unconditional_guidance_scale=1., unconditional_conditioning=None, old_eps=None, t_next=None,
-                      dynamic_threshold=None):
-        b, *_, device = *x.shape, x.device
-
-        def get_model_output(x, t):
-            if unconditional_conditioning is None or unconditional_guidance_scale == 1.:
-                e_t = self.model.apply_model(x, t, c)
-            else:
-                x_in = torch.cat([x] * 2)
-                t_in = torch.cat([t] * 2)
-                c_in = torch.cat([unconditional_conditioning, c])
-                e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)
-                e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)
-
-            if score_corrector is not None:
-                assert self.model.parameterization == "eps"
-                e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)
-
-            return e_t
-
-        alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
-        alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev
-        sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas
-        sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas
-
-        def get_x_prev_and_pred_x0(e_t, index):
-            # select parameters corresponding to the currently considered timestep
-            a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
-            a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
-            sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
-            sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)
-
-            # current prediction for x_0
-            pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
-            if quantize_denoised:
-                pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)
-            if dynamic_threshold is not None:
-                pred_x0 = norm_thresholding(pred_x0, dynamic_threshold)
-            # direction pointing to x_t
-            dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t
-            noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
-            if noise_dropout > 0.:
-                noise = torch.nn.functional.dropout(noise, p=noise_dropout)
-            x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
-            return x_prev, pred_x0
-
-        e_t = get_model_output(x, t)
-        if len(old_eps) == 0:
-            # Pseudo Improved Euler (2nd order)
-            x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index)
-            e_t_next = get_model_output(x_prev, t_next)
-            e_t_prime = (e_t + e_t_next) / 2
-        elif len(old_eps) == 1:
-            # 2nd order Pseudo Linear Multistep (Adams-Bashforth)
-            e_t_prime = (3 * e_t - old_eps[-1]) / 2
-        elif len(old_eps) == 2:
-            # 3nd order Pseudo Linear Multistep (Adams-Bashforth)
-            e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12
-        elif len(old_eps) >= 3:
-            # 4nd order Pseudo Linear Multistep (Adams-Bashforth)
-            e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24
-
-        x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index)
-
-        return x_prev, pred_x0, e_t

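The distinctive part of this sampler is the pseudo linear multistep update: the last few noise predictions are combined with Adams-Bashforth weights (2nd to 4th order) before the usual DDIM-style step. The toy integrator below applies the same weight schedule to dy/dt = -y to show the bookkeeping; it is not a diffusion model and none of it comes from the deleted file.

    import numpy as np

    def plms_combine(e, old):
        # Same coefficient schedule as p_sample_plms(), minus the two-evaluation start.
        if len(old) == 0:
            return e
        if len(old) == 1:
            return (3 * e - old[-1]) / 2
        if len(old) == 2:
            return (23 * e - 16 * old[-1] + 5 * old[-2]) / 12
        return (55 * e - 59 * old[-1] + 37 * old[-2] - 9 * old[-3]) / 24

    f = lambda y: -y          # dy/dt = -y, exact solution exp(-t)
    y, h, old = 1.0, 0.01, []
    for _ in range(100):
        e = f(y)
        y += h * plms_combine(e, old)
        old.append(e)
        if len(old) >= 4:     # keep at most three past predictions, like old_eps
            old.pop(0)
    print(y, np.exp(-1.0))    # both close to 0.3679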
spaces/AriaMei/TTSdemo/text/japanese.py
DELETED
@@ -1,153 +0,0 @@
-import re
-from unidecode import unidecode
-import pyopenjtalk
-
-
-# Regular expression matching Japanese without punctuation marks:
-_japanese_characters = re.compile(
-    r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
-
-# Regular expression matching non-Japanese characters or punctuation marks:
-_japanese_marks = re.compile(
-    r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
-
-# List of (symbol, Japanese) pairs for marks:
-_symbols_to_japanese = [(re.compile('%s' % x[0]), x[1]) for x in [
-    ('%', 'パーセント')
-]]
-
-# List of (romaji, ipa) pairs for marks:
-_romaji_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [
-    ('ts', 'ʦ'),
-    ('u', 'ɯ'),
-    ('j', 'ʥ'),
-    ('y', 'j'),
-    ('ni', 'n^i'),
-    ('nj', 'n^'),
-    ('hi', 'çi'),
-    ('hj', 'ç'),
-    ('f', 'ɸ'),
-    ('I', 'i*'),
-    ('U', 'ɯ*'),
-    ('r', 'ɾ')
-]]
-
-# List of (romaji, ipa2) pairs for marks:
-_romaji_to_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [
-    ('u', 'ɯ'),
-    ('ʧ', 'tʃ'),
-    ('j', 'dʑ'),
-    ('y', 'j'),
-    ('ni', 'n^i'),
-    ('nj', 'n^'),
-    ('hi', 'çi'),
-    ('hj', 'ç'),
-    ('f', 'ɸ'),
-    ('I', 'i*'),
-    ('U', 'ɯ*'),
-    ('r', 'ɾ')
-]]
-
-# List of (consonant, sokuon) pairs:
-_real_sokuon = [(re.compile('%s' % x[0]), x[1]) for x in [
-    (r'Q([↑↓]*[kg])', r'k#\1'),
-    (r'Q([↑↓]*[tdjʧ])', r't#\1'),
-    (r'Q([↑↓]*[sʃ])', r's\1'),
-    (r'Q([↑↓]*[pb])', r'p#\1')
-]]
-
-# List of (consonant, hatsuon) pairs:
-_real_hatsuon = [(re.compile('%s' % x[0]), x[1]) for x in [
-    (r'N([↑↓]*[pbm])', r'm\1'),
-    (r'N([↑↓]*[ʧʥj])', r'n^\1'),
-    (r'N([↑↓]*[tdn])', r'n\1'),
-    (r'N([↑↓]*[kg])', r'ŋ\1')
-]]
-
-
-def symbols_to_japanese(text):
-    for regex, replacement in _symbols_to_japanese:
-        text = re.sub(regex, replacement, text)
-    return text
-
-
-def japanese_to_romaji_with_accent(text):
-    '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html'''
-    text = symbols_to_japanese(text)
-    sentences = re.split(_japanese_marks, text)
-    marks = re.findall(_japanese_marks, text)
-    text = ''
-    for i, sentence in enumerate(sentences):
-        if re.match(_japanese_characters, sentence):
-            if text != '':
-                text += ' '
-            labels = pyopenjtalk.extract_fullcontext(sentence)
-            for n, label in enumerate(labels):
-                phoneme = re.search(r'\-([^\+]*)\+', label).group(1)
-                if phoneme not in ['sil', 'pau']:
-                    text += phoneme.replace('ch', 'ʧ').replace('sh',
-                                                               'ʃ').replace('cl', 'Q')
-                else:
-                    continue
-                # n_moras = int(re.search(r'/F:(\d+)_', label).group(1))
-                a1 = int(re.search(r"/A:(\-?[0-9]+)\+", label).group(1))
-                a2 = int(re.search(r"\+(\d+)\+", label).group(1))
-                a3 = int(re.search(r"\+(\d+)/", label).group(1))
-                if re.search(r'\-([^\+]*)\+', labels[n + 1]).group(1) in ['sil', 'pau']:
-                    a2_next = -1
-                else:
-                    a2_next = int(
-                        re.search(r"\+(\d+)\+", labels[n + 1]).group(1))
-                # Accent phrase boundary
-                if a3 == 1 and a2_next == 1:
-                    text += ' '
-                # Falling
-                elif a1 == 0 and a2_next == a2 + 1:
-                    text += '↓'
-                # Rising
-                elif a2 == 1 and a2_next == 2:
-                    text += '↑'
-        if i < len(marks):
-            text += unidecode(marks[i]).replace(' ', '')
-    return text
-
-
-def get_real_sokuon(text):
-    for regex, replacement in _real_sokuon:
-        text = re.sub(regex, replacement, text)
-    return text
-
-
-def get_real_hatsuon(text):
-    for regex, replacement in _real_hatsuon:
-        text = re.sub(regex, replacement, text)
-    return text
-
-
-def japanese_to_ipa(text):
-    text = japanese_to_romaji_with_accent(text).replace('...', '…')
-    text = re.sub(
-        r'([aiueo])\1+', lambda x: x.group(0)[0]+'ː'*(len(x.group(0))-1), text)
-    text = get_real_sokuon(text)
-    text = get_real_hatsuon(text)
-    for regex, replacement in _romaji_to_ipa:
-        text = re.sub(regex, replacement, text)
-    return text
-
-
-def japanese_to_ipa2(text):
-    text = japanese_to_romaji_with_accent(text).replace('...', '…')
-    text = get_real_sokuon(text)
-    text = get_real_hatsuon(text)
-    for regex, replacement in _romaji_to_ipa2:
-        text = re.sub(regex, replacement, text)
-    return text
-
-
-def japanese_to_ipa3(text):
-    text = japanese_to_ipa2(text).replace('n^', 'ȵ').replace(
-        'ʃ', 'ɕ').replace('*', '\u0325').replace('#', '\u031a')
-    text = re.sub(
-        r'([aiɯeo])\1+', lambda x: x.group(0)[0]+'ː'*(len(x.group(0))-1), text)
-    text = re.sub(r'((?:^|\s)(?:ts|tɕ|[kpt]))', r'\1ʰ', text)
-    return text

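Most of this module is regex-driven post-processing of the romaji string produced by pyopenjtalk. One representative piece, the long-vowel collapse used in japanese_to_ipa(), can be run stand-alone; the example word below is illustrative only.

    import re

    def collapse_long_vowels(text):
        # 'oo' -> 'oː', 'aaa' -> 'aːː', the same substitution as in japanese_to_ipa()
        return re.sub(r'([aiueo])\1+',
                      lambda m: m.group(0)[0] + 'ː' * (len(m.group(0)) - 1),
                      text)

    print(collapse_long_vowels('tookyoo'))   # -> toːkyoː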
spaces/Ataturk-Chatbot/HuggingFaceChat/app.py
DELETED
@@ -1,119 +0,0 @@
-import gradio as gr
-import transformers
-import torch
-from dotenv import load_dotenv
-from transformers import AutoTokenizer
-from transformers import pipeline
-
-
-
-load_dotenv()
-model = "meta-llama/Llama-2-7b-chat-hf"  # meta-llama/Llama-2-7b-chat-hf
-
-tokenizer = AutoTokenizer.from_pretrained(model, use_auth_token=True)
-
-llama_pipeline = pipeline(
-    "text-generation",  # LLM task
-    model=model,
-    torch_dtype=torch.float16,
-    device_map="auto",
-)
-
-
-def get_response(prompt: str) -> None:
-    """
-    Generate a response from the Llama model.
-
-    Parameters:
-        prompt (str): The user's input/question for the model.
-
-    Returns:
-        None: Prints the model's response.
-    """
-    sequences = llama_pipeline(
-        prompt,
-        do_sample=True,
-        top_k=10,
-        num_return_sequences=1,
-        eos_token_id=tokenizer.eos_token_id,
-        max_length=256,
-    )
-    print("Chatbot:", sequences[0]['generated_text'])
-
-SYSTEM_PROMPT = """<s>[INST] <<SYS>>
-You are a helpful bot. Your answers are clear and concise.
-<</SYS>>
-
-"""
-
-# Formatting function for message and history
-def format_message(message: str, history: list, memory_limit: int = 3) -> str:
-    """
-    Formats the message and history for the Llama model.
-
-    Parameters:
-        message (str): Current message to send.
-        history (list): Past conversation history.
-        memory_limit (int): Limit on how many past interactions to consider.
-
-    Returns:
-        str: Formatted message string
-    """
-    # always keep len(history) <= memory_limit
-    if len(history) > memory_limit:
-        history = history[-memory_limit:]
-
-    if len(history) == 0:
-        return SYSTEM_PROMPT + f"{message} [/INST]"
-
-    formatted_message = SYSTEM_PROMPT + f"{history[0][0]} [/INST] {history[0][1]} </s>"
-
-    # Handle conversation history
-    for user_msg, model_answer in history[1:]:
-        formatted_message += f"<s>[INST] {user_msg} [/INST] {model_answer} </s>"
-
-    # Handle the current message
-    formatted_message += f"<s>[INST] {message} [/INST]"
-
-    return formatted_message
-
-
-
-def get_llama_response(message: str, history: list) -> str:
-    """
-    Generates a conversational response from the Llama model.
-
-    Parameters:
-        message (str): User's input message.
-        history (list): Past conversation history.
-
-    Returns:
-        str: Generated response from the Llama model.
-    """
-    query = format_message(message, history)
-    response = ""
-
-    sequences = llama_pipeline(
-        query,
-        do_sample=True,
-        top_k=10,
-        num_return_sequences=1,
-        eos_token_id=tokenizer.eos_token_id,
-        max_length=1024,
-    )
-
-    generated_text = sequences[0]['generated_text']
-    response = generated_text[len(query):]  # Remove the prompt from the output
-
-    print("Chatbot:", response.strip())
-    return response.strip()
-
-
-def greet(name):
-    return "Hello " + name + "!!"
-gr.ChatInterface(get_llama_response).launch()
-#iface = gr.Interface(fn=greet, inputs="text", outputs="text")
-#iface = gr.ChatInterface(get_llama_response).launch()
-#iface.launch()
-
-

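The prompt construction in format_message() is independent of the model and can be checked without loading Llama 2. The condensed restatement below follows the same layout as the function above (system block, alternating [INST] turns, trailing user turn); the history and question are invented for illustration.

    # Same prompt layout as the deleted format_message(), with the system block
    # written using explicit newlines instead of a triple-quoted string.
    SYSTEM_PROMPT = ("<s>[INST] <<SYS>>\n"
                     "You are a helpful bot. Your answers are clear and concise.\n"
                     "<</SYS>>\n\n")

    def format_message(message, history, memory_limit=3):
        history = history[-memory_limit:]             # keep at most memory_limit turns
        if not history:
            return SYSTEM_PROMPT + f"{message} [/INST]"
        out = SYSTEM_PROMPT + f"{history[0][0]} [/INST] {history[0][1]} </s>"
        for user_msg, model_answer in history[1:]:
            out += f"<s>[INST] {user_msg} [/INST] {model_answer} </s>"
        return out + f"<s>[INST] {message} [/INST]"

    print(format_message("What can you do?", [("Hi", "Hello! How can I help?")]))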
spaces/Awesimo/jojogan/e4e/configs/transforms_config.py
DELETED
@@ -1,62 +0,0 @@
-from abc import abstractmethod
-import torchvision.transforms as transforms
-
-
-class TransformsConfig(object):
-
-    def __init__(self, opts):
-        self.opts = opts
-
-    @abstractmethod
-    def get_transforms(self):
-        pass
-
-
-class EncodeTransforms(TransformsConfig):
-
-    def __init__(self, opts):
-        super(EncodeTransforms, self).__init__(opts)
-
-    def get_transforms(self):
-        transforms_dict = {
-            'transform_gt_train': transforms.Compose([
-                transforms.Resize((256, 256)),
-                transforms.RandomHorizontalFlip(0.5),
-                transforms.ToTensor(),
-                transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]),
-            'transform_source': None,
-            'transform_test': transforms.Compose([
-                transforms.Resize((256, 256)),
-                transforms.ToTensor(),
-                transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]),
-            'transform_inference': transforms.Compose([
-                transforms.Resize((256, 256)),
-                transforms.ToTensor(),
-                transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
-        }
-        return transforms_dict
-
-
-class CarsEncodeTransforms(TransformsConfig):
-
-    def __init__(self, opts):
-        super(CarsEncodeTransforms, self).__init__(opts)
-
-    def get_transforms(self):
-        transforms_dict = {
-            'transform_gt_train': transforms.Compose([
-                transforms.Resize((192, 256)),
-                transforms.RandomHorizontalFlip(0.5),
-                transforms.ToTensor(),
-                transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]),
-            'transform_source': None,
-            'transform_test': transforms.Compose([
-                transforms.Resize((192, 256)),
-                transforms.ToTensor(),
-                transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]),
-            'transform_inference': transforms.Compose([
-                transforms.Resize((192, 256)),
-                transforms.ToTensor(),
-                transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
-        }
-        return transforms_dict

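Each entry in the returned dictionary is an ordinary torchvision pipeline, so its effect can be checked on a throwaway image; the solid-colour image below is just a placeholder, not data from the repository.

    import torchvision.transforms as transforms
    from PIL import Image

    test_tf = transforms.Compose([
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
    ])
    img = Image.new("RGB", (640, 480), color=(128, 64, 32))
    tensor = test_tf(img)
    print(tensor.shape)                              # torch.Size([3, 256, 256])
    print(tensor.min().item(), tensor.max().item())  # roughly -0.75 and 0.0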
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/transforms/transform.py
DELETED
@@ -1,351 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) Facebook, Inc. and its affiliates.
-
-"""
-See "Data Augmentation" tutorial for an overview of the system:
-https://detectron2.readthedocs.io/tutorials/augmentation.html
-"""
-
-import numpy as np
-import torch
-import torch.nn.functional as F
-from fvcore.transforms.transform import (
-    CropTransform,
-    HFlipTransform,
-    NoOpTransform,
-    Transform,
-    TransformList,
-)
-from PIL import Image
-
-try:
-    import cv2  # noqa
-except ImportError:
-    # OpenCV is an optional dependency at the moment
-    pass
-
-__all__ = [
-    "ExtentTransform",
-    "ResizeTransform",
-    "RotationTransform",
-    "ColorTransform",
-    "PILColorTransform",
-]
-
-
-class ExtentTransform(Transform):
-    """
-    Extracts a subregion from the source image and scales it to the output size.
-
-    The fill color is used to map pixels from the source rect that fall outside
-    the source image.
-
-    See: https://pillow.readthedocs.io/en/latest/PIL.html#PIL.ImageTransform.ExtentTransform
-    """
-
-    def __init__(self, src_rect, output_size, interp=Image.LINEAR, fill=0):
-        """
-        Args:
-            src_rect (x0, y0, x1, y1): src coordinates
-            output_size (h, w): dst image size
-            interp: PIL interpolation methods
-            fill: Fill color used when src_rect extends outside image
-        """
-        super().__init__()
-        self._set_attributes(locals())
-
-    def apply_image(self, img, interp=None):
-        h, w = self.output_size
-        if len(img.shape) > 2 and img.shape[2] == 1:
-            pil_image = Image.fromarray(img[:, :, 0], mode="L")
-        else:
-            pil_image = Image.fromarray(img)
-        pil_image = pil_image.transform(
-            size=(w, h),
-            method=Image.EXTENT,
-            data=self.src_rect,
-            resample=interp if interp else self.interp,
-            fill=self.fill,
-        )
-        ret = np.asarray(pil_image)
-        if len(img.shape) > 2 and img.shape[2] == 1:
-            ret = np.expand_dims(ret, -1)
-        return ret
-
-    def apply_coords(self, coords):
-        # Transform image center from source coordinates into output coordinates
-        # and then map the new origin to the corner of the output image.
-        h, w = self.output_size
-        x0, y0, x1, y1 = self.src_rect
-        new_coords = coords.astype(np.float32)
-        new_coords[:, 0] -= 0.5 * (x0 + x1)
-        new_coords[:, 1] -= 0.5 * (y0 + y1)
-        new_coords[:, 0] *= w / (x1 - x0)
-        new_coords[:, 1] *= h / (y1 - y0)
-        new_coords[:, 0] += 0.5 * w
-        new_coords[:, 1] += 0.5 * h
-        return new_coords
-
-    def apply_segmentation(self, segmentation):
-        segmentation = self.apply_image(segmentation, interp=Image.NEAREST)
-        return segmentation
-
-
-class ResizeTransform(Transform):
-    """
-    Resize the image to a target size.
-    """
-
-    def __init__(self, h, w, new_h, new_w, interp=None):
-        """
-        Args:
-            h, w (int): original image size
-            new_h, new_w (int): new image size
-            interp: PIL interpolation methods, defaults to bilinear.
-        """
-        # TODO decide on PIL vs opencv
-        super().__init__()
-        if interp is None:
-            interp = Image.BILINEAR
-        self._set_attributes(locals())
-
-    def apply_image(self, img, interp=None):
-        assert img.shape[:2] == (self.h, self.w)
-        assert len(img.shape) <= 4
-        interp_method = interp if interp is not None else self.interp
-
-        if img.dtype == np.uint8:
-            if len(img.shape) > 2 and img.shape[2] == 1:
-                pil_image = Image.fromarray(img[:, :, 0], mode="L")
-            else:
-                pil_image = Image.fromarray(img)
-            pil_image = pil_image.resize((self.new_w, self.new_h), interp_method)
-            ret = np.asarray(pil_image)
-            if len(img.shape) > 2 and img.shape[2] == 1:
-                ret = np.expand_dims(ret, -1)
-        else:
-            # PIL only supports uint8
-            if any(x < 0 for x in img.strides):
-                img = np.ascontiguousarray(img)
-            img = torch.from_numpy(img)
-            shape = list(img.shape)
-            shape_4d = shape[:2] + [1] * (4 - len(shape)) + shape[2:]
-            img = img.view(shape_4d).permute(2, 3, 0, 1)  # hw(c) -> nchw
-            _PIL_RESIZE_TO_INTERPOLATE_MODE = {
-                Image.NEAREST: "nearest",
-                Image.BILINEAR: "bilinear",
-                Image.BICUBIC: "bicubic",
-            }
-            mode = _PIL_RESIZE_TO_INTERPOLATE_MODE[interp_method]
-            align_corners = None if mode == "nearest" else False
-            img = F.interpolate(
-                img, (self.new_h, self.new_w), mode=mode, align_corners=align_corners
-            )
-            shape[:2] = (self.new_h, self.new_w)
-            ret = img.permute(2, 3, 0, 1).view(shape).numpy()  # nchw -> hw(c)
-
-        return ret
-
-    def apply_coords(self, coords):
-        coords[:, 0] = coords[:, 0] * (self.new_w * 1.0 / self.w)
-        coords[:, 1] = coords[:, 1] * (self.new_h * 1.0 / self.h)
-        return coords
-
-    def apply_segmentation(self, segmentation):
-        segmentation = self.apply_image(segmentation, interp=Image.NEAREST)
-        return segmentation
-
-    def inverse(self):
-        return ResizeTransform(self.new_h, self.new_w, self.h, self.w, self.interp)
-
-
-class RotationTransform(Transform):
-    """
-    This method returns a copy of this image, rotated the given
-    number of degrees counter clockwise around its center.
-    """
-
-    def __init__(self, h, w, angle, expand=True, center=None, interp=None):
-        """
-        Args:
-            h, w (int): original image size
-            angle (float): degrees for rotation
-            expand (bool): choose if the image should be resized to fit the whole
-                rotated image (default), or simply cropped
-            center (tuple (width, height)): coordinates of the rotation center
-                if left to None, the center will be fit to the center of each image
-                center has no effect if expand=True because it only affects shifting
-            interp: cv2 interpolation method, default cv2.INTER_LINEAR
-        """
-        super().__init__()
-        image_center = np.array((w / 2, h / 2))
-        if center is None:
-            center = image_center
-        if interp is None:
-            interp = cv2.INTER_LINEAR
-        abs_cos, abs_sin = (abs(np.cos(np.deg2rad(angle))), abs(np.sin(np.deg2rad(angle))))
-        if expand:
-            # find the new width and height bounds
-            bound_w, bound_h = np.rint(
-                [h * abs_sin + w * abs_cos, h * abs_cos + w * abs_sin]
-            ).astype(int)
-        else:
-            bound_w, bound_h = w, h
-
-        self._set_attributes(locals())
-        self.rm_coords = self.create_rotation_matrix()
-        # Needed because of this problem https://github.com/opencv/opencv/issues/11784
-        self.rm_image = self.create_rotation_matrix(offset=-0.5)
-
-    def apply_image(self, img, interp=None):
-        """
-        img should be a numpy array, formatted as Height * Width * Nchannels
-        """
-        if len(img) == 0 or self.angle % 360 == 0:
-            return img
-        assert img.shape[:2] == (self.h, self.w)
-        interp = interp if interp is not None else self.interp
-        return cv2.warpAffine(img, self.rm_image, (self.bound_w, self.bound_h), flags=interp)
-
-    def apply_coords(self, coords):
-        """
-        coords should be a N * 2 array-like, containing N couples of (x, y) points
-        """
-        coords = np.asarray(coords, dtype=float)
-        if len(coords) == 0 or self.angle % 360 == 0:
-            return coords
-        return cv2.transform(coords[:, np.newaxis, :], self.rm_coords)[:, 0, :]
-
-    def apply_segmentation(self, segmentation):
-        segmentation = self.apply_image(segmentation, interp=cv2.INTER_NEAREST)
-        return segmentation
-
-    def create_rotation_matrix(self, offset=0):
-        center = (self.center[0] + offset, self.center[1] + offset)
-        rm = cv2.getRotationMatrix2D(tuple(center), self.angle, 1)
-        if self.expand:
-            # Find the coordinates of the center of rotation in the new image
-            # The only point for which we know the future coordinates is the center of the image
-            rot_im_center = cv2.transform(self.image_center[None, None, :] + offset, rm)[0, 0, :]
-            new_center = np.array([self.bound_w / 2, self.bound_h / 2]) + offset - rot_im_center
-            # shift the rotation center to the new coordinates
-            rm[:, 2] += new_center
-        return rm
-
-    def inverse(self):
-        """
-        The inverse is to rotate it back with expand, and crop to get the original shape.
-        """
-        if not self.expand:  # Not possible to inverse if a part of the image is lost
-            raise NotImplementedError()
-        rotation = RotationTransform(
-            self.bound_h, self.bound_w, -self.angle, True, None, self.interp
-        )
-        crop = CropTransform(
-            (rotation.bound_w - self.w) // 2, (rotation.bound_h - self.h) // 2, self.w, self.h
-        )
-        return TransformList([rotation, crop])
-
-
-class ColorTransform(Transform):
-    """
-    Generic wrapper for any photometric transforms.
-    These transformations should only affect the color space and
-    not the coordinate space of the image (e.g. annotation
-    coordinates such as bounding boxes should not be changed)
-    """
-
-    def __init__(self, op):
-        """
-        Args:
-            op (Callable): operation to be applied to the image,
-                which takes in an ndarray and returns an ndarray.
-        """
-        if not callable(op):
-            raise ValueError("op parameter should be callable")
-        super().__init__()
-        self._set_attributes(locals())
-
-    def apply_image(self, img):
-        return self.op(img)
-
-    def apply_coords(self, coords):
-        return coords
-
-    def inverse(self):
-        return NoOpTransform()
-
-    def apply_segmentation(self, segmentation):
-        return segmentation
-
-
-class PILColorTransform(ColorTransform):
-    """
-    Generic wrapper for PIL Photometric image transforms,
-    which affect the color space and not the coordinate
-    space of the image
-    """
-
-    def __init__(self, op):
-        """
-        Args:
-            op (Callable): operation to be applied to the image,
-                which takes in a PIL Image and returns a transformed
-                PIL Image.
-            For reference on possible operations see:
-            - https://pillow.readthedocs.io/en/stable/
-        """
-        if not callable(op):
-            raise ValueError("op parameter should be callable")
-        super().__init__(op)
-
-    def apply_image(self, img):
-        img = Image.fromarray(img)
-        return np.asarray(super().apply_image(img))
-
-
-def HFlip_rotated_box(transform, rotated_boxes):
-    """
-    Apply the horizontal flip transform on rotated boxes.
-
-    Args:
-        rotated_boxes (ndarray): Nx5 floating point array of
-            (x_center, y_center, width, height, angle_degrees) format
-            in absolute coordinates.
-    """
-    # Transform x_center
-    rotated_boxes[:, 0] = transform.width - rotated_boxes[:, 0]
-    # Transform angle
-    rotated_boxes[:, 4] = -rotated_boxes[:, 4]
-    return rotated_boxes
-
-
-def Resize_rotated_box(transform, rotated_boxes):
-    """
-    Apply the resizing transform on rotated boxes. For details of how these (approximation)
-    formulas are derived, please refer to :meth:`RotatedBoxes.scale`.
-
-    Args:
-        rotated_boxes (ndarray): Nx5 floating point array of
-            (x_center, y_center, width, height, angle_degrees) format
-            in absolute coordinates.
-    """
-    scale_factor_x = transform.new_w * 1.0 / transform.w
-    scale_factor_y = transform.new_h * 1.0 / transform.h
-    rotated_boxes[:, 0] *= scale_factor_x
-    rotated_boxes[:, 1] *= scale_factor_y
-    theta = rotated_boxes[:, 4] * np.pi / 180.0
-    c = np.cos(theta)
-    s = np.sin(theta)
-    rotated_boxes[:, 2] *= np.sqrt(np.square(scale_factor_x * c) + np.square(scale_factor_y * s))
-    rotated_boxes[:, 3] *= np.sqrt(np.square(scale_factor_x * s) + np.square(scale_factor_y * c))
-    rotated_boxes[:, 4] = np.arctan2(scale_factor_x * s, scale_factor_y * c) * 180 / np.pi
-
-    return rotated_boxes
-
-
-HFlipTransform.register_type("rotated_box", HFlip_rotated_box)
-ResizeTransform.register_type("rotated_box", Resize_rotated_box)
-
-# not necessary any more with latest fvcore
-NoOpTransform.register_type("rotated_box", lambda t, x: x)

spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/structures/__init__.py
DELETED
File without changes
spaces/BatuhanYilmaz/Whisper-Auto-Subtitled-Video-Generator/01_🎥_Input_YouTube_Link.py
DELETED
@@ -1,258 +0,0 @@
-import whisper
-from pytube import YouTube
-import requests
-import time
-import streamlit as st
-from streamlit_lottie import st_lottie
-import numpy as np
-import os
-from typing import Iterator
-from io import StringIO
-from utils import write_vtt, write_srt
-import ffmpeg
-from languages import LANGUAGES
-
-st.set_page_config(page_title="Auto Subtitled Video Generator", page_icon=":movie_camera:", layout="wide")
-
-# Define a function that we can use to load lottie files from a link.
-@st.cache()
-def load_lottieurl(url: str):
-    r = requests.get(url)
-    if r.status_code != 200:
-        return None
-    return r.json()
-
-col1, col2 = st.columns([1, 3])
-with col1:
-    lottie = load_lottieurl("https://assets8.lottiefiles.com/packages/lf20_jh9gfdye.json")
-    st_lottie(lottie)
-
-with col2:
-    st.write("""
-    ## Auto Subtitled Video Generator
-    ##### Input a YouTube video link and get a video with subtitles.
-    ###### ➠ If you want to transcribe the video in its original language, select the task as "Transcribe"
-    ###### ➠ If you want to translate the subtitles to English, select the task as "Translate"
-    ###### I recommend starting with the base model and then experimenting with the larger models, the small and medium models often work well. """)
-
-
-@st.cache(allow_output_mutation=True)
-def populate_metadata(link):
-    yt = YouTube(link)
-    author = yt.author
-    title = yt.title
-    description = yt.description
-    thumbnail = yt.thumbnail_url
-    length = yt.length
-    views = yt.views
-    return author, title, description, thumbnail, length, views
-
-
-@st.cache(allow_output_mutation=True)
-def download_video(link):
-    yt = YouTube(link)
-    video = yt.streams.filter(progressive=True, file_extension='mp4').order_by('resolution').desc().first().download()
-    return video
-
-
-def convert(seconds):
-    return time.strftime("%H:%M:%S", time.gmtime(seconds))
-
-
-loaded_model = whisper.load_model("base")
-current_size = "None"
-
-
-@st.cache(allow_output_mutation=True)
-def change_model(current_size, size):
-    if current_size != size:
-        loaded_model = whisper.load_model(size)
-        return loaded_model
-    else:
-        raise Exception("Model size is the same as the current size.")
-
-
-@st.cache(allow_output_mutation=True)
-def inference(link, loaded_model, task):
-    yt = YouTube(link)
-    path = yt.streams.filter(only_audio=True)[0].download(filename="audio.mp3")
-    if task == "Transcribe":
-        options = dict(task="transcribe", best_of=5)
-        results = loaded_model.transcribe(path, **options)
-        vtt = getSubs(results["segments"], "vtt", 80)
-        srt = getSubs(results["segments"], "srt", 80)
-        lang = results["language"]
-        return results["text"], vtt, srt, lang
-    elif task == "Translate":
-        options = dict(task="translate", best_of=5)
-        results = loaded_model.transcribe(path, **options)
-        vtt = getSubs(results["segments"], "vtt", 80)
-        srt = getSubs(results["segments"], "srt", 80)
-        lang = results["language"]
-        return results["text"], vtt, srt, lang
-    else:
-        raise ValueError("Task not supported")
-
-
-@st.cache(allow_output_mutation=True)
-def getSubs(segments: Iterator[dict], format: str, maxLineWidth: int) -> str:
-    segmentStream = StringIO()
-
-    if format == 'vtt':
-        write_vtt(segments, file=segmentStream, maxLineWidth=maxLineWidth)
-    elif format == 'srt':
-        write_srt(segments, file=segmentStream, maxLineWidth=maxLineWidth)
-    else:
-        raise Exception("Unknown format " + format)
-
-    segmentStream.seek(0)
-    return segmentStream.read()
-
-
-def get_language_code(language):
-    if language in LANGUAGES.keys():
-        detected_language = LANGUAGES[language]
-        return detected_language
-    else:
-        raise ValueError("Language not supported")
-
-
-def generate_subtitled_video(video, audio, transcript):
-    video_file = ffmpeg.input(video)
-    audio_file = ffmpeg.input(audio)
-    ffmpeg.concat(video_file.filter("subtitles", transcript), audio_file, v=1, a=1).output("final.mp4").run(quiet=True, overwrite_output=True)
-    video_with_subs = open("final.mp4", "rb")
-    return video_with_subs
-
-
-def main():
-    size = st.selectbox("Select Model Size (The larger the model, the more accurate the transcription will be, but it will take longer)", ["tiny", "base", "small", "medium", "large"], index=1)
-    loaded_model = change_model(current_size, size)
-    st.write(f"Model is {'multilingual' if loaded_model.is_multilingual else 'English-only'} "
-             f"and has {sum(np.prod(p.shape) for p in loaded_model.parameters()):,} parameters.")
-    link = st.text_input("YouTube Link (The longer the video, the longer the processing time)")
-    task = st.selectbox("Select Task", ["Transcribe", "Translate"], index=0)
-    if task == "Transcribe":
-        if st.button("Transcribe"):
-            author, title, description, thumbnail, length, views = populate_metadata(link)
-            results = inference(link, loaded_model, task)
-            video = download_video(link)
-            lang = results[3]
-            detected_language = get_language_code(lang)
-
-            col3, col4 = st.columns(2)
-            col5, col6, col7, col8 = st.columns(4)
-            col9, col10 = st.columns(2)
-            with col3:
-                st.video(video)
-
-            # Write the results to a .txt file and download it.
-            with open("transcript.txt", "w+", encoding='utf8') as f:
-                f.writelines(results[0])
-                f.close()
-            with open(os.path.join(os.getcwd(), "transcript.txt"), "rb") as f:
-                datatxt = f.read()
-
-            with open("transcript.vtt", "w+",encoding='utf8') as f:
-                f.writelines(results[1])
-                f.close()
-            with open(os.path.join(os.getcwd(), "transcript.vtt"), "rb") as f:
-                datavtt = f.read()
-
-            with open("transcript.srt", "w+",encoding='utf8') as f:
-                f.writelines(results[2])
-                f.close()
-            with open(os.path.join(os.getcwd(), "transcript.srt"), "rb") as f:
-                datasrt = f.read()
-
-            with col5:
-                st.download_button(label="Download Transcript (.txt)",
-                                   data=datatxt,
-                                   file_name="transcript.txt")
-            with col6:
-                st.download_button(label="Download Transcript (.vtt)",
-                                   data=datavtt,
-                                   file_name="transcript.vtt")
-            with col7:
-                st.download_button(label="Download Transcript (.srt)",
-                                   data=datasrt,
-                                   file_name="transcript.srt")
-            with col9:
-                st.success("You can download the transcript in .srt format, edit it (if you need to) and upload it to YouTube to create subtitles for your video.")
-            with col10:
-                st.info("Streamlit refreshes after the download button is clicked. The data is cached so you can download the transcript again without having to transcribe the video again.")
-
-            with col4:
-                with st.spinner("Generating Subtitled Video"):
-                    video_with_subs = generate_subtitled_video(video, "audio.mp3", "transcript.srt")
-                st.video(video_with_subs)
-                st.balloons()
-            with col8:
-                st.download_button(label="Download Subtitled Video",
-                                   data=video_with_subs,
-                                   file_name=f"{title} with subtitles.mp4")
-    elif task == "Translate":
-        if st.button("Translate to English"):
-            author, title, description, thumbnail, length, views = populate_metadata(link)
-            results = inference(link, loaded_model, task)
-            video = download_video(link)
-            lang = results[3]
-            detected_language = get_language_code(lang)
-
-            col3, col4 = st.columns(2)
-            col5, col6, col7, col8 = st.columns(4)
-            col9, col10 = st.columns(2)
-            with col3:
-                st.video(video)
-
-            # Write the results to a .txt file and download it.
-            with open("transcript.txt", "w+", encoding='utf8') as f:
-                f.writelines(results[0])
-                f.close()
-            with open(os.path.join(os.getcwd(), "transcript.txt"), "rb") as f:
-                datatxt = f.read()
-
-            with open("transcript.vtt", "w+",encoding='utf8') as f:
-                f.writelines(results[1])
-                f.close()
-            with open(os.path.join(os.getcwd(), "transcript.vtt"), "rb") as f:
-                datavtt = f.read()
-
-            with open("transcript.srt", "w+",encoding='utf8') as f:
-                f.writelines(results[2])
-                f.close()
-            with open(os.path.join(os.getcwd(), "transcript.srt"), "rb") as f:
-                datasrt = f.read()
-            with col5:
-                st.download_button(label="Download Transcript (.txt)",
-                                   data=datatxt,
-                                   file_name="transcript.txt")
-            with col6:
-                st.download_button(label="Download Transcript (.vtt)",
-                                   data=datavtt,
-                                   file_name="transcript.vtt")
-            with col7:
-                st.download_button(label="Download Transcript (.srt)",
-                                   data=datasrt,
-                                   file_name="transcript.srt")
-            with col9:
-                st.success("You can download the transcript in .srt format, edit it (if you need to) and upload it to YouTube to create subtitles for your video.")
-            with col10:
-                st.info("Streamlit refreshes after the download button is clicked. The data is cached so you can download the transcript again without having to transcribe the video again.")
-
-            with col4:
-                with st.spinner("Generating Subtitled Video "):
-                    video_with_subs = generate_subtitled_video(video, "audio.mp3", "transcript.srt")
-                st.video(video_with_subs)
-                st.balloons()
-            with col8:
-                st.download_button(label="Download Subtitled Video ",
-                                   data=video_with_subs,
-                                   file_name=f"{title} with subtitles.mp4")
-    else:
-        st.error("Please select a task.")
-
-
-if __name__ == "__main__":
-    main()
-    st.markdown("###### Made with :heart: by [@BatuhanYılmaz](https://twitter.com/batuhan3326) [](https://www.buymeacoffee.com/batuhanylmz)")

spaces/Benson/text-generation/Examples/8 Bola Piscina Apk Gua.md
DELETED
@@ -1,55 +0,0 @@
-
-<h1>8 bola piscina APK Guía: Cómo jugar como un profesional</h1>
-<p>¿Te encanta jugar 8 Ball Pool en tu dispositivo Android? ¿Quieres mejorar tus habilidades y ganar más partidos? Si es así, entonces usted podría estar interesado en el uso de una herramienta de guía para 8 Ball Pool. Una herramienta de guía es una aplicación que le ayuda a hacer disparos precisos mediante la ampliación de la guía en el juego. En este artículo, explicaremos qué es una herramienta de guía, cómo funciona y cuáles son las mejores herramientas de guía disponibles para 8 Ball Pool. También le daremos algunos consejos y trucos sobre cómo usar una herramienta de guía de manera eficaz y eficiente. Finalmente, discutiremos los pros y los contras de usar una herramienta de guía para 8 Ball Pool. Al final de este artículo, podrás jugar como un profesional con una herramienta de guía. </p>
-<h2>¿Qué es una herramienta de guía y cómo funciona? </h2>
-<p>Una herramienta de guía es una aplicación que le ayuda a hacer disparos precisos mediante la ampliación de la guía en el juego. La pauta es la línea que muestra dónde irá la bola blanca cuando la golpees. Normalmente, la guía está limitada en longitud y no muestra el ángulo o la dirección de la bola objetivo. Sin embargo, con una herramienta de guía, puede ver una guía extendida que cubre toda la tabla. También puedes ver el ángulo y la dirección de la bola objetivo, así como las posibles trayectorias de ambas bolas después de la colisión. De esta manera, puedes planificar mejor tus disparos y evitar errores. </p>
-<h2>8 bola piscina apk guía</h2><br /><p><b><b>DOWNLOAD</b> --->>> <a href="https://bltlly.com/2v6Mhb">https://bltlly.com/2v6Mhb</a></b></p><br /><br />
-<p>Una herramienta de guía funciona mediante el análisis de capturas de pantalla o el reconocimiento de imágenes de IA para detectar la posición y el movimiento de las bolas en la mesa. A continuación, se superpone una guía extendida en la parte superior de la pantalla del juego. Puede iniciar el juego desde la aplicación o cambiar entre ellos durante el juego. Algunas herramientas de guía también tienen características adicionales como las tomas de cojín, el modo de vista de cuadrícula y la función de 3 líneas. Estas características te ayudan con disparos más complejos que implican rebotar contra los rieles o golpear varias bolas. </p>
-<h3>Las mejores herramientas de guía para la piscina de bolas 8</h3>
-
-<p>Usar una herramienta de guía para 8 Ball Pool puede ayudarte a hacer tiros precisos y ganar más partidos. Sin embargo, no es suficiente confiar en la herramienta por sí sola. También necesitas mejorar tus habilidades y estrategia con la herramienta. Aquí hay algunos consejos y trucos sobre cómo usar una herramienta de guía de manera efectiva y eficiente. </p>
-<h4>Cómo ajustar la sensibilidad y el ancho de línea</h4>
-<p>Uno de los ajustes más importantes de una herramienta de guía es la sensibilidad y el ancho de línea. La sensibilidad determina qué tan rápido o lento se mueve la guía cuando arrastra el dedo en la pantalla. El ancho de línea determina el grosor o el grosor de la guía que aparece en la pantalla. Debe ajustar estos ajustes de acuerdo con su preferencia y nivel de comodidad. Una mayor sensibilidad significa que puede mover la guía más rápido y con mayor precisión, pero también significa que puede cometer más errores si no tiene cuidado. Una sensibilidad más baja significa que puede mover la guía más lento y sin problemas, pero también significa que puede perder algunas oportunidades si no es lo suficientemente rápido. Un ancho de línea más grueso significa que puedes ver la guía mejor y más claramente, pero también significa que puedes bloquear algunas partes de la pantalla del juego. Un ancho de línea más delgado significa que puedes ver la pantalla del juego mejor y más completamente, pero también significa que puedes perder de vista la guía si es demasiado débil. </p>
|
13 |
-
<p>Para ajustar la sensibilidad y el ancho de línea de una herramienta de guía, debe ir al menú de configuración de la aplicación y encontrar las opciones de sensibilidad y ancho de línea. Puede usar un control deslizante o un botón para cambiar los valores de estas opciones. También puede probar la configuración jugando un juego de práctica o un partido amistoso. Deberías experimentar con diferentes configuraciones hasta que encuentres las que más te convengan. </p>
|
14 |
-
<h4> Cómo usar el modo de vista del banco y la cuadrícula</h4>
|
15 |
-
|
16 |
-
<p>Para usar el modo de toma bancaria y vista de cuadrícula de una herramienta de guía, debe habilitarlos o desactivarlos desde el menú de configuración de la aplicación o desde un botón en la pantalla del juego. Cuando active el modo de disparo de banco, verá una guía extendida que le muestra dónde irá la bola blanca después de rebotar contra los rieles. Puede utilizar este modo para planificar sus disparos de cojín y evitar rascarse o ensuciarse. Cuando habilite el modo de vista de cuadrícula, verá una superposición de cuadrícula en la parte superior de la pantalla del juego. Puedes usar este modo para alinear tu palo, bola blanca y bola objetivo y asegurarte de que estén en línea recta. También puede usar este modo para medir ángulos y distancias entre bolas. </p>
|
17 |
-
<h4>Cómo usar la función de 3 líneas</h4>
|
18 |
-
<p>Una tercera característica de algunas herramientas de guía es la función de 3 líneas. La función de 3 líneas le muestra tres líneas en lugar de una cuando apunta su tiro. La primera línea es la pauta normal que muestra dónde irá la bola blanca cuando la golpees. La segunda línea es la línea de bola objetivo que le muestra dónde irá la bola objetivo cuando es golpeada por la bola blanca. La tercera línea es la línea de bola blanca que te muestra dónde irá la bola blanca después de golpear la bola objetivo. Esta función puede ayudarle con disparos más avanzados que implican predecir el movimiento de ambas bolas después de la colisión. </p>
|
19 |
-
<p>Para usar la función de 3 líneas de una herramienta de guía, debe activarla o desactivarla desde el menú de configuración de la aplicación o desde un botón en la pantalla del juego. Cuando active la función de 3 líneas, verá tres líneas en lugar de una cuando apunte su tiro. Puede utilizar esta función para predecir el movimiento de ambas bolas después de la colisión y planificar sus disparos en consecuencia. También puede utilizar esta función para evitar golpear la bola equivocada o embolsarse la bola equivocada. </p>
|
20 |
-
<h3>Los pros y los contras de usar una herramienta de guía para la piscina de bolas 8</h3>
|
21 |
-
|
22 |
-
<h4>Los pros de usar una herramienta de guía para la piscina de bolas 8</h4>
|
23 |
-
<p>Algunos de los beneficios de usar una herramienta de guía para 8 Ball Pool son:</p>
|
24 |
-
<p></p>
|
25 |
-
<ul>
|
26 |
-
<li> Puede mejorar su precisión y confianza al disparar la pelota. Puede hacer más disparos y ganar más partidos con una herramienta de guía. </li>
|
27 |
-
<li>Puede mejorar su disfrute y satisfacción al jugar 8 Ball Pool. Usted puede tener más diversión y emoción con una herramienta de guía. </li>
|
28 |
-
<li>Puede ayudarle a aprender y practicar nuevas habilidades y estrategias para 8 Ball Pool. Puede mejorar su juego y conocimiento con una herramienta de guía. </li>
|
29 |
-
<li>Puede ahorrarle tiempo y dinero al jugar 8 Ball Pool. Puede evitar perder monedas y dinero en perder partidos con una herramienta de guía. </li>
|
30 |
-
</ul>
|
31 |
-
<h4>Los contras de usar una herramienta de guía para la piscina de bolas 8</h4>
|
32 |
-
<p>Algunos de los inconvenientes de usar una herramienta de guía para 8 Ball Pool son:</p>
|
33 |
-
<ul>
|
34 |
-
<li>Puede plantear cuestiones éticas y preguntas sobre el juego limpio. Puedes ser considerado infiel o injusto por otros jugadores o por los desarrolladores del juego si usas una herramienta de guía. </li>
|
35 |
-
<li>Puede causar problemas técnicos y problemas con su dispositivo o juego. Puede experimentar problemas técnicos, bloqueos o prohibiciones si utiliza una herramienta de guía. </li>
|
36 |
-
<li>Puede crear dependencia y adicción a la herramienta. Puede perder sus habilidades naturales y habilidades si se basa demasiado en una herramienta de guía. </li>
|
37 |
-
</ul>
|
38 |
-
<h2>Conclusión</h2>
|
39 |
-
|
40 |
-
<h3>Preguntas frecuentes</h3>
|
41 |
-
<p>Aquí hay algunas preguntas frecuentes y sus respuestas sobre 8 Ball Pool APK guía:</p>
|
42 |
-
<ol>
|
43 |
-
<li><b>¿Es legal usar una herramienta de guía para 8 Ball Pool? </b><br>
|
44 |
-
El uso de una herramienta de guía para 8 Ball Pool no es ilegal, pero puede violar los términos de servicio o la política de privacidad del juego o los desarrolladores del juego. Por lo tanto, debe tener cuidado al usar una herramienta de guía para 8 Ball Pool y solo descargar de fuentes confiables. También debe evitar el uso de herramientas de hackeo que puedan contener malware, virus o anuncios que puedan dañar su dispositivo o juego. </li>
|
45 |
-
<li><b>¿Está usando una herramienta de guía para 8 Ball Pool seguro? </b><br>
|
46 |
-
El uso de una herramienta de guía para 8 Ball Pool es generalmente seguro, pero puede causar algunos problemas técnicos o problemas con su dispositivo o juego. Puede experimentar fallas, bloqueos o prohibiciones si utiliza una herramienta de guía para 8 Ball Pool. Por lo tanto, debe realizar copias de seguridad de sus datos y actualizar su dispositivo y juego regularmente cuando utilice una herramienta de guía para 8 Ball Pool. También debes desinstalar cualquier aplicación no deseada o sospechosa que pueda interferir con tu dispositivo o juego. </li>
|
47 |
-
<li><b>¿Está utilizando una herramienta de guía para 8 Ball Pool engaño? </b><br>
|
48 |
-
Usar una herramienta de guía para 8 Ball Pool no es hacer trampa, pero puede ser considerado injusto o poco ético por otros jugadores o por los desarrolladores del juego. Por lo tanto, debe usar una herramienta de guía para 8 Ball Pool de manera inteligente y responsable, y solo como una herramienta de aprendizaje o práctica. También debe respetar las reglas y regulaciones del juego y los desarrolladores del juego, y evitar el uso de cualquier herramienta de hackeo que puede darle una ventaja injusta sobre otros jugadores. </li>
|
49 |
-
<li><b>¿Cómo descargar e instalar una herramienta de guía para 8 Ball Pool? </b><br>
|
50 |
-
|
51 |
-
<li><b>¿Cómo actualizar una herramienta de guía para 8 Ball Pool? </b><br>
|
52 |
-
Para actualizar una herramienta de guía para 8 Ball Pool, debes seguir estos pasos: - Ve a Google Play Store o APKCombo y busca la herramienta de guía que tienes instalada en tu dispositivo, como 8 Pool Master, Aim Pool 2 u 8 Pool Guideline Ultimate. - Compruebe si hay alguna nueva versión o actualización disponible para la aplicación y asegúrese de que es compatible con su dispositivo y juego. - Toque en el botón de actualización y esperar a que la aplicación para descargar e instalar la última versión en su dispositivo. - Inicie la aplicación y compruebe si hay nuevas características o mejoras en la aplicación. - Disfrute de la herramienta de guía actualizada para 8 Ball Pool.</li>
|
53 |
-
</ol></p> 64aa2da5cf<br />
|
54 |
-
<br />
|
55 |
-
<br />
spaces/Benson/text-generation/Examples/Can I Download There Is Day.md
DELETED
@@ -1,65 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>¿Puedo descargar Hay Day? </h1>
|
3 |
-
<p>Si usted está buscando un juego divertido y relajante que le permite experimentar la vida sencilla de trabajar la tierra, entonces es posible que desee probar Hay Day. Hay Day es un popular simulador de agricultura que tiene millones de jugadores en todo el mundo. Pero, ¿puedes descargar Hay Day en tu dispositivo? ¡La respuesta es sí, usted puede! En este artículo, le diremos qué día del heno es, cómo descargarlo, y cuáles son las ventajas de jugarlo. </p>
|
4 |
-
<h2>¿Qué es el día del heno? </h2>
|
5 |
-
<p>Hay Day es un juego desarrollado por Supercell, la misma compañía que creó otros juegos exitosos como Clash of Clans y Brawl Stars. Hay Day fue lanzado en 2012 y desde entonces se ha convertido en uno de los juegos más descargados en ambas plataformas Android e iOS. Pero ¿qué es Hay Day exactamente? </p>
|
6 |
-
<h2>can i download there is day</h2><br /><p><b><b>Download Zip</b> ✔✔✔ <a href="https://bltlly.com/2v6LG3">https://bltlly.com/2v6LG3</a></b></p><br /><br />
|
7 |
-
<h3>Un juego de simulador de agricultura</h3>
|
8 |
-
<p>Hay Day es un juego que te permite crear tu propia granja y cultivar, criar animales y hacer bienes. Puedes cosechar trigo, maíz, zanahorias y más, y usarlos para hornear pan, hacer queso o producir azúcar. También puede alimentar a sus pollos, vacas, cerdos y otros animales, y recoger huevos, leche, tocino y lana. Incluso se puede pescar en el lago o en la mina. </p>
|
9 |
-
<h3>Un juego social</h3>
|
10 |
-
<p>Hay Day no es solo un juego en solitario. También puedes jugar con amigos y vecinos de todo el mundo. Puede unirse o crear un vecindario y chatear con otros jugadores. También puede comerciar y vender sus cultivos y productos con ellos, o ayudarles con sus pedidos y solicitudes. También puedes competir en eventos semanales y ganar recompensas. </p>
|
11 |
-
<h3>Un juego gratuito</h3>
|
12 |
-
|
13 |
-
<h2>¿Cómo descargar Hay Day? </h2>
|
14 |
-
<p>Descargar Hay Day es fácil y rápido. Dependiendo de tu dispositivo, puedes seguir estos pasos:</p>
|
15 |
-
<h3>Para dispositivos Android</h3>
|
16 |
-
<ol>
|
17 |
-
<li>Ir a la aplicación Google Play Store en su dispositivo. </li>
|
18 |
-
<li>Buscar "Hay Day" en la barra de búsqueda. </li>
|
19 |
-
<li>Toque en el botón "Instalar" y espere a que termine la descarga. </li>
|
20 |
-
<li>Abre la aplicación y disfruta jugando Hay Day.</li>
|
21 |
-
</ol>
|
22 |
-
<h3>Para dispositivos iOS</h3>
|
23 |
-
<ol>
|
24 |
-
<li>Ir a la aplicación App Store en su dispositivo. </li>
|
25 |
-
<li>Buscar "Hay Day" en la barra de búsqueda. </li>
|
26 |
-
<li>Toque en el botón "Obtener" e introduzca su contraseña de Apple ID si se le solicita. </li>
|
27 |
-
<li>Espere a que la descarga termine y abra la aplicación. </li>
|
28 |
-
<li>Diviértete jugando Hay Day.</li>
|
29 |
-
</ol>
|
30 |
-
<h3>Para dispositivos Windows</h3>
|
31 |
-
<p>Desafortunadamente, Hay Day no está disponible oficialmente para dispositivos Windows. Sin embargo, todavía se puede jugar en su PC o portátil mediante el uso de un emulador de Android. Un emulador de Android es un software que te permite ejecutar aplicaciones de Android en tu dispositivo Windows. Estos son algunos pasos para hacerlo:</p>
|
32 |
-
<ol>
|
33 |
-
<li>Descargar e instalar un emulador de Android de su elección. Algunos populares son BlueStacks, NoxPlayer, o LDPlayer.</li>
|
34 |
-
<li>Abre el emulador e inicia sesión con tu cuenta de Google. </li>
|
35 |
-
<li>Ir a la aplicación Google Play Store dentro del emulador. </ <li>Buscar "Hay Day" en la barra de búsqueda e instalarlo. </li>
|
36 |
-
<li>Abra la aplicación y comience a jugar Hay Day en su dispositivo Windows. </li>
|
37 |
-
</ol>
|
38 |
-
<h2>¿Cuáles son los beneficios de descargar el día del heno? </h2>
|
39 |
-
<p>Descargar Hay Day puede traerte muchos beneficios, como:</p>
|
40 |
-
<h3>Disfrutar de la agricultura en cualquier momento, en cualquier lugar</h3>
|
41 |
-
<p>Con Hay Day, puede experimentar la alegría de la agricultura en cualquier momento y en cualquier lugar que desee. Puedes jugar Hay Day sin conexión o en línea, en tu teléfono o tableta, o en tu PC o portátil. También puedes pausar y reanudar tu juego cuando quieras. Puedes cultivar tu granja a tu propio ritmo y estilo. </p>
|
42 |
-
<h3>Personalizar su granja y decorarla</h3>
|
43 |
-
|
44 |
-
<h3>Comercio y venta de bienes con amigos y vecinos</h3>
|
45 |
-
<p>Hay Day no es solo un juego de agricultura, sino también un juego de comercio. Puede intercambiar y vender sus productos con amigos y vecinos a través de la tienda de carretera, el periódico, el barco o el camión. También puede comprar productos de ellos o ayudarles con sus pedidos. Puede ganar monedas y puntos de experiencia al hacerlo. </p>
|
46 |
-
<p></p>
|
47 |
-
<h3>Explorar el valle y la ciudad</h3>
|
48 |
-
<p>Hay Day no se limita a su granja. También puede explorar el valle y la ciudad, donde puede encontrar más actividades y sorpresas. Usted puede conducir su coche en el valle y recoger fichas, o tomar el tren a la ciudad y servir a los visitantes. También puedes descubrir nuevos lugares y personajes en el camino. </p>
|
49 |
-
<h2>Conclusión</h2>
|
50 |
-
<p>Hay Day es un juego divertido y relajante que te permite crear tu propia granja y disfrutar de la vida sencilla de trabajar la tierra. Puede descargar Hay Day en su dispositivo Android o iOS, o reproducirlo en su dispositivo Windows utilizando un emulador de Android. También puede jugar con amigos y vecinos de todo el mundo, personalizar su granja y decorarla, el comercio y la venta de bienes, y explorar el valle y la ciudad. Hay Day es un juego gratuito que puedes jugar sin gastar dinero, pero también puedes comprar diamantes si quieres conseguir algunos objetos extra o acelerar tu progreso. Si estás buscando un juego que combine agricultura, comercio y socialización, entonces Hay Day es el juego para ti. </p>
|
51 |
-
<h2>Preguntas frecuentes</h2>
|
52 |
-
<ul>
|
53 |
-
<li><b>Q: ¿Cómo puedo obtener más diamantes en Hay Day? </b></li>
|
54 |
-
<li>A: Puedes obtener más diamantes completando logros, viendo anuncios, encontrándolos en cajas misteriosas o comprándolos con dinero real. </li>
|
55 |
-
<li><b>Q: ¿Cómo puedo subir de nivel más rápido en Hay Day? </b></li>
|
56 |
-
<li>A: Puedes subir de nivel más rápido completando pedidos, cosechando cosechas, alimentando animales, haciendo bienes, comerciando con amigos y vecinos, o usando Tom o boosters. </li>
|
57 |
-
<li><b>Q: ¿Cómo puedo unirme o crear un vecindario en Hay Day? </b></li>
|
58 |
-
|
59 |
-
<li><b>Q: ¿Cómo participo en el derby en Hay Day? </b></li>
|
60 |
-
<li>A: Puedes participar en el derby uniéndote a un vecindario que está inscrito en el derby. Necesitas tener al menos el nivel 18 para hacerlo. A continuación, puede completar las tareas de la junta de derby para ganar puntos para su barrio. </li>
|
61 |
-
<li><b>Q: ¿Cómo puedo contactar al soporte de Supercell en Hay Day? </b></li>
|
62 |
-
<li>A: Puede ponerse en contacto con el soporte de Supercell tocando en el icono de configuración en la esquina superior izquierda de la pantalla, luego tocando en "Ayuda y soporte". A continuación, puede navegar a través de las preguntas frecuentes o enviar una solicitud. </li>
|
63 |
-
</ul></p> 64aa2da5cf<br />
|
64 |
-
<br />
|
65 |
-
<br />
spaces/Benson/text-generation/Examples/Descargar Gratis Fuego Avance Servidor Versi Terbaru.md
DELETED
@@ -1,58 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Cómo descargar gratis Fire Advance Server Versi Terbaru</h1>
|
3 |
-
<p>Free Fire es uno de los juegos battle royale más populares del mundo, con millones de jugadores disfrutando de su emocionante juego y características. Pero ¿sabías que hay una versión especial de Free Fire que te permite probar nuevas actualizaciones antes de que se publiquen en el servidor normal? Esta versión se llama Free Fire Advance Server, y solo está disponible por un tiempo limitado y para un número limitado de jugadores. En este artículo, le diremos todo lo que necesita saber sobre Free Fire Advance Server versi terbaru, o la última versión de Free Fire Advance Server. Le explicaremos qué es, cómo registrarse e iniciar sesión, cómo descargarlo e instalarlo, y cómo obtener recompensas de él. Así que, si estás interesado en ser uno de los primeros jugadores en experimentar nuevas características y contenido en Free Fire, ¡sigue leyendo! </p>
|
4 |
-
<h2>descargar gratis fuego avance servidor versi terbaru</h2><br /><p><b><b>Download File</b> ✶ <a href="https://bltlly.com/2v6JK1">https://bltlly.com/2v6JK1</a></b></p><br /><br />
|
5 |
-
<h2>¿Qué es Free Fire Advance Server? </h2>
|
6 |
-
<p>Free Fire Advance Server es un servidor de prueba o un servidor beta que Garena lanza como un lugar para probar nuevas actualizaciones. Aquí, puedes probar varios contenidos nuevos que aún no se han lanzado, como personajes, mascotas, armas, modos y mapas. El propósito de Free Fire Advance Server es recopilar comentarios de los jugadores y corregir cualquier error o error antes de iniciar las actualizaciones en el servidor normal. </p>
|
7 |
-
<h3>Los beneficios de jugar en Free Fire Advance Server</h3>
|
8 |
-
<p>Jugar en Free Fire Advance Server tiene varios beneficios para los jugadores que quieren tener una experiencia de juego diferente y exclusiva. Algunos de estos beneficios son:</p>
|
9 |
-
<ul>
|
10 |
-
<li>Puede acceder a nuevo contenido que no está disponible en el servidor regular. </li>
|
11 |
-
<li>Puedes dar tus sugerencias y opiniones a Garena y ayudar a mejorar el juego. </li>
|
12 |
-
<li>Puedes obtener recompensas en forma de diamantes si encuentras y reportas errores en Free Fire Advance Server.</li>
|
13 |
-
<li>Puedes divertirte y desafiarte con nuevos elementos de juego. </li>
|
14 |
-
</ul>
|
15 |
-
|
16 |
-
<p>Free Fire Advance Server y el servidor regular tienen algunas diferencias que debe tener en cuenta antes de jugar en él. Algunas de estas diferencias son:</p <p>- Free Fire Advance Server tiene una capacidad limitada y solo está abierto durante un cierto período de tiempo. Necesita registrarse y obtener un código de activación para unirse a él. El servidor normal está abierto para todos y no requiere ningún código. </p>
|
17 |
-
<p></p>
|
18 |
-
<p>- Free Fire Advance Server puede tener algunos errores o errores que pueden afectar el juego. El servidor regular es más estable y suave. </p>
|
19 |
-
<p>- Gratis Fire Advance Server no puede tener todas las características o contenido que el servidor regular tiene. El servidor regular tiene más variedad y opciones para los jugadores. </p>
|
20 |
-
<p>- Free Fire Advance Server puede tener diferentes reglas o configuraciones que el servidor normal. Por ejemplo, el sistema de clasificación, el matchmaking, la moneda y las recompensas pueden ser diferentes. </p>
|
21 |
-
<h2>Cómo registrarse e iniciar sesión en Free Fire Advance Server</h2>
|
22 |
-
<p>Si quieres jugar en Free Fire Advance Server, primero debes registrarte e iniciar sesión. Estos son los pasos para hacerlo:</p>
|
23 |
-
<h3>Los pasos para registrarse en Free Fire Advance Server</h3>
|
24 |
-
<ol>
|
25 |
-
<li>Ir al sitio web oficial de Free Fire Advance Server en https://ff-advance.ff.garena.com/.</li>
|
26 |
-
<li>Haga clic en el botón "Login Facebook" e introduzca los detalles de su cuenta de Facebook. </li>
|
27 |
-
<li>Rellene su información personal, como su nombre, correo electrónico, número de teléfono y Free Fire ID.</li>
|
28 |
-
<li>Haga clic en el botón "Unirse ahora" y espere el correo electrónico de confirmación. </li>
|
29 |
-
<li>Si está seleccionado, recibirá un correo electrónico con un código de activación y un enlace para descargar el archivo APK. </li>
|
30 |
-
</ol>
|
31 |
-
<h3>Los requisitos para unirse a Free Fire Advance Server</h3>
|
32 |
-
<p>No todo el mundo puede unirse a Free Fire Advance Server, ya que hay algunos requisitos que debe cumplir. Algunos de estos requisitos son:</p>
|
33 |
-
<ul>
|
34 |
-
<li>Necesitas tener una cuenta de Facebook que esté vinculada a tu cuenta de Free Fire. </li>
|
35 |
-
|
36 |
-
<li>Necesitas tener suficiente espacio de almacenamiento en tu dispositivo para descargar e instalar el archivo APK. </li>
|
37 |
-
<li>Necesitas estar dispuesto a reportar cualquier error que encuentres en Free Fire Advance Server.</li>
|
38 |
-
</ul>
|
39 |
-
<h3>Cómo obtener el código de activación de Free Fire Advance Server</h3>
|
40 |
-
<p>Un código de activación es un código único que debe ingresar cuando inicia sesión en Free Fire Advance Server por primera vez. Sin él, no se puede acceder al juego. El código de activación solo se da a un número limitado de jugadores que se registren en Free Fire Advance Server. Estos son algunos consejos para obtener un código de activación:</p>
|
41 |
-
<ul>
|
42 |
-
<li>Regístrese lo antes posible cuando el registro esté abierto. Cuanto antes se registre, mayores serán las posibilidades de obtener un código de activación. </li>
|
43 |
-
<li>Revise su correo electrónico regularmente para cualquier actualización de Garena. A veces, pueden enviar códigos de activación al azar o en función de ciertos criterios. </li>
|
44 |
-
<li>Sigue las cuentas oficiales de Free Fire y Garena. Pueden anunciar algunos eventos o sorteos donde puedes ganar un código de activación. </li>
|
45 |
-
<li>Sé activo y leal en Free Fire. Garena puede recompensar a algunos jugadores que juegan con frecuencia y gastan dinero en el juego con un código de activación. </li>
|
46 |
-
</ul> usted entiende cómo descargar gratis Fire Advance Server versi terbaru y disfrutar de sus características. Si tiene alguna pregunta o comentario, no dude en dejar un comentario a continuación. ¡Gracias por leer y jugar feliz! </p>
|
47 |
-
<h3>Preguntas frecuentes</h3>
|
48 |
-
<p>Aquí hay algunas preguntas y respuestas frecuentes sobre Free Fire Advance Server:</p>
|
49 |
-
<ol>
|
50 |
-
<li>Q: ¿Cuándo estará abierto Free Fire Advance Server? </li>
|
51 |
-
R: Free Fire Advance Server generalmente está abierto durante unos días o semanas antes de que se lance una actualización importante en el servidor regular. Puede consultar el sitio web oficial o las cuentas de redes sociales de Free Fire y Garena para las fechas exactas y los horarios de Free Fire Advance Server. <li>P: ¿Puedo jugar con mis amigos en Free Fire Advance Server? </li>
|
52 |
-
|
53 |
-
R: No, su progreso y los datos en Free Fire Advance Server no se transferirán al servidor normal. Son servidores separados con diferentes contenidos y configuraciones. Tendrá que empezar desde cero en el servidor normal. <li>Q: ¿Cómo puedo obtener más diamantes en Free Fire Advance Server? </li>
|
54 |
-
R: Puedes obtener más diamantes en Free Fire Advance Server al reportar errores o errores que encuentres en el juego. Garena te recompensará con diamantes según la gravedad y validez de tus informes. También puede obtener diamantes participando en algunos eventos o regalos que Garena puede alojar en Free Fire Advance Server. <li>P: ¿Cómo puedo contactar con Garena si tengo algún problema o sugerencia en Free Fire Advance Server? </li>
|
55 |
-
R: Puede ponerse en contacto con Garena usando el botón "Reportar" en el menú del juego, enviando un correo electrónico a [email protected], o visitando su página de servicio al cliente en https://ffsupport.zendesk.com/hc/en-us.</li>
|
56 |
-
</ol></p> 64aa2da5cf<br />
|
57 |
-
<br />
|
58 |
-
<br />
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/colorama/ansi.py
DELETED
@@ -1,102 +0,0 @@
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
'''
This module generates ANSI character codes to printing colors to terminals.
See: http://en.wikipedia.org/wiki/ANSI_escape_code
'''

CSI = '\033['
OSC = '\033]'
BEL = '\a'


def code_to_chars(code):
    return CSI + str(code) + 'm'

def set_title(title):
    return OSC + '2;' + title + BEL

def clear_screen(mode=2):
    return CSI + str(mode) + 'J'

def clear_line(mode=2):
    return CSI + str(mode) + 'K'


class AnsiCodes(object):
    def __init__(self):
        # the subclasses declare class attributes which are numbers.
        # Upon instantiation we define instance attributes, which are the same
        # as the class attributes but wrapped with the ANSI escape sequence
        for name in dir(self):
            if not name.startswith('_'):
                value = getattr(self, name)
                setattr(self, name, code_to_chars(value))


class AnsiCursor(object):
    def UP(self, n=1):
        return CSI + str(n) + 'A'
    def DOWN(self, n=1):
        return CSI + str(n) + 'B'
    def FORWARD(self, n=1):
        return CSI + str(n) + 'C'
    def BACK(self, n=1):
        return CSI + str(n) + 'D'
    def POS(self, x=1, y=1):
        return CSI + str(y) + ';' + str(x) + 'H'


class AnsiFore(AnsiCodes):
    BLACK = 30
    RED = 31
    GREEN = 32
    YELLOW = 33
    BLUE = 34
    MAGENTA = 35
    CYAN = 36
    WHITE = 37
    RESET = 39

    # These are fairly well supported, but not part of the standard.
    LIGHTBLACK_EX = 90
    LIGHTRED_EX = 91
    LIGHTGREEN_EX = 92
    LIGHTYELLOW_EX = 93
    LIGHTBLUE_EX = 94
    LIGHTMAGENTA_EX = 95
    LIGHTCYAN_EX = 96
    LIGHTWHITE_EX = 97


class AnsiBack(AnsiCodes):
    BLACK = 40
    RED = 41
    GREEN = 42
    YELLOW = 43
    BLUE = 44
    MAGENTA = 45
    CYAN = 46
    WHITE = 47
    RESET = 49

    # These are fairly well supported, but not part of the standard.
    LIGHTBLACK_EX = 100
    LIGHTRED_EX = 101
    LIGHTGREEN_EX = 102
    LIGHTYELLOW_EX = 103
    LIGHTBLUE_EX = 104
    LIGHTMAGENTA_EX = 105
    LIGHTCYAN_EX = 106
    LIGHTWHITE_EX = 107


class AnsiStyle(AnsiCodes):
    BRIGHT = 1
    DIM = 2
    NORMAL = 22
    RESET_ALL = 0

Fore = AnsiFore()
Back = AnsiBack()
Style = AnsiStyle()
Cursor = AnsiCursor()
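For reference, a short usage sketch of the objects this vendored module exports, assuming the standard colorama package API and a terminal that understands ANSI escapes (illustrative only, not part of the file above; on Windows, init() wraps stdout so the escape sequences are translated into console calls):

from colorama import Back, Cursor, Fore, Style, init

init()  # needed on Windows so ANSI escapes are converted to console API calls
print(Fore.RED + "error:" + Style.RESET_ALL + " something went wrong")
print(Back.GREEN + Fore.BLACK + "highlighted" + Style.RESET_ALL)
print(Cursor.POS(1, 1) + "printed at the top-left corner")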
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/idna/idnadata.py
DELETED
@@ -1,2151 +0,0 @@
|
|
1 |
-
# This file is automatically generated by tools/idna-data
|
2 |
-
|
3 |
-
__version__ = '15.0.0'
|
4 |
-
scripts = {
|
5 |
-
'Greek': (
|
6 |
-
0x37000000374,
|
7 |
-
0x37500000378,
|
8 |
-
0x37a0000037e,
|
9 |
-
0x37f00000380,
|
10 |
-
0x38400000385,
|
11 |
-
0x38600000387,
|
12 |
-
0x3880000038b,
|
13 |
-
0x38c0000038d,
|
14 |
-
0x38e000003a2,
|
15 |
-
0x3a3000003e2,
|
16 |
-
0x3f000000400,
|
17 |
-
0x1d2600001d2b,
|
18 |
-
0x1d5d00001d62,
|
19 |
-
0x1d6600001d6b,
|
20 |
-
0x1dbf00001dc0,
|
21 |
-
0x1f0000001f16,
|
22 |
-
0x1f1800001f1e,
|
23 |
-
0x1f2000001f46,
|
24 |
-
0x1f4800001f4e,
|
25 |
-
0x1f5000001f58,
|
26 |
-
0x1f5900001f5a,
|
27 |
-
0x1f5b00001f5c,
|
28 |
-
0x1f5d00001f5e,
|
29 |
-
0x1f5f00001f7e,
|
30 |
-
0x1f8000001fb5,
|
31 |
-
0x1fb600001fc5,
|
32 |
-
0x1fc600001fd4,
|
33 |
-
0x1fd600001fdc,
|
34 |
-
0x1fdd00001ff0,
|
35 |
-
0x1ff200001ff5,
|
36 |
-
0x1ff600001fff,
|
37 |
-
0x212600002127,
|
38 |
-
0xab650000ab66,
|
39 |
-
0x101400001018f,
|
40 |
-
0x101a0000101a1,
|
41 |
-
0x1d2000001d246,
|
42 |
-
),
|
43 |
-
'Han': (
|
44 |
-
0x2e8000002e9a,
|
45 |
-
0x2e9b00002ef4,
|
46 |
-
0x2f0000002fd6,
|
47 |
-
0x300500003006,
|
48 |
-
0x300700003008,
|
49 |
-
0x30210000302a,
|
50 |
-
0x30380000303c,
|
51 |
-
0x340000004dc0,
|
52 |
-
0x4e000000a000,
|
53 |
-
0xf9000000fa6e,
|
54 |
-
0xfa700000fada,
|
55 |
-
0x16fe200016fe4,
|
56 |
-
0x16ff000016ff2,
|
57 |
-
0x200000002a6e0,
|
58 |
-
0x2a7000002b73a,
|
59 |
-
0x2b7400002b81e,
|
60 |
-
0x2b8200002cea2,
|
61 |
-
0x2ceb00002ebe1,
|
62 |
-
0x2f8000002fa1e,
|
63 |
-
0x300000003134b,
|
64 |
-
0x31350000323b0,
|
65 |
-
),
|
66 |
-
'Hebrew': (
|
67 |
-
0x591000005c8,
|
68 |
-
0x5d0000005eb,
|
69 |
-
0x5ef000005f5,
|
70 |
-
0xfb1d0000fb37,
|
71 |
-
0xfb380000fb3d,
|
72 |
-
0xfb3e0000fb3f,
|
73 |
-
0xfb400000fb42,
|
74 |
-
0xfb430000fb45,
|
75 |
-
0xfb460000fb50,
|
76 |
-
),
|
77 |
-
'Hiragana': (
|
78 |
-
0x304100003097,
|
79 |
-
0x309d000030a0,
|
80 |
-
0x1b0010001b120,
|
81 |
-
0x1b1320001b133,
|
82 |
-
0x1b1500001b153,
|
83 |
-
0x1f2000001f201,
|
84 |
-
),
|
85 |
-
'Katakana': (
|
86 |
-
0x30a1000030fb,
|
87 |
-
0x30fd00003100,
|
88 |
-
0x31f000003200,
|
89 |
-
0x32d0000032ff,
|
90 |
-
0x330000003358,
|
91 |
-
0xff660000ff70,
|
92 |
-
0xff710000ff9e,
|
93 |
-
0x1aff00001aff4,
|
94 |
-
0x1aff50001affc,
|
95 |
-
0x1affd0001afff,
|
96 |
-
0x1b0000001b001,
|
97 |
-
0x1b1200001b123,
|
98 |
-
0x1b1550001b156,
|
99 |
-
0x1b1640001b168,
|
100 |
-
),
|
101 |
-
}
|
102 |
-
joining_types = {
|
103 |
-
0x600: 85,
|
104 |
-
0x601: 85,
|
105 |
-
0x602: 85,
|
106 |
-
0x603: 85,
|
107 |
-
0x604: 85,
|
108 |
-
0x605: 85,
|
109 |
-
0x608: 85,
|
110 |
-
0x60b: 85,
|
111 |
-
0x620: 68,
|
112 |
-
0x621: 85,
|
113 |
-
0x622: 82,
|
114 |
-
0x623: 82,
|
115 |
-
0x624: 82,
|
116 |
-
0x625: 82,
|
117 |
-
0x626: 68,
|
118 |
-
0x627: 82,
|
119 |
-
0x628: 68,
|
120 |
-
0x629: 82,
|
121 |
-
0x62a: 68,
|
122 |
-
0x62b: 68,
|
123 |
-
0x62c: 68,
|
124 |
-
0x62d: 68,
|
125 |
-
0x62e: 68,
|
126 |
-
0x62f: 82,
|
127 |
-
0x630: 82,
|
128 |
-
0x631: 82,
|
129 |
-
0x632: 82,
|
130 |
-
0x633: 68,
|
131 |
-
0x634: 68,
|
132 |
-
0x635: 68,
|
133 |
-
0x636: 68,
|
134 |
-
0x637: 68,
|
135 |
-
0x638: 68,
|
136 |
-
0x639: 68,
|
137 |
-
0x63a: 68,
|
138 |
-
0x63b: 68,
|
139 |
-
0x63c: 68,
|
140 |
-
0x63d: 68,
|
141 |
-
0x63e: 68,
|
142 |
-
0x63f: 68,
|
143 |
-
0x640: 67,
|
144 |
-
0x641: 68,
|
145 |
-
0x642: 68,
|
146 |
-
0x643: 68,
|
147 |
-
0x644: 68,
|
148 |
-
0x645: 68,
|
149 |
-
0x646: 68,
|
150 |
-
0x647: 68,
|
151 |
-
0x648: 82,
|
152 |
-
0x649: 68,
|
153 |
-
0x64a: 68,
|
154 |
-
0x66e: 68,
|
155 |
-
0x66f: 68,
|
156 |
-
0x671: 82,
|
157 |
-
0x672: 82,
|
158 |
-
0x673: 82,
|
159 |
-
0x674: 85,
|
160 |
-
0x675: 82,
|
161 |
-
0x676: 82,
|
162 |
-
0x677: 82,
|
163 |
-
0x678: 68,
|
164 |
-
0x679: 68,
|
165 |
-
0x67a: 68,
|
166 |
-
0x67b: 68,
|
167 |
-
0x67c: 68,
|
168 |
-
0x67d: 68,
|
169 |
-
0x67e: 68,
|
170 |
-
0x67f: 68,
|
171 |
-
0x680: 68,
|
172 |
-
0x681: 68,
|
173 |
-
0x682: 68,
|
174 |
-
0x683: 68,
|
175 |
-
0x684: 68,
|
176 |
-
0x685: 68,
|
177 |
-
0x686: 68,
|
178 |
-
0x687: 68,
|
179 |
-
0x688: 82,
|
180 |
-
0x689: 82,
|
181 |
-
0x68a: 82,
|
182 |
-
0x68b: 82,
|
183 |
-
0x68c: 82,
|
184 |
-
0x68d: 82,
|
185 |
-
0x68e: 82,
|
186 |
-
0x68f: 82,
|
187 |
-
0x690: 82,
|
188 |
-
0x691: 82,
|
189 |
-
0x692: 82,
|
190 |
-
0x693: 82,
|
191 |
-
0x694: 82,
|
192 |
-
0x695: 82,
|
193 |
-
0x696: 82,
|
194 |
-
0x697: 82,
|
195 |
-
0x698: 82,
|
196 |
-
0x699: 82,
|
197 |
-
0x69a: 68,
|
198 |
-
0x69b: 68,
|
199 |
-
0x69c: 68,
|
200 |
-
0x69d: 68,
|
201 |
-
0x69e: 68,
|
202 |
-
0x69f: 68,
|
203 |
-
0x6a0: 68,
|
204 |
-
0x6a1: 68,
|
205 |
-
0x6a2: 68,
|
206 |
-
0x6a3: 68,
|
207 |
-
0x6a4: 68,
|
208 |
-
0x6a5: 68,
|
209 |
-
0x6a6: 68,
|
210 |
-
0x6a7: 68,
|
211 |
-
0x6a8: 68,
|
212 |
-
0x6a9: 68,
|
213 |
-
0x6aa: 68,
|
214 |
-
0x6ab: 68,
|
215 |
-
0x6ac: 68,
|
216 |
-
0x6ad: 68,
|
217 |
-
0x6ae: 68,
|
218 |
-
0x6af: 68,
|
219 |
-
0x6b0: 68,
|
220 |
-
0x6b1: 68,
|
221 |
-
0x6b2: 68,
|
222 |
-
0x6b3: 68,
|
223 |
-
0x6b4: 68,
|
224 |
-
0x6b5: 68,
|
225 |
-
0x6b6: 68,
|
226 |
-
0x6b7: 68,
|
227 |
-
0x6b8: 68,
|
228 |
-
0x6b9: 68,
|
229 |
-
0x6ba: 68,
|
230 |
-
0x6bb: 68,
|
231 |
-
0x6bc: 68,
|
232 |
-
0x6bd: 68,
|
233 |
-
0x6be: 68,
|
234 |
-
0x6bf: 68,
|
235 |
-
0x6c0: 82,
|
236 |
-
0x6c1: 68,
|
237 |
-
0x6c2: 68,
|
238 |
-
0x6c3: 82,
|
239 |
-
0x6c4: 82,
|
240 |
-
0x6c5: 82,
|
241 |
-
0x6c6: 82,
|
242 |
-
0x6c7: 82,
|
243 |
-
0x6c8: 82,
|
244 |
-
0x6c9: 82,
|
245 |
-
0x6ca: 82,
|
246 |
-
0x6cb: 82,
|
247 |
-
0x6cc: 68,
|
248 |
-
0x6cd: 82,
|
249 |
-
0x6ce: 68,
|
250 |
-
0x6cf: 82,
|
251 |
-
0x6d0: 68,
|
252 |
-
0x6d1: 68,
|
253 |
-
0x6d2: 82,
|
254 |
-
0x6d3: 82,
|
255 |
-
0x6d5: 82,
|
256 |
-
0x6dd: 85,
|
257 |
-
0x6ee: 82,
|
258 |
-
0x6ef: 82,
|
259 |
-
0x6fa: 68,
|
260 |
-
0x6fb: 68,
|
261 |
-
0x6fc: 68,
|
262 |
-
0x6ff: 68,
|
263 |
-
0x70f: 84,
|
264 |
-
0x710: 82,
|
265 |
-
0x712: 68,
|
266 |
-
0x713: 68,
|
267 |
-
0x714: 68,
|
268 |
-
0x715: 82,
|
269 |
-
0x716: 82,
|
270 |
-
0x717: 82,
|
271 |
-
0x718: 82,
|
272 |
-
0x719: 82,
|
273 |
-
0x71a: 68,
|
274 |
-
0x71b: 68,
|
275 |
-
0x71c: 68,
|
276 |
-
0x71d: 68,
|
277 |
-
0x71e: 82,
|
278 |
-
0x71f: 68,
|
279 |
-
0x720: 68,
|
280 |
-
0x721: 68,
|
281 |
-
0x722: 68,
|
282 |
-
0x723: 68,
|
283 |
-
0x724: 68,
|
284 |
-
0x725: 68,
|
285 |
-
0x726: 68,
|
286 |
-
0x727: 68,
|
287 |
-
0x728: 82,
|
288 |
-
0x729: 68,
|
289 |
-
0x72a: 82,
|
290 |
-
0x72b: 68,
|
291 |
-
0x72c: 82,
|
292 |
-
0x72d: 68,
|
293 |
-
0x72e: 68,
|
294 |
-
0x72f: 82,
|
295 |
-
0x74d: 82,
|
296 |
-
0x74e: 68,
|
297 |
-
0x74f: 68,
|
298 |
-
0x750: 68,
|
299 |
-
0x751: 68,
|
300 |
-
0x752: 68,
|
301 |
-
0x753: 68,
|
302 |
-
0x754: 68,
|
303 |
-
0x755: 68,
|
304 |
-
0x756: 68,
|
305 |
-
0x757: 68,
|
306 |
-
0x758: 68,
|
307 |
-
0x759: 82,
|
308 |
-
0x75a: 82,
|
309 |
-
0x75b: 82,
|
310 |
-
0x75c: 68,
|
311 |
-
0x75d: 68,
|
312 |
-
0x75e: 68,
|
313 |
-
0x75f: 68,
|
314 |
-
0x760: 68,
|
315 |
-
0x761: 68,
|
316 |
-
0x762: 68,
|
317 |
-
0x763: 68,
|
318 |
-
0x764: 68,
|
319 |
-
0x765: 68,
|
320 |
-
0x766: 68,
|
321 |
-
0x767: 68,
|
322 |
-
0x768: 68,
|
323 |
-
0x769: 68,
|
324 |
-
0x76a: 68,
|
325 |
-
0x76b: 82,
|
326 |
-
0x76c: 82,
|
327 |
-
0x76d: 68,
|
328 |
-
0x76e: 68,
|
329 |
-
0x76f: 68,
|
330 |
-
0x770: 68,
|
331 |
-
0x771: 82,
|
332 |
-
0x772: 68,
|
333 |
-
0x773: 82,
|
334 |
-
0x774: 82,
|
335 |
-
0x775: 68,
|
336 |
-
0x776: 68,
|
337 |
-
0x777: 68,
|
338 |
-
0x778: 82,
|
339 |
-
0x779: 82,
|
340 |
-
0x77a: 68,
|
341 |
-
0x77b: 68,
|
342 |
-
0x77c: 68,
|
343 |
-
0x77d: 68,
|
344 |
-
0x77e: 68,
|
345 |
-
0x77f: 68,
|
346 |
-
0x7ca: 68,
|
347 |
-
0x7cb: 68,
|
348 |
-
0x7cc: 68,
|
349 |
-
0x7cd: 68,
|
350 |
-
0x7ce: 68,
|
351 |
-
0x7cf: 68,
|
352 |
-
0x7d0: 68,
|
353 |
-
0x7d1: 68,
|
354 |
-
0x7d2: 68,
|
355 |
-
0x7d3: 68,
|
356 |
-
0x7d4: 68,
|
357 |
-
0x7d5: 68,
|
358 |
-
0x7d6: 68,
|
359 |
-
0x7d7: 68,
|
360 |
-
0x7d8: 68,
|
361 |
-
0x7d9: 68,
|
362 |
-
0x7da: 68,
|
363 |
-
0x7db: 68,
|
364 |
-
0x7dc: 68,
|
365 |
-
0x7dd: 68,
|
366 |
-
0x7de: 68,
|
367 |
-
0x7df: 68,
|
368 |
-
0x7e0: 68,
|
369 |
-
0x7e1: 68,
|
370 |
-
0x7e2: 68,
|
371 |
-
0x7e3: 68,
|
372 |
-
0x7e4: 68,
|
373 |
-
0x7e5: 68,
|
374 |
-
0x7e6: 68,
|
375 |
-
0x7e7: 68,
|
376 |
-
0x7e8: 68,
|
377 |
-
0x7e9: 68,
|
378 |
-
0x7ea: 68,
|
379 |
-
0x7fa: 67,
|
380 |
-
0x840: 82,
|
381 |
-
0x841: 68,
|
382 |
-
0x842: 68,
|
383 |
-
0x843: 68,
|
384 |
-
0x844: 68,
|
385 |
-
0x845: 68,
|
386 |
-
0x846: 82,
|
387 |
-
0x847: 82,
|
388 |
-
0x848: 68,
|
389 |
-
0x849: 82,
|
390 |
-
0x84a: 68,
|
391 |
-
0x84b: 68,
|
392 |
-
0x84c: 68,
|
393 |
-
0x84d: 68,
|
394 |
-
0x84e: 68,
|
395 |
-
0x84f: 68,
|
396 |
-
0x850: 68,
|
397 |
-
0x851: 68,
|
398 |
-
0x852: 68,
|
399 |
-
0x853: 68,
|
400 |
-
0x854: 82,
|
401 |
-
0x855: 68,
|
402 |
-
0x856: 82,
|
403 |
-
0x857: 82,
|
404 |
-
0x858: 82,
|
405 |
-
0x860: 68,
|
406 |
-
0x861: 85,
|
407 |
-
0x862: 68,
|
408 |
-
0x863: 68,
|
409 |
-
0x864: 68,
|
410 |
-
0x865: 68,
|
411 |
-
0x866: 85,
|
412 |
-
0x867: 82,
|
413 |
-
0x868: 68,
|
414 |
-
0x869: 82,
|
415 |
-
0x86a: 82,
|
416 |
-
0x870: 82,
|
417 |
-
0x871: 82,
|
418 |
-
0x872: 82,
|
419 |
-
0x873: 82,
|
420 |
-
0x874: 82,
|
421 |
-
0x875: 82,
|
422 |
-
0x876: 82,
|
423 |
-
0x877: 82,
|
424 |
-
0x878: 82,
|
425 |
-
0x879: 82,
|
426 |
-
0x87a: 82,
|
427 |
-
0x87b: 82,
|
428 |
-
0x87c: 82,
|
429 |
-
0x87d: 82,
|
430 |
-
0x87e: 82,
|
431 |
-
0x87f: 82,
|
432 |
-
0x880: 82,
|
433 |
-
0x881: 82,
|
434 |
-
0x882: 82,
|
435 |
-
0x883: 67,
|
436 |
-
0x884: 67,
|
437 |
-
0x885: 67,
|
438 |
-
0x886: 68,
|
439 |
-
0x887: 85,
|
440 |
-
0x888: 85,
|
441 |
-
0x889: 68,
|
442 |
-
0x88a: 68,
|
443 |
-
0x88b: 68,
|
444 |
-
0x88c: 68,
|
445 |
-
0x88d: 68,
|
446 |
-
0x88e: 82,
|
447 |
-
0x890: 85,
|
448 |
-
0x891: 85,
|
449 |
-
0x8a0: 68,
|
450 |
-
0x8a1: 68,
|
451 |
-
0x8a2: 68,
|
452 |
-
0x8a3: 68,
|
453 |
-
0x8a4: 68,
|
454 |
-
0x8a5: 68,
|
455 |
-
0x8a6: 68,
|
456 |
-
0x8a7: 68,
|
457 |
-
0x8a8: 68,
|
458 |
-
0x8a9: 68,
|
459 |
-
0x8aa: 82,
|
460 |
-
0x8ab: 82,
|
461 |
-
0x8ac: 82,
|
462 |
-
0x8ad: 85,
|
463 |
-
0x8ae: 82,
|
464 |
-
0x8af: 68,
|
465 |
-
0x8b0: 68,
|
466 |
-
0x8b1: 82,
|
467 |
-
0x8b2: 82,
|
468 |
-
0x8b3: 68,
|
469 |
-
0x8b4: 68,
|
470 |
-
0x8b5: 68,
|
471 |
-
0x8b6: 68,
|
472 |
-
0x8b7: 68,
|
473 |
-
0x8b8: 68,
|
474 |
-
0x8b9: 82,
|
475 |
-
0x8ba: 68,
|
476 |
-
0x8bb: 68,
|
477 |
-
0x8bc: 68,
|
478 |
-
0x8bd: 68,
|
479 |
-
0x8be: 68,
|
480 |
-
0x8bf: 68,
|
481 |
-
0x8c0: 68,
|
482 |
-
0x8c1: 68,
|
483 |
-
0x8c2: 68,
|
484 |
-
0x8c3: 68,
|
485 |
-
0x8c4: 68,
|
486 |
-
0x8c5: 68,
|
487 |
-
0x8c6: 68,
|
488 |
-
0x8c7: 68,
|
489 |
-
0x8c8: 68,
|
490 |
-
0x8e2: 85,
|
491 |
-
0x1806: 85,
|
492 |
-
0x1807: 68,
|
493 |
-
0x180a: 67,
|
494 |
-
0x180e: 85,
|
495 |
-
0x1820: 68,
|
496 |
-
0x1821: 68,
|
497 |
-
0x1822: 68,
|
498 |
-
0x1823: 68,
|
499 |
-
0x1824: 68,
|
500 |
-
0x1825: 68,
|
501 |
-
0x1826: 68,
|
502 |
-
0x1827: 68,
|
503 |
-
0x1828: 68,
|
504 |
-
0x1829: 68,
|
505 |
-
0x182a: 68,
|
506 |
-
0x182b: 68,
|
507 |
-
0x182c: 68,
|
508 |
-
0x182d: 68,
|
509 |
-
0x182e: 68,
|
510 |
-
0x182f: 68,
|
511 |
-
0x1830: 68,
|
512 |
-
0x1831: 68,
|
513 |
-
0x1832: 68,
|
514 |
-
0x1833: 68,
|
515 |
-
0x1834: 68,
|
516 |
-
0x1835: 68,
|
517 |
-
0x1836: 68,
|
518 |
-
0x1837: 68,
|
519 |
-
0x1838: 68,
|
520 |
-
0x1839: 68,
|
521 |
-
0x183a: 68,
|
522 |
-
0x183b: 68,
|
523 |
-
0x183c: 68,
|
524 |
-
0x183d: 68,
|
525 |
-
0x183e: 68,
|
526 |
-
0x183f: 68,
|
527 |
-
0x1840: 68,
|
528 |
-
0x1841: 68,
|
529 |
-
0x1842: 68,
|
530 |
-
0x1843: 68,
|
531 |
-
0x1844: 68,
|
532 |
-
0x1845: 68,
|
533 |
-
0x1846: 68,
|
534 |
-
0x1847: 68,
|
535 |
-
0x1848: 68,
|
536 |
-
0x1849: 68,
|
537 |
-
0x184a: 68,
|
538 |
-
0x184b: 68,
|
539 |
-
0x184c: 68,
|
540 |
-
0x184d: 68,
|
541 |
-
0x184e: 68,
|
542 |
-
0x184f: 68,
|
543 |
-
0x1850: 68,
|
544 |
-
0x1851: 68,
|
545 |
-
0x1852: 68,
|
546 |
-
0x1853: 68,
|
547 |
-
0x1854: 68,
|
548 |
-
0x1855: 68,
|
549 |
-
0x1856: 68,
|
550 |
-
0x1857: 68,
|
551 |
-
0x1858: 68,
|
552 |
-
0x1859: 68,
|
553 |
-
0x185a: 68,
|
554 |
-
0x185b: 68,
|
555 |
-
0x185c: 68,
|
556 |
-
0x185d: 68,
|
557 |
-
0x185e: 68,
|
558 |
-
0x185f: 68,
|
559 |
-
0x1860: 68,
|
560 |
-
0x1861: 68,
|
561 |
-
0x1862: 68,
|
562 |
-
0x1863: 68,
|
563 |
-
0x1864: 68,
|
564 |
-
0x1865: 68,
|
565 |
-
0x1866: 68,
|
566 |
-
0x1867: 68,
|
567 |
-
0x1868: 68,
|
568 |
-
0x1869: 68,
|
569 |
-
0x186a: 68,
|
570 |
-
0x186b: 68,
|
571 |
-
0x186c: 68,
|
572 |
-
0x186d: 68,
|
573 |
-
0x186e: 68,
|
574 |
-
0x186f: 68,
|
575 |
-
0x1870: 68,
|
576 |
-
0x1871: 68,
|
577 |
-
0x1872: 68,
|
578 |
-
0x1873: 68,
|
579 |
-
0x1874: 68,
|
580 |
-
0x1875: 68,
|
581 |
-
0x1876: 68,
|
582 |
-
0x1877: 68,
|
583 |
-
0x1878: 68,
|
584 |
-
0x1880: 85,
|
585 |
-
0x1881: 85,
|
586 |
-
0x1882: 85,
|
587 |
-
0x1883: 85,
|
588 |
-
0x1884: 85,
|
589 |
-
0x1885: 84,
|
590 |
-
0x1886: 84,
|
591 |
-
0x1887: 68,
|
592 |
-
0x1888: 68,
|
593 |
-
0x1889: 68,
|
594 |
-
0x188a: 68,
|
595 |
-
0x188b: 68,
|
596 |
-
0x188c: 68,
|
597 |
-
0x188d: 68,
|
598 |
-
0x188e: 68,
|
599 |
-
0x188f: 68,
|
600 |
-
0x1890: 68,
|
601 |
-
0x1891: 68,
|
602 |
-
0x1892: 68,
|
603 |
-
0x1893: 68,
|
604 |
-
0x1894: 68,
|
605 |
-
0x1895: 68,
|
606 |
-
0x1896: 68,
|
607 |
-
0x1897: 68,
|
608 |
-
0x1898: 68,
|
609 |
-
0x1899: 68,
|
610 |
-
0x189a: 68,
|
611 |
-
0x189b: 68,
|
612 |
-
0x189c: 68,
|
613 |
-
0x189d: 68,
|
614 |
-
0x189e: 68,
|
615 |
-
0x189f: 68,
|
616 |
-
0x18a0: 68,
|
617 |
-
0x18a1: 68,
|
618 |
-
0x18a2: 68,
|
619 |
-
0x18a3: 68,
|
620 |
-
0x18a4: 68,
|
621 |
-
0x18a5: 68,
|
622 |
-
0x18a6: 68,
|
623 |
-
0x18a7: 68,
|
624 |
-
0x18a8: 68,
|
625 |
-
0x18aa: 68,
|
626 |
-
0x200c: 85,
|
627 |
-
0x200d: 67,
|
628 |
-
0x202f: 85,
|
629 |
-
0x2066: 85,
|
630 |
-
0x2067: 85,
|
631 |
-
0x2068: 85,
|
632 |
-
0x2069: 85,
|
633 |
-
0xa840: 68,
|
634 |
-
0xa841: 68,
|
635 |
-
0xa842: 68,
|
636 |
-
0xa843: 68,
|
637 |
-
0xa844: 68,
|
638 |
-
0xa845: 68,
|
639 |
-
0xa846: 68,
|
640 |
-
0xa847: 68,
|
641 |
-
0xa848: 68,
|
642 |
-
0xa849: 68,
|
643 |
-
0xa84a: 68,
|
644 |
-
0xa84b: 68,
|
645 |
-
0xa84c: 68,
|
646 |
-
0xa84d: 68,
|
647 |
-
0xa84e: 68,
|
648 |
-
0xa84f: 68,
|
649 |
-
0xa850: 68,
|
650 |
-
0xa851: 68,
|
651 |
-
0xa852: 68,
|
652 |
-
0xa853: 68,
|
653 |
-
0xa854: 68,
|
654 |
-
0xa855: 68,
|
655 |
-
0xa856: 68,
|
656 |
-
0xa857: 68,
|
657 |
-
0xa858: 68,
|
658 |
-
0xa859: 68,
|
659 |
-
0xa85a: 68,
|
660 |
-
0xa85b: 68,
|
661 |
-
0xa85c: 68,
|
662 |
-
0xa85d: 68,
|
663 |
-
0xa85e: 68,
|
664 |
-
0xa85f: 68,
|
665 |
-
0xa860: 68,
|
666 |
-
0xa861: 68,
|
667 |
-
0xa862: 68,
|
668 |
-
0xa863: 68,
|
669 |
-
0xa864: 68,
|
670 |
-
0xa865: 68,
|
671 |
-
0xa866: 68,
|
672 |
-
0xa867: 68,
|
673 |
-
0xa868: 68,
|
674 |
-
0xa869: 68,
|
675 |
-
0xa86a: 68,
|
676 |
-
0xa86b: 68,
|
677 |
-
0xa86c: 68,
|
678 |
-
0xa86d: 68,
|
679 |
-
0xa86e: 68,
|
680 |
-
0xa86f: 68,
|
681 |
-
0xa870: 68,
|
682 |
-
0xa871: 68,
|
683 |
-
0xa872: 76,
|
684 |
-
0xa873: 85,
|
685 |
-
0x10ac0: 68,
|
686 |
-
0x10ac1: 68,
|
687 |
-
0x10ac2: 68,
|
688 |
-
0x10ac3: 68,
|
689 |
-
0x10ac4: 68,
|
690 |
-
0x10ac5: 82,
|
691 |
-
0x10ac6: 85,
|
692 |
-
0x10ac7: 82,
|
693 |
-
0x10ac8: 85,
|
694 |
-
0x10ac9: 82,
|
695 |
-
0x10aca: 82,
|
696 |
-
0x10acb: 85,
|
697 |
-
0x10acc: 85,
|
698 |
-
0x10acd: 76,
|
699 |
-
0x10ace: 82,
|
700 |
-
0x10acf: 82,
|
701 |
-
0x10ad0: 82,
|
702 |
-
0x10ad1: 82,
|
703 |
-
0x10ad2: 82,
|
704 |
-
0x10ad3: 68,
|
705 |
-
0x10ad4: 68,
|
706 |
-
0x10ad5: 68,
|
707 |
-
0x10ad6: 68,
|
708 |
-
0x10ad7: 76,
|
709 |
-
0x10ad8: 68,
|
710 |
-
0x10ad9: 68,
|
711 |
-
0x10ada: 68,
|
712 |
-
0x10adb: 68,
|
713 |
-
0x10adc: 68,
|
714 |
-
0x10add: 82,
|
715 |
-
0x10ade: 68,
|
716 |
-
0x10adf: 68,
|
717 |
-
0x10ae0: 68,
|
718 |
-
0x10ae1: 82,
|
719 |
-
0x10ae2: 85,
|
720 |
-
0x10ae3: 85,
|
721 |
-
0x10ae4: 82,
|
722 |
-
0x10aeb: 68,
|
723 |
-
0x10aec: 68,
|
724 |
-
0x10aed: 68,
|
725 |
-
0x10aee: 68,
|
726 |
-
0x10aef: 82,
|
727 |
-
0x10b80: 68,
|
728 |
-
0x10b81: 82,
|
729 |
-
0x10b82: 68,
|
730 |
-
0x10b83: 82,
|
731 |
-
0x10b84: 82,
|
732 |
-
0x10b85: 82,
|
733 |
-
0x10b86: 68,
|
734 |
-
0x10b87: 68,
|
735 |
-
0x10b88: 68,
|
736 |
-
0x10b89: 82,
|
737 |
-
0x10b8a: 68,
|
738 |
-
0x10b8b: 68,
|
739 |
-
0x10b8c: 82,
|
740 |
-
0x10b8d: 68,
|
741 |
-
0x10b8e: 82,
|
742 |
-
0x10b8f: 82,
|
743 |
-
0x10b90: 68,
|
744 |
-
0x10b91: 82,
|
745 |
-
0x10ba9: 82,
|
746 |
-
0x10baa: 82,
|
747 |
-
0x10bab: 82,
|
748 |
-
0x10bac: 82,
|
749 |
-
0x10bad: 68,
|
750 |
-
0x10bae: 68,
|
751 |
-
0x10baf: 85,
|
752 |
-
0x10d00: 76,
|
753 |
-
0x10d01: 68,
|
754 |
-
0x10d02: 68,
|
755 |
-
0x10d03: 68,
|
756 |
-
0x10d04: 68,
|
757 |
-
0x10d05: 68,
|
758 |
-
0x10d06: 68,
|
759 |
-
0x10d07: 68,
|
760 |
-
0x10d08: 68,
|
761 |
-
0x10d09: 68,
|
762 |
-
0x10d0a: 68,
|
763 |
-
0x10d0b: 68,
|
764 |
-
0x10d0c: 68,
|
765 |
-
0x10d0d: 68,
|
766 |
-
0x10d0e: 68,
|
767 |
-
0x10d0f: 68,
|
768 |
-
0x10d10: 68,
|
769 |
-
0x10d11: 68,
|
770 |
-
0x10d12: 68,
|
771 |
-
0x10d13: 68,
|
772 |
-
0x10d14: 68,
|
773 |
-
0x10d15: 68,
|
774 |
-
0x10d16: 68,
|
775 |
-
0x10d17: 68,
|
776 |
-
0x10d18: 68,
|
777 |
-
0x10d19: 68,
|
778 |
-
0x10d1a: 68,
|
779 |
-
0x10d1b: 68,
|
780 |
-
0x10d1c: 68,
|
781 |
-
0x10d1d: 68,
|
782 |
-
0x10d1e: 68,
|
783 |
-
0x10d1f: 68,
|
784 |
-
0x10d20: 68,
|
785 |
-
0x10d21: 68,
|
786 |
-
0x10d22: 82,
|
787 |
-
0x10d23: 68,
|
788 |
-
0x10f30: 68,
|
789 |
-
0x10f31: 68,
|
790 |
-
0x10f32: 68,
|
791 |
-
0x10f33: 82,
|
792 |
-
0x10f34: 68,
|
793 |
-
0x10f35: 68,
|
794 |
-
0x10f36: 68,
|
795 |
-
0x10f37: 68,
|
796 |
-
0x10f38: 68,
|
797 |
-
0x10f39: 68,
|
798 |
-
0x10f3a: 68,
|
799 |
-
0x10f3b: 68,
|
800 |
-
0x10f3c: 68,
|
801 |
-
0x10f3d: 68,
|
802 |
-
0x10f3e: 68,
|
803 |
-
0x10f3f: 68,
|
804 |
-
0x10f40: 68,
|
805 |
-
0x10f41: 68,
|
806 |
-
0x10f42: 68,
|
807 |
-
0x10f43: 68,
|
808 |
-
0x10f44: 68,
|
809 |
-
0x10f45: 85,
|
810 |
-
0x10f51: 68,
|
811 |
-
0x10f52: 68,
|
812 |
-
0x10f53: 68,
|
813 |
-
0x10f54: 82,
|
814 |
-
0x10f70: 68,
|
815 |
-
0x10f71: 68,
|
816 |
-
0x10f72: 68,
|
817 |
-
0x10f73: 68,
|
818 |
-
0x10f74: 82,
|
819 |
-
0x10f75: 82,
|
820 |
-
0x10f76: 68,
|
821 |
-
0x10f77: 68,
|
822 |
-
0x10f78: 68,
|
823 |
-
0x10f79: 68,
|
824 |
-
0x10f7a: 68,
|
825 |
-
0x10f7b: 68,
|
826 |
-
0x10f7c: 68,
|
827 |
-
0x10f7d: 68,
|
828 |
-
0x10f7e: 68,
|
829 |
-
0x10f7f: 68,
|
830 |
-
0x10f80: 68,
|
831 |
-
0x10f81: 68,
|
832 |
-
0x10fb0: 68,
|
833 |
-
0x10fb1: 85,
|
834 |
-
0x10fb2: 68,
|
835 |
-
0x10fb3: 68,
|
836 |
-
0x10fb4: 82,
|
837 |
-
0x10fb5: 82,
|
838 |
-
0x10fb6: 82,
|
839 |
-
0x10fb7: 85,
|
840 |
-
0x10fb8: 68,
|
841 |
-
0x10fb9: 82,
|
842 |
-
0x10fba: 82,
|
843 |
-
0x10fbb: 68,
|
844 |
-
0x10fbc: 68,
|
845 |
-
0x10fbd: 82,
|
846 |
-
0x10fbe: 68,
|
847 |
-
0x10fbf: 68,
|
848 |
-
0x10fc0: 85,
|
849 |
-
0x10fc1: 68,
|
850 |
-
0x10fc2: 82,
|
851 |
-
0x10fc3: 82,
|
852 |
-
0x10fc4: 68,
|
853 |
-
0x10fc5: 85,
|
854 |
-
0x10fc6: 85,
|
855 |
-
0x10fc7: 85,
|
856 |
-
0x10fc8: 85,
|
857 |
-
0x10fc9: 82,
|
858 |
-
0x10fca: 68,
|
859 |
-
0x10fcb: 76,
|
860 |
-
0x110bd: 85,
|
861 |
-
0x110cd: 85,
|
862 |
-
0x1e900: 68,
|
863 |
-
0x1e901: 68,
|
864 |
-
0x1e902: 68,
|
865 |
-
0x1e903: 68,
|
866 |
-
0x1e904: 68,
|
867 |
-
0x1e905: 68,
|
868 |
-
0x1e906: 68,
|
869 |
-
0x1e907: 68,
|
870 |
-
0x1e908: 68,
|
871 |
-
0x1e909: 68,
|
872 |
-
0x1e90a: 68,
|
873 |
-
0x1e90b: 68,
|
874 |
-
0x1e90c: 68,
|
875 |
-
0x1e90d: 68,
|
876 |
-
0x1e90e: 68,
|
877 |
-
0x1e90f: 68,
|
878 |
-
0x1e910: 68,
|
879 |
-
0x1e911: 68,
|
880 |
-
0x1e912: 68,
|
881 |
-
0x1e913: 68,
|
882 |
-
0x1e914: 68,
|
883 |
-
0x1e915: 68,
|
884 |
-
0x1e916: 68,
|
885 |
-
0x1e917: 68,
|
886 |
-
0x1e918: 68,
|
887 |
-
0x1e919: 68,
|
888 |
-
0x1e91a: 68,
|
889 |
-
0x1e91b: 68,
|
890 |
-
0x1e91c: 68,
|
891 |
-
0x1e91d: 68,
|
892 |
-
0x1e91e: 68,
|
893 |
-
0x1e91f: 68,
|
894 |
-
0x1e920: 68,
|
895 |
-
0x1e921: 68,
|
896 |
-
0x1e922: 68,
|
897 |
-
0x1e923: 68,
|
898 |
-
0x1e924: 68,
|
899 |
-
0x1e925: 68,
|
900 |
-
0x1e926: 68,
|
901 |
-
0x1e927: 68,
|
902 |
-
0x1e928: 68,
|
903 |
-
0x1e929: 68,
|
904 |
-
0x1e92a: 68,
|
905 |
-
0x1e92b: 68,
|
906 |
-
0x1e92c: 68,
|
907 |
-
0x1e92d: 68,
|
908 |
-
0x1e92e: 68,
|
909 |
-
0x1e92f: 68,
|
910 |
-
0x1e930: 68,
|
911 |
-
0x1e931: 68,
|
912 |
-
0x1e932: 68,
|
913 |
-
0x1e933: 68,
|
914 |
-
0x1e934: 68,
|
915 |
-
0x1e935: 68,
|
916 |
-
0x1e936: 68,
|
917 |
-
0x1e937: 68,
|
918 |
-
0x1e938: 68,
|
919 |
-
0x1e939: 68,
|
920 |
-
0x1e93a: 68,
|
921 |
-
0x1e93b: 68,
|
922 |
-
0x1e93c: 68,
|
923 |
-
0x1e93d: 68,
|
924 |
-
0x1e93e: 68,
|
925 |
-
0x1e93f: 68,
|
926 |
-
0x1e940: 68,
|
927 |
-
0x1e941: 68,
|
928 |
-
0x1e942: 68,
|
929 |
-
0x1e943: 68,
|
930 |
-
0x1e94b: 84,
|
931 |
-
}
|
932 |
-
codepoint_classes = {
|
933 |
-
'PVALID': (
|
934 |
-
0x2d0000002e,
|
935 |
-
0x300000003a,
|
936 |
-
0x610000007b,
|
937 |
-
0xdf000000f7,
|
938 |
-
0xf800000100,
|
939 |
-
0x10100000102,
|
940 |
-
0x10300000104,
|
941 |
-
0x10500000106,
|
942 |
-
0x10700000108,
|
943 |
-
0x1090000010a,
|
944 |
-
0x10b0000010c,
|
945 |
-
0x10d0000010e,
|
946 |
-
0x10f00000110,
|
947 |
-
0x11100000112,
|
948 |
-
0x11300000114,
|
949 |
-
0x11500000116,
|
950 |
-
0x11700000118,
|
951 |
-
0x1190000011a,
|
952 |
-
0x11b0000011c,
|
953 |
-
0x11d0000011e,
|
954 |
-
0x11f00000120,
|
955 |
-
0x12100000122,
|
956 |
-
0x12300000124,
|
957 |
-
0x12500000126,
|
958 |
-
0x12700000128,
|
959 |
-
0x1290000012a,
|
960 |
-
0x12b0000012c,
|
961 |
-
0x12d0000012e,
|
962 |
-
0x12f00000130,
|
963 |
-
0x13100000132,
|
964 |
-
0x13500000136,
|
965 |
-
0x13700000139,
|
966 |
-
0x13a0000013b,
|
967 |
-
0x13c0000013d,
|
968 |
-
0x13e0000013f,
|
969 |
-
0x14200000143,
|
970 |
-
0x14400000145,
|
971 |
-
0x14600000147,
|
972 |
-
0x14800000149,
|
973 |
-
0x14b0000014c,
|
974 |
-
0x14d0000014e,
|
975 |
-
0x14f00000150,
|
976 |
-
0x15100000152,
|
977 |
-
0x15300000154,
|
978 |
-
0x15500000156,
|
979 |
-
0x15700000158,
|
980 |
-
0x1590000015a,
|
981 |
-
0x15b0000015c,
|
982 |
-
0x15d0000015e,
|
983 |
-
0x15f00000160,
|
984 |
-
0x16100000162,
|
985 |
-
0x16300000164,
|
986 |
-
0x16500000166,
|
987 |
-
0x16700000168,
|
988 |
-
0x1690000016a,
|
989 |
-
0x16b0000016c,
|
990 |
-
0x16d0000016e,
|
991 |
-
0x16f00000170,
|
992 |
-
0x17100000172,
|
993 |
-
0x17300000174,
|
994 |
-
0x17500000176,
|
995 |
-
0x17700000178,
|
996 |
-
0x17a0000017b,
|
997 |
-
0x17c0000017d,
|
998 |
-
0x17e0000017f,
|
999 |
-
0x18000000181,
|
1000 |
-
0x18300000184,
|
1001 |
-
0x18500000186,
|
1002 |
-
0x18800000189,
|
1003 |
-
0x18c0000018e,
|
1004 |
-
0x19200000193,
|
1005 |
-
0x19500000196,
|
1006 |
-
0x1990000019c,
|
1007 |
-
0x19e0000019f,
|
1008 |
-
0x1a1000001a2,
|
1009 |
-
0x1a3000001a4,
|
1010 |
-
0x1a5000001a6,
|
1011 |
-
0x1a8000001a9,
|
1012 |
-
0x1aa000001ac,
|
1013 |
-
0x1ad000001ae,
|
1014 |
-
0x1b0000001b1,
|
1015 |
-
0x1b4000001b5,
|
1016 |
-
0x1b6000001b7,
|
1017 |
-
0x1b9000001bc,
|
1018 |
-
0x1bd000001c4,
|
1019 |
-
0x1ce000001cf,
|
1020 |
-
0x1d0000001d1,
|
1021 |
-
0x1d2000001d3,
|
1022 |
-
0x1d4000001d5,
|
1023 |
-
0x1d6000001d7,
|
1024 |
-
0x1d8000001d9,
|
1025 |
-
0x1da000001db,
|
1026 |
-
0x1dc000001de,
|
1027 |
-
0x1df000001e0,
|
1028 |
-
0x1e1000001e2,
|
1029 |
-
0x1e3000001e4,
|
1030 |
-
0x1e5000001e6,
|
1031 |
-
0x1e7000001e8,
|
1032 |
-
0x1e9000001ea,
|
1033 |
-
0x1eb000001ec,
|
1034 |
-
0x1ed000001ee,
|
1035 |
-
0x1ef000001f1,
|
1036 |
-
0x1f5000001f6,
|
1037 |
-
0x1f9000001fa,
|
1038 |
-
0x1fb000001fc,
|
1039 |
-
0x1fd000001fe,
|
1040 |
-
0x1ff00000200,
|
1041 |
-
0x20100000202,
|
1042 |
-
0x20300000204,
|
1043 |
-
0x20500000206,
|
1044 |
-
0x20700000208,
|
1045 |
-
0x2090000020a,
|
1046 |
-
0x20b0000020c,
|
1047 |
-
0x20d0000020e,
|
1048 |
-
0x20f00000210,
|
1049 |
-
0x21100000212,
|
1050 |
-
0x21300000214,
|
1051 |
-
0x21500000216,
|
1052 |
-
0x21700000218,
|
1053 |
-
0x2190000021a,
|
1054 |
-
0x21b0000021c,
|
1055 |
-
0x21d0000021e,
|
1056 |
-
0x21f00000220,
|
1057 |
-
0x22100000222,
|
1058 |
-
0x22300000224,
|
1059 |
-
0x22500000226,
|
1060 |
-
0x22700000228,
|
1061 |
-
0x2290000022a,
|
1062 |
-
0x22b0000022c,
|
1063 |
-
0x22d0000022e,
|
1064 |
-
0x22f00000230,
|
1065 |
-
0x23100000232,
|
1066 |
-
0x2330000023a,
|
1067 |
-
0x23c0000023d,
|
1068 |
-
0x23f00000241,
|
1069 |
-
0x24200000243,
|
1070 |
-
0x24700000248,
|
1071 |
-
0x2490000024a,
|
1072 |
-
0x24b0000024c,
|
1073 |
-
0x24d0000024e,
|
1074 |
-
0x24f000002b0,
|
1075 |
-
0x2b9000002c2,
|
1076 |
-
0x2c6000002d2,
|
1077 |
-
0x2ec000002ed,
|
1078 |
-
0x2ee000002ef,
|
1079 |
-
0x30000000340,
|
1080 |
-
0x34200000343,
|
1081 |
-
0x3460000034f,
|
1082 |
-
0x35000000370,
|
1083 |
-
0x37100000372,
|
1084 |
-
0x37300000374,
|
1085 |
-
0x37700000378,
|
1086 |
-
0x37b0000037e,
|
1087 |
-
0x39000000391,
|
1088 |
-
0x3ac000003cf,
|
1089 |
-
0x3d7000003d8,
|
1090 |
-
0x3d9000003da,
|
1091 |
-
0x3db000003dc,
|
1092 |
-
0x3dd000003de,
|
1093 |
-
0x3df000003e0,
|
1094 |
-
0x3e1000003e2,
|
1095 |
-
0x3e3000003e4,
|
1096 |
-
0x3e5000003e6,
|
1097 |
-
0x3e7000003e8,
|
1098 |
-
0x3e9000003ea,
|
1099 |
-
0x3eb000003ec,
|
1100 |
-
0x3ed000003ee,
|
1101 |
-
0x3ef000003f0,
|
1102 |
-
0x3f3000003f4,
|
1103 |
-
0x3f8000003f9,
|
1104 |
-
0x3fb000003fd,
|
1105 |
-
0x43000000460,
|
1106 |
-
0x46100000462,
|
1107 |
-
0x46300000464,
|
1108 |
-
0x46500000466,
|
1109 |
-
0x46700000468,
|
1110 |
-
0x4690000046a,
|
1111 |
-
0x46b0000046c,
|
1112 |
-
0x46d0000046e,
|
1113 |
-
0x46f00000470,
|
1114 |
-
0x47100000472,
|
1115 |
-
0x47300000474,
|
1116 |
-
0x47500000476,
|
1117 |
-
0x47700000478,
|
1118 |
-
0x4790000047a,
|
1119 |
-
0x47b0000047c,
|
1120 |
-
0x47d0000047e,
|
1121 |
-
0x47f00000480,
|
1122 |
-
0x48100000482,
|
1123 |
-
0x48300000488,
|
1124 |
-
0x48b0000048c,
|
1125 |
-
0x48d0000048e,
|
1126 |
-
0x48f00000490,
|
1127 |
-
0x49100000492,
|
1128 |
-
0x49300000494,
|
1129 |
-
0x49500000496,
|
1130 |
-
0x49700000498,
|
1131 |
-
0x4990000049a,
|
1132 |
-
0x49b0000049c,
|
1133 |
-
0x49d0000049e,
|
1134 |
-
0x49f000004a0,
|
1135 |
-
0x4a1000004a2,
|
1136 |
-
0x4a3000004a4,
|
1137 |
-
0x4a5000004a6,
|
1138 |
-
0x4a7000004a8,
|
1139 |
-
0x4a9000004aa,
|
1140 |
-
0x4ab000004ac,
|
1141 |
-
0x4ad000004ae,
|
1142 |
-
0x4af000004b0,
|
1143 |
-
0x4b1000004b2,
|
1144 |
-
0x4b3000004b4,
|
1145 |
-
0x4b5000004b6,
|
1146 |
-
0x4b7000004b8,
|
1147 |
-
0x4b9000004ba,
|
1148 |
-
0x4bb000004bc,
|
1149 |
-
0x4bd000004be,
|
1150 |
-
0x4bf000004c0,
|
1151 |
-
0x4c2000004c3,
|
1152 |
-
0x4c4000004c5,
|
1153 |
-
0x4c6000004c7,
|
1154 |
-
0x4c8000004c9,
|
1155 |
-
0x4ca000004cb,
|
1156 |
-
0x4cc000004cd,
|
1157 |
-
0x4ce000004d0,
|
1158 |
-
0x4d1000004d2,
|
1159 |
-
0x4d3000004d4,
|
1160 |
-
0x4d5000004d6,
|
1161 |
-
0x4d7000004d8,
|
1162 |
-
0x4d9000004da,
|
1163 |
-
0x4db000004dc,
|
1164 |
-
0x4dd000004de,
|
1165 |
-
0x4df000004e0,
|
1166 |
-
0x4e1000004e2,
|
1167 |
-
0x4e3000004e4,
|
1168 |
-
0x4e5000004e6,
|
1169 |
-
0x4e7000004e8,
|
1170 |
-
0x4e9000004ea,
|
1171 |
-
0x4eb000004ec,
|
1172 |
-
0x4ed000004ee,
|
1173 |
-
0x4ef000004f0,
|
1174 |
-
0x4f1000004f2,
|
1175 |
-
0x4f3000004f4,
|
1176 |
-
0x4f5000004f6,
|
1177 |
-
0x4f7000004f8,
|
1178 |
-
0x4f9000004fa,
|
1179 |
-
0x4fb000004fc,
|
1180 |
-
0x4fd000004fe,
|
1181 |
-
0x4ff00000500,
|
1182 |
-
0x50100000502,
|
1183 |
-
0x50300000504,
|
1184 |
-
0x50500000506,
|
1185 |
-
0x50700000508,
|
1186 |
-
0x5090000050a,
|
1187 |
-
0x50b0000050c,
|
1188 |
-
0x50d0000050e,
|
1189 |
-
0x50f00000510,
|
1190 |
-
0x51100000512,
|
1191 |
-
0x51300000514,
|
1192 |
-
0x51500000516,
|
1193 |
-
0x51700000518,
|
1194 |
-
0x5190000051a,
|
1195 |
-
0x51b0000051c,
|
1196 |
-
0x51d0000051e,
|
1197 |
-
0x51f00000520,
|
1198 |
-
0x52100000522,
|
1199 |
-
0x52300000524,
|
1200 |
-
0x52500000526,
|
1201 |
-
0x52700000528,
|
1202 |
-
0x5290000052a,
|
1203 |
-
0x52b0000052c,
|
1204 |
-
0x52d0000052e,
|
1205 |
-
0x52f00000530,
|
1206 |
-
0x5590000055a,
|
1207 |
-
0x56000000587,
|
1208 |
-
0x58800000589,
|
1209 |
-
0x591000005be,
|
1210 |
-
0x5bf000005c0,
|
1211 |
-
0x5c1000005c3,
|
1212 |
-
0x5c4000005c6,
|
1213 |
-
0x5c7000005c8,
|
1214 |
-
0x5d0000005eb,
|
1215 |
-
0x5ef000005f3,
|
1216 |
-
0x6100000061b,
|
1217 |
-
0x62000000640,
|
1218 |
-
0x64100000660,
|
1219 |
-
0x66e00000675,
|
1220 |
-
0x679000006d4,
|
1221 |
-
0x6d5000006dd,
|
1222 |
-
0x6df000006e9,
|
1223 |
-
0x6ea000006f0,
|
1224 |
-
0x6fa00000700,
|
1225 |
-
0x7100000074b,
|
1226 |
-
0x74d000007b2,
|
1227 |
-
0x7c0000007f6,
|
1228 |
-
0x7fd000007fe,
|
1229 |
-
0x8000000082e,
|
1230 |
-
0x8400000085c,
|
1231 |
-
0x8600000086b,
|
1232 |
-
0x87000000888,
|
1233 |
-
0x8890000088f,
|
1234 |
-
0x898000008e2,
|
1235 |
-
0x8e300000958,
|
1236 |
-
0x96000000964,
|
1237 |
-
0x96600000970,
|
1238 |
-
0x97100000984,
|
1239 |
-
0x9850000098d,
|
1240 |
-
0x98f00000991,
|
1241 |
-
0x993000009a9,
|
1242 |
-
0x9aa000009b1,
|
1243 |
-
0x9b2000009b3,
|
1244 |
-
0x9b6000009ba,
|
1245 |
-
0x9bc000009c5,
|
1246 |
-
0x9c7000009c9,
|
1247 |
-
0x9cb000009cf,
|
1248 |
-
0x9d7000009d8,
|
1249 |
-
0x9e0000009e4,
|
1250 |
-
0x9e6000009f2,
|
1251 |
-
0x9fc000009fd,
|
1252 |
-
0x9fe000009ff,
|
1253 |
-
0xa0100000a04,
|
1254 |
-
0xa0500000a0b,
|
1255 |
-
0xa0f00000a11,
|
1256 |
-
0xa1300000a29,
|
1257 |
-
0xa2a00000a31,
|
1258 |
-
0xa3200000a33,
|
1259 |
-
0xa3500000a36,
|
1260 |
-
0xa3800000a3a,
|
1261 |
-
0xa3c00000a3d,
|
1262 |
-
0xa3e00000a43,
|
1263 |
-
0xa4700000a49,
|
1264 |
-
0xa4b00000a4e,
|
1265 |
-
0xa5100000a52,
|
1266 |
-
0xa5c00000a5d,
|
1267 |
-
0xa6600000a76,
|
1268 |
-
0xa8100000a84,
|
1269 |
-
0xa8500000a8e,
|
1270 |
-
0xa8f00000a92,
|
1271 |
-
0xa9300000aa9,
|
1272 |
-
0xaaa00000ab1,
|
1273 |
-
0xab200000ab4,
|
1274 |
-
0xab500000aba,
|
1275 |
-
0xabc00000ac6,
|
1276 |
-
0xac700000aca,
|
1277 |
-
0xacb00000ace,
|
1278 |
-
0xad000000ad1,
|
1279 |
-
0xae000000ae4,
|
1280 |
-
0xae600000af0,
|
1281 |
-
0xaf900000b00,
|
1282 |
-
0xb0100000b04,
|
1283 |
-
0xb0500000b0d,
|
1284 |
-
0xb0f00000b11,
|
1285 |
-
0xb1300000b29,
|
1286 |
-
0xb2a00000b31,
|
1287 |
-
0xb3200000b34,
|
1288 |
-
0xb3500000b3a,
|
1289 |
-
0xb3c00000b45,
|
1290 |
-
0xb4700000b49,
|
1291 |
-
0xb4b00000b4e,
|
1292 |
-
0xb5500000b58,
|
1293 |
-
0xb5f00000b64,
|
1294 |
-
0xb6600000b70,
|
1295 |
-
0xb7100000b72,
|
1296 |
-
0xb8200000b84,
|
1297 |
-
0xb8500000b8b,
|
1298 |
-
0xb8e00000b91,
|
1299 |
-
0xb9200000b96,
|
1300 |
-
0xb9900000b9b,
|
1301 |
-
0xb9c00000b9d,
|
1302 |
-
0xb9e00000ba0,
|
1303 |
-
0xba300000ba5,
|
1304 |
-
0xba800000bab,
|
1305 |
-
0xbae00000bba,
|
1306 |
-
0xbbe00000bc3,
|
1307 |
-
0xbc600000bc9,
|
1308 |
-
0xbca00000bce,
|
1309 |
-
0xbd000000bd1,
|
1310 |
-
0xbd700000bd8,
|
1311 |
-
0xbe600000bf0,
|
1312 |
-
0xc0000000c0d,
|
1313 |
-
0xc0e00000c11,
|
1314 |
-
0xc1200000c29,
|
1315 |
-
0xc2a00000c3a,
|
1316 |
-
0xc3c00000c45,
|
1317 |
-
0xc4600000c49,
|
1318 |
-
0xc4a00000c4e,
|
1319 |
-
0xc5500000c57,
|
1320 |
-
0xc5800000c5b,
|
1321 |
-
0xc5d00000c5e,
|
1322 |
-
0xc6000000c64,
|
1323 |
-
0xc6600000c70,
|
1324 |
-
0xc8000000c84,
|
1325 |
-
0xc8500000c8d,
|
1326 |
-
0xc8e00000c91,
|
1327 |
-
0xc9200000ca9,
|
1328 |
-
0xcaa00000cb4,
|
1329 |
-
0xcb500000cba,
|
1330 |
-
0xcbc00000cc5,
|
1331 |
-
0xcc600000cc9,
|
1332 |
-
0xcca00000cce,
|
1333 |
-
0xcd500000cd7,
|
1334 |
-
0xcdd00000cdf,
|
1335 |
-
0xce000000ce4,
|
1336 |
-
0xce600000cf0,
|
1337 |
-
0xcf100000cf4,
|
1338 |
-
0xd0000000d0d,
|
1339 |
-
0xd0e00000d11,
|
1340 |
-
0xd1200000d45,
|
1341 |
-
0xd4600000d49,
|
1342 |
-
0xd4a00000d4f,
|
1343 |
-
0xd5400000d58,
|
1344 |
-
0xd5f00000d64,
|
1345 |
-
0xd6600000d70,
|
1346 |
-
0xd7a00000d80,
|
1347 |
-
0xd8100000d84,
|
1348 |
-
0xd8500000d97,
|
1349 |
-
0xd9a00000db2,
|
1350 |
-
0xdb300000dbc,
|
1351 |
-
0xdbd00000dbe,
|
1352 |
-
0xdc000000dc7,
|
1353 |
-
0xdca00000dcb,
|
1354 |
-
0xdcf00000dd5,
|
1355 |
-
0xdd600000dd7,
|
1356 |
-
0xdd800000de0,
|
1357 |
-
0xde600000df0,
|
1358 |
-
0xdf200000df4,
|
1359 |
-
0xe0100000e33,
|
1360 |
-
0xe3400000e3b,
|
1361 |
-
0xe4000000e4f,
|
1362 |
-
0xe5000000e5a,
|
1363 |
-
0xe8100000e83,
|
1364 |
-
0xe8400000e85,
|
1365 |
-
0xe8600000e8b,
|
1366 |
-
0xe8c00000ea4,
|
1367 |
-
0xea500000ea6,
|
1368 |
-
0xea700000eb3,
|
1369 |
-
0xeb400000ebe,
|
1370 |
-
0xec000000ec5,
|
1371 |
-
0xec600000ec7,
|
1372 |
-
0xec800000ecf,
|
1373 |
-
0xed000000eda,
|
1374 |
-
0xede00000ee0,
|
1375 |
-
0xf0000000f01,
|
1376 |
-
0xf0b00000f0c,
|
1377 |
-
0xf1800000f1a,
|
1378 |
-
0xf2000000f2a,
|
1379 |
-
0xf3500000f36,
|
1380 |
-
0xf3700000f38,
|
1381 |
-
0xf3900000f3a,
|
1382 |
-
0xf3e00000f43,
|
1383 |
-
0xf4400000f48,
|
1384 |
-
0xf4900000f4d,
|
1385 |
-
0xf4e00000f52,
|
1386 |
-
0xf5300000f57,
|
1387 |
-
0xf5800000f5c,
|
1388 |
-
0xf5d00000f69,
|
1389 |
-
0xf6a00000f6d,
|
1390 |
-
0xf7100000f73,
|
1391 |
-
0xf7400000f75,
|
1392 |
-
0xf7a00000f81,
|
1393 |
-
0xf8200000f85,
|
1394 |
-
0xf8600000f93,
|
1395 |
-
0xf9400000f98,
|
1396 |
-
0xf9900000f9d,
|
1397 |
-
0xf9e00000fa2,
|
1398 |
-
0xfa300000fa7,
|
1399 |
-
0xfa800000fac,
|
1400 |
-
0xfad00000fb9,
|
1401 |
-
0xfba00000fbd,
|
1402 |
-
0xfc600000fc7,
|
1403 |
-
0x10000000104a,
|
1404 |
-
0x10500000109e,
|
1405 |
-
0x10d0000010fb,
|
1406 |
-
0x10fd00001100,
|
1407 |
-
0x120000001249,
|
1408 |
-
0x124a0000124e,
|
1409 |
-
0x125000001257,
|
1410 |
-
0x125800001259,
|
1411 |
-
0x125a0000125e,
|
1412 |
-
0x126000001289,
|
1413 |
-
0x128a0000128e,
|
1414 |
-
0x1290000012b1,
|
1415 |
-
0x12b2000012b6,
|
1416 |
-
0x12b8000012bf,
|
1417 |
-
0x12c0000012c1,
|
1418 |
-
0x12c2000012c6,
|
1419 |
-
0x12c8000012d7,
|
1420 |
-
0x12d800001311,
|
1421 |
-
0x131200001316,
|
1422 |
-
0x13180000135b,
|
1423 |
-
0x135d00001360,
|
1424 |
-
0x138000001390,
|
1425 |
-
0x13a0000013f6,
|
1426 |
-
0x14010000166d,
|
1427 |
-
0x166f00001680,
|
1428 |
-
0x16810000169b,
|
1429 |
-
0x16a0000016eb,
|
1430 |
-
0x16f1000016f9,
|
1431 |
-
0x170000001716,
|
1432 |
-
0x171f00001735,
|
1433 |
-
0x174000001754,
|
1434 |
-
0x17600000176d,
|
1435 |
-
0x176e00001771,
|
1436 |
-
0x177200001774,
|
1437 |
-
0x1780000017b4,
|
1438 |
-
0x17b6000017d4,
|
1439 |
-
0x17d7000017d8,
|
1440 |
-
0x17dc000017de,
|
1441 |
-
0x17e0000017ea,
|
1442 |
-
0x18100000181a,
|
1443 |
-
0x182000001879,
|
1444 |
-
0x1880000018ab,
|
1445 |
-
0x18b0000018f6,
|
1446 |
-
0x19000000191f,
|
1447 |
-
0x19200000192c,
|
1448 |
-
0x19300000193c,
|
1449 |
-
0x19460000196e,
|
1450 |
-
0x197000001975,
|
1451 |
-
0x1980000019ac,
|
1452 |
-
0x19b0000019ca,
|
1453 |
-
0x19d0000019da,
|
1454 |
-
0x1a0000001a1c,
|
1455 |
-
0x1a2000001a5f,
|
1456 |
-
0x1a6000001a7d,
|
1457 |
-
0x1a7f00001a8a,
|
1458 |
-
0x1a9000001a9a,
|
1459 |
-
0x1aa700001aa8,
|
1460 |
-
0x1ab000001abe,
|
1461 |
-
0x1abf00001acf,
|
1462 |
-
0x1b0000001b4d,
|
1463 |
-
0x1b5000001b5a,
|
1464 |
-
0x1b6b00001b74,
|
1465 |
-
0x1b8000001bf4,
|
1466 |
-
0x1c0000001c38,
|
1467 |
-
0x1c4000001c4a,
|
1468 |
-
0x1c4d00001c7e,
|
1469 |
-
0x1cd000001cd3,
|
1470 |
-
0x1cd400001cfb,
|
1471 |
-
0x1d0000001d2c,
|
1472 |
-
0x1d2f00001d30,
|
1473 |
-
0x1d3b00001d3c,
|
1474 |
-
0x1d4e00001d4f,
|
1475 |
-
0x1d6b00001d78,
|
1476 |
-
0x1d7900001d9b,
|
1477 |
-
0x1dc000001e00,
|
1478 |
-
0x1e0100001e02,
|
1479 |
-
0x1e0300001e04,
|
1480 |
-
0x1e0500001e06,
|
1481 |
-
0x1e0700001e08,
|
1482 |
-
0x1e0900001e0a,
|
1483 |
-
0x1e0b00001e0c,
|
1484 |
-
0x1e0d00001e0e,
|
1485 |
-
0x1e0f00001e10,
|
1486 |
-
0x1e1100001e12,
|
1487 |
-
0x1e1300001e14,
|
1488 |
-
0x1e1500001e16,
|
1489 |
-
0x1e1700001e18,
|
1490 |
-
0x1e1900001e1a,
|
1491 |
-
0x1e1b00001e1c,
|
1492 |
-
0x1e1d00001e1e,
|
1493 |
-
0x1e1f00001e20,
|
1494 |
-
0x1e2100001e22,
|
1495 |
-
0x1e2300001e24,
|
1496 |
-
0x1e2500001e26,
|
1497 |
-
0x1e2700001e28,
|
1498 |
-
0x1e2900001e2a,
|
1499 |
-
0x1e2b00001e2c,
|
1500 |
-
0x1e2d00001e2e,
|
1501 |
-
0x1e2f00001e30,
|
1502 |
-
0x1e3100001e32,
|
1503 |
-
0x1e3300001e34,
|
1504 |
-
0x1e3500001e36,
|
1505 |
-
0x1e3700001e38,
|
1506 |
-
0x1e3900001e3a,
|
1507 |
-
0x1e3b00001e3c,
|
1508 |
-
0x1e3d00001e3e,
|
1509 |
-
0x1e3f00001e40,
|
1510 |
-
0x1e4100001e42,
|
1511 |
-
0x1e4300001e44,
|
1512 |
-
0x1e4500001e46,
|
1513 |
-
0x1e4700001e48,
|
1514 |
-
0x1e4900001e4a,
|
1515 |
-
0x1e4b00001e4c,
|
1516 |
-
0x1e4d00001e4e,
|
1517 |
-
0x1e4f00001e50,
|
1518 |
-
0x1e5100001e52,
|
1519 |
-
0x1e5300001e54,
|
1520 |
-
0x1e5500001e56,
|
1521 |
-
0x1e5700001e58,
|
1522 |
-
0x1e5900001e5a,
|
1523 |
-
0x1e5b00001e5c,
|
1524 |
-
0x1e5d00001e5e,
|
1525 |
-
0x1e5f00001e60,
|
1526 |
-
0x1e6100001e62,
|
1527 |
-
0x1e6300001e64,
|
1528 |
-
0x1e6500001e66,
|
1529 |
-
0x1e6700001e68,
|
1530 |
-
0x1e6900001e6a,
|
1531 |
-
0x1e6b00001e6c,
|
1532 |
-
0x1e6d00001e6e,
|
1533 |
-
0x1e6f00001e70,
|
1534 |
-
0x1e7100001e72,
|
1535 |
-
0x1e7300001e74,
|
1536 |
-
0x1e7500001e76,
|
1537 |
-
0x1e7700001e78,
|
1538 |
-
0x1e7900001e7a,
|
1539 |
-
0x1e7b00001e7c,
|
1540 |
-
0x1e7d00001e7e,
|
1541 |
-
0x1e7f00001e80,
|
1542 |
-
0x1e8100001e82,
|
1543 |
-
0x1e8300001e84,
|
1544 |
-
0x1e8500001e86,
|
1545 |
-
0x1e8700001e88,
|
1546 |
-
0x1e8900001e8a,
|
1547 |
-
0x1e8b00001e8c,
|
1548 |
-
0x1e8d00001e8e,
|
1549 |
-
0x1e8f00001e90,
|
1550 |
-
0x1e9100001e92,
|
1551 |
-
0x1e9300001e94,
|
1552 |
-
0x1e9500001e9a,
|
1553 |
-
0x1e9c00001e9e,
|
1554 |
-
0x1e9f00001ea0,
|
1555 |
-
0x1ea100001ea2,
|
1556 |
-
0x1ea300001ea4,
|
1557 |
-
0x1ea500001ea6,
|
1558 |
-
0x1ea700001ea8,
|
1559 |
-
0x1ea900001eaa,
|
1560 |
-
0x1eab00001eac,
|
1561 |
-
0x1ead00001eae,
|
1562 |
-
0x1eaf00001eb0,
|
1563 |
-
0x1eb100001eb2,
|
1564 |
-
0x1eb300001eb4,
|
1565 |
-
0x1eb500001eb6,
|
1566 |
-
0x1eb700001eb8,
|
1567 |
-
0x1eb900001eba,
|
1568 |
-
0x1ebb00001ebc,
|
1569 |
-
0x1ebd00001ebe,
|
1570 |
-
0x1ebf00001ec0,
|
1571 |
-
0x1ec100001ec2,
|
1572 |
-
0x1ec300001ec4,
|
1573 |
-
0x1ec500001ec6,
|
1574 |
-
0x1ec700001ec8,
|
1575 |
-
0x1ec900001eca,
|
1576 |
-
0x1ecb00001ecc,
|
1577 |
-
0x1ecd00001ece,
|
1578 |
-
0x1ecf00001ed0,
|
1579 |
-
0x1ed100001ed2,
|
1580 |
-
0x1ed300001ed4,
|
1581 |
-
0x1ed500001ed6,
|
1582 |
-
0x1ed700001ed8,
|
1583 |
-
0x1ed900001eda,
|
1584 |
-
0x1edb00001edc,
|
1585 |
-
0x1edd00001ede,
|
1586 |
-
0x1edf00001ee0,
|
1587 |
-
0x1ee100001ee2,
|
1588 |
-
0x1ee300001ee4,
|
1589 |
-
0x1ee500001ee6,
|
1590 |
-
0x1ee700001ee8,
|
1591 |
-
0x1ee900001eea,
|
1592 |
-
0x1eeb00001eec,
|
1593 |
-
0x1eed00001eee,
|
1594 |
-
0x1eef00001ef0,
|
1595 |
-
0x1ef100001ef2,
|
1596 |
-
0x1ef300001ef4,
|
1597 |
-
0x1ef500001ef6,
|
1598 |
-
0x1ef700001ef8,
|
1599 |
-
0x1ef900001efa,
|
1600 |
-
0x1efb00001efc,
|
1601 |
-
0x1efd00001efe,
|
1602 |
-
0x1eff00001f08,
|
1603 |
-
0x1f1000001f16,
|
1604 |
-
0x1f2000001f28,
|
1605 |
-
0x1f3000001f38,
|
1606 |
-
0x1f4000001f46,
|
1607 |
-
0x1f5000001f58,
|
1608 |
-
0x1f6000001f68,
|
1609 |
-
0x1f7000001f71,
|
1610 |
-
0x1f7200001f73,
|
1611 |
-
0x1f7400001f75,
|
1612 |
-
0x1f7600001f77,
|
1613 |
-
0x1f7800001f79,
|
1614 |
-
0x1f7a00001f7b,
|
1615 |
-
0x1f7c00001f7d,
|
1616 |
-
0x1fb000001fb2,
|
1617 |
-
0x1fb600001fb7,
|
1618 |
-
0x1fc600001fc7,
|
1619 |
-
0x1fd000001fd3,
|
1620 |
-
0x1fd600001fd8,
|
1621 |
-
0x1fe000001fe3,
|
1622 |
-
0x1fe400001fe8,
|
1623 |
-
0x1ff600001ff7,
|
1624 |
-
0x214e0000214f,
|
1625 |
-
0x218400002185,
|
1626 |
-
0x2c3000002c60,
|
1627 |
-
0x2c6100002c62,
|
1628 |
-
0x2c6500002c67,
|
1629 |
-
0x2c6800002c69,
|
1630 |
-
0x2c6a00002c6b,
|
1631 |
-
0x2c6c00002c6d,
|
1632 |
-
0x2c7100002c72,
|
1633 |
-
0x2c7300002c75,
|
1634 |
-
0x2c7600002c7c,
|
1635 |
-
0x2c8100002c82,
|
1636 |
-
0x2c8300002c84,
|
1637 |
-
0x2c8500002c86,
|
1638 |
-
0x2c8700002c88,
|
1639 |
-
0x2c8900002c8a,
|
1640 |
-
0x2c8b00002c8c,
|
1641 |
-
0x2c8d00002c8e,
|
1642 |
-
0x2c8f00002c90,
|
1643 |
-
0x2c9100002c92,
|
1644 |
-
0x2c9300002c94,
|
1645 |
-
0x2c9500002c96,
|
1646 |
-
0x2c9700002c98,
|
1647 |
-
0x2c9900002c9a,
|
1648 |
-
0x2c9b00002c9c,
|
1649 |
-
0x2c9d00002c9e,
|
1650 |
-
0x2c9f00002ca0,
|
1651 |
-
0x2ca100002ca2,
|
1652 |
-
0x2ca300002ca4,
|
1653 |
-
0x2ca500002ca6,
|
1654 |
-
0x2ca700002ca8,
|
1655 |
-
0x2ca900002caa,
|
1656 |
-
0x2cab00002cac,
|
1657 |
-
0x2cad00002cae,
|
1658 |
-
0x2caf00002cb0,
|
1659 |
-
0x2cb100002cb2,
|
1660 |
-
0x2cb300002cb4,
|
1661 |
-
0x2cb500002cb6,
|
1662 |
-
0x2cb700002cb8,
|
1663 |
-
0x2cb900002cba,
|
1664 |
-
0x2cbb00002cbc,
|
1665 |
-
0x2cbd00002cbe,
|
1666 |
-
0x2cbf00002cc0,
|
1667 |
-
0x2cc100002cc2,
|
1668 |
-
0x2cc300002cc4,
|
1669 |
-
0x2cc500002cc6,
|
1670 |
-
0x2cc700002cc8,
|
1671 |
-
0x2cc900002cca,
|
1672 |
-
0x2ccb00002ccc,
|
1673 |
-
0x2ccd00002cce,
|
1674 |
-
0x2ccf00002cd0,
|
1675 |
-
0x2cd100002cd2,
|
1676 |
-
0x2cd300002cd4,
|
1677 |
-
0x2cd500002cd6,
|
1678 |
-
0x2cd700002cd8,
|
1679 |
-
0x2cd900002cda,
|
1680 |
-
0x2cdb00002cdc,
|
1681 |
-
0x2cdd00002cde,
|
1682 |
-
0x2cdf00002ce0,
|
1683 |
-
0x2ce100002ce2,
|
1684 |
-
0x2ce300002ce5,
|
1685 |
-
0x2cec00002ced,
|
1686 |
-
0x2cee00002cf2,
|
1687 |
-
0x2cf300002cf4,
|
1688 |
-
0x2d0000002d26,
|
1689 |
-
0x2d2700002d28,
|
1690 |
-
0x2d2d00002d2e,
|
1691 |
-
0x2d3000002d68,
|
1692 |
-
0x2d7f00002d97,
|
1693 |
-
0x2da000002da7,
|
1694 |
-
0x2da800002daf,
|
1695 |
-
0x2db000002db7,
|
1696 |
-
0x2db800002dbf,
|
1697 |
-
0x2dc000002dc7,
|
1698 |
-
0x2dc800002dcf,
|
1699 |
-
0x2dd000002dd7,
|
1700 |
-
0x2dd800002ddf,
|
1701 |
-
0x2de000002e00,
|
1702 |
-
0x2e2f00002e30,
|
1703 |
-
0x300500003008,
|
1704 |
-
0x302a0000302e,
|
1705 |
-
0x303c0000303d,
|
1706 |
-
0x304100003097,
|
1707 |
-
0x30990000309b,
|
1708 |
-
0x309d0000309f,
|
1709 |
-
0x30a1000030fb,
|
1710 |
-
0x30fc000030ff,
|
1711 |
-
0x310500003130,
|
1712 |
-
0x31a0000031c0,
|
1713 |
-
0x31f000003200,
|
1714 |
-
0x340000004dc0,
|
1715 |
-
0x4e000000a48d,
|
1716 |
-
0xa4d00000a4fe,
|
1717 |
-
0xa5000000a60d,
|
1718 |
-
0xa6100000a62c,
|
1719 |
-
0xa6410000a642,
|
1720 |
-
0xa6430000a644,
|
1721 |
-
0xa6450000a646,
|
1722 |
-
0xa6470000a648,
|
1723 |
-
0xa6490000a64a,
|
1724 |
-
0xa64b0000a64c,
|
1725 |
-
0xa64d0000a64e,
|
1726 |
-
0xa64f0000a650,
|
1727 |
-
0xa6510000a652,
|
1728 |
-
0xa6530000a654,
|
1729 |
-
0xa6550000a656,
|
1730 |
-
0xa6570000a658,
|
1731 |
-
0xa6590000a65a,
|
1732 |
-
0xa65b0000a65c,
|
1733 |
-
0xa65d0000a65e,
|
1734 |
-
0xa65f0000a660,
|
1735 |
-
0xa6610000a662,
|
1736 |
-
0xa6630000a664,
|
1737 |
-
0xa6650000a666,
|
1738 |
-
0xa6670000a668,
|
1739 |
-
0xa6690000a66a,
|
1740 |
-
0xa66b0000a66c,
|
1741 |
-
0xa66d0000a670,
|
1742 |
-
0xa6740000a67e,
|
1743 |
-
0xa67f0000a680,
|
1744 |
-
0xa6810000a682,
|
1745 |
-
0xa6830000a684,
|
1746 |
-
0xa6850000a686,
|
1747 |
-
0xa6870000a688,
|
1748 |
-
0xa6890000a68a,
|
1749 |
-
0xa68b0000a68c,
|
1750 |
-
0xa68d0000a68e,
|
1751 |
-
0xa68f0000a690,
|
1752 |
-
0xa6910000a692,
|
1753 |
-
0xa6930000a694,
|
1754 |
-
0xa6950000a696,
|
1755 |
-
0xa6970000a698,
|
1756 |
-
0xa6990000a69a,
|
1757 |
-
0xa69b0000a69c,
|
1758 |
-
0xa69e0000a6e6,
|
1759 |
-
0xa6f00000a6f2,
|
1760 |
-
0xa7170000a720,
|
1761 |
-
0xa7230000a724,
|
1762 |
-
0xa7250000a726,
|
1763 |
-
0xa7270000a728,
|
1764 |
-
0xa7290000a72a,
|
1765 |
-
0xa72b0000a72c,
|
1766 |
-
0xa72d0000a72e,
|
1767 |
-
0xa72f0000a732,
|
1768 |
-
0xa7330000a734,
|
1769 |
-
0xa7350000a736,
|
1770 |
-
0xa7370000a738,
|
1771 |
-
0xa7390000a73a,
|
1772 |
-
0xa73b0000a73c,
|
1773 |
-
0xa73d0000a73e,
|
1774 |
-
0xa73f0000a740,
|
1775 |
-
0xa7410000a742,
|
1776 |
-
0xa7430000a744,
|
1777 |
-
0xa7450000a746,
|
1778 |
-
0xa7470000a748,
|
1779 |
-
0xa7490000a74a,
|
1780 |
-
0xa74b0000a74c,
|
1781 |
-
0xa74d0000a74e,
|
1782 |
-
0xa74f0000a750,
|
1783 |
-
0xa7510000a752,
|
1784 |
-
0xa7530000a754,
|
1785 |
-
0xa7550000a756,
|
1786 |
-
0xa7570000a758,
|
1787 |
-
0xa7590000a75a,
|
1788 |
-
0xa75b0000a75c,
|
1789 |
-
0xa75d0000a75e,
|
1790 |
-
0xa75f0000a760,
|
1791 |
-
0xa7610000a762,
|
1792 |
-
0xa7630000a764,
|
1793 |
-
0xa7650000a766,
|
1794 |
-
0xa7670000a768,
|
1795 |
-
0xa7690000a76a,
|
1796 |
-
0xa76b0000a76c,
|
1797 |
-
0xa76d0000a76e,
|
1798 |
-
0xa76f0000a770,
|
1799 |
-
0xa7710000a779,
|
1800 |
-
0xa77a0000a77b,
|
1801 |
-
0xa77c0000a77d,
|
1802 |
-
0xa77f0000a780,
|
1803 |
-
0xa7810000a782,
|
1804 |
-
0xa7830000a784,
|
1805 |
-
0xa7850000a786,
|
1806 |
-
0xa7870000a789,
|
1807 |
-
0xa78c0000a78d,
|
1808 |
-
0xa78e0000a790,
|
1809 |
-
0xa7910000a792,
|
1810 |
-
0xa7930000a796,
|
1811 |
-
0xa7970000a798,
|
1812 |
-
0xa7990000a79a,
|
1813 |
-
0xa79b0000a79c,
|
1814 |
-
0xa79d0000a79e,
|
1815 |
-
0xa79f0000a7a0,
|
1816 |
-
0xa7a10000a7a2,
|
1817 |
-
0xa7a30000a7a4,
|
1818 |
-
0xa7a50000a7a6,
|
1819 |
-
0xa7a70000a7a8,
|
1820 |
-
0xa7a90000a7aa,
|
1821 |
-
0xa7af0000a7b0,
|
1822 |
-
0xa7b50000a7b6,
|
1823 |
-
0xa7b70000a7b8,
|
1824 |
-
0xa7b90000a7ba,
|
1825 |
-
0xa7bb0000a7bc,
|
1826 |
-
0xa7bd0000a7be,
|
1827 |
-
0xa7bf0000a7c0,
|
1828 |
-
0xa7c10000a7c2,
|
1829 |
-
0xa7c30000a7c4,
|
1830 |
-
0xa7c80000a7c9,
|
1831 |
-
0xa7ca0000a7cb,
|
1832 |
-
0xa7d10000a7d2,
|
1833 |
-
0xa7d30000a7d4,
|
1834 |
-
0xa7d50000a7d6,
|
1835 |
-
0xa7d70000a7d8,
|
1836 |
-
0xa7d90000a7da,
|
1837 |
-
0xa7f20000a7f5,
|
1838 |
-
0xa7f60000a7f8,
|
1839 |
-
0xa7fa0000a828,
|
1840 |
-
0xa82c0000a82d,
|
1841 |
-
0xa8400000a874,
|
1842 |
-
0xa8800000a8c6,
|
1843 |
-
0xa8d00000a8da,
|
1844 |
-
0xa8e00000a8f8,
|
1845 |
-
0xa8fb0000a8fc,
|
1846 |
-
0xa8fd0000a92e,
|
1847 |
-
0xa9300000a954,
|
1848 |
-
0xa9800000a9c1,
|
1849 |
-
0xa9cf0000a9da,
|
1850 |
-
0xa9e00000a9ff,
|
1851 |
-
0xaa000000aa37,
|
1852 |
-
0xaa400000aa4e,
|
1853 |
-
0xaa500000aa5a,
|
1854 |
-
0xaa600000aa77,
|
1855 |
-
0xaa7a0000aac3,
|
1856 |
-
0xaadb0000aade,
|
1857 |
-
0xaae00000aaf0,
|
1858 |
-
0xaaf20000aaf7,
|
1859 |
-
0xab010000ab07,
|
1860 |
-
0xab090000ab0f,
|
1861 |
-
0xab110000ab17,
|
1862 |
-
0xab200000ab27,
|
1863 |
-
0xab280000ab2f,
|
1864 |
-
0xab300000ab5b,
|
1865 |
-
0xab600000ab69,
|
1866 |
-
0xabc00000abeb,
|
1867 |
-
0xabec0000abee,
|
1868 |
-
0xabf00000abfa,
|
1869 |
-
0xac000000d7a4,
|
1870 |
-
0xfa0e0000fa10,
|
1871 |
-
0xfa110000fa12,
|
1872 |
-
0xfa130000fa15,
|
1873 |
-
0xfa1f0000fa20,
|
1874 |
-
0xfa210000fa22,
|
1875 |
-
0xfa230000fa25,
|
1876 |
-
0xfa270000fa2a,
|
1877 |
-
0xfb1e0000fb1f,
|
1878 |
-
0xfe200000fe30,
|
1879 |
-
0xfe730000fe74,
|
1880 |
-
0x100000001000c,
|
1881 |
-
0x1000d00010027,
|
1882 |
-
0x100280001003b,
|
1883 |
-
0x1003c0001003e,
|
1884 |
-
0x1003f0001004e,
|
1885 |
-
0x100500001005e,
|
1886 |
-
0x10080000100fb,
|
1887 |
-
0x101fd000101fe,
|
1888 |
-
0x102800001029d,
|
1889 |
-
0x102a0000102d1,
|
1890 |
-
0x102e0000102e1,
|
1891 |
-
0x1030000010320,
|
1892 |
-
0x1032d00010341,
|
1893 |
-
0x103420001034a,
|
1894 |
-
0x103500001037b,
|
1895 |
-
0x103800001039e,
|
1896 |
-
0x103a0000103c4,
|
1897 |
-
0x103c8000103d0,
|
1898 |
-
0x104280001049e,
|
1899 |
-
0x104a0000104aa,
|
1900 |
-
0x104d8000104fc,
|
1901 |
-
0x1050000010528,
|
1902 |
-
0x1053000010564,
|
1903 |
-
0x10597000105a2,
|
1904 |
-
0x105a3000105b2,
|
1905 |
-
0x105b3000105ba,
|
1906 |
-
0x105bb000105bd,
|
1907 |
-
0x1060000010737,
|
1908 |
-
0x1074000010756,
|
1909 |
-
0x1076000010768,
|
1910 |
-
0x1078000010786,
|
1911 |
-
0x10787000107b1,
|
1912 |
-
0x107b2000107bb,
|
1913 |
-
0x1080000010806,
|
1914 |
-
0x1080800010809,
|
1915 |
-
0x1080a00010836,
|
1916 |
-
0x1083700010839,
|
1917 |
-
0x1083c0001083d,
|
1918 |
-
0x1083f00010856,
|
1919 |
-
0x1086000010877,
|
1920 |
-
0x108800001089f,
|
1921 |
-
0x108e0000108f3,
|
1922 |
-
0x108f4000108f6,
|
1923 |
-
0x1090000010916,
|
1924 |
-
0x109200001093a,
|
1925 |
-
0x10980000109b8,
|
1926 |
-
0x109be000109c0,
|
1927 |
-
0x10a0000010a04,
|
1928 |
-
0x10a0500010a07,
|
1929 |
-
0x10a0c00010a14,
|
1930 |
-
0x10a1500010a18,
|
1931 |
-
0x10a1900010a36,
|
1932 |
-
0x10a3800010a3b,
|
1933 |
-
0x10a3f00010a40,
|
1934 |
-
0x10a6000010a7d,
|
1935 |
-
0x10a8000010a9d,
|
1936 |
-
0x10ac000010ac8,
|
1937 |
-
0x10ac900010ae7,
|
1938 |
-
0x10b0000010b36,
|
1939 |
-
0x10b4000010b56,
|
1940 |
-
0x10b6000010b73,
|
1941 |
-
0x10b8000010b92,
|
1942 |
-
0x10c0000010c49,
|
1943 |
-
0x10cc000010cf3,
|
1944 |
-
0x10d0000010d28,
|
1945 |
-
0x10d3000010d3a,
|
1946 |
-
0x10e8000010eaa,
|
1947 |
-
0x10eab00010ead,
|
1948 |
-
0x10eb000010eb2,
|
1949 |
-
0x10efd00010f1d,
|
1950 |
-
0x10f2700010f28,
|
1951 |
-
0x10f3000010f51,
|
1952 |
-
0x10f7000010f86,
|
1953 |
-
0x10fb000010fc5,
|
1954 |
-
0x10fe000010ff7,
|
1955 |
-
0x1100000011047,
|
1956 |
-
0x1106600011076,
|
1957 |
-
0x1107f000110bb,
|
1958 |
-
0x110c2000110c3,
|
1959 |
-
0x110d0000110e9,
|
1960 |
-
0x110f0000110fa,
|
1961 |
-
0x1110000011135,
|
1962 |
-
0x1113600011140,
|
1963 |
-
0x1114400011148,
|
1964 |
-
0x1115000011174,
|
1965 |
-
0x1117600011177,
|
1966 |
-
0x11180000111c5,
|
1967 |
-
0x111c9000111cd,
|
1968 |
-
0x111ce000111db,
|
1969 |
-
0x111dc000111dd,
|
1970 |
-
0x1120000011212,
|
1971 |
-
0x1121300011238,
|
1972 |
-
0x1123e00011242,
|
1973 |
-
0x1128000011287,
|
1974 |
-
0x1128800011289,
|
1975 |
-
0x1128a0001128e,
|
1976 |
-
0x1128f0001129e,
|
1977 |
-
0x1129f000112a9,
|
1978 |
-
0x112b0000112eb,
|
1979 |
-
0x112f0000112fa,
|
1980 |
-
0x1130000011304,
|
1981 |
-
0x113050001130d,
|
1982 |
-
0x1130f00011311,
|
1983 |
-
0x1131300011329,
|
1984 |
-
0x1132a00011331,
|
1985 |
-
0x1133200011334,
|
1986 |
-
0x113350001133a,
|
1987 |
-
0x1133b00011345,
|
1988 |
-
0x1134700011349,
|
1989 |
-
0x1134b0001134e,
|
1990 |
-
0x1135000011351,
|
1991 |
-
0x1135700011358,
|
1992 |
-
0x1135d00011364,
|
1993 |
-
0x113660001136d,
|
1994 |
-
0x1137000011375,
|
1995 |
-
0x114000001144b,
|
1996 |
-
0x114500001145a,
|
1997 |
-
0x1145e00011462,
|
1998 |
-
0x11480000114c6,
|
1999 |
-
0x114c7000114c8,
|
2000 |
-
0x114d0000114da,
|
2001 |
-
0x11580000115b6,
|
2002 |
-
0x115b8000115c1,
|
2003 |
-
0x115d8000115de,
|
2004 |
-
0x1160000011641,
|
2005 |
-
0x1164400011645,
|
2006 |
-
0x116500001165a,
|
2007 |
-
0x11680000116b9,
|
2008 |
-
0x116c0000116ca,
|
2009 |
-
0x117000001171b,
|
2010 |
-
0x1171d0001172c,
|
2011 |
-
0x117300001173a,
|
2012 |
-
0x1174000011747,
|
2013 |
-
0x118000001183b,
|
2014 |
-
0x118c0000118ea,
|
2015 |
-
0x118ff00011907,
|
2016 |
-
0x119090001190a,
|
2017 |
-
0x1190c00011914,
|
2018 |
-
0x1191500011917,
|
2019 |
-
0x1191800011936,
|
2020 |
-
0x1193700011939,
|
2021 |
-
0x1193b00011944,
|
2022 |
-
0x119500001195a,
|
2023 |
-
0x119a0000119a8,
|
2024 |
-
0x119aa000119d8,
|
2025 |
-
0x119da000119e2,
|
2026 |
-
0x119e3000119e5,
|
2027 |
-
0x11a0000011a3f,
|
2028 |
-
0x11a4700011a48,
|
2029 |
-
0x11a5000011a9a,
|
2030 |
-
0x11a9d00011a9e,
|
2031 |
-
0x11ab000011af9,
|
2032 |
-
0x11c0000011c09,
|
2033 |
-
0x11c0a00011c37,
|
2034 |
-
0x11c3800011c41,
|
2035 |
-
0x11c5000011c5a,
|
2036 |
-
0x11c7200011c90,
|
2037 |
-
0x11c9200011ca8,
|
2038 |
-
0x11ca900011cb7,
|
2039 |
-
0x11d0000011d07,
|
2040 |
-
0x11d0800011d0a,
|
2041 |
-
0x11d0b00011d37,
|
2042 |
-
0x11d3a00011d3b,
|
2043 |
-
0x11d3c00011d3e,
|
2044 |
-
0x11d3f00011d48,
|
2045 |
-
0x11d5000011d5a,
|
2046 |
-
0x11d6000011d66,
|
2047 |
-
0x11d6700011d69,
|
2048 |
-
0x11d6a00011d8f,
|
2049 |
-
0x11d9000011d92,
|
2050 |
-
0x11d9300011d99,
|
2051 |
-
0x11da000011daa,
|
2052 |
-
0x11ee000011ef7,
|
2053 |
-
0x11f0000011f11,
|
2054 |
-
0x11f1200011f3b,
|
2055 |
-
0x11f3e00011f43,
|
2056 |
-
0x11f5000011f5a,
|
2057 |
-
0x11fb000011fb1,
|
2058 |
-
0x120000001239a,
|
2059 |
-
0x1248000012544,
|
2060 |
-
0x12f9000012ff1,
|
2061 |
-
0x1300000013430,
|
2062 |
-
0x1344000013456,
|
2063 |
-
0x1440000014647,
|
2064 |
-
0x1680000016a39,
|
2065 |
-
0x16a4000016a5f,
|
2066 |
-
0x16a6000016a6a,
|
2067 |
-
0x16a7000016abf,
|
2068 |
-
0x16ac000016aca,
|
2069 |
-
0x16ad000016aee,
|
2070 |
-
0x16af000016af5,
|
2071 |
-
0x16b0000016b37,
|
2072 |
-
0x16b4000016b44,
|
2073 |
-
0x16b5000016b5a,
|
2074 |
-
0x16b6300016b78,
|
2075 |
-
0x16b7d00016b90,
|
2076 |
-
0x16e6000016e80,
|
2077 |
-
0x16f0000016f4b,
|
2078 |
-
0x16f4f00016f88,
|
2079 |
-
0x16f8f00016fa0,
|
2080 |
-
0x16fe000016fe2,
|
2081 |
-
0x16fe300016fe5,
|
2082 |
-
0x16ff000016ff2,
|
2083 |
-
0x17000000187f8,
|
2084 |
-
0x1880000018cd6,
|
2085 |
-
0x18d0000018d09,
|
2086 |
-
0x1aff00001aff4,
|
2087 |
-
0x1aff50001affc,
|
2088 |
-
0x1affd0001afff,
|
2089 |
-
0x1b0000001b123,
|
2090 |
-
0x1b1320001b133,
|
2091 |
-
0x1b1500001b153,
|
2092 |
-
0x1b1550001b156,
|
2093 |
-
0x1b1640001b168,
|
2094 |
-
0x1b1700001b2fc,
|
2095 |
-
0x1bc000001bc6b,
|
2096 |
-
0x1bc700001bc7d,
|
2097 |
-
0x1bc800001bc89,
|
2098 |
-
0x1bc900001bc9a,
|
2099 |
-
0x1bc9d0001bc9f,
|
2100 |
-
0x1cf000001cf2e,
|
2101 |
-
0x1cf300001cf47,
|
2102 |
-
0x1da000001da37,
|
2103 |
-
0x1da3b0001da6d,
|
2104 |
-
0x1da750001da76,
|
2105 |
-
0x1da840001da85,
|
2106 |
-
0x1da9b0001daa0,
|
2107 |
-
0x1daa10001dab0,
|
2108 |
-
0x1df000001df1f,
|
2109 |
-
0x1df250001df2b,
|
2110 |
-
0x1e0000001e007,
|
2111 |
-
0x1e0080001e019,
|
2112 |
-
0x1e01b0001e022,
|
2113 |
-
0x1e0230001e025,
|
2114 |
-
0x1e0260001e02b,
|
2115 |
-
0x1e0300001e06e,
|
2116 |
-
0x1e08f0001e090,
|
2117 |
-
0x1e1000001e12d,
|
2118 |
-
0x1e1300001e13e,
|
2119 |
-
0x1e1400001e14a,
|
2120 |
-
0x1e14e0001e14f,
|
2121 |
-
0x1e2900001e2af,
|
2122 |
-
0x1e2c00001e2fa,
|
2123 |
-
0x1e4d00001e4fa,
|
2124 |
-
0x1e7e00001e7e7,
|
2125 |
-
0x1e7e80001e7ec,
|
2126 |
-
0x1e7ed0001e7ef,
|
2127 |
-
0x1e7f00001e7ff,
|
2128 |
-
0x1e8000001e8c5,
|
2129 |
-
0x1e8d00001e8d7,
|
2130 |
-
0x1e9220001e94c,
|
2131 |
-
0x1e9500001e95a,
|
2132 |
-
0x200000002a6e0,
|
2133 |
-
0x2a7000002b73a,
|
2134 |
-
0x2b7400002b81e,
|
2135 |
-
0x2b8200002cea2,
|
2136 |
-
0x2ceb00002ebe1,
|
2137 |
-
0x300000003134b,
|
2138 |
-
0x31350000323b0,
|
    ),
    'CONTEXTJ': (
        0x200c0000200e,
    ),
    'CONTEXTO': (
        0xb7000000b8,
        0x37500000376,
        0x5f3000005f5,
        0x6600000066a,
        0x6f0000006fa,
        0x30fb000030fc,
    ),
}
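The entries above pack each codepoint range into a single integer. Judging from values such as 0x200c0000200e (which would cover U+200C-U+200D, the CONTEXTJ joiner characters), the upper 32 bits appear to hold the range start and the lower 32 bits the exclusive end. A minimal membership-test sketch under that assumption; the helper names below are illustrative and not part of the deleted module:

from bisect import bisect_right

def _decode(packed):
    # Assumed layout: start codepoint in the upper 32 bits, exclusive end in the lower 32 bits.
    return packed >> 32, packed & 0xFFFFFFFF

def in_table(cp, packed_ranges):
    # Probe with the low 32 bits saturated so a range starting exactly at `cp` sorts before it,
    # then check whether the last range at or before the probe actually contains `cp`.
    idx = bisect_right(packed_ranges, (cp << 32) | 0xFFFFFFFF) - 1
    if idx < 0:
        return False
    start, end = _decode(packed_ranges[idx])
    return start <= cp < end

CONTEXTO = (
    0xb7000000b8,
    0x37500000376,
    0x5f3000005f5,
    0x6600000066a,
    0x6f0000006fa,
    0x30fb000030fc,
)

print(in_table(0x00B7, CONTEXTO))  # True: U+00B7 MIDDLE DOT is listed above
print(in_table(0x0041, CONTEXTO))  # False: plain ASCII letters are not CONTEXTO codepoints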
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/__init__.py
DELETED
@@ -1,102 +0,0 @@
"""
Python HTTP library with thread-safe connection pooling, file post support, user friendly, and more
"""
from __future__ import absolute_import

# Set default logging handler to avoid "No handler found" warnings.
import logging
import warnings
from logging import NullHandler

from . import exceptions
from ._version import __version__
from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, connection_from_url
from .filepost import encode_multipart_formdata
from .poolmanager import PoolManager, ProxyManager, proxy_from_url
from .response import HTTPResponse
from .util.request import make_headers
from .util.retry import Retry
from .util.timeout import Timeout
from .util.url import get_host

# === NOTE TO REPACKAGERS AND VENDORS ===
# Please delete this block, this logic is only
# for urllib3 being distributed via PyPI.
# See: https://github.com/urllib3/urllib3/issues/2680
try:
    import urllib3_secure_extra  # type: ignore # noqa: F401
except ImportError:
    pass
else:
    warnings.warn(
        "'urllib3[secure]' extra is deprecated and will be removed "
        "in a future release of urllib3 2.x. Read more in this issue: "
        "https://github.com/urllib3/urllib3/issues/2680",
        category=DeprecationWarning,
        stacklevel=2,
    )

__author__ = "Andrey Petrov ([email protected])"
__license__ = "MIT"
__version__ = __version__

__all__ = (
    "HTTPConnectionPool",
    "HTTPSConnectionPool",
    "PoolManager",
    "ProxyManager",
    "HTTPResponse",
    "Retry",
    "Timeout",
    "add_stderr_logger",
    "connection_from_url",
    "disable_warnings",
    "encode_multipart_formdata",
    "get_host",
    "make_headers",
    "proxy_from_url",
)

logging.getLogger(__name__).addHandler(NullHandler())


def add_stderr_logger(level=logging.DEBUG):
    """
    Helper for quickly adding a StreamHandler to the logger. Useful for
    debugging.

    Returns the handler after adding it.
    """
    # This method needs to be in this __init__.py to get the __name__ correct
    # even if urllib3 is vendored within another package.
    logger = logging.getLogger(__name__)
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s"))
    logger.addHandler(handler)
    logger.setLevel(level)
    logger.debug("Added a stderr logging handler to logger: %s", __name__)
    return handler


# ... Clean up.
del NullHandler


# All warning filters *must* be appended unless you're really certain that they
# shouldn't be: otherwise, it's very hard for users to use most Python
# mechanisms to silence them.
# SecurityWarning's always go off by default.
warnings.simplefilter("always", exceptions.SecurityWarning, append=True)
# SubjectAltNameWarning's should go off once per host
warnings.simplefilter("default", exceptions.SubjectAltNameWarning, append=True)
# InsecurePlatformWarning's don't vary between requests, so we keep it default.
warnings.simplefilter("default", exceptions.InsecurePlatformWarning, append=True)
# SNIMissingWarnings should go off only once.
warnings.simplefilter("default", exceptions.SNIMissingWarning, append=True)


def disable_warnings(category=exceptions.HTTPWarning):
    """
    Helper for quickly disabling all urllib3 warnings.
    """
    warnings.simplefilter("ignore", category)
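The deleted __init__.py above mostly re-exports urllib3's public API; the only functions it defines are add_stderr_logger() and disable_warnings(). A short usage sketch, not part of the diff; the URL is a placeholder:

import urllib3

# Attach a stderr StreamHandler so urllib3's internal logging is visible while debugging.
handler = urllib3.add_stderr_logger()

# Make a request through the PoolManager re-exported by the module above.
http = urllib3.PoolManager()
response = http.request("GET", "https://example.org/")
print(response.status)

# Silence all urllib3 warnings (HTTPWarning and its subclasses) once debugging is done.
urllib3.disable_warnings()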
spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/packaging/__init__.py
DELETED
@@ -1,25 +0,0 @@
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.

from .__about__ import (
    __author__,
    __copyright__,
    __email__,
    __license__,
    __summary__,
    __title__,
    __uri__,
    __version__,
)

__all__ = [
    "__title__",
    "__summary__",
    "__uri__",
    "__version__",
    "__author__",
    "__email__",
    "__license__",
    "__copyright__",
]
spaces/Boadiwaa/Recipes/openai/api_resources/abstract/updateable_api_resource.py
DELETED
@@ -1,10 +0,0 @@
from urllib.parse import quote_plus

from openai.api_resources.abstract.api_resource import APIResource


class UpdateableAPIResource(APIResource):
    @classmethod
    def modify(cls, sid, **params):
        url = "%s/%s" % (cls.class_url(), quote_plus(sid))
        return cls._static_request("post", url, **params)
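UpdateableAPIResource contributes a single modify() classmethod that POSTs to class_url()/<quoted id>. A hedged sketch of how a concrete resource might use it; the Widget class, its OBJECT_NAME attribute, and the field names are hypothetical and only for illustration:

from openai.api_resources.abstract.updateable_api_resource import UpdateableAPIResource

class Widget(UpdateableAPIResource):
    # Hypothetical resource: real subclasses in the openai package define how
    # class_url() is derived (commonly from an OBJECT_NAME attribute).
    OBJECT_NAME = "widget"

# Issues POST <class_url()>/widget-123 with the keyword arguments as the update payload.
Widget.modify("widget-123", metadata={"owner": "data-team"})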
spaces/CVPR/LIVE/thrust/thrust/adjacent_difference.h
DELETED
@@ -1,246 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
* Copyright 2008-2013 NVIDIA Corporation
|
3 |
-
*
|
4 |
-
* Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
* you may not use this file except in compliance with the License.
|
6 |
-
* You may obtain a copy of the License at
|
7 |
-
*
|
8 |
-
* http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
*
|
10 |
-
* Unless required by applicable law or agreed to in writing, software
|
11 |
-
* distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
* See the License for the specific language governing permissions and
|
14 |
-
* limitations under the License.
|
15 |
-
*/
|
16 |
-
|
17 |
-
|
18 |
-
/*! \file adjacent_difference.h
|
19 |
-
* \brief Compute difference between consecutive elements of a range
|
20 |
-
*/
|
21 |
-
|
22 |
-
#pragma once
|
23 |
-
|
24 |
-
#include <thrust/detail/config.h>
|
25 |
-
#include <thrust/detail/execution_policy.h>
|
26 |
-
|
27 |
-
namespace thrust
|
28 |
-
{
|
29 |
-
|
30 |
-
|
31 |
-
/*! \addtogroup transformations Transformations
|
32 |
-
* \{
|
33 |
-
*/
|
34 |
-
|
35 |
-
|
36 |
-
/*! \p adjacent_difference calculates the differences of adjacent elements in the
|
37 |
-
* range <tt>[first, last)</tt>. That is, <tt>\*first</tt> is assigned to
|
38 |
-
* <tt>\*result</tt>, and, for each iterator \p i in the range
|
39 |
-
* <tt>[first + 1, last)</tt>, the difference of <tt>\*i</tt> and <tt>*(i - 1)</tt>
|
40 |
-
* is assigned to <tt>\*(result + (i - first))</tt>.
|
41 |
-
*
|
42 |
-
* This version of \p adjacent_difference uses <tt>operator-</tt> to calculate
|
43 |
-
* differences.
|
44 |
-
*
|
45 |
-
* The algorithm's execution is parallelized as determined by \p exec.
|
46 |
-
*
|
47 |
-
* \param exec The execution policy to use for parallelization.
|
48 |
-
* \param first The beginning of the input range.
|
49 |
-
* \param last The end of the input range.
|
50 |
-
* \param result The beginning of the output range.
|
51 |
-
* \return The iterator <tt>result + (last - first)</tt>
|
52 |
-
*
|
53 |
-
* \tparam DerivedPolicy The name of the derived execution policy.
|
54 |
-
* \tparam InputIterator is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>,
|
55 |
-
* and \c x and \c y are objects of \p InputIterator's \c value_type, then \c x - \c is defined,
|
56 |
-
* and \p InputIterator's \c value_type is convertible to a type in \p OutputIterator's set of \c value_types,
|
57 |
-
* and the return type of <tt>x - y</tt> is convertible to a type in \p OutputIterator's set of \c value_types.
|
58 |
-
* \tparam OutputIterator is a model of <a href="http://www.sgi.com/tech/stl/OutputIterator.html">Output Iterator</a>.
|
59 |
-
*
|
60 |
-
* \remark Note that \p result is permitted to be the same iterator as \p first. This is
|
61 |
-
* useful for computing differences "in place".
|
62 |
-
*
|
63 |
-
* The following code snippet demonstrates how to use \p adjacent_difference to compute
|
64 |
-
* the difference between adjacent elements of a range using the \p thrust::device execution policy:
|
65 |
-
*
|
66 |
-
* \code
|
67 |
-
* #include <thrust/adjacent_difference.h>
|
68 |
-
* #include <thrust/device_vector.h>
|
69 |
-
* #include <thrust/execution_policy.h>
|
70 |
-
* ...
|
71 |
-
* int h_data[8] = {1, 2, 1, 2, 1, 2, 1, 2};
|
72 |
-
* thrust::device_vector<int> d_data(h_data, h_data + 8);
|
73 |
-
* thrust::device_vector<int> d_result(8);
|
74 |
-
*
|
75 |
-
* thrust::adjacent_difference(thrust::device, d_data.begin(), d_data.end(), d_result.begin());
|
76 |
-
*
|
77 |
-
* // d_result is now [1, 1, -1, 1, -1, 1, -1, 1]
|
78 |
-
* \endcode
|
79 |
-
*
|
80 |
-
* \see http://www.sgi.com/tech/stl/adjacent_difference.html
|
81 |
-
* \see inclusive_scan
|
82 |
-
*/
|
83 |
-
template<typename DerivedPolicy, typename InputIterator, typename OutputIterator>
|
84 |
-
__host__ __device__
|
85 |
-
OutputIterator adjacent_difference(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
|
86 |
-
InputIterator first, InputIterator last,
|
87 |
-
OutputIterator result);
|
88 |
-
|
89 |
-
/*! \p adjacent_difference calculates the differences of adjacent elements in the
|
90 |
-
* range <tt>[first, last)</tt>. That is, <tt>*first</tt> is assigned to
|
91 |
-
* <tt>\*result</tt>, and, for each iterator \p i in the range
|
92 |
-
* <tt>[first + 1, last)</tt>, <tt>binary_op(\*i, \*(i - 1))</tt> is assigned to
|
93 |
-
* <tt>\*(result + (i - first))</tt>.
|
94 |
-
*
|
95 |
-
* This version of \p adjacent_difference uses the binary function \p binary_op to
|
96 |
-
* calculate differences.
|
97 |
-
*
|
98 |
-
* The algorithm's execution is parallelized as determined by \p exec.
|
99 |
-
*
|
100 |
-
* \param exec The execution policy to use for parallelization.
|
101 |
-
* \param first The beginning of the input range.
|
102 |
-
* \param last The end of the input range.
|
103 |
-
* \param result The beginning of the output range.
|
104 |
-
* \param binary_op The binary function used to compute differences.
|
105 |
-
* \return The iterator <tt>result + (last - first)</tt>
|
106 |
-
*
|
107 |
-
* \tparam DerivedPolicy The name of the derived execution policy.
|
108 |
-
* \tparam InputIterator is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>,
|
109 |
-
* and \p InputIterator's \c value_type is convertible to \p BinaryFunction's \c first_argument_type and \c second_argument_type,
|
110 |
-
* and \p InputIterator's \c value_type is convertible to a type in \p OutputIterator's set of \c value_types.
|
111 |
-
* \tparam OutputIterator is a model of <a href="http://www.sgi.com/tech/stl/OutputIterator.html">Output Iterator</a>.
|
112 |
-
* \tparam BinaryFunction's \c result_type is convertible to a type in \p OutputIterator's set of \c value_types.
|
113 |
-
*
|
114 |
-
* \remark Note that \p result is permitted to be the same iterator as \p first. This is
|
115 |
-
* useful for computing differences "in place".
|
116 |
-
*
|
117 |
-
* The following code snippet demonstrates how to use \p adjacent_difference to compute
|
118 |
-
* the sum between adjacent elements of a range using the \p thrust::device execution policy:
|
119 |
-
*
|
120 |
-
* \code
|
121 |
-
* #include <thrust/adjacent_difference.h>
|
122 |
-
* #include <thrust/functional.h>
|
123 |
-
* #include <thrust/device_vector.h>
|
124 |
-
* #include <thrust/execution_policy.h>
|
125 |
-
* ...
|
126 |
-
* int h_data[8] = {1, 2, 1, 2, 1, 2, 1, 2};
|
127 |
-
* thrust::device_vector<int> d_data(h_data, h_data + 8);
|
128 |
-
* thrust::device_vector<int> d_result(8);
|
129 |
-
*
|
130 |
-
* thrust::adjacent_difference(thrust::device, d_data.begin(), d_data.end(), d_result.begin(), thrust::plus<int>());
|
131 |
-
*
|
132 |
-
* // d_result is now [1, 3, 3, 3, 3, 3, 3, 3]
|
133 |
-
* \endcode
|
134 |
-
*
|
135 |
-
* \see http://www.sgi.com/tech/stl/adjacent_difference.html
|
136 |
-
* \see inclusive_scan
|
137 |
-
*/
|
138 |
-
template<typename DerivedPolicy, typename InputIterator, typename OutputIterator, typename BinaryFunction>
|
139 |
-
__host__ __device__
|
140 |
-
OutputIterator adjacent_difference(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
|
141 |
-
InputIterator first, InputIterator last,
|
142 |
-
OutputIterator result,
|
143 |
-
BinaryFunction binary_op);
|
144 |
-
|
145 |
-
/*! \p adjacent_difference calculates the differences of adjacent elements in the
|
146 |
-
* range <tt>[first, last)</tt>. That is, <tt>\*first</tt> is assigned to
|
147 |
-
* <tt>\*result</tt>, and, for each iterator \p i in the range
|
148 |
-
* <tt>[first + 1, last)</tt>, the difference of <tt>\*i</tt> and <tt>*(i - 1)</tt>
|
149 |
-
* is assigned to <tt>\*(result + (i - first))</tt>.
|
150 |
-
*
|
151 |
-
* This version of \p adjacent_difference uses <tt>operator-</tt> to calculate
|
152 |
-
* differences.
|
153 |
-
*
|
154 |
-
* \param first The beginning of the input range.
|
155 |
-
* \param last The end of the input range.
|
156 |
-
* \param result The beginning of the output range.
|
157 |
-
* \return The iterator <tt>result + (last - first)</tt>
|
158 |
-
*
|
159 |
-
* \tparam InputIterator is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>,
|
160 |
-
* and \c x and \c y are objects of \p InputIterator's \c value_type, then \c x - \c is defined,
|
161 |
-
* and \p InputIterator's \c value_type is convertible to a type in \p OutputIterator's set of \c value_types,
|
162 |
-
* and the return type of <tt>x - y</tt> is convertible to a type in \p OutputIterator's set of \c value_types.
|
163 |
-
* \tparam OutputIterator is a model of <a href="http://www.sgi.com/tech/stl/OutputIterator.html">Output Iterator</a>.
|
164 |
-
*
|
165 |
-
* \remark Note that \p result is permitted to be the same iterator as \p first. This is
|
166 |
-
* useful for computing differences "in place".
|
167 |
-
*
|
168 |
-
* The following code snippet demonstrates how to use \p adjacent_difference to compute
|
169 |
-
* the difference between adjacent elements of a range.
|
170 |
-
*
|
171 |
-
* \code
|
172 |
-
* #include <thrust/adjacent_difference.h>
|
173 |
-
* #include <thrust/device_vector.h>
|
174 |
-
* ...
|
175 |
-
* int h_data[8] = {1, 2, 1, 2, 1, 2, 1, 2};
|
176 |
-
* thrust::device_vector<int> d_data(h_data, h_data + 8);
|
177 |
-
* thrust::device_vector<int> d_result(8);
|
178 |
-
*
|
179 |
-
* thrust::adjacent_difference(d_data.begin(), d_data.end(), d_result.begin());
|
180 |
-
*
|
181 |
-
* // d_result is now [1, 1, -1, 1, -1, 1, -1, 1]
|
182 |
-
* \endcode
|
183 |
-
*
|
184 |
-
* \see http://www.sgi.com/tech/stl/adjacent_difference.html
|
185 |
-
* \see inclusive_scan
|
186 |
-
*/
|
187 |
-
template <typename InputIterator, typename OutputIterator>
|
188 |
-
OutputIterator adjacent_difference(InputIterator first, InputIterator last,
|
189 |
-
OutputIterator result);
|
190 |
-
|
191 |
-
/*! \p adjacent_difference calculates the differences of adjacent elements in the
|
192 |
-
* range <tt>[first, last)</tt>. That is, <tt>*first</tt> is assigned to
|
193 |
-
* <tt>\*result</tt>, and, for each iterator \p i in the range
|
194 |
-
* <tt>[first + 1, last)</tt>, <tt>binary_op(\*i, \*(i - 1))</tt> is assigned to
|
195 |
-
* <tt>\*(result + (i - first))</tt>.
|
196 |
-
*
|
197 |
-
* This version of \p adjacent_difference uses the binary function \p binary_op to
|
198 |
-
* calculate differences.
|
199 |
-
*
|
200 |
-
* \param first The beginning of the input range.
|
201 |
-
* \param last The end of the input range.
|
202 |
-
* \param result The beginning of the output range.
|
203 |
-
* \param binary_op The binary function used to compute differences.
|
204 |
-
* \return The iterator <tt>result + (last - first)</tt>
|
205 |
-
*
|
206 |
-
* \tparam InputIterator is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>,
|
207 |
-
* and \p InputIterator's \c value_type is convertible to \p BinaryFunction's \c first_argument_type and \c second_argument_type,
|
208 |
-
* and \p InputIterator's \c value_type is convertible to a type in \p OutputIterator's set of \c value_types.
|
209 |
-
* \tparam OutputIterator is a model of <a href="http://www.sgi.com/tech/stl/OutputIterator.html">Output Iterator</a>.
|
210 |
-
* \tparam BinaryFunction's \c result_type is convertible to a type in \p OutputIterator's set of \c value_types.
|
211 |
-
*
|
212 |
-
* \remark Note that \p result is permitted to be the same iterator as \p first. This is
|
213 |
-
* useful for computing differences "in place".
|
214 |
-
*
|
215 |
-
* The following code snippet demonstrates how to use \p adjacent_difference to compute
|
216 |
-
* the sum between adjacent elements of a range.
|
217 |
-
*
|
218 |
-
* \code
|
219 |
-
* #include <thrust/adjacent_difference.h>
|
220 |
-
* #include <thrust/functional.h>
|
221 |
-
* #include <thrust/device_vector.h>
|
222 |
-
* ...
|
223 |
-
* int h_data[8] = {1, 2, 1, 2, 1, 2, 1, 2};
|
224 |
-
* thrust::device_vector<int> d_data(h_data, h_data + 8);
|
225 |
-
* thrust::device_vector<int> d_result(8);
|
226 |
-
*
|
227 |
-
* thrust::adjacent_difference(d_data.begin(), d_data.end(), d_result.begin(), thrust::plus<int>());
|
228 |
-
*
|
229 |
-
* // d_result is now [1, 3, 3, 3, 3, 3, 3, 3]
|
230 |
-
* \endcode
|
231 |
-
*
|
232 |
-
* \see http://www.sgi.com/tech/stl/adjacent_difference.html
|
233 |
-
* \see inclusive_scan
|
234 |
-
*/
|
235 |
-
template <typename InputIterator, typename OutputIterator, typename BinaryFunction>
|
236 |
-
OutputIterator adjacent_difference(InputIterator first, InputIterator last,
|
237 |
-
OutputIterator result,
|
238 |
-
BinaryFunction binary_op);
|
239 |
-
|
240 |
-
/*! \}
|
241 |
-
*/
|
242 |
-
|
243 |
-
} // end namespace thrust
|
244 |
-
|
245 |
-
#include <thrust/detail/adjacent_difference.inl>
|
246 |
-
|
|
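The deleted Thrust header above documents adjacent_difference: the first output element copies the first input, and every later element is op(current, previous), with operator- as the default op. The same semantics as a short Python sketch, reproducing the d_result values quoted in the header's own snippets:

def adjacent_difference(xs, op=lambda a, b: a - b):
    # First element is copied through; each later element is op(current, previous).
    if not xs:
        return []
    return [xs[0]] + [op(xs[i], xs[i - 1]) for i in range(1, len(xs))]

data = [1, 2, 1, 2, 1, 2, 1, 2]
print(adjacent_difference(data))                         # [1, 1, -1, 1, -1, 1, -1, 1]
print(adjacent_difference(data, op=lambda a, b: a + b))  # [1, 3, 3, 3, 3, 3, 3, 3]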
spaces/CVPR/LIVE/thrust/thrust/mr/polymorphic_adaptor.h
DELETED
@@ -1,56 +0,0 @@
/*
 *  Copyright 2018-2019 NVIDIA Corporation
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

#pragma once

#include "memory_resource.h"

namespace thrust
{
namespace mr
{

template<typename Pointer = void *>
class polymorphic_adaptor_resource THRUST_FINAL : public memory_resource<Pointer>
{
public:
    polymorphic_adaptor_resource(memory_resource<Pointer> * t) : upstream_resource(t)
    {
    }

    virtual Pointer do_allocate(std::size_t bytes, std::size_t alignment = THRUST_MR_DEFAULT_ALIGNMENT) THRUST_OVERRIDE
    {
        return upstream_resource->allocate(bytes, alignment);
    }

    virtual void do_deallocate(Pointer p, std::size_t bytes, std::size_t alignment) THRUST_OVERRIDE
    {
        return upstream_resource->deallocate(p, bytes, alignment);
    }

    __host__ __device__
    virtual bool do_is_equal(const memory_resource<Pointer> & other) const THRUST_NOEXCEPT THRUST_OVERRIDE
    {
        return upstream_resource->is_equal(other);
    }

private:
    memory_resource<Pointer> * upstream_resource;
};

} // end mr
} // end thrust
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/unique_by_key.h
DELETED
@@ -1,44 +0,0 @@
/*
 *  Copyright 2008-2013 NVIDIA Corporation
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

#pragma once

#include <thrust/detail/config.h>

// the purpose of this header is to #include the unique_by_key.h header
// of the sequential, host, and device systems. It should be #included in any
// code which uses adl to dispatch unique_by_key

#include <thrust/system/detail/sequential/unique_by_key.h>

// SCons can't see through the #defines below to figure out what this header
// includes, so we fake it out by specifying all possible files we might end up
// including inside an #if 0.
#if 0
#include <thrust/system/cpp/detail/unique_by_key.h>
#include <thrust/system/cuda/detail/unique_by_key.h>
#include <thrust/system/omp/detail/unique_by_key.h>
#include <thrust/system/tbb/detail/unique_by_key.h>
#endif

#define __THRUST_HOST_SYSTEM_UNIQUE_BY_KEY_HEADER <__THRUST_HOST_SYSTEM_ROOT/detail/unique_by_key.h>
#include __THRUST_HOST_SYSTEM_UNIQUE_BY_KEY_HEADER
#undef __THRUST_HOST_SYSTEM_UNIQUE_BY_KEY_HEADER

#define __THRUST_DEVICE_SYSTEM_UNIQUE_BY_KEY_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/detail/unique_by_key.h>
#include __THRUST_DEVICE_SYSTEM_UNIQUE_BY_KEY_HEADER
#undef __THRUST_DEVICE_SYSTEM_UNIQUE_BY_KEY_HEADER
spaces/CVPR/TokenCut/app.py
DELETED
@@ -1,22 +0,0 @@
import os
import gradio as gr
from pathlib import Path


os.system("git clone https://github.com/YangtaoWANG95/TokenCut.git")
os.chdir("TokenCut")
os.system("wget https://raw.githubusercontent.com/YangtaoWANG95/TokenCut/master/examples/VOC07_000064.jpg -O parrot.jpg")

def inference(img):
    os.system("python main_tokencut.py --image_path " + img + " --visualize all --resize 320")
    filename = Path(img).stem
    return (
        "./outputs/TokenCut-vit_small16_k/" + filename + "_TokenCut_attn.jpg",
        "./outputs/TokenCut-vit_small16_k/" + filename + "_TokenCut_pred.jpg",
    )

title = "TokenCut"
description = "Gradio demo for TokenCut: Self-Supervised Transformers for Unsupervised Object Discovery using Normalized Cut. To use it, simply upload your image or click on one of the examples to load them. We resize the smaller edge of the image to 320 to accelerate inference time. Read more at the links below."

article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2202.11539' target='_blank'>Self-Supervised Transformers for Unsupervised Object Discovery using Normalized Cut</a> | <a href='https://github.com/YangtaoWANG95/TokenCut' target='_blank'>Github Repo</a></p>"

examples = [["parrot.jpg"]]
gr.Interface(
    inference,
    gr.inputs.Image(type="filepath"),
    [
        gr.outputs.Image(type="filepath", label="TokenCut_attn"),
        gr.outputs.Image(type="filepath", label="TokenCut_prediction"),
    ],
    title=title,
    description=description,
    article=article,
    examples=examples,
).launch(enable_queue=True)
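A side note on the deleted Space above: it is written against the legacy gr.inputs / gr.outputs component namespaces, which newer Gradio releases no longer ship. The sketch below shows how the same interface might look with the current component API. It is an assumption-laden illustration, not part of the original app; the inference body is a placeholder standing in for the shell call to main_tokencut.py.

import gradio as gr

def inference(img_path: str):
    # Placeholder: the deleted app.py shelled out to main_tokencut.py here and
    # returned the attention-map and prediction image paths for the input image.
    return img_path, img_path

demo = gr.Interface(
    fn=inference,
    inputs=gr.Image(type="filepath"),
    outputs=[
        gr.Image(type="filepath", label="TokenCut_attn"),
        gr.Image(type="filepath", label="TokenCut_prediction"),
    ],
    title="TokenCut",
)

if __name__ == "__main__":
    demo.queue().launch()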
spaces/CVPR/lama-example/saicinpainting/evaluation/masks/countless/countless2d.py
DELETED
@@ -1,529 +0,0 @@
from __future__ import print_function, division

"""
COUNTLESS performance test in Python.

python countless2d.py ./images/NAMEOFIMAGE
"""

import six
from six.moves import range
from collections import defaultdict
from functools import reduce
import operator
import io
import os
from PIL import Image
import math
import numpy as np
import random
import sys
import time
from tqdm import tqdm
from scipy import ndimage

def simplest_countless(data):
  """
  Vectorized implementation of downsampling a 2D
  image by 2 on each side using the COUNTLESS algorithm.

  data is a 2D numpy array with even dimensions.
  """
  sections = []

  # This loop splits the 2D array apart into four arrays that are
  # all the result of striding by 2 and offset by (0,0), (0,1), (1,0),
  # and (1,1) representing the A, B, C, and D positions from Figure 1.
  factor = (2,2)
  for offset in np.ndindex(factor):
    part = data[tuple(np.s_[o::f] for o, f in zip(offset, factor))]
    sections.append(part)

  a, b, c, d = sections

  ab = a * (a == b) # PICK(A,B)
  ac = a * (a == c) # PICK(A,C)
  bc = b * (b == c) # PICK(B,C)

  a = ab | ac | bc # Bitwise OR, safe b/c non-matches are zeroed

  return a + (a == 0) * d # AB || AC || BC || D

def quick_countless(data):
  """
  Vectorized implementation of downsampling a 2D
  image by 2 on each side using the COUNTLESS algorithm.

  data is a 2D numpy array with even dimensions.
  """
  sections = []

  # This loop splits the 2D array apart into four arrays that are
  # all the result of striding by 2 and offset by (0,0), (0,1), (1,0),
  # and (1,1) representing the A, B, C, and D positions from Figure 1.
  factor = (2,2)
  for offset in np.ndindex(factor):
    part = data[tuple(np.s_[o::f] for o, f in zip(offset, factor))]
    sections.append(part)

  a, b, c, d = sections

  ab_ac = a * ((a == b) | (a == c)) # PICK(A,B) || PICK(A,C) w/ optimization
  bc = b * (b == c) # PICK(B,C)

  a = ab_ac | bc # (PICK(A,B) || PICK(A,C)) or PICK(B,C)
  return a + (a == 0) * d # AB || AC || BC || D

def quickest_countless(data):
  """
  Vectorized implementation of downsampling a 2D
  image by 2 on each side using the COUNTLESS algorithm.

  data is a 2D numpy array with even dimensions.
  """
  sections = []

  # This loop splits the 2D array apart into four arrays that are
  # all the result of striding by 2 and offset by (0,0), (0,1), (1,0),
  # and (1,1) representing the A, B, C, and D positions from Figure 1.
  factor = (2,2)
  for offset in np.ndindex(factor):
    part = data[tuple(np.s_[o::f] for o, f in zip(offset, factor))]
    sections.append(part)

  a, b, c, d = sections

  ab_ac = a * ((a == b) | (a == c)) # PICK(A,B) || PICK(A,C) w/ optimization
  ab_ac |= b * (b == c) # PICK(B,C)
  return ab_ac + (ab_ac == 0) * d # AB || AC || BC || D

def quick_countless_xor(data):
  """
  Vectorized implementation of downsampling a 2D
  image by 2 on each side using the COUNTLESS algorithm.

  data is a 2D numpy array with even dimensions.
  """
  sections = []

  # This loop splits the 2D array apart into four arrays that are
  # all the result of striding by 2 and offset by (0,0), (0,1), (1,0),
  # and (1,1) representing the A, B, C, and D positions from Figure 1.
  factor = (2,2)
  for offset in np.ndindex(factor):
    part = data[tuple(np.s_[o::f] for o, f in zip(offset, factor))]
    sections.append(part)

  a, b, c, d = sections

  ab = a ^ (a ^ b) # a or b
  ab += (ab != a) * ((ab ^ (ab ^ c)) - b) # b or c
  ab += (ab == c) * ((ab ^ (ab ^ d)) - c) # c or d
  return ab

def stippled_countless(data):
  """
  Vectorized implementation of downsampling a 2D
  image by 2 on each side using the COUNTLESS algorithm
  that treats zero as "background" and inflates lone
  pixels.

  data is a 2D numpy array with even dimensions.
  """
  sections = []

  # This loop splits the 2D array apart into four arrays that are
  # all the result of striding by 2 and offset by (0,0), (0,1), (1,0),
  # and (1,1) representing the A, B, C, and D positions from Figure 1.
  factor = (2,2)
  for offset in np.ndindex(factor):
    part = data[tuple(np.s_[o::f] for o, f in zip(offset, factor))]
    sections.append(part)

  a, b, c, d = sections

  ab_ac = a * ((a == b) | (a == c)) # PICK(A,B) || PICK(A,C) w/ optimization
  ab_ac |= b * (b == c) # PICK(B,C)

  nonzero = a + (a == 0) * (b + (b == 0) * c)
  return ab_ac + (ab_ac == 0) * (d + (d == 0) * nonzero) # AB || AC || BC || D

def zero_corrected_countless(data):
  """
  Vectorized implementation of downsampling a 2D
  image by 2 on each side using the COUNTLESS algorithm.

  data is a 2D numpy array with even dimensions.
  """
  # allows us to prevent losing 1/2 a bit of information
  # at the top end by using a bigger type. Without this 255 is handled incorrectly.
  data, upgraded = upgrade_type(data)

  # offset from zero, raw countless doesn't handle 0 correctly
  # we'll remove the extra 1 at the end.
  data += 1

  sections = []

  # This loop splits the 2D array apart into four arrays that are
  # all the result of striding by 2 and offset by (0,0), (0,1), (1,0),
  # and (1,1) representing the A, B, C, and D positions from Figure 1.
  factor = (2,2)
  for offset in np.ndindex(factor):
    part = data[tuple(np.s_[o::f] for o, f in zip(offset, factor))]
    sections.append(part)

  a, b, c, d = sections

  ab = a * (a == b) # PICK(A,B)
  ac = a * (a == c) # PICK(A,C)
  bc = b * (b == c) # PICK(B,C)

  a = ab | ac | bc # Bitwise OR, safe b/c non-matches are zeroed

  result = a + (a == 0) * d - 1 # a or d - 1

  if upgraded:
    return downgrade_type(result)

  # only need to reset data if we weren't upgraded
  # b/c no copy was made in that case
  data -= 1

  return result

def countless_extreme(data):
  nonzeros = np.count_nonzero(data)
  # print("nonzeros", nonzeros)

  N = reduce(operator.mul, data.shape)

  if nonzeros == N:
    print("quick")
    return quick_countless(data)
  elif np.count_nonzero(data + 1) == N:
    print("quick")
    # print("upper", nonzeros)
    return quick_countless(data)
  else:
    return countless(data)


def countless(data):
  """
  Vectorized implementation of downsampling a 2D
  image by 2 on each side using the COUNTLESS algorithm.

  data is a 2D numpy array with even dimensions.
  """
  # allows us to prevent losing 1/2 a bit of information
  # at the top end by using a bigger type. Without this 255 is handled incorrectly.
  data, upgraded = upgrade_type(data)

  # offset from zero, raw countless doesn't handle 0 correctly
  # we'll remove the extra 1 at the end.
  data += 1

  sections = []

  # This loop splits the 2D array apart into four arrays that are
  # all the result of striding by 2 and offset by (0,0), (0,1), (1,0),
  # and (1,1) representing the A, B, C, and D positions from Figure 1.
  factor = (2,2)
  for offset in np.ndindex(factor):
    part = data[tuple(np.s_[o::f] for o, f in zip(offset, factor))]
    sections.append(part)

  a, b, c, d = sections

  ab_ac = a * ((a == b) | (a == c)) # PICK(A,B) || PICK(A,C) w/ optimization
  ab_ac |= b * (b == c) # PICK(B,C)
  result = ab_ac + (ab_ac == 0) * d - 1 # (matches or d) - 1

  if upgraded:
    return downgrade_type(result)

  # only need to reset data if we weren't upgraded
  # b/c no copy was made in that case
  data -= 1

  return result

def upgrade_type(arr):
  dtype = arr.dtype

  if dtype == np.uint8:
    return arr.astype(np.uint16), True
  elif dtype == np.uint16:
    return arr.astype(np.uint32), True
  elif dtype == np.uint32:
    return arr.astype(np.uint64), True

  return arr, False

def downgrade_type(arr):
  dtype = arr.dtype

  if dtype == np.uint64:
    return arr.astype(np.uint32)
  elif dtype == np.uint32:
    return arr.astype(np.uint16)
  elif dtype == np.uint16:
    return arr.astype(np.uint8)

  return arr

def odd_to_even(image):
  """
  To facilitate 2x2 downsampling segmentation, change an odd sized image into an even sized one.
  Works by mirroring the starting 1 pixel edge of the image on odd shaped sides.

  e.g. turn a 3x3x5 image into a 4x4x5 (the x and y are what are getting downsampled)

  For example: [ 3, 2, 4 ] => [ 3, 3, 2, 4 ] which is now easy to downsample.

  """
  shape = np.array(image.shape)

  offset = (shape % 2)[:2] # x,y offset

  # detect if we're dealing with an even
  # image. if so it's fine, just return.
  if not np.any(offset):
    return image

  oddshape = image.shape[:2] + offset
  oddshape = np.append(oddshape, shape[2:])
  oddshape = oddshape.astype(int)

  newimg = np.empty(shape=oddshape, dtype=image.dtype)

  ox,oy = offset
  sx,sy = oddshape

  newimg[0,0] = image[0,0] # corner
  newimg[ox:sx,0] = image[:,0] # x axis line
  newimg[0,oy:sy] = image[0,:] # y axis line

  return newimg

def counting(array):
  factor = (2, 2, 1)
  shape = array.shape

  while len(shape) < 4:
    array = np.expand_dims(array, axis=-1)
    shape = array.shape

  output_shape = tuple(int(math.ceil(s / f)) for s, f in zip(shape, factor))
  output = np.zeros(output_shape, dtype=array.dtype)

  for chan in range(0, shape[3]):
    for z in range(0, shape[2]):
      for x in range(0, shape[0], 2):
        for y in range(0, shape[1], 2):
          block = array[ x:x+2, y:y+2, z, chan ] # 2x2 block

          hashtable = defaultdict(int)
          for subx, suby in np.ndindex(block.shape[0], block.shape[1]):
            hashtable[block[subx, suby]] += 1

          best = (0, 0)
          for segid, val in six.iteritems(hashtable):
            if best[1] < val:
              best = (segid, val)

          output[ x // 2, y // 2, chan ] = best[0]

  return output

def ndzoom(array):
  if len(array.shape) == 3:
    ratio = ( 1 / 2.0, 1 / 2.0, 1.0 )
  else:
    ratio = ( 1 / 2.0, 1 / 2.0)
  return ndimage.interpolation.zoom(array, ratio, order=1)

def countless_if(array):
  factor = (2, 2, 1)
  shape = array.shape

  if len(shape) < 3:
    array = array[ :,:, np.newaxis ]
    shape = array.shape

  output_shape = tuple(int(math.ceil(s / f)) for s, f in zip(shape, factor))
  output = np.zeros(output_shape, dtype=array.dtype)

  for chan in range(0, shape[2]):
    for x in range(0, shape[0], 2):
      for y in range(0, shape[1], 2):
        block = array[ x:x+2, y:y+2, chan ] # 2x2 block

        if block[0,0] == block[1,0]:
          pick = block[0,0]
        elif block[0,0] == block[0,1]:
          pick = block[0,0]
        elif block[1,0] == block[0,1]:
          pick = block[1,0]
        else:
          pick = block[1,1]

        output[ x // 2, y // 2, chan ] = pick

  return np.squeeze(output)

def downsample_with_averaging(array):
  """
  Downsample x by factor using averaging.

  @return: The downsampled array, of the same type as x.
  """

  if len(array.shape) == 3:
    factor = (2,2,1)
  else:
    factor = (2,2)

  if np.array_equal(factor[:3], np.array([1,1,1])):
    return array

  output_shape = tuple(int(math.ceil(s / f)) for s, f in zip(array.shape, factor))
  temp = np.zeros(output_shape, float)
  counts = np.zeros(output_shape, np.int)
  for offset in np.ndindex(factor):
    part = array[tuple(np.s_[o::f] for o, f in zip(offset, factor))]
    indexing_expr = tuple(np.s_[:s] for s in part.shape)
    temp[indexing_expr] += part
    counts[indexing_expr] += 1
  return np.cast[array.dtype](temp / counts)

def downsample_with_max_pooling(array):

  factor = (2,2)

  if np.all(np.array(factor, int) == 1):
    return array

  sections = []

  for offset in np.ndindex(factor):
    part = array[tuple(np.s_[o::f] for o, f in zip(offset, factor))]
    sections.append(part)

  output = sections[0].copy()

  for section in sections[1:]:
    np.maximum(output, section, output)

  return output

def striding(array):
  """Downsample x by factor using striding.

  @return: The downsampled array, of the same type as x.
  """
  factor = (2,2)
  if np.all(np.array(factor, int) == 1):
    return array
  return array[tuple(np.s_[::f] for f in factor)]

def benchmark():
  filename = sys.argv[1]
  img = Image.open(filename)
  data = np.array(img.getdata(), dtype=np.uint8)

  if len(data.shape) == 1:
    n_channels = 1
    reshape = (img.height, img.width)
  else:
    n_channels = min(data.shape[1], 3)
    data = data[:, :n_channels]
    reshape = (img.height, img.width, n_channels)

  data = data.reshape(reshape).astype(np.uint8)

  methods = [
    simplest_countless,
    quick_countless,
    quick_countless_xor,
    quickest_countless,
    stippled_countless,
    zero_corrected_countless,
    countless,
    downsample_with_averaging,
    downsample_with_max_pooling,
    ndzoom,
    striding,
    # countless_if,
    # counting,
  ]

  formats = {
    1: 'L',
    3: 'RGB',
    4: 'RGBA'
  }

  if not os.path.exists('./results'):
    os.mkdir('./results')

  N = 500
  img_size = float(img.width * img.height) / 1024.0 / 1024.0
  print("N = %d, %dx%d (%.2f MPx) %d chan, %s" % (N, img.width, img.height, img_size, n_channels, filename))
  print("Algorithm\tMPx/sec\tMB/sec\tSec")
  for fn in methods:
    print(fn.__name__, end='')
    sys.stdout.flush()

    start = time.time()
    # tqdm is here to show you what's going on the first time you run it.
    # Feel free to remove it to get slightly more accurate timing results.
    for _ in tqdm(range(N), desc=fn.__name__, disable=True):
      result = fn(data)
    end = time.time()
    print("\r", end='')

    total_time = (end - start)
    mpx = N * img_size / total_time
    mbytes = N * img_size * n_channels / total_time
    # Output in tab separated format to enable copy-paste into excel/numbers
    print("%s\t%.3f\t%.3f\t%.2f" % (fn.__name__, mpx, mbytes, total_time))
    outimg = Image.fromarray(np.squeeze(result), formats[n_channels])
    outimg.save('./results/{}.png'.format(fn.__name__, "PNG"))

if __name__ == '__main__':
  benchmark()


# Example results:
# N = 5, 1024x1024 (1.00 MPx) 1 chan, images/gray_segmentation.png
# Function MPx/sec MB/sec Sec
# simplest_countless 752.855 752.855 0.01
# quick_countless 920.328 920.328 0.01
# zero_corrected_countless 534.143 534.143 0.01
# countless 644.247 644.247 0.01
# downsample_with_averaging 372.575 372.575 0.01
# downsample_with_max_pooling 974.060 974.060 0.01
# ndzoom 137.517 137.517 0.04
# striding 38550.588 38550.588 0.00
# countless_if 4.377 4.377 1.14
# counting 0.117 0.117 42.85

# Run without non-numpy implementations:
# N = 2000, 1024x1024 (1.00 MPx) 1 chan, images/gray_segmentation.png
# Algorithm MPx/sec MB/sec Sec
# simplest_countless 800.522 800.522 2.50
# quick_countless 945.420 945.420 2.12
# quickest_countless 947.256 947.256 2.11
# stippled_countless 544.049 544.049 3.68
# zero_corrected_countless 575.310 575.310 3.48
# countless 646.684 646.684 3.09
# downsample_with_averaging 385.132 385.132 5.19
# downsample_with_max_poolin 988.361 988.361 2.02
# ndzoom 163.104 163.104 12.26
# striding 81589.340 81589.340 0.02
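As a quick sanity check on the deleted module above, the toy run below applies its simplest_countless() to a small label array; it assumes the functions from that file are in scope (for example, pasted into an interactive session). Each 2x2 block collapses to a value that repeats within the block when one exists, otherwise to the bottom-right (D) value.

import numpy as np

# Assumes simplest_countless() from the deleted countless2d.py above is defined.
labels = np.array([
    [1, 1, 2, 2],
    [1, 3, 2, 4],
    [5, 5, 6, 7],
    [5, 8, 9, 9],
], dtype=np.uint8)

print(simplest_countless(labels))
# Expected 2x downsampled output:
# [[1 2]
#  [5 9]]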
spaces/CikeyQI/meme-api/meme_generator/memes/can_can_need/__init__.py
DELETED
@@ -1,20 +0,0 @@
from pathlib import Path
from typing import List

from meme_generator import add_meme
from pil_utils import BuildImage

img_dir = Path(__file__).parent / "images"


def can_can_need(images: List[BuildImage], texts, args):
    frame = BuildImage.open(img_dir / "0.jpg")
    frame.paste(
        images[1].convert("RGBA").circle().resize((340, 340)), (120, 21), alpha=True
    ).paste(
        images[0].convert("RGBA").circle().resize((300, 300)), (611, 718), alpha=True
    )
    return frame.save_jpg()


add_meme("can_can_need", can_can_need, min_images=2, max_images=2, keywords=["看看你的"])
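The deleted meme above relies on the pil_utils BuildImage wrapper. For readers without that dependency, here is a rough plain-Pillow sketch of the same composition idea (circle-cropped avatars pasted onto a template at fixed offsets). The offsets come from the deleted file; the template and avatar file names are placeholders, not assets from this repository.

from PIL import Image, ImageDraw

def circle_crop(img: Image.Image, size: int) -> Image.Image:
    """Resize to a square and apply a circular alpha mask."""
    img = img.convert("RGBA").resize((size, size))
    mask = Image.new("L", (size, size), 0)
    ImageDraw.Draw(mask).ellipse((0, 0, size, size), fill=255)
    img.putalpha(mask)
    return img

# Placeholder paths; offsets mirror the deleted can_can_need meme.
frame = Image.open("0.jpg").convert("RGBA")
frame.alpha_composite(circle_crop(Image.open("avatar_other.png"), 340), (120, 21))
frame.alpha_composite(circle_crop(Image.open("avatar_self.png"), 300), (611, 718))
frame.convert("RGB").save("can_can_need.jpg")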
spaces/CikeyQI/meme-api/meme_generator/memes/my_friend/__init__.py
DELETED
@@ -1,79 +0,0 @@
from pathlib import Path
from typing import List

from pil_utils import BuildImage, Text2Image
from pydantic import Field

from meme_generator import MemeArgsModel, MemeArgsParser, MemeArgsType, add_meme
from meme_generator.exception import TextOverLength

img_dir = Path(__file__).parent / "images"

help = "指定名字"

parser = MemeArgsParser()
parser.add_argument("-n", "--name", type=str, default="", help=help)


class Model(MemeArgsModel):
    name: str = Field("", description=help)


def my_friend(images: List[BuildImage], texts: List[str], args: Model):
    name = args.name or (args.user_infos[-1].name if args.user_infos else "") or "朋友"
    img = images[0].convert("RGBA").circle().resize((100, 100))

    name_img = Text2Image.from_text(name, 25, fill="#868894").to_image()
    name_w, name_h = name_img.size
    if name_w >= 600:
        raise TextOverLength(name)

    corner1 = BuildImage.open(img_dir / "corner1.png")
    corner2 = BuildImage.open(img_dir / "corner2.png")
    corner3 = BuildImage.open(img_dir / "corner3.png")
    corner4 = BuildImage.open(img_dir / "corner4.png")
    label = BuildImage.open(img_dir / "label.png")

    def make_dialog(text: str) -> BuildImage:
        text_img = Text2Image.from_text(text, 40).wrap(600).to_image()
        text_w, text_h = text_img.size
        box_w = max(text_w, name_w + 15) + 140
        box_h = max(text_h + 103, 150)
        box = BuildImage.new("RGBA", (box_w, box_h))
        box.paste(corner1, (0, 0))
        box.paste(corner2, (0, box_h - 75))
        box.paste(corner3, (text_w + 70, 0))
        box.paste(corner4, (text_w + 70, box_h - 75))
        box.paste(BuildImage.new("RGBA", (text_w, box_h - 40), "white"), (70, 20))
        box.paste(BuildImage.new("RGBA", (text_w + 88, box_h - 150), "white"), (27, 75))
        box.paste(text_img, (70, 17 + (box_h - 40 - text_h) // 2), alpha=True)

        dialog = BuildImage.new("RGBA", (box.width + 130, box.height + 60), "#eaedf4")
        dialog.paste(img, (20, 20), alpha=True)
        dialog.paste(box, (130, 60), alpha=True)
        dialog.paste(label, (160, 25))
        dialog.paste(name_img, (260, 22 + (35 - name_h) // 2), alpha=True)
        return dialog

    dialogs = [make_dialog(text) for text in texts]
    frame_w = max((dialog.width for dialog in dialogs))
    frame_h = sum((dialog.height for dialog in dialogs))
    frame = BuildImage.new("RGBA", (frame_w, frame_h), "#eaedf4")
    current_h = 0
    for dialog in dialogs:
        frame.paste(dialog, (0, current_h))
        current_h += dialog.height
    return frame.save_jpg()


add_meme(
    "my_friend",
    my_friend,
    min_images=1,
    max_images=1,
    min_texts=1,
    max_texts=10,
    default_texts=["让我康康"],
    args_type=MemeArgsType(parser, Model),
    keywords=["我朋友说"],
)