Commit 14f8764
Parent(s): 3d7e344
Update parquet files (step 33 of 296)
This view is limited to 50 files because it contains too many changes; see the raw diff for the full change set.
- spaces/1gistliPinn/ChatGPT4/Examples/Bobby Fischer Teaches Chess How to Download the EPUB Version from Forum 6.md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/Design Transformer Indrajit Dasgupta Pdf Download [HOT].md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/DigiDNA IMazing 2.3.5 With Crack TOP.md +0 -52
- spaces/1gistliPinn/ChatGPT4/Examples/Durood E Tanjeena Pdf Free 485.md +0 -38
- spaces/1phancelerku/anime-remove-background/Archer Attack 3D Shooter War - How to Become a Master Archer in Action Games.md +0 -170
- spaces/2023Liu2023/bingo/src/components/ui/badge.tsx +0 -36
- spaces/2ndelement/voicevox/voicevox_engine/morphing.py +0 -208
- spaces/4Taps/SadTalker/src/utils/hparams.py +0 -160
- spaces/AI-Hobbyist/Hoyo-RVC/docs/faiss_tips_ja.md +0 -101
- spaces/AIGC-Audio/AudioGPT/NeuralSeq/data_gen/tts/base_preprocess.py +0 -254
- spaces/AIZ2H/07-GraphViz-PyDeck-Map-AIUIUX-Demo/app.py +0 -509
- spaces/AchyuthGamer/OpenGPT/g4f/Provider/CodeLinkAva.py +0 -64
- spaces/Adapter/CoAdapter/t2i_adapters/t2i_adapters_for_canny.py +0 -47
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/GetParentSizerMethods.js +0 -56
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/utils/ReplaceSliderConfig.js +0 -14
- spaces/AlekseyKorshuk/instagram-filter-removal/modeling/ifrnet.py +0 -166
- spaces/AlexZou/Deploy_Restoration/Lowlight.py +0 -45
- spaces/AlhitawiMohammed22/CER_Hu-Evaluation-Metrics/README.md +0 -161
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/models/controlnet.md +0 -38
- spaces/Andy1621/uniformer_image_detection/configs/res2net/htc_r2_101_fpn_20e_coco.py +0 -7
- spaces/Andy1621/uniformer_image_detection/configs/ssd/ssd512_coco.py +0 -71
- spaces/Andy1621/uniformer_image_detection/configs/yolact/yolact_r101_1x8_coco.py +0 -3
- spaces/Anew1007/extras/constants.py +0 -50
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/models/candidate.py +0 -34
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/modeling/roi_heads/keypoint_head.py +0 -272
- spaces/Banbri/zcvzcv/src/lib/base64ToFile.ts +0 -11
- spaces/BartPoint/VoiceChange_Beta/infer_pack/modules.py +0 -522
- spaces/Benson/text-generation/Examples/Descargar Familias Virtuales 3 Mod Apk Dinero Ilimitado.md +0 -64
- spaces/Big-Web/MMSD/env/Lib/site-packages/dateutil/tz/__init__.py +0 -12
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/gb2312prober.py +0 -47
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/requests/api.py +0 -157
- spaces/CVPR/LIVE/thrust/thrust/detail/config/global_workarounds.h +0 -27
- spaces/CVPR/LIVE/thrust/thrust/transform.h +0 -725
- spaces/CVPR/SPOTER_Sign_Language_Recognition/spoter_mod/skeleton_extractor.py +0 -60
- spaces/CVPR/WALT/mmdet/models/builder.py +0 -77
- spaces/CVPR/regionclip-demo/detectron2/checkpoint/__init__.py +0 -10
- spaces/CVPR/regionclip-demo/detectron2/layers/nms.py +0 -158
- spaces/Chris4K/llms_compare/Dragon Ball Z Raging Blast 2 Psp Iso Download 41 118.md +0 -80
- spaces/CodeDoes/FrostAura-gpt-neox-20b-fiction-novel-generation/README.md +0 -12
- spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/modeling/rpn/retinanet/loss.py +0 -107
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/dateutil/parser/isoparser.py +0 -416
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/H_V_A_R_.py +0 -5
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/woff2.py +0 -1688
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/ModifyUpload-d8fc50ab.js +0 -2
- spaces/DaleChen/AutoGPT/run_continuous.sh +0 -3
- spaces/DeepLabCut/MegaDetector_DeepLabCut/app.py +0 -179
- spaces/DhanushPrabhuS/pothole_yolov8_nano/README.md +0 -13
- spaces/EuroPython2022/mmocr-demo/configs/_base_/recog_pipelines/master_pipeline.py +0 -42
- spaces/FL33TW00D/whisper-turbo/_next/static/chunks/pages/_error-84d94505c9f773f4.js +0 -1
- spaces/Farazquraishi/pendora/app.py +0 -203
spaces/1gistliPinn/ChatGPT4/Examples/Bobby Fischer Teaches Chess How to Download the EPUB Version from Forum 6.md
DELETED
@@ -1,6 +0,0 @@
-<h2>bobby fischer teaches chess epub download forum 6</h2><br /><p><b><b>Download Zip</b> ⚹⚹⚹ <a href="https://imgfil.com/2uy0pz">https://imgfil.com/2uy0pz</a></b></p><br /><br />
-<br />
-aaccfb2cb3<br />
-<br />
-<br />
-<p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Design Transformer Indrajit Dasgupta Pdf Download [HOT].md
DELETED
@@ -1,6 +0,0 @@
-<h2>design transformer indrajit dasgupta pdf download</h2><br /><p><b><b>Download File</b> ⚹ <a href="https://imgfil.com/2uxXzE">https://imgfil.com/2uxXzE</a></b></p><br /><br />
-<br />
-tidisvirbwork/design-transformer-indrajit-dasgupta-pdf-download ... This repository has no tags. If you need specific material, you'll have to find it elsewhere. Here we only briefly label the parts we particularly like. If you want to learn more about these design techniques and how they help create holistic design, find Jim Quick's book, Design Thinking for Successful Product Development. And don't forget "Research Design and Design Analysis": the book is for those who want to better understand how academics and 8a78ff9644<br />
-<br />
-<br />
-<p></p>
spaces/1gistliPinn/ChatGPT4/Examples/DigiDNA IMazing 2.3.5 With Crack TOP.md
DELETED
@@ -1,52 +0,0 @@
-
-<h1>DigiDNA iMazing 2.3.5 With Crack: How to Transfer and Manage Your iOS Data Easily</h1>
-<p>If you are looking for reliable and powerful software to transfer and manage your iOS data, you may want to check out DigiDNA iMazing 2.3.5 with crack. This software allows you to connect your iPhone, iPad, or iPod touch to your computer and access your data without iTunes or iCloud. You can also backup, restore, and clone your devices, as well as transfer music, photos, videos, messages, contacts, and more.</p>
-<p>In this article, we will show you how to download and install DigiDNA iMazing 2.3.5 with crack, as well as some of its key features and benefits.</p>
-<h2>DigiDNA iMazing 2.3.5 With Crack</h2><br /><p><b><b>Download</b> · <a href="https://imgfil.com/2uy1dO">https://imgfil.com/2uy1dO</a></b></p><br /><br />
-<h2>How to Download and Install DigiDNA iMazing 2.3.5 With Crack</h2>
-<p>To download and install DigiDNA iMazing 2.3.5 with crack, you need to follow these steps:</p>
-<ol>
-<li>Click on the link below to download the setup file and the crack file.</li>
-<li>Extract the files using WinRAR or any other extraction tool.</li>
-<li>Run the setup file and follow the instructions to install the software.</li>
-<li>Copy the crack file and paste it into the installation folder.</li>
-<li>Launch the software and enjoy its full features.</li>
-</ol>
-<p><a href="https://example.com/download">Download DigiDNA iMazing 2.3.5 With Crack Here</a></p>
-<h2>Key Features and Benefits of DigiDNA iMazing 2.3.5 With Crack</h2>
-<p>DigiDNA iMazing 2.3.5 with crack is versatile and user-friendly software that offers many features and benefits for iOS users. Some of them are:</p>
-<ul>
-<li>You can transfer data between your iOS devices and your computer without iTunes or iCloud.</li>
-<li>You can backup your devices with encryption and restore them selectively or fully.</li>
-<li>You can clone your devices or transfer data from one device to another.</li>
-<li>You can manage your music, photos, videos, messages, contacts, notes, voice memos, and more.</li>
-<li>You can export or print your data in various formats.</li>
-<li>You can access your device's file system and browse its contents.</li>
-<li>You can update or reinstall iOS on your devices.</li>
-<li>You can erase or reset your devices securely.</li>
-</ul>
-<h2>Conclusion</h2>
-<p>DigiDNA iMazing 2.3.5 with crack is powerful and reliable software that can help you transfer and manage your iOS data easily and efficiently. It is compatible with Windows and Mac OS, and supports all iOS devices from iPhone 4s to iPhone Xs Max, iPad 1 to iPad Pro, and iPod touch 1 to iPod touch 6. If you want to try this software for free, you can download it from the link below.</p>
-<p><a href="https://example.com/download">Download DigiDNA iMazing 2.3.5 With Crack Here</a></p>
-
-<h2>How to Use DigiDNA iMazing 2.3.5 With Crack</h2>
-<p>Using DigiDNA iMazing 2.3.5 with crack is very easy and intuitive. Here are some steps to get you started:</p>
-<ol>
-<li>Connect your iOS device to your computer using a USB cable or Wi-Fi.</li>
-<li>Launch DigiDNA iMazing 2.3.5 and select your device from the sidebar.</li>
-<li>Choose the action you want to perform from the main window or the toolbar.</li>
-<li>Follow the on-screen instructions and confirm your choices.</li>
-<li>Wait for the process to complete and check the results.</li>
-</ol>
-<p>You can also customize the settings and preferences of DigiDNA iMazing 2.3.5 from the menu bar. You can change the backup location, backup encryption, backup frequency, device name, device icon, and more.</p>
-<h2>Frequently Asked Questions About DigiDNA iMazing 2.3.5 With Crack</h2>
-<p>Here are some common questions and answers about DigiDNA iMazing 2.3.5 with crack:</p>
-<p></p>
-<h3>Is DigiDNA iMazing 2.3.5 with crack safe to use?</h3>
-<p>DigiDNA iMazing 2.3.5 with crack is safe to use as long as you download it from a trusted source and scan it with an antivirus program before installing it. However, using cracked software is illegal and may violate the terms and conditions of the original software developer. We do not recommend or endorse using cracked software and we are not responsible for any consequences that may arise from doing so.</p>
-<h3>Does DigiDNA iMazing 2.3.5 with crack require an internet connection?</h3>
-<p>DigiDNA iMazing 2.3.5 with crack does not require an internet connection to function properly. However, you may need an internet connection to download and install the software, update the software, or access some online features such as iCloud or App Store.</p>
-<h3>Can I use DigiDNA iMazing 2.3.5 with crack on multiple devices?</h3>
-<p>DigiDNA iMazing 2.3.5 with crack can be used on multiple devices as long as they are connected to the same computer. You can also transfer your license to another computer by deactivating it on the old computer and activating it on the new computer.</p> d5da3c52bf<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/Durood E Tanjeena Pdf Free 485.md
DELETED
@@ -1,38 +0,0 @@
-<br />
-<h1>How to Download and Recite Durood E Tanjeena PDF for Free</h1>
-<p>Durood E Tanjeena is a powerful prayer that is often recited by Muslims in times of difficulty. It is said to be very beneficial and is believed to bring blessings and protection from Allah. In this article, we will show you how to download and recite Durood E Tanjeena PDF for free.</p>
-<h2>What is Durood E Tanjeena?</h2>
-<p>Durood E Tanjeena is a short supplication in Arabic that invokes blessings upon the Prophet Muhammad and his family. The prayer is as follows:</p>
-<h2>durood e tanjeena pdf free 485</h2><br /><p><b><b>Download File</b> ……… <a href="https://imgfil.com/2uy0Pm">https://imgfil.com/2uy0Pm</a></b></p><br /><br />
-<blockquote>
-<p>اللهم صل على محمد وعلى آل محمد كما صليت على إبراهيم وعلى آل إبراهيم إنك حميد مجيد</p>
-<p>Allahumma salli ala Muhammad wa ala ali Muhammad kama sallayta ala Ibrahim wa ala ali Ibrahim innaka Hamidun Majid</p>
-</blockquote>
-<p>The meaning of Durood E Tanjeena can be translated to "O Allah, send blessings upon Muhammad and upon the family of Muhammad, as You sent blessings upon Ibrahim and upon the family of Ibrahim. Verily, You are Praiseworthy and Glorious."</p>
-<h2>How to Download Durood E Tanjeena PDF for Free?</h2>
-<p>If you want to download Durood E Tanjeena PDF for free, you can use one of the following links:</p>
-<ul>
-<li><a href="https://quranwork.com/darood-tanjeena-pdf-benefits-and-read-online/">Darood Tanjeena PDF Download & Read Online - Quran Work[^1^]</a></li>
-<li><a href="https://ilmkidunya.org/darood-e-tanjeena-pdf-free-download/">Darood E Tanjeena PDF Free Download - ilmkidunya.org[^2^]</a></li>
-<li><a href="https://www.scribd.com/document/126895606/Darood-E-Tanjeena">Darood E Tanjeena | PDF | Abrahamic Religions - Scribd[^3^]</a></li>
-</ul>
-<p>These links will allow you to download Durood E Tanjeena PDF with clear Arabic text and Urdu translation. You can also read online or print the PDF file for your convenience.</p>
-<h2>How to Recite Durood E Tanjeena?</h2>
-<p>There are different ways to recite Durood E Tanjeena, depending on your intention and situation. Here are some common methods:</p>
-<ul>
-<li>If you want to recite Durood E Tanjeena for general blessings and protection, you can recite it three times a day, preferably after Fajr, Asr, and Isha prayers.</li>
-<li>If you want to recite Durood E Tanjeena for a specific need or problem, you can recite it 100 times or more in one sitting, preferably after Isha prayer. You can also make a sincere dua after reciting it.</li>
-<li>If you want to recite Durood E Tanjeena for spiritual benefits and closeness to Allah, you can recite it as much as you can, especially on Fridays and during the night.</li>
-</ul>
-<p>When reciting Durood E Tanjeena, you should have faith and sincerity in your heart. You should also send salutations upon the Prophet Muhammad (peace be upon him) before and after reciting it.</p>
-<h2>What are the Benefits of Durood E Tanjeena?</h2>
-<p>Durood E Tanjeena has many benefits for those who recite it regularly. Some of these benefits include:</p>
-<p></p>
-<ul>
-<li>It increases your blessings from Allah and removes your difficulties.</li>
-<li>It protects you from harm and evil.</li>
-<li>It improves your mental and physical health.</li>
-<li>It grants you success in all areas of life.</li>
-<li>It purifies your</p> d5da3c52bf<br />
-<br />
-<br />
spaces/1phancelerku/anime-remove-background/Archer Attack 3D Shooter War - How to Become a Master Archer in Action Games.md
DELETED
@@ -1,170 +0,0 @@
-
-<h1>Archer Attack 3D: Shooter War Download - A Guide for Gamers</h1>
-<p>Are you looking for a fun and exciting archer game that will challenge your skills and immerse you in an action-packed adventure? If so, you should check out Archer Attack 3D: Shooter War, one of the best bowman games and shooting games available on Android devices. In this article, we will tell you everything you need to know about this game, including what it is, how to download it, how to play it, and why you should play it. Let's get started!</p>
-<h2>archer attack 3d shooting war download</h2><br /><p><b><b>Download File</b> · <a href="https://jinyurl.com/2uNSnT">https://jinyurl.com/2uNSnT</a></b></p><br /><br />
-<h2>What is Archer Attack 3D: Shooter War?</h2>
-<p>Archer Attack 3D: Shooter War is an action game developed by Matchingham Games that will give you an exciting archer experience with its unique graphics and designs. In this game, you will be the hero and you will be able to direct the bowman the way you want. The decisions will be entirely yours, you will destroy the targets step by step and achieve victory.</p>
-<h3>The story and gameplay of Archer Attack 3D: Shooter War</h3>
-<p>The game has a simple but engaging story that will take you to various conflict areas and scenarios, such as the capture of a military base by militants. You will have to use your bow and arrow to eliminate the enemies and complete the missions. You will also have to face different challenges and obstacles along the way, such as moving targets, traps, bombs, helicopters, tanks, and more.</p>
-<p>The gameplay of Archer Attack 3D: Shooter War is easy to learn but hard to master. You will have to swipe on the screen to aim and release to shoot your arrow. You will also have to adjust your angle and power according to the distance and wind. You will have to be careful not to miss or hit the wrong target, as that will cost you time and health. You will also have to be quick and accurate, as the enemies will shoot back at you or try to escape.</p>
-<h3>The features and graphics of Archer Attack 3D: Shooter War</h3>
-<p>Archer Attack 3D: Shooter War has many features that make it stand out from other archer games and shooting games. Some of these features are:</p>
-<p>archer attack 3d shooter war game<br />
-archer attack 3d apk download<br />
-archer attack 3d android game<br />
-archer attack 3d action game<br />
-archer attack 3d bowman game<br />
-archer attack 3d shooting range<br />
-archer attack 3d combat games<br />
-archer attack 3d longbow games<br />
-archer attack 3d adventure game<br />
-archer attack 3d offline game<br />
-archer attack 3d free download<br />
-archer attack 3d play store<br />
-archer attack 3d app store<br />
-archer attack 3d google play<br />
-archer attack 3d ios game<br />
-archer attack 3d pc game<br />
-archer attack 3d bluestacks<br />
-archer attack 3d windows game<br />
-archer attack 3d mac game<br />
-archer attack 3d laptop game<br />
-archer attack 3d chromebook game<br />
-archer attack 3d matchingham games<br />
-archer attack 3d latest version<br />
-archer attack 3d update download<br />
-archer attack 3d new features<br />
-archer attack 3d unique graphics<br />
-archer attack 3d level design<br />
-archer attack 3d target shooting<br />
-archer attack 3d arrow shooting<br />
-archer attack 3d military base<br />
-archer attack 3d militants shooting<br />
-archer attack 3d sniper shooting<br />
-archer attack 3d stealth shooting<br />
-archer attack 3d war games<br />
-archer attack 3d gun games<br />
-archer attack 3d battlefield games<br />
-archer attack 3d rank up games<br />
-archer attack 3d fun games<br />
-archer attack 3d casual games<br />
-archer attack 3d single player games<br />
-archer attack 3d stylized games<br />
-archer attack 3d data safety games<br />
-archer attack 3d ratings and reviews <br />
-archer attack 3d apkcombo download <br />
-archer attack 3d xapk download <br />
-archer attack 3d old versions download <br />
-archer attack 3d trending searches <br />
-archer attack 3d popular searches <br />
-archer attack 3d hot games</p>
-<ul>
-<li>More than 100 levels with different difficulty levels and objectives.</li>
-<li>A variety of arrows and gear that you can unlock and upgrade as you progress in the game.</li>
-<li>A realistic physics system that simulates the trajectory and impact of your arrows.</li>
-<li>A stunning 3D graphics style that creates a unique visual experience.</li>
-<li>A smooth and responsive control system that allows you to aim and shoot with ease.</li>
-<li>Immersive sound effects and music that enhance the atmosphere of the game.</li>
-</ul>
-<p>Archer Attack 3D: Shooter War is a game that will impress you with its amazing graphics and features. You will not get bored with this game, as it offers a lot of variety and challenge. You will also enjoy the thrill of shooting arrows and hitting your targets with precision and skill.</p>
-<h2>How to download Archer Attack 3D: Shooter War on your device?</h2>
-<p>If you are interested in playing Archer Attack 3D: Shooter War, you will be glad to know that it is free to download and play on your Android device. There are different ways to download this game, depending on your preference and convenience. Here are some of the methods you can use:</p>
-<h3>Downloading Archer Attack 3D: Shooter War from Google Play Store</h3>
-<p>This is the easiest and most recommended way to download Archer Attack 3D: Shooter War on your device. All you have to do is follow these simple steps:</p>
-<ol>
-<li>Open the Google Play Store app on your device.</li>
-<li>Search for "Archer Attack 3D: Shooter War" in the search bar.</li>
-<li>Select the game from the list of results and tap on "Install".</li>
-<li>Wait for the game to download and install on your device.</li>
-<li>Launch the game and enjoy!</li>
-</ol>
-<p>This method ensures that you get the latest version of the game and that it is compatible with your device. You will also be able to update the game automatically whenever there is a new version available.</p>
-<h3>Downloading Archer Attack 3D: Shooter War from APKCombo</h3>
-<p>If you are unable to access the Google Play Store or if you want to download an older version of the game, you can use APKCombo, a website that provides APK files for various Android apps and games. To download Archer Attack 3D: Shooter War from APKCombo, follow these steps:</p>
-<ol>
-<li>Open your browser and go to <a href="">https://apkcombo.com/en-us/apk-downloader/</a>.</li>
-<li>Type "Archer Attack 3D: Shooter War" in the search box and click on "Search".</li>
-<li>Select the game from the list of results and choose the version you want to download.</li>
-<li>Click on "Download APK" and wait for the file to download on your device.</li>
-<li>Open the file manager app on your device and locate the downloaded APK file.</li>
-<li>Tap on the file and allow the installation of unknown sources if prompted.</li>
-<li>Wait for the game to install on your device.</li>
-<li>Launch the game and enjoy!</li>
-</ol>
-<p>This method allows you to download any version of the game you want, even if it is not available on the Google Play Store. However, you have to be careful about the source of the APK file, as some websites may contain malware or viruses. You also have to update the game manually whenever there is a new version available.</p>
-<h3>Downloading Archer Attack 3D: Shooter War on PC using BlueStacks</h3>
-<p>If you want to play Archer Attack 3D: Shooter War on your PC, you can use BlueStacks, an emulator that allows you to run Android apps and games on your computer. To download Archer Attack 3D: Shooter War on PC using BlueStacks, follow these steps:</p>
-<ol>
-<li>Download and install BlueStacks from <a href="">https://www.bluestacks.com/</a>.</li>
-<li>Launch BlueStacks and sign in with your Google account.</li>
-<li>Open the Google Play Store app within BlueStacks.</li>
-<li>Search for "Archer Attack 3D: Shooter War" in the search bar.</li>
-<li>Select the game from the list of results and click on "Install".</li>
-<li>Wait for the game to download and install on BlueStacks.</li>
-<li>Launch the game and enjoy!</li>
-</ol>
-<p>This method allows you to play Archer Attack 3D: Shooter War on a bigger screen and with better controls. You can also use your keyboard and mouse to aim and shoot your arrows. However, you have to make sure that your PC meets the minimum requirements for running BlueStacks smoothly.</p>
-<h2>How to play Archer Attack 3D: Shooter War like a pro?</h2>
-<p>Now that you have downloaded Archer Attack 3D: Shooter War on your device, you may be wondering how to play it like a pro. Well, don't worry, we have some tips and tricks for you that will help you improve your skills and performance in this game. Here are some of them:</p>
-<h3>Tips and tricks for aiming and shooting</h3>
-<ul>
-<li>Aim carefully before releasing your arrow. Don't rush or shoot randomly, as that will waste your arrows and time. Try to aim for the head or the chest of the enemies, as that will deal more damage and earn you more points.</li>
-<li>Use the wind indicator to adjust your angle and power. The wind can affect the direction and speed of your arrow, so you have to compensate for it. The wind indicator will show you the direction and strength of the wind, so you can use it as a guide.</li>
-<li>Use the zoom feature to get a better view of your target. You can zoom in and out by pinching on the screen. This will help you see the details and movements of your target, and make your shots more accurate.</li>
-</ul>
-<h3>Tips and tricks for leveling up and ranking up</h3>
-<ul>
-<li>Complete the missions and challenges to earn coins and stars. Coins are the currency of the game, which you can use to unlock and upgrade your arrows and gear. Stars are the indicators of your progress and performance, which you can use to unlock new levels and modes.</li>
-<li>Play the different modes to earn more coins and stars. The game has four modes: Campaign, Survival, Time Trial, and PvP. Each mode has its own objectives and rewards, so you can choose the one that suits your preference and skill level.</li>
-<li>Watch ads to get free coins and stars. The game will occasionally offer you to watch a short video ad in exchange for some coins or stars. This is an easy way to boost your resources without spending any real money.</li>
-</ul>
-<h3>Tips and tricks for using different arrows and gear</h3>
-<ul>
-<li>Experiment with different arrows and gear to find the best combination for your style. The game has a variety of arrows and gear that you can unlock and upgrade, such as fire arrows, ice arrows, explosive arrows, helmets, vests, gloves, boots, and more. Each arrow and gear has its own advantages and disadvantages, such as damage, range, speed, durability, effect, etc. You can mix and match them to create your own custom loadout.</li>
-<li>Use the right arrow and gear for the right situation. Some arrows and gear are more effective than others in certain scenarios, such as fire arrows against wooden targets, ice arrows against metal targets, explosive arrows against groups of enemies, helmets against headshots, vests against body shots, gloves against traps, boots against bombs, etc. You have to be smart and strategic about your choices.</li>
-<li>Upgrade your arrows and gear regularly to improve their performance. You can upgrade your arrows and gear by spending coins in the shop. Upgrading will increase their stats and abilities, such as damage, range, speed, durability, effect, etc. Upgrading will also change their appearance and make them look cooler.</li>
-</ul>
-<h2>Why should you play Archer Attack 3D: Shooter War?</h2>
-<p>Archer Attack 3D: Shooter War is a game that will appeal to anyone who loves archery games or shooting games. It is a game that will provide you with hours of fun and entertainment with its addictive gameplay and stunning graphics. Here are some of the reasons why you should play Archer Attack 3D: Shooter War:</p>
-<h3>The benefits of playing Archer Attack 3D: Shooter War</h3>
-<p>Playing Archer Attack 3D: Shooter War can have many benefits for you, such as:</p>
-<ul>
-<li>Improving your concentration and focus. You have to pay attention to every detail in this game, such as the wind, the distance, the movement of your target, etc. This will help you sharpen your mind and enhance your cognitive skills.</li>
-<li>Improving your hand-eye coordination and reflexes. You have to swipe on the screen to aim and release to shoot your arrow. You also have to react quickly to avoid or counterattack the enemies. This will help you improve your motor skills and reaction time.</li>
-<li>Improving your creativity and problem-solving skills. You have to use different arrows and strategies to win the game. This will help you improve your creativity and problem-solving skills.</li>
-<li>Relieving your stress and boredom. You can play this game anytime and anywhere, as it does not require an internet connection or a lot of storage space. You can also play this game for as long or as short as you want, as it has no time limit or energy system. You can also enjoy the fun and satisfaction of shooting arrows and hitting your targets. This will help you relieve your stress and boredom.</li>
-</ul>
-<h3>The reviews and ratings of Archer Attack 3D: Shooter War</h3>
-<p>Archer Attack 3D: Shooter War is a game that has received positive reviews and ratings from many players and critics. The game has a rating of 4.2 out of 5 stars on the Google Play Store, based on over 10,000 reviews. Some of the comments from the players are:</p>
-<blockquote>
-<p>"This is one of the best archer games I have ever played. The graphics are amazing and the gameplay is addictive. I love the different modes and levels, they are challenging and fun. I also like the different arrows and gear, they are cool and useful. I highly recommend this game to anyone who likes archery games or shooting games."</p>
-</blockquote>
-<blockquote>
-<p>"This game is awesome! It is very realistic and exciting. The physics and the effects are very good. The controls are smooth and easy to use. The missions and the scenarios are very interesting and varied. The sound and the music are also very good. This game is a must-have for archer fans."</p>
-</blockquote>
-<blockquote>
-<p>"This game is very entertaining and enjoyable. It is a great way to pass time and have fun. The graphics are beautiful and the design is unique. The gameplay is simple but challenging. The arrows and the gear are awesome and customizable. The modes and the levels are diverse and rewarding. This game is a great archer game."</p>
-</blockquote>
-<h2>Conclusion</h2>
-<p>Archer Attack 3D: Shooter War is a game that will give you an amazing archer experience with its stunning graphics and features. You will be able to download it for free on your Android device or your PC using different methods. You will also be able to play it like a pro using some tips and tricks that we have shared with you. You will also be able to enjoy the benefits of playing this game, such as improving your concentration, coordination, creativity, problem-solving skills, stress relief, and boredom relief. You will also be able to see the positive reviews and ratings of this game from other players and critics.</p>
-<p>If you are looking for a fun and exciting archer game that will challenge your skills and immerse you in an action-packed adventure, you should definitely try Archer Attack 3D: Shooter War. It is one of the best bowman games and shooting games available on Android devices. Download it now and enjoy!</p>
-<h2>FAQs</h2>
-<p>Here are some of the frequently asked questions about Archer Attack 3D: Shooter War:</p>
-<h4>Q: Is Archer Attack 3D: Shooter War free to play?</h4>
-<p>A: Yes, Archer Attack 3D: Shooter War is free to download and play on your Android device or your PC using BlueStacks. However, the game may contain some in-app purchases that can enhance your gameplay or remove ads.</p>
-<h4>Q: How can I contact the developer of Archer Attack 3D: Shooter War?</h4>
-<p>A: You can contact the developer of Archer Attack 3D: Shooter War by sending an email to <a href="">[email protected]</a>. You can also follow them on Facebook at <a href="">https://www.facebook.com/matchinghamgames/</a>.</p>
-<h4>Q: How can I report a bug or a problem in Archer Attack 3D: Shooter War?</h4>
-<p>A: You can report a bug or a problem in Archer Attack 3D: Shooter War by sending an email to <a href="">[email protected]</a>. You can also leave a comment on the Google Play Store page of the game or on their Facebook page.</p>
-<h4>Q: How can I share my feedback or suggestions for Archer Attack 3D: Shooter War?</h4>
-<p>A: You can share your feedback or suggestions for Archer Attack 3D: Shooter War by sending an email to <a href="">[email protected]</a>. You can also leave a review on the Google Play Store page of the game or on their Facebook page.</p>
-<h4>Q: How can I support Archer Attack 3D: Shooter War?</h4>
-<p>A: You can support Archer Attack 3D: Shooter War by doing any of the following things:</p>
-<ul>
-<li>Rate and review the game on the Google Play Store or on their Facebook page.</li>
-<li>Share the game with your friends and family on social media or by word of mouth.</li>
-<li>Make an in-app purchase to get more coins, stars, or remove ads.</li>
-</ul>
-<p>These actions will help the developer to improve the game and to create more games like this in the future.</p> 197e85843d<br />
-<br />
-<br />
spaces/2023Liu2023/bingo/src/components/ui/badge.tsx
DELETED
@@ -1,36 +0,0 @@
-import * as React from 'react'
-import { cva, type VariantProps } from 'class-variance-authority'
-
-import { cn } from '@/lib/utils'
-
-const badgeVariants = cva(
-  'inline-flex items-center rounded-full border px-2.5 py-0.5 text-xs font-semibold transition-colors focus:outline-none focus:ring-2 focus:ring-ring focus:ring-offset-2',
-  {
-    variants: {
-      variant: {
-        default:
-          'border-transparent bg-primary text-primary-foreground hover:bg-primary/80',
-        secondary:
-          'border-transparent bg-secondary text-secondary-foreground hover:bg-secondary/80',
-        destructive:
-          'border-transparent bg-destructive text-destructive-foreground hover:bg-destructive/80',
-        outline: 'text-foreground'
-      }
-    },
-    defaultVariants: {
-      variant: 'default'
-    }
-  }
-)
-
-export interface BadgeProps
-  extends React.HTMLAttributes<HTMLDivElement>,
-    VariantProps<typeof badgeVariants> {}
-
-function Badge({ className, variant, ...props }: BadgeProps) {
-  return (
-    <div className={cn(badgeVariants({ variant }), className)} {...props} />
-  )
-}
-
-export { Badge, badgeVariants }
spaces/2ndelement/voicevox/voicevox_engine/morphing.py
DELETED
@@ -1,208 +0,0 @@
-from copy import deepcopy
-from dataclasses import dataclass
-from itertools import chain
-from typing import Dict, List, Tuple
-
-import numpy as np
-import pyworld as pw
-from scipy.signal import resample
-
-from .metas.Metas import Speaker, SpeakerSupportPermittedSynthesisMorphing, StyleInfo
-from .metas.MetasStore import construct_lookup
-from .model import AudioQuery, MorphableTargetInfo, SpeakerNotFoundError
-from .synthesis_engine import SynthesisEngine
-
-
-# FIXME: ndarray type hint, https://github.com/JeremyCCHsu/Python-Wrapper-for-World-Vocoder/blob/2b64f86197573497c685c785c6e0e743f407b63e/pyworld/pyworld.pyx#L398 # noqa
-@dataclass(frozen=True)
-class MorphingParameter:
-    fs: int
-    frame_period: float
-    base_f0: np.ndarray
-    base_aperiodicity: np.ndarray
-    base_spectrogram: np.ndarray
-    target_spectrogram: np.ndarray
-
-
-def create_morphing_parameter(
-    base_wave: np.ndarray,
-    target_wave: np.ndarray,
-    fs: int,
-) -> MorphingParameter:
-    frame_period = 1.0
-    base_f0, base_time_axis = pw.harvest(base_wave, fs, frame_period=frame_period)
-    base_spectrogram = pw.cheaptrick(base_wave, base_f0, base_time_axis, fs)
-    base_aperiodicity = pw.d4c(base_wave, base_f0, base_time_axis, fs)
-
-    target_f0, morph_time_axis = pw.harvest(target_wave, fs, frame_period=frame_period)
-    target_spectrogram = pw.cheaptrick(target_wave, target_f0, morph_time_axis, fs)
-    target_spectrogram.resize(base_spectrogram.shape)
-
-    return MorphingParameter(
-        fs=fs,
-        frame_period=frame_period,
-        base_f0=base_f0,
-        base_aperiodicity=base_aperiodicity,
-        base_spectrogram=base_spectrogram,
-        target_spectrogram=target_spectrogram,
-    )
-
-
-def get_morphable_targets(
-    speakers: List[Speaker],
-    base_speakers: List[int],
-) -> List[Dict[int, MorphableTargetInfo]]:
-    """
-    speakers: information on all speakers
-    base_speakers: list of base speakers (style IDs) to check for morphability
-    """
-    speaker_lookup = construct_lookup(speakers)
-
-    morphable_targets_arr = []
-    for base_speaker in base_speakers:
-        morphable_targets = dict()
-        for style in chain.from_iterable(speaker.styles for speaker in speakers):
-            morphable_targets[style.id] = MorphableTargetInfo(
-                is_morphable=is_synthesis_morphing_permitted(
-                    speaker_lookup=speaker_lookup,
-                    base_speaker=base_speaker,
-                    target_speaker=style.id,
-                )
-            )
-        morphable_targets_arr.append(morphable_targets)
-
-    return morphable_targets_arr
-
-
-def is_synthesis_morphing_permitted(
-    speaker_lookup: Dict[int, Tuple[Speaker, StyleInfo]],
-    base_speaker: int,
-    target_speaker: int,
-) -> bool:
-    """
-    Return whether the specified speakers may be morphed together.
-    Raises SpeakerNotFoundError if a speaker cannot be found.
-    """
-
-    base_speaker_data = speaker_lookup[base_speaker]
-    target_speaker_data = speaker_lookup[target_speaker]
-
-    if base_speaker_data is None or target_speaker_data is None:
-        raise SpeakerNotFoundError(
-            base_speaker if base_speaker_data is None else target_speaker
-        )
-
-    base_speaker_info, _ = base_speaker_data
-    target_speaker_info, _ = target_speaker_data
-
-    base_speaker_uuid = base_speaker_info.speaker_uuid
-    target_speaker_uuid = target_speaker_info.speaker_uuid
-
-    base_speaker_morphing_info: SpeakerSupportPermittedSynthesisMorphing = (
-        base_speaker_info.supported_features.permitted_synthesis_morphing
-    )
-
-    target_speaker_morphing_info: SpeakerSupportPermittedSynthesisMorphing = (
-        target_speaker_info.supported_features.permitted_synthesis_morphing
-    )
-
-    # False if morphing is prohibited for either speaker
-    if (
-        base_speaker_morphing_info == SpeakerSupportPermittedSynthesisMorphing.NOTHING
-        or target_speaker_morphing_info
-        == SpeakerSupportPermittedSynthesisMorphing.NOTHING
-    ):
-        return False
-    # If restricted to the same speaker, compare speaker identity
-    if (
-        base_speaker_morphing_info == SpeakerSupportPermittedSynthesisMorphing.SELF_ONLY
-        or target_speaker_morphing_info
-        == SpeakerSupportPermittedSynthesisMorphing.SELF_ONLY
-    ):
-        return base_speaker_uuid == target_speaker_uuid
-    # Check, just to be safe, that morphing is explicitly permitted
-    return (
-        base_speaker_morphing_info == SpeakerSupportPermittedSynthesisMorphing.ALL
-        and target_speaker_morphing_info == SpeakerSupportPermittedSynthesisMorphing.ALL
-    )
-
-
-def synthesis_morphing_parameter(
-    engine: SynthesisEngine,
-    query: AudioQuery,
-    base_speaker: int,
-    target_speaker: int,
-) -> MorphingParameter:
-    query = deepcopy(query)
-
-    # To work around a bug, run WORLD at the default sampling rate, then convert to the requested rate afterwards
-    query.outputSamplingRate = engine.default_sampling_rate
-
-    # Synthesize in mono because the audio is passed to WORLD
-    query.outputStereo = False
-
-    base_wave = engine.synthesis(query=query, speaker_id=base_speaker).astype("float")
-    target_wave = engine.synthesis(query=query, speaker_id=target_speaker).astype(
-        "float"
-    )
-
-    return create_morphing_parameter(
-        base_wave=base_wave,
-        target_wave=target_wave,
-        fs=query.outputSamplingRate,
-    )
-
-
-def synthesis_morphing(
-    morph_param: MorphingParameter,
-    morph_rate: float,
-    output_fs: int,
-    output_stereo: bool = False,
-) -> np.ndarray:
-    """
-    Generate audio morphed between two speakers at the given ratio.
-
-    Parameters
-    ----------
-    morph_param : MorphingParameter
-        Parameters created with `synthesis_morphing_parameter` or `create_morphing_parameter`
-
-    morph_rate : float
-        Morphing ratio.
-        0.0 yields the base speaker, 1.0 approaches the target speaker.
-
-    Returns
-    -------
-    generated : np.ndarray
-        The morphed audio
-
-    Raises
-    -------
-    ValueError
-        If morph_rate is outside [0, 1]
-    """
-
-    if morph_rate < 0.0 or morph_rate > 1.0:
-        raise ValueError("morph_rate must be specified in the range 0.0 to 1.0")
-
-    morph_spectrogram = (
-        morph_param.base_spectrogram * (1.0 - morph_rate)
-        + morph_param.target_spectrogram * morph_rate
-    )
-
-    y_h = pw.synthesize(
-        morph_param.base_f0,
-        morph_spectrogram,
-        morph_param.base_aperiodicity,
-        morph_param.fs,
-        morph_param.frame_period,
-    )
-
-    # TODO: unify with the resampling logic in synthesis_engine.py
-    if output_fs != morph_param.fs:
-        y_h = resample(y_h, output_fs * len(y_h) // morph_param.fs)
-
-    if output_stereo:
-        y_h = np.array([y_h, y_h]).T
-
-    return y_h
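
For readers skimming the diff: the essence of the deleted module is a linear blend of WORLD spectral envelopes, with F0 and aperiodicity taken from the base speaker. Below is a minimal sketch of the same technique applied directly to two mono float64 waveforms of equal length (the function name and the equal-length assumption are illustrative; in the module above both waves come from the same AudioQuery, so their framing matches):

```python
import numpy as np
import pyworld as pw


def morph_waves(base_wave, target_wave, fs, morph_rate):
    # Decompose both waves with WORLD (pyworld requires float64 input).
    base_f0, t = pw.harvest(base_wave, fs, frame_period=1.0)
    base_sp = pw.cheaptrick(base_wave, base_f0, t, fs)
    base_ap = pw.d4c(base_wave, base_f0, t, fs)

    target_f0, target_t = pw.harvest(target_wave, fs, frame_period=1.0)
    target_sp = pw.cheaptrick(target_wave, target_f0, target_t, fs)
    target_sp.resize(base_sp.shape)  # crude shape alignment, as in the module above

    # Blend only the spectral envelopes; F0 and aperiodicity stay those of the base.
    morph_sp = base_sp * (1.0 - morph_rate) + target_sp * morph_rate
    return pw.synthesize(base_f0, morph_sp, base_ap, fs, 1.0)
```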
spaces/4Taps/SadTalker/src/utils/hparams.py
DELETED
@@ -1,160 +0,0 @@
-from glob import glob
-import os
-
-class HParams:
-    def __init__(self, **kwargs):
-        self.data = {}
-
-        for key, value in kwargs.items():
-            self.data[key] = value
-
-    def __getattr__(self, key):
-        if key not in self.data:
-            raise AttributeError("'HParams' object has no attribute %s" % key)
-        return self.data[key]
-
-    def set_hparam(self, key, value):
-        self.data[key] = value
-
-    def values(self):
-        # Returns the underlying dict. Added because hparams_debug_string()
-        # below calls hparams.values(), which was otherwise undefined here.
-        return self.data
-
-
-# Default hyperparameters
-hparams = HParams(
-    num_mels=80,  # Number of mel-spectrogram channels and local conditioning dimensionality
-    # network
-    rescale=True,  # Whether to rescale audio prior to preprocessing
-    rescaling_max=0.9,  # Rescaling value
-
-    # Use LWS (https://github.com/Jonathan-LeRoux/lws) for STFT and phase reconstruction
-    # It's preferred to set True to use with https://github.com/r9y9/wavenet_vocoder
-    # Does not work if n_fft is not a multiple of hop_size!!
-    use_lws=False,
-
-    n_fft=800,  # Extra window size is filled with 0 paddings to match this parameter
-    hop_size=200,  # For 16000Hz, 200 = 12.5 ms (0.0125 * sample_rate)
-    win_size=800,  # For 16000Hz, 800 = 50 ms (If None, win_size = n_fft) (0.05 * sample_rate)
-    sample_rate=16000,  # 16000Hz (corresponding to librispeech) (sox --i <filename>)
-
-    frame_shift_ms=None,  # Can replace hop_size parameter. (Recommended: 12.5)
-
-    # Mel and Linear spectrograms normalization/scaling and clipping
-    signal_normalization=True,
-    # Whether to normalize mel spectrograms to some predefined range (following below parameters)
-    allow_clipping_in_normalization=True,  # Only relevant if mel_normalization = True
-    symmetric_mels=True,
-    # Whether to scale the data to be symmetric around 0. (Also multiplies the output range by 2,
-    # faster and cleaner convergence)
-    max_abs_value=4.,
-    # max absolute value of data. If symmetric, data will be [-max, max] else [0, max] (Must not
-    # be too big to avoid gradient explosion,
-    # not too small for fast convergence)
-    # Contribution by @begeekmyfriend
-    # Spectrogram Pre-Emphasis (Lfilter: Reduce spectrogram noise and helps model certitude
-    # levels. Also allows for better G&L phase reconstruction)
-    preemphasize=True,  # whether to apply filter
-    preemphasis=0.97,  # filter coefficient.
-
-    # Limits
-    min_level_db=-100,
-    ref_level_db=20,
-    fmin=55,
-    # Set this to 55 if your speaker is male! if female, 95 should help taking off noise. (To
-    # test depending on dataset. Pitch info: male~[65, 260], female~[100, 525])
-    fmax=7600,  # To be increased/reduced depending on data.
-
-    ###################### Our training parameters #################################
-    img_size=96,
-    fps=25,
-
-    batch_size=16,
-    initial_learning_rate=1e-4,
-    nepochs=300000,  ### ctrl + c, stop whenever eval loss is consistently greater than train loss for ~10 epochs
-    num_workers=20,
-    checkpoint_interval=3000,
-    eval_interval=3000,
-    writer_interval=300,
-    save_optimizer_state=True,
-
-    syncnet_wt=0.0,  # is initially zero, will be set automatically to 0.03 later. Leads to faster convergence.
-    syncnet_batch_size=64,
-    syncnet_lr=1e-4,
-    syncnet_eval_interval=1000,
-    syncnet_checkpoint_interval=10000,
-
-    disc_wt=0.07,
-    disc_initial_learning_rate=1e-4,
-)
-
-
-# Default hyperparameters (debug variant: tiny batches and frequent evals)
-hparamsdebug = HParams(
-    num_mels=80,  # Number of mel-spectrogram channels and local conditioning dimensionality
-    # network
-    rescale=True,  # Whether to rescale audio prior to preprocessing
-    rescaling_max=0.9,  # Rescaling value
-
-    # Use LWS (https://github.com/Jonathan-LeRoux/lws) for STFT and phase reconstruction
-    # It's preferred to set True to use with https://github.com/r9y9/wavenet_vocoder
-    # Does not work if n_fft is not a multiple of hop_size!!
-    use_lws=False,
-
-    n_fft=800,  # Extra window size is filled with 0 paddings to match this parameter
-    hop_size=200,  # For 16000Hz, 200 = 12.5 ms (0.0125 * sample_rate)
-    win_size=800,  # For 16000Hz, 800 = 50 ms (If None, win_size = n_fft) (0.05 * sample_rate)
-    sample_rate=16000,  # 16000Hz (corresponding to librispeech) (sox --i <filename>)
-
-    frame_shift_ms=None,  # Can replace hop_size parameter. (Recommended: 12.5)
-
-    # Mel and Linear spectrograms normalization/scaling and clipping
-    signal_normalization=True,
-    # Whether to normalize mel spectrograms to some predefined range (following below parameters)
-    allow_clipping_in_normalization=True,  # Only relevant if mel_normalization = True
-    symmetric_mels=True,
-    # Whether to scale the data to be symmetric around 0. (Also multiplies the output range by 2,
-    # faster and cleaner convergence)
-    max_abs_value=4.,
-    # max absolute value of data. If symmetric, data will be [-max, max] else [0, max] (Must not
-    # be too big to avoid gradient explosion,
-    # not too small for fast convergence)
-    # Contribution by @begeekmyfriend
-    # Spectrogram Pre-Emphasis (Lfilter: Reduce spectrogram noise and helps model certitude
-    # levels. Also allows for better G&L phase reconstruction)
-    preemphasize=True,  # whether to apply filter
-    preemphasis=0.97,  # filter coefficient.
-
-    # Limits
-    min_level_db=-100,
-    ref_level_db=20,
-    fmin=55,
-    # Set this to 55 if your speaker is male! if female, 95 should help taking off noise. (To
-    # test depending on dataset. Pitch info: male~[65, 260], female~[100, 525])
-    fmax=7600,  # To be increased/reduced depending on data.
-
-    ###################### Our training parameters #################################
-    img_size=96,
-    fps=25,
-
-    batch_size=2,
-    initial_learning_rate=1e-3,
-    nepochs=100000,  ### ctrl + c, stop whenever eval loss is consistently greater than train loss for ~10 epochs
-    num_workers=0,
-    checkpoint_interval=10000,
-    eval_interval=10,
-    writer_interval=5,
-    save_optimizer_state=True,

-    syncnet_wt=0.0,  # is initially zero, will be set automatically to 0.03 later. Leads to faster convergence.
-    syncnet_batch_size=64,
-    syncnet_lr=1e-4,
-    syncnet_eval_interval=10000,
-    syncnet_checkpoint_interval=10000,
-
-    disc_wt=0.07,
-    disc_initial_learning_rate=1e-4,
-)
-
-
-def hparams_debug_string():
-    values = hparams.values()
-    hp = ["  %s: %s" % (name, values[name]) for name in sorted(values) if name != "sentences"]
-    return "Hyperparameters:\n" + "\n".join(hp)
spaces/AI-Hobbyist/Hoyo-RVC/docs/faiss_tips_ja.md
DELETED
@@ -1,101 +0,0 @@
|
|
1 |
-
faiss tuning TIPS
|
2 |
-
==================
|
3 |
-
# about faiss
|
4 |
-
faissはfacebook researchの開発する、密なベクトルに対する近傍探索をまとめたライブラリで、多くの近似近傍探索の手法を効率的に実装しています。
|
5 |
-
近似近傍探索はある程度精度を犠牲にしながら高速に類似するベクトルを探します。
|
6 |
-
|
7 |
-
## faiss in RVC
|
8 |
-
RVCではHuBERTで変換した特徴量のEmbeddingに対し、学習データから生成されたEmbeddingと類似するものを検索し、混ぜることでより元の音声に近い変換を実現しています。ただ、この検索は愚直に行うと時間がかかるため、近似近傍探索を用いることで高速な変換を実現しています。
|
9 |
-
|
10 |
-
# 実装のoverview
|
11 |
-
モデルが配置されている '/logs/your-experiment/3_feature256'には各音声データからHuBERTで抽出された特徴量が配置されています。
|
12 |
-
ここからnpyファイルをファイル名でソートした順番で読み込み、ベクトルを連結してbig_npyを作成しfaissを学習させます。(このベクトルのshapeは[N, 256]です。)
|
13 |
-
|
14 |
-
本Tipsではまずこれらのパラメータの意味を解説します。
|
15 |
-
|
16 |
-
# 手法の解説
|
17 |
-
## index factory
|
18 |
-
index factoryは複数の近似近傍探索の手法を繋げるパイプラインをstringで表記するfaiss独自の記法です。
|
19 |
-
これにより、index factoryの文字列を変更するだけで様々な近似近傍探索の手法を試せます。
|
20 |
-
RVCでは以下のように使われています。
|
21 |
-
|
22 |
-
```python
|
23 |
-
index = faiss.index_factory(256, "IVF%s,Flat" % n_ivf)
|
24 |
-
```
|
25 |
-
index_factoryの引数のうち、1つ目はベクトルの次元数、2つ目はindex factoryの文字列で、3つ目には用いる距離を指定することができます。
|
26 |
-
|
27 |
-
より詳細な記法については
|
28 |
-
https://github.com/facebookresearch/faiss/wiki/The-index-factory
|
29 |
-
|
30 |
-
## 距離指標
|
31 |
-
embeddingの類似度として用いられる代表的な指標として以下の二つがあります。
|
32 |
-
|
33 |
-
- ユークリッド距離(METRIC_L2)
|
34 |
-
- 内積(METRIC_INNER_PRODUCT)
|
35 |
-
|
36 |
-
ユークリッド距離では各次元において二乗の差をとり、全次元の差を足してから平方根をとります。これは日常的に用いる2次元、3次元での距離と同じです。
|
37 |
-
内積はこのままでは類似度の指標として用いず、一般的にはL2ノルムで正規化してから内積をとるコサイン類似度を用います。
|
38 |
-
|
39 |
-
どちらがよいかは場合によりますが、word2vec等で得られるembeddingやArcFace等で学習した類似画像検索のモデルではコサイン類似度が用いられることが多いです。ベクトルXに対してl2正規化をnumpyで行う場合は、0 divisionを避けるために十分に小さな値をepsとして以下のコードで可能です。
|
40 |
-
|
41 |
-
```python
|
42 |
-
X_normed = X / np.maximum(eps, np.linalg.norm(X, ord=2, axis=-1, keepdims=True))
|
43 |
-
```
|
44 |
-
|
45 |
-
また、index factoryには第3引数に渡す値を選ぶことで計算に用いる距離指標を変更できます。
|
46 |
-
|
47 |
-
```python
|
48 |
-
index = faiss.index_factory(dimention, text, faiss.METRIC_INNER_PRODUCT)
|
49 |
-
```
|
50 |
-
|
51 |
-
## IVF
|
52 |
-
IVF(Inverted file indexes)は全文検索における転置インデックスと似たようなアルゴリズムです。
|
53 |
-
学習時には検索対象に対してkmeansでクラスタリングを行い、クラスタ中心を用いてボロノイ分割を行います。各データ点には一つずつクラスタが割り当てられるので、クラスタからデータ点を逆引きする辞書を作成します。
|
54 |
-
|
55 |
-
例えば以下のようにクラスタが割り当てられた場合
|
56 |
-
|index|クラスタ|
|
57 |
-
|-----|-------|
|
58 |
-
|1|A|
|
59 |
-
|2|B|
|
60 |
-
|3|A|
|
61 |
-
|4|C|
|
62 |
-
|5|B|
|
63 |
-
|
64 |
-
作成される転置インデックスは以下のようになります。
|
65 |
-
|
66 |
-
|クラスタ|index|
|
67 |
-
|-------|-----|
|
68 |
-
|A|1, 3|
|
69 |
-
|B|2, 5|
|
70 |
-
|C|4|
|
71 |
-
|
72 |
-
検索時にはまずクラスタからn_probe個のクラスタを検索し、次にそれぞれのクラスタに属するデータ点について距離を計算します。
|
73 |
-
|
74 |
-
# 推奨されるパラメータ
|
75 |
-
indexの選び方については公式にガイドラインがあるので、それに準じて説明します。
|
76 |
-
https://github.com/facebookresearch/faiss/wiki/Guidelines-to-choose-an-index
|
77 |
-
|
78 |
-
1M以下のデータセットにおいては4bit-PQが2023年4月時点ではfaissで利用できる最も効率的な手法です。
|
79 |
-
これをIVFと組み合わせ、4bit-PQで候補を絞り、最後に正確な指標で距離を再計算するには以下のindex factoryを用いることで記載できます。
|
80 |
-
|
81 |
-
```python
|
82 |
-
index = faiss.index_factory(256, "IVF1024,PQ128x4fs,RFlat")
|
83 |
-
```
|
84 |
-
|
85 |
-
## IVFの推奨パラメータ
|
86 |
-
IVFの数が多すぎる場合、たとえばデータ数の数だけIVFによる粗量子化を行うと、これは愚直な全探索と同じになり効率が悪いです。
|
87 |
-
1M以下の場合ではIVFの値はデータ点の数Nに対して4*sqrt(N) ~ 16*sqrt(N)に推奨しています。
|
88 |
-
|
89 |
-
n_probeはn_probeの数に比例して計算時間が増えるので、精度と相談して適切に選んでください。個人的にはRVCにおいてそこまで精度は必要ないと思うのでn_probe = 1で良いと思います。
|
90 |
-
|
91 |
-
## FastScan
|
92 |
-
FastScanは直積量子化で大まかに距離を近似するのを、レジスタ内で行うことにより高速に行うようにした手法です。
|
93 |
-
直積量子化は学習時にd次元ごと(通常はd=2)に独立してクラスタリングを行い、クラスタ同士の距離を事前計算してlookup tableを作成します。予測時はlookup tableを見ることで各次元の距離をO(1)で計算できます。
|
94 |
-
そのため、PQの次に指定する数字は通常ベクトルの半分の次元を指定します。

For a more detailed description of FastScan, see the official documentation.
https://github.com/facebookresearch/faiss/wiki/Fast-accumulation-of-PQ-and-AQ-codes-(FastScan)

## RFlat
RFlat is an instruction to recompute the rough distances produced by FastScan with the exact distance specified by the third argument of the index factory.
When retrieving the k nearest neighbors, the distances of k*k_factor points are recomputed.

spaces/AIGC-Audio/AudioGPT/NeuralSeq/data_gen/tts/base_preprocess.py
DELETED
@@ -1,254 +0,0 @@
import json
import os
import random
import re
import traceback
from collections import Counter
from functools import partial
import pandas as pd
import librosa
from tqdm import tqdm
from data_gen.tts.txt_processors.base_text_processor import get_txt_processor_cls
from data_gen.tts.wav_processors.base_processor import get_wav_processor_cls
from utils.hparams import hparams
from utils.multiprocess_utils import multiprocess_run_tqdm
from utils.os_utils import link_file, move_file, remove_file
from data_gen.tts.data_gen_utils import is_sil_phoneme, build_token_encoder


class BasePreprocessor:
    def __init__(self):
        self.preprocess_args = hparams['preprocess_args']
        txt_processor = self.preprocess_args['txt_processor']
        self.txt_processor = get_txt_processor_cls(txt_processor)
        self.raw_data_dir = hparams['raw_data_dir']
        self.processed_dir = hparams['processed_data_dir']
        self.spk_map_fn = f"{self.processed_dir}/spk_map.json"

    def meta_data(self):
        """
        :return: {'item_name': Str, 'wav_fn': Str, 'txt': Str, 'spk_name': Str, 'txt_loader': None or Func}
        """
        raise NotImplementedError

    def process(self):
        processed_dir = self.processed_dir
        wav_processed_tmp_dir = f'{processed_dir}/processed_tmp'
        remove_file(wav_processed_tmp_dir)
        os.makedirs(wav_processed_tmp_dir, exist_ok=True)
        wav_processed_dir = f'{processed_dir}/{self.wav_processed_dirname}'
        remove_file(wav_processed_dir)
        os.makedirs(wav_processed_dir, exist_ok=True)

        meta_data = list(tqdm(self.meta_data(), desc='Load meta data'))
        item_names = [d['item_name'] for d in meta_data]
        assert len(item_names) == len(set(item_names)), 'Key `item_name` should be Unique.'

        # preprocess data
        phone_list = []
        word_list = []
        spk_names = set()
        process_item = partial(self.preprocess_first_pass,
                               txt_processor=self.txt_processor,
                               wav_processed_dir=wav_processed_dir,
                               wav_processed_tmp=wav_processed_tmp_dir,
                               preprocess_args=self.preprocess_args)
        items = []
        args = [{
            'item_name': item_raw['item_name'],
            'txt_raw': item_raw['txt'],
            'wav_fn': item_raw['wav_fn'],
            'txt_loader': item_raw.get('txt_loader'),
            'others': item_raw.get('others', None)
        } for item_raw in meta_data]
        for item_, (item_id, item) in zip(meta_data, multiprocess_run_tqdm(process_item, args, desc='Preprocess')):
            if item is not None:
                item_.update(item)
                item = item_
                if 'txt_loader' in item:
                    del item['txt_loader']
                item['id'] = item_id
                item['spk_name'] = item.get('spk_name', '<SINGLE_SPK>')
                item['others'] = item.get('others', None)
                phone_list += item['ph'].split(" ")
                word_list += item['word'].split(" ")
                spk_names.add(item['spk_name'])
                items.append(item)

        # add encoded tokens
        ph_encoder, word_encoder = self._phone_encoder(phone_list), self._word_encoder(word_list)
        spk_map = self.build_spk_map(spk_names)
        args = [{
            'ph': item['ph'], 'word': item['word'], 'spk_name': item['spk_name'],
            'word_encoder': word_encoder, 'ph_encoder': ph_encoder, 'spk_map': spk_map
        } for item in items]
        for idx, item_new_kv in multiprocess_run_tqdm(self.preprocess_second_pass, args, desc='Add encoded tokens'):
            items[idx].update(item_new_kv)

        # build mfa data
        if self.preprocess_args['use_mfa']:
            mfa_dict = set()
            mfa_input_dir = f'{processed_dir}/mfa_inputs'
            remove_file(mfa_input_dir)
            # group MFA inputs for better parallelism
            mfa_groups = [i // self.preprocess_args['nsample_per_mfa_group'] for i in range(len(items))]
            if self.preprocess_args['mfa_group_shuffle']:
                random.seed(hparams['seed'])
                random.shuffle(mfa_groups)
            args = [{
                'item': item, 'mfa_input_dir': mfa_input_dir,
                'mfa_group': mfa_group, 'wav_processed_tmp': wav_processed_tmp_dir,
                'preprocess_args': self.preprocess_args
            } for item, mfa_group in zip(items, mfa_groups)]
            for i, (ph_gb_word_nosil, new_wav_align_fn) in multiprocess_run_tqdm(
                    self.build_mfa_inputs, args, desc='Build MFA data'):
                items[i]['wav_align_fn'] = new_wav_align_fn
                for w in ph_gb_word_nosil.split(" "):
                    mfa_dict.add(f"{w} {w.replace('_', ' ')}")
            mfa_dict = sorted(mfa_dict)
            with open(f'{processed_dir}/mfa_dict.txt', 'w') as f:
                f.writelines([f'{l}\n' for l in mfa_dict])
        with open(f"{processed_dir}/{self.meta_csv_filename}.json", 'w') as f:
            f.write(re.sub(r'\n\s+([\d+\]])', r'\1', json.dumps(items, ensure_ascii=False, sort_keys=False, indent=1)))
        remove_file(wav_processed_tmp_dir)

    @classmethod
    def preprocess_first_pass(cls, item_name, txt_raw, txt_processor,
                              wav_fn, wav_processed_dir, wav_processed_tmp,
                              preprocess_args, txt_loader=None, others=None):
        try:
            if txt_loader is not None:
                txt_raw = txt_loader(txt_raw)
            ph, txt, word, ph2word, ph_gb_word = cls.txt_to_ph(txt_processor, txt_raw, preprocess_args)
            wav_fn, wav_align_fn = cls.process_wav(
                item_name, wav_fn,
                hparams['processed_data_dir'],
                wav_processed_tmp, preprocess_args)

            # wav for binarization
            ext = os.path.splitext(wav_fn)[1]
            os.makedirs(wav_processed_dir, exist_ok=True)
            new_wav_fn = f"{wav_processed_dir}/{item_name}{ext}"
            move_link_func = move_file if os.path.dirname(wav_fn) == wav_processed_tmp else link_file
            move_link_func(wav_fn, new_wav_fn)
            return {
                'txt': txt, 'txt_raw': txt_raw, 'ph': ph,
                'word': word, 'ph2word': ph2word, 'ph_gb_word': ph_gb_word,
                'wav_fn': new_wav_fn, 'wav_align_fn': wav_align_fn,
                'others': others
            }
        except:
            traceback.print_exc()
            print(f"| Error is caught. item_name: {item_name}.")
            return None

    @staticmethod
    def txt_to_ph(txt_processor, txt_raw, preprocess_args):
        txt_struct, txt = txt_processor.process(txt_raw, preprocess_args)
        ph = [p for w in txt_struct for p in w[1]]
        ph_gb_word = ["_".join(w[1]) for w in txt_struct]
        words = [w[0] for w in txt_struct]
        # word_id=0 is reserved for padding
        ph2word = [w_id + 1 for w_id, w in enumerate(txt_struct) for _ in range(len(w[1]))]
        return " ".join(ph), txt, " ".join(words), ph2word, " ".join(ph_gb_word)

    @staticmethod
    def process_wav(item_name, wav_fn, processed_dir, wav_processed_tmp, preprocess_args):
        processors = [get_wav_processor_cls(v) for v in preprocess_args['wav_processors']]
        processors = [k() for k in processors if k is not None]
        if len(processors) >= 1:
            sr_file = librosa.core.get_samplerate(wav_fn)
            output_fn_for_align = None
            ext = os.path.splitext(wav_fn)[1]
            input_fn = f"{wav_processed_tmp}/{item_name}{ext}"
            link_file(wav_fn, input_fn)
            for p in processors:
                outputs = p.process(input_fn, sr_file, wav_processed_tmp, processed_dir, item_name, preprocess_args)
                if len(outputs) == 3:
                    input_fn, sr, output_fn_for_align = outputs
                else:
                    input_fn, sr = outputs
            if output_fn_for_align is None:
                return input_fn, input_fn
            else:
                return input_fn, output_fn_for_align
        else:
            return wav_fn, wav_fn

    def _phone_encoder(self, ph_set):
        ph_set_fn = f"{self.processed_dir}/phone_set.json"
        if self.preprocess_args['reset_phone_dict'] or not os.path.exists(ph_set_fn):
            ph_set = sorted(set(ph_set))
            json.dump(ph_set, open(ph_set_fn, 'w'), ensure_ascii=False)
            print("| Build phone set: ", ph_set)
        else:
            ph_set = json.load(open(ph_set_fn, 'r'))
            print("| Load phone set: ", ph_set)
        return build_token_encoder(ph_set_fn)

    def _word_encoder(self, word_set):
        word_set_fn = f"{self.processed_dir}/word_set.json"
        if self.preprocess_args['reset_word_dict']:
            word_set = Counter(word_set)
            total_words = sum(word_set.values())
            word_set = word_set.most_common(hparams['word_dict_size'])
            num_unk_words = total_words - sum([x[1] for x in word_set])
            word_set = ['<BOS>', '<EOS>'] + [x[0] for x in word_set]
            word_set = sorted(set(word_set))
            json.dump(word_set, open(word_set_fn, 'w'), ensure_ascii=False)
            print(f"| Build word set. Size: {len(word_set)}, #total words: {total_words},"
                  f" #unk_words: {num_unk_words}, word_set[:10]:, {word_set[:10]}.")
        else:
            word_set = json.load(open(word_set_fn, 'r'))
            print("| Load word set. Size: ", len(word_set), word_set[:10])
        return build_token_encoder(word_set_fn)

    @classmethod
    def preprocess_second_pass(cls, word, ph, spk_name, word_encoder, ph_encoder, spk_map):
        word_token = word_encoder.encode(word)
        ph_token = ph_encoder.encode(ph)
        spk_id = spk_map[spk_name]
        return {'word_token': word_token, 'ph_token': ph_token, 'spk_id': spk_id}

    def build_spk_map(self, spk_names):
        spk_map = {x: i for i, x in enumerate(sorted(list(spk_names)))}
        assert len(spk_map) == 0 or len(spk_map) <= hparams['num_spk'], len(spk_map)
        print(f"| Number of spks: {len(spk_map)}, spk_map: {spk_map}")
        json.dump(spk_map, open(self.spk_map_fn, 'w'), ensure_ascii=False)
        return spk_map

    @classmethod
    def build_mfa_inputs(cls, item, mfa_input_dir, mfa_group, wav_processed_tmp, preprocess_args):
        item_name = item['item_name']
        wav_align_fn = item['wav_align_fn']
        ph_gb_word = item['ph_gb_word']
        ext = os.path.splitext(wav_align_fn)[1]
        mfa_input_group_dir = f'{mfa_input_dir}/{mfa_group}'
        os.makedirs(mfa_input_group_dir, exist_ok=True)
        new_wav_align_fn = f"{mfa_input_group_dir}/{item_name}{ext}"
        move_link_func = move_file if os.path.dirname(wav_align_fn) == wav_processed_tmp else link_file
        move_link_func(wav_align_fn, new_wav_align_fn)
        ph_gb_word_nosil = " ".join(["_".join([p for p in w.split("_") if not is_sil_phoneme(p)])
                                     for w in ph_gb_word.split(" ") if not is_sil_phoneme(w)])
        with open(f'{mfa_input_group_dir}/{item_name}.lab', 'w') as f_txt:
            f_txt.write(ph_gb_word_nosil)
        return ph_gb_word_nosil, new_wav_align_fn

    def load_spk_map(self, base_dir):
        spk_map_fn = f"{base_dir}/spk_map.json"
        spk_map = json.load(open(spk_map_fn, 'r'))
        return spk_map

    def load_dict(self, base_dir):
        ph_encoder = build_token_encoder(f'{base_dir}/phone_set.json')
        word_encoder = build_token_encoder(f'{base_dir}/word_set.json')
        return ph_encoder, word_encoder

    @property
    def meta_csv_filename(self):
        return 'metadata'

    @property
    def wav_processed_dirname(self):
        return 'wav_processed'

spaces/AIZ2H/07-GraphViz-PyDeck-Map-AIUIUX-Demo/app.py
DELETED
@@ -1,509 +0,0 @@
import streamlit as st
import graphviz as graphviz
import pandas as pd
import numpy as np

st.title('Graphviz Gallery: https://graphviz.org/gallery/')

# Using code:

# Create a graphlib graph object
graph = graphviz.Digraph()
graph.edge('Grandpa', 'Ancestors')
graph.edge('Grandma', 'Ancestors')
graph.edge('Uncle', 'Grandma')
graph.edge('Aunt', 'Grandma')
graph.edge('Mom', 'Grandma')
graph.edge('Cousin Bob', 'Aunt')
graph.edge('Cousin Sue', 'Aunt')
graph.edge('Brother', 'Mom')
graph.edge('Sister', 'Mom')
st.graphviz_chart(graph)


st.graphviz_chart('''
digraph G2 {
    node [shape=plaintext];
    struct1 [label=<<TABLE>
        <TR><TD><IMG SRC="1.png"></IMG></TD></TR>
        <TR><TD>caption</TD></TR>
    </TABLE>>];
}
''')


st.title('Graphviz Dot Language: https://graphviz.org/doc/info/lang.html')

# Using graph language:
st.graphviz_chart('''
digraph G {
    rankdir=LR
    node [shape=plaintext]
    a [
        label=<
            <TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0">
                <TR><TD ROWSPAN="3" BGCOLOR="yellow">class</TD></TR>
                <TR><TD PORT="here" BGCOLOR="lightblue">qualifier</TD></TR>
            </TABLE>>
    ]
    b [shape=ellipse style=filled
        label=<
            <TABLE BGCOLOR="bisque">
                <TR>
                    <TD COLSPAN="3">elephant</TD>
                    <TD ROWSPAN="2" BGCOLOR="chartreuse"
                        VALIGN="bottom" ALIGN="right">two</TD>
                </TR>
                <TR>
                    <TD COLSPAN="2" ROWSPAN="2">
                        <TABLE BGCOLOR="grey">
                            <TR><TD>corn</TD></TR>
                            <TR><TD BGCOLOR="yellow">c</TD></TR>
                            <TR><TD>f</TD></TR>
                        </TABLE>
                    </TD>
                    <TD BGCOLOR="white">penguin</TD>
                </TR>
                <TR>
                    <TD COLSPAN="2" BORDER="4" ALIGN="right" PORT="there">4</TD>
                </TR>
            </TABLE>>
    ]
    c [
        label=<long line 1<BR/>line 2<BR ALIGN="LEFT"/>line 3<BR ALIGN="RIGHT"/>>
    ]
    subgraph { rank=same b c }
    a:here -> b:there [dir=both arrowtail=diamond]
    c -> b
    d [shape=triangle]
    d -> c [label=<
        <TABLE>
            <TR>
                <TD BGCOLOR="red" WIDTH="10"> </TD>
                <TD>Edge labels<BR/>also</TD>
                <TD BGCOLOR="blue" WIDTH="10"> </TD>
            </TR>
        </TABLE>>
    ]
}
''')

st.graphviz_chart('''
digraph R {
    rankdir=LR
    node [style=rounded]
    node1 [shape=box]
    node2 [fillcolor=yellow, style="rounded,filled", shape=diamond]
    node3 [shape=record, label="{ a | b | c }"]
    node1 -> node2 -> node3
}
''')

st.title('Vega Lite Example: https://docs.streamlit.io/library/api-reference/charts/st.vega_lite_chart ')
df = pd.DataFrame(
    np.random.randn(200, 3),
    columns=['a', 'b', 'c'])

st.vega_lite_chart(df, {
    'mark': {'type': 'circle', 'tooltip': True},
    'encoding': {
        'x': {'field': 'a', 'type': 'quantitative'},
        'y': {'field': 'b', 'type': 'quantitative'},
        'size': {'field': 'c', 'type': 'quantitative'},
        'color': {'field': 'c', 'type': 'quantitative'},
    },
})

# More graph examples

st.graphviz_chart('''
digraph structs {
    node [shape=record];
    struct1 [label="<f0> left|<f1> mid\ dle|<f2> right"];
    struct2 [label="<f0> one|<f1> two"];
    struct3 [label="hello\nworld |{ b |{c|<here> d|e}| f}| g | h"];
    struct1:f1 -> struct2:f0;
    struct1:f2 -> struct3:here;
}
''')

st.graphviz_chart('''
graph G {
    fontname="Helvetica,Arial,sans-serif"
    node [fontname="Helvetica,Arial,sans-serif"]
    edge [fontname="Helvetica,Arial,sans-serif"]
    layout=fdp
    e
    subgraph clusterA {
        a -- b;
        subgraph clusterC {
            C -- D;
        }
    }
    subgraph clusterB {
        d -- f
    }
    d -- D
    e -- clusterB
    clusterC -- clusterB
}
''')

st.graphviz_chart('''
graph Transparency {
    layout=neato
    start=11 // empiric value to set orientation
    bgcolor="#0000ff11"
    node [shape=circle width=2.22 label="" style=filled]
    5 [color="#0000ff80"]
    6 [color="#ee00ee80"]
    1 [color="#ff000080"]
    2 [color="#eeee0080"]
    3 [color="#00ff0080"]
    4 [color="#00eeee80"]
    1 -- 2 -- 3 -- 4 -- 5 -- 6 -- 1
}
''')

st.graphviz_chart('''
digraph UML_Class_diagram {
    fontname="Helvetica,Arial,sans-serif"
    node [fontname="Helvetica,Arial,sans-serif"]
    edge [fontname="Helvetica,Arial,sans-serif"]
    labelloc="t"
    label="UML Class diagram demo"
    graph [splines=false]
    node [shape=record style=filled fillcolor=gray95]
    edge [arrowhead=vee style=dashed]
    Client -> Interface1 [xlabel=dependency]
    Client -> Interface2
    edge [dir=back arrowtail=empty style=""]
    Interface1 -> Class1 [xlabel=inheritance]
    Interface2 -> Class1 [dir=none]
    Interface2 [label="" xlabel="Simple\ninterface" shape=circle]
    Interface1[label = <{<b>«interface» I/O</b> | + property<br align="left"/>...<br align="left"/>|+ method<br align="left"/>...<br align="left"/>}>]
    Class1[label = <{<b>I/O class</b> | + property<br align="left"/>...<br align="left"/>|+ method<br align="left"/>...<br align="left"/>}>]
    edge [dir=back arrowtail=empty style=dashed]
    Class1 -> System_1 [xlabel=implementation]
    System_1 [label = <{<b>System</b> | + property<br align="left"/>...<br align="left"/>|+ method<br align="left"/>...<br align="left"/>}>]
    "Shared resource" [label = <{<b>Shared resource</b> | + property<br align="left"/>...<br align="left"/>|+ method<br align="left"/>...<br align="left"/>}>]
    edge [dir=back arrowtail=diamond]
    "System_1" -> Subsystem_1 [xlabel="composition"]
    Subsystem_1[label = <{<b>Subsystem 1</b> | + property<br align="left"/>...<br align="left"/>|+ method<br align="left"/>...<br align="left"/>}>]
    Subsystem_2[label = <{<b>Subsystem 2</b> | + property<br align="left"/>...<br align="left"/>|+ method<br align="left"/>...<br align="left"/>}>]
    Subsystem_3[label = <{<b>Subsystem 3</b> | + property<br align="left"/>...<br align="left"/>|+ method<br align="left"/>...<br align="left"/>}>]
    "System_1" -> Subsystem_2
    "System_1" -> Subsystem_3
    edge [xdir=back arrowtail=odiamond]
    Subsystem_1 -> "Shared resource" [xlabel=aggregation]
    {Subsystem_2 Subsystem_3 } -> "Shared resource"
}
''')



st.graphviz_chart('''
digraph G {
    fontname="Helvetica,Arial,sans-serif"
    node [fontname="Helvetica,Arial,sans-serif"]
    edge [fontname="Helvetica,Arial,sans-serif"]
    subgraph cluster_1 {
        node [ style=filled,shape="box",fillcolor="antiquewhite:aquamarine" ]n5;
        node [ shape="ellipse",fillcolor="bisque4:blue2" ]n4;
        node [ shape="circle",fillcolor="cadetblue1:chocolate1" ]n3;
        node [ shape="diamond",fillcolor="crimson:cyan4" ]n2;
        node [ shape="triangle",fillcolor="deepskyblue2:firebrick" ]n1;
        node [ shape="pentagon",fillcolor="gray24:gray88" ]n0;
        label = "X11 Colors";
    }
    subgraph cluster_2 {
        node [ style=filled,shape="box",fillcolor="bisque:brown" ]n11;
        node [ shape="ellipse",fillcolor="green:darkorchid" ]n10;
        node [ shape="circle",fillcolor="deepskyblue:gold" ]n9;
        node [ shape="diamond",fillcolor="lightseagreen:orangered" ]n8;
        node [ shape="triangle",fillcolor="turquoise:salmon" ]n7;
        node [ shape="pentagon",fillcolor="snow:black" ]n6;
        label = "SVG Colors";
    }
    subgraph cluster_3 {
        node [ style=filled,shape="box",fillcolor="/accent3/1:/accent3/3" ]n17;
        node [ shape="ellipse",fillcolor="/accent4/1:/accent4/4" ]n16;
        node [ shape="circle",fillcolor="/accent5/1:/accent5/5" ]n15;
        node [ shape="diamond",fillcolor="/accent6/1:/accent6/6" ]n14;
        node [ shape="triangle",fillcolor="/accent7/1:/accent7/7" ]n13;
        node [ shape="pentagon",fillcolor="/accent8/1:/accent8/8" ]n12;
        label = "Brewer - accent";
    }
    subgraph cluster_4 {
        node [ style=filled,shape="box",fillcolor="/blues3/1:/blues3/2" ]n23;
        node [ shape="ellipse",fillcolor="/blues4/1:/blues4/3" ]n22;
        node [ shape="circle",fillcolor="/blues5/1:/blues5/4" ]n21;
        node [ shape="diamond",fillcolor="/blues6/1:/blues6/5" ]n20;
        node [ shape="triangle",fillcolor="/blues7/1:/blues7/6" ]n19;
        node [ shape="pentagon",fillcolor="/blues8/1:/blues8/7" ]n18;
        label = "Brewer - blues";
    }
    n3 -> n9 -> n15 -> n21;
}
''')

st.graphviz_chart('''
digraph G {bgcolor="#0000FF44:#FF000044" gradientangle=90
    fontname="Helvetica,Arial,sans-serif"
    node [fontname="Helvetica,Arial,sans-serif"]
    edge [fontname="Helvetica,Arial,sans-serif"]
    subgraph cluster_0 {
        style=filled;
        color=lightgrey;
        fillcolor="darkgray:gold";
        gradientangle=0
        node [fillcolor="yellow:green" style=filled gradientangle=270] a0;
        node [fillcolor="lightgreen:red"] a1;
        node [fillcolor="lightskyblue:darkcyan"] a2;
        node [fillcolor="cyan:lightslateblue"] a3;
        a0 -> a1 -> a2 -> a3;
        label = "process #1";
    }
    subgraph cluster_1 {
        node [fillcolor="yellow:magenta"
            style=filled gradientangle=270] b0;
        node [fillcolor="violet:darkcyan"] b1;
        node [fillcolor="peachpuff:red"] b2;
        node [fillcolor="mediumpurple:purple"] b3;
        b0 -> b1 -> b2 -> b3;
        label = "process #2";
        color=blue
        fillcolor="darkgray:gold";
        gradientangle=0
        style=filled;
    }
    start -> a0;
    start -> b0;
    a1 -> b3;
    b2 -> a3;
    a3 -> a0;
    a3 -> end;
    b3 -> end;
    start [shape=Mdiamond ,
        fillcolor="pink:red",
        gradientangle=90,
        style=radial];
    end [shape=Msquare,
        fillcolor="lightyellow:orange",
        style=radial,
        gradientangle=90];
}
''')

st.graphviz_chart('''
graph Color_wheel {
    graph [
        layout = neato
        label = "Color wheel, 33 colors.\nNeato layout"
        labelloc = b
        fontname = "Helvetica,Arial,sans-serif"
        start = regular
        normalize = 0
    ]
    node [
        shape = circle
        style = filled
        color = "#00000088"
        fontname = "Helvetica,Arial,sans-serif"
    ]
    edge [
        len = 2.7
        color = "#00000088"
        fontname = "Helvetica,Arial,sans-serif"
    ]
    subgraph Dark {
        node [fontcolor = white width = 1.4]
        center [width = 1 style = invis shape = point]
        center -- darkred [label = "0°/360°"]
        darkred [fillcolor = darkred]
        brown [fillcolor = brown]
        brown -- center [label = "30°"]
        olive [fillcolor = olive]
        olive -- center [label = "60°"]
        darkolivegreen [fillcolor = darkolivegreen fontsize = 10]
        darkolivegreen -- center [label = "90°"]
        darkgreen [fillcolor = darkgreen]
        darkgreen -- center [label = "120°"]
        "dark hue 0.416" [color = ".416 1 .6" fontcolor = white]
        "dark hue 0.416" -- center [label = "150°"]
        darkcyan [fillcolor = darkcyan]
        darkcyan -- center [label = "180°"]
        "dark hue 0.583" [color = ".583 1 .6" fontcolor = white]
        "dark hue 0.583" -- center [label = "210°"]
        darkblue [fillcolor = darkblue]
        darkblue -- center [label = "240°"]
        "dark hue 0.750" [color = ".750 1 .6"]
        "dark hue 0.750" -- center [label = "270°"]
        darkmagenta [fillcolor = darkmagenta]
        darkmagenta -- center [label = "300°"]
        "dark hue 0.916" [color = ".916 1 .6"]
        "dark hue 0.916" -- center [label = "330°"]
    }
    subgraph Tue {
        node [width = 1.3]
        "hue 0.083" -- brown
        "hue 0.083" [color = ".083 1 1"]
        "hue 0.125" [color = ".125 1 1"]
        "hue 0.166" -- olive
        "hue 0.166" [color = ".166 1 1"]
        "hue 0.208" [color = ".208 1 1"]
        "hue 0.250" -- darkolivegreen
        "hue 0.250" [color = ".250 1 1"]
        "hue 0.291" [color = ".291 1 1"]
        "hue 0.333" -- darkgreen
        "hue 0.333" [color = ".333 1 1"]
        "hue 0.375" [color = ".375 1 1"]
        "hue 0.416" -- "dark hue 0.416"
        "hue 0.416" [color = ".416 1 1"]
        "hue 0.458" [color = ".458 1 1"]
        "hue 0.500" -- darkcyan
        "hue 0.500" [color = ".500 1 1"]
        "hue 0.541" [color = ".541 1 1"]
        node [fontcolor = white]
        "hue 0.000" [color = ".000 1 1"]
        "hue 0.000" -- darkred
        "hue 0.041" [color = ".041 1 1"]
        "hue 0.583" -- "dark hue 0.583"
        "hue 0.583" [color = ".583 1 1"]
        "hue 0.625" [color = ".625 1 1"]
        "hue 0.666" -- darkblue
        "hue 0.666" [color = ".666 1 1"]
        "hue 0.708" [color = ".708 1 1"]
        "hue 0.750" -- "dark hue 0.750"
        "hue 0.750" [color = ".750 1 1"]
        "hue 0.791" [color = ".791 1 1"]
        "hue 0.833" -- darkmagenta
        "hue 0.833" [color = ".833 1 1"]
        "hue 0.875" [color = ".875 1 1"]
        "hue 0.916" -- "dark hue 0.916"
        "hue 0.916" [color = ".916 1 1"]
        "hue 0.958" [color = ".958 1 1"]
        edge [len = 1]
        "hue 0.000" -- "hue 0.041" -- "hue 0.083" -- "hue 0.125" -- "hue 0.166" -- "hue 0.208"
        "hue 0.208" -- "hue 0.250" -- "hue 0.291" -- "hue 0.333" -- "hue 0.375" -- "hue 0.416"
        "hue 0.416" -- "hue 0.458" -- "hue 0.500" --"hue 0.541" -- "hue 0.583" -- "hue 0.625"
        "hue 0.625" -- "hue 0.666" -- "hue 0.708" -- "hue 0.750" -- "hue 0.791" -- "hue 0.833"
        "hue 0.833" -- "hue 0.875" -- "hue 0.916" -- "hue 0.958" -- "hue 0.000"
    }
    subgraph Main_colors {
        node [width = 2 fontsize = 20]
        red [fillcolor = red fontcolor = white]
        orangered [fillcolor = orangered]
        orange [fillcolor = orange]
        gold [fillcolor = gold]
        yellow [fillcolor = yellow]
        yellowgreen [fillcolor = yellowgreen]
        deeppink [fillcolor = deeppink fontcolor = white]
        fuchsia [label = "fuchsia\nmagenta" fillcolor = fuchsia fontcolor = white]
        purple [fillcolor = purple fontcolor = white]
        blue [fillcolor = blue fontcolor = white]
        cornflowerblue [fillcolor = cornflowerblue]
        deepskyblue [fillcolor = deepskyblue]
        aqua [fillcolor = aqua label = "aqua\ncyan"]
        springgreen [fillcolor = springgreen]
        green [fillcolor = green]
        purple -- fuchsia -- deeppink -- red
        cornflowerblue -- blue -- purple
        cornflowerblue -- deepskyblue -- aqua [len = 1.7]
        aqua -- springgreen -- green -- yellowgreen -- yellow
        yellow -- gold -- orange -- orangered -- red [len = 1.6]
        orange -- "hue 0.083"
        deeppink -- "hue 0.916"
        deeppink -- "hue 0.875"
        red -- "hue 0.000"
        yellowgreen -- "hue 0.250"
        blue -- "hue 0.666"
        yellow -- "hue 0.166"
        gold -- "hue 0.125"
        green -- "hue 0.333"
        springgreen -- "hue 0.416"
        aqua -- "hue 0.500"
        cornflowerblue -- "hue 0.583"
        deepskyblue -- "hue 0.541"
        purple -- "hue 0.791"
        purple -- "hue 0.750"
        fuchsia -- "hue 0.833"
    }
    subgraph Light_colors {
        node [width = 2 fontsize = 20]
        node [shape = circle width = 1.8]
        edge [len = 2.1]
        pink [fillcolor = pink]
        pink -- red
        lightyellow [fillcolor = lightyellow]
        lightyellow -- yellow
        mediumpurple [fillcolor = mediumpurple]
        mediumpurple -- purple
        violet [fillcolor = violet]
        violet -- fuchsia
        hotpink [fillcolor = hotpink]
        hotpink -- deeppink
        "light hue 0.250" [color = ".250 .2 1"]
        "light hue 0.250" -- yellowgreen
        lightcyan [fillcolor = lightcyan]
        lightcyan -- aqua
        lightslateblue [fillcolor = lightslateblue]
        lightslateblue -- blue
        lightgreen [fillcolor = lightgreen]
        lightgreen -- green
        lightskyblue [fillcolor = lightskyblue]
        lightskyblue -- deepskyblue
        peachpuff [fillcolor = peachpuff]
        peachpuff -- orange
        "light hue 0.416" [color = ".416 .2 1"]
        "light hue 0.416" -- springgreen
    }
    subgraph Tints {
        node [width = 1]
        edge [len = 2.4]
        "hue 0 tint" -- pink
        "hue 0 tint" [color = "0 .1 1"]
        "hue 0.041 tint" [color = ".041 .1 1"]
        "hue 0.083 tint" -- peachpuff
        "hue 0.083 tint" [color = ".083 .1 1"]
        "hue 0.125 tint" [color = ".125 .1 1"]
        "hue 0.166 tint" -- lightyellow
        "hue 0.166 tint" [color = ".166 .1 1"]
        "hue 0.208 tint" [color = ".208 .1 1"]
        "hue 0.250 tint" -- "light hue 0.250"
        "hue 0.250 tint" [color = ".250 .1 1"]
        "hue 0.291 tint" [color = ".291 .1 1"]
        "hue 0.333 tint" -- lightgreen
        "hue 0.333 tint" [color = ".333 .1 1"]
        "hue 0.375 tint" [color = ".375 .1 1"]
        "hue 0.416 tint" -- "light hue 0.416"
        "hue 0.416 tint" [color = ".416 .1 1"]
        "hue 0.458 tint" [color = ".458 .1 1"]
        "hue 0.5 tint" -- lightcyan
        "hue 0.5 tint" [color = ".5 .1 1"]
        "hue 0.541 tint" -- lightskyblue
        "hue 0.541 tint" [color = ".541 .1 1"]
        "hue 0.583 tint" [color = ".583 .1 1"]
        "hue 0.625 tint" [color = ".625 .1 1"]
        "hue 0.666 tint" -- lightslateblue
        "hue 0.666 tint" [color = ".666 .1 1"]
        "hue 0.708 tint" [color = ".708 .1 1"]
        "hue 0.750 tint" -- mediumpurple
        "hue 0.750 tint" [color = ".750 .1 1"]
        "hue 0.791 tint" [color = ".791 .1 1"]
        "hue 0.833 tint" -- violet
        "hue 0.833 tint" [color = ".833 .1 1"]
        "hue 0.875 tint" [color = ".875 .1 1"]
        "hue 0.916 tint" -- hotpink
        "hue 0.916 tint" [color = ".916 .1 1"]
        "hue 0.958 tint" [color = ".958 .1 1"]
        edge [len = 2]
        "hue 0 tint" -- "hue 0.041 tint" -- "hue 0.083 tint" -- "hue 0.125 tint" -- "hue 0.166 tint" -- "hue 0.208 tint"
        "hue 0.208 tint" -- "hue 0.250 tint" -- "hue 0.291 tint" -- "hue 0.333 tint" -- "hue 0.375 tint" -- "hue 0.416 tint"
        "hue 0.416 tint" -- "hue 0.458 tint" -- "hue 0.5 tint" --"hue 0.541 tint" -- "hue 0.583 tint" -- "hue 0.625 tint"
        "hue 0.625 tint" -- "hue 0.666 tint" -- "hue 0.708 tint" -- "hue 0.750 tint" -- "hue 0.791 tint" -- "hue 0.833 tint"
        "hue 0.833 tint" -- "hue 0.875 tint" -- "hue 0.916 tint" -- "hue 0.958 tint" -- "hue 0 tint"
    }
}
''')

spaces/AchyuthGamer/OpenGPT/g4f/Provider/CodeLinkAva.py
DELETED
@@ -1,64 +0,0 @@
from __future__ import annotations

from aiohttp import ClientSession
import json

from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider


class CodeLinkAva(AsyncGeneratorProvider):
    url = "https://ava-ai-ef611.web.app"
    supports_gpt_35_turbo = True
    working = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        **kwargs
    ) -> AsyncGenerator:
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
            "Accept": "*/*",
            "Accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
            "Origin": cls.url,
            "Referer": cls.url + "/",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
        }
        async with ClientSession(
            headers=headers
        ) as session:
            data = {
                "messages": messages,
                "temperature": 0.6,
                "stream": True,
                **kwargs
            }
            async with session.post("https://ava-alpha-api.codelink.io/api/chat", json=data) as response:
                response.raise_for_status()
                async for line in response.content:
                    line = line.decode()
                    if line.startswith("data: "):
                        if line.startswith("data: [DONE]"):
                            break
                        line = json.loads(line[6:-1])
                        content = line["choices"][0]["delta"].get("content")
                        if content:
                            yield content


    @classmethod
    @property
    def params(cls):
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("temperature", "float"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"

spaces/Adapter/CoAdapter/t2i_adapters/t2i_adapters_for_canny.py
DELETED
@@ -1,47 +0,0 @@
import torch

from ldm.models.diffusion.ddpm import LatentDiffusion
from ldm.util import instantiate_from_config


class T2IAdapterCannyBase(LatentDiffusion):

    def __init__(self, adapter_config, extra_cond_key, noise_schedule, *args, **kwargs):
        super(T2IAdapterCannyBase, self).__init__(*args, **kwargs)
        self.adapter = instantiate_from_config(adapter_config)
        self.extra_cond_key = extra_cond_key
        self.noise_schedule = noise_schedule

    def shared_step(self, batch, **kwargs):
        for k in self.ucg_training:
            p = self.ucg_training[k]
            for i in range(len(batch[k])):
                if self.ucg_prng.choice(2, p=[1 - p, p]):
                    if isinstance(batch[k], list):
                        batch[k][i] = ""
                    else:
                        raise NotImplementedError("only text ucg is currently supported")
        batch['jpg'] = batch['jpg'] * 2 - 1
        x, c = self.get_input(batch, self.first_stage_key)
        extra_cond = super(LatentDiffusion, self).get_input(batch, self.extra_cond_key).to(self.device)
        features_adapter = self.adapter(extra_cond)
        t = self.get_time_with_schedule(self.noise_schedule, x.size(0))
        loss, loss_dict = self(x, c, t=t, features_adapter=features_adapter)
        return loss, loss_dict

    def configure_optimizers(self):
        lr = self.learning_rate
        params = list(self.adapter.parameters())
        opt = torch.optim.AdamW(params, lr=lr)
        return opt

    def on_save_checkpoint(self, checkpoint):
        keys = list(checkpoint['state_dict'].keys())
        for key in keys:
            if 'adapter' not in key:
                del checkpoint['state_dict'][key]

    def on_load_checkpoint(self, checkpoint):
        for name in self.state_dict():
            if 'adapter' not in name:
                checkpoint['state_dict'][name] = self.state_dict()[name]

spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/GetParentSizerMethods.js
DELETED
@@ -1,56 +0,0 @@
var GetParent = function (gameObject, name) {
    var parent = null;
    if (name === undefined) {
        if (gameObject.hasOwnProperty('rexContainer')) {
            parent = gameObject.rexContainer.parent;
            if (parent) {
                if (!parent.isRexSizer) {
                    // Try to get sizer parent
                    parent = GetParent(parent);
                }
            } else {
                parent = null;
            }
        }

    } else {
        parent = GetParent(gameObject);
        while (parent) {
            if (parent.name === name) {
                break;
            }
            parent = GetParent(parent);
        }
    }
    return parent;
}

var GetTopmostParent = function (gameObject) {
    var parent = GetParent(gameObject);
    while (parent) {
        gameObject = parent;
        parent = GetParent(parent);
    }
    return gameObject;
}


export default {
    getParentSizer(gameObject, name) {
        if (typeof (gameObject) === 'string') {
            name = gameObject;
            gameObject = undefined;
        }
        if (gameObject === undefined) {
            gameObject = this;
        }
        return GetParent(gameObject, name);
    },

    getTopmostSizer(gameObject) {
        if (gameObject === undefined) {
            gameObject = this;
        }
        return GetTopmostParent(gameObject);
    }
}

spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/utils/ReplaceSliderConfig.js
DELETED
@@ -1,14 +0,0 @@
import CreateChild from './CreateChild.js';

var ReplaceSliderConfig = function (scene, sliderConfig, view, styles, customBuilders) {
    if (sliderConfig) {
        CreateChild(scene, sliderConfig, 'background', view, styles, customBuilders);
        CreateChild(scene, sliderConfig, 'track', view, styles, customBuilders);
        CreateChild(scene, sliderConfig, 'indicator', view, styles, customBuilders);
        CreateChild(scene, sliderConfig, 'thumb', view, styles, customBuilders);
    }

    return sliderConfig;
}

export default ReplaceSliderConfig;

spaces/AlekseyKorshuk/instagram-filter-removal/modeling/ifrnet.py
DELETED
@@ -1,166 +0,0 @@
import torch
from torch import nn
from torch.nn.utils import spectral_norm

from modeling.base import BaseNetwork
from modules.blocks import DestyleResBlock, Destyler, ResBlock


class IFRNet(BaseNetwork):
    def __init__(self, base_n_channels, destyler_n_channels):
        super(IFRNet, self).__init__()
        self.destyler = Destyler(in_features=32768, num_features=destyler_n_channels)  # from vgg features

        self.ds_fc1 = nn.Linear(destyler_n_channels, base_n_channels * 2)
        self.ds_res1 = DestyleResBlock(channels_in=3, channels_out=base_n_channels, kernel_size=5, stride=1, padding=2)
        self.ds_fc2 = nn.Linear(destyler_n_channels, base_n_channels * 4)
        self.ds_res2 = DestyleResBlock(channels_in=base_n_channels, channels_out=base_n_channels * 2, kernel_size=3, stride=2, padding=1)
        self.ds_fc3 = nn.Linear(destyler_n_channels, base_n_channels * 4)
        self.ds_res3 = DestyleResBlock(channels_in=base_n_channels * 2, channels_out=base_n_channels * 2, kernel_size=3, stride=1, padding=1)
        self.ds_fc4 = nn.Linear(destyler_n_channels, base_n_channels * 8)
        self.ds_res4 = DestyleResBlock(channels_in=base_n_channels * 2, channels_out=base_n_channels * 4, kernel_size=3, stride=2, padding=1)
        self.ds_fc5 = nn.Linear(destyler_n_channels, base_n_channels * 8)
        self.ds_res5 = DestyleResBlock(channels_in=base_n_channels * 4, channels_out=base_n_channels * 4, kernel_size=3, stride=1, padding=1)
        self.ds_fc6 = nn.Linear(destyler_n_channels, base_n_channels * 16)
        self.ds_res6 = DestyleResBlock(channels_in=base_n_channels * 4, channels_out=base_n_channels * 8, kernel_size=3, stride=2, padding=1)

        self.upsample = nn.UpsamplingNearest2d(scale_factor=2.0)

        self.res1 = ResBlock(channels_in=base_n_channels * 8, channels_out=base_n_channels * 4, kernel_size=3, stride=1, padding=1)
        self.res2 = ResBlock(channels_in=base_n_channels * 4, channels_out=base_n_channels * 4, kernel_size=3, stride=1, padding=1)
        self.res3 = ResBlock(channels_in=base_n_channels * 4, channels_out=base_n_channels * 2, kernel_size=3, stride=1, padding=1)
        self.res4 = ResBlock(channels_in=base_n_channels * 2, channels_out=base_n_channels * 2, kernel_size=3, stride=1, padding=1)
        self.res5 = ResBlock(channels_in=base_n_channels * 2, channels_out=base_n_channels, kernel_size=3, stride=1, padding=1)

        self.conv1 = nn.Conv2d(base_n_channels, 3, kernel_size=3, stride=1, padding=1)

        self.init_weights(init_type="normal", gain=0.02)

    def forward(self, x, vgg_feat):
        b_size, ch, h, w = vgg_feat.size()
        vgg_feat = vgg_feat.view(b_size, ch * h * w)
        vgg_feat = self.destyler(vgg_feat)

        out = self.ds_res1(x, self.ds_fc1(vgg_feat))
        out = self.ds_res2(out, self.ds_fc2(vgg_feat))
        out = self.ds_res3(out, self.ds_fc3(vgg_feat))
        out = self.ds_res4(out, self.ds_fc4(vgg_feat))
        out = self.ds_res5(out, self.ds_fc5(vgg_feat))
        aux = self.ds_res6(out, self.ds_fc6(vgg_feat))

        out = self.upsample(aux)
        out = self.res1(out)
        out = self.res2(out)
        out = self.upsample(out)
        out = self.res3(out)
        out = self.res4(out)
        out = self.upsample(out)
        out = self.res5(out)
        out = self.conv1(out)

        return out, aux


class MLP(nn.Module):
    def __init__(self, base_n_channels, num_class=14):
        super(MLP, self).__init__()
        self.aux_classifier = nn.Sequential(
            nn.Conv2d(base_n_channels * 8, base_n_channels * 4, kernel_size=3, stride=1, padding=1),
            nn.MaxPool2d(2),
            nn.Conv2d(base_n_channels * 4, base_n_channels * 2, kernel_size=3, stride=1, padding=1),
            nn.MaxPool2d(2),
            # nn.Conv2d(base_n_channels * 2, base_n_channels * 1, kernel_size=3, stride=1, padding=1),
            # nn.MaxPool2d(2),
            Flatten(),
            nn.Linear(base_n_channels * 8 * 8 * 2, num_class),
            # nn.Softmax(dim=-1)
        )

    def forward(self, x):
        return self.aux_classifier(x)


class Flatten(nn.Module):
    def forward(self, input):
        """
        Note that input.size(0) is usually the batch size.
        So what it does is that given any input with input.size(0) # of batches,
        will flatten to be 1 * nb_elements.
        """
        batch_size = input.size(0)
        out = input.view(batch_size, -1)
        return out  # (batch_size, *size)


class Discriminator(BaseNetwork):
    def __init__(self, base_n_channels):
        """
        img_size : (int, int, int)
            Height and width must be powers of 2.  E.g. (32, 32, 1) or
            (64, 128, 3). Last number indicates number of channels, e.g. 1 for
            grayscale or 3 for RGB
        """
        super(Discriminator, self).__init__()

        self.image_to_features = nn.Sequential(
            spectral_norm(nn.Conv2d(3, base_n_channels, 5, 2, 2)),
            nn.LeakyReLU(0.2, inplace=True),
            spectral_norm(nn.Conv2d(base_n_channels, 2 * base_n_channels, 5, 2, 2)),
            nn.LeakyReLU(0.2, inplace=True),
            spectral_norm(nn.Conv2d(2 * base_n_channels, 2 * base_n_channels, 5, 2, 2)),
            nn.LeakyReLU(0.2, inplace=True),
            spectral_norm(nn.Conv2d(2 * base_n_channels, 4 * base_n_channels, 5, 2, 2)),
            nn.LeakyReLU(0.2, inplace=True),
            # spectral_norm(nn.Conv2d(4 * base_n_channels, 4 * base_n_channels, 5, 2, 2)),
            # nn.LeakyReLU(0.2, inplace=True),
            spectral_norm(nn.Conv2d(4 * base_n_channels, 8 * base_n_channels, 5, 1, 1)),
            nn.LeakyReLU(0.2, inplace=True),
        )

        output_size = 8 * base_n_channels * 3 * 3
        self.features_to_prob = nn.Sequential(
            spectral_norm(nn.Conv2d(8 * base_n_channels, 2 * base_n_channels, 5, 2, 1)),
            Flatten(),
            nn.Linear(output_size, 1)
        )

        self.init_weights(init_type="normal", gain=0.02)

    def forward(self, input_data):
        x = self.image_to_features(input_data)
        return self.features_to_prob(x)


class PatchDiscriminator(Discriminator):
    def __init__(self, base_n_channels):
        super(PatchDiscriminator, self).__init__(base_n_channels)

        self.features_to_prob = nn.Sequential(
            spectral_norm(nn.Conv2d(8 * base_n_channels, 1, 1)),
            Flatten()
        )

    def forward(self, input_data):
        x = self.image_to_features(input_data)
        return self.features_to_prob(x)


if __name__ == '__main__':
    import torchvision
    ifrnet = IFRNet(32, 128).cuda()
    x = torch.rand((2, 3, 256, 256)).cuda()
    vgg16 = torchvision.models.vgg16(pretrained=True).features.eval().cuda()
    with torch.no_grad():
        vgg_feat = vgg16(x)
    output, aux_out = ifrnet(x, vgg_feat)
    print(output.size())
    print(aux_out.size())

    disc = Discriminator(32).cuda()
    d_out = disc(output)
    print(d_out.size())

    patch_disc = PatchDiscriminator(32).cuda()
    p_d_out = patch_disc(output)
    print(p_d_out.size())

spaces/AlexZou/Deploy_Restoration/Lowlight.py
DELETED
@@ -1,45 +0,0 @@
import os
import torch
import numpy as np
from torchvision import transforms
from PIL import Image
import time
import torchvision
import cv2
import torchvision.utils as tvu
import torch.functional as F
import argparse
from model.IAT_main import IAT

def inference_img(img_path, Net):

    low_image = Image.open(img_path).convert('RGB')
    enhance_transforms = transforms.Compose([
        transforms.ToTensor()
    ])

    with torch.no_grad():
        low_image = enhance_transforms(low_image)
        low_image = low_image.unsqueeze(0)
        start = time.time()
        restored2 = Net(low_image)
        end = time.time()


    return restored2, end - start

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--test_path', type=str, required=True, help='Path to test')
    parser.add_argument('--save_path', type=str, required=True, help='Path to save')
    parser.add_argument('--pk_path', type=str, default='model_zoo/underwater.pth', help='Path of the checkpoint')
    opt = parser.parse_args()
    if not os.path.isdir(opt.save_path):
        os.mkdir(opt.save_path)
    Net = IAT()
    Net.load_state_dict(torch.load(opt.pk_path, map_location=torch.device('cpu')))
    Net = Net.eval()
    image = opt.test_path
    print(image)
    restored2, time_num = inference_img(image, Net)
    torchvision.utils.save_image(restored2, opt.save_path + 'output.png')

spaces/AlhitawiMohammed22/CER_Hu-Evaluation-Metrics/README.md
DELETED
@@ -1,161 +0,0 @@
---
title: CER
emoji: 🤗🏃🤗🏃🤗🏃🤗🏃🤗
colorFrom: blue
colorTo: red
sdk: gradio
sdk_version: 3.19.1
app_file: app.py
pinned: false
tags:
- evaluate
- metric
license: apache-2.0
---
---

description: >-
  Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.

  CER is similar to Word Error Rate (WER), but operates on characters instead of words. Please refer to the WER docs for further information.

  Character error rate can be computed as:

  CER = (S + D + I) / N = (S + D + I) / (S + D + C)

  where

  S is the number of substitutions,
  D is the number of deletions,
  I is the number of insertions,
  C is the number of correct characters,
  N is the number of characters in the reference (N=S+D+C).

  CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated with the percentage of characters that were incorrectly predicted. The lower the value, the better the performance of the ASR system, with a CER of 0 being a perfect score.
---

# Metric Card for CER

## Metric description

Character error rate (CER) is a common metric of the performance of an automatic speech recognition (ASR) system. CER is similar to Word Error Rate (WER), but operates on characters instead of words.

Character error rate can be computed as:

`CER = (S + D + I) / N = (S + D + I) / (S + D + C)`

where

`S` is the number of substitutions,

`D` is the number of deletions,

`I` is the number of insertions,

`C` is the number of correct characters,

`N` is the number of characters in the reference (`N=S+D+C`).


## How to use

The metric takes two inputs: references (a list of references for each speech input) and predictions (a list of transcriptions to score).

```python
from evaluate import load
cer = load("cer")
cer_score = cer.compute(predictions=predictions, references=references)
```

## Output values

This metric outputs a float representing the character error rate.

```
print(cer_score)
0.34146341463414637
```

The **lower** the CER value, the **better** the performance of the ASR system, with a CER of 0 being a perfect score.

However, CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions (see [Examples](#Examples) below).

### Values from popular papers

## Examples

Perfect match between prediction and reference:

```python
!pip install evaluate jiwer

from evaluate import load
cer = load("cer")
predictions = ["hello világ", "jó éjszakát hold"]
references = ["hello világ", "jó éjszakát hold"]
cer_score = cer.compute(predictions=predictions, references=references)
print(cer_score)
0.0
```

Partial match between prediction and reference:

```python
from evaluate import load
cer = load("cer")
predictions = ["ez a jóslat", "van egy másik minta is"]
references = ["ez a hivatkozás", "van még egy"]
cer_score = cer.compute(predictions=predictions, references=references)
print(cer_score)
0.9615384615384616
```

No match between prediction and reference:

```python
from evaluate import load
cer = load("cer")
predictions = ["üdvözlet"]
references = ["jó!"]
cer_score = cer.compute(predictions=predictions, references=references)
print(cer_score)
1.5
```

CER above 1 due to insertion errors:

```python
from evaluate import load
cer = load("cer")
predictions = ["Helló Világ"]
references = ["Helló"]
cer_score = cer.compute(predictions=predictions, references=references)
print(cer_score)
1.2
```
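As a quick sanity check on the last example above: the reference `Helló` has `N = 5` characters, and turning it into the prediction `Helló Világ` takes six insertions (`" Világ"`, including the space) with no substitutions or deletions, so `CER = (S + D + I) / N = (0 + 0 + 6) / 5 = 1.2`, matching the printed score.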
## Limitations and bias

In some cases, instead of reporting the raw CER, a normalized CER is reported where the number of mistakes is divided by the sum of the number of edit operations (`I` + `S` + `D`) and `C` (the number of correct characters), which results in CER values that fall within the range of 0–100%.


## Citation

```bibtex
@inproceedings{morris2004,
  author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
  year = {2004},
  month = {01},
  pages = {},
  title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
```

## References

- [Hugging Face Tasks -- Automatic Speech Recognition](https://huggingface.co/tasks/automatic-speech-recognition)
- https://github.com/huggingface/evaluate
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/models/controlnet.md
DELETED
@@ -1,38 +0,0 @@
# ControlNet

The ControlNet model was introduced in [Adding Conditional Control to Text-to-Image Diffusion Models](https://huggingface.co/papers/2302.05543) by Lvmin Zhang and Maneesh Agrawala. It provides a greater degree of control over text-to-image generation by conditioning the model on additional inputs such as edge maps, depth maps, segmentation maps, and keypoints for pose detection.

The abstract from the paper is:

*We present a neural network structure, ControlNet, to control pretrained large diffusion models to support additional input conditions. The ControlNet learns task-specific conditions in an end-to-end way, and the learning is robust even when the training dataset is small (< 50k). Moreover, training a ControlNet is as fast as fine-tuning a diffusion model, and the model can be trained on personal devices. Alternatively, if powerful computation clusters are available, the model can scale to large amounts (millions to billions) of data. We report that large diffusion models like Stable Diffusion can be augmented with ControlNets to enable conditional inputs like edge maps, segmentation maps, keypoints, etc. This may enrich the methods to control large diffusion models and further facilitate related applications.*

## Loading from the original format

By default the [`ControlNetModel`] should be loaded with [`~ModelMixin.from_pretrained`], but it can also be loaded from the original format using [`FromOriginalControlnetMixin.from_single_file`] as follows:

```py
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel

url = "https://huggingface.co/lllyasviel/ControlNet-v1-1/blob/main/control_v11p_sd15_canny.pth"  # can also be a local path
controlnet = ControlNetModel.from_single_file(url)

url = "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned.safetensors"  # can also be a local path
pipe = StableDiffusionControlNetPipeline.from_single_file(url, controlnet=controlnet)
```
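For comparison, a minimal sketch of the default `from_pretrained` path mentioned above. The repo id is just an illustrative choice; any ControlNet checkpoint saved in the Diffusers format works:

```py
from diffusers import ControlNetModel

# Example Diffusers-format checkpoint; substitute any compatible repo id or local path.
controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny")
```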
## ControlNetModel

[[autodoc]] ControlNetModel

## ControlNetOutput

[[autodoc]] models.controlnet.ControlNetOutput

## FlaxControlNetModel

[[autodoc]] FlaxControlNetModel

## FlaxControlNetOutput

[[autodoc]] models.controlnet_flax.FlaxControlNetOutput
spaces/Andy1621/uniformer_image_detection/configs/res2net/htc_r2_101_fpn_20e_coco.py
DELETED
@@ -1,7 +0,0 @@
_base_ = '../htc/htc_r50_fpn_1x_coco.py'
model = dict(
    pretrained='open-mmlab://res2net101_v1d_26w_4s',
    backbone=dict(type='Res2Net', depth=101, scales=4, base_width=26))
# learning policy
lr_config = dict(step=[16, 19])
runner = dict(type='EpochBasedRunner', max_epochs=20)
spaces/Andy1621/uniformer_image_detection/configs/ssd/ssd512_coco.py
DELETED
@@ -1,71 +0,0 @@
_base_ = 'ssd300_coco.py'
input_size = 512
model = dict(
    backbone=dict(input_size=input_size),
    bbox_head=dict(
        in_channels=(512, 1024, 512, 256, 256, 256, 256),
        anchor_generator=dict(
            type='SSDAnchorGenerator',
            scale_major=False,
            input_size=input_size,
            basesize_ratio_range=(0.1, 0.9),
            strides=[8, 16, 32, 64, 128, 256, 512],
            ratios=[[2], [2, 3], [2, 3], [2, 3], [2, 3], [2], [2]])))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile', to_float32=True),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='PhotoMetricDistortion',
        brightness_delta=32,
        contrast_range=(0.5, 1.5),
        saturation_range=(0.5, 1.5),
        hue_delta=18),
    dict(
        type='Expand',
        mean=img_norm_cfg['mean'],
        to_rgb=img_norm_cfg['to_rgb'],
        ratio_range=(1, 4)),
    dict(
        type='MinIoURandomCrop',
        min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
        min_crop_size=0.3),
    dict(type='Resize', img_scale=(512, 512), keep_ratio=False),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(512, 512),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=False),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=8,
    workers_per_gpu=3,
    train=dict(
        _delete_=True,
        type='RepeatDataset',
        times=5,
        dataset=dict(
            type=dataset_type,
            ann_file=data_root + 'annotations/instances_train2017.json',
            img_prefix=data_root + 'train2017/',
            pipeline=train_pipeline)),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4)
optimizer_config = dict(_delete_=True)
spaces/Andy1621/uniformer_image_detection/configs/yolact/yolact_r101_1x8_coco.py
DELETED
@@ -1,3 +0,0 @@
_base_ = './yolact_r50_1x8_coco.py'

model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))
spaces/Anew1007/extras/constants.py
DELETED
@@ -1,50 +0,0 @@
# Constants
DEFAULT_CUDA_DEVICE = "cuda:0"
# Also try: 'Qiliang/bart-large-cnn-samsum-ElectrifAi_v10'
DEFAULT_SUMMARIZATION_MODEL = "Qiliang/bart-large-cnn-samsum-ChatGPT_v3"
# Also try: 'joeddav/distilbert-base-uncased-go-emotions-student'
DEFAULT_CLASSIFICATION_MODEL = "nateraw/bert-base-uncased-emotion"
# Also try: 'Salesforce/blip-image-captioning-base'
DEFAULT_CAPTIONING_MODEL = "Salesforce/blip-image-captioning-large"
DEFAULT_SD_MODEL = "ckpt/anything-v4.5-vae-swapped"
DEFAULT_EMBEDDING_MODEL = "sentence-transformers/all-mpnet-base-v2"
DEFAULT_REMOTE_SD_HOST = "127.0.0.1"
DEFAULT_REMOTE_SD_PORT = 7860
DEFAULT_CHROMA_PORT = 8000
SILERO_SAMPLES_PATH = "tts_samples"
SILERO_SAMPLE_TEXT = "The quick brown fox jumps over the lazy dog"
# ALL_MODULES = ['caption', 'summarize', 'classify', 'keywords', 'prompt', 'sd']
DEFAULT_SUMMARIZE_PARAMS = {
    "temperature": 1.0,
    "repetition_penalty": 1.0,
    "max_length": 500,
    "min_length": 200,
    "length_penalty": 1.5,
    "bad_words": [
        "\n",
        '"',
        "*",
        "[",
        "]",
        "{",
        "}",
        ":",
        "(",
        ")",
        "<",
        ">",
        "Â",
        "The text ends",
        "The story ends",
        "The text is",
        "The story is",
    ],
}

PROMPT_PREFIX = "best quality, absurdres, "
NEGATIVE_PROMPT = """lowres, bad anatomy, error body, error hair, error arm,
error hands, bad hands, error fingers, bad fingers, missing fingers
error legs, bad legs, multiple legs, missing legs, error lighting,
error shadow, error reflection, text, error, extra digit, fewer digits,
cropped, worst quality, low quality, normal quality, jpeg artifacts,
signature, watermark, username, blurry"""
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/models/candidate.py
DELETED
@@ -1,34 +0,0 @@
from pip._vendor.packaging.version import parse as parse_version

from pip._internal.models.link import Link
from pip._internal.utils.models import KeyBasedCompareMixin


class InstallationCandidate(KeyBasedCompareMixin):
    """Represents a potential "candidate" for installation."""

    __slots__ = ["name", "version", "link"]

    def __init__(self, name: str, version: str, link: Link) -> None:
        self.name = name
        self.version = parse_version(version)
        self.link = link

        super().__init__(
            key=(self.name, self.version, self.link),
            defining_class=InstallationCandidate,
        )

    def __repr__(self) -> str:
        return "<InstallationCandidate({!r}, {!r}, {!r})>".format(
            self.name,
            self.version,
            self.link,
        )

    def __str__(self) -> str:
        return "{!r} candidate (version {} at {})".format(
            self.name,
            self.version,
            self.link,
        )
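A quick usage sketch of the class above; the package name, version, and wheel URL are hypothetical (in practice pip builds candidates from package index data, not by hand):

```python
from pip._internal.models.candidate import InstallationCandidate
from pip._internal.models.link import Link

# Hypothetical wheel URL purely for illustration.
cand = InstallationCandidate(
    "requests",
    "2.31.0",
    Link("https://files.example.com/requests-2.31.0-py3-none-any.whl"),
)
print(cand)  # 'requests' candidate (version 2.31.0 at https://files.example.com/...)
```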
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/modeling/roi_heads/keypoint_head.py
DELETED
@@ -1,272 +0,0 @@
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import List
import torch
from torch import nn
from torch.nn import functional as F

from detectron2.config import configurable
from detectron2.layers import Conv2d, ConvTranspose2d, cat, interpolate
from detectron2.structures import Instances, heatmaps_to_keypoints
from detectron2.utils.events import get_event_storage
from detectron2.utils.registry import Registry

_TOTAL_SKIPPED = 0


__all__ = [
    "ROI_KEYPOINT_HEAD_REGISTRY",
    "build_keypoint_head",
    "BaseKeypointRCNNHead",
    "KRCNNConvDeconvUpsampleHead",
]


ROI_KEYPOINT_HEAD_REGISTRY = Registry("ROI_KEYPOINT_HEAD")
ROI_KEYPOINT_HEAD_REGISTRY.__doc__ = """
Registry for keypoint heads, which make keypoint predictions from per-region features.

The registered object will be called with `obj(cfg, input_shape)`.
"""


def build_keypoint_head(cfg, input_shape):
    """
    Build a keypoint head from `cfg.MODEL.ROI_KEYPOINT_HEAD.NAME`.
    """
    name = cfg.MODEL.ROI_KEYPOINT_HEAD.NAME
    return ROI_KEYPOINT_HEAD_REGISTRY.get(name)(cfg, input_shape)


def keypoint_rcnn_loss(pred_keypoint_logits, instances, normalizer):
    """
    Arguments:
        pred_keypoint_logits (Tensor): A tensor of shape (N, K, S, S) where N is the total number
            of instances in the batch, K is the number of keypoints, and S is the side length
            of the keypoint heatmap. The values are spatial logits.
        instances (list[Instances]): A list of M Instances, where M is the batch size.
            These instances are predictions from the model
            that are in 1:1 correspondence with pred_keypoint_logits.
            Each Instances should contain a `gt_keypoints` field containing a `structures.Keypoint`
            instance.
        normalizer (float): Normalize the loss by this amount.
            If not specified, we normalize by the number of visible keypoints in the minibatch.

    Returns a scalar tensor containing the loss.
    """
    heatmaps = []
    valid = []

    keypoint_side_len = pred_keypoint_logits.shape[2]
    for instances_per_image in instances:
        if len(instances_per_image) == 0:
            continue
        keypoints = instances_per_image.gt_keypoints
        heatmaps_per_image, valid_per_image = keypoints.to_heatmap(
            instances_per_image.proposal_boxes.tensor, keypoint_side_len
        )
        heatmaps.append(heatmaps_per_image.view(-1))
        valid.append(valid_per_image.view(-1))

    if len(heatmaps):
        keypoint_targets = cat(heatmaps, dim=0)
        valid = cat(valid, dim=0).to(dtype=torch.uint8)
        valid = torch.nonzero(valid).squeeze(1)

    # torch.mean (in binary_cross_entropy_with_logits) doesn't
    # accept empty tensors, so handle it separately
    if len(heatmaps) == 0 or valid.numel() == 0:
        global _TOTAL_SKIPPED
        _TOTAL_SKIPPED += 1
        storage = get_event_storage()
        storage.put_scalar("kpts_num_skipped_batches", _TOTAL_SKIPPED, smoothing_hint=False)
        return pred_keypoint_logits.sum() * 0

    N, K, H, W = pred_keypoint_logits.shape
    pred_keypoint_logits = pred_keypoint_logits.view(N * K, H * W)

    keypoint_loss = F.cross_entropy(
        pred_keypoint_logits[valid], keypoint_targets[valid], reduction="sum"
    )

    # If a normalizer isn't specified, normalize by the number of visible keypoints in the minibatch
    if normalizer is None:
        normalizer = valid.numel()
    keypoint_loss /= normalizer

    return keypoint_loss


def keypoint_rcnn_inference(pred_keypoint_logits: torch.Tensor, pred_instances: List[Instances]):
    """
    Post process each predicted keypoint heatmap in `pred_keypoint_logits` into (x, y, score)
    and add it to the `pred_instances` as a `pred_keypoints` field.

    Args:
        pred_keypoint_logits (Tensor): A tensor of shape (R, K, S, S) where R is the total number
            of instances in the batch, K is the number of keypoints, and S is the side length of
            the keypoint heatmap. The values are spatial logits.
        pred_instances (list[Instances]): A list of N Instances, where N is the number of images.

    Returns:
        None. Each element in pred_instances will contain extra "pred_keypoints" and
        "pred_keypoint_heatmaps" fields. "pred_keypoints" is a tensor of shape
        (#instance, K, 3) where the last dimension corresponds to (x, y, score).
        The scores are larger than 0. "pred_keypoint_heatmaps" contains the raw
        keypoint logits as passed to this function.
    """
    # flatten all bboxes from all images together (list[Boxes] -> Rx4 tensor)
    bboxes_flat = cat([b.pred_boxes.tensor for b in pred_instances], dim=0)

    pred_keypoint_logits = pred_keypoint_logits.detach()
    keypoint_results = heatmaps_to_keypoints(pred_keypoint_logits, bboxes_flat.detach())
    num_instances_per_image = [len(i) for i in pred_instances]
    keypoint_results = keypoint_results[:, :, [0, 1, 3]].split(num_instances_per_image, dim=0)
    heatmap_results = pred_keypoint_logits.split(num_instances_per_image, dim=0)

    for keypoint_results_per_image, heatmap_results_per_image, instances_per_image in zip(
        keypoint_results, heatmap_results, pred_instances
    ):
        # keypoint_results_per_image is (num instances)x(num keypoints)x(x, y, score)
        # heatmap_results_per_image is (num instances)x(num keypoints)x(side)x(side)
        instances_per_image.pred_keypoints = keypoint_results_per_image
        instances_per_image.pred_keypoint_heatmaps = heatmap_results_per_image


class BaseKeypointRCNNHead(nn.Module):
    """
    Implement the basic Keypoint R-CNN losses and inference logic described in
    Sec. 5 of :paper:`Mask R-CNN`.
    """

    @configurable
    def __init__(self, *, num_keypoints, loss_weight=1.0, loss_normalizer=1.0):
        """
        NOTE: this interface is experimental.

        Args:
            num_keypoints (int): number of keypoints to predict
            loss_weight (float): weight to multiply on the keypoint loss
            loss_normalizer (float or str):
                If float, divide the loss by `loss_normalizer * #images`.
                If 'visible', the loss is normalized by the total number of
                visible keypoints across images.
        """
        super().__init__()
        self.num_keypoints = num_keypoints
        self.loss_weight = loss_weight
        assert loss_normalizer == "visible" or isinstance(loss_normalizer, float), loss_normalizer
        self.loss_normalizer = loss_normalizer

    @classmethod
    def from_config(cls, cfg, input_shape):
        ret = {
            "loss_weight": cfg.MODEL.ROI_KEYPOINT_HEAD.LOSS_WEIGHT,
            "num_keypoints": cfg.MODEL.ROI_KEYPOINT_HEAD.NUM_KEYPOINTS,
        }
        normalize_by_visible = (
            cfg.MODEL.ROI_KEYPOINT_HEAD.NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS
        )  # noqa
        if not normalize_by_visible:
            batch_size_per_image = cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE
            positive_sample_fraction = cfg.MODEL.ROI_HEADS.POSITIVE_FRACTION
            ret["loss_normalizer"] = (
                ret["num_keypoints"] * batch_size_per_image * positive_sample_fraction
            )
        else:
            ret["loss_normalizer"] = "visible"
        return ret
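To make the default normalization concrete: assuming detectron2's stock config values (`NUM_KEYPOINTS = 17`, `BATCH_SIZE_PER_IMAGE = 512`, `POSITIVE_FRACTION = 0.25`), the branch above yields `loss_normalizer = 17 * 512 * 0.25 = 2176`, and the training loss in `forward` below is then divided by `num_images * 2176`.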
    def forward(self, x, instances: List[Instances]):
        """
        Args:
            x: input 4D region feature(s) provided by :class:`ROIHeads`.
            instances (list[Instances]): contains the boxes & labels corresponding
                to the input features.
                Exact format is up to its caller to decide.
                Typically, this is the foreground instances in training, with
                "proposal_boxes" field and other gt annotations.
                In inference, it contains boxes that are already predicted.

        Returns:
            A dict of losses if in training. The predicted "instances" if in inference.
        """
        x = self.layers(x)
        if self.training:
            num_images = len(instances)
            normalizer = (
                None if self.loss_normalizer == "visible" else num_images * self.loss_normalizer
            )
            return {
                "loss_keypoint": keypoint_rcnn_loss(x, instances, normalizer=normalizer)
                * self.loss_weight
            }
        else:
            keypoint_rcnn_inference(x, instances)
            return instances

    def layers(self, x):
        """
        Neural network layers that make predictions from regional input features.
        """
        raise NotImplementedError


# To get torchscript support, we make the head a subclass of `nn.Sequential`.
# Therefore, to add new layers in this head class, please make sure they are
# added in the order they will be used in forward().
@ROI_KEYPOINT_HEAD_REGISTRY.register()
class KRCNNConvDeconvUpsampleHead(BaseKeypointRCNNHead, nn.Sequential):
    """
    A standard keypoint head containing a series of 3x3 convs, followed by
    a transpose convolution and bilinear interpolation for upsampling.
    It is described in Sec. 5 of :paper:`Mask R-CNN`.
    """

    @configurable
    def __init__(self, input_shape, *, num_keypoints, conv_dims, **kwargs):
        """
        NOTE: this interface is experimental.

        Args:
            input_shape (ShapeSpec): shape of the input feature
            conv_dims: an iterable of output channel counts for each conv in the head
                e.g. (512, 512, 512) for three convs outputting 512 channels.
        """
        super().__init__(num_keypoints=num_keypoints, **kwargs)

        # default up_scale to 2.0 (this can be made an option)
        up_scale = 2.0
        in_channels = input_shape.channels

        for idx, layer_channels in enumerate(conv_dims, 1):
            module = Conv2d(in_channels, layer_channels, 3, stride=1, padding=1)
            self.add_module("conv_fcn{}".format(idx), module)
            self.add_module("conv_fcn_relu{}".format(idx), nn.ReLU())
            in_channels = layer_channels

        deconv_kernel = 4
        self.score_lowres = ConvTranspose2d(
            in_channels, num_keypoints, deconv_kernel, stride=2, padding=deconv_kernel // 2 - 1
        )
        self.up_scale = up_scale

        for name, param in self.named_parameters():
            if "bias" in name:
                nn.init.constant_(param, 0)
            elif "weight" in name:
                # Caffe2 implementation uses MSRAFill, which in fact
                # corresponds to kaiming_normal_ in PyTorch
                nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu")

    @classmethod
    def from_config(cls, cfg, input_shape):
        ret = super().from_config(cfg, input_shape)
        ret["input_shape"] = input_shape
        ret["conv_dims"] = cfg.MODEL.ROI_KEYPOINT_HEAD.CONV_DIMS
        return ret

    def layers(self, x):
        for layer in self:
            x = layer(x)
        x = interpolate(x, scale_factor=self.up_scale, mode="bilinear", align_corners=False)
        return x
spaces/Banbri/zcvzcv/src/lib/base64ToFile.ts
DELETED
@@ -1,11 +0,0 @@
export function base64ToFile(dataurl: string, filename: string) {
  var arr = dataurl.split(','),
    mime = arr[0].match(/:(.*?);/)?.[1],
    bstr = atob(arr[arr.length - 1]),
    n = bstr.length,
    u8arr = new Uint8Array(n);
  while (n--) {
    u8arr[n] = bstr.charCodeAt(n);
  }
  return new File([u8arr], filename, { type: mime });
}
spaces/BartPoint/VoiceChange_Beta/infer_pack/modules.py
DELETED
@@ -1,522 +0,0 @@
import copy
import math
import numpy as np
import scipy
import torch
from torch import nn
from torch.nn import functional as F

from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
from torch.nn.utils import weight_norm, remove_weight_norm

from infer_pack import commons
from infer_pack.commons import init_weights, get_padding
from infer_pack.transforms import piecewise_rational_quadratic_transform


LRELU_SLOPE = 0.1


class LayerNorm(nn.Module):
    def __init__(self, channels, eps=1e-5):
        super().__init__()
        self.channels = channels
        self.eps = eps

        self.gamma = nn.Parameter(torch.ones(channels))
        self.beta = nn.Parameter(torch.zeros(channels))

    def forward(self, x):
        x = x.transpose(1, -1)
        x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
        return x.transpose(1, -1)


class ConvReluNorm(nn.Module):
    def __init__(
        self,
        in_channels,
        hidden_channels,
        out_channels,
        kernel_size,
        n_layers,
        p_dropout,
    ):
        super().__init__()
        self.in_channels = in_channels
        self.hidden_channels = hidden_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.n_layers = n_layers
        self.p_dropout = p_dropout
        assert n_layers > 1, "Number of layers should be larger than 0."

        self.conv_layers = nn.ModuleList()
        self.norm_layers = nn.ModuleList()
        self.conv_layers.append(
            nn.Conv1d(
                in_channels, hidden_channels, kernel_size, padding=kernel_size // 2
            )
        )
        self.norm_layers.append(LayerNorm(hidden_channels))
        self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout))
        for _ in range(n_layers - 1):
            self.conv_layers.append(
                nn.Conv1d(
                    hidden_channels,
                    hidden_channels,
                    kernel_size,
                    padding=kernel_size // 2,
                )
            )
            self.norm_layers.append(LayerNorm(hidden_channels))
        self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
        self.proj.weight.data.zero_()
        self.proj.bias.data.zero_()

    def forward(self, x, x_mask):
        x_org = x
        for i in range(self.n_layers):
            x = self.conv_layers[i](x * x_mask)
            x = self.norm_layers[i](x)
            x = self.relu_drop(x)
        x = x_org + self.proj(x)
        return x * x_mask


class DDSConv(nn.Module):
    """
    Dilated and Depth-Separable Convolution
    """

    def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0):
        super().__init__()
        self.channels = channels
        self.kernel_size = kernel_size
        self.n_layers = n_layers
        self.p_dropout = p_dropout

        self.drop = nn.Dropout(p_dropout)
        self.convs_sep = nn.ModuleList()
        self.convs_1x1 = nn.ModuleList()
        self.norms_1 = nn.ModuleList()
        self.norms_2 = nn.ModuleList()
        for i in range(n_layers):
            dilation = kernel_size**i
            padding = (kernel_size * dilation - dilation) // 2
            self.convs_sep.append(
                nn.Conv1d(
                    channels,
                    channels,
                    kernel_size,
                    groups=channels,
                    dilation=dilation,
                    padding=padding,
                )
            )
            self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
            self.norms_1.append(LayerNorm(channels))
            self.norms_2.append(LayerNorm(channels))

    def forward(self, x, x_mask, g=None):
        if g is not None:
            x = x + g
        for i in range(self.n_layers):
            y = self.convs_sep[i](x * x_mask)
            y = self.norms_1[i](y)
            y = F.gelu(y)
            y = self.convs_1x1[i](y)
            y = self.norms_2[i](y)
            y = F.gelu(y)
            y = self.drop(y)
            x = x + y
        return x * x_mask
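A note on the dilation schedule above: layer `i` uses `dilation = kernel_size**i`, and `padding = (kernel_size * dilation - dilation) // 2` keeps the sequence length unchanged. With `kernel_size=3` and `n_layers=3`, for example, the per-layer dilations (and paddings) are `1, 3, 9`.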

class WN(torch.nn.Module):
    def __init__(
        self,
        hidden_channels,
        kernel_size,
        dilation_rate,
        n_layers,
        gin_channels=0,
        p_dropout=0,
    ):
        super(WN, self).__init__()
        assert kernel_size % 2 == 1
        self.hidden_channels = hidden_channels
        self.kernel_size = (kernel_size,)
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.gin_channels = gin_channels
        self.p_dropout = p_dropout

        self.in_layers = torch.nn.ModuleList()
        self.res_skip_layers = torch.nn.ModuleList()
        self.drop = nn.Dropout(p_dropout)

        if gin_channels != 0:
            cond_layer = torch.nn.Conv1d(
                gin_channels, 2 * hidden_channels * n_layers, 1
            )
            self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight")

        for i in range(n_layers):
            dilation = dilation_rate**i
            padding = int((kernel_size * dilation - dilation) / 2)
            in_layer = torch.nn.Conv1d(
                hidden_channels,
                2 * hidden_channels,
                kernel_size,
                dilation=dilation,
                padding=padding,
            )
            in_layer = torch.nn.utils.weight_norm(in_layer, name="weight")
            self.in_layers.append(in_layer)

            # last one is not necessary
            if i < n_layers - 1:
                res_skip_channels = 2 * hidden_channels
            else:
                res_skip_channels = hidden_channels

            res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
            res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight")
            self.res_skip_layers.append(res_skip_layer)

    def forward(self, x, x_mask, g=None, **kwargs):
        output = torch.zeros_like(x)
        n_channels_tensor = torch.IntTensor([self.hidden_channels])

        if g is not None:
            g = self.cond_layer(g)

        for i in range(self.n_layers):
            x_in = self.in_layers[i](x)
            if g is not None:
                cond_offset = i * 2 * self.hidden_channels
                g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :]
            else:
                g_l = torch.zeros_like(x_in)

            acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor)
            acts = self.drop(acts)

            res_skip_acts = self.res_skip_layers[i](acts)
            if i < self.n_layers - 1:
                res_acts = res_skip_acts[:, : self.hidden_channels, :]
                x = (x + res_acts) * x_mask
                output = output + res_skip_acts[:, self.hidden_channels :, :]
            else:
                output = output + res_skip_acts
        return output * x_mask

    def remove_weight_norm(self):
        if self.gin_channels != 0:
            torch.nn.utils.remove_weight_norm(self.cond_layer)
        for l in self.in_layers:
            torch.nn.utils.remove_weight_norm(l)
        for l in self.res_skip_layers:
            torch.nn.utils.remove_weight_norm(l)


class ResBlock1(torch.nn.Module):
    def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
        super(ResBlock1, self).__init__()
        self.convs1 = nn.ModuleList(
            [
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=dilation[0],
                        padding=get_padding(kernel_size, dilation[0]),
                    )
                ),
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=dilation[1],
                        padding=get_padding(kernel_size, dilation[1]),
                    )
                ),
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=dilation[2],
                        padding=get_padding(kernel_size, dilation[2]),
                    )
                ),
            ]
        )
        self.convs1.apply(init_weights)

        self.convs2 = nn.ModuleList(
            [
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=1,
                        padding=get_padding(kernel_size, 1),
                    )
                ),
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=1,
                        padding=get_padding(kernel_size, 1),
                    )
                ),
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=1,
                        padding=get_padding(kernel_size, 1),
                    )
                ),
            ]
        )
        self.convs2.apply(init_weights)

    def forward(self, x, x_mask=None):
        for c1, c2 in zip(self.convs1, self.convs2):
            xt = F.leaky_relu(x, LRELU_SLOPE)
            if x_mask is not None:
                xt = xt * x_mask
            xt = c1(xt)
            xt = F.leaky_relu(xt, LRELU_SLOPE)
            if x_mask is not None:
                xt = xt * x_mask
            xt = c2(xt)
            x = xt + x
        if x_mask is not None:
            x = x * x_mask
        return x

    def remove_weight_norm(self):
        for l in self.convs1:
            remove_weight_norm(l)
        for l in self.convs2:
            remove_weight_norm(l)


class ResBlock2(torch.nn.Module):
    def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
        super(ResBlock2, self).__init__()
        self.convs = nn.ModuleList(
            [
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=dilation[0],
                        padding=get_padding(kernel_size, dilation[0]),
                    )
                ),
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=dilation[1],
                        padding=get_padding(kernel_size, dilation[1]),
                    )
                ),
            ]
        )
        self.convs.apply(init_weights)

    def forward(self, x, x_mask=None):
        for c in self.convs:
            xt = F.leaky_relu(x, LRELU_SLOPE)
            if x_mask is not None:
                xt = xt * x_mask
            xt = c(xt)
            x = xt + x
        if x_mask is not None:
            x = x * x_mask
        return x

    def remove_weight_norm(self):
        for l in self.convs:
            remove_weight_norm(l)


class Log(nn.Module):
    def forward(self, x, x_mask, reverse=False, **kwargs):
        if not reverse:
            y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
            logdet = torch.sum(-y, [1, 2])
            return y, logdet
        else:
            x = torch.exp(x) * x_mask
            return x


class Flip(nn.Module):
    def forward(self, x, *args, reverse=False, **kwargs):
        x = torch.flip(x, [1])
        if not reverse:
            logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
            return x, logdet
        else:
            return x


class ElementwiseAffine(nn.Module):
    def __init__(self, channels):
        super().__init__()
        self.channels = channels
        self.m = nn.Parameter(torch.zeros(channels, 1))
        self.logs = nn.Parameter(torch.zeros(channels, 1))

    def forward(self, x, x_mask, reverse=False, **kwargs):
        if not reverse:
            y = self.m + torch.exp(self.logs) * x
            y = y * x_mask
            logdet = torch.sum(self.logs * x_mask, [1, 2])
            return y, logdet
        else:
            x = (x - self.m) * torch.exp(-self.logs) * x_mask
            return x


class ResidualCouplingLayer(nn.Module):
    def __init__(
        self,
        channels,
        hidden_channels,
        kernel_size,
        dilation_rate,
        n_layers,
        p_dropout=0,
        gin_channels=0,
        mean_only=False,
    ):
        assert channels % 2 == 0, "channels should be divisible by 2"
        super().__init__()
        self.channels = channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.half_channels = channels // 2
        self.mean_only = mean_only

        self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
        self.enc = WN(
            hidden_channels,
            kernel_size,
            dilation_rate,
            n_layers,
            p_dropout=p_dropout,
            gin_channels=gin_channels,
        )
        self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
        self.post.weight.data.zero_()
        self.post.bias.data.zero_()

    def forward(self, x, x_mask, g=None, reverse=False):
        x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
        h = self.pre(x0) * x_mask
        h = self.enc(h, x_mask, g=g)
        stats = self.post(h) * x_mask
        if not self.mean_only:
            m, logs = torch.split(stats, [self.half_channels] * 2, 1)
        else:
            m = stats
            logs = torch.zeros_like(m)

        if not reverse:
            x1 = m + x1 * torch.exp(logs) * x_mask
            x = torch.cat([x0, x1], 1)
            logdet = torch.sum(logs, [1, 2])
            return x, logdet
        else:
            x1 = (x1 - m) * torch.exp(-logs) * x_mask
            x = torch.cat([x0, x1], 1)
            return x

    def remove_weight_norm(self):
        self.enc.remove_weight_norm()
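Because the coupling above is elementwise affine in `x1` (`x1 = m + x1 * exp(logs)`), its Jacobian is diagonal, so the log-determinant reduces to `torch.sum(logs, [1, 2])`; the `reverse=True` branch inverts the transform exactly via `(x1 - m) * exp(-logs)`.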

class ConvFlow(nn.Module):
    def __init__(
        self,
        in_channels,
        filter_channels,
        kernel_size,
        n_layers,
        num_bins=10,
        tail_bound=5.0,
    ):
        super().__init__()
        self.in_channels = in_channels
        self.filter_channels = filter_channels
        self.kernel_size = kernel_size
        self.n_layers = n_layers
        self.num_bins = num_bins
        self.tail_bound = tail_bound
        self.half_channels = in_channels // 2

        self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
        self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0)
        self.proj = nn.Conv1d(
            filter_channels, self.half_channels * (num_bins * 3 - 1), 1
        )
        self.proj.weight.data.zero_()
        self.proj.bias.data.zero_()

    def forward(self, x, x_mask, g=None, reverse=False):
        x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
        h = self.pre(x0)
        h = self.convs(h, x_mask, g=g)
        h = self.proj(h) * x_mask

        b, c, t = x0.shape
        h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2)  # [b, cx?, t] -> [b, c, t, ?]

        unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels)
        unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt(
            self.filter_channels
        )
        unnormalized_derivatives = h[..., 2 * self.num_bins :]

        x1, logabsdet = piecewise_rational_quadratic_transform(
            x1,
            unnormalized_widths,
            unnormalized_heights,
            unnormalized_derivatives,
            inverse=reverse,
            tails="linear",
            tail_bound=self.tail_bound,
        )

        x = torch.cat([x0, x1], 1) * x_mask
        logdet = torch.sum(logabsdet * x_mask, [1, 2])
        if not reverse:
            return x, logdet
        else:
            return x
spaces/Benson/text-generation/Examples/Descargar Familias Virtuales 3 Mod Apk Dinero Ilimitado.md
DELETED
@@ -1,64 +0,0 @@
<br />
<h1>Download Virtual Families 3 Mod APK Unlimited Money</h1>
<p>Do you like simulation games that let you create your own virtual family and live a happy life? If so, you should try Virtual Families 3, the latest installment in the popular series from Last Day of Work. In this game, you can adopt a little person from thousands of options, build a home for them, and help them achieve their dreams. You can also interact with other players online and visit their homes. But what if you want to enjoy the game without limits or restrictions? Well, you can do that by downloading the Virtual Families 3 unlimited-money mod APK. In this article, we will tell you what Virtual Families 3 is, why you should download the mod APK version, and how to install it on your device. So, let's get started!</p>
<h2>What is Virtual Families 3?</h2>
<p>Virtual Families 3 is a simulation game that lets you create your own virtual family and live a happy life. You can choose from thousands of characters to adopt as your little person, each with their own personality, appearance, and preferences. You can then build a house for them, decorate it with various items, and upgrade it as you wish. You can also adopt and raise children, teach them skills, and guide them through life. You can explore the virtual world with your family, visit other players' homes, chat with them, and make friends. You can also experience random events and surprises that make the game more fun and realistic.</p>
<h2>download virtual families 3 mod apk unlimited money</h2><br /><p><b><b>Download Zip</b> ✪✪✪ <a href="https://bltlly.com/2v6Ji6">https://bltlly.com/2v6Ji6</a></b></p><br /><br />
<h3>Features of Virtual Families 3</h3>
<p>Virtual Families 3 is a game that offers many features to make it enjoyable and realistic. Here are some of the main features of the game:</p>
<h4>Customize your home and family</h4>

<h4>Adopt and raise children</h4>
<p>You can adopt and nurture children in Virtual Families 3. You can choose from thousands of children to adopt as your own, each with their own traits and talents. You can then take care of them, feed them, play with them, teach them skills, and help them grow. You can also watch them interact with each other and with their parents. You can even influence their careers and marriages when they become adults.</p>
<h4>Explore the virtual world</h4>
<p>You can explore the virtual world with your family in Virtual Families 3. You can visit other players' homes, chat with them, exchange gifts, and make friends. You can also join events and competitions to win prizes and rewards. You can also discover new places and secrets in the game world.</p>
<h3>Why download the Virtual Families 3 mod APK?</h3>
<p>Virtual Families 3 is a free game that you can download from the Google Play Store or the App Store. However, the game also has some limitations and restrictions that can affect your gaming experience. For example, you need to earn money and coins to buy items and upgrade your home. You also need to watch ads to get some bonuses or rewards. You also need to pay for some premium features and access that can enhance your gameplay. But what if you want to enjoy the game without limits or restrictions? Well, you can do that by downloading the Virtual Families 3 unlimited-money mod APK. This is a modified version of the game that gives you unlimited money and coins to spend on whatever you want. You also get premium access and features that are normally locked or paid. You also don't have to watch any ads or root your device to play. Here are some of the benefits of downloading the Virtual Families 3 mod APK:</p>
<h4>Unlimited money and coins</h4>

<h4>Premium access and features</h4>
<p>With the Virtual Families 3 mod APK, you also get premium access and features that are normally locked or paid. For example, you can unlock all the characters and children to adopt, all the events and competitions to join, all the places and secrets to explore, etc. You can also access some exclusive features such as changing the weather, time, and seasons, creating your own events and competitions, customizing your own characters and children, etc. You can enjoy the game to the fullest without limitations.</p>
<p></p>
<h4>No ads or root required</h4>
<p>With the Virtual Families 3 mod APK, you also don't have to watch any ads or root your device to play the game. The mod APK version removes all the annoying ads that can interrupt your gameplay or waste your time. You can play the game smoothly and without distractions. You also don't have to root your device or risk damaging it to install the mod APK file. You can simply download and install it like any other app.</p>
<h2>How to download and install the Virtual Families 3 mod APK?</h2>
<p>Now that you know what the Virtual Families 3 mod APK is and why you should download it, you may wonder how to download and install it on your device. Well, don't worry, because we will guide you through the process step by step. Just follow these simple steps:</p>
<h3>Step 1: Download the mod APK file from a trusted source</h3>
<p>The first step is to download the mod APK file from a trusted source. There are many websites that offer mod APK files for various games, but not all of them are safe or reliable. Some of them may contain viruses or malware that can damage your device or steal your data. Therefore, you should be careful when choosing a source to download the mod APK file from. We recommend that you use this link to download the latest version of the Virtual Families 3 unlimited-money mod APK. This link is safe and verified by us.</p>
<h3>Step 2: Enable unknown sources on your device</h3>
<p>The next step is to enable unknown sources on your device. This is necessary because, by default, your device does not allow installing apps from sources other than the official app stores. However, since we are installing a mod APK file that is not available in the app stores, we need to enable unknown sources on our device. To do this, go to your device settings, then security or privacy, then find the unknown sources or install unknown apps option, then toggle it on or allow it.</p>
<h3>Step 3: Install the mod APK file and enjoy the game</h3>
<p>The final step is to install the mod APK file and enjoy the game. To do this, locate the downloaded mod APK file in your device storage, then tap on it to start the installation process. Follow the on-screen instructions and wait a few seconds until the installation is complete. Once done, you can launch the game from the app drawer or home screen and start playing with unlimited money and coins.</p>
<h2>Conclusion</h2>
<p>Virtual Families 3 is a simulation game that lets you create your own virtual family and live a happy life. You can customize your home and family, adopt and nurture children, explore the virtual world, and interact with other players online. However, if you want to enjoy the game without limits or restrictions, you should download the Virtual Families 3 unlimited-money mod APK. This is a modified version of the game that gives you unlimited money and coins to spend on whatever you want. You also get premium access and features that are normally locked or paid. You also don't have to watch any ads or root your device to play.</p>
<p>We hope this article has helped you understand what the Virtual Families 3 unlimited-money mod APK is, why you should download it, and how to install it on your device. If you have any questions or suggestions, feel free to leave them in the comments section below. We would love to hear from you. Thanks for reading and happy playing!</p>

<p>Here are some frequently asked questions about the Virtual Families 3 unlimited-money mod APK:</p>
<table>
<tr>
<th>Question</th>
<th>Answer</th>
</tr>
<tr>
<td>Is the Virtual Families 3 mod APK safe to download and install?</td>
<td>Yes, the Virtual Families 3 mod APK is safe to download and install, as long as you use a trusted source like the one we provide in this article. The mod APK file does not contain any viruses or malware that can damage your device or steal your data.</td>
</tr>
<tr>
<td>Will I get banned for using the Virtual Families 3 mod APK?</td>
<td>No, you will not get banned for using the Virtual Families 3 mod APK, as the mod APK file does not interfere with the game's servers or other players' accounts. You can play the game online without any problems or risk.</td>
</tr>
<tr>
<td>Can I update the Virtual Families 3 mod APK to the latest version?</td>
<td>Yes, you can update the Virtual Families 3 mod APK to the latest version, as long as you download the updated mod APK file from the same source you used before. However, you may lose your progress and data if you uninstall the previous version of the mod APK file. Therefore, we recommend that you back up your data before updating the mod APK file.</td>
</tr>
<tr>
<td>Can I play the Virtual Families 3 mod APK on PC or iOS devices?</td>
<td>No, you cannot play the Virtual Families 3 mod APK on PC or iOS devices, as the mod APK file is only compatible with Android devices. However, you can use an Android emulator on your PC or a jailbroken iOS device to run the mod APK file.</td>
</tr>
<tr>
<td>Can I request more features or mods for Virtual Families 3?</td>
<td>Yes, you can request more features or mods for Virtual Families 3 by leaving a comment below or contacting the developer of the mod APK file. However, we cannot guarantee that your request will be fulfilled or when it will be available.</td>
</tr>
</table></p> 64aa2da5cf<br />
<br />
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/dateutil/tz/__init__.py
DELETED
@@ -1,12 +0,0 @@
-# -*- coding: utf-8 -*-
-from .tz import *
-from .tz import __doc__
-
-__all__ = ["tzutc", "tzoffset", "tzlocal", "tzfile", "tzrange",
-           "tzstr", "tzical", "tzwin", "tzwinlocal", "gettz",
-           "enfold", "datetime_ambiguous", "datetime_exists",
-           "resolve_imaginary", "UTC", "DeprecatedTzFormatWarning"]
-
-
-class DeprecatedTzFormatWarning(Warning):
-    """Warning raised when time zones are parsed from deprecated formats."""
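For context on what this hunk removes: `dateutil/tz/__init__.py` is only a re-export shim, so the package's public time-zone API lives entirely in `.tz`. A minimal usage sketch of that re-exported surface (the zone name below is just an illustration):

    from datetime import datetime
    from dateutil import tz

    # gettz resolves an IANA zone name; tz.UTC is the module-level UTC
    # singleton re-exported by the __init__ above.
    eastern = tz.gettz("America/New_York")
    now_utc = datetime.now(tz.UTC)

    # Convert the aware UTC timestamp into the resolved zone.
    print(now_utc.astimezone(eastern).isoformat())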
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/gb2312prober.py
DELETED
@@ -1,47 +0,0 @@
-######################## BEGIN LICENSE BLOCK ########################
-# The Original Code is mozilla.org code.
-#
-# The Initial Developer of the Original Code is
-# Netscape Communications Corporation.
-# Portions created by the Initial Developer are Copyright (C) 1998
-# the Initial Developer. All Rights Reserved.
-#
-# Contributor(s):
-#   Mark Pilgrim - port to Python
-#
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
-# 02110-1301  USA
-######################### END LICENSE BLOCK #########################
-
-from .chardistribution import GB2312DistributionAnalysis
-from .codingstatemachine import CodingStateMachine
-from .mbcharsetprober import MultiByteCharSetProber
-from .mbcssm import GB2312_SM_MODEL
-
-
-class GB2312Prober(MultiByteCharSetProber):
-    def __init__(self) -> None:
-        super().__init__()
-        self.coding_sm = CodingStateMachine(GB2312_SM_MODEL)
-        self.distribution_analyzer = GB2312DistributionAnalysis()
-        self.reset()
-
-    @property
-    def charset_name(self) -> str:
-        return "GB2312"
-
-    @property
-    def language(self) -> str:
-        return "Chinese"
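`GB2312Prober` is an internal detector that chardet drives from its top-level `detect` helper; applications normally never instantiate it directly. A short sketch of that public entry point (the sample string and printed result are illustrative):

    from pip._vendor import chardet  # plain `import chardet` outside pip's vendored copy

    # GB2312-encoded bytes; the prober's coding state machine and character
    # distribution analysis produce the confidence reported here.
    sample = "你好，世界".encode("gb2312")
    print(chardet.detect(sample))
    # e.g. {'encoding': 'GB2312', 'confidence': 0.99, 'language': 'Chinese'}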
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/requests/api.py
DELETED
@@ -1,157 +0,0 @@
-"""
-requests.api
-~~~~~~~~~~~~
-
-This module implements the Requests API.
-
-:copyright: (c) 2012 by Kenneth Reitz.
-:license: Apache2, see LICENSE for more details.
-"""
-
-from . import sessions
-
-
-def request(method, url, **kwargs):
-    """Constructs and sends a :class:`Request <Request>`.
-
-    :param method: method for the new :class:`Request` object: ``GET``, ``OPTIONS``, ``HEAD``, ``POST``, ``PUT``, ``PATCH``, or ``DELETE``.
-    :param url: URL for the new :class:`Request` object.
-    :param params: (optional) Dictionary, list of tuples or bytes to send
-        in the query string for the :class:`Request`.
-    :param data: (optional) Dictionary, list of tuples, bytes, or file-like
-        object to send in the body of the :class:`Request`.
-    :param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`.
-    :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
-    :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
-    :param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``) for multipart encoding upload.
-        ``file-tuple`` can be a 2-tuple ``('filename', fileobj)``, 3-tuple ``('filename', fileobj, 'content_type')``
-        or a 4-tuple ``('filename', fileobj, 'content_type', custom_headers)``, where ``'content-type'`` is a string
-        defining the content type of the given file and ``custom_headers`` a dict-like object containing additional headers
-        to add for the file.
-    :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
-    :param timeout: (optional) How many seconds to wait for the server to send data
-        before giving up, as a float, or a :ref:`(connect timeout, read
-        timeout) <timeouts>` tuple.
-    :type timeout: float or tuple
-    :param allow_redirects: (optional) Boolean. Enable/disable GET/OPTIONS/POST/PUT/PATCH/DELETE/HEAD redirection. Defaults to ``True``.
-    :type allow_redirects: bool
-    :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
-    :param verify: (optional) Either a boolean, in which case it controls whether we verify
-        the server's TLS certificate, or a string, in which case it must be a path
-        to a CA bundle to use. Defaults to ``True``.
-    :param stream: (optional) if ``False``, the response content will be immediately downloaded.
-    :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
-    :return: :class:`Response <Response>` object
-    :rtype: requests.Response
-
-    Usage::
-
-      >>> import requests
-      >>> req = requests.request('GET', 'https://httpbin.org/get')
-      >>> req
-      <Response [200]>
-    """
-
-    # By using the 'with' statement we are sure the session is closed, thus we
-    # avoid leaving sockets open which can trigger a ResourceWarning in some
-    # cases, and look like a memory leak in others.
-    with sessions.Session() as session:
-        return session.request(method=method, url=url, **kwargs)
-
-
-def get(url, params=None, **kwargs):
-    r"""Sends a GET request.
-
-    :param url: URL for the new :class:`Request` object.
-    :param params: (optional) Dictionary, list of tuples or bytes to send
-        in the query string for the :class:`Request`.
-    :param \*\*kwargs: Optional arguments that ``request`` takes.
-    :return: :class:`Response <Response>` object
-    :rtype: requests.Response
-    """
-
-    return request("get", url, params=params, **kwargs)
-
-
-def options(url, **kwargs):
-    r"""Sends an OPTIONS request.
-
-    :param url: URL for the new :class:`Request` object.
-    :param \*\*kwargs: Optional arguments that ``request`` takes.
-    :return: :class:`Response <Response>` object
-    :rtype: requests.Response
-    """
-
-    return request("options", url, **kwargs)
-
-
-def head(url, **kwargs):
-    r"""Sends a HEAD request.
-
-    :param url: URL for the new :class:`Request` object.
-    :param \*\*kwargs: Optional arguments that ``request`` takes. If
-        `allow_redirects` is not provided, it will be set to `False` (as
-        opposed to the default :meth:`request` behavior).
-    :return: :class:`Response <Response>` object
-    :rtype: requests.Response
-    """
-
-    kwargs.setdefault("allow_redirects", False)
-    return request("head", url, **kwargs)
-
-
-def post(url, data=None, json=None, **kwargs):
-    r"""Sends a POST request.
-
-    :param url: URL for the new :class:`Request` object.
-    :param data: (optional) Dictionary, list of tuples, bytes, or file-like
-        object to send in the body of the :class:`Request`.
-    :param json: (optional) json data to send in the body of the :class:`Request`.
-    :param \*\*kwargs: Optional arguments that ``request`` takes.
-    :return: :class:`Response <Response>` object
-    :rtype: requests.Response
-    """
-
-    return request("post", url, data=data, json=json, **kwargs)
-
-
-def put(url, data=None, **kwargs):
-    r"""Sends a PUT request.
-
-    :param url: URL for the new :class:`Request` object.
-    :param data: (optional) Dictionary, list of tuples, bytes, or file-like
-        object to send in the body of the :class:`Request`.
-    :param json: (optional) json data to send in the body of the :class:`Request`.
-    :param \*\*kwargs: Optional arguments that ``request`` takes.
-    :return: :class:`Response <Response>` object
-    :rtype: requests.Response
-    """
-
-    return request("put", url, data=data, **kwargs)
-
-
-def patch(url, data=None, **kwargs):
-    r"""Sends a PATCH request.
-
-    :param url: URL for the new :class:`Request` object.
-    :param data: (optional) Dictionary, list of tuples, bytes, or file-like
-        object to send in the body of the :class:`Request`.
-    :param json: (optional) json data to send in the body of the :class:`Request`.
-    :param \*\*kwargs: Optional arguments that ``request`` takes.
-    :return: :class:`Response <Response>` object
-    :rtype: requests.Response
-    """
-
-    return request("patch", url, data=data, **kwargs)
-
-
-def delete(url, **kwargs):
-    r"""Sends a DELETE request.
-
-    :param url: URL for the new :class:`Request` object.
-    :param \*\*kwargs: Optional arguments that ``request`` takes.
-    :return: :class:`Response <Response>` object
-    :rtype: requests.Response
-    """
-
-    return request("delete", url, **kwargs)
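Every helper in this module is a thin wrapper over `request`, which opens a throwaway `Session` per call. A minimal sketch of the module-level API in use (httpbin.org is the same example host the docstrings use):

    import requests

    # GET with query parameters and a timeout; params are URL-encoded automatically.
    resp = requests.get("https://httpbin.org/get", params={"q": "demo"}, timeout=5)
    resp.raise_for_status()
    print(resp.json()["args"])

    # POST a JSON body; the json= kwarg serializes it and sets the Content-Type header.
    resp = requests.post("https://httpbin.org/post", json={"name": "demo"}, timeout=5)
    print(resp.status_code)

Because each module-level call builds and tears down its own Session, code that issues many requests to the same host is usually better served by a single long-lived `requests.Session()`, which reuses connections.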
spaces/CVPR/LIVE/thrust/thrust/detail/config/global_workarounds.h
DELETED
@@ -1,27 +0,0 @@
-/*
- *  Copyright 2008-2013 NVIDIA Corporation
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-#pragma once
-
-#include <thrust/detail/config/compiler.h>
-
-// XXX workaround gcc 4.8+'s complaints about unused local typedefs by silencing them globally
-#if defined(THRUST_GCC_VERSION) && (THRUST_GCC_VERSION >= 40800)
-#  if defined(__NVCC__) && (CUDART_VERSION >= 6000)
-#    pragma GCC diagnostic ignored "-Wunused-local-typedefs"
-#  endif // nvcc & cuda 6+
-#endif // gcc 4.8
-
spaces/CVPR/LIVE/thrust/thrust/transform.h
DELETED
@@ -1,725 +0,0 @@
-/*
- *  Copyright 2008-2013 NVIDIA Corporation
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-
-/*! \file thrust/transform.h
- *  \brief Transforms input ranges using a function object
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-#include <thrust/detail/execution_policy.h>
-
-namespace thrust
-{
-
-
-/*! \addtogroup algorithms
- */
-
-/*! \addtogroup transformations
- *  \ingroup algorithms
- *  \{
- */
-
-
-/*! This version of \p transform applies a unary function to each element
- *  of an input sequence and stores the result in the corresponding
- *  position in an output sequence. Specifically, for each iterator
- *  <tt>i</tt> in the range [\p first, \p last) the operation
- *  <tt>op(*i)</tt> is performed and the result is assigned to <tt>*o</tt>,
- *  where <tt>o</tt> is the corresponding output iterator in the range
- *  [\p result, \p result + (\p last - \p first) ). The input and
- *  output sequences may coincide, resulting in an in-place transformation.
- *
- *  The algorithm's execution is parallelized as determined by \p exec.
- *
- *  \param exec The execution policy to use for parallelization.
- *  \param first The beginning of the input sequence.
- *  \param last The end of the input sequence.
- *  \param result The beginning of the output sequence.
- *  \param op The transformation operation.
- *  \return The end of the output sequence.
- *
- *  \tparam DerivedPolicy The name of the derived execution policy.
- *  \tparam InputIterator is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>
- *          and \c InputIterator's \c value_type is convertible to \c UnaryFunction's \c argument_type.
- *  \tparam OutputIterator is a model of <a href="http://www.sgi.com/tech/stl/OutputIterator.html">Output Iterator</a>.
- *  \tparam UnaryFunction is a model of <a href="http://www.sgi.com/tech/stl/UnaryFunction.html">Unary Function</a>
- *          and \c UnaryFunction's \c result_type is convertible to \c OutputIterator's \c value_type.
- *
- *  \pre \p first may equal \p result, but the range <tt>[first, last)</tt> shall not overlap the range <tt>[result, result + (last - first))</tt> otherwise.
- *
- *  The following code snippet demonstrates how to use \p transform to negate a range in-place
- *  using the \p thrust::host execution policy for parallelization:
- *
- *  \code
- *  #include <thrust/transform.h>
- *  #include <thrust/functional.h>
- *  #include <thrust/execution_policy.h>
- *  ...
- *
- *  int data[10] = {-5, 0, 2, -3, 2, 4, 0, -1, 2, 8};
- *
- *  thrust::negate<int> op;
- *
- *  thrust::transform(thrust::host, data, data + 10, data, op); // in-place transformation
- *
- *  // data is now {5, 0, -2, 3, -2, -4, 0, 1, -2, -8};
- *  \endcode
- *
- *  \see http://www.sgi.com/tech/stl/transform.html
- */
-template<typename DerivedPolicy,
-         typename InputIterator,
-         typename OutputIterator,
-         typename UnaryFunction>
-__host__ __device__
-  OutputIterator transform(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
-                           InputIterator first, InputIterator last,
-                           OutputIterator result,
-                           UnaryFunction op);
-
-
-/*! This version of \p transform applies a unary function to each element
- *  of an input sequence and stores the result in the corresponding
- *  position in an output sequence. Specifically, for each iterator
- *  <tt>i</tt> in the range [\p first, \p last) the operation
- *  <tt>op(*i)</tt> is performed and the result is assigned to <tt>*o</tt>,
- *  where <tt>o</tt> is the corresponding output iterator in the range
- *  [\p result, \p result + (\p last - \p first) ). The input and
- *  output sequences may coincide, resulting in an in-place transformation.
- *
- *  \param first The beginning of the input sequence.
- *  \param last The end of the input sequence.
- *  \param result The beginning of the output sequence.
- *  \param op The transformation operation.
- *  \return The end of the output sequence.
- *
- *  \tparam InputIterator is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>
- *          and \c InputIterator's \c value_type is convertible to \c UnaryFunction's \c argument_type.
- *  \tparam OutputIterator is a model of <a href="http://www.sgi.com/tech/stl/OutputIterator.html">Output Iterator</a>.
- *  \tparam UnaryFunction is a model of <a href="http://www.sgi.com/tech/stl/UnaryFunction.html">Unary Function</a>
- *          and \c UnaryFunction's \c result_type is convertible to \c OutputIterator's \c value_type.
- *
- *  \pre \p first may equal \p result, but the range <tt>[first, last)</tt> shall not overlap the range <tt>[result, result + (last - first))</tt> otherwise.
- *
- *  The following code snippet demonstrates how to use \p transform
- *
- *  \code
- *  #include <thrust/transform.h>
- *  #include <thrust/functional.h>
- *
- *  int data[10] = {-5, 0, 2, -3, 2, 4, 0, -1, 2, 8};
- *
- *  thrust::negate<int> op;
- *
- *  thrust::transform(data, data + 10, data, op); // in-place transformation
- *
- *  // data is now {5, 0, -2, 3, -2, -4, 0, 1, -2, -8};
- *  \endcode
- *
- *  \see http://www.sgi.com/tech/stl/transform.html
- */
-template<typename InputIterator,
-         typename OutputIterator,
-         typename UnaryFunction>
-  OutputIterator transform(InputIterator first, InputIterator last,
-                           OutputIterator result,
-                           UnaryFunction op);
-
-
-/*! This version of \p transform applies a binary function to each pair
- *  of elements from two input sequences and stores the result in the
- *  corresponding position in an output sequence. Specifically, for
- *  each iterator <tt>i</tt> in the range [\p first1, \p last1) and
- *  <tt>j = first2 + (i - first1)</tt> in the range [\p first2, \p last2)
- *  the operation <tt>op(*i,*j)</tt> is performed and the result is
- *  assigned to <tt>*o</tt>, where <tt>o</tt> is the corresponding
- *  output iterator in the range [\p result, \p result + (\p last - \p first) ).
- *  The input and output sequences may coincide, resulting in an
- *  in-place transformation.
- *
- *  The algorithm's execution is parallelized as determined by \p exec.
- *
- *  \param exec The execution policy to use for parallelization.
- *  \param first1 The beginning of the first input sequence.
- *  \param last1 The end of the first input sequence.
- *  \param first2 The beginning of the second input sequence.
- *  \param result The beginning of the output sequence.
- *  \param op The transformation operation.
- *  \return The end of the output sequence.
- *
- *  \tparam DerivedPolicy The name of the derived execution policy.
- *  \tparam InputIterator1 is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>
- *          and \c InputIterator1's \c value_type is convertible to \c BinaryFunction's \c first_argument_type.
- *  \tparam InputIterator2 is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>
- *          and \c InputIterator2's \c value_type is convertible to \c BinaryFunction's \c second_argument_type.
- *  \tparam OutputIterator is a model of <a href="http://www.sgi.com/tech/stl/OutputIterator.html">Output Iterator</a>.
- *  \tparam BinaryFunction is a model of <a href="http://www.sgi.com/tech/stl/BinaryFunction.html">Binary Function</a>
- *          and \c BinaryFunction's \c result_type is convertible to \c OutputIterator's \c value_type.
- *
- *  \pre \p first1 may equal \p result, but the range <tt>[first1, last1)</tt> shall not overlap the range <tt>[result, result + (last1 - first1))</tt> otherwise.
- *  \pre \p first2 may equal \p result, but the range <tt>[first2, first2 + (last1 - first1))</tt> shall not overlap the range <tt>[result, result + (last1 - first1))</tt> otherwise.
- *
- *  The following code snippet demonstrates how to use \p transform to compute the sum of two
- *  ranges using the \p thrust::host execution policy for parallelization:
- *
- *  \code
- *  #include <thrust/transform.h>
- *  #include <thrust/functional.h>
- *  #include <thrust/execution_policy.h>
- *  ...
- *
- *  int input1[6] = {-5, 0, 2, 3, 2, 4};
- *  int input2[6] = { 3, 6, -2, 1, 2, 3};
- *  int output[6];
- *
- *  thrust::plus<int> op;
- *
- *  thrust::transform(thrust::host, input1, input1 + 6, input2, output, op);
- *
- *  // output is now {-2, 6, 0, 4, 4, 7};
- *  \endcode
- *
- *  \see http://www.sgi.com/tech/stl/transform.html
- */
-template<typename DerivedPolicy,
-         typename InputIterator1,
-         typename InputIterator2,
-         typename OutputIterator,
-         typename BinaryFunction>
-__host__ __device__
-  OutputIterator transform(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
-                           InputIterator1 first1, InputIterator1 last1,
-                           InputIterator2 first2,
-                           OutputIterator result,
-                           BinaryFunction op);
-
-
-/*! This version of \p transform applies a binary function to each pair
- *  of elements from two input sequences and stores the result in the
- *  corresponding position in an output sequence. Specifically, for
- *  each iterator <tt>i</tt> in the range [\p first1, \p last1) and
- *  <tt>j = first2 + (i - first1)</tt> in the range [\p first2, \p last2)
- *  the operation <tt>op(*i,*j)</tt> is performed and the result is
- *  assigned to <tt>*o</tt>, where <tt>o</tt> is the corresponding
- *  output iterator in the range [\p result, \p result + (\p last - \p first) ).
- *  The input and output sequences may coincide, resulting in an
- *  in-place transformation.
- *
- *  \param first1 The beginning of the first input sequence.
- *  \param last1 The end of the first input sequence.
- *  \param first2 The beginning of the second input sequence.
- *  \param result The beginning of the output sequence.
- *  \param op The transformation operation.
- *  \return The end of the output sequence.
- *
- *  \tparam InputIterator1 is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>
- *          and \c InputIterator1's \c value_type is convertible to \c BinaryFunction's \c first_argument_type.
- *  \tparam InputIterator2 is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>
- *          and \c InputIterator2's \c value_type is convertible to \c BinaryFunction's \c second_argument_type.
- *  \tparam OutputIterator is a model of <a href="http://www.sgi.com/tech/stl/OutputIterator.html">Output Iterator</a>.
- *  \tparam BinaryFunction is a model of <a href="http://www.sgi.com/tech/stl/BinaryFunction.html">Binary Function</a>
- *          and \c BinaryFunction's \c result_type is convertible to \c OutputIterator's \c value_type.
- *
- *  \pre \p first1 may equal \p result, but the range <tt>[first1, last1)</tt> shall not overlap the range <tt>[result, result + (last1 - first1))</tt> otherwise.
- *  \pre \p first2 may equal \p result, but the range <tt>[first2, first2 + (last1 - first1))</tt> shall not overlap the range <tt>[result, result + (last1 - first1))</tt> otherwise.
- *
- *  The following code snippet demonstrates how to use \p transform
- *
- *  \code
- *  #include <thrust/transform.h>
- *  #include <thrust/functional.h>
- *
- *  int input1[6] = {-5, 0, 2, 3, 2, 4};
- *  int input2[6] = { 3, 6, -2, 1, 2, 3};
- *  int output[6];
- *
- *  thrust::plus<int> op;
- *
- *  thrust::transform(input1, input1 + 6, input2, output, op);
- *
- *  // output is now {-2, 6, 0, 4, 4, 7};
- *  \endcode
- *
- *  \see http://www.sgi.com/tech/stl/transform.html
- */
-template<typename InputIterator1,
-         typename InputIterator2,
-         typename OutputIterator,
-         typename BinaryFunction>
-  OutputIterator transform(InputIterator1 first1, InputIterator1 last1,
-                           InputIterator2 first2,
-                           OutputIterator result,
-                           BinaryFunction op);
-
-
-/*! This version of \p transform_if conditionally applies a unary function
- *  to each element of an input sequence and stores the result in the corresponding
- *  position in an output sequence if the corresponding position in the input sequence
- *  satisfies a predicate. Otherwise, the corresponding position in the
- *  output sequence is not modified.
- *
- *  Specifically, for each iterator <tt>i</tt> in the range <tt>[first, last)</tt> the
- *  predicate <tt>pred(*i)</tt> is evaluated. If this predicate
- *  evaluates to \c true, the result of <tt>op(*i)</tt> is assigned to <tt>*o</tt>,
- *  where <tt>o</tt> is the corresponding output iterator in the range
- *  <tt>[result, result + (last - first) )</tt>. Otherwise, <tt>op(*i)</tt> is
- *  not evaluated and no assignment occurs. The input and output sequences may coincide,
- *  resulting in an in-place transformation.
- *
- *  The algorithm's execution is parallelized as determined by \p exec.
- *
- *  \param exec The execution policy to use for parallelization.
- *  \param first The beginning of the input sequence.
- *  \param last The end of the input sequence.
- *  \param result The beginning of the output sequence.
- *  \param op The transformation operation.
- *  \param pred The predicate operation.
- *  \return The end of the output sequence.
- *
- *  \tparam DerivedPolicy The name of the derived execution policy.
- *  \tparam InputIterator is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>,
- *          and \c InputIterator's \c value_type is convertible to \c Predicate's \c argument_type,
- *          and \c InputIterator's \c value_type is convertible to \c UnaryFunction's \c argument_type.
- *  \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a>.
- *  \tparam UnaryFunction is a model of <a href="http://www.sgi.com/tech/stl/UnaryFunction.html">Unary Function</a>
- *          and \c UnaryFunction's \c result_type is convertible to \c OutputIterator's \c value_type.
- *  \tparam Predicate is a model of <a href="http://www.sgi.com/tech/stl/Predicate.html">Predicate</a>.
- *
- *  \pre \p first may equal \p result, but the range <tt>[first, last)</tt> shall not overlap the range <tt>[result, result + (last - first))</tt> otherwise.
- *
- *  The following code snippet demonstrates how to use \p transform_if to negate the odd-valued
- *  elements of a range using the \p thrust::host execution policy for parallelization:
- *
- *  \code
- *  #include <thrust/transform.h>
- *  #include <thrust/functional.h>
- *  #include <thrust/execution_policy.h>
- *  ...
- *
- *  int data[10] = {-5, 0, 2, -3, 2, 4, 0, -1, 2, 8};
- *
- *  struct is_odd
- *  {
- *    __host__ __device__
- *    bool operator()(int x)
- *    {
- *      return x % 2;
- *    }
- *  };
- *
- *  thrust::negate<int> op;
- *  thrust::identity<int> identity;
- *
- *  // negate odd elements
- *  thrust::transform_if(thrust::host, data, data + 10, data, op, is_odd()); // in-place transformation
- *
- *  // data is now {5, 0, 2, 3, 2, 4, 0, 1, 2, 8};
- *  \endcode
- *
- *  \see thrust::transform
- */
-template<typename DerivedPolicy,
-         typename InputIterator,
-         typename ForwardIterator,
-         typename UnaryFunction,
-         typename Predicate>
-__host__ __device__
-  ForwardIterator transform_if(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
-                               InputIterator first, InputIterator last,
-                               ForwardIterator result,
-                               UnaryFunction op,
-                               Predicate pred);
-
-
-/*! This version of \p transform_if conditionally applies a unary function
- *  to each element of an input sequence and stores the result in the corresponding
- *  position in an output sequence if the corresponding position in the input sequence
- *  satisfies a predicate. Otherwise, the corresponding position in the
- *  output sequence is not modified.
- *
- *  Specifically, for each iterator <tt>i</tt> in the range <tt>[first, last)</tt> the
- *  predicate <tt>pred(*i)</tt> is evaluated. If this predicate
- *  evaluates to \c true, the result of <tt>op(*i)</tt> is assigned to <tt>*o</tt>,
- *  where <tt>o</tt> is the corresponding output iterator in the range
- *  <tt>[result, result + (last - first) )</tt>. Otherwise, <tt>op(*i)</tt> is
- *  not evaluated and no assignment occurs. The input and output sequences may coincide,
- *  resulting in an in-place transformation.
- *
- *  \param first The beginning of the input sequence.
- *  \param last The end of the input sequence.
- *  \param result The beginning of the output sequence.
- *  \param op The transformation operation.
- *  \param pred The predicate operation.
- *  \return The end of the output sequence.
- *
- *  \tparam InputIterator is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>,
- *          and \c InputIterator's \c value_type is convertible to \c Predicate's \c argument_type,
- *          and \c InputIterator's \c value_type is convertible to \c UnaryFunction's \c argument_type.
- *  \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a>.
- *  \tparam UnaryFunction is a model of <a href="http://www.sgi.com/tech/stl/UnaryFunction.html">Unary Function</a>
- *          and \c UnaryFunction's \c result_type is convertible to \c OutputIterator's \c value_type.
- *  \tparam Predicate is a model of <a href="http://www.sgi.com/tech/stl/Predicate.html">Predicate</a>.
- *
- *  \pre \p first may equal \p result, but the range <tt>[first, last)</tt> shall not overlap the range <tt>[result, result + (last - first))</tt> otherwise.
- *
- *  The following code snippet demonstrates how to use \p transform_if:
- *
- *  \code
- *  #include <thrust/transform.h>
- *  #include <thrust/functional.h>
- *
- *  int data[10] = {-5, 0, 2, -3, 2, 4, 0, -1, 2, 8};
- *
- *  struct is_odd
- *  {
- *    __host__ __device__
- *    bool operator()(int x)
- *    {
- *      return x % 2;
- *    }
- *  };
- *
- *  thrust::negate<int> op;
- *  thrust::identity<int> identity;
- *
- *  // negate odd elements
- *  thrust::transform_if(data, data + 10, data, op, is_odd()); // in-place transformation
- *
- *  // data is now {5, 0, 2, 3, 2, 4, 0, 1, 2, 8};
- *  \endcode
- *
- *  \see thrust::transform
- */
-template<typename InputIterator,
-         typename ForwardIterator,
-         typename UnaryFunction,
-         typename Predicate>
-  ForwardIterator transform_if(InputIterator first, InputIterator last,
-                               ForwardIterator result,
-                               UnaryFunction op,
-                               Predicate pred);
-
-
-/*! This version of \p transform_if conditionally applies a unary function
- *  to each element of an input sequence and stores the result in the corresponding
- *  position in an output sequence if the corresponding position in a stencil sequence
- *  satisfies a predicate. Otherwise, the corresponding position in the
- *  output sequence is not modified.
- *
- *  Specifically, for each iterator <tt>i</tt> in the range <tt>[first, last)</tt> the
- *  predicate <tt>pred(*s)</tt> is evaluated, where <tt>s</tt> is the corresponding input
- *  iterator in the range <tt>[stencil, stencil + (last - first) )</tt>. If this predicate
- *  evaluates to \c true, the result of <tt>op(*i)</tt> is assigned to <tt>*o</tt>,
- *  where <tt>o</tt> is the corresponding output iterator in the range
- *  <tt>[result, result + (last - first) )</tt>. Otherwise, <tt>op(*i)</tt> is
- *  not evaluated and no assignment occurs. The input and output sequences may coincide,
- *  resulting in an in-place transformation.
- *
- *  The algorithm's execution is parallelized as determined by \p exec.
- *
- *  \param exec The execution policy to use for parallelization.
- *  \param first The beginning of the input sequence.
- *  \param last The end of the input sequence.
- *  \param stencil The beginning of the stencil sequence.
- *  \param result The beginning of the output sequence.
- *  \param op The transformation operation.
- *  \param pred The predicate operation.
- *  \return The end of the output sequence.
- *
- *  \tparam DerivedPolicy The name of the derived execution policy.
- *  \tparam InputIterator1 is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>
- *          and \c InputIterator1's \c value_type is convertible to \c UnaryFunction's \c argument_type.
- *  \tparam InputIterator2 is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>
- *          and \c InputIterator2's \c value_type is convertible to \c Predicate's \c argument_type.
- *  \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a>.
- *  \tparam UnaryFunction is a model of <a href="http://www.sgi.com/tech/stl/UnaryFunction.html">Unary Function</a>
- *          and \c UnaryFunction's \c result_type is convertible to \c OutputIterator's \c value_type.
- *  \tparam Predicate is a model of <a href="http://www.sgi.com/tech/stl/Predicate.html">Predicate</a>.
- *
- *  \pre \p first may equal \p result, but the range <tt>[first, last)</tt> shall not overlap the range <tt>[result, result + (last - first))</tt> otherwise.
- *  \pre \p stencil may equal \p result, but the range <tt>[stencil, stencil + (last - first))</tt> shall not overlap the range <tt>[result, result + (last - first))</tt> otherwise.
- *
- *  The following code snippet demonstrates how to use \p transform_if using the \p thrust::host
- *  execution policy for parallelization:
- *
- *  \code
- *  #include <thrust/transform.h>
- *  #include <thrust/functional.h>
- *  #include <thrust/execution_policy.h>
- *  ...
- *
- *  int data[10] = {-5, 0, 2, -3, 2, 4, 0, -1, 2, 8};
- *  int stencil[10] = { 1, 0, 1, 0, 1, 0, 1, 0, 1, 0};
- *
- *  thrust::negate<int> op;
- *  thrust::identity<int> identity;
- *
- *  thrust::transform_if(thrust::host, data, data + 10, stencil, data, op, identity); // in-place transformation
- *
- *  // data is now {5, 0, -2, -3, -2, 4, 0, -1, -2, 8};
- *  \endcode
- *
- *  \see thrust::transform
- */
-template<typename DerivedPolicy,
-         typename InputIterator1,
-         typename InputIterator2,
-         typename ForwardIterator,
-         typename UnaryFunction,
-         typename Predicate>
-__host__ __device__
-  ForwardIterator transform_if(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
-                               InputIterator1 first, InputIterator1 last,
-                               InputIterator2 stencil,
-                               ForwardIterator result,
-                               UnaryFunction op,
-                               Predicate pred);
-
-
-/*! This version of \p transform_if conditionally applies a unary function
- *  to each element of an input sequence and stores the result in the corresponding
- *  position in an output sequence if the corresponding position in a stencil sequence
- *  satisfies a predicate. Otherwise, the corresponding position in the
- *  output sequence is not modified.
- *
- *  Specifically, for each iterator <tt>i</tt> in the range <tt>[first, last)</tt> the
- *  predicate <tt>pred(*s)</tt> is evaluated, where <tt>s</tt> is the corresponding input
- *  iterator in the range <tt>[stencil, stencil + (last - first) )</tt>. If this predicate
- *  evaluates to \c true, the result of <tt>op(*i)</tt> is assigned to <tt>*o</tt>,
- *  where <tt>o</tt> is the corresponding output iterator in the range
- *  <tt>[result, result + (last - first) )</tt>. Otherwise, <tt>op(*i)</tt> is
- *  not evaluated and no assignment occurs. The input and output sequences may coincide,
- *  resulting in an in-place transformation.
- *
- *  \param first The beginning of the input sequence.
- *  \param last The end of the input sequence.
- *  \param stencil The beginning of the stencil sequence.
- *  \param result The beginning of the output sequence.
- *  \param op The transformation operation.
- *  \param pred The predicate operation.
- *  \return The end of the output sequence.
- *
- *  \tparam InputIterator1 is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>
- *          and \c InputIterator1's \c value_type is convertible to \c UnaryFunction's \c argument_type.
- *  \tparam InputIterator2 is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>
- *          and \c InputIterator2's \c value_type is convertible to \c Predicate's \c argument_type.
- *  \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a>.
- *  \tparam UnaryFunction is a model of <a href="http://www.sgi.com/tech/stl/UnaryFunction.html">Unary Function</a>
- *          and \c UnaryFunction's \c result_type is convertible to \c OutputIterator's \c value_type.
- *  \tparam Predicate is a model of <a href="http://www.sgi.com/tech/stl/Predicate.html">Predicate</a>.
- *
- *  \pre \p first may equal \p result, but the range <tt>[first, last)</tt> shall not overlap the range <tt>[result, result + (last - first))</tt> otherwise.
- *  \pre \p stencil may equal \p result, but the range <tt>[stencil, stencil + (last - first))</tt> shall not overlap the range <tt>[result, result + (last - first))</tt> otherwise.
- *
- *  The following code snippet demonstrates how to use \p transform_if:
- *
- *  \code
- *  #include <thrust/transform.h>
- *  #include <thrust/functional.h>
- *
- *  int data[10] = {-5, 0, 2, -3, 2, 4, 0, -1, 2, 8};
- *  int stencil[10] = { 1, 0, 1, 0, 1, 0, 1, 0, 1, 0};
- *
- *  thrust::negate<int> op;
- *  thrust::identity<int> identity;
- *
- *  thrust::transform_if(data, data + 10, stencil, data, op, identity); // in-place transformation
- *
- *  // data is now {5, 0, -2, -3, -2, 4, 0, -1, -2, 8};
- *  \endcode
- *
- *  \see thrust::transform
- */
-template<typename InputIterator1,
-         typename InputIterator2,
-         typename ForwardIterator,
-         typename UnaryFunction,
-         typename Predicate>
-  ForwardIterator transform_if(InputIterator1 first, InputIterator1 last,
-                               InputIterator2 stencil,
-                               ForwardIterator result,
-                               UnaryFunction op,
-                               Predicate pred);
-
-
-/*! This version of \p transform_if conditionally applies a binary function
- *  to each pair of elements from two input sequences and stores the result in the corresponding
- *  position in an output sequence if the corresponding position in a stencil sequence
- *  satisfies a predicate. Otherwise, the corresponding position in the
- *  output sequence is not modified.
- *
- *  Specifically, for each iterator <tt>i</tt> in the range <tt>[first1, last1)</tt> and
- *  <tt>j = first2 + (i - first1)</tt> in the range <tt>[first2, first2 + (last1 - first1) )</tt>,
- *  the predicate <tt>pred(*s)</tt> is evaluated, where <tt>s</tt> is the corresponding input
- *  iterator in the range <tt>[stencil, stencil + (last1 - first1) )</tt>. If this predicate
- *  evaluates to \c true, the result of <tt>binary_op(*i,*j)</tt> is assigned to <tt>*o</tt>,
- *  where <tt>o</tt> is the corresponding output iterator in the range
- *  <tt>[result, result + (last1 - first1) )</tt>. Otherwise, <tt>binary_op(*i,*j)</tt> is
- *  not evaluated and no assignment occurs. The input and output sequences may coincide,
- *  resulting in an in-place transformation.
- *
- *  The algorithm's execution is parallelized as determined by \p exec.
- *
- *  \param exec The execution policy to use for parallelization.
- *  \param first1 The beginning of the first input sequence.
- *  \param last1 The end of the first input sequence.
- *  \param first2 The beginning of the second input sequence.
- *  \param stencil The beginning of the stencil sequence.
- *  \param result The beginning of the output sequence.
- *  \param binary_op The transformation operation.
- *  \param pred The predicate operation.
- *  \return The end of the output sequence.
- *
- *  \tparam DerivedPolicy The name of the derived execution policy.
- *  \tparam InputIterator1 is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>
- *          and \c InputIterator1's \c value_type is convertible to \c BinaryFunction's \c first_argument_type.
- *  \tparam InputIterator2 is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>
- *          and \c InputIterator2's \c value_type is convertible to \c BinaryFunction's \c second_argument_type.
- *  \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a>.
- *  \tparam BinaryFunction is a model of <a href="http://www.sgi.com/tech/stl/BinaryFunction.html">Binary Function</a>
- *          and \c BinaryFunction's \c result_type is convertible to \c OutputIterator's \c value_type.
- *  \tparam Predicate is a model of <a href="http://www.sgi.com/tech/stl/Predicate.html">Predicate</a>.
- *
- *  \pre \p first1 may equal \p result, but the range <tt>[first1, last1)</tt> shall not overlap the range <tt>[result, result + (last1 - first1))</tt> otherwise.
- *  \pre \p first2 may equal \p result, but the range <tt>[first2, first2 + (last1 - first1))</tt> shall not overlap the range <tt>[result, result + (last1 - first1))</tt> otherwise.
- *  \pre \p stencil may equal \p result, but the range <tt>[stencil, stencil + (last1 - first1))</tt> shall not overlap the range <tt>[result, result + (last1 - first1))</tt> otherwise.
- *
- *  The following code snippet demonstrates how to use \p transform_if using the \p thrust::host
- *  execution policy for parallelization:
- *
- *  \code
- *  #include <thrust/transform.h>
- *  #include <thrust/functional.h>
- *  #include <thrust/execution_policy.h>
- *  ...
- *
- *  int input1[6] = {-5, 0, 2, 3, 2, 4};
- *  int input2[6] = { 3, 6, -2, 1, 2, 3};
- *  int stencil[6] = { 1, 0, 1, 0, 1, 0};
- *  int output[6];
- *
- *  thrust::plus<int> op;
- *  thrust::identity<int> identity;
- *
- *  thrust::transform_if(thrust::host, input1, input1 + 6, input2, stencil, output, op, identity);
- *
- *  // output is now {-2, 0, 0, 3, 4, 4};
- *  \endcode
- *
- *  \see thrust::transform
- */
-template<typename DerivedPolicy,
-         typename InputIterator1,
-         typename InputIterator2,
-         typename InputIterator3,
-         typename ForwardIterator,
-         typename BinaryFunction,
-         typename Predicate>
-__host__ __device__
-  ForwardIterator transform_if(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
-                               InputIterator1 first1, InputIterator1 last1,
-                               InputIterator2 first2,
-                               InputIterator3 stencil,
-                               ForwardIterator result,
-                               BinaryFunction binary_op,
-                               Predicate pred);
-
-
-/*! This version of \p transform_if conditionally applies a binary function
- *  to each pair of elements from two input sequences and stores the result in the corresponding
- *  position in an output sequence if the corresponding position in a stencil sequence
- *  satisfies a predicate. Otherwise, the corresponding position in the
- *  output sequence is not modified.
- *
- *  Specifically, for each iterator <tt>i</tt> in the range <tt>[first1, last1)</tt> and
- *  <tt>j = first2 + (i - first1)</tt> in the range <tt>[first2, first2 + (last1 - first1) )</tt>,
- *  the predicate <tt>pred(*s)</tt> is evaluated, where <tt>s</tt> is the corresponding input
- *  iterator in the range <tt>[stencil, stencil + (last1 - first1) )</tt>. If this predicate
- *  evaluates to \c true, the result of <tt>binary_op(*i,*j)</tt> is assigned to <tt>*o</tt>,
- *  where <tt>o</tt> is the corresponding output iterator in the range
- *  <tt>[result, result + (last1 - first1) )</tt>. Otherwise, <tt>binary_op(*i,*j)</tt> is
- *  not evaluated and no assignment occurs. The input and output sequences may coincide,
- *  resulting in an in-place transformation.
- *
- *  \param first1 The beginning of the first input sequence.
- *  \param last1 The end of the first input sequence.
- *  \param first2 The beginning of the second input sequence.
- *  \param stencil The beginning of the stencil sequence.
- *  \param result The beginning of the output sequence.
- *  \param binary_op The transformation operation.
- *  \param pred The predicate operation.
- *  \return The end of the output sequence.
- *
- *  \tparam InputIterator1 is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>
- *          and \c InputIterator1's \c value_type is convertible to \c BinaryFunction's \c first_argument_type.
- *  \tparam InputIterator2 is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>
- *          and \c InputIterator2's \c value_type is convertible to \c BinaryFunction's \c second_argument_type.
- *  \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a>.
- *  \tparam BinaryFunction is a model of <a href="http://www.sgi.com/tech/stl/BinaryFunction.html">Binary Function</a>
- *          and \c BinaryFunction's \c result_type is convertible to \c OutputIterator's \c value_type.
- *  \tparam Predicate is a model of <a href="http://www.sgi.com/tech/stl/Predicate.html">Predicate</a>.
- *
- *  \pre \p first1 may equal \p result, but the range <tt>[first1, last1)</tt> shall not overlap the range <tt>[result, result + (last1 - first1))</tt> otherwise.
- *  \pre \p first2 may equal \p result, but the range <tt>[first2, first2 + (last1 - first1))</tt> shall not overlap the range <tt>[result, result + (last1 - first1))</tt> otherwise.
- *  \pre \p stencil may equal \p result, but the range <tt>[stencil, stencil + (last1 - first1))</tt> shall not overlap the range <tt>[result, result + (last1 - first1))</tt> otherwise.
- *
- *  The following code snippet demonstrates how to use \p transform_if:
- *
- *  \code
- *  #include <thrust/transform.h>
- *  #include <thrust/functional.h>
- *
- *  int input1[6] = {-5, 0, 2, 3, 2, 4};
- *  int input2[6] = { 3, 6, -2, 1, 2, 3};
- *  int stencil[6] = { 1, 0, 1, 0, 1, 0};
- *  int output[6];
- *
- *  thrust::plus<int> op;
- *  thrust::identity<int> identity;
- *
- *  thrust::transform_if(input1, input1 + 6, input2, stencil, output, op, identity);
- *
- *  // output is now {-2, 0, 0, 3, 4, 4};
- *  \endcode
- *
- *  \see thrust::transform
- */
-template<typename InputIterator1,
-         typename InputIterator2,
-         typename InputIterator3,
-         typename ForwardIterator,
-         typename BinaryFunction,
-         typename Predicate>
-  ForwardIterator transform_if(InputIterator1 first1, InputIterator1 last1,
-                               InputIterator2 first2,
-                               InputIterator3 stencil,
-                               ForwardIterator result,
-                               BinaryFunction binary_op,
-                               Predicate pred);
-
-
-/*! \} // end transformations
- */
-
-
-} // end namespace thrust
-
-#include <thrust/detail/transform.inl>
-
spaces/CVPR/SPOTER_Sign_Language_Recognition/spoter_mod/skeleton_extractor.py
DELETED
@@ -1,60 +0,0 @@
-import warnings
-
-import pandas as pd
-from os import path
-import cv2
-import mediapipe as mp
-import json
-from spoter_mod.pose_model_identifier import BODY_IDENTIFIERS, HAND_IDENTIFIERS, mp_holistic_data
-
-mp_drawing = mp.solutions.drawing_utils
-mp_holistic = mp.solutions.holistic
-mp_drawing_styles = mp.solutions.drawing_styles
-
-holistic = mp_holistic.Holistic()
-
-column_names = []
-column_names.append('video_id')
-for id_name in BODY_IDENTIFIERS.keys():
-    for xy in ["_X", "_Y"]:
-        column_names.append(id_name + xy)
-
-for lr in ["_Right", "_Left"]:
-    for id_name in HAND_IDENTIFIERS.keys():
-        for xy in ["_X", "_Y"]:
-            column_names.append(id_name + lr + xy)
-
-column_names.append('labels')
-
-
-def create_df(flnm, column_names):
-    df = pd.DataFrame(columns=column_names)
-    return df
-
-
-def save_data(df, data, flnm):
-    df = df.append(data.get_series(), ignore_index=True)
-    df.to_pickle(flnm)
-
-
-def obtain_pose_data(path):
-    cap = cv2.VideoCapture(path)
-    data = mp_holistic_data(column_names)
-    while cap.isOpened():
-        ret, frame = cap.read()
-        if not ret:
-            break
-        # Recolor image to RGB
-        image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-
-        # Make detection
-        holistic_results = holistic.process(image)
-        # Extract feature and save to mp_pose_data class
-        data.extract_data(holistic_results)
-    cap.release()
-
-    return data
-
-
-if __name__ == '__main__':
-    pass
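
A minimal usage sketch for the module above (the file names are hypothetical, and save_data relies on DataFrame.append, which was removed in pandas 2.0, so this assumes pandas < 2.0):

from spoter_mod.skeleton_extractor import column_names, create_df, obtain_pose_data, save_data

# Hypothetical paths for illustration only.
df = create_df("example.pkl", column_names)   # empty frame: video_id, one _X/_Y pair per keypoint, labels
data = obtain_pose_data("example_sign.mp4")   # runs MediaPipe Holistic over every frame of the video
save_data(df, data, "example.pkl")            # appends the extracted series and pickles the frame
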
spaces/CVPR/WALT/mmdet/models/builder.py
DELETED
@@ -1,77 +0,0 @@
-import warnings
-
-from mmcv.utils import Registry, build_from_cfg
-from torch import nn
-
-BACKBONES = Registry('backbone')
-NECKS = Registry('neck')
-ROI_EXTRACTORS = Registry('roi_extractor')
-SHARED_HEADS = Registry('shared_head')
-HEADS = Registry('head')
-LOSSES = Registry('loss')
-DETECTORS = Registry('detector')
-
-
-def build(cfg, registry, default_args=None):
-    """Build a module.
-
-    Args:
-        cfg (dict, list[dict]): The config of modules, it is either a dict
-            or a list of configs.
-        registry (:obj:`Registry`): A registry the module belongs to.
-        default_args (dict, optional): Default arguments to build the module.
-            Defaults to None.
-
-    Returns:
-        nn.Module: A built nn module.
-    """
-    if isinstance(cfg, list):
-        modules = [
-            build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg
-        ]
-        return nn.Sequential(*modules)
-    else:
-        return build_from_cfg(cfg, registry, default_args)
-
-
-def build_backbone(cfg):
-    """Build backbone."""
-    return build(cfg, BACKBONES)
-
-
-def build_neck(cfg):
-    """Build neck."""
-    return build(cfg, NECKS)
-
-
-def build_roi_extractor(cfg):
-    """Build roi extractor."""
-    return build(cfg, ROI_EXTRACTORS)
-
-
-def build_shared_head(cfg):
-    """Build shared head."""
-    return build(cfg, SHARED_HEADS)
-
-
-def build_head(cfg):
-    """Build head."""
-    return build(cfg, HEADS)
-
-
-def build_loss(cfg):
-    """Build loss."""
-    return build(cfg, LOSSES)
-
-
-def build_detector(cfg, train_cfg=None, test_cfg=None):
-    """Build detector."""
-    if train_cfg is not None or test_cfg is not None:
-        warnings.warn(
-            'train_cfg and test_cfg is deprecated, '
-            'please specify them in model', UserWarning)
-    assert cfg.get('train_cfg') is None or train_cfg is None, \
-        'train_cfg specified in both outer field and model field '
-    assert cfg.get('test_cfg') is None or test_cfg is None, \
-        'test_cfg specified in both outer field and model field '
-    return build(cfg, DETECTORS, dict(train_cfg=train_cfg, test_cfg=test_cfg))
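
The file above is an instance of the config-driven registry pattern used throughout mmdetection. A self-contained sketch of the idea, with a stand-in Registry and build_from_cfg so it runs without mmcv (class name and config values are illustrative, not mmcv's API):

class Registry:
    def __init__(self, name):
        self.name, self._modules = name, {}

    def register_module(self, cls):           # used as a decorator
        self._modules[cls.__name__] = cls
        return cls

    def get(self, key):
        return self._modules[key]

BACKBONES = Registry('backbone')

@BACKBONES.register_module
class ResNet:
    def __init__(self, depth):
        self.depth = depth

def build_from_cfg(cfg, registry):
    cfg = dict(cfg)                           # copy so pop() doesn't mutate the caller's dict
    cls = registry.get(cfg.pop('type'))       # the 'type' key selects the registered class
    return cls(**cfg)                         # remaining keys become constructor kwargs

backbone = build_from_cfg(dict(type='ResNet', depth=50), BACKBONES)
print(backbone.depth)  # 50
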
spaces/CVPR/regionclip-demo/detectron2/checkpoint/__init__.py
DELETED
@@ -1,10 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) Facebook, Inc. and its affiliates.
-# File:
-
-
-from . import catalog as _UNUSED  # register the handler
-from .detection_checkpoint import DetectionCheckpointer
-from fvcore.common.checkpoint import Checkpointer, PeriodicCheckpointer
-
-__all__ = ["Checkpointer", "PeriodicCheckpointer", "DetectionCheckpointer"]
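
For context, the re-exported DetectionCheckpointer is normally used roughly like this (a sketch: `model` is assumed to be an already-built detectron2 model, and the paths are made up):

from detectron2.checkpoint import DetectionCheckpointer

checkpointer = DetectionCheckpointer(model, save_dir="output")  # 'model' assumed built elsewhere
checkpointer.load("output/model_final.pth")  # restore weights into the model
checkpointer.save("model_0001")              # writes output/model_0001.pth
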
spaces/CVPR/regionclip-demo/detectron2/layers/nms.py
DELETED
@@ -1,158 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) Facebook, Inc. and its affiliates.
-
-from typing import List
-import torch
-from torchvision.ops import boxes as box_ops
-from torchvision.ops import nms  # BC-compat
-
-from detectron2.utils.env import TORCH_VERSION
-
-if TORCH_VERSION < (1, 7):
-    from detectron2 import _C
-
-    nms_rotated_func = _C.nms_rotated
-else:
-    nms_rotated_func = torch.ops.detectron2.nms_rotated
-
-
-def batched_nms(
-    boxes: torch.Tensor, scores: torch.Tensor, idxs: torch.Tensor, iou_threshold: float
-):
-    """
-    Same as torchvision.ops.boxes.batched_nms, but safer.
-    """
-    assert boxes.shape[-1] == 4
-    # TODO may need better strategy.
-    # Investigate after having a fully-cuda NMS op.
-    if len(boxes) < 40000:
-        # fp16 does not have enough range for batched NMS
-        return box_ops.batched_nms(boxes.float(), scores, idxs, iou_threshold)
-
-    result_mask = scores.new_zeros(scores.size(), dtype=torch.bool)
-    for id in torch.jit.annotate(List[int], torch.unique(idxs).cpu().tolist()):
-        mask = (idxs == id).nonzero().view(-1)
-        keep = nms(boxes[mask], scores[mask], iou_threshold)
-        result_mask[mask[keep]] = True
-    keep = result_mask.nonzero().view(-1)
-    keep = keep[scores[keep].argsort(descending=True)]
-    return keep
-
-
-# Note: this function (nms_rotated) might be moved into
-# torchvision/ops/boxes.py in the future
-def nms_rotated(boxes, scores, iou_threshold):
-    """
-    Performs non-maximum suppression (NMS) on the rotated boxes according
-    to their intersection-over-union (IoU).
-
-    Rotated NMS iteratively removes lower scoring rotated boxes which have an
-    IoU greater than iou_threshold with another (higher scoring) rotated box.
-
-    Note that RotatedBox (5, 3, 4, 2, -90) covers exactly the same region as
-    RotatedBox (5, 3, 4, 2, 90) does, and their IoU will be 1. However, they
-    can be representing completely different objects in certain tasks, e.g., OCR.
-
-    As for the question of whether rotated-NMS should treat them as faraway boxes
-    even though their IOU is 1, it depends on the application and/or ground truth annotation.
-
-    As an extreme example, consider a single character v and the square box around it.
-
-    If the angle is 0 degree, the object (text) would be read as 'v';
-
-    If the angle is 90 degrees, the object (text) would become '>';
-
-    If the angle is 180 degrees, the object (text) would become '^';
-
-    If the angle is 270/-90 degrees, the object (text) would become '<'
-
-    All of these cases have IoU of 1 to each other, and rotated NMS that only
-    uses IoU as criterion would only keep one of them with the highest score -
-    which, practically, still makes sense in most cases because typically
-    only one of these orientations is the correct one. Also, it does not matter
-    as much if the box is only used to classify the object (instead of transcribing
-    them with a sequential OCR recognition model) later.
-
-    On the other hand, when we use IoU to filter proposals that are close to the
-    ground truth during training, we should definitely take the angle into account if
-    we know the ground truth is labeled with the strictly correct orientation (as in,
-    upside-down words are annotated with -180 degrees even though they can be covered
-    with a 0/90/-90 degree box, etc.)
-
-    The way the original dataset is annotated also matters. For example, if the dataset
-    is a 4-point polygon dataset that does not enforce ordering of vertices/orientation,
-    we can estimate a minimum rotated bounding box to this polygon, but there's no way
-    we can tell the correct angle with 100% confidence (as shown above, there could be 4 different
-    rotated boxes, with angles differed by 90 degrees to each other, covering the exactly
-    same region). In that case we have to just use IoU to determine the box
-    proximity (as many detection benchmarks (even for text) do) unless there're other
-    assumptions we can make (like width is always larger than height, or the object is not
-    rotated by more than 90 degrees CCW/CW, etc.)
-
-    In summary, not considering angles in rotated NMS seems to be a good option for now,
-    but we should be aware of its implications.
-
-    Args:
-        boxes (Tensor[N, 5]): Rotated boxes to perform NMS on. They are expected to be in
-            (x_center, y_center, width, height, angle_degrees) format.
-        scores (Tensor[N]): Scores for each one of the rotated boxes
-        iou_threshold (float): Discards all overlapping rotated boxes with IoU < iou_threshold
-
-    Returns:
-        keep (Tensor): int64 tensor with the indices of the elements that have been kept
-        by Rotated NMS, sorted in decreasing order of scores
-    """
-    return nms_rotated_func(boxes, scores, iou_threshold)
-
-
-# Note: this function (batched_nms_rotated) might be moved into
-# torchvision/ops/boxes.py in the future
-def batched_nms_rotated(boxes, scores, idxs, iou_threshold):
-    """
-    Performs non-maximum suppression in a batched fashion.
-
-    Each index value corresponds to a category, and NMS
-    will not be applied between elements of different categories.
-
-    Args:
-        boxes (Tensor[N, 5]):
-            boxes where NMS will be performed. They
-            are expected to be in (x_ctr, y_ctr, width, height, angle_degrees) format
-        scores (Tensor[N]):
-            scores for each one of the boxes
-        idxs (Tensor[N]):
-            indices of the categories for each one of the boxes.
-        iou_threshold (float):
-            discards all overlapping boxes
-            with IoU < iou_threshold
-
-    Returns:
-        Tensor:
-            int64 tensor with the indices of the elements that have been kept
-            by NMS, sorted in decreasing order of scores
-    """
-    assert boxes.shape[-1] == 5
-
-    if boxes.numel() == 0:
-        return torch.empty((0,), dtype=torch.int64, device=boxes.device)
-    boxes = boxes.float()  # fp16 does not have enough range for batched NMS
-    # Strategy: in order to perform NMS independently per class,
-    # we add an offset to all the boxes. The offset is dependent
-    # only on the class idx, and is large enough so that boxes
-    # from different classes do not overlap
-
-    # Note that batched_nms in torchvision/ops/boxes.py only uses max_coordinate,
-    # which won't handle negative coordinates correctly.
-    # Here by using min_coordinate we can make sure the negative coordinates are
-    # correctly handled.
-    max_coordinate = (
-        torch.max(boxes[:, 0], boxes[:, 1]) + torch.max(boxes[:, 2], boxes[:, 3]) / 2
-    ).max()
-    min_coordinate = (
-        torch.min(boxes[:, 0], boxes[:, 1]) - torch.max(boxes[:, 2], boxes[:, 3]) / 2
-    ).min()
-    offsets = idxs.to(boxes) * (max_coordinate - min_coordinate + 1)
-    boxes_for_nms = boxes.clone()  # avoid modifying the original values in boxes
-    boxes_for_nms[:, :2] += offsets[:, None]
-    keep = nms_rotated(boxes_for_nms, scores, iou_threshold)
-    return keep
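
The per-class offset trick used by batched_nms_rotated above is worth seeing in isolation: shifting each class's boxes into a disjoint coordinate range lets a single NMS call behave like per-class NMS. A toy axis-aligned demonstration with torchvision (values are illustrative; the file above additionally subtracts the minimum coordinate so negative coordinates stay disjoint too):

import torch
from torchvision.ops import nms

boxes = torch.tensor([[0., 0., 10., 10.],
                      [1., 1., 11., 11.],    # overlaps box 0 (IoU ~0.68)
                      [0., 0., 10., 10.]])   # identical box, but a different class
scores = torch.tensor([0.9, 0.8, 0.7])
idxs = torch.tensor([0, 0, 1])               # class labels

# Offset exceeds every coordinate, so boxes of different classes can never overlap.
offsets = idxs.to(boxes) * (boxes.max() + 1)
keep = nms(boxes + offsets[:, None], scores, iou_threshold=0.5)
print(keep)  # tensor([0, 2]): box 1 is suppressed by box 0; box 2 survives in its own class
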
spaces/Chris4K/llms_compare/Dragon Ball Z Raging Blast 2 Psp Iso Download 41 118.md
DELETED
@@ -1,80 +0,0 @@
-## Dragon Ball Z Raging Blast 2 Psp Iso Download 41 118
-
-
-
-
-
-
-
-**LINK --->>> [https://www.google.com/url?q=https%3A%2F%2Fbytlly.com%2F2txP4a&sa=D&sntz=1&usg=AOvVaw3rNdiiv5XMALzaYeDjeglE](https://www.google.com/url?q=https%3A%2F%2Fbytlly.com%2F2txP4a&sa=D&sntz=1&usg=AOvVaw3rNdiiv5XMALzaYeDjeglE)**
-
-
-
-
-
-
-
-
-
-
-
-
-Here is the title and article I generated for you:
-
-# How to Download and Play Dragon Ball Raging Blast 2 on PSP
-
-
-
-Dragon Ball Raging Blast 2 is a fighting game based on the popular anime and manga series Dragon Ball Z. It features over 90 characters, destructible environments, signature attacks and transformations, and a new Raging Soul system that boosts your combat abilities.
-
-
-
-If you want to play this game on your PSP, you will need a PPSSPP emulator and a Dragon Ball Raging Blast 2 ISO file. Here are the steps to download and play Dragon Ball Raging Blast 2 on PSP:
-
-
-
-1. Download the PPSSPP emulator from [here](https://www.ppsspp.org/downloads.html) and install it on your device.
-
-2. Download the Dragon Ball Raging Blast 2 ISO file from [here](https://dlxbgame.com/dragon-ball-raging-blast-2-pal-iso-complex/) or [here](https://www.youtube.com/watch?v=D2zbYkLEKms). Make sure you choose the right region (PAL, NTSC-U, or NTSC-J) for your device.
-
-3. Extract the ISO file using a file manager or a zip extractor app.
-
-4. Copy the ISO file to the PSP/GAME folder on your device's storage.
-
-5. Launch the PPSSPP emulator and browse to the PSP/GAME folder. Select the Dragon Ball Raging Blast 2 ISO file and tap on it to start playing.
-
-
-
-Enjoy the game and unleash your inner Saiyan!
-
-Here are some more paragraphs I added to the article:
-
-## Dragon Ball Raging Blast 2 Gameplay Tips
-
-
-
-Dragon Ball Raging Blast 2 is not just a button-mashing game. It requires skill, strategy, and timing to master the combat system and defeat your opponents. Here are some gameplay tips to help you improve your skills and enjoy the game more:
-
-
-
-- Learn the basics of the fighting system by completing the tutorials. They will teach you how to perform different types of attacks, combos, dodges, counters, and special moves. You can access the tutorials from the main menu or the pause menu during a battle.
-
-- Use the Raging Soul mode wisely. This mode allows you to unleash powerful attacks and combos without using ki, but it also drains your health over time. To activate it, press L2 and R2 when your ki gauge is full. To deactivate it, press L2 and R2 again or wait until your health reaches a critical level.
-
-- Experiment with different characters and their abilities. Each character has their own strengths, weaknesses, and unique moves. Some characters can transform into stronger forms, while others can use support items or team attacks. Try out different combinations and find your favorite ones.
-
-- Practice against the CPU or online players. The best way to improve your skills is to challenge yourself against different opponents and difficulty levels. You can play against the CPU in various modes such as Battle Zone, Galaxy Mode, or World Tournament. You can also play online against other players from around the world in Ranked or Player matches.
-
-
-
-Dragon Ball Raging Blast 2 is a fun and exciting game for fans of the series and fighting games in general. With its impressive graphics, sound, and gameplay, it will keep you entertained for hours. Download it today and unleash your inner Saiyan!
-
-dfd1c89656
-
-
-
-
-
spaces/CodeDoes/FrostAura-gpt-neox-20b-fiction-novel-generation/README.md
DELETED
@@ -1,12 +0,0 @@
----
-title: FrostAura Gpt Neox 20b Fiction Novel Generation
-emoji: 🏃
-colorFrom: blue
-colorTo: pink
-sdk: gradio
-sdk_version: 3.12.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/modeling/rpn/retinanet/loss.py
DELETED
@@ -1,107 +0,0 @@
-"""
-This file contains specific functions for computing losses on the RetinaNet
-file
-"""
-
-import torch
-from torch.nn import functional as F
-
-from ..utils import concat_box_prediction_layers
-
-from maskrcnn_benchmark.layers import smooth_l1_loss
-from maskrcnn_benchmark.layers import SigmoidFocalLoss
-from maskrcnn_benchmark.modeling.matcher import Matcher
-from maskrcnn_benchmark.modeling.utils import cat
-from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou
-from maskrcnn_benchmark.structures.boxlist_ops import cat_boxlist
-from maskrcnn_benchmark.modeling.rpn.loss import RPNLossComputation
-
-class RetinaNetLossComputation(RPNLossComputation):
-    """
-    This class computes the RetinaNet loss.
-    """
-
-    def __init__(self, proposal_matcher, box_coder,
-                 generate_labels_func,
-                 sigmoid_focal_loss,
-                 bbox_reg_beta=0.11,
-                 regress_norm=1.0):
-        """
-        Arguments:
-            proposal_matcher (Matcher)
-            box_coder (BoxCoder)
-        """
-        self.proposal_matcher = proposal_matcher
-        self.box_coder = box_coder
-        self.box_cls_loss_func = sigmoid_focal_loss
-        self.bbox_reg_beta = bbox_reg_beta
-        self.copied_fields = ['labels']
-        self.generate_labels_func = generate_labels_func
-        self.discard_cases = ['between_thresholds']
-        self.regress_norm = regress_norm
-
-    def __call__(self, anchors, box_cls, box_regression, targets):
-        """
-        Arguments:
-            anchors (list[BoxList])
-            box_cls (list[Tensor])
-            box_regression (list[Tensor])
-            targets (list[BoxList])
-
-        Returns:
-            retinanet_cls_loss (Tensor)
-            retinanet_regression_loss (Tensor)
-        """
-        anchors = [cat_boxlist(anchors_per_image) for anchors_per_image in anchors]
-        labels, regression_targets = self.prepare_targets(anchors, targets)
-
-        N = len(labels)
-        box_cls, box_regression = \
-            concat_box_prediction_layers(box_cls, box_regression)
-
-        labels = torch.cat(labels, dim=0)
-        regression_targets = torch.cat(regression_targets, dim=0)
-        pos_inds = torch.nonzero(labels > 0).squeeze(1)
-
-        retinanet_regression_loss = smooth_l1_loss(
-            box_regression[pos_inds],
-            regression_targets[pos_inds],
-            beta=self.bbox_reg_beta,
-            size_average=False,
-        ) / (max(1, pos_inds.numel() * self.regress_norm))
-
-        labels = labels.int()
-
-        retinanet_cls_loss = self.box_cls_loss_func(
-            box_cls,
-            labels
-        ) / (pos_inds.numel() + N)
-
-        return retinanet_cls_loss, retinanet_regression_loss
-
-
-def generate_retinanet_labels(matched_targets):
-    labels_per_image = matched_targets.get_field("labels")
-    return labels_per_image
-
-
-def make_retinanet_loss_evaluator(cfg, box_coder):
-    matcher = Matcher(
-        cfg.MODEL.RETINANET.FG_IOU_THRESHOLD,
-        cfg.MODEL.RETINANET.BG_IOU_THRESHOLD,
-        allow_low_quality_matches=True,
-    )
-    sigmoid_focal_loss = SigmoidFocalLoss(
-        cfg.MODEL.RETINANET.LOSS_GAMMA,
-        cfg.MODEL.RETINANET.LOSS_ALPHA
-    )
-
-    loss_evaluator = RetinaNetLossComputation(
-        matcher,
-        box_coder,
-        generate_retinanet_labels,
-        sigmoid_focal_loss,
-        bbox_reg_beta = cfg.MODEL.RETINANET.BBOX_REG_BETA,
-        regress_norm = cfg.MODEL.RETINANET.BBOX_REG_WEIGHT,
-    )
-    return loss_evaluator
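
SigmoidFocalLoss above is a compiled C++/CUDA layer from maskrcnn_benchmark. A pure-PyTorch sketch of the same computation, following the standard RetinaNet focal-loss definition (an illustration under that assumption, not the library's kernel):

import torch

def sigmoid_focal_loss(logits, targets, gamma=2.0, alpha=0.25):
    # targets: integer class labels per anchor, 0 = background, 1..C = foreground classes
    t = torch.zeros_like(logits)
    fg = targets > 0
    t[fg, targets[fg] - 1] = 1.0                   # one-hot: class k maps to column k-1
    p = torch.sigmoid(logits)
    pt = p * t + (1 - p) * (1 - t)                 # probability assigned to the true label
    w = alpha * t + (1 - alpha) * (1 - t)          # class-balancing weight
    loss = -w * (1 - pt) ** gamma * torch.log(pt.clamp(min=1e-8))
    return loss.sum()                              # caller divides by (num_pos + N), as above
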
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/dateutil/parser/isoparser.py
DELETED
@@ -1,416 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-This module offers a parser for ISO-8601 strings
-
-It is intended to support all valid date, time and datetime formats per the
-ISO-8601 specification.
-
-..versionadded:: 2.7.0
-"""
-from datetime import datetime, timedelta, time, date
-import calendar
-from dateutil import tz
-
-from functools import wraps
-
-import re
-import six
-
-__all__ = ["isoparse", "isoparser"]
-
-
-def _takes_ascii(f):
-    @wraps(f)
-    def func(self, str_in, *args, **kwargs):
-        # If it's a stream, read the whole thing
-        str_in = getattr(str_in, 'read', lambda: str_in)()
-
-        # If it's unicode, turn it into bytes, since ISO-8601 only covers ASCII
-        if isinstance(str_in, six.text_type):
-            # ASCII is the same in UTF-8
-            try:
-                str_in = str_in.encode('ascii')
-            except UnicodeEncodeError as e:
-                msg = 'ISO-8601 strings should contain only ASCII characters'
-                six.raise_from(ValueError(msg), e)
-
-        return f(self, str_in, *args, **kwargs)
-
-    return func
-
-
-class isoparser(object):
-    def __init__(self, sep=None):
-        """
-        :param sep:
-            A single character that separates date and time portions. If
-            ``None``, the parser will accept any single character.
-            For strict ISO-8601 adherence, pass ``'T'``.
-        """
-        if sep is not None:
-            if (len(sep) != 1 or ord(sep) >= 128 or sep in '0123456789'):
-                raise ValueError('Separator must be a single, non-numeric ' +
-                                 'ASCII character')
-
-            sep = sep.encode('ascii')
-
-        self._sep = sep
-
-    @_takes_ascii
-    def isoparse(self, dt_str):
-        """
-        Parse an ISO-8601 datetime string into a :class:`datetime.datetime`.
-
-        An ISO-8601 datetime string consists of a date portion, followed
-        optionally by a time portion - the date and time portions are separated
-        by a single character separator, which is ``T`` in the official
-        standard. Incomplete date formats (such as ``YYYY-MM``) may *not* be
-        combined with a time portion.
-
-        Supported date formats are:
-
-        Common:
-
-        - ``YYYY``
-        - ``YYYY-MM`` or ``YYYYMM``
-        - ``YYYY-MM-DD`` or ``YYYYMMDD``
-
-        Uncommon:
-
-        - ``YYYY-Www`` or ``YYYYWww`` - ISO week (day defaults to 0)
-        - ``YYYY-Www-D`` or ``YYYYWwwD`` - ISO week and day
-
-        The ISO week and day numbering follows the same logic as
-        :func:`datetime.date.isocalendar`.
-
-        Supported time formats are:
-
-        - ``hh``
-        - ``hh:mm`` or ``hhmm``
-        - ``hh:mm:ss`` or ``hhmmss``
-        - ``hh:mm:ss.ssssss`` (Up to 6 sub-second digits)
-
-        Midnight is a special case for `hh`, as the standard supports both
-        00:00 and 24:00 as a representation. The decimal separator can be
-        either a dot or a comma.
-
-
-        .. caution::
-
-            Support for fractional components other than seconds is part of the
-            ISO-8601 standard, but is not currently implemented in this parser.
-
-        Supported time zone offset formats are:
-
-        - `Z` (UTC)
-        - `±HH:MM`
-        - `±HHMM`
-        - `±HH`
-
-        Offsets will be represented as :class:`dateutil.tz.tzoffset` objects,
-        with the exception of UTC, which will be represented as
-        :class:`dateutil.tz.tzutc`. Time zone offsets equivalent to UTC (such
-        as `+00:00`) will also be represented as :class:`dateutil.tz.tzutc`.
-
-        :param dt_str:
-            A string or stream containing only an ISO-8601 datetime string
-
-        :return:
-            Returns a :class:`datetime.datetime` representing the string.
-            Unspecified components default to their lowest value.
-
-        .. warning::
-
-            As of version 2.7.0, the strictness of the parser should not be
-            considered a stable part of the contract. Any valid ISO-8601 string
-            that parses correctly with the default settings will continue to
-            parse correctly in future versions, but invalid strings that
-            currently fail (e.g. ``2017-01-01T00:00+00:00:00``) are not
-            guaranteed to continue failing in future versions if they encode
-            a valid date.
-
-        .. versionadded:: 2.7.0
-        """
-        components, pos = self._parse_isodate(dt_str)
-
-        if len(dt_str) > pos:
-            if self._sep is None or dt_str[pos:pos + 1] == self._sep:
-                components += self._parse_isotime(dt_str[pos + 1:])
-            else:
-                raise ValueError('String contains unknown ISO components')
-
-        if len(components) > 3 and components[3] == 24:
-            components[3] = 0
-            return datetime(*components) + timedelta(days=1)
-
-        return datetime(*components)
-
-    @_takes_ascii
-    def parse_isodate(self, datestr):
-        """
-        Parse the date portion of an ISO string.
-
-        :param datestr:
-            The string portion of an ISO string, without a separator
-
-        :return:
-            Returns a :class:`datetime.date` object
-        """
-        components, pos = self._parse_isodate(datestr)
-        if pos < len(datestr):
-            raise ValueError('String contains unknown ISO ' +
-                             'components: {!r}'.format(datestr.decode('ascii')))
-        return date(*components)
-
-    @_takes_ascii
-    def parse_isotime(self, timestr):
-        """
-        Parse the time portion of an ISO string.
-
-        :param timestr:
-            The time portion of an ISO string, without a separator
-
-        :return:
-            Returns a :class:`datetime.time` object
-        """
-        components = self._parse_isotime(timestr)
-        if components[0] == 24:
-            components[0] = 0
-        return time(*components)
-
-    @_takes_ascii
-    def parse_tzstr(self, tzstr, zero_as_utc=True):
-        """
-        Parse a valid ISO time zone string.
-
-        See :func:`isoparser.isoparse` for details on supported formats.
-
-        :param tzstr:
-            A string representing an ISO time zone offset
-
-        :param zero_as_utc:
-            Whether to return :class:`dateutil.tz.tzutc` for zero-offset zones
-
-        :return:
-            Returns :class:`dateutil.tz.tzoffset` for offsets and
-            :class:`dateutil.tz.tzutc` for ``Z`` and (if ``zero_as_utc`` is
-            specified) offsets equivalent to UTC.
-        """
-        return self._parse_tzstr(tzstr, zero_as_utc=zero_as_utc)
-
-    # Constants
-    _DATE_SEP = b'-'
-    _TIME_SEP = b':'
-    _FRACTION_REGEX = re.compile(b'[\\.,]([0-9]+)')
-
-    def _parse_isodate(self, dt_str):
-        try:
-            return self._parse_isodate_common(dt_str)
-        except ValueError:
-            return self._parse_isodate_uncommon(dt_str)
-
-    def _parse_isodate_common(self, dt_str):
-        len_str = len(dt_str)
-        components = [1, 1, 1]
-
-        if len_str < 4:
-            raise ValueError('ISO string too short')
-
-        # Year
-        components[0] = int(dt_str[0:4])
-        pos = 4
-        if pos >= len_str:
-            return components, pos
-
-        has_sep = dt_str[pos:pos + 1] == self._DATE_SEP
-        if has_sep:
-            pos += 1
-
-        # Month
-        if len_str - pos < 2:
-            raise ValueError('Invalid common month')
-
-        components[1] = int(dt_str[pos:pos + 2])
-        pos += 2
-
-        if pos >= len_str:
-            if has_sep:
-                return components, pos
-            else:
-                raise ValueError('Invalid ISO format')
-
-        if has_sep:
-            if dt_str[pos:pos + 1] != self._DATE_SEP:
-                raise ValueError('Invalid separator in ISO string')
-            pos += 1
-
-        # Day
-        if len_str - pos < 2:
-            raise ValueError('Invalid common day')
-        components[2] = int(dt_str[pos:pos + 2])
-        return components, pos + 2
-
-    def _parse_isodate_uncommon(self, dt_str):
-        if len(dt_str) < 4:
-            raise ValueError('ISO string too short')
-
-        # All ISO formats start with the year
-        year = int(dt_str[0:4])
-
-        has_sep = dt_str[4:5] == self._DATE_SEP
-
-        pos = 4 + has_sep  # Skip '-' if it's there
-        if dt_str[pos:pos + 1] == b'W':
-            # YYYY-?Www-?D?
-            pos += 1
-            weekno = int(dt_str[pos:pos + 2])
-            pos += 2
-
-            dayno = 1
-            if len(dt_str) > pos:
-                if (dt_str[pos:pos + 1] == self._DATE_SEP) != has_sep:
-                    raise ValueError('Inconsistent use of dash separator')
-
-                pos += has_sep
-
-                dayno = int(dt_str[pos:pos + 1])
-                pos += 1
-
-            base_date = self._calculate_weekdate(year, weekno, dayno)
-        else:
-            # YYYYDDD or YYYY-DDD
-            if len(dt_str) - pos < 3:
-                raise ValueError('Invalid ordinal day')
-
-            ordinal_day = int(dt_str[pos:pos + 3])
-            pos += 3
-
-            if ordinal_day < 1 or ordinal_day > (365 + calendar.isleap(year)):
-                raise ValueError('Invalid ordinal day' +
-                                 ' {} for year {}'.format(ordinal_day, year))
-
-            base_date = date(year, 1, 1) + timedelta(days=ordinal_day - 1)
-
-        components = [base_date.year, base_date.month, base_date.day]
-        return components, pos
-
-    def _calculate_weekdate(self, year, week, day):
-        """
-        Calculate the day of corresponding to the ISO year-week-day calendar.
-
-        This function is effectively the inverse of
-        :func:`datetime.date.isocalendar`.
-
-        :param year:
-            The year in the ISO calendar
-
-        :param week:
-            The week in the ISO calendar - range is [1, 53]
-
-        :param day:
-            The day in the ISO calendar - range is [1 (MON), 7 (SUN)]
-
-        :return:
-            Returns a :class:`datetime.date`
-        """
-        if not 0 < week < 54:
-            raise ValueError('Invalid week: {}'.format(week))
-
-        if not 0 < day < 8:  # Range is 1-7
-            raise ValueError('Invalid weekday: {}'.format(day))
-
-        # Get week 1 for the specific year:
-        jan_4 = date(year, 1, 4)  # Week 1 always has January 4th in it
-        week_1 = jan_4 - timedelta(days=jan_4.isocalendar()[2] - 1)
-
-        # Now add the specific number of weeks and days to get what we want
-        week_offset = (week - 1) * 7 + (day - 1)
-        return week_1 + timedelta(days=week_offset)
-
-    def _parse_isotime(self, timestr):
-        len_str = len(timestr)
-        components = [0, 0, 0, 0, None]
-        pos = 0
-        comp = -1
-
-        if len_str < 2:
-            raise ValueError('ISO time too short')
-
-        has_sep = False
-
-        while pos < len_str and comp < 5:
-            comp += 1
-
-            if timestr[pos:pos + 1] in b'-+Zz':
-                # Detect time zone boundary
-                components[-1] = self._parse_tzstr(timestr[pos:])
-                pos = len_str
-                break
-
-            if comp == 1 and timestr[pos:pos+1] == self._TIME_SEP:
-                has_sep = True
-                pos += 1
-            elif comp == 2 and has_sep:
-                if timestr[pos:pos+1] != self._TIME_SEP:
-                    raise ValueError('Inconsistent use of colon separator')
-                pos += 1
-
-            if comp < 3:
-                # Hour, minute, second
-                components[comp] = int(timestr[pos:pos + 2])
-                pos += 2
-
-            if comp == 3:
-                # Fraction of a second
-                frac = self._FRACTION_REGEX.match(timestr[pos:])
-                if not frac:
-                    continue
-
-                us_str = frac.group(1)[:6]  # Truncate to microseconds
-                components[comp] = int(us_str) * 10**(6 - len(us_str))
-                pos += len(frac.group())
-
-        if pos < len_str:
-            raise ValueError('Unused components in ISO string')
-
-        if components[0] == 24:
-            # Standard supports 00:00 and 24:00 as representations of midnight
-            if any(component != 0 for component in components[1:4]):
-                raise ValueError('Hour may only be 24 at 24:00:00.000')
-
-        return components
-
-    def _parse_tzstr(self, tzstr, zero_as_utc=True):
-        if tzstr == b'Z' or tzstr == b'z':
-            return tz.UTC
-
-        if len(tzstr) not in {3, 5, 6}:
-            raise ValueError('Time zone offset must be 1, 3, 5 or 6 characters')
-
-        if tzstr[0:1] == b'-':
-            mult = -1
-        elif tzstr[0:1] == b'+':
-            mult = 1
-        else:
-            raise ValueError('Time zone offset requires sign')
-
-        hours = int(tzstr[1:3])
-        if len(tzstr) == 3:
-            minutes = 0
-        else:
-            minutes = int(tzstr[(4 if tzstr[3:4] == self._TIME_SEP else 3):])
-
-        if zero_as_utc and hours == 0 and minutes == 0:
-            return tz.UTC
-        else:
-            if minutes > 59:
-                raise ValueError('Invalid minutes in time zone offset')
-
-            if hours > 23:
-                raise ValueError('Invalid hours in time zone offset')
-
-            return tz.tzoffset(None, mult * (hours * 60 + minutes) * 60)
-
-
-DEFAULT_ISOPARSER = isoparser()
-isoparse = DEFAULT_ISOPARSER.isoparse
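
A few usage examples of the module-level isoparse convenience function defined at the bottom of the file, with behavior per the docstrings above:

from dateutil.parser import isoparse

print(isoparse("2018-04-04"))                     # 2018-04-04 00:00:00
print(isoparse("2018-W14-3"))                     # ISO week date -> 2018-04-04 00:00:00 (Wednesday of week 14)
print(isoparse("2018-04-04T21:30:15.123+02:00"))  # aware datetime with tzoffset(None, 7200)
print(isoparse("2018-04-04T24:00"))               # 24:00 midnight rolls over -> 2018-04-05 00:00:00
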
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/H_V_A_R_.py
DELETED
@@ -1,5 +0,0 @@
-from .otBase import BaseTTXConverter
-
-
-class table_H_V_A_R_(BaseTTXConverter):
-    pass
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/woff2.py
DELETED
@@ -1,1688 +0,0 @@
|
|
1 |
-
from io import BytesIO
|
2 |
-
import sys
|
3 |
-
import array
|
4 |
-
import struct
|
5 |
-
from collections import OrderedDict
|
6 |
-
from fontTools.misc import sstruct
|
7 |
-
from fontTools.misc.arrayTools import calcIntBounds
|
8 |
-
from fontTools.misc.textTools import Tag, bytechr, byteord, bytesjoin, pad
|
9 |
-
from fontTools.ttLib import (
|
10 |
-
TTFont,
|
11 |
-
TTLibError,
|
12 |
-
getTableModule,
|
13 |
-
getTableClass,
|
14 |
-
getSearchRange,
|
15 |
-
)
|
16 |
-
from fontTools.ttLib.sfnt import (
|
17 |
-
SFNTReader,
|
18 |
-
SFNTWriter,
|
19 |
-
DirectoryEntry,
|
20 |
-
WOFFFlavorData,
|
21 |
-
sfntDirectoryFormat,
|
22 |
-
sfntDirectorySize,
|
23 |
-
SFNTDirectoryEntry,
|
24 |
-
sfntDirectoryEntrySize,
|
25 |
-
calcChecksum,
|
26 |
-
)
|
27 |
-
from fontTools.ttLib.tables import ttProgram, _g_l_y_f
|
28 |
-
import logging
|
29 |
-
|
30 |
-
|
31 |
-
log = logging.getLogger("fontTools.ttLib.woff2")
|
32 |
-
|
33 |
-
haveBrotli = False
|
34 |
-
try:
|
35 |
-
try:
|
36 |
-
import brotlicffi as brotli
|
37 |
-
except ImportError:
|
38 |
-
import brotli
|
39 |
-
haveBrotli = True
|
40 |
-
except ImportError:
|
41 |
-
pass
|
42 |
-
|
43 |
-
|
44 |
-
class WOFF2Reader(SFNTReader):
|
45 |
-
|
46 |
-
flavor = "woff2"
|
47 |
-
|
48 |
-
def __init__(self, file, checkChecksums=0, fontNumber=-1):
|
49 |
-
if not haveBrotli:
|
50 |
-
log.error(
|
51 |
-
"The WOFF2 decoder requires the Brotli Python extension, available at: "
|
52 |
-
"https://github.com/google/brotli"
|
53 |
-
)
|
54 |
-
raise ImportError("No module named brotli")
|
55 |
-
|
56 |
-
self.file = file
|
57 |
-
|
58 |
-
signature = Tag(self.file.read(4))
|
59 |
-
if signature != b"wOF2":
|
60 |
-
raise TTLibError("Not a WOFF2 font (bad signature)")
|
61 |
-
|
62 |
-
self.file.seek(0)
|
63 |
-
self.DirectoryEntry = WOFF2DirectoryEntry
|
64 |
-
data = self.file.read(woff2DirectorySize)
|
65 |
-
if len(data) != woff2DirectorySize:
|
66 |
-
raise TTLibError("Not a WOFF2 font (not enough data)")
|
67 |
-
sstruct.unpack(woff2DirectoryFormat, data, self)
|
68 |
-
|
69 |
-
self.tables = OrderedDict()
|
70 |
-
offset = 0
|
71 |
-
for i in range(self.numTables):
|
72 |
-
entry = self.DirectoryEntry()
|
73 |
-
entry.fromFile(self.file)
|
74 |
-
tag = Tag(entry.tag)
|
75 |
-
self.tables[tag] = entry
|
76 |
-
entry.offset = offset
|
77 |
-
offset += entry.length
|
78 |
-
|
79 |
-
totalUncompressedSize = offset
|
80 |
-
compressedData = self.file.read(self.totalCompressedSize)
|
81 |
-
decompressedData = brotli.decompress(compressedData)
|
82 |
-
if len(decompressedData) != totalUncompressedSize:
|
83 |
-
raise TTLibError(
|
84 |
-
"unexpected size for decompressed font data: expected %d, found %d"
|
85 |
-
% (totalUncompressedSize, len(decompressedData))
|
86 |
-
)
|
87 |
-
self.transformBuffer = BytesIO(decompressedData)
|
88 |
-
|
89 |
-
self.file.seek(0, 2)
|
90 |
-
if self.length != self.file.tell():
|
91 |
-
raise TTLibError("reported 'length' doesn't match the actual file size")
|
92 |
-
|
93 |
-
self.flavorData = WOFF2FlavorData(self)
|
94 |
-
|
95 |
-
# make empty TTFont to store data while reconstructing tables
|
96 |
-
self.ttFont = TTFont(recalcBBoxes=False, recalcTimestamp=False)
|
97 |
-
|
98 |
-
def __getitem__(self, tag):
|
99 |
-
"""Fetch the raw table data. Reconstruct transformed tables."""
|
100 |
-
entry = self.tables[Tag(tag)]
|
101 |
-
if not hasattr(entry, "data"):
|
102 |
-
if entry.transformed:
|
103 |
-
entry.data = self.reconstructTable(tag)
|
104 |
-
else:
|
105 |
-
entry.data = entry.loadData(self.transformBuffer)
|
106 |
-
return entry.data
|
107 |
-
|
108 |
-
def reconstructTable(self, tag):
|
109 |
-
"""Reconstruct table named 'tag' from transformed data."""
|
110 |
-
entry = self.tables[Tag(tag)]
|
111 |
-
rawData = entry.loadData(self.transformBuffer)
|
112 |
-
if tag == "glyf":
|
113 |
-
# no need to pad glyph data when reconstructing
|
114 |
-
padding = self.padding if hasattr(self, "padding") else None
|
115 |
-
data = self._reconstructGlyf(rawData, padding)
|
116 |
-
elif tag == "loca":
|
117 |
-
data = self._reconstructLoca()
|
118 |
-
elif tag == "hmtx":
|
119 |
-
data = self._reconstructHmtx(rawData)
|
120 |
-
else:
|
121 |
-
raise TTLibError("transform for table '%s' is unknown" % tag)
|
122 |
-
return data
|
123 |
-
|
124 |
-
def _reconstructGlyf(self, data, padding=None):
|
125 |
-
"""Return recostructed glyf table data, and set the corresponding loca's
|
126 |
-
locations. Optionally pad glyph offsets to the specified number of bytes.
|
127 |
-
"""
|
128 |
-
self.ttFont["loca"] = WOFF2LocaTable()
|
129 |
-
glyfTable = self.ttFont["glyf"] = WOFF2GlyfTable()
|
130 |
-
glyfTable.reconstruct(data, self.ttFont)
|
131 |
-
if padding:
|
132 |
-
glyfTable.padding = padding
|
133 |
-
data = glyfTable.compile(self.ttFont)
|
134 |
-
return data
|
135 |
-
|
136 |
-
def _reconstructLoca(self):
|
137 |
-
"""Return reconstructed loca table data."""
|
138 |
-
if "loca" not in self.ttFont:
|
139 |
-
# make sure glyf is reconstructed first
|
140 |
-
self.tables["glyf"].data = self.reconstructTable("glyf")
|
141 |
-
locaTable = self.ttFont["loca"]
|
142 |
-
data = locaTable.compile(self.ttFont)
|
143 |
-
if len(data) != self.tables["loca"].origLength:
|
144 |
-
raise TTLibError(
|
145 |
-
"reconstructed 'loca' table doesn't match original size: "
|
146 |
-
"expected %d, found %d" % (self.tables["loca"].origLength, len(data))
|
147 |
-
)
|
148 |
-
return data
|
149 |
-
|
150 |
-
def _reconstructHmtx(self, data):
|
151 |
-
"""Return reconstructed hmtx table data."""
|
152 |
-
# Before reconstructing 'hmtx' table we need to parse other tables:
|
153 |
-
# 'glyf' is required for reconstructing the sidebearings from the glyphs'
|
154 |
-
# bounding box; 'hhea' is needed for the numberOfHMetrics field.
|
155 |
-
if "glyf" in self.flavorData.transformedTables:
|
156 |
-
# transformed 'glyf' table is self-contained, thus 'loca' not needed
|
157 |
-
tableDependencies = ("maxp", "hhea", "glyf")
|
158 |
-
else:
|
159 |
-
# decompiling untransformed 'glyf' requires 'loca', which requires 'head'
|
160 |
-
tableDependencies = ("maxp", "head", "hhea", "loca", "glyf")
|
161 |
-
for tag in tableDependencies:
|
162 |
-
self._decompileTable(tag)
|
163 |
-
hmtxTable = self.ttFont["hmtx"] = WOFF2HmtxTable()
|
164 |
-
hmtxTable.reconstruct(data, self.ttFont)
|
165 |
-
data = hmtxTable.compile(self.ttFont)
|
166 |
-
return data
|
167 |
-
|
168 |
-
def _decompileTable(self, tag):
|
169 |
-
"""Decompile table data and store it inside self.ttFont."""
|
170 |
-
data = self[tag]
|
171 |
-
if self.ttFont.isLoaded(tag):
|
172 |
-
return self.ttFont[tag]
|
173 |
-
tableClass = getTableClass(tag)
|
174 |
-
table = tableClass(tag)
|
175 |
-
self.ttFont.tables[tag] = table
|
176 |
-
table.decompile(data, self.ttFont)
|
177 |
-
|
178 |
-
|
179 |
-
class WOFF2Writer(SFNTWriter):
|
180 |
-
|
181 |
-
flavor = "woff2"
|
182 |
-
|
183 |
-
def __init__(
|
184 |
-
self,
|
185 |
-
file,
|
186 |
-
numTables,
|
187 |
-
sfntVersion="\000\001\000\000",
|
188 |
-
flavor=None,
|
189 |
-
flavorData=None,
|
190 |
-
):
|
191 |
-
if not haveBrotli:
|
192 |
-
log.error(
|
193 |
-
"The WOFF2 encoder requires the Brotli Python extension, available at: "
|
194 |
-
"https://github.com/google/brotli"
|
195 |
-
)
|
196 |
-
raise ImportError("No module named brotli")
|
197 |
-
|
198 |
-
self.file = file
|
199 |
-
self.numTables = numTables
|
200 |
-
self.sfntVersion = Tag(sfntVersion)
|
201 |
-
self.flavorData = WOFF2FlavorData(data=flavorData)
|
202 |
-
|
203 |
-
self.directoryFormat = woff2DirectoryFormat
|
204 |
-
self.directorySize = woff2DirectorySize
|
205 |
-
self.DirectoryEntry = WOFF2DirectoryEntry
|
206 |
-
|
207 |
-
self.signature = Tag("wOF2")
|
208 |
-
|
209 |
-
self.nextTableOffset = 0
|
210 |
-
self.transformBuffer = BytesIO()
|
211 |
-
|
212 |
-
self.tables = OrderedDict()
|
213 |
-
|
214 |
-
# make empty TTFont to store data while normalising and transforming tables
|
215 |
-
self.ttFont = TTFont(recalcBBoxes=False, recalcTimestamp=False)
|
216 |
-
|
217 |
-
def __setitem__(self, tag, data):
|
218 |
-
"""Associate new entry named 'tag' with raw table data."""
|
219 |
-
if tag in self.tables:
|
220 |
-
raise TTLibError("cannot rewrite '%s' table" % tag)
|
221 |
-
if tag == "DSIG":
|
222 |
-
# always drop DSIG table, since the encoding process can invalidate it
|
223 |
-
self.numTables -= 1
|
224 |
-
return
|
225 |
-
|
226 |
-
entry = self.DirectoryEntry()
|
227 |
-
entry.tag = Tag(tag)
|
228 |
-
entry.flags = getKnownTagIndex(entry.tag)
|
229 |
-
# WOFF2 table data are written to disk only on close(), after all tags
|
230 |
-
# have been specified
|
231 |
-
entry.data = data
|
232 |
-
|
233 |
-
self.tables[tag] = entry
|
234 |
-
|
235 |
-
def close(self):
|
236 |
-
"""All tags must have been specified. Now write the table data and directory."""
|
237 |
-
if len(self.tables) != self.numTables:
|
238 |
-
raise TTLibError(
|
239 |
-
"wrong number of tables; expected %d, found %d"
|
240 |
-
% (self.numTables, len(self.tables))
|
241 |
-
)
|
242 |
-
|
243 |
-
if self.sfntVersion in ("\x00\x01\x00\x00", "true"):
|
244 |
-
isTrueType = True
|
245 |
-
elif self.sfntVersion == "OTTO":
|
246 |
-
isTrueType = False
|
247 |
-
else:
|
248 |
-
raise TTLibError("Not a TrueType or OpenType font (bad sfntVersion)")
|
249 |
-
|
250 |
-
# The WOFF2 spec no longer requires the glyph offsets to be 4-byte aligned.
|
251 |
-
# However, the reference WOFF2 implementation still fails to reconstruct
|
252 |
-
# 'unpadded' glyf tables, therefore we need to 'normalise' them.
|
253 |
-
# See:
|
254 |
-
# https://github.com/khaledhosny/ots/issues/60
|
255 |
-
# https://github.com/google/woff2/issues/15
|
256 |
-
if (
|
257 |
-
isTrueType
|
258 |
-
and "glyf" in self.flavorData.transformedTables
|
259 |
-
and "glyf" in self.tables
|
260 |
-
):
|
261 |
-
self._normaliseGlyfAndLoca(padding=4)
|
262 |
-
self._setHeadTransformFlag()
|
263 |
-
|
264 |
-
# To pass the legacy OpenType Sanitiser currently included in browsers,
|
265 |
-
# we must sort the table directory and data alphabetically by tag.
|
266 |
-
# See:
|
267 |
-
# https://github.com/google/woff2/pull/3
|
268 |
-
# https://lists.w3.org/Archives/Public/public-webfonts-wg/2015Mar/0000.html
|
269 |
-
#
|
270 |
-
# 2023: We rely on this in _transformTables where we expect that
|
271 |
-
# "loca" comes after "glyf" table.
|
272 |
-
self.tables = OrderedDict(sorted(self.tables.items()))
|
273 |
-
|
274 |
-
self.totalSfntSize = self._calcSFNTChecksumsLengthsAndOffsets()
|
275 |
-
|
276 |
-
fontData = self._transformTables()
|
277 |
-
compressedFont = brotli.compress(fontData, mode=brotli.MODE_FONT)
|
278 |
-
|
279 |
-
self.totalCompressedSize = len(compressedFont)
|
280 |
-
self.length = self._calcTotalSize()
|
281 |
-
self.majorVersion, self.minorVersion = self._getVersion()
|
282 |
-
self.reserved = 0
|
283 |
-
|
284 |
-
directory = self._packTableDirectory()
|
285 |
-
self.file.seek(0)
|
286 |
-
self.file.write(pad(directory + compressedFont, size=4))
|
287 |
-
self._writeFlavorData()
|
288 |
-
|
289 |
-
def _normaliseGlyfAndLoca(self, padding=4):
|
290 |
-
"""Recompile glyf and loca tables, aligning glyph offsets to multiples of
|
291 |
-
'padding' size. Update the head table's 'indexToLocFormat' accordingly while
|
292 |
-
compiling loca.
|
293 |
-
"""
|
294 |
-
if self.sfntVersion == "OTTO":
|
295 |
-
return
|
296 |
-
|
297 |
-
for tag in ("maxp", "head", "loca", "glyf", "fvar"):
|
298 |
-
if tag in self.tables:
|
299 |
-
self._decompileTable(tag)
|
300 |
-
self.ttFont["glyf"].padding = padding
|
301 |
-
for tag in ("glyf", "loca"):
|
302 |
-
self._compileTable(tag)
|
303 |
-
|
304 |
-
def _setHeadTransformFlag(self):
|
305 |
-
"""Set bit 11 of 'head' table flags to indicate that the font has undergone
|
306 |
-
a lossless modifying transform. Re-compile head table data."""
|
307 |
-
self._decompileTable("head")
|
308 |
-
self.ttFont["head"].flags |= 1 << 11
|
309 |
-
self._compileTable("head")
|
310 |
-
|
311 |
-
def _decompileTable(self, tag):
|
312 |
-
"""Fetch table data, decompile it, and store it inside self.ttFont."""
|
313 |
-
tag = Tag(tag)
|
314 |
-
if tag not in self.tables:
|
315 |
-
raise TTLibError("missing required table: %s" % tag)
|
316 |
-
if self.ttFont.isLoaded(tag):
|
317 |
-
return
|
318 |
-
data = self.tables[tag].data
|
319 |
-
if tag == "loca":
|
320 |
-
tableClass = WOFF2LocaTable
|
321 |
-
elif tag == "glyf":
|
322 |
-
tableClass = WOFF2GlyfTable
|
323 |
-
elif tag == "hmtx":
|
324 |
-
tableClass = WOFF2HmtxTable
|
325 |
-
else:
|
326 |
-
tableClass = getTableClass(tag)
|
327 |
-
table = tableClass(tag)
|
328 |
-
self.ttFont.tables[tag] = table
|
329 |
-
table.decompile(data, self.ttFont)
|
330 |
-
|
331 |
-
def _compileTable(self, tag):
|
332 |
-
"""Compile table and store it in its 'data' attribute."""
|
333 |
-
self.tables[tag].data = self.ttFont[tag].compile(self.ttFont)
|
334 |
-
|
335 |
-
def _calcSFNTChecksumsLengthsAndOffsets(self):
|
336 |
-
"""Compute the 'original' SFNT checksums, lengths and offsets for checksum
|
337 |
-
adjustment calculation. Return the total size of the uncompressed font.
|
338 |
-
"""
|
339 |
-
offset = sfntDirectorySize + sfntDirectoryEntrySize * len(self.tables)
|
340 |
-
for tag, entry in self.tables.items():
|
341 |
-
data = entry.data
|
342 |
-
entry.origOffset = offset
|
343 |
-
entry.origLength = len(data)
|
344 |
-
if tag == "head":
|
345 |
-
entry.checkSum = calcChecksum(data[:8] + b"\0\0\0\0" + data[12:])
|
346 |
-
else:
|
347 |
-
entry.checkSum = calcChecksum(data)
|
348 |
-
offset += (entry.origLength + 3) & ~3
|
349 |
-
return offset
|
350 |
-
|
351 |
-
def _transformTables(self):
|
352 |
-
"""Return transformed font data."""
|
353 |
-
transformedTables = self.flavorData.transformedTables
|
354 |
-
for tag, entry in self.tables.items():
|
355 |
-
data = None
|
356 |
-
if tag in transformedTables:
|
357 |
-
data = self.transformTable(tag)
|
358 |
-
if data is not None:
|
359 |
-
entry.transformed = True
|
360 |
-
if data is None:
|
361 |
-
if tag == "glyf":
|
362 |
-
# Currently we always sort table tags so
|
363 |
-
# 'loca' comes after 'glyf'.
|
364 |
-
transformedTables.discard("loca")
|
365 |
-
# pass-through the table data without transformation
|
366 |
-
data = entry.data
|
367 |
-
entry.transformed = False
|
368 |
-
entry.offset = self.nextTableOffset
|
369 |
-
entry.saveData(self.transformBuffer, data)
|
370 |
-
self.nextTableOffset += entry.length
|
371 |
-
self.writeMasterChecksum()
|
372 |
-
fontData = self.transformBuffer.getvalue()
|
373 |
-
return fontData
|
374 |
-
|
375 |
-
def transformTable(self, tag):
|
376 |
-
"""Return transformed table data, or None if some pre-conditions aren't
|
        met -- in which case, the non-transformed table data will be used.
        """
        if tag == "loca":
            data = b""
        elif tag == "glyf":
            for tag in ("maxp", "head", "loca", "glyf"):
                self._decompileTable(tag)
            glyfTable = self.ttFont["glyf"]
            data = glyfTable.transform(self.ttFont)
        elif tag == "hmtx":
            if "glyf" not in self.tables:
                return
            for tag in ("maxp", "head", "hhea", "loca", "glyf", "hmtx"):
                self._decompileTable(tag)
            hmtxTable = self.ttFont["hmtx"]
            data = hmtxTable.transform(self.ttFont)  # can be None
        else:
            raise TTLibError("Transform for table '%s' is unknown" % tag)
        return data

    def _calcMasterChecksum(self):
        """Calculate checkSumAdjustment."""
        tags = list(self.tables.keys())
        checksums = []
        for i in range(len(tags)):
            checksums.append(self.tables[tags[i]].checkSum)

        # Create a SFNT directory for checksum calculation purposes
        self.searchRange, self.entrySelector, self.rangeShift = getSearchRange(
            self.numTables, 16
        )
        directory = sstruct.pack(sfntDirectoryFormat, self)
        tables = sorted(self.tables.items())
        for tag, entry in tables:
            sfntEntry = SFNTDirectoryEntry()
            sfntEntry.tag = entry.tag
            sfntEntry.checkSum = entry.checkSum
            sfntEntry.offset = entry.origOffset
            sfntEntry.length = entry.origLength
            directory = directory + sfntEntry.toString()

        directory_end = sfntDirectorySize + len(self.tables) * sfntDirectoryEntrySize
        assert directory_end == len(directory)

        checksums.append(calcChecksum(directory))
        checksum = sum(checksums) & 0xFFFFFFFF
        # BiboAfba!
        checksumadjustment = (0xB1B0AFBA - checksum) & 0xFFFFFFFF
        return checksumadjustment

    def writeMasterChecksum(self):
        """Write checkSumAdjustment to the transformBuffer."""
        checksumadjustment = self._calcMasterChecksum()
        self.transformBuffer.seek(self.tables["head"].offset + 8)
        self.transformBuffer.write(struct.pack(">L", checksumadjustment))

    def _calcTotalSize(self):
        """Calculate total size of WOFF2 font, including any meta- and/or private data."""
        offset = self.directorySize
        for entry in self.tables.values():
            offset += len(entry.toString())
        offset += self.totalCompressedSize
        offset = (offset + 3) & ~3
        offset = self._calcFlavorDataOffsetsAndSize(offset)
        return offset

    def _calcFlavorDataOffsetsAndSize(self, start):
        """Calculate offsets and lengths for any meta- and/or private data."""
        offset = start
        data = self.flavorData
        if data.metaData:
            self.metaOrigLength = len(data.metaData)
            self.metaOffset = offset
            self.compressedMetaData = brotli.compress(
                data.metaData, mode=brotli.MODE_TEXT
            )
            self.metaLength = len(self.compressedMetaData)
            offset += self.metaLength
        else:
            self.metaOffset = self.metaLength = self.metaOrigLength = 0
            self.compressedMetaData = b""
        if data.privData:
            # make sure private data is padded to 4-byte boundary
            offset = (offset + 3) & ~3
            self.privOffset = offset
            self.privLength = len(data.privData)
            offset += self.privLength
        else:
            self.privOffset = self.privLength = 0
        return offset
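
    # A quick illustrative sketch (not part of the original file): the
    # "(offset + 3) & ~3" idiom used above rounds an offset up to the next
    # multiple of four, which is how WOFF2 keeps its data blocks 4-byte
    # aligned, e.g.:
    #   (0 + 3) & ~3 == 0,  (1 + 3) & ~3 == 4,  (4 + 3) & ~3 == 4,  (5 + 3) & ~3 == 8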

    def _getVersion(self):
        """Return the WOFF2 font's (majorVersion, minorVersion) tuple."""
        data = self.flavorData
        if data.majorVersion is not None and data.minorVersion is not None:
            return data.majorVersion, data.minorVersion
        else:
            # if None, return 'fontRevision' from 'head' table
            if "head" in self.tables:
                return struct.unpack(">HH", self.tables["head"].data[4:8])
            else:
                return 0, 0

    def _packTableDirectory(self):
        """Return WOFF2 table directory data."""
        directory = sstruct.pack(self.directoryFormat, self)
        for entry in self.tables.values():
            directory = directory + entry.toString()
        return directory

    def _writeFlavorData(self):
        """Write metadata and/or private data using appropriate padding."""
        compressedMetaData = self.compressedMetaData
        privData = self.flavorData.privData
        if compressedMetaData and privData:
            compressedMetaData = pad(compressedMetaData, size=4)
        if compressedMetaData:
            self.file.seek(self.metaOffset)
            assert self.file.tell() == self.metaOffset
            self.file.write(compressedMetaData)
        if privData:
            self.file.seek(self.privOffset)
            assert self.file.tell() == self.privOffset
            self.file.write(privData)

    def reordersTables(self):
        return True


# -- woff2 directory helpers and cruft

woff2DirectoryFormat = """
    > # big endian
    signature:           4s  # "wOF2"
    sfntVersion:         4s
    length:              L   # total woff2 file size
    numTables:           H   # number of tables
    reserved:            H   # set to 0
    totalSfntSize:       L   # uncompressed size
    totalCompressedSize: L   # compressed size
    majorVersion:        H   # major version of WOFF file
    minorVersion:        H   # minor version of WOFF file
    metaOffset:          L   # offset to metadata block
    metaLength:          L   # length of compressed metadata
    metaOrigLength:      L   # length of uncompressed metadata
    privOffset:          L   # offset to private data block
    privLength:          L   # length of private data block
"""

woff2DirectorySize = sstruct.calcsize(woff2DirectoryFormat)

woff2KnownTags = (
    "cmap", "head", "hhea", "hmtx", "maxp", "name", "OS/2", "post",
    "cvt ", "fpgm", "glyf", "loca", "prep", "CFF ", "VORG", "EBDT",
    "EBLC", "gasp", "hdmx", "kern", "LTSH", "PCLT", "VDMX", "vhea",
    "vmtx", "BASE", "GDEF", "GPOS", "GSUB", "EBSC", "JSTF", "MATH",
    "CBDT", "CBLC", "COLR", "CPAL", "SVG ", "sbix", "acnt", "avar",
    "bdat", "bloc", "bsln", "cvar", "fdsc", "feat", "fmtx", "fvar",
    "gvar", "hsty", "just", "lcar", "mort", "morx", "opbd", "prop",
    "trak", "Zapf", "Silf", "Glat", "Gloc", "Feat", "Sill",
)

woff2FlagsFormat = """
    > # big endian
    flags: B  # table type and flags
"""

woff2FlagsSize = sstruct.calcsize(woff2FlagsFormat)

woff2UnknownTagFormat = """
    > # big endian
    tag: 4s  # 4-byte tag (optional)
"""

woff2UnknownTagSize = sstruct.calcsize(woff2UnknownTagFormat)

woff2UnknownTagIndex = 0x3F

woff2Base128MaxSize = 5
woff2DirectoryEntryMaxSize = (
    woff2FlagsSize + woff2UnknownTagSize + 2 * woff2Base128MaxSize
)

woff2TransformedTableTags = ("glyf", "loca")

woff2GlyfTableFormat = """
    > # big endian
    version:               H  # = 0x0000
    optionFlags:           H  # Bit 0: we have overlapSimpleBitmap[], Bits 1-15: reserved
    numGlyphs:             H  # Number of glyphs
    indexFormat:           H  # Offset format for loca table
    nContourStreamSize:    L  # Size of nContour stream
    nPointsStreamSize:     L  # Size of nPoints stream
    flagStreamSize:        L  # Size of flag stream
    glyphStreamSize:       L  # Size of glyph stream
    compositeStreamSize:   L  # Size of composite stream
    bboxStreamSize:        L  # Combined size of bboxBitmap and bboxStream
    instructionStreamSize: L  # Size of instruction stream
"""

woff2GlyfTableFormatSize = sstruct.calcsize(woff2GlyfTableFormat)

bboxFormat = """
    > # big endian
    xMin: h
    yMin: h
    xMax: h
    yMax: h
"""

woff2OverlapSimpleBitmapFlag = 0x0001

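# Illustrative sketch (not in the original module): a WOFF2 directory entry is
# at most 15 bytes -- one flags byte, an optional 4-byte tag, and two
# UIntBase128 values of up to 5 bytes each -- which is why fromFile() below
# reads exactly woff2DirectoryEntryMaxSize bytes before trimming:
#   assert woff2DirectoryEntryMaxSize == 1 + 4 + 2 * 5 == 15
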
def getKnownTagIndex(tag):
    """Return index of 'tag' in woff2KnownTags list. Return 63 if not found."""
    for i in range(len(woff2KnownTags)):
        if tag == woff2KnownTags[i]:
            return i
    return woff2UnknownTagIndex


class WOFF2DirectoryEntry(DirectoryEntry):
    def fromFile(self, file):
        pos = file.tell()
        data = file.read(woff2DirectoryEntryMaxSize)
        left = self.fromString(data)
        consumed = len(data) - len(left)
        file.seek(pos + consumed)

    def fromString(self, data):
        if len(data) < 1:
            raise TTLibError("can't read table 'flags': not enough data")
        dummy, data = sstruct.unpack2(woff2FlagsFormat, data, self)
        if self.flags & 0x3F == 0x3F:
            # if bits [0..5] of the flags byte == 63, read a 4-byte arbitrary tag value
            if len(data) < woff2UnknownTagSize:
                raise TTLibError("can't read table 'tag': not enough data")
            dummy, data = sstruct.unpack2(woff2UnknownTagFormat, data, self)
        else:
            # otherwise, tag is derived from a fixed 'Known Tags' table
            self.tag = woff2KnownTags[self.flags & 0x3F]
        self.tag = Tag(self.tag)
        self.origLength, data = unpackBase128(data)
        self.length = self.origLength
        if self.transformed:
            self.length, data = unpackBase128(data)
            if self.tag == "loca" and self.length != 0:
                raise TTLibError("the transformLength of the 'loca' table must be 0")
        # return left over data
        return data

    def toString(self):
        data = bytechr(self.flags)
        if (self.flags & 0x3F) == 0x3F:
            data += struct.pack(">4s", self.tag.tobytes())
        data += packBase128(self.origLength)
        if self.transformed:
            data += packBase128(self.length)
        return data

    @property
    def transformVersion(self):
        """Return bits 6-7 of table entry's flags, which indicate the preprocessing
        transformation version number (between 0 and 3).
        """
        return self.flags >> 6

    @transformVersion.setter
    def transformVersion(self, value):
        assert 0 <= value <= 3
        self.flags |= value << 6

    @property
    def transformed(self):
        """Return True if the table has any transformation, else return False."""
        # For all tables in a font, except for 'glyf' and 'loca', the transformation
        # version 0 indicates the null transform (where the original table data is
        # passed directly to the Brotli compressor). For 'glyf' and 'loca' tables,
        # transformation version 3 indicates the null transform
        if self.tag in {"glyf", "loca"}:
            return self.transformVersion != 3
        else:
            return self.transformVersion != 0

    @transformed.setter
    def transformed(self, booleanValue):
        # here we assume that a non-null transform means version 0 for 'glyf' and
        # 'loca' and 1 for every other table (e.g. hmtx); but that may change as
        # new transformation formats are introduced in the future (if ever).
        if self.tag in {"glyf", "loca"}:
            self.transformVersion = 3 if not booleanValue else 0
        else:
            self.transformVersion = int(booleanValue)

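# Illustrative sketch (not in the original module) of how the flags byte packs
# together: bits 0-5 hold the known-tag index, bits 6-7 the transform version.
# For an untransformed 'glyf' table (null transform is version 3 for glyf/loca):
#   tagIndex = getKnownTagIndex("glyf")   # == 10
#   flags = tagIndex | (3 << 6)           # == 0xCA
# whereas a transformed 'glyf' entry keeps version 0, so flags == 0x0A.
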
class WOFF2LocaTable(getTableClass("loca")):
    """Same as parent class. The only difference is that it attempts to preserve
    the 'indexFormat' as encoded in the WOFF2 glyf table.
    """

    def __init__(self, tag=None):
        self.tableTag = Tag(tag or "loca")

    def compile(self, ttFont):
        try:
            max_location = max(self.locations)
        except AttributeError:
            self.set([])
            max_location = 0
        if "glyf" in ttFont and hasattr(ttFont["glyf"], "indexFormat"):
            # compile loca using the indexFormat specified in the WOFF2 glyf table
            indexFormat = ttFont["glyf"].indexFormat
            if indexFormat == 0:
                if max_location >= 0x20000:
                    raise TTLibError("indexFormat is 0 but loca offsets > 0x20000")
                if not all(l % 2 == 0 for l in self.locations):
                    raise TTLibError(
                        "indexFormat is 0 but loca offsets not multiples of 2"
                    )
                locations = array.array("H")
                for i in range(len(self.locations)):
                    locations.append(self.locations[i] // 2)
            else:
                locations = array.array("I", self.locations)
            if sys.byteorder != "big":
                locations.byteswap()
            data = locations.tobytes()
        else:
            # use the most compact indexFormat given the current glyph offsets
            data = super(WOFF2LocaTable, self).compile(ttFont)
        return data

class WOFF2GlyfTable(getTableClass("glyf")):
    """Decoder/Encoder for WOFF2 'glyf' table transform."""

    subStreams = (
        "nContourStream",
        "nPointsStream",
        "flagStream",
        "glyphStream",
        "compositeStream",
        "bboxStream",
        "instructionStream",
    )

    def __init__(self, tag=None):
        self.tableTag = Tag(tag or "glyf")

    def reconstruct(self, data, ttFont):
        """Decompile transformed 'glyf' data."""
        inputDataSize = len(data)

        if inputDataSize < woff2GlyfTableFormatSize:
            raise TTLibError("not enough 'glyf' data")
        dummy, data = sstruct.unpack2(woff2GlyfTableFormat, data, self)
        offset = woff2GlyfTableFormatSize

        for stream in self.subStreams:
            size = getattr(self, stream + "Size")
            setattr(self, stream, data[:size])
            data = data[size:]
            offset += size

        hasOverlapSimpleBitmap = self.optionFlags & woff2OverlapSimpleBitmapFlag
        self.overlapSimpleBitmap = None
        if hasOverlapSimpleBitmap:
            overlapSimpleBitmapSize = (self.numGlyphs + 7) >> 3
            self.overlapSimpleBitmap = array.array("B", data[:overlapSimpleBitmapSize])
            offset += overlapSimpleBitmapSize

        if offset != inputDataSize:
            raise TTLibError(
                "incorrect size of transformed 'glyf' table: expected %d, received %d bytes"
                % (offset, inputDataSize)
            )

        bboxBitmapSize = ((self.numGlyphs + 31) >> 5) << 2
        bboxBitmap = self.bboxStream[:bboxBitmapSize]
        self.bboxBitmap = array.array("B", bboxBitmap)
        self.bboxStream = self.bboxStream[bboxBitmapSize:]

        self.nContourStream = array.array("h", self.nContourStream)
        if sys.byteorder != "big":
            self.nContourStream.byteswap()
        assert len(self.nContourStream) == self.numGlyphs

        if "head" in ttFont:
            ttFont["head"].indexToLocFormat = self.indexFormat
        try:
            self.glyphOrder = ttFont.getGlyphOrder()
        except:
            self.glyphOrder = None
        if self.glyphOrder is None:
            self.glyphOrder = [".notdef"]
            self.glyphOrder.extend(["glyph%.5d" % i for i in range(1, self.numGlyphs)])
        else:
            if len(self.glyphOrder) != self.numGlyphs:
                raise TTLibError(
                    "incorrect glyphOrder: expected %d glyphs, found %d"
                    % (len(self.glyphOrder), self.numGlyphs)
                )

        glyphs = self.glyphs = {}
        for glyphID, glyphName in enumerate(self.glyphOrder):
            glyph = self._decodeGlyph(glyphID)
            glyphs[glyphName] = glyph

    def transform(self, ttFont):
        """Return transformed 'glyf' data"""
        self.numGlyphs = len(self.glyphs)
        assert len(self.glyphOrder) == self.numGlyphs
        if "maxp" in ttFont:
            ttFont["maxp"].numGlyphs = self.numGlyphs
        self.indexFormat = ttFont["head"].indexToLocFormat

        for stream in self.subStreams:
            setattr(self, stream, b"")
        bboxBitmapSize = ((self.numGlyphs + 31) >> 5) << 2
        self.bboxBitmap = array.array("B", [0] * bboxBitmapSize)

        self.overlapSimpleBitmap = array.array("B", [0] * ((self.numGlyphs + 7) >> 3))
        for glyphID in range(self.numGlyphs):
            try:
                self._encodeGlyph(glyphID)
            except NotImplementedError:
                return None
        hasOverlapSimpleBitmap = any(self.overlapSimpleBitmap)

        self.bboxStream = self.bboxBitmap.tobytes() + self.bboxStream
        for stream in self.subStreams:
            setattr(self, stream + "Size", len(getattr(self, stream)))
        self.version = 0
        self.optionFlags = 0
        if hasOverlapSimpleBitmap:
            self.optionFlags |= woff2OverlapSimpleBitmapFlag
        data = sstruct.pack(woff2GlyfTableFormat, self)
        data += bytesjoin([getattr(self, s) for s in self.subStreams])
        if hasOverlapSimpleBitmap:
            data += self.overlapSimpleBitmap.tobytes()
        return data

    def _decodeGlyph(self, glyphID):
        glyph = getTableModule("glyf").Glyph()
        glyph.numberOfContours = self.nContourStream[glyphID]
        if glyph.numberOfContours == 0:
            return glyph
        elif glyph.isComposite():
            self._decodeComponents(glyph)
        else:
            self._decodeCoordinates(glyph)
            self._decodeOverlapSimpleFlag(glyph, glyphID)
        self._decodeBBox(glyphID, glyph)
        return glyph

    def _decodeComponents(self, glyph):
        data = self.compositeStream
        glyph.components = []
        more = 1
        haveInstructions = 0
        while more:
            component = getTableModule("glyf").GlyphComponent()
            more, haveInstr, data = component.decompile(data, self)
            haveInstructions = haveInstructions | haveInstr
            glyph.components.append(component)
        self.compositeStream = data
        if haveInstructions:
            self._decodeInstructions(glyph)

    def _decodeCoordinates(self, glyph):
        data = self.nPointsStream
        endPtsOfContours = []
        endPoint = -1
        for i in range(glyph.numberOfContours):
            ptsOfContour, data = unpack255UShort(data)
            endPoint += ptsOfContour
            endPtsOfContours.append(endPoint)
        glyph.endPtsOfContours = endPtsOfContours
        self.nPointsStream = data
        self._decodeTriplets(glyph)
        self._decodeInstructions(glyph)

    def _decodeOverlapSimpleFlag(self, glyph, glyphID):
        if self.overlapSimpleBitmap is None or glyph.numberOfContours <= 0:
            return
        byte = glyphID >> 3
        bit = glyphID & 7
        if self.overlapSimpleBitmap[byte] & (0x80 >> bit):
            glyph.flags[0] |= _g_l_y_f.flagOverlapSimple

    def _decodeInstructions(self, glyph):
        glyphStream = self.glyphStream
        instructionStream = self.instructionStream
        instructionLength, glyphStream = unpack255UShort(glyphStream)
        glyph.program = ttProgram.Program()
        glyph.program.fromBytecode(instructionStream[:instructionLength])
        self.glyphStream = glyphStream
        self.instructionStream = instructionStream[instructionLength:]

    def _decodeBBox(self, glyphID, glyph):
        haveBBox = bool(self.bboxBitmap[glyphID >> 3] & (0x80 >> (glyphID & 7)))
        if glyph.isComposite() and not haveBBox:
            raise TTLibError("no bbox values for composite glyph %d" % glyphID)
        if haveBBox:
            dummy, self.bboxStream = sstruct.unpack2(bboxFormat, self.bboxStream, glyph)
        else:
            glyph.recalcBounds(self)

    def _decodeTriplets(self, glyph):
        def withSign(flag, baseval):
            assert 0 <= baseval and baseval < 65536, "integer overflow"
            return baseval if flag & 1 else -baseval

        nPoints = glyph.endPtsOfContours[-1] + 1
        flagSize = nPoints
        if flagSize > len(self.flagStream):
            raise TTLibError("not enough 'flagStream' data")
        flagsData = self.flagStream[:flagSize]
        self.flagStream = self.flagStream[flagSize:]
        flags = array.array("B", flagsData)

        triplets = array.array("B", self.glyphStream)
        nTriplets = len(triplets)
        assert nPoints <= nTriplets

        x = 0
        y = 0
        glyph.coordinates = getTableModule("glyf").GlyphCoordinates.zeros(nPoints)
        glyph.flags = array.array("B")
        tripletIndex = 0
        for i in range(nPoints):
            flag = flags[i]
            onCurve = not bool(flag >> 7)
            flag &= 0x7F
            if flag < 84:
                nBytes = 1
            elif flag < 120:
                nBytes = 2
            elif flag < 124:
                nBytes = 3
            else:
                nBytes = 4
            assert (tripletIndex + nBytes) <= nTriplets
            if flag < 10:
                dx = 0
                dy = withSign(flag, ((flag & 14) << 7) + triplets[tripletIndex])
            elif flag < 20:
                dx = withSign(flag, (((flag - 10) & 14) << 7) + triplets[tripletIndex])
                dy = 0
            elif flag < 84:
                b0 = flag - 20
                b1 = triplets[tripletIndex]
                dx = withSign(flag, 1 + (b0 & 0x30) + (b1 >> 4))
                dy = withSign(flag >> 1, 1 + ((b0 & 0x0C) << 2) + (b1 & 0x0F))
            elif flag < 120:
                b0 = flag - 84
                dx = withSign(flag, 1 + ((b0 // 12) << 8) + triplets[tripletIndex])
                dy = withSign(
                    flag >> 1, 1 + (((b0 % 12) >> 2) << 8) + triplets[tripletIndex + 1]
                )
            elif flag < 124:
                b2 = triplets[tripletIndex + 1]
                dx = withSign(flag, (triplets[tripletIndex] << 4) + (b2 >> 4))
                dy = withSign(
                    flag >> 1, ((b2 & 0x0F) << 8) + triplets[tripletIndex + 2]
                )
            else:
                dx = withSign(
                    flag, (triplets[tripletIndex] << 8) + triplets[tripletIndex + 1]
                )
                dy = withSign(
                    flag >> 1,
                    (triplets[tripletIndex + 2] << 8) + triplets[tripletIndex + 3],
                )
            tripletIndex += nBytes
            x += dx
            y += dy
            glyph.coordinates[i] = (x, y)
            glyph.flags.append(int(onCurve))
        bytesConsumed = tripletIndex
        self.glyphStream = self.glyphStream[bytesConsumed:]
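
    # Worked example (illustrative, not in the original file): in the triplet
    # coding above, flag byte 23 falls in the 20..83 range (one extra data
    # byte), so with b0 = 23 - 20 = 3 and a data byte b1 = 0x25:
    #   dx = withSign(23, 1 + (3 & 0x30) + (0x25 >> 4))       ->  +3
    #   dy = withSign(23 >> 1, 1 + ((3 & 0x0C) << 2) + 0x05)  ->  +6
    # i.e. the byte pair (0x17, 0x25) encodes an on-curve point delta of
    # (3, 6), matching what _encodeTriplets below produces for absX, absY < 65.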

    def _encodeGlyph(self, glyphID):
        glyphName = self.getGlyphName(glyphID)
        glyph = self[glyphName]
        self.nContourStream += struct.pack(">h", glyph.numberOfContours)
        if glyph.numberOfContours == 0:
            return
        elif glyph.isComposite():
            self._encodeComponents(glyph)
        elif glyph.isVarComposite():
            raise NotImplementedError
        else:
            self._encodeCoordinates(glyph)
            self._encodeOverlapSimpleFlag(glyph, glyphID)
        self._encodeBBox(glyphID, glyph)

    def _encodeComponents(self, glyph):
        lastcomponent = len(glyph.components) - 1
        more = 1
        haveInstructions = 0
        for i in range(len(glyph.components)):
            if i == lastcomponent:
                haveInstructions = hasattr(glyph, "program")
                more = 0
            component = glyph.components[i]
            self.compositeStream += component.compile(more, haveInstructions, self)
        if haveInstructions:
            self._encodeInstructions(glyph)

    def _encodeCoordinates(self, glyph):
        lastEndPoint = -1
        if _g_l_y_f.flagCubic in glyph.flags:
            raise NotImplementedError
        for endPoint in glyph.endPtsOfContours:
            ptsOfContour = endPoint - lastEndPoint
            self.nPointsStream += pack255UShort(ptsOfContour)
            lastEndPoint = endPoint
        self._encodeTriplets(glyph)
        self._encodeInstructions(glyph)

    def _encodeOverlapSimpleFlag(self, glyph, glyphID):
        if glyph.numberOfContours <= 0:
            return
        if glyph.flags[0] & _g_l_y_f.flagOverlapSimple:
            byte = glyphID >> 3
            bit = glyphID & 7
            self.overlapSimpleBitmap[byte] |= 0x80 >> bit

    def _encodeInstructions(self, glyph):
        instructions = glyph.program.getBytecode()
        self.glyphStream += pack255UShort(len(instructions))
        self.instructionStream += instructions

    def _encodeBBox(self, glyphID, glyph):
        assert glyph.numberOfContours != 0, "empty glyph has no bbox"
        if not glyph.isComposite():
            # for simple glyphs, compare the encoded bounding box info with the calculated
            # values, and if they match omit the bounding box info
            currentBBox = glyph.xMin, glyph.yMin, glyph.xMax, glyph.yMax
            calculatedBBox = calcIntBounds(glyph.coordinates)
            if currentBBox == calculatedBBox:
                return
        self.bboxBitmap[glyphID >> 3] |= 0x80 >> (glyphID & 7)
        self.bboxStream += sstruct.pack(bboxFormat, glyph)

    def _encodeTriplets(self, glyph):
        assert len(glyph.coordinates) == len(glyph.flags)
        coordinates = glyph.coordinates.copy()
        coordinates.absoluteToRelative()

        flags = array.array("B")
        triplets = array.array("B")
        for i in range(len(coordinates)):
            onCurve = glyph.flags[i] & _g_l_y_f.flagOnCurve
            x, y = coordinates[i]
            absX = abs(x)
            absY = abs(y)
            onCurveBit = 0 if onCurve else 128
            xSignBit = 0 if (x < 0) else 1
            ySignBit = 0 if (y < 0) else 1
            xySignBits = xSignBit + 2 * ySignBit

            if x == 0 and absY < 1280:
                flags.append(onCurveBit + ((absY & 0xF00) >> 7) + ySignBit)
                triplets.append(absY & 0xFF)
            elif y == 0 and absX < 1280:
                flags.append(onCurveBit + 10 + ((absX & 0xF00) >> 7) + xSignBit)
                triplets.append(absX & 0xFF)
            elif absX < 65 and absY < 65:
                flags.append(
                    onCurveBit
                    + 20
                    + ((absX - 1) & 0x30)
                    + (((absY - 1) & 0x30) >> 2)
                    + xySignBits
                )
                triplets.append((((absX - 1) & 0xF) << 4) | ((absY - 1) & 0xF))
            elif absX < 769 and absY < 769:
                flags.append(
                    onCurveBit
                    + 84
                    + 12 * (((absX - 1) & 0x300) >> 8)
                    + (((absY - 1) & 0x300) >> 6)
                    + xySignBits
                )
                triplets.append((absX - 1) & 0xFF)
                triplets.append((absY - 1) & 0xFF)
            elif absX < 4096 and absY < 4096:
                flags.append(onCurveBit + 120 + xySignBits)
                triplets.append(absX >> 4)
                triplets.append(((absX & 0xF) << 4) | (absY >> 8))
                triplets.append(absY & 0xFF)
            else:
                flags.append(onCurveBit + 124 + xySignBits)
                triplets.append(absX >> 8)
                triplets.append(absX & 0xFF)
                triplets.append(absY >> 8)
                triplets.append(absY & 0xFF)

        self.flagStream += flags.tobytes()
        self.glyphStream += triplets.tobytes()

class WOFF2HmtxTable(getTableClass("hmtx")):
    def __init__(self, tag=None):
        self.tableTag = Tag(tag or "hmtx")

    def reconstruct(self, data, ttFont):
        (flags,) = struct.unpack(">B", data[:1])
        data = data[1:]
        if flags & 0b11111100 != 0:
            raise TTLibError("Bits 2-7 of '%s' flags are reserved" % self.tableTag)

        # When bit 0 is _not_ set, the lsb[] array is present
        hasLsbArray = flags & 1 == 0
        # When bit 1 is _not_ set, the leftSideBearing[] array is present
        hasLeftSideBearingArray = flags & 2 == 0
        if hasLsbArray and hasLeftSideBearingArray:
            raise TTLibError(
                "either bits 0 or 1 (or both) must be set in transformed '%s' flags"
                % self.tableTag
            )

        glyfTable = ttFont["glyf"]
        headerTable = ttFont["hhea"]
        glyphOrder = glyfTable.glyphOrder
        numGlyphs = len(glyphOrder)
        numberOfHMetrics = min(int(headerTable.numberOfHMetrics), numGlyphs)

        assert len(data) >= 2 * numberOfHMetrics
        advanceWidthArray = array.array("H", data[: 2 * numberOfHMetrics])
        if sys.byteorder != "big":
            advanceWidthArray.byteswap()
        data = data[2 * numberOfHMetrics :]

        if hasLsbArray:
            assert len(data) >= 2 * numberOfHMetrics
            lsbArray = array.array("h", data[: 2 * numberOfHMetrics])
            if sys.byteorder != "big":
                lsbArray.byteswap()
            data = data[2 * numberOfHMetrics :]
        else:
            # compute (proportional) glyphs' lsb from their xMin
            lsbArray = array.array("h")
            for i, glyphName in enumerate(glyphOrder):
                if i >= numberOfHMetrics:
                    break
                glyph = glyfTable[glyphName]
                xMin = getattr(glyph, "xMin", 0)
                lsbArray.append(xMin)

        numberOfSideBearings = numGlyphs - numberOfHMetrics
        if hasLeftSideBearingArray:
            assert len(data) >= 2 * numberOfSideBearings
            leftSideBearingArray = array.array("h", data[: 2 * numberOfSideBearings])
            if sys.byteorder != "big":
                leftSideBearingArray.byteswap()
            data = data[2 * numberOfSideBearings :]
        else:
            # compute (monospaced) glyphs' leftSideBearing from their xMin
            leftSideBearingArray = array.array("h")
            for i, glyphName in enumerate(glyphOrder):
                if i < numberOfHMetrics:
                    continue
                glyph = glyfTable[glyphName]
                xMin = getattr(glyph, "xMin", 0)
                leftSideBearingArray.append(xMin)

        if data:
            raise TTLibError("too much '%s' table data" % self.tableTag)

        self.metrics = {}
        for i in range(numberOfHMetrics):
            glyphName = glyphOrder[i]
            advanceWidth, lsb = advanceWidthArray[i], lsbArray[i]
            self.metrics[glyphName] = (advanceWidth, lsb)
        lastAdvance = advanceWidthArray[-1]
        for i in range(numberOfSideBearings):
            glyphName = glyphOrder[i + numberOfHMetrics]
            self.metrics[glyphName] = (lastAdvance, leftSideBearingArray[i])

    def transform(self, ttFont):
        glyphOrder = ttFont.getGlyphOrder()
        glyf = ttFont["glyf"]
        hhea = ttFont["hhea"]
        numberOfHMetrics = hhea.numberOfHMetrics

        # check if any of the proportional glyphs has left sidebearings that
        # differ from their xMin bounding box values.
        hasLsbArray = False
        for i in range(numberOfHMetrics):
            glyphName = glyphOrder[i]
            lsb = self.metrics[glyphName][1]
            if lsb != getattr(glyf[glyphName], "xMin", 0):
                hasLsbArray = True
                break

        # do the same for the monospaced glyphs (if any) at the end of hmtx table
        hasLeftSideBearingArray = False
        for i in range(numberOfHMetrics, len(glyphOrder)):
            glyphName = glyphOrder[i]
            lsb = self.metrics[glyphName][1]
            if lsb != getattr(glyf[glyphName], "xMin", 0):
                hasLeftSideBearingArray = True
                break

        # if we need to encode both sidebearings arrays, then no transformation is
        # applicable, and we must use the untransformed hmtx data
        if hasLsbArray and hasLeftSideBearingArray:
            return

        # set bit 0 and 1 when the respective arrays are _not_ present
        flags = 0
        if not hasLsbArray:
            flags |= 1 << 0
        if not hasLeftSideBearingArray:
            flags |= 1 << 1

        data = struct.pack(">B", flags)

        advanceWidthArray = array.array(
            "H",
            [
                self.metrics[glyphName][0]
                for i, glyphName in enumerate(glyphOrder)
                if i < numberOfHMetrics
            ],
        )
        if sys.byteorder != "big":
            advanceWidthArray.byteswap()
        data += advanceWidthArray.tobytes()

        if hasLsbArray:
            lsbArray = array.array(
                "h",
                [
                    self.metrics[glyphName][1]
                    for i, glyphName in enumerate(glyphOrder)
                    if i < numberOfHMetrics
                ],
            )
            if sys.byteorder != "big":
                lsbArray.byteswap()
            data += lsbArray.tobytes()

        if hasLeftSideBearingArray:
            leftSideBearingArray = array.array(
                "h",
                [
                    self.metrics[glyphOrder[i]][1]
                    for i in range(numberOfHMetrics, len(glyphOrder))
                ],
            )
            if sys.byteorder != "big":
                leftSideBearingArray.byteswap()
            data += leftSideBearingArray.tobytes()

        return data

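# Illustrative note (not in the original file): the transformed hmtx flags byte
# can therefore only be 0b01 (lsb[] omitted), 0b10 (leftSideBearing[] omitted)
# or 0b11 (both omitted, every sidebearing equal to its glyph's xMin); 0b00
# would mean both arrays are present, in which case transform() above returns
# None and the untransformed table data is used instead.
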
class WOFF2FlavorData(WOFFFlavorData):

    Flavor = "woff2"

    def __init__(self, reader=None, data=None, transformedTables=None):
        """Data class that holds the WOFF2 header major/minor version, any
        metadata or private data (as bytes strings), and the set of
        table tags that have transformations applied (if reader is not None),
        or will have once the WOFF2 font is compiled.

        Args:
            reader: an SFNTReader (or subclass) object to read flavor data from.
            data: another WOFFFlavorData object to initialise data from.
            transformedTables: set of strings containing table tags to be transformed.

        Raises:
            ImportError if the brotli module is not installed.

        NOTE: The 'reader' argument, on the one hand, and the 'data' and
        'transformedTables' arguments, on the other hand, are mutually exclusive.
        """
        if not haveBrotli:
            raise ImportError("No module named brotli")

        if reader is not None:
            if data is not None:
                raise TypeError("'reader' and 'data' arguments are mutually exclusive")
            if transformedTables is not None:
                raise TypeError(
                    "'reader' and 'transformedTables' arguments are mutually exclusive"
                )

        if transformedTables is not None and (
            "glyf" in transformedTables
            and "loca" not in transformedTables
            or "loca" in transformedTables
            and "glyf" not in transformedTables
        ):
            raise ValueError("'glyf' and 'loca' must be transformed (or not) together")
        super(WOFF2FlavorData, self).__init__(reader=reader)
        if reader:
            transformedTables = [
                tag for tag, entry in reader.tables.items() if entry.transformed
            ]
        elif data:
            self.majorVersion = data.majorVersion
            self.minorVersion = data.minorVersion
            self.metaData = data.metaData
            self.privData = data.privData
            if transformedTables is None and hasattr(data, "transformedTables"):
                transformedTables = data.transformedTables

        if transformedTables is None:
            transformedTables = woff2TransformedTableTags

        self.transformedTables = set(transformedTables)

    def _decompress(self, rawData):
        return brotli.decompress(rawData)

def unpackBase128(data):
    r"""Read one to five bytes from UIntBase128-encoded input string, and return
    a tuple containing the decoded integer plus any leftover data.

    >>> unpackBase128(b'\x3f\x00\x00') == (63, b"\x00\x00")
    True
    >>> unpackBase128(b'\x8f\xff\xff\xff\x7f')[0] == 4294967295
    True
    >>> unpackBase128(b'\x80\x80\x3f')  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
      File "<stdin>", line 1, in ?
    TTLibError: UIntBase128 value must not start with leading zeros
    >>> unpackBase128(b'\x8f\xff\xff\xff\xff\x7f')[0]  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
      File "<stdin>", line 1, in ?
    TTLibError: UIntBase128-encoded sequence is longer than 5 bytes
    >>> unpackBase128(b'\x90\x80\x80\x80\x00')[0]  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
      File "<stdin>", line 1, in ?
    TTLibError: UIntBase128 value exceeds 2**32-1
    """
    if len(data) == 0:
        raise TTLibError("not enough data to unpack UIntBase128")
    result = 0
    if byteord(data[0]) == 0x80:
        # font must be rejected if UIntBase128 value starts with 0x80
        raise TTLibError("UIntBase128 value must not start with leading zeros")
    for i in range(woff2Base128MaxSize):
        if len(data) == 0:
            raise TTLibError("not enough data to unpack UIntBase128")
        code = byteord(data[0])
        data = data[1:]
        # if any of the top seven bits are set then we're about to overflow
        if result & 0xFE000000:
            raise TTLibError("UIntBase128 value exceeds 2**32-1")
        # set current value = old value times 128 bitwise-or (byte bitwise-and 127)
        result = (result << 7) | (code & 0x7F)
        # repeat until the most significant bit of byte is false
        if (code & 0x80) == 0:
            # return result plus left over data
            return result, data
    # make sure not to exceed the size bound
    raise TTLibError("UIntBase128-encoded sequence is longer than 5 bytes")


def base128Size(n):
    """Return the length in bytes of a UIntBase128-encoded sequence with value n.

    >>> base128Size(0)
    1
    >>> base128Size(24567)
    3
    >>> base128Size(2**32-1)
    5
    """
    assert n >= 0
    size = 1
    while n >= 128:
        size += 1
        n >>= 7
    return size


def packBase128(n):
    r"""Encode unsigned integer in range 0 to 2**32-1 (inclusive) to a string of
    bytes using UIntBase128 variable-length encoding. Produce the shortest possible
    encoding.

    >>> packBase128(63) == b"\x3f"
    True
    >>> packBase128(2**32-1) == b'\x8f\xff\xff\xff\x7f'
    True
    """
    if n < 0 or n >= 2**32:
        raise TTLibError("UIntBase128 format requires 0 <= integer <= 2**32-1")
    data = b""
    size = base128Size(n)
    for i in range(size):
        b = (n >> (7 * (size - i - 1))) & 0x7F
        if i < size - 1:
            b |= 0x80
        data += struct.pack("B", b)
    return data

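# Round-trip sketch (illustrative, not in the original file): 300 needs two
# UIntBase128 bytes -- 300 == 2 * 128 + 44, so it packs as 0x82 0x2C -- and
# unpacking returns the decoded value plus whatever data follows:
#   packBase128(300) == b"\x82\x2c"
#   unpackBase128(b"\x82\x2c\xff") == (300, b"\xff")
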
def unpack255UShort(data):
    """Read one to three bytes from 255UInt16-encoded input string, and return a
    tuple containing the decoded integer plus any leftover data.

    >>> unpack255UShort(bytechr(252))[0]
    252

    Note that some numbers (e.g. 506) can have multiple encodings:
    >>> unpack255UShort(struct.pack("BB", 254, 0))[0]
    506
    >>> unpack255UShort(struct.pack("BB", 255, 253))[0]
    506
    >>> unpack255UShort(struct.pack("BBB", 253, 1, 250))[0]
    506
    """
    code = byteord(data[:1])
    data = data[1:]
    if code == 253:
        # read two more bytes as an unsigned short
        if len(data) < 2:
            raise TTLibError("not enough data to unpack 255UInt16")
        (result,) = struct.unpack(">H", data[:2])
        data = data[2:]
    elif code == 254:
        # read another byte, plus 253 * 2
        if len(data) == 0:
            raise TTLibError("not enough data to unpack 255UInt16")
        result = byteord(data[:1])
        result += 506
        data = data[1:]
    elif code == 255:
        # read another byte, plus 253
        if len(data) == 0:
            raise TTLibError("not enough data to unpack 255UInt16")
        result = byteord(data[:1])
        result += 253
        data = data[1:]
    else:
        # leave as is if lower than 253
        result = code
    # return result plus left over data
    return result, data


def pack255UShort(value):
    r"""Encode unsigned integer in range 0 to 65535 (inclusive) to a bytestring
    using 255UInt16 variable-length encoding.

    >>> pack255UShort(252) == b'\xfc'
    True
    >>> pack255UShort(506) == b'\xfe\x00'
    True
    >>> pack255UShort(762) == b'\xfd\x02\xfa'
    True
    """
    if value < 0 or value > 0xFFFF:
        raise TTLibError("255UInt16 format requires 0 <= integer <= 65535")
    if value < 253:
        return struct.pack(">B", value)
    elif value < 506:
        return struct.pack(">BB", 255, value - 253)
    elif value < 762:
        return struct.pack(">BB", 254, value - 506)
    else:
        return struct.pack(">BH", 253, value)

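# Round-trip sketch (illustrative, not in the original file): the encoder picks
# the shortest form, so values in 253..505 use the 255-prefixed one-byte form:
#   pack255UShort(300) == struct.pack("BB", 255, 47)   # 300 - 253 == 47
#   unpack255UShort(b"\xff\x2f")[0] == 300
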
def compress(input_file, output_file, transform_tables=None):
    """Compress OpenType font to WOFF2.

    Args:
        input_file: a file path, file or file-like object (open in binary mode)
            containing an OpenType font (either CFF- or TrueType-flavored).
        output_file: a file path, file or file-like object where to save the
            compressed WOFF2 font.
        transform_tables: Optional[Iterable[str]]: a set of table tags for which
            to enable preprocessing transformations. By default, only 'glyf'
            and 'loca' tables are transformed. An empty set means disable all
            transformations.
    """
    log.info("Processing %s => %s" % (input_file, output_file))

    font = TTFont(input_file, recalcBBoxes=False, recalcTimestamp=False)
    font.flavor = "woff2"

    if transform_tables is not None:
        font.flavorData = WOFF2FlavorData(
            data=font.flavorData, transformedTables=transform_tables
        )

    font.save(output_file, reorderTables=False)


def decompress(input_file, output_file):
    """Decompress WOFF2 font to OpenType font.

    Args:
        input_file: a file path, file or file-like object (open in binary mode)
            containing a compressed WOFF2 font.
        output_file: a file path, file or file-like object where to save the
            decompressed OpenType font.
    """
    log.info("Processing %s => %s" % (input_file, output_file))

    font = TTFont(input_file, recalcBBoxes=False, recalcTimestamp=False)
    font.flavor = None
    font.flavorData = None
    font.save(output_file, reorderTables=True)

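# Usage sketch (illustrative; "MyFont.ttf" is a hypothetical file name):
#   from fontTools.ttLib.woff2 import compress, decompress
#   compress("MyFont.ttf", "MyFont.woff2")                           # glyf/loca transformed
#   compress("MyFont.ttf", "MyFont.woff2", transform_tables=set())   # null transforms only
#   decompress("MyFont.woff2", "MyFont.restored.ttf")
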
def main(args=None):
    """Compress and decompress WOFF2 fonts"""
    import argparse
    from fontTools import configLogger
    from fontTools.ttx import makeOutputFileName

    class _HelpAction(argparse._HelpAction):
        def __call__(self, parser, namespace, values, option_string=None):
            subparsers_actions = [
                action
                for action in parser._actions
                if isinstance(action, argparse._SubParsersAction)
            ]
            for subparsers_action in subparsers_actions:
                for choice, subparser in subparsers_action.choices.items():
                    print(subparser.format_help())
            parser.exit()

    class _NoGlyfTransformAction(argparse.Action):
        def __call__(self, parser, namespace, values, option_string=None):
            namespace.transform_tables.difference_update({"glyf", "loca"})

    class _HmtxTransformAction(argparse.Action):
        def __call__(self, parser, namespace, values, option_string=None):
            namespace.transform_tables.add("hmtx")

    parser = argparse.ArgumentParser(
        prog="fonttools ttLib.woff2", description=main.__doc__, add_help=False
    )

    parser.add_argument(
        "-h", "--help", action=_HelpAction, help="show this help message and exit"
    )

    parser_group = parser.add_subparsers(title="sub-commands")
    parser_compress = parser_group.add_parser(
        "compress", description="Compress a TTF or OTF font to WOFF2"
    )
    parser_decompress = parser_group.add_parser(
        "decompress", description="Decompress a WOFF2 font to OTF"
    )

    for subparser in (parser_compress, parser_decompress):
        group = subparser.add_mutually_exclusive_group(required=False)
        group.add_argument(
            "-v",
            "--verbose",
            action="store_true",
            help="print more messages to console",
        )
        group.add_argument(
            "-q",
            "--quiet",
            action="store_true",
            help="do not print messages to console",
        )

    parser_compress.add_argument(
        "input_file",
        metavar="INPUT",
        help="the input OpenType font (.ttf or .otf)",
    )
    parser_decompress.add_argument(
        "input_file",
        metavar="INPUT",
        help="the input WOFF2 font",
    )

    parser_compress.add_argument(
        "-o",
        "--output-file",
        metavar="OUTPUT",
        help="the output WOFF2 font",
    )
    parser_decompress.add_argument(
        "-o",
        "--output-file",
        metavar="OUTPUT",
        help="the output OpenType font",
    )

    transform_group = parser_compress.add_argument_group()
    transform_group.add_argument(
        "--no-glyf-transform",
        dest="transform_tables",
        nargs=0,
        action=_NoGlyfTransformAction,
        help="Do not transform glyf (and loca) tables",
    )
    transform_group.add_argument(
        "--hmtx-transform",
        dest="transform_tables",
        nargs=0,
        action=_HmtxTransformAction,
        help="Enable optional transformation for 'hmtx' table",
    )

    parser_compress.set_defaults(
        subcommand=compress,
        transform_tables={"glyf", "loca"},
    )
    parser_decompress.set_defaults(subcommand=decompress)

    options = vars(parser.parse_args(args))

    subcommand = options.pop("subcommand", None)
    if not subcommand:
        parser.print_help()
        return

    quiet = options.pop("quiet")
    verbose = options.pop("verbose")
    configLogger(
        level=("ERROR" if quiet else "DEBUG" if verbose else "INFO"),
    )

    if not options["output_file"]:
        if subcommand is compress:
            extension = ".woff2"
        elif subcommand is decompress:
            # choose .ttf/.otf file extension depending on sfntVersion
            with open(options["input_file"], "rb") as f:
                f.seek(4)  # skip 'wOF2' signature
                sfntVersion = f.read(4)
            assert len(sfntVersion) == 4, "not enough data"
            extension = ".otf" if sfntVersion == b"OTTO" else ".ttf"
        else:
            raise AssertionError(subcommand)
        options["output_file"] = makeOutputFileName(
            options["input_file"], outputDir=None, extension=extension
        )

    try:
        subcommand(**options)
    except TTLibError as e:
        parser.error(e)


if __name__ == "__main__":
    sys.exit(main())
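
# Command-line sketch (illustrative; "MyFont.ttf" is a hypothetical file name),
# based on the prog name declared in the argument parser above:
#   fonttools ttLib.woff2 compress -o MyFont.woff2 MyFont.ttf
#   fonttools ttLib.woff2 decompress MyFont.woff2
# The second call picks the .ttf/.otf output extension from the sfntVersion.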
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/ModifyUpload-d8fc50ab.js
DELETED
@@ -1,2 +0,0 @@
import{S as g,e as w,s as _,J as p,K as o,L as i,p as k,M as m,n as u,A as b,N as z,O as B,k as $,U as v,o as C,z as d,u as I,v as h,y as E,x as M,B as j}from"./index-3370be2a.js";import"./Button-89624748.js";import{I as L}from"./IconButton-abe5ede9.js";import"./ModifyUpload.svelte_svelte_type_style_lang-d2acacf0.js";function S(a){let e,s,t,l;return{c(){e=p("svg"),s=p("g"),t=p("path"),l=p("path"),o(t,"d","M18,6L6.087,17.913"),i(t,"fill","none"),i(t,"fill-rule","nonzero"),i(t,"stroke-width","2px"),o(s,"transform","matrix(1.14096,-0.140958,-0.140958,1.14096,-0.0559523,0.0559523)"),o(l,"d","M4.364,4.364L19.636,19.636"),i(l,"fill","none"),i(l,"fill-rule","nonzero"),i(l,"stroke-width","2px"),o(e,"width","100%"),o(e,"height","100%"),o(e,"viewBox","0 0 24 24"),o(e,"version","1.1"),o(e,"xmlns","http://www.w3.org/2000/svg"),o(e,"xmlns:xlink","http://www.w3.org/1999/xlink"),o(e,"xml:space","preserve"),o(e,"stroke","currentColor"),i(e,"fill-rule","evenodd"),i(e,"clip-rule","evenodd"),i(e,"stroke-linecap","round"),i(e,"stroke-linejoin","round")},m(n,r){k(n,e,r),m(e,s),m(s,t),m(e,l)},p:u,i:u,o:u,d(n){n&&b(e)}}}class U extends g{constructor(e){super(),w(this,e,null,S,_,{})}}function q(a){let e,s;return{c(){e=p("svg"),s=p("path"),o(s,"d","M17 3a2.828 2.828 0 1 1 4 4L7.5 20.5 2 22l1.5-5.5L17 3z"),o(e,"xmlns","http://www.w3.org/2000/svg"),o(e,"width","100%"),o(e,"height","100%"),o(e,"viewBox","0 0 24 24"),o(e,"fill","none"),o(e,"stroke","currentColor"),o(e,"stroke-width","1.5"),o(e,"stroke-linecap","round"),o(e,"stroke-linejoin","round"),o(e,"class","feather feather-edit-2")},m(t,l){k(t,e,l),m(e,s)},p:u,i:u,o:u,d(t){t&&b(e)}}}class y extends g{constructor(e){super(),w(this,e,null,q,_,{})}}function x(a){let e,s;return e=new L({props:{Icon:y,label:"Edit"}}),e.$on("click",a[3]),{c(){$(e.$$.fragment)},m(t,l){C(e,t,l),s=!0},p:u,i(t){s||(d(e.$$.fragment,t),s=!0)},o(t){h(e.$$.fragment,t),s=!1},d(t){M(e,t)}}}function A(a){let e,s,t,l,n=a[0]&&x(a);return t=new L({props:{Icon:U,label:"Clear"}}),t.$on("click",a[4]),{c(){e=z("div"),n&&n.c(),s=B(),$(t.$$.fragment),o(e,"class","svelte-19sk1im"),v(e,"not-absolute",!a[1]),i(e,"position",a[1]?"absolute":"static")},m(r,c){k(r,e,c),n&&n.m(e,null),m(e,s),C(t,e,null),l=!0},p(r,[c]){r[0]?n?(n.p(r,c),c&1&&d(n,1)):(n=x(r),n.c(),d(n,1),n.m(e,s)):n&&(I(),h(n,1,1,()=>{n=null}),E()),(!l||c&2)&&v(e,"not-absolute",!r[1]),c&2&&i(e,"position",r[1]?"absolute":"static")},i(r){l||(d(n),d(t.$$.fragment,r),l=!0)},o(r){h(n),h(t.$$.fragment,r),l=!1},d(r){r&&b(e),n&&n.d(),M(t)}}}function D(a,e,s){let{editable:t=!1}=e,{absolute:l=!0}=e;const n=j(),r=()=>n("edit"),c=f=>{n("clear"),f.stopPropagation()};return a.$$set=f=>{"editable"in f&&s(0,t=f.editable),"absolute"in f&&s(1,l=f.absolute)},[t,l,n,r,c]}class P extends g{constructor(e){super(),w(this,e,D,A,_,{editable:0,absolute:1})}}export{U as C,P as M};
//# sourceMappingURL=ModifyUpload-d8fc50ab.js.map
spaces/DaleChen/AutoGPT/run_continuous.sh
DELETED
@@ -1,3 +0,0 @@
-#!/bin/bash
-
-./run.sh --continuous $@

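For illustration, the same wrapper sketched in Python: "$@" in the deleted script expands to every argument the caller passed, so extra flags flow through to run.sh unchanged. A minimal sketch, assuming run.sh sits in the working directory:

import subprocess
import sys

# Forward --continuous plus any caller-supplied flags to the underlying
# script, mirroring what "$@" does in the shell wrapper above.
subprocess.run(["./run.sh", "--continuous", *sys.argv[1:]], check=True)
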
spaces/DeepLabCut/MegaDetector_DeepLabCut/app.py
DELETED
@@ -1,179 +0,0 @@
-# Built from https://huggingface.co/spaces/hlydecker/MegaDetector_v5
-# Built from https://huggingface.co/spaces/sofmi/MegaDetector_DLClive/blob/main/app.py
-# Built from https://huggingface.co/spaces/Neslihan/megadetector_dlcmodels/blob/main/app.py
-
-import os
-import yaml
-import numpy as np
-from matplotlib import cm
-import gradio as gr
-
-from PIL import Image, ImageColor, ImageFont, ImageDraw
-# check git lfs pull!!
-from DLC_models.download_utils import DownloadModel
-from dlclive import DLCLive, Processor
-
-from viz_utils import save_results_as_json, draw_keypoints_on_image, draw_bbox_w_text, save_results_only_dlc
-from detection_utils import predict_md, crop_animal_detections, predict_dlc
-from ui_utils import gradio_inputs_for_MD_DLC, gradio_outputs_for_MD_DLC, gradio_description_and_examples
-
-# import pdb
-#########################################
-# Input params - Global vars
-
-MD_models_dict = {'md_v5a': "MD_models/md_v5a.0.0.pt", #
-                  'md_v5b': "MD_models/md_v5b.0.0.pt"}
-
-# DLC models target dirs
-DLC_models_dict = {#'full_cat': "DLC_models/DLC_Cat/",
-                   #'full_dog': "DLC_models/DLC_Dog/",
-                   'full_human': "DLC_models/DLC_human_dancing/",
-                   'full_macaque': 'DLC_models/DLC_monkey/',
-                   'primate_face': "DLC_models/DLC_FacialLandmarks/"}
-
-
-# FONTS = {'amiko': "fonts/Amiko-Regular.ttf",
-#          'nature': "fonts/LoveNature.otf",
-#          'painter':"fonts/PainterDecorator.otf",
-#          'animals': "fonts/UncialAnimals.ttf",
-#          'zen': "fonts/ZEN.TTF"}
-#####################################################
-def predict_pipeline(img_input,
-                     mega_model_input,
-                     dlc_model_input_str,
-                     flag_dlc_only,
-                     flag_show_str_labels,
-                     bbox_likelihood_th,
-                     kpts_likelihood_th,
-                     font_style,
-                     font_size,
-                     keypt_color,
-                     marker_size,
-                     ):
-
-    if not flag_dlc_only:
-        ############################################################
-        # ### Run Megadetector
-        md_results = predict_md(img_input,
-                                MD_models_dict[mega_model_input], #mega_model_input,
-                                size=640) #Image.fromarray(results.imgs[0])
-
-        ################################################################
-        # Obtain animal crops for bboxes with confidence above th
-        list_crops = crop_animal_detections(img_input,
-                                            md_results,
-                                            bbox_likelihood_th)
-
-    ############################################################
-    ## Get DLC model and label map
-
-    # If model is found: do not download (previous execution is likely within same day)
-    # TODO: can we ask the user whether to reload dlc model if a directory is found?
-    if os.path.isdir(DLC_models_dict[dlc_model_input_str]) and \
-       len(os.listdir(DLC_models_dict[dlc_model_input_str])) > 0:
-        path_to_DLCmodel = DLC_models_dict[dlc_model_input_str]
-    else:
-        path_to_DLCmodel = DownloadModel(dlc_model_input_str,
-                                         DLC_models_dict[dlc_model_input_str])
-
-    # extract map label ids to strings
-    pose_cfg_path = os.path.join(DLC_models_dict[dlc_model_input_str],
-                                 'pose_cfg.yaml')
-    with open(pose_cfg_path, "r") as stream:
-        pose_cfg_dict = yaml.safe_load(stream)
-    map_label_id_to_str = dict([(k,v) for k,v in zip([el[0] for el in pose_cfg_dict['all_joints']], # pose_cfg_dict['all_joints'] is a list of one-element lists,
-                                                     pose_cfg_dict['all_joints_names'])])
-
-    ##############################################################
-    # Run DLC and visualise results
-    dlc_proc = Processor()
-
-    # if required: ignore MD crops and run DLC on full image [mostly for testing]
-    if flag_dlc_only:
-        # compute kpts on input img
-        list_kpts_per_crop = predict_dlc([np.asarray(img_input)],
-                                         kpts_likelihood_th,
-                                         path_to_DLCmodel,
-                                         dlc_proc)
-        # draw kpts on input img #fix!
-        draw_keypoints_on_image(img_input,
-                                list_kpts_per_crop[0], # a numpy array with shape [num_keypoints, 2].
-                                map_label_id_to_str,
-                                flag_show_str_labels,
-                                use_normalized_coordinates=False,
-                                font_style=font_style,
-                                font_size=font_size,
-                                keypt_color=keypt_color,
-                                marker_size=marker_size)
-
-        donw_file = save_results_only_dlc(list_kpts_per_crop[0], map_label_id_to_str,dlc_model_input_str)
-
-        return img_input, donw_file
-
-    else:
-        # Compute kpts for each crop
-        list_kpts_per_crop = predict_dlc(list_crops,
-                                         kpts_likelihood_th,
-                                         path_to_DLCmodel,
-                                         dlc_proc)
-
-        # resize input image to match megadetector output
-        img_background = img_input.resize((md_results.ims[0].shape[1],
-                                           md_results.ims[0].shape[0]))
-
-        # draw keypoints on each crop and paste to background img
-        for ic, (np_crop, kpts_crop) in enumerate(zip(list_crops,
-                                                      list_kpts_per_crop)):
-
-            img_crop = Image.fromarray(np_crop)
-
-            # Draw keypts on crop
-            draw_keypoints_on_image(img_crop,
-                                    kpts_crop, # a numpy array with shape [num_keypoints, 2].
-                                    map_label_id_to_str,
-                                    flag_show_str_labels,
-                                    use_normalized_coordinates=False, # if True, then I should use md_results.xyxyn for list_kpts_crop
-                                    font_style=font_style,
-                                    font_size=font_size,
-                                    keypt_color=keypt_color,
-                                    marker_size=marker_size)
-
-            # Paste crop in original image
-            img_background.paste(img_crop,
-                                 box = tuple([int(t) for t in md_results.xyxy[0][ic,:2]]))
-
-            # Plot bbox
-            bb_per_animal = md_results.xyxy[0].tolist()[ic]
-            pred = md_results.xyxy[0].tolist()[ic][4]
-            if bbox_likelihood_th < pred:
-                draw_bbox_w_text(img_background,
-                                 bb_per_animal,
-                                 font_style=font_style,
-                                 font_size=font_size) # TODO: add selectable color for bbox?
-
-
-        # Save detection results as json
-        download_file = save_results_as_json(md_results,list_kpts_per_crop,map_label_id_to_str, bbox_likelihood_th,dlc_model_input_str,mega_model_input)
-
-        return img_background, download_file
-
-#########################################################
-# Define user interface and launch
-inputs = gradio_inputs_for_MD_DLC(list(MD_models_dict.keys()),
-                                  list(DLC_models_dict.keys()))
-outputs = gradio_outputs_for_MD_DLC()
-[gr_title,
- gr_description,
- examples] = gradio_description_and_examples()
-
-# launch
-demo = gr.Interface(predict_pipeline,
-                    inputs=inputs,
-                    outputs=outputs,
-                    title=gr_title,
-                    description=gr_description,
-                    examples=examples,
-                    theme="huggingface")
-
-demo.launch(enable_queue=True, share=True)
-

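Aside: the map_label_id_to_str construction in the deleted file pairs each one-element list in pose_cfg_dict['all_joints'] with the matching entry of all_joints_names. A minimal standalone sketch of that step; the YAML fragment here is a made-up example, not taken from a real DeepLabCut model:

import yaml

# Hypothetical pose_cfg.yaml fragment; real DLC models list many more joints.
pose_cfg_yaml = """
all_joints: [[0], [1], [2]]
all_joints_names: [nose, left_eye, right_eye]
"""

pose_cfg_dict = yaml.safe_load(pose_cfg_yaml)

# 'all_joints' is a list of one-element lists, so el[0] unwraps each joint id.
map_label_id_to_str = {el[0]: name
                       for el, name in zip(pose_cfg_dict['all_joints'],
                                           pose_cfg_dict['all_joints_names'])}

print(map_label_id_to_str)  # {0: 'nose', 1: 'left_eye', 2: 'right_eye'}
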
spaces/DhanushPrabhuS/pothole_yolov8_nano/README.md
DELETED
@@ -1,13 +0,0 @@
----
-title: Pothole Yolov8 Nano
-emoji: 🌖
-colorFrom: pink
-colorTo: blue
-sdk: gradio
-sdk_version: 3.16.1
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference

spaces/EuroPython2022/mmocr-demo/configs/_base_/recog_pipelines/master_pipeline.py
DELETED
@@ -1,42 +0,0 @@
-img_norm_cfg = dict(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
-train_pipeline = [
-    dict(type='LoadImageFromFile'),
-    dict(
-        type='ResizeOCR',
-        height=48,
-        min_width=48,
-        max_width=160,
-        keep_aspect_ratio=True),
-    dict(type='ToTensorOCR'),
-    dict(type='NormalizeOCR', **img_norm_cfg),
-    dict(
-        type='Collect',
-        keys=['img'],
-        meta_keys=[
-            'filename', 'ori_shape', 'img_shape', 'text', 'valid_ratio',
-            'resize_shape'
-        ]),
-]
-test_pipeline = [
-    dict(type='LoadImageFromFile'),
-    dict(
-        type='MultiRotateAugOCR',
-        rotate_degrees=[0, 90, 270],
-        transforms=[
-            dict(
-                type='ResizeOCR',
-                height=48,
-                min_width=48,
-                max_width=160,
-                keep_aspect_ratio=True),
-            dict(type='ToTensorOCR'),
-            dict(type='NormalizeOCR', **img_norm_cfg),
-            dict(
-                type='Collect',
-                keys=['img'],
-                meta_keys=[
-                    'filename', 'ori_shape', 'img_shape', 'valid_ratio',
-                    'img_norm_cfg', 'ori_filename', 'resize_shape'
-                ]),
-        ])
-]

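Aside: configs like the one above describe a pipeline as a list of dicts, where 'type' names a transform and the remaining keys are its constructor arguments. The toy runner below illustrates that convention only; it is not MMOCR's actual Registry/Compose machinery, and both stub transforms are invented for the example:

# Toy registry: each config dict's 'type' selects a class, the rest are kwargs.
TRANSFORMS = {}

def register(cls):
    TRANSFORMS[cls.__name__] = cls
    return cls

@register
class LoadImageFromFile:
    def __call__(self, results):
        # Stand-in for real image I/O.
        results['img'] = '<pixels of %s>' % results['filename']
        return results

@register
class ResizeOCR:
    def __init__(self, height, min_width, max_width, keep_aspect_ratio):
        self.height = height  # toy: only height is used below
    def __call__(self, results):
        results['img_shape'] = (self.height, 'variable width')
        return results

def compose(cfg_list):
    steps = []
    for cfg in cfg_list:
        cfg = dict(cfg)                    # copy so the config is not mutated
        cls = TRANSFORMS[cfg.pop('type')]  # 'type' selects the transform class
        steps.append(cls(**cfg))           # remaining keys are constructor kwargs
    def run(results):
        for step in steps:
            results = step(results)
        return results
    return run

pipeline = compose([
    dict(type='LoadImageFromFile'),
    dict(type='ResizeOCR', height=48, min_width=48, max_width=160,
         keep_aspect_ratio=True),
])
print(pipeline({'filename': 'word_1.png'}))
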
spaces/FL33TW00D/whisper-turbo/_next/static/chunks/pages/_error-84d94505c9f773f4.js
DELETED
@@ -1 +0,0 @@
-(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[820],{6354:function(n,_,u){(window.__NEXT_P=window.__NEXT_P||[]).push(["/_error",function(){return u(9549)}])}},function(n){n.O(0,[774,888,179],function(){return n(n.s=6354)}),_N_E=n.O()}]);

spaces/Farazquraishi/pendora/app.py
DELETED
@@ -1,203 +0,0 @@
-import gradio
-from huggingface_hub import Repository
-import os
-
-from utils.utils import norm_crop, estimate_norm, inverse_estimate_norm, transform_landmark_points, get_lm
-from networks.layers import AdaIN, AdaptiveAttention
-from tensorflow_addons.layers import InstanceNormalization
-import numpy as np
-import cv2
-from scipy.ndimage import gaussian_filter
-
-from tensorflow.keras.models import load_model
-from options.swap_options import SwapOptions
-
-# .
-# token = os.environ['model_fetch']
-
-opt = SwapOptions().parse()
-token = os.environ['token']
-
-retina_repo = Repository(local_dir="retina_models", clone_from="felixrosberg/RetinaFace")
-
-from retinaface.models import *
-
-RetinaFace = load_model("retina_models/RetinaFace-Res50.h5",
-                        custom_objects={"FPN": FPN,
-                                        "SSH": SSH,
-                                        "BboxHead": BboxHead,
-                                        "LandmarkHead": LandmarkHead,
-                                        "ClassHead": ClassHead}
-                        )
-
-arc_repo = Repository(local_dir="arcface_model", clone_from="felixrosberg/ArcFace")
-ArcFace = load_model("arcface_model/ArcFace-Res50.h5")
-ArcFaceE = load_model("arcface_model/ArcFacePerceptual-Res50.h5")
-
-g_repo = Repository(local_dir="g_model_c_hq", clone_from="felixrosberg/FaceDancer",use_auth_token=token)
-G = load_model("g_model_c_hq/FaceDancer_config_c_HQ.h5", custom_objects={"AdaIN": AdaIN,
-                                                                         "AdaptiveAttention": AdaptiveAttention,
-                                                                         "InstanceNormalization": InstanceNormalization})
-
-# r_repo = Repository(local_dir="reconstruction_attack", clone_from="felixrosberg/reconstruction_attack",
-#                     private=True, use_auth_token=token)
-# R = load_model("reconstruction_attack/reconstructor_42.h5", custom_objects={"AdaIN": AdaIN,
-#                                                                             "AdaptiveAttention": AdaptiveAttention,
-#                                                                             "InstanceNormalization": InstanceNormalization})
-
-# permuter_repo = Repository(local_dir="identity_permuter", clone_from="felixrosberg/identitypermuter",
-#                            private=True, use_auth_token=token, git_user="felixrosberg")
-
-# from identity_permuter.id_permuter import identity_permuter
-
-# IDP = identity_permuter(emb_size=32, min_arg=False)
-# IDP.load_weights("identity_permuter/id_permuter.h5")
-
-blend_mask_base = np.zeros(shape=(256, 256, 1))
-blend_mask_base[80:244, 32:224] = 1
-blend_mask_base = gaussian_filter(blend_mask_base, sigma=7)
-
-
-def run_inference(target, source, slider, adv_slider, settings):
-    try:
-        source = np.array(source)
-        target = np.array(target)
-
-        # Prepare to load video
-        if "anonymize" not in settings:
-            source_a = RetinaFace(np.expand_dims(source, axis=0)).numpy()[0]
-            source_h, source_w, _ = source.shape
-            source_lm = get_lm(source_a, source_w, source_h)
-            source_aligned = norm_crop(source, source_lm, image_size=256)
-            source_z = ArcFace.predict(np.expand_dims(tf.image.resize(source_aligned, [112, 112]) / 255.0, axis=0))
-        else:
-            source_z = None
-
-        # read frame
-        im = target
-        im_h, im_w, _ = im.shape
-        im_shape = (im_w, im_h)
-
-        detection_scale = im_w // 640 if im_w > 640 else 1
-
-        faces = RetinaFace(np.expand_dims(cv2.resize(im,
-                                                     (im_w // detection_scale,
-                                                      im_h // detection_scale)), axis=0)).numpy()
-
-        total_img = im / 255.0
-        for annotation in faces:
-            lm_align = np.array([[annotation[4] * im_w, annotation[5] * im_h],
-                                 [annotation[6] * im_w, annotation[7] * im_h],
-                                 [annotation[8] * im_w, annotation[9] * im_h],
-                                 [annotation[10] * im_w, annotation[11] * im_h],
-                                 [annotation[12] * im_w, annotation[13] * im_h]],
-                                dtype=np.float32)
-
-            # align the detected face
-            M, pose_index = estimate_norm(lm_align, 256, "arcface", shrink_factor=1.0)
-            im_aligned = (cv2.warpAffine(im, M, (256, 256), borderValue=0.0) - 127.5) / 127.5
-
-            if "adversarial defense" in settings:
-                eps = adv_slider / 200
-                X = tf.convert_to_tensor(np.expand_dims(im_aligned, axis=0))
-                with tf.GradientTape() as tape:
-                    tape.watch(X)
-
-                    X_z = ArcFaceE(tf.image.resize(X * 0.5 + 0.5, [112, 112]))
-                    output = R([X, X_z])
-
-                    loss = tf.reduce_mean(tf.abs(0 - output))
-
-                gradient = tf.sign(tape.gradient(loss, X))
-
-                adv_x = X + eps * gradient
-                im_aligned = tf.clip_by_value(adv_x, -1, 1)[0]
-
-            if "anonymize" in settings and "reconstruction attack" not in settings:
-                """source_z = ArcFace.predict(np.expand_dims(tf.image.resize(im_aligned, [112, 112]) / 255.0, axis=0))
-                anon_ratio = int(512 * (slider / 100))
-                anon_vector = np.ones(shape=(1, 512))
-                anon_vector[:, :anon_ratio] = -1
-                np.random.shuffle(anon_vector)
-                source_z *= anon_vector"""
-
-                slider_weight = slider / 100
-
-                target_z = ArcFace.predict(np.expand_dims(tf.image.resize(im_aligned, [112, 112]) * 0.5 + 0.5, axis=0))
-                # source_z = IDP.predict(target_z)
-
-                source_z = slider_weight * source_z + (1 - slider_weight) * target_z
-
-            if "reconstruction attack" in settings:
-                source_z = ArcFaceE.predict(np.expand_dims(tf.image.resize(im_aligned, [112, 112]) * 0.5 + 0.5, axis=0))
-
-            # face swap
-            if "reconstruction attack" not in settings:
-                changed_face_cage = G.predict([np.expand_dims(im_aligned, axis=0),
-                                               source_z])
-                changed_face = changed_face_cage[0] * 0.5 + 0.5
-
-                # get inverse transformation landmarks
-                transformed_lmk = transform_landmark_points(M, lm_align)
-
-                # warp image back
-                iM, _ = inverse_estimate_norm(lm_align, transformed_lmk, 256, "arcface", shrink_factor=1.0)
-                iim_aligned = cv2.warpAffine(changed_face, iM, im_shape, borderValue=0.0)
-
-                # blend swapped face with target image
-                blend_mask = cv2.warpAffine(blend_mask_base, iM, im_shape, borderValue=0.0)
-                blend_mask = np.expand_dims(blend_mask, axis=-1)
-                total_img = (iim_aligned * blend_mask + total_img * (1 - blend_mask))
-            else:
-                changed_face_cage = R.predict([np.expand_dims(im_aligned, axis=0),
-                                               source_z])
-                changed_face = changed_face_cage[0] * 0.5 + 0.5
-
-                # get inverse transformation landmarks
-                transformed_lmk = transform_landmark_points(M, lm_align)
-
-                # warp image back
-                iM, _ = inverse_estimate_norm(lm_align, transformed_lmk, 256, "arcface", shrink_factor=1.0)
-                iim_aligned = cv2.warpAffine(changed_face, iM, im_shape, borderValue=0.0)
-
-                # blend swapped face with target image
-                blend_mask = cv2.warpAffine(blend_mask_base, iM, im_shape, borderValue=0.0)
-                blend_mask = np.expand_dims(blend_mask, axis=-1)
-                total_img = (iim_aligned * blend_mask + total_img * (1 - blend_mask))
-
-        if "compare" in settings:
-            total_img = np.concatenate((im / 255.0, total_img), axis=1)
-
-        total_img = np.clip(total_img, 0, 1)
-        total_img *= 255.0
-        total_img = total_img.astype('uint8')
-
-        return total_img
-    except Exception as e:
-        print(e)
-        return None
-
-
-description = "Not Working"
-examples = []
-article = """
-Demo is based of recent research from my Ph.D work. Results expects to be published in the coming months.
-"""
-
-iface = gradio.Interface(run_inference,
-                         [gradio.Image(shape=None, type="pil", label='Target'),
-                          gradio.Image(shape=None, type="pil", label='Source'),
-                          gradio.Slider(0, 100, default=100, label="Anonymization ratio (%)"),
-                          gradio.Slider(0, 100, default=100, label="Adversarial defense ratio (%)"),
-                          gradio.CheckboxGroup(["compare",
-                                                "anonymize",
-                                                "reconstruction attack",
-                                                "adversarial defense"],
-                                               label='Options')],
-                         "image",
-                         title="Not Working",
-                         description=description,
-                         examples=examples,
-                         article=article,
-                         layout="vertical")
-iface.launch()
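Aside: the seam-free paste in the deleted file is a feathered alpha blend: a hard box mask is softened with a Gaussian, then the swapped crop and the target are mixed per pixel. A standalone sketch with synthetic stand-in images (shapes and sigma mirror the app):

import numpy as np
from scipy.ndimage import gaussian_filter

blend_mask = np.zeros((256, 256, 1))
blend_mask[80:244, 32:224] = 1                     # hard box over the face region
blend_mask = gaussian_filter(blend_mask, sigma=7)  # feather the edges

swapped = np.full((256, 256, 3), 0.8)  # stand-in for the generated face crop
target = np.full((256, 256, 3), 0.2)   # stand-in for the original frame

# Per-pixel convex combination: mask=1 keeps the swap, mask=0 keeps the
# target, and the blurred band in between hides the seam.
out = swapped * blend_mask + target * (1 - blend_mask)
print(out.shape, out.min(), out.max())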
|