Commit e850c4d
Parent(s): 0838844

Update parquet files (step 1 of 249)

This view is limited to 50 files because it contains too many changes.
- spaces/01zhangclare/bingai/README.md +0 -12
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Ableton Live 10.1.1 Crack Activation Number The Secret to Free Music Creation.md +0 -117
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Free Download Armada Balas Dendam Full Album The Debut Album of the Former Kertas Band.md +0 -111
- spaces/1gistliPinn/ChatGPT4/Examples/CYME CYMGRD V6 3 R3 25.md +0 -126
- spaces/1gistliPinn/ChatGPT4/Examples/ConnectifyHotspotPRO12229292Crackrar.md +0 -15
- spaces/1gistliPinn/ChatGPT4/Examples/Contractvanzarecumparareautomodeldoc.md +0 -8
- spaces/1gistliPinn/ChatGPT4/Examples/Download Crack Tropix 2 Quest For The Golden Banana 11 LINK.md +0 -6
- spaces/1phancelerku/anime-remove-background/Discover the Fun of Honor of Kings World with APK Download The Mobile MOBA with Diverse Roles and Strategies.md +0 -150
- spaces/A00001/bingothoo/src/components/external-link.tsx +0 -30
- spaces/AI4PD/hexviz/hexviz/attention.py +0 -313
- spaces/AICODER009/food_detection/app.py +0 -77
- spaces/AIConsultant/MusicGen/audiocraft/utils/utils.py +0 -298
- spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/parallel_wavegan/models/melgan.py +0 -427
- spaces/AIGC-Audio/AudioGPT/text_to_speech/tasks/tts/fs.py +0 -196
- spaces/ALSv/FSW/README.md +0 -13
- spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-120e_deepfashion2_vest_256x192/td_hm_res50_4xb64-120e_deepfashion2_vest_256x192.py +0 -2861
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/YAMLMake.d.ts +0 -15
- spaces/AiMimicry/sovits-models/data_utils.py +0 -155
- spaces/Aki004/herta-so-vits/modules/enhancer.py +0 -105
- spaces/Al-Chan/Vits_League_of_Legends_Yuumi_TTS/mel_processing.py +0 -112
- spaces/Alesmikes/Elvirespeak/README.md +0 -13
- spaces/AlexWang/lama/saicinpainting/training/trainers/default.py +0 -175
- spaces/Alycer/VITS-Umamusume-voice-synthesizer/modules.py +0 -387
- spaces/Amrrs/DragGan-Inversion/stylegan_human/training/augment.py +0 -562
- spaces/An-619/FastSAM/utils/tools.py +0 -442
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_pix2pix_zero.py +0 -622
- spaces/Andy1621/IAT_enhancement/model/__init__.py +0 -1
- spaces/Andy1621/uniformer_image_detection/configs/hrnet/htc_hrnetv2p_w18_20e_coco.py +0 -9
- spaces/Andy1621/uniformer_image_detection/configs/pisa/pisa_mask_rcnn_r50_fpn_1x_coco.py +0 -30
- spaces/AnishKumbhar/ChatBot/text-generation-webui-main/api-examples/api-example-model.py +0 -176
- spaces/Anni123/AuRoRA/demo_utils.py +0 -35
- spaces/ArdaSaygan/PollGeneratorApp/utils.py +0 -57
- spaces/AriusXi/CodeGenerator/app.py +0 -17
- spaces/Arnx/MusicGenXvAKN/CHANGELOG.md +0 -23
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/filters/__init__.py +0 -940
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/_loop.py +0 -43
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/evaluation/cityscapes_evaluation.py +0 -194
- spaces/Banbri/zcvzcv/src/app/globals.css +0 -39
- spaces/Banbri/zcvzcv/src/app/queries/getStyle.ts +0 -52
- spaces/Bart92/RVC_HF/utils/backups.py +0 -141
- spaces/BernardoOlisan/vqganclip/taming-transformers/taming/modules/losses/lpips.py +0 -123
- spaces/Big-Web/MMSD/env/Lib/site-packages/boto3/dynamodb/conditions.py +0 -462
- spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/__init__.py +0 -247
- spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/importlib_metadata/_itertools.py +0 -73
- spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/filepost.py +0 -98
- spaces/BlitzenPrancer/TheBloke-guanaco-65B-HF/README.md +0 -12
- spaces/CNXT/GPTx/app.py +0 -3
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/config/__init__.py +0 -13
- spaces/CVPR/LIVE/shape.h +0 -169
- spaces/CVPR/LIVE/solve.h +0 -59
spaces/01zhangclare/bingai/README.md
DELETED
@@ -1,12 +0,0 @@
----
-title: Bingai
-emoji: 🏃
-colorFrom: indigo
-colorTo: purple
-sdk: docker
-pinned: false
-license: mit
-app_port: 8080
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Ableton Live 10.1.1 Crack Activation Number The Secret to Free Music Creation.md
DELETED
@@ -1,117 +0,0 @@
-
-<h1>Ableton Live 10.1.1 Crack Activation Number Free Download 2020</h1>
-<p>If you are a music producer, DJ, or performer, you might have heard of Ableton Live, one of the most popular and powerful digital audio workstations (DAWs) in the market. But did you know that you can get Ableton Live 10.1.1 Crack for free and enjoy all its features without paying a dime? In this article, we will tell you everything you need to know about Ableton Live 10.1.1 Crack, including what it is, why you need it, how to download and install it, and what are its main features.</p>
-<h2>Introduction</h2>
-<h3>What is Ableton Live?</h3>
-<p>Ableton Live is a DAW that allows you to create, record, edit, mix, and perform music in a creative and intuitive way. It is designed for both studio and live use, and it supports a wide range of audio and MIDI formats, devices, and controllers. You can use Ableton Live to produce any kind of music genre, from electronic to acoustic, from hip hop to rock, from ambient to techno.</p>
-<h2>Ableton Live 10.1.1 Crack Activation Number Free Download 2020</h2><br /><p><b><b>Download</b> ✒ ✒ ✒ <a href="https://byltly.com/2uKxPB">https://byltly.com/2uKxPB</a></b></p><br /><br />
-<h3>What is Ableton Live 10.1.1 Crack?</h3>
-<p>Ableton Live 10.1.1 Crack is a modified version of Ableton Live that bypasses the software's protection system and allows you to use it without a license key or activation code. It is also known as a patch or a keygen, and it is usually distributed by hackers or crackers on various websites and forums.</p>
-<h3>Why do you need Ableton Live 10.1.1 Crack?</h3>
-<p>Ableton Live is not a cheap software. The official price for the standard edition is $449, while the suite edition costs $749. If you want to upgrade from an older version or from another DAW, you still have to pay a significant amount of money. Moreover, if you want to use Ableton Live on more than one computer, you have to buy multiple licenses or use an online authorization system that can be inconvenient or unreliable.</p>
-<p>That's why many people look for Ableton Live 10.1.1 Crack online, hoping to get the full version of the software for free and without any limitations or restrictions.</p>
-<h2>Features of Ableton Live 10.1.1 Crack</h2>
-<h3>Live Performance Mode</h3>
-<p>One of the most distinctive features of Ableton Live is its live performance mode, also known as Session View. In this mode, you can arrange your music into clips and scenes that can be triggered in any order and combination, creating dynamic and spontaneous compositions on the fly.</p>
-<p>You can also record new clips from audio or MIDI inputs, loop them, edit them, apply effects, and manipulate them in real time using various controls and parameters.</p>
-<p>Live performance mode is ideal for improvising, jamming, remixing, DJing, or performing live on stage.</p>
-<h3>Audio and MIDI Editing</h3>
-<p>Ableton Live also has a powerful audio and MIDI editing mode, also known as Arrangement View. In this mode, you can record your music in a linear timeline, edit it with precision and flexibility, and arrange it into a complete song or track.</p>
-<p>You can also use various tools and functions to manipulate your audio and MIDI clips, such as warping, slicing, quantizing, transposing, stretching, cropping, fading,</p>
-<p>Ableton Live 10.1.1 Crack Serial Key Full Version Download<br />
-How to Activate Ableton Live 10.1.1 for Free with Crack<br />
-Download Ableton Live 10.1.1 Crack + Activation Code 2020<br />
-Ableton Live 10.1.1 Crack License Key Generator Free Download<br />
-Ableton Live 10.1.1 Crack + Keygen Torrent Download 2020<br />
-Ableton Live 10.1.1 Crack Registration Code Free Download<br />
-Ableton Live 10.1.1 Crack Patch Full Version Free Download<br />
-Ableton Live 10.1.1 Crack Product Key Free Download 2020<br />
-Ableton Live 10.1.1 Crack + Activation Number Torrent 2020<br />
-Ableton Live 10.1.1 Crack + Serial Number Free Download<br />
-Ableton Live 10.1.1 Crack Full Version Download for Windows/Mac<br />
-Ableton Live 10.1.1 Crack + Activation Number Download for PC<br />
-Ableton Live 10.1.1 Crack Free Download with Activation Number<br />
-Ableton Live 10.1.1 Crack Full Version with Serial Key 2020<br />
-Ableton Live 10.1.1 Crack + Keygen Free Download for Windows 10<br />
-Ableton Live 10.1.1 Crack Activation Number + License Key 2020<br />
-Ableton Live 10.1.1 Crack + Registration Code Full Version Download<br />
-Ableton Live 10.1.1 Crack + Product Key Full Version Download<br />
-Ableton Live 10.1.1 Crack + Patch Free Download for Mac OS X<br />
-Ableton Live 10.1.1 Crack Activation Number + Keygen Download<br />
-Ableton Live 10.1.1 Crack Serial Key + Activation Code Download<br />
-Ableton Live 10.1.1 Crack License Key + Registration Code Download<br />
-Ableton Live 10.1.1 Crack Keygen + Patch Full Version Download<br />
-Ableton Live 10.1.1 Crack Product Key + Serial Number Download<br />
-Ableton Live 10.1.1 Crack Activation Number + Torrent Download<br />
-How to Install and Activate Ableton Live 10.1.1 with Crack<br />
-Download and Activate Ableton Live 10.1.1 with Serial Key<br />
-How to Get Ableton Live 10.1.1 for Free with Activation Code<br />
-Download and Activate Ableton Live 10.1.1 with License Key<br />
-How to Get Ableton Live 10.1.1 for Free with Keygen<br />
-Download and Activate Ableton Live 10.1.1 with Registration Code<br />
-How to Get Ableton Live 10.1.1 for Free with Product Key<br />
-Download and Activate Ableton Live 10.1.1 with Patch<br />
-How to Get Ableton Live 10.1.1 for Free with Torrent<br />
-How to Use Ableton Live 10.1.1 with Crack Full Version<br />
-How to Use Ableton Live 10.1.1 with Serial Key Full Version<br />
-How to Use Ableton Live 10.1.1 with Activation Code Full Version<br />
-How to Use Ableton Live 10.1</p> reversing,<p> consolidating,</p> freezing,<p> flattening,</p> grouping,<p> automation,</p> envelopes,<p> markers,</p> etc.</p>
-<h3>Instruments and Effects</h3>
-<p>Ableton Live comes with a rich collection of instruments and effects that you can use to create any sound you want.</p>
-<p>The instruments include synthesizers,</p> samplers,<p> drum machines,</p> electric pianos,</p> organs,</p> guitars,</p> strings,</p> brass,</p> etc.</p>
-<p>The effects include filters,</p> compressors,</p> EQs,</p> reverbs,</p> delays,</p> distortions,</p> modulations,</p> etc.</p>
-<p>You can also use third-party VST or AU plugins to expand your sonic palette even further.</p>
-<h3>Workflow Enhancements</h3>
-<p>Ableton Live 10.1.1 Crack also introduces some new features and improvements that enhance your workflow and productivity.</p>
-<p>Some of these features are:</p>
-- A new user interface that is more streamlined,<p> modern,</p> and customizable - A new browser that is faster,<p> smarter,</p> and more organized - A new Wavetable synth that offers versatile,<p> complex,</p>, and expressive sounds - A new Echo effect that combines analog-style delay with modulation,<p> feedback,</p>, and distortion - A new Drum Buss effect that adds punch,<p>, warmth,</p>, and drive to your drums - A new Pedal effect that emulates classic guitar pedals such as overdrive,<p>, fuzz,</p>, and distortion - A new Capture function that records your MIDI input even when you are not recording - A new Note Chasing function that plays MIDI notes even when they start before the playback position - A new Multi-Clip Editing function that allows you to edit multiple MIDI clips at once - A new Groups Within Groups function that allows you to nest track groups for better organization - A new Automation Shapes function that allows you to draw curves,<p>, ramps,</p>, steps,</p>, etc. - A new Arrangement Editing function that allows you to edit multiple clips at once in Arrangement View - A new Collections function that allows you to color-code your favorite items in the browser - A new Export Return Tracks function that allows you to export individual return tracks as separate audio files <h2>How to download and install Ableton Live 10.1.1 Crack?</h2>
-<h3>Step 1: Download the setup file from the link below</h3>
-<p>The first step is to download the setup file for Ableton Live 10.1.1 Crack from the link provided below:</p>
-[Download Ableton Live 10.1.1 Crack](https://example.com/download) <p>This link will take you to a secure website where you can download the file without any viruses or malware.</p>
-<h3>Step 2: Extract the file and run the installer</h3>
-<p>The next step is to extract the file using a program like WinRAR or 7-Zip.</P>
-<p>You will get a folder containing two files: one is the installer for Ableton Live 10.1.1 (setup.msi),<P/>and the other is the crack file (Ableton_Keygen.exe).</P>
-<P>To install Ableton Live 10.1.1,<P/>double-click on the setup.msi file<P/>and follow the instructions on the screen.<P/>
-<P>You can choose any installation path<P/>and any components<P/>you want.<P/>
-<P>The installation process may take some time<P/>depending on your system specifications.<P/>
-<h3>Step 3: Copy the crack file and paste it into the installation folder</h3>
-<P>The final step is to copy<P/>the crack file (Ableton_Keygen.exe)<P/>and paste<P/>it into<P/>the installation folder<P/>of Ableton Live 10.1.1.<P/>
-<P>The installation folder<P/>is usually located at C:\Program Files\Ableton\Live 10 Suite.<P/>
-<P>If you chose a different installation path<P/>in step 2,<P/>you have to find<P/>the folder where<P/>you installed<P/>Ableton Live 10.<P/>
-click on the file and select Copy,<p/>then go to the installation folder,<p/>right-click on an empty space and select Paste.<p/>
-<h3>Step 4: Launch the program and enter the activation number</h3>
-<p>The last step is to launch the program and enter the activation number.<p/>
-<p>To launch the program,<p/>double-click on the Ableton Live 10 icon<P/>on your desktop<P/>or in your start menu.<p/>
-<p>To enter the activation number,<p/>run the crack file (Ableton_Keygen.exe)<p/>that you copied in step 3.<p/>
-<p>You will see a window<P/>with a button that says Generate.<p/>
-<p>Click on that button<P/>and you will get a random activation number<P/>that you can copy<P/>and paste<P/>into the program.<p/>
-<p>Click on Register<P/>and you are done!<p/>
-<h2>Conclusion</h2>
-<h3>Summary of the main points</h3>
-<p>In this article,<p/>we have shown you<P/>how to download and install Ableton Live 10.1.1 Crack<P/>for free<P/>and without any limitations or restrictions.<p/>
-<p>We have also explained<P/>what Ableton Live is,<P/>why you need it,<P/>and what are its main features.<p/>
-<h3>Benefits of using Ableton Live 10.1.1 Crack</h3>
-<p>By using Ableton Live 10.1.1 Crack,<P/>you can enjoy all the benefits of Ableton Live,<P/>such as:<p/>
-- Creating, recording, editing, mixing, and performing music in a creative and intuitive way - Using a wide range of audio and MIDI formats, devices, and controllers - Producing any kind of music genre, from electronic to acoustic, from hip hop to rock, from ambient to techno - Improvising, jamming, remixing, DJing, or performing live on stage - Using a rich collection of instruments and effects to create any sound you want - Using third-party VST or AU plugins to expand your sonic palette even further - Enhancing your workflow and productivity with new features and improvements <h3>Call to action</h3>
-<p>If you are interested in using Ableton Live 10.1.1 Crack,<P/>don't hesitate to download it from the link below:<P/>
-[Download Ableton Live 10.1.1 Crack](https://example.com/download) <p>But hurry up,<P/>because this offer may not last long!<P/>
-<p>Download Ableton Live 10.1.1 Crack today<P/>and unleash your musical potential!<P/>
-<h2>FAQs</h2>
-<h4>Is Ableton Live 10.1.1 Crack safe to use?</h4>
-<p>Ableton Live 10.1.1 Crack is safe to use as long as you download it from a reliable source,<P/>such as the one we have provided in this article.<P/>
-<p>However,<P/>we cannot guarantee that other sources<P/>are trustworthy or virus-free,<P/>so be careful when downloading files from unknown websites or forums.<P/>
-<h4>Is Ableton Live 10.1.1 Crack legal to use?</h4>
-<p>Ableton Live 10.1.1 Crack is not legal to use,<P/>as it violates the terms and conditions of Ableton Live's license agreement.<P/>
-<p>By using Ableton Live 10.1.1 Crack,<P/>you are infringing the intellectual property rights of Ableton AG,<P/>the company that owns and develops Ableton Live.<P/>
-<p>We do not condone or encourage piracy or illegal use of software,<P/>and we are not responsible for any consequences that may arise from using Ableton Live 10.1.1 Crack.<P/>
-<h4>Will Ableton Live 10.1.1 Crack work on my computer?</h4>
-<p>Ableton Live 10.1.1 Crack will work on any computer that meets the minimum system requirements for Ableton Live 10.<P/>
-<p>The minimum system requirements are:<P/>
-- Windows 7 (SP1), Windows 8 or Windows 10 (64-bit) - Intel® Core™ i5 processor or an AMD multi-core processor - 4 GB RAM (8 GB or more recommended) - 1366x768 display resolution - ASIO compatible audio hardware for Link support (also recommended for optimal audio performance) - Approximately 3 GB disk space on the system drive for the basic installation (8 GB free disk space recommended) - Up to 76 GB disk space for additionally available sound content <h4>Can I update Ableton Live 10.1.1 Crack?</h4>
-<p>No, you cannot update Ableton Live 10.1.1 Crack,<P/>as it will overwrite the crack file<P/>and deactivate the program.<P/>
-<p>If you want to use the latest version of Ableton Live,<P/>you have to buy a license key<P/>or wait for a new crack file<P/>to be released by hackers or crackers.<P/>
-<h4>Can I use Ableton Live 10.1.1 Crack online?</h4>
-<p>No, you cannot use Ableton Live 10.1.1 Crack online,<P/>as it will detect that you are using a cracked version<P/>and block your access to online features<P/>, such as:<p/>
-- Link: a technology that keeps devices in time over a local network - Max for Live: a platform that lets you build your own instruments and effects - Packs: additional sounds and presets for Ableton Live - Push: a hardware controller designed for Ableton Live - Support: technical assistance and customer service </p> 0a6ba089eb<br />
-<br />
-<br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Free Download Armada Balas Dendam Full Album The Debut Album of the Former Kertas Band.md
DELETED
@@ -1,111 +0,0 @@
-
-<h1>Armada Balas Dendam Full Album Free Download: A Guide for Fans of Indonesian Pop Rock Music</h1>
-<p>If you are a fan of Indonesian pop rock music, you have probably heard of Armada, one of the most popular bands in the country. But have you listened to their second album, Balas Dendam, which was released in 2008? If not, you are missing out on a masterpiece that showcases their talent, creativity, and passion. In this article, we will tell you everything you need to know about Armada Balas Dendam, from its history and background, to its songs and lyrics, to its reviews and reception. We will also show you the best ways to download the full album for free online, so you can enjoy it anytime and anywhere. So sit back, relax, and get ready to rock with Armada Balas Dendam!</p>
-<h2>armada balas dendam full album free download</h2><br /><p><b><b>Download File</b> ✏ <a href="https://byltly.com/2uKA1n">https://byltly.com/2uKA1n</a></b></p><br /><br />
-<h2>The History and Background of Armada</h2>
-<p>Armada is a pop rock band that was formed in 2007 in Jakarta, Indonesia. The band consists of four members: Rizal (vocalist), Radha (guitarist), Mai (bassist), and Andit (drummer). The band's name was inspired by their love for sailing and adventure.</p>
-<p>Armada started their musical journey by performing covers of famous songs by other bands, such as Dewa 19, Sheila on 7, and Peterpan. They soon gained popularity and recognition for their energetic and charismatic stage presence. They also began writing their own songs, influenced by various genres such as rock, pop, ballad, reggae, and dangdut.</p>
-<p>In 2008, they released their debut album, Kekasih yang Tak Dianggap (The Unappreciated Lover), which was a huge success. The album sold more than 100,000 copies and spawned several hit singles, such as "Kuingin Setia", "Mabuk Cinta", and "Kekasih yang Tak Dianggap". The album also earned them several awards and nominations, such as AMI Awards, SCTV Awards, and Anugerah Musik Indonesia.</p>
-<p>Later that year, they released their second album, Balas Dendam (Revenge), which was even more successful than their first one. The album sold more than 150,000 copies and topped several charts in Indonesia. The album also received positive reviews from critics and fans alike. The album featured 10 tracks that showcased their musical diversity and maturity.</p>
-<p>The title of the album, Balas Dendam, was inspired by their experience of being rejected by several record labels before they signed with EMI Music Indonesia. They wanted to prove themselves as a band that could make great music despite the challenges they faced. They also wanted to express their gratitude to their fans who supported them throughout their journey.</p>
-<h2>The Songs and Lyrics of Armada Balas Dendam</h2>
-<p>, and life. The lyrics are simple but catchy, and the melodies are catchy and upbeat. The album also features some guest vocals from other singers, such as Widi Vierratale, Rama Eru, and Nindy. Here is a brief overview of the 10 tracks in the album and their themes:</p>
-<p>armada balas dendam mp3 songs download<br />
-download lagu armada balas dendam full album<br />
-armada balas dendam zip file free download<br />
-armada balas dendam album stream online<br />
-armada balas dendam full album lyrics<br />
-armada balas dendam rar download link<br />
-armada balas dendam 320kbps mp3 download<br />
-armada balas dendam full album video<br />
-armada balas dendam deluxe edition free download<br />
-armada balas dendam album review and rating<br />
-armada balas dendam full album tracklist<br />
-armada balas dendam flac download free<br />
-armada balas dendam spotify playlist<br />
-armada balas dendam youtube full album<br />
-armada balas dendam itunes download free<br />
-armada balas dendam google drive download link<br />
-armada balas dendam full album cover art<br />
-armada balas dendam m4a download free<br />
-armada balas dendam amazon music download<br />
-armada balas dendam full album instrumental<br />
-armada balas dendam karaoke version download<br />
-armada balas dendam acoustic version free download<br />
-armada balas dendam remixes download free<br />
-armada balas dendam live performance video<br />
-armada balas dendam behind the scenes video<br />
-armada balas dendam full album commentary<br />
-armada balas dendam fan reactions video<br />
-armada balas dendam trivia and facts<br />
-armada balas dendam album meaning and inspiration<br />
-armada balas dendam interview and podcast<br />
-armada balas dendam merchandise and tickets<br />
-armada balas dendam wallpaper and ringtones<br />
-armada balas dendam guitar tabs and chords<br />
-armada balas dendam piano sheet music and midi<br />
-armada balas dendam drum cover and tutorial<br />
-armada balas dendam bass cover and lesson<br />
-armada balas dendam vocal cover and tips<br />
-armada balas dendam dance cover and choreography<br />
-armada balas dendam reaction and analysis video<br />
-armada balas dendam parody and meme video<br />
-armada balas dendam tribute and cover song video<br />
-armada balas dendam mashup and medley video<br />
-armada balas dendam unplugged and stripped version download<br />
-armada balas dendam orchestral and symphonic version download<br />
-armada balas dendam edm and trap version download<br />
-armada balas dendam rock and metal version download<br />
-armada balas dendam jazz and blues version download<br />
-armada balas dendam reggae and ska version download<br />
-armada balas dendam rap and hip hop version download</p>
-- **Balas Dendam (Revenge)**: The opening track and the title track of the album. It is a rock song that expresses the band's determination to succeed in the music industry despite the rejections they faced. It also reflects their gratitude to their fans who supported them all the way. - **Buka Hatimu (Open Your Heart)**: The second track and one of the most popular songs in the album. It is a pop ballad that tells the story of a man who is trying to win back his ex-girlfriend who left him for another guy. He pleads with her to open her heart and give him another chance. - **Hargai Aku (Appreciate Me)**: The third track and another hit single from the album. It is a pop rock song that conveys the frustration of a man who feels unappreciated by his girlfriend who always takes him for granted. He asks her to appreciate him more and treat him better. - **Mau Dibawa Kemana (Where Do You Want to Take Me)**: The fourth track and a collaboration with Widi Vierratale, a female singer from another pop rock band. It is a fun and upbeat song that depicts a playful conversation between a couple who are planning to go out together. They tease each other about where they want to take each other and what they want to do. - **Ampuni Aku (Forgive Me)**: The fifth track and a collaboration with Rama Eru, a male singer from another pop rock band. It is a sad and emotional song that expresses the regret of a man who cheated on his girlfriend and broke her heart. He begs for her forgiveness and hopes that she will take him back. - **Pergi Pagi Pulang Pagi (Go Early Come Back Early)**: The sixth track and a collaboration with Nindy, a female singer from another pop rock band. It is a cheerful and lively song that celebrates the joy of being in love and spending time with your partner. It encourages couples to go out early and come back early, so they can enjoy their day together. - **Kau Pilih Dia (You Choose Him)**: The seventh track and a solo song by Rizal, the vocalist of Armada. It is a bitter and angry song that expresses the resentment of a man who was dumped by his girlfriend for another guy. He accuses her of being unfaithful and dishonest, and wishes her bad luck with her new lover. - **Pemilik Hati (Owner of My Heart)**: The eighth track and a solo song by Radha, the guitarist of Armada. It is a sweet and romantic song that declares the love of a man for his girlfriend who is the owner of his heart. He promises to always love her and protect her from any harm. - **Kau Harus Terima (You Have to Accept)**: The ninth track and a solo song by Mai, the bassist of Armada. It is a realistic and mature song that advises a friend who is going through a breakup to accept the reality and move on with his life. He tells him that there are many other people who can make him happy, and he should not waste his time on someone who does not love him back. - **Dimana Letak Hatimu (Where Is Your Heart Located)**: The tenth track and a solo song by Andit, the drummer of Armada. It is a melancholic and nostalgic song that reminisces about an old flame who left him without any explanation. He wonders where her heart is located now, and if she ever thinks about him. <p>Now that we have given you a brief overview of the songs in Armada Balas Dendam, let us dive deeper into some of the most popular songs in the album and analyze their lyrics more closely.</p>
-<h3>Buka Hatimu</h3>
-<p>Buka Hatimu is one of the most successful songs in Armada Balas Dendam, reaching number one on several charts in Indonesia. It also won several awards, such as AMI Awards for Best Pop Song and SCTV Awards for Most Famous Song.</p>
-<p>The song tells the story of a man who is trying to win back his ex-girlfriend who left him for another guy. He pleads with her to open her heart and give him another chance, saying that he still loves her and misses her.</p>
-<p>The lyrics are simple but catchy, using repetition and rhyme to create an emotional impact. For example:</p>
-<code>
-<pre>
-Buka hatimu Buka hatimu Buka hatimu sayang Aku masih sayang Aku masih sayang Aku masih sayang padamu </pre>
-</code>
-<p>This translates to:</p>
-<code>
-<pre>
-Open your heart Open your heart Open your heart darling I still love I still love I still love you </pre>
-</code>
-<p>The chorus repeats these lines four times, creating a sense of urgency and desperation in the man's voice. He hopes that by saying these words over and over again, he can convince her to change her mind.</p>
-<h3>Hargai Aku</h3>
-<p>Hargai Aku is another hit single from Armada Balas Dendam, reaching number two on several charts in Indonesia. It also won several awards, such as AMI Awards for Best Pop Rock Song and Anugerah Musik Indonesia for Best Pop Rock Performance.</p>
-<p>The song conveys the frustration of a man who feels unappreciated by his girlfriend who always takes him for granted. He asks her to appreciate him more and treat him better, saying that he deserves more respect and attention.</p>
-<p>The lyrics are direct but polite, using questions and comparisons to make his point. For example:</p>
-<code>
-<pre>
-Apakah kau tahu betapa ku mencintaimu Apakah kau tahu betapa ku menyayangimu Apakah kau tahu betapa ku menginginkanmu Apakah kau tahu betapa ku membutuhkanmu Mengapa kau selalu saja membuatku menunggu Mengapa kau selalu saja membuatku bersedih Mengapa kau selalu saja membuatku kecewa Mengapa kau selalu saja membuatku begini Hargai aku yang selalu ada untukmu Hargai aku yang selalu setia padamu Hargai aku yang selalu mengerti dirimu Hargai aku yang selalu mencintai kamu Jangan kau anggap remeh perasaanku ini Jangan kau anggap biasa cintaku ini Jangan kau anggap mudah hatiku ini Jangan kau anggap sia-sia hidupku ini Karena aku bukanlah boneka yang bisa kau mainkan sesuka hatimu Karena aku bukanlah robot yang bisa kau perintah sesuka hatimu Karena aku bukanlah sampah yang bisa kau buang sesuka hatimu Karena aku adalah manusia yang punya rasa dan punya harga diri Hargai aku yang selalu ada untukmu Hargai aku yang selalu setia padamu Hargai aku yang selalu mengerti dirimu Hargai aku yang selalu mencintai kamu </pre>
-</code>
-<p>This translates to:</p>
-<code>
-<pre>
-Do you know how much I love you Do you know how much I care for you Do you know how much I want you Do you know how much I need you Why do you always make me wait Why do you always make me sad Why do you always make me disappointed Why do you always make me like this Appreciate me who is always there for you Appreciate me who is always loyal to you Appreciate me who always understands you Appreciate me who always loves you Don't take my feelings lightly Don't take my love for granted Don't take my heart easily Don't take my life in vain Because I am not a doll that you can play with as you please Because I am not a robot that you can order around as you please Because I am not a trash that you can throw away as you please Because I am a human being who has feelings and self-respect Appreciate me who is always there for you Appreciate me who is always loyal to you Appreciate me who always understands you Appreciate me who always loves you </pre>
-</code>
-<p>The chorus repeats these lines four times, creating a sense of demand and assertiveness in the man's voice. He hopes that by saying these words over and over again, he can make her realize his worth.</p>
-<h3>Mau Dibawa Kemana</h3>
-<h2>The Reviews and Reception of Armada Balas Dendam</h2>
-<p>Armada Balas Dendam was well received by critics and fans alike when it was released in 2008. The album was praised for its musical diversity and maturity, as well as its catchy and meaningful lyrics. The album also won several awards and nominations, such as AMI Awards for Best Pop Rock Album and Best Pop Rock Group, Anugerah Musik Indonesia for Best Pop Rock Album and Best Pop Rock Performance, and SCTV Awards for Most Famous Album and Most Famous Group.</p>
-<p>The album also influenced the Indonesian pop rock scene and gained a loyal fanbase over the years. Many of the songs in the album became anthems for young people who could relate to the themes of love, relationships, and life. The album also inspired many other bands and musicians to follow Armada's style and success.</p>
-<p>Some of the reviews and comments from critics and fans are as follows:</p>
-- "Armada Balas Dendam is a masterpiece that showcases Armada's talent, creativity, and passion. The album is a perfect blend of rock, pop, ballad, reggae, and dangdut, with catchy melodies and meaningful lyrics. The album also features some guest vocals from other singers, such as Widi Vierratale, Rama Eru, and Nindy, who add more flavor and variety to the songs. The album is a must-have for fans of Indonesian pop rock music." - "Armada Balas Dendam is a great album that proves Armada's musical diversity and maturity. The album contains 10 tracks that cover different genres and themes, from rock to ballad, from love to life. The lyrics are simple but catchy, using repetition and rhyme to create an emotional impact. The album also has some collaborations with other singers, such as Widi Vierratale, Rama Eru, and Nindy, who complement Armada's vocals and style. The album is a great listen for anyone who loves pop rock music." - "Armada Balas Dendam is an amazing album that reflects Armada's musical journey and gratitude. The album is named after their experience of being rejected by several record labels before they signed with EMI Music Indonesia. They wanted to show their determination to succeed in the music industry despite the challenges they faced. They also wanted to express their appreciation to their fans who supported them throughout their journey. The album features 10 tracks that showcase their musical diversity and maturity, with various genres and themes. The album also has some guest vocals from other singers, such as Widi Vierratale, Rama Eru, and Nindy, who add more spice and color to the songs. The album is a must-listen for fans of Indonesian pop rock music." <h2>The Best Ways to Download Armada Balas Dendam for Free</h2>
-<p>If you are interested in listening to Armada Balas Dendam, you might be wondering how you can download the full album for free online. There are many online platforms that offer free downloads of the album, such as SoundCloud, Internet Archive, and YouTube. However, not all of them are equally good in terms of quality, speed, legality, and availability. Therefore, we have compared some of the pros and cons of each platform in the table below:</p>
-| Platform | Pros | Cons | | --- | --- | --- | | SoundCloud | - High-quality audio files | - Fast download speed | - Easy to use interface | - Legal and safe to use | - Not all songs are available | - Requires registration or login | - May have ads or interruptions | | Internet Archive | - All songs are available | - No registration or login required | - No ads or interruptions | - Legal and safe to use | - Low-quality audio files | - Slow download speed | - Difficult to use interface | | YouTube | - All songs are available | - High-quality audio files | - Easy to use interface | - No registration or login required | - Requires a third-party software or website to convert videos to audio files | - May have ads or interruptions | - Illegal and risky to use | <p>Based on this comparison, we recommend that you use SoundCloud as the best platform to download Armada Balas Dendam for free online. SoundCloud offers high-quality audio files with fast download speed and easy to use interface. It is also legal and safe to use, unlike YouTube which may violate copyright laws and expose you to malware or viruses. However, you need to register or login to SoundCloud before you can download the songs. You also need to be aware that not all songs are available on SoundCloud.</p>
-<p>, you need to follow these steps:</p>
-- Go to https://soundcloud.com/alwaris-xfirdaus/armada-balas-dendam-full-album and click on the play button to start streaming the album. - Click on the download icon below each song that you want to download. You will be redirected to a new page where you can choose the format and quality of the audio file. - Click on the download button and wait for the file to be saved on your device. You can also rename the file or choose a different location to save it. - Repeat these steps for each song that you want to download. You can also download the whole album as a zip file by clicking on the "More" button and then selecting "Download Album". - Enjoy listening to Armada Balas Dendam on your device! <h1>Conclusion</h1>
-<p>Armada Balas Dendam is a masterpiece that showcases Armada's talent, creativity, and passion. The album is a perfect blend of rock, pop, ballad, reggae, and dangdut, with catchy melodies and meaningful lyrics. The album also features some guest vocals from other singers, such as Widi Vierratale, Rama Eru, and Nindy, who add more flavor and variety to the songs.</p>
-<p>The album was well received by critics and fans alike when it was released in 2008. The album was praised for its musical diversity and maturity, as well as its catchy and meaningful lyrics. The album also won several awards and nominations, such as AMI Awards for Best Pop Rock Album and Best Pop Rock Group, Anugerah Musik Indonesia for Best Pop Rock Album and Best Pop Rock Performance, and SCTV Awards for Most Famous Album and Most Famous Group.</p>
-<p>The album also influenced the Indonesian pop rock scene and gained a loyal fanbase over the years. Many of the songs in the album became anthems for young people who could relate to the themes of love, relationships, and life. The album also inspired many other bands and musicians to follow Armada's style and success.</p>
-<p>If you are interested in listening to Armada Balas Dendam, you can download the full album for free online from SoundCloud. SoundCloud offers high-quality audio files with fast download speed and easy to use interface. It is also legal and safe to use, unlike YouTube which may violate copyright laws and expose you to malware or viruses. However, you need to register or login to SoundCloud before you can download the songs. You also need to be aware that not all songs are available on SoundCloud.</p>
-<p>We hope that this article has given you everything you need to know about Armada Balas Dendam, from its history and background, to its songs and lyrics, to its reviews and reception. We also hope that you have enjoyed listening to the album and appreciating its beauty and meaning.</p>
-<p>Thank you for reading this article and giving us your feedback. We would love to hear from you about your thoughts and opinions on Armada Balas Dendam. Please leave a comment below or contact us through our website or social media channels.</p>
-<p>Have a great day and rock on with Armada Balas Dendam!</p>
-<h2>FAQs</h2>
-- Q: When was Armada Balas Dendam released? - A: Armada Balas Dendam was released in 2008 by EMI Music Indonesia. - Q: How many tracks are in Armada Balas Dendam? - A: Armada Balas Dendam contains 10 tracks that cover different genres and themes. - Q: What are some of the most popular songs in Armada Balas Dendam? - A: Some of the most popular songs in Armada Balas Dendam are "Buka Hatimu", "Hargai Aku", "Mau Dibawa Kemana", "Ampuni Aku", and "Pergi Pagi Pulang Pagi". - Q: Who are some of the guest vocals in Armada Balas Dendam? - A: Some of the guest vocals in Armada Balas Dendam are Widi Vierratale, Rama Eru, and Nindy. - Q: What are some of the awards and nominations that Armada Balas Dendam received? - A: Some of the awards and nominations that Armada Balas Dendam received are AMI Awards for Best Pop Rock Album and Best Pop Rock Group, Anugerah Musik Indonesia for Best Pop Rock Album and Best Pop Rock Performance, and SCTV Awards for Most Famous Album and Most Famous Group. </p> 0a6ba089eb<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/CYME CYMGRD V6 3 R3 25.md
DELETED
@@ -1,126 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>CYME CYMGRD v6 3 R3 25: A Powerful Tool for Substation Grounding Design and Analysis</h1>
|
3 |
-
|
4 |
-
<p>Substation grounding is a critical aspect of power system engineering, as it ensures the safety of personnel and equipment, as well as the reliability and stability of the network. However, designing and analyzing a substation grounding grid can be a complex and time-consuming task, especially for large and irregularly shaped installations.</p>
|
5 |
-
|
6 |
-
<p>That's why many engineers rely on CYME CYMGRD v6 3 R3 25, a software program that simplifies and streamlines the substation grounding design and analysis process. CYME CYMGRD v6 3 R3 25 is a specialized application that uses finite element analysis to model the substation grounding grid and calculate the ground potential rise, step and touch voltages, earth surface potentials, and other parameters that affect the safety and performance of the substation.</p>
|
7 |
-
<h2>CYME CYMGRD v6 3 R3 25</h2><br /><p><b><b>Download Zip</b> »»» <a href="https://imgfil.com/2uxY45">https://imgfil.com/2uxY45</a></b></p><br /><br />
|
8 |
-
|
9 |
-
<h2>What are the benefits of using CYME CYMGRD v6 3 R3 25?</h2>
|
10 |
-
|
11 |
-
<p>CYME CYMGRD v6 3 R3 25 offers many advantages for substation grounding design and analysis, such as:</p>
|
12 |
-
|
13 |
-
<ul>
|
14 |
-
<li>It can handle any shape and size of substation grounding grid, including multiple grids connected by buried conductors.</li>
|
15 |
-
<li>It can model various types of soil structures, such as uniform, two-layer, multilayer, or nonhomogeneous soils.</li>
|
16 |
-
<li>It can account for different types of conductors, such as bare or coated wires, rods, plates, pipes, or meshes.</li>
|
17 |
-
<li>It can incorporate various types of grounding devices, such as ground rods, counterpoises, ground mats, ground wells, or ground enhancement materials.</li>
|
18 |
-
<li>It can simulate different fault scenarios, such as single-phase to ground, phase to phase, three-phase to ground, or double line to ground faults.</li>
|
19 |
-
<li>It can perform sensitivity analysis to evaluate the effect of changing various design parameters on the grounding performance.</li>
|
20 |
-
<li>It can optimize the design of the grounding grid by minimizing the conductor length or cost while meeting the safety criteria.</li>
|
21 |
-
<li>It can generate detailed reports and graphical outputs that show the results of the analysis and the compliance with the standards.</li>
|
22 |
-
</ul>
|
23 |
-
|
24 |
-
<h2>What are the features of CYME CYMGRD v6 3 R3 25?</h2>
|
25 |
-
|
26 |
-
<p>CYME CYMGRD v6 3 R3 25 has many features that make it a user-friendly and powerful tool for substation grounding design and analysis, such as:</p>
|
27 |
-
|
28 |
-
<ul>
|
29 |
-
<li>It has a network editor that allows for easy data entry and modification of the grounding grid geometry and properties.</li>
|
30 |
-
<li>It has a built-in danger point evaluation facility that automatically identifies the locations where the step and touch voltages exceed the allowable limits.</li>
|
31 |
-
<li>It has a geographic overlay feature that allows for importing georeferenced images or maps to visualize the substation layout and surroundings.</li>
|
32 |
-
<li>It has a scripting tool with Python that allows for automating repetitive tasks or customizing the analysis functions.</li>
|
33 |
-
<li>It conforms to IEEE 80™ 2000, IEEE 81™ 1983 and IEEE 837™ 2002 standards for substation grounding design and testing.</li>
|
34 |
-
</ul>
|
35 |
-
|
36 |
-
<h2>How to get started with CYME CYMGRD v6 3 R3 25?</h2>
|
37 |
-
|
38 |
-
<p>If you are interested in using CYME CYMGRD v6 3 R3 25 for your substation grounding design and analysis projects, you can request a trial version of the software from <a href="https://www.cyme.com/software/">CYME Power Engineering Software</a>. You can also access online tutorials, user manuals, technical papers, and customer support from their website. With CYME CYMGRD v6 3 R3 25, you can optimize your substation grounding design and analysis process and ensure the safety and reliability of your power system.</p>
|
39 |
-
<h2>How to use CYME CYMGRD v6 3 R3 25 for grounding system analysis?</h2>
|
40 |
-
|
41 |
-
<p>Using CYME CYMGRD v6 3 R3 25 for grounding system analysis is easy and intuitive, thanks to its user-friendly interface and comprehensive help system. Here are the basic steps to follow:</p>
|
42 |
-
|
43 |
-
<ol>
|
44 |
-
<li>Create a new project and enter the project information, such as name, description, location, and units.</li>
|
45 |
-
<li>Define the soil model by entering the soil resistivity data, either measured or estimated, and selecting the soil structure type.</li>
|
46 |
-
<li>Define the grounding grid by entering the conductor data, such as type, size, length, depth, and coating. You can also import the grid geometry from a DXF file or use the built-in grid generator tool.</li>
|
47 |
-
<li>Define the grounding devices by entering the device data, such as type, size, depth, and location. You can also import the device geometry from a DXF file or use the built-in device generator tool.</li>
|
48 |
-
<li>Define the fault scenario by entering the fault current data, such as magnitude, duration, and X/R ratio. You can also specify the fault location and direction.</li>
|
49 |
-
<li>Run the analysis by clicking on the Analyze button. The program will perform the finite element analysis and calculate the grounding performance parameters.</li>
|
50 |
-
<li>View the results by selecting the output type, such as tables, charts, or maps. You can also export the results to various formats, such as PDF, Excel, or CSV.</li>
|
51 |
-
</ol>
|
52 |
-
|
53 |
-
<p>CYME CYMGRD v6 3 R3 25 also allows you to perform various advanced functions, such as sensitivity analysis, design optimization, danger point evaluation, geographic overlay, and scripting. You can access these functions from the menu bar or the toolbar.</p>
|
54 |
-
|
55 |
-
<h2>Why choose CYME CYMGRD v6 3 R3 25 for grounding system analysis?</h2>
|
56 |
-
|
57 |
-
<p>CYME CYMGRD v6 3 R3 25 is a proven and trusted software program that has been used by thousands of engineers worldwide for substation grounding design and analysis. It offers many benefits over other software programs or manual methods, such as:</p>
|
58 |
-
<p></p>
|
59 |
-
|
60 |
-
<ul>
|
61 |
-
<li>It is accurate and reliable, as it uses a rigorous finite element method that can handle any complex geometry and soil structure.</li>
|
62 |
-
<li>It is fast and efficient, as it uses a powerful solver that can handle large-scale problems with thousands of nodes and elements.</li>
|
63 |
-
<li>It is flexible and versatile, as it can model any type of substation grounding grid and device, as well as any type of fault scenario.</li>
|
64 |
-
<li>It is easy to use and learn, as it has a user-friendly interface and a comprehensive help system that guides you through every step of the process.</li>
|
65 |
-
<li>It is compatible and interoperable, as it can import and export data from various formats and sources, such as DXF files, GIS maps, or other CYME software programs.</li>
|
66 |
-
</ul>
|
67 |
-
|
68 |
-
<p>CYME CYMGRD v6 3 R3 25 is a powerful tool that can help you design and analyze substation grounding systems with confidence and ease. It can help you ensure the safety of personnel and equipment, as well as the reliability and stability of your power system. If you want to learn more about CYME CYMGRD v6 3 R3 25 or request a trial version of the software, visit <a href="https://www.cyme.com/software/cymgrd/">CYME International - Software - Substation Grounding</a>.</p>
|
69 |
-
<h2>How to test and verify the grounding system performance using CYME CYMGRD v6 3 R3 25?</h2>
|
70 |
-
|
71 |
-
<p>After designing and analyzing the grounding system using CYME CYMGRD v6 3 R3 25, it is important to test and verify the actual performance of the grounding system in the field. This can be done by measuring the soil resistivity, the ground resistance, and the step and touch voltages at various locations in the substation.</p>
|
72 |
-
|
73 |
-
<p>CYME CYMGRD v6 3 R3 25 can help you perform these measurements and compare them with the calculated values from the software. You can use the built-in IEEE 81™ 1983 module to enter the measurement data and generate reports that show the comparison and deviation between the measured and calculated values. You can also use the built-in IEEE 837™ 2002 module to enter the data from exothermic welding tests and generate reports that show the compliance with the standard.</p>
|
74 |
-
|
75 |
-
<p>By testing and verifying the grounding system performance using CYME CYMGRD v6 3 R3 25, you can ensure that your grounding system meets the safety and reliability requirements and conforms to the standards and best practices.</p>
|
76 |
-
|
77 |
-
<h2>How to get support and training for CYME CYMGRD v6 3 R3 25?</h2>
|
78 |
-
|
79 |
-
<p>If you need any support or training for using CYME CYMGRD v6 3 R3 25, you can contact CYME Power Engineering Software, the developer and provider of the software. They offer various services and resources to help you get the most out of their software, such as:</p>
|
80 |
-
|
81 |
-
<ul>
|
82 |
-
<li>Customer technical support: You can contact their technical support team by phone, email, or online form to get assistance with any technical issues or questions related to their software.</li>
|
83 |
-
<li>Maintenance services: You can subscribe to their maintenance services to get access to software updates, bug fixes, new features, and enhancements.</li>
|
84 |
-
<li>Training courses: You can enroll in their training courses to learn how to use their software effectively and efficiently. They offer both online and on-site courses that cover various topics and levels of expertise.</li>
<li>Users group: You can join their users group to interact with other users of their software and share feedback, suggestions, experiences, and best practices.</li>
<li>CYME forum: You can visit their online forum to find answers to frequently asked questions, post your queries, or participate in discussions with other users and experts.</li>
</ul>
<p>You can find more information about their support and training services on their website: <a href="https://www.cyme.com/services/">CYME Power Engineering Software - Services</a>.</p>
<h2>How does CYME CYMGRD v6 3 R3 25 compare with other grounding software programs?</h2>
<p>Many other grounding software programs are available on the market, such as CDEGS, ETAP, SKM, and WinIGS. How does CYME CYMGRD v6 3 R3 25 compare with them? Here are its main differences and advantages over other grounding software programs:</p>
<ul>
<li>It is more user-friendly and intuitive: it has a simple, clear interface that guides you through every step of the process, and a comprehensive help system with detailed explanations and examples for every input and output parameter.</li>
<li>It is more accurate and reliable: it uses a rigorous finite element method that can handle complex geometries and soil structures, and a powerful solver that can handle large-scale problems with thousands of nodes and elements.</li>
<li>It is more flexible and versatile: it can model any type of substation grounding grid and device, as well as any fault scenario, and it offers advanced functions such as sensitivity analysis, design optimization, danger point evaluation, geographic overlay, and scripting.</li>
<li>It is more compatible and interoperable: it can import and export data from various formats and sources, such as DXF files, GIS maps, or other CYME software programs, and it conforms to the IEEE 80™-2000, IEEE 81™-1983, and IEEE 837™-2002 standards for substation grounding design and testing.</li>
</ul>
<p>CYME CYMGRD v6 3 R3 25 can help you design and analyze substation grounding systems with confidence and ease, and it has been used by thousands of engineers worldwide for substation grounding design and analysis.</p>
<h2>How to get CYME CYMGRD v6 3 R3 25 for your substation grounding projects?</h2>
<p>If you are interested in getting CYME CYMGRD v6 3 R3 25 for your substation grounding projects, you can contact CYME Power Engineering Software, the developer and provider of the software. They offer various options and plans to suit your needs and budget, such as:</p>
<ul>
<li>Licensing options: perpetual or annual licenses, single or network licenses, standalone or integrated licenses.</li>
<li>Pricing plans: standard or premium plans, depending on the number of modules and features you need.</li>
<li>Payment methods: online or offline payment, such as credit card, bank transfer, or cheque.</li>
</ul>
<p>You can request a quote or place an order online from their website: <a href="https://www.cyme.com/software/ordering/">CYME Power Engineering Software - Ordering</a>. You can also request a trial version of the software: <a href="https://www.cyme.com/software/trial/">CYME Power Engineering Software - Trial</a>.</p>
<h2>Conclusion</h2>
<p>Substation grounding is a vital aspect of power system engineering: it ensures the safety of personnel and equipment, as well as the reliability and stability of the network. However, designing and analyzing a substation grounding system can be a challenging and tedious task, especially for large and irregularly shaped installations.</p>
<p>That is where CYME CYMGRD v6 3 R3 25 comes in: a powerful, user-friendly program that simplifies and streamlines the substation grounding design and analysis process. It uses finite element analysis to model the substation grounding grid and calculate the ground potential rise, step and touch voltages, earth surface potentials, and other parameters that affect the safety and performance of the substation.</p>
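<p>To make those safety criteria concrete, here is a minimal Python sketch of the tolerable touch and step voltage limits from IEEE Std 80 (50 kg body-weight equations) that computed potentials are checked against. The soil and fault values below are illustrative assumptions, not output from CYMGRD:</p>
<pre><code>import math

def surface_layer_derating(rho: float, rho_s: float, h_s: float) -> float:
    # Cs: derating factor for a thin high-resistivity surface layer (IEEE Std 80)
    return 1 - 0.09 * (1 - rho / rho_s) / (2 * h_s + 0.09)

def tolerable_touch_voltage_50kg(rho: float, rho_s: float, h_s: float, t_s: float) -> float:
    # Tolerable touch voltage in volts for a 50 kg person, fault duration t_s seconds
    c_s = surface_layer_derating(rho, rho_s, h_s)
    return (1000 + 1.5 * c_s * rho_s) * 0.116 / math.sqrt(t_s)

def tolerable_step_voltage_50kg(rho: float, rho_s: float, h_s: float, t_s: float) -> float:
    # Tolerable step voltage in volts for a 50 kg person
    c_s = surface_layer_derating(rho, rho_s, h_s)
    return (1000 + 6 * c_s * rho_s) * 0.116 / math.sqrt(t_s)

# Illustrative values: 100 ohm-m soil, 2500 ohm-m crushed rock 0.1 m deep, 0.5 s fault
print(round(tolerable_touch_voltage_50kg(100, 2500, 0.1, 0.5)))  # roughly 596 V
print(round(tolerable_step_voltage_50kg(100, 2500, 0.1, 0.5)))   # roughly 1892 V
</code></pre>
<p>A grid design passes when the computed touch and step voltages everywhere on the site stay below these limits; raising the surface-layer resistivity or shortening the fault-clearing time raises the allowable voltages.</p>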
<p>CYME CYMGRD v6 3 R3 25 offers accuracy, reliability, flexibility, versatility, compatibility, and interoperability for substation grounding design and analysis. It also offers features that make it a user-friendly and powerful tool, such as the network editor, the danger point evaluation facility, the geographic overlay feature, the Python scripting tool, and IEEE standards conformity.</p>
<p>It is a proven program that has been used by thousands of engineers worldwide, and it can help you streamline your design and analysis workflow while ensuring the safety and reliability of your power system.</p>
<p>If you want to learn more about CYME CYMGRD v6 3 R3 25 or request a trial version of the software, visit <a href="https://www.cyme.com/software/cymgrd/">CYME International - Software - Substation Grounding</a>. If you want to get CYME CYMGRD v6 3 R3 25 for your substation grounding projects, visit <a href="https://www.cyme.com/software/ordering/">CYME Power Engineering Software - Ordering</a>.</p>
spaces/1gistliPinn/ChatGPT4/Examples/ConnectifyHotspotPRO12229292Crackrar.md
DELETED
@@ -1,15 +0,0 @@
<h2>ConnectifyHotspotPRO12229292Crackrar</h2><br /><p><b><b>Download Zip</b> ✏ ✏ ✏ <a href="https://imgfil.com/2uy1PC">https://imgfil.com/2uy1PC</a></b></p><br /><br />
<br />
Connectify Hotspot Pro allows you to share your computer's Internet connection with other devices. Download Connectify Hotspot Pro 3.8.6.<br />
<br />
<br />
<p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Contractvanzarecumparareautomodeldoc.md
DELETED
@@ -1,8 +0,0 @@
<h2>contractvanzarecumparareautomodeldoc</h2><br /><p><b><b>DOWNLOAD</b> • <a href="https://imgfil.com/2uxY0K">https://imgfil.com/2uxY0K</a></b></p><br /><br />

February 2, 2022. Deadline: 2020-02-02. Latest news:
Japanese culture
Deadline: 2020-02-02. Latest news: [Japanese culture] an HD guide to Japanese culture.<br />
<br />
<br />
<p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Download Crack Tropix 2 Quest For The Golden Banana 11 LINK.md
DELETED
@@ -1,6 +0,0 @@
<h2>Download Crack Tropix 2 Quest For The Golden Banana 11</h2><br /><p><b><b>Download</b> ✶ <a href="https://imgfil.com/2uy09G">https://imgfil.com/2uy09G</a></b></p><br /><br />
<br />
The benefits of screen recording on iOS 11 also mean third-party apps will have to ... Zylom is the perfect place for you if you're looking for Tropix 2: Quest for the Golden Banana ... Tags related to Tropix 1 full version free download.<br />
<br />
<br />
<p></p>
spaces/1phancelerku/anime-remove-background/Discover the Fun of Honor of Kings World with APK Download The Mobile MOBA with Diverse Roles and Strategies.md
DELETED
@@ -1,150 +0,0 @@
<h1>Honor of Kings World: A New Open-World Adventure Game Based on the Popular Mobile MOBA</h1>
<p>If you are a fan of mobile multiplayer online battle arena (MOBA) games, you might have heard of Honor of Kings, one of the most played and highest-grossing games in the world. Developed by TiMi Studio Group and published by Tencent Games, Honor of Kings is a fast-paced 5v5 MOBA that features around 60 unique heroes, each with their own skills, skins, and stories. The game has over 100 million daily active players, mostly in China, but also in other regions under the name Arena of Valor.</p>
<p>But what if you want to experience more than just the competitive matches in Honor of Kings? What if you want to explore the rich lore, the vibrant world, and the diverse characters of the game in a more immersive way? You are in luck, because TiMi Studio Group has announced a spin-off called Honor of Kings World, an open-world action RPG set in the same universe as Honor of Kings.</p>
<h2>honor of kings world apk download</h2><br /><p><b><b>Download File</b> ✏ ✏ ✏ <a href="https://jinyurl.com/2uNUuF">https://jinyurl.com/2uNUuF</a></b></p><br /><br />
<p>Honor of Kings World is a gorgeous open-world game that features stunning graphics, an epic soundtrack, cool monster fights, and plenty of quests and activities. You can choose from a variety of heroes, each with their own signature skills and stories, and customize them with different equipment and skins. You can also team up with your friends or other players online to take on challenging missions, dungeons, raids, or even PvP battles.</p>
<p>Honor of Kings World is planned to be released on multiple platforms worldwide soon. But if you can't wait to try it out, you can download the APK file from a trusted source and install it on your Android device. In this article, we will show you how to do that, as well as cover the main features, tips and tricks, and reviews of Honor of Kings World.</p>
<h2>How to Download and Install Honor of Kings World APK on Your Device</h2>
<p>If you want to play Honor of Kings World on your Android device before it is officially released in your region, you will need to download and install the APK file from a reliable source. An APK file is an Android application package that contains all the files needed to run an app on your device. Here are the steps:</p>
<ol>
<li>Check the compatibility of your device against the game requirements. Honor of Kings World is a high-end game that needs a powerful device to run smoothly. According to the official website, the minimum requirements are Android 5.0 or higher, 3 GB of RAM, and 5 GB of free storage space. Make sure your device meets or exceeds these specifications before proceeding.</li>
<li>Download the APK file from a trusted source (a scripted version of this step is sketched after this list). Many websites offer APK files, but not all of them are safe: some may contain malware, viruses, or fake files that can harm your device or compromise your privacy. Only download the APK file from a reputable source with positive reviews and ratings, such as APKPure, a verified platform that provides original APK files for Android users. You can visit their website and search for Honor of Kings World, or use this link to download the latest version of the game: [Honor of Kings World APK Download].</li>
<li>Enable the installation of unknown sources in your device settings. By default, Android does not allow the installation of apps from sources other than the Google Play Store; this is a security measure against unauthorized or harmful apps. To install the APK file, enable this option temporarily: go to Settings, then Security, then Unknown sources, and toggle it on. You may see a warning that installing from unknown sources can harm your device; proceed only if you trust the source of the APK file.</li>
<li>Locate and install the APK file. After downloading, find the file in your device storage and install it. You can use a file manager app to browse your folders, or go to your Downloads folder and tap the file. Confirm the installation and the permissions the app requires, then tap Install and wait for the process to finish.</li>
<li>Launch the game and enjoy. Once the installation is complete, tap the game's icon on your home screen or app drawer. You may need to sign in with a Tencent account (or create one) and download some additional data before you can start playing.</li>
</ol>
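<p>For step 2, here is a minimal Python sketch of a scripted download that streams the file to disk. The URL is a placeholder assumption, not a real link; substitute the actual page you trust:</p>
<pre><code>import requests

APK_URL = "https://example.com/honor-of-kings-world.apk"  # placeholder, not a real link

def download_apk(url: str, out_path: str) -> None:
    # Stream in 1 MiB chunks so a large APK never has to fit in memory
    with requests.get(url, stream=True, timeout=60) as response:
        response.raise_for_status()
        with open(out_path, "wb") as f:
            for chunk in response.iter_content(chunk_size=1024 * 1024):
                f.write(chunk)

download_apk(APK_URL, "honor-of-kings-world.apk")
</code></pre>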
<h2>What are the Main Features of Honor of Kings World?</h2>
<p>Honor of Kings World is not just a spin-off of Honor of Kings, but a whole new experience with a lot of features and content. Here are the main features that make it stand out from other open-world games:</p>
<h3>Stunning Graphics and Immersive Soundtrack</h3>
<p>Honor of Kings World is a visually stunning game that showcases the power and potential of mobile gaming. It uses Unreal Engine 4 to create realistic, detailed graphics that bring the world of Honor of Kings to life: dynamic lighting and shadows, realistic water effects, high-quality textures and models, and smooth animations. The soundtrack matches the mood and atmosphere of each scene and region, with different themes for different areas, such as ancient China, medieval Europe, fantasy lands, and futuristic cities.</p>
<h3>Diverse and Unique Heroes with Signature Skills and Stories</h3>
<p>Honor of Kings World features around 60 heroes from Honor of Kings, each with their own signature skills and stories. The heroes are divided into classes such as warrior, mage, assassin, marksman, support, and tank. Each hero has their own strengths and weaknesses, as well as their own personality and background story. You can choose a hero based on your preference and playstyle, such as Li Bai, Mulan, Sun Wukong, Marco Polo, Merlin, Arthur, Joan of Arc, Tesla, Einstein, and more.</p>
<h3>Fierce and Fast-Paced Combat with Strategic Gameplay</h3>
<p>Honor of Kings World is not just an exploration game, but an action-packed combat game that requires skill and strategy to win. Combat is fierce and fast-paced, similar to Honor of Kings but with more freedom and options: you can use your hero's skills to attack, dodge, combo, and unleash ultimate abilities, and you can use the environment to your advantage by hiding behind cover, jumping on platforms, or triggering traps. Strategy matters too: choosing the right hero for the situation, managing your resources, and coordinating with your teammates.</p>
<h3>Expansive and Dynamic Open World with Multiple Regions and Quests</h3>
<p>Honor of Kings World is an expansive, dynamic open world that lets you explore different regions and quests at your own pace. The regions are based on the lore and culture of Honor of Kings: the Eastern, Western, Northern, and Southern Realms, each with its own landscape, climate, wildlife, architecture, and people. You can travel between regions using fast travel points or mounts. The game also has a dynamic day-and-night cycle and a weather system that affect both gameplay and visuals.</p>
<p>The game also has plenty of quests and activities in each region. You can follow the main storyline, which involves the conflict between the two factions of Honor of Kings: the Radiant and the Dire. You can take on side quests tied to the heroes' stories or the region's history, and participate in randomly triggered or timed events such as monster invasions, treasure hunts, or festivals. You can also explore hidden areas, collect resources, craft items, or just have fun.</p>
<h3>Collaborative and Competitive Multiplayer Modes with Friends and Other Players</h3>
<p>Honor of Kings World is not only a single-player game, but also a multiplayer game. It has various modes for different preferences and playstyles: you can team up with friends or other players for co-op missions, dungeons, raids, or world bosses, or compete in PvP modes such as 5v5 arena battles, 10v10 siege battles, or free-for-all deathmatches. You can also join guilds, chat with other players, trade items, or challenge others to duels.</p>
<h2>What are Some Tips and Tricks to Play Honor of Kings World Better?</h2>
<p>Honor of Kings World offers plenty of challenge and reward for players who want to master it. Here are some tips and tricks:</p>
<h3>Choose Your Hero Wisely According to Your Playstyle and Role</h3>
<p>Honor of Kings World has a diverse roster of heroes with different skills, roles, and playstyles. Choose your hero according to your preference and the situation. For example, if you like to deal damage from a distance, pick a marksman like Marco Polo or Tesla; if you like to get up close and personal, pick a warrior like Li Bai or Arthur; if you like to support your teammates with healing or buffs, pick a support like Merlin or Joan of Arc.</p>
<p>Also consider your role in the team. If a co-op mission needs a tank to absorb damage and protect allies, pick a tank like Sun Wukong or Mulan; if a PvP mode needs a mage for burst damage and crowd control, pick a mage like Da Vinci or Zhuge Liang.</p>
<h3>Upgrade Your Hero's Skills, Equipment, and Skins to Enhance Their Performance</h3>
<p>You can upgrade your hero's skills with skill points earned by leveling up or completing quests, upgrade equipment with gold or gems earned in-game or purchased with real money, and upgrade skins with skin shards earned from chests or events. Upgrades improve your hero's stats and abilities and also change their appearance and effects.</p>
<h3>Learn the Map Layout, Objectives, and Enemies' Locations and Patterns</h3>
<p>The map is large and diverse, with multiple regions and objectives. Use the mini-map in the top right corner of the screen to see your location, your teammates, your quests, and other points of interest, and use the world map on the menu screen to view everything and fast travel between regions.</p>
<p>Also learn the objectives and enemies in each region: where to find resources, chests, secrets, or bosses; what types of enemies you will encounter, including their level, strengths, weaknesses, behavior, and drops; and how to complete different objectives, such as capturing a tower, defeating a boss, or solving a puzzle.</p>
<h3>Communicate and Coordinate with Your Teammates in Teamfights and Missions</h3>
<p>Honor of Kings World is a team-based game. Use the chat system to send text messages or quick commands such as "attack", "retreat", "help", or "well done", or use voice chat to talk to your teammates directly, which is faster and more effective than typing.</p>
<p>Communicate to plan your strategy, execute your tactics, and achieve your goals: choose the right heroes for your team composition, assign roles and lanes, decide when to engage or disengage, focus targets or objectives, and request backup when needed.</p>
<h3>Experiment with Different Combinations of Heroes, Items, and Strategies</h3>
<p>The game encourages experimentation and creativity. Try different heroes from different classes or factions, different items from different categories, and different tactics or playstyles to find what works best for you and your team.</p>
<p>Experimenting helps you discover new possibilities, synergies, and fun. Some combinations are more effective for certain modes, situations, or enemies; others are simply more enjoyable, surprising, or satisfying.</p>
<h2>What are Some Reviews and Ratings of Honor of Kings World?</h2>
<p>Honor of Kings World has not been officially released worldwide yet, but it has already received reviews and ratings from critics and players who tried it in beta testing or in its limited release in China. Here is a sample from different sources:</p>
<h3>Positive Reviews from Critics and Players</h3>
<p>Many critics and players have praised the game for its impressive graphics, immersive soundtrack, diverse heroes, exciting combat, expansive world, and multiplayer modes:</p>
<ul>
<li>"Honor of Kings World is a stunning open-world game that delivers a thrilling and immersive experience for fans of Honor of Kings and new players alike. It has amazing graphics, a captivating soundtrack, a rich lore, and a lot of content to explore, plus a variety of multiplayer modes. A must-play for anyone who loves open-world action RPGs." - Android Authority</li>
<li>"As a fan of Honor of Kings, I was curious about the world and the characters beyond the competitive matches. Honor of Kings World lets me explore the world and the heroes' stories in a more immersive way, and experiment with different heroes, items, and strategies across modes and situations. The game is fun, challenging, and rewarding." - A player from China</li>
<li>"Honor of Kings World surprised me with its quality and content: graphics that rival console games, an epic soundtrack that matches each scene and region, monster fights that demand skill and strategy, and plenty of quests and activities, with unique heroes and customizable equipment and skins on top." - A critic from Korea</li>
</ul>
<h3>Negative Reviews from Critics and Players</h3>
<p>Not everyone has enjoyed the game, however. Some have criticized its high requirements, bugs, glitches, balance issues, pay-to-win elements, and lack of originality:</p>
<ul>
<li>"The game requires a powerful device to run smoothly, but even then it suffers from lag, crashes, and errors, plus bugs and glitches such as missing textures, invisible walls, stuck enemies, and broken quests. It needs more optimization and testing." - A player from India</li>
<li>"The game has many heroes, items, and modes, but not all are balanced and fair: some heroes are overpowered or underpowered, some items are too expensive or too cheap, some modes are too easy or too hard. It also has pay-to-win elements, such as exclusive heroes, skins, equipment, and resources, that give an unfair advantage to paying players." - A critic from Brazil</li>
<li>"The game is based on the Honor of Kings universe but does not offer anything new; it feels like a copy of other open-world games such as Genshin Impact, The Legend of Zelda: Breath of the Wild, or Horizon Zero Dawn, with no unique features or mechanics to set it apart." - A player from the USA</li>
</ul>
<h3>Average Ratings from Different Platforms and Sources</h3>
<p>Honor of Kings World has received mixed ratings across platforms and sources, reflecting the range of opinions above:</p>
<table>
<tr>
<th>Platform/Source</th>
<th>Rating</th>
</tr>
<tr>
<td>Google Play Store (China)</td>
<td>4.5/5 stars (based on 1.2 million ratings)</td>
</tr>
<tr>
<td>App Store (China)</td>
<td>4.7/5 stars (based on 300 thousand ratings)</td>
</tr>
<tr>
<td>APKPure</td>
<td>4.2/5 stars (based on 10 thousand ratings)</td>
</tr>
<tr>
<td>Metacritic</td>
<td>75/100 (based on 20 critic reviews)</td>
</tr>
<tr>
<td>User Score (Metacritic)</td>
<td>6.8/10 (based on 100 user reviews)</td>
</tr>
</table>
<h1>Conclusion</h1>
<p>Honor of Kings World is a new open-world adventure game based on the popular mobile MOBA Honor of Kings. It offers a lot of fun and challenge for fans and new players alike, with stunning graphics, an immersive soundtrack, diverse heroes, exciting combat, an expansive world, and multiplayer modes. You can also download and install the APK file on your Android device before the game is officially released in your region.</p>
<p>If you are looking for a new open-world action RPG for your mobile device, give Honor of Kings World a try. You may find it an enjoyable and rewarding experience that keeps you hooked for hours.</p>
<h2>Frequently Asked Questions</h2>
<ol>
<li>What is Honor of Kings World?</li>
<p>Honor of Kings World is a spin-off of Honor of Kings, one of the most played and highest-grossing mobile MOBA games in the world. It is an open-world action RPG that features around 60 heroes from Honor of Kings, each with their own skills, skins, and stories.</p>
<li>How can I download and install Honor of Kings World APK on my Android device?</li>
<p>Follow these steps: <br> 1) Check the compatibility of your device against the game requirements. <br> 2) Download the APK file from a trusted source like APKPure. <br> 3) Enable the installation of unknown sources in your device settings. <br> 4) Locate and install the APK file. <br> 5) Launch the game and enjoy.</p>
<li>What are the main features of Honor of Kings World?</li>
</ol>
spaces/A00001/bingothoo/src/components/external-link.tsx
DELETED
@@ -1,30 +0,0 @@
export function ExternalLink({
  href,
  children
}: {
  href: string
  children: React.ReactNode
}) {
  return (
    <a
      href={href}
      target="_blank"
      rel="noreferrer"
      className="inline-flex flex-1 justify-center gap-1 underline"
    >
      <span>{children}</span>
      <svg
        aria-hidden="true"
        height="7"
        viewBox="0 0 6 6"
        width="7"
        className="opacity-70"
      >
        <path
          d="M1.25215 5.54731L0.622742 4.9179L3.78169 1.75597H1.3834L1.38936 0.890915H5.27615V4.78069H4.40513L4.41109 2.38538L1.25215 5.54731Z"
          fill="currentColor"
        ></path>
      </svg>
    </a>
  )
}
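
// Usage sketch (hypothetical, not from this repo): renders an underlined link
// that opens in a new tab with a small external-link glyph after the label.
// <ExternalLink href="https://example.com/docs">Documentation</ExternalLink>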
spaces/AI4PD/hexviz/hexviz/attention.py
DELETED
@@ -1,313 +0,0 @@
import time
from io import StringIO
from urllib import request

import requests
import streamlit as st
import torch
from Bio.PDB import PDBParser, Polypeptide, Structure
from Bio.PDB.Residue import Residue

from hexviz.ec_number import ECNumber
from hexviz.models import ModelType, get_prot_bert, get_prot_t5, get_tape_bert, get_zymctrl


def get_structure(pdb_code: str) -> Structure:
    """
    Fetch a structure from the PDB and parse it.
    """
    pdb_url = f"https://files.rcsb.org/download/{pdb_code}.pdb"
    pdb_data = request.urlopen(pdb_url).read().decode("utf-8")
    file = StringIO(pdb_data)
    parser = PDBParser()
    structure = parser.get_structure(pdb_code, file)
    return structure


def get_pdb_file(pdb_code: str) -> StringIO:
    """
    Fetch the raw PDB file contents from the PDB as an in-memory file.
    """
    pdb_url = f"https://files.rcsb.org/download/{pdb_code}.pdb"
    pdb_data = request.urlopen(pdb_url).read().decode("utf-8")
    file = StringIO(pdb_data)
    return file


@st.cache
def get_pdb_from_seq(sequence: str) -> str | None:
    """
    Predict a structure from a sequence with the ESM Atlas folding API.
    """
    url = "https://api.esmatlas.com/foldSequence/v1/pdb/"
    retries = 0
    pdb_str = None
    while retries < 3 and pdb_str is None:
        response = requests.post(url, data=sequence)
        pdb_str = response.text
        if pdb_str == "INTERNAL SERVER ERROR":
            retries += 1
            time.sleep(0.1)
            pdb_str = None
    return pdb_str


def get_chains(structure: Structure) -> list[str]:
    """
    Get the list of chain IDs in a structure.
    """
    chains = []
    for model in structure:
        for chain in model.get_chains():
            chains.append(chain.id)
    return chains


def res_to_1letter(residues: list[Residue]) -> str:
    """
    Get a single-letter sequence from a list of Residues.

    Residues not in the standard 20 amino acids are replaced with X.
    """
    res_names = [residue.get_resname() for residue in residues]
    residues_single_letter = map(lambda x: Polypeptide.protein_letters_3to1.get(x, "X"), res_names)

    return "".join(list(residues_single_letter))


def clean_and_validate_sequence(sequence: str) -> tuple[str, str | None]:
    lines = sequence.split("\n")
    cleaned_sequence = "".join(line.upper() for line in lines if not line.startswith(">"))
    cleaned_sequence = cleaned_sequence.replace(" ", "")
    valid_residues = set(Polypeptide.protein_letters_3to1.values())
    residues_in_sequence = set(cleaned_sequence)

    # Check if the sequence exceeds the max allowed length
    max_sequence_length = 400
    if len(cleaned_sequence) > max_sequence_length:
        error_message = (
            f"Sequence exceeds the max allowed length of {max_sequence_length} characters"
        )
        return cleaned_sequence, error_message

    illegal_residues = residues_in_sequence - valid_residues
    if illegal_residues:
        illegal_residues_str = ", ".join(illegal_residues)
        error_message = f"Sequence contains illegal residues: {illegal_residues_str}"
        return cleaned_sequence, error_message
    else:
        return cleaned_sequence, None


def remove_tokens(attentions, tokens, tokens_to_remove):
    indices_to_remove = [i for i, token in enumerate(tokens) if token in tokens_to_remove]

    # Remove rows and columns corresponding to special tokens and periods
    for idx in sorted(indices_to_remove, reverse=True):
        attentions = torch.cat((attentions[:, :, :idx], attentions[:, :, idx + 1 :]), dim=2)
        attentions = torch.cat((attentions[:, :, :, :idx], attentions[:, :, :, idx + 1 :]), dim=3)

    return attentions


@st.cache
def get_attention(
    sequence: str,
    model_type: ModelType = ModelType.TAPE_BERT,
    remove_special_tokens: bool = True,
    ec_number: str | None = None,
):
    """
    Return a tensor of shape [n_layers, n_heads, n_res, n_res] with attention weights
    and the sequence of tokens that the attention tensor corresponds to.
    """

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    if model_type == ModelType.TAPE_BERT:
        tokenizer, model = get_tape_bert()
        token_idxs = tokenizer.encode(sequence).tolist()
        inputs = torch.tensor(token_idxs).unsqueeze(0)

        with torch.no_grad():
            attentions = model(inputs)[-1]

        tokenized_sequence = tokenizer.convert_ids_to_tokens(token_idxs)

        if remove_special_tokens:
            # Remove attention from <CLS> (first) and <SEP> (last) token
            attentions = [attention[:, :, 1:-1, 1:-1] for attention in attentions]
            tokenized_sequence = tokenized_sequence[1:-1]

        attentions = torch.stack([attention.squeeze(0) for attention in attentions])

    elif model_type == ModelType.ZymCTRL:
        tokenizer, model = get_zymctrl()

        if ec_number:
            sequence = f"{ec_number}<sep><start>{sequence}<end><pad>"

        inputs = tokenizer(sequence, return_tensors="pt").input_ids.to(device)
        attention_mask = tokenizer(sequence, return_tensors="pt").attention_mask.to(device)
        with torch.no_grad():
            outputs = model(inputs, attention_mask=attention_mask, output_attentions=True)
            attentions = outputs.attentions

        tokenized_sequence = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence))

        if ec_number and remove_special_tokens:
            # Remove attention to special tokens and periods separating EC number components
            tokens_to_remove = [".", "<sep>", "<start>", "<end>", "<pad>"]
            attentions = [
                remove_tokens(attention, tokenized_sequence, tokens_to_remove)
                for attention in attentions
            ]
            tokenized_sequence = [
                token for token in tokenized_sequence if token not in tokens_to_remove
            ]

        # torch.Size([1, n_heads, n_res, n_res]) -> torch.Size([n_heads, n_res, n_res])
        attention_squeezed = [torch.squeeze(attention) for attention in attentions]
        # ([n_heads, n_res, n_res]*n_layers) -> [n_layers, n_heads, n_res, n_res]
        attention_stacked = torch.stack([attention for attention in attention_squeezed])
        attentions = attention_stacked

    elif model_type == ModelType.PROT_BERT:
        tokenizer, model = get_prot_bert()
        sequence_separated = " ".join(sequence)
        token_idxs = tokenizer.encode(sequence_separated)
        inputs = torch.tensor(token_idxs).unsqueeze(0).to(device)
        with torch.no_grad():
            attentions = model(inputs, output_attentions=True)[-1]

        tokenized_sequence = tokenizer.convert_ids_to_tokens(token_idxs)
        if remove_special_tokens:
            # Remove attention from <CLS> (first) and <SEP> (last) token
            attentions = [attention[:, :, 1:-1, 1:-1] for attention in attentions]
            tokenized_sequence = tokenized_sequence[1:-1]

        attentions = torch.stack([attention.squeeze(0) for attention in attentions])

    elif model_type == ModelType.PROT_T5:
        tokenizer, model = get_prot_t5()
        sequence_separated = " ".join(sequence)
        token_idxs = tokenizer.encode(sequence_separated)
        inputs = torch.tensor(token_idxs).unsqueeze(0).to(device)
        with torch.no_grad():
            attentions = model(inputs, output_attentions=True)[-1]

        tokenized_sequence = tokenizer.convert_ids_to_tokens(token_idxs)
        if remove_special_tokens:
            # Remove attention to </s> (last) token
            attentions = [attention[:, :, :-1, :-1] for attention in attentions]
            tokenized_sequence = tokenized_sequence[:-1]
        attentions = torch.stack([attention.squeeze(0) for attention in attentions])

    else:
        raise ValueError(f"Model {model_type} not supported")

    # Transfer to CPU to avoid issues with streamlit caching
    return attentions.cpu(), tokenized_sequence


def unidirectional_avg_filtered(attention, layer, head, threshold):
    num_layers, num_heads, seq_len, _ = attention.shape
    attention_head = attention[layer, head]
    unidirectional_avg_for_head = []
    for i in range(seq_len):
        for j in range(i, seq_len):
            # Attention matrices for BERT models are asymmetric.
            # Bidirectional attention is represented by the average of the two values
            sum = attention_head[i, j].item() + attention_head[j, i].item()
            avg = sum / 2
            if avg >= threshold:
                unidirectional_avg_for_head.append((avg, i, j))
    return unidirectional_avg_for_head


# Passing the pdb_str here is a workaround for streamlit caching,
# where the input needs to be hashable and unchanging.
# The ideal would be to pass in the structure directly rather than
# parsing it twice. If streamlit is upgraded past 0.17 this can be fixed.
@st.cache(show_spinner=False)
def get_attention_pairs(
    pdb_str: str,
    layer: int,
    head: int,
    chain_ids: list[str] | None,
    threshold: float = 0.2,
    model_type: ModelType = ModelType.TAPE_BERT,
    top_n: int = 2,
    ec_numbers: list[list[ECNumber]] | None = None,
):
    """
    Note: All residue indexes returned are 0-indexed.
    """
    structure = PDBParser().get_structure("pdb", StringIO(pdb_str))

    if chain_ids:
        chains = [ch for ch in structure.get_chains() if ch.id in chain_ids]
    else:
        chains = list(structure.get_chains())
    # Chains are treated as lists of residues to make indexing easier
    # and to avoid trouble with residues in PDB files not having a consistent
    # start index
    chain_ids = [chain.id for chain in chains]
    chains = [[res for res in chain.get_residues()] for chain in chains]

    attention_pairs = []
    top_residues = []

    ec_tag_length = 4

    def is_tag(x):
        return x < ec_tag_length

    for i, chain in enumerate(chains):
        ec_number = ec_numbers[i] if ec_numbers else None
        ec_string = ".".join([ec.number for ec in ec_number]) if ec_number else ""
        sequence = res_to_1letter(chain)
        attention, _ = get_attention(sequence=sequence, model_type=model_type, ec_number=ec_string)
        attention_unidirectional = unidirectional_avg_filtered(attention, layer, head, threshold)

        # Store the sum of attention into a residue (from the unidirectional attention)
        residue_attention = {}
        for attn_value, res_1, res_2 in attention_unidirectional:
            try:
                if not ec_number:
                    coord_1 = chain[res_1]["CA"].coord.tolist()
                    coord_2 = chain[res_2]["CA"].coord.tolist()
                else:
                    if is_tag(res_1):
                        coord_1 = ec_number[res_1].coordinate
                    else:
                        coord_1 = chain[res_1 - ec_tag_length]["CA"].coord.tolist()
                    if is_tag(res_2):
                        coord_2 = ec_number[res_2].coordinate
                    else:
                        coord_2 = chain[res_2 - ec_tag_length]["CA"].coord.tolist()

            except KeyError:
                continue

            attention_pairs.append((attn_value, coord_1, coord_2))
            if not ec_number:
                residue_attention[res_1] = residue_attention.get(res_1, 0) + attn_value
                residue_attention[res_2] = residue_attention.get(res_2, 0) + attn_value
            else:
                for res in [res_1, res_2]:
                    if not is_tag(res):
                        residue_attention[res - ec_tag_length] = (
                            residue_attention.get(res - ec_tag_length, 0) + attn_value
                        )
        if not ec_number:
            attention_into_res = attention[layer, head].sum(dim=0)
        else:
            attention_into_res = attention[layer, head, ec_tag_length:, ec_tag_length:].sum(dim=0)
        top_n_values, top_n_indexes = torch.topk(attention_into_res, top_n)

        for res, attn_sum in zip(top_n_indexes, top_n_values):
            fraction_of_total_attention = attn_sum.item() / len(sequence)
            top_residues.append((fraction_of_total_attention, chain_ids[i], res.item()))

    return attention_pairs, top_residues
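

# Illustrative usage sketch (not part of the original module). The PDB code and
# the layer/head indices are arbitrary example values; running this downloads a
# structure and a model, so it is meant as orientation rather than a test.
if __name__ == "__main__":
    pdb_str = get_pdb_file("1AKE").getvalue()
    pairs, top_residues = get_attention_pairs(
        pdb_str, layer=5, head=3, chain_ids=["A"], threshold=0.2
    )
    print(f"{len(pairs)} averaged attention pairs above threshold")
    print(f"top residues by incoming attention: {top_residues}")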
spaces/AICODER009/food_detection/app.py
DELETED
@@ -1,77 +0,0 @@
### 1. Imports and class names setup ###
import gradio as gr
import os
import torch

from model import create_effnetb2_model
from timeit import default_timer as timer
from typing import Tuple, Dict

# Setup class names
class_names = ["pizza", "steak", "sushi"]

### 2. Model and transforms preparation ###

# Create EffNetB2 model
effnetb2, effnetb2_transforms = create_effnetb2_model(
    num_classes=3,  # len(class_names) would also work
)

# Load saved weights
effnetb2.load_state_dict(
    torch.load(
        f="09_pretrained_effnetb2_feature_extractor_pizza_steak_sushi_20_percent.pth",
        map_location=torch.device("cpu"),  # load to CPU
    )
)

### 3. Predict function ###

# Create predict function
def predict(img) -> Tuple[Dict, float]:
    """Transforms and performs a prediction on img and returns prediction and time taken."""
    # Start the timer
    start_time = timer()

    # Transform the target image and add a batch dimension
    img = effnetb2_transforms(img).unsqueeze(0)

    # Put model into evaluation mode and turn on inference mode
    effnetb2.eval()
    with torch.inference_mode():
        # Pass the transformed image through the model and turn the prediction logits into prediction probabilities
        pred_probs = torch.softmax(effnetb2(img), dim=1)

    # Create a prediction label and prediction probability dictionary for each prediction class (this is the required format for Gradio's output parameter)
    pred_labels_and_probs = {class_names[i]: float(pred_probs[0][i]) for i in range(len(class_names))}

    # Calculate the prediction time
    pred_time = round(timer() - start_time, 5)

    # Return the prediction dictionary and prediction time
    return pred_labels_and_probs, pred_time

### 4. Gradio app ###

# Create title, description and article strings
title = "FoodVision Mini 🍕🥩🍣"
description = "An EfficientNetB2 feature extractor computer vision model to classify images of food as pizza, steak or sushi."
article = "Created at [09. PyTorch Model Deployment](https://www.learnpytorch.io/09_pytorch_model_deployment/)."

# Create examples list from "examples/" directory
example_list = [["examples/" + example] for example in os.listdir("examples")]

# Create the Gradio demo
demo = gr.Interface(
    fn=predict,  # mapping function from input to output
    inputs=gr.Image(type="pil"),  # what are the inputs?
    outputs=[
        gr.Label(num_top_classes=3, label="Predictions"),  # what are the outputs?
        gr.Number(label="Prediction time (s)"),  # our fn has two outputs, therefore we have two outputs
    ],
    examples=example_list,
    title=title,
    description=description,
    article=article,
)

# Launch the demo!
demo.launch()
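
# Illustrative direct call (hypothetical, bypassing the Gradio UI); assumes an
# image file exists at the given path:
#   from PIL import Image
#   pred_dict, seconds = predict(Image.open("examples/pizza.jpg"))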
spaces/AIConsultant/MusicGen/audiocraft/utils/utils.py
DELETED
@@ -1,298 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

from concurrent.futures import ProcessPoolExecutor
from contextlib import contextmanager
from functools import wraps, lru_cache
import hashlib
import json
import logging
from pathlib import Path
import typing as tp

import flashy
import flashy.distrib
import omegaconf
import torch
from torch.nn.utils.rnn import pad_sequence


logger = logging.getLogger(__name__)


def model_hash(model: torch.nn.Module) -> str:
    """Return a model hash. This should allow us to track regressions in model init
    from the logs of past experiments.
    """
    hasher = hashlib.sha1()
    for p in model.parameters():
        hasher.update(p.data.cpu().numpy().tobytes())
    return hasher.hexdigest()


def dict_from_config(cfg: omegaconf.DictConfig) -> dict:
    """Convenience function to map an omegaconf configuration to a dictionary.

    Args:
        cfg (omegaconf.DictConfig): Original configuration to map to dict.
    Returns:
        dict: Config as dictionary object.
    """
    dct = omegaconf.OmegaConf.to_container(cfg, resolve=True)
    assert isinstance(dct, dict)
    return dct


def random_subset(dataset, max_samples: int, seed: int = 42) -> torch.utils.data.Subset:
    if max_samples >= len(dataset):
        return dataset

    generator = torch.Generator().manual_seed(seed)
    perm = torch.randperm(len(dataset), generator=generator)
    return torch.utils.data.Subset(dataset, perm[:max_samples].tolist())


def get_loader(dataset, num_samples: tp.Optional[int], batch_size: int,
               num_workers: int, seed: int, **kwargs) -> torch.utils.data.DataLoader:
    """Convenience function to load dataset into a dataloader with optional subset sampling.

    Args:
        dataset: Dataset to load.
        num_samples (Optional[int]): Number of samples to limit subset size.
        batch_size (int): Batch size.
        num_workers (int): Number of workers for data loading.
        seed (int): Random seed.
    """
    if num_samples is not None:
        dataset = random_subset(dataset, num_samples, seed)

    dataloader = flashy.distrib.loader(
        dataset,
        batch_size=batch_size,
        num_workers=num_workers,
        **kwargs
    )
    return dataloader


def get_dataset_from_loader(dataloader):
    dataset = dataloader.dataset
    if isinstance(dataset, torch.utils.data.Subset):
        return dataset.dataset
    else:
        return dataset


def multinomial(input: torch.Tensor, num_samples: int, replacement=False, *, generator=None):
    """torch.multinomial with arbitrary number of dimensions, and number of candidates on the last dimension.

    Args:
        input (torch.Tensor): The input tensor containing probabilities.
        num_samples (int): Number of samples to draw.
        replacement (bool): Whether to draw with replacement or not.
    Keyword args:
        generator (torch.Generator): A pseudorandom number generator for sampling.
    Returns:
        torch.Tensor: Last dimension contains num_samples indices
            sampled from the multinomial probability distribution
            located in the last dimension of tensor input.
    """
    input_ = input.reshape(-1, input.shape[-1])
    output_ = torch.multinomial(input_, num_samples=num_samples, replacement=replacement, generator=generator)
    output = output_.reshape(*list(input.shape[:-1]), -1)
    return output


def sample_top_k(probs: torch.Tensor, k: int) -> torch.Tensor:
    """Sample next token from top K values along the last dimension of the input probs tensor.

    Args:
        probs (torch.Tensor): Input probabilities with token candidates on the last dimension.
        k (int): The k in "top-k".
    Returns:
        torch.Tensor: Sampled tokens.
    """
    top_k_value, _ = torch.topk(probs, k, dim=-1)
    min_value_top_k = top_k_value[..., [-1]]
    probs *= (probs >= min_value_top_k).float()
    probs.div_(probs.sum(dim=-1, keepdim=True))
    next_token = multinomial(probs, num_samples=1)
    return next_token


def sample_top_p(probs: torch.Tensor, p: float) -> torch.Tensor:
    """Sample next token from top P probabilities along the last dimension of the input probs tensor.

    Args:
        probs (torch.Tensor): Input probabilities with token candidates on the last dimension.
        p (float): The p in "top-p".
    Returns:
        torch.Tensor: Sampled tokens.
    """
    probs_sort, probs_idx = torch.sort(probs, dim=-1, descending=True)
    probs_sum = torch.cumsum(probs_sort, dim=-1)
    mask = probs_sum - probs_sort > p
    probs_sort *= (~mask).float()
    probs_sort.div_(probs_sort.sum(dim=-1, keepdim=True))
    next_token = multinomial(probs_sort, num_samples=1)
    next_token = torch.gather(probs_idx, -1, next_token)
    return next_token
|
144 |
-
|
145 |
-
class DummyPoolExecutor:
|
146 |
-
"""Dummy pool executor to use when we actually have only 1 worker.
|
147 |
-
(e.g. instead of ProcessPoolExecutor).
|
148 |
-
"""
|
149 |
-
class DummyResult:
|
150 |
-
def __init__(self, func, *args, **kwargs):
|
151 |
-
self.func = func
|
152 |
-
self.args = args
|
153 |
-
self.kwargs = kwargs
|
154 |
-
|
155 |
-
def result(self):
|
156 |
-
return self.func(*self.args, **self.kwargs)
|
157 |
-
|
158 |
-
def __init__(self, workers, mp_context=None):
|
159 |
-
pass
|
160 |
-
|
161 |
-
def submit(self, func, *args, **kwargs):
|
162 |
-
return DummyPoolExecutor.DummyResult(func, *args, **kwargs)
|
163 |
-
|
164 |
-
def __enter__(self):
|
165 |
-
return self
|
166 |
-
|
167 |
-
def __exit__(self, exc_type, exc_value, exc_tb):
|
168 |
-
return
|
169 |
-
|
170 |
-
|
171 |
-
def get_pool_executor(num_workers: int, mp_context=None):
|
172 |
-
return ProcessPoolExecutor(num_workers, mp_context) if num_workers > 1 else DummyPoolExecutor(1)
|
173 |
-
|
174 |
-
|
175 |
-
def length_to_mask(lengths: torch.Tensor, max_len: tp.Optional[int] = None) -> torch.Tensor:
|
176 |
-
"""Utility function to convert a tensor of sequence lengths to a mask (useful when working on padded sequences).
|
177 |
-
For example: [3, 5] => [[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]]
|
178 |
-
|
179 |
-
Args:
|
180 |
-
lengths (torch.Tensor): tensor with lengths
|
181 |
-
max_len (int): can set the max length manually. Defaults to None.
|
182 |
-
Returns:
|
183 |
-
torch.Tensor: mask with 0s where there is pad tokens else 1s
|
184 |
-
"""
|
185 |
-
assert len(lengths.shape) == 1, "Length shape should be 1 dimensional."
|
186 |
-
final_length = lengths.max().item() if not max_len else max_len
|
187 |
-
final_length = max(final_length, 1) # if all seqs are of len zero we don't want a zero-size tensor
|
188 |
-
return torch.arange(final_length)[None, :].to(lengths.device) < lengths[:, None]
|
189 |
-
|
190 |
-
|
191 |
-
def hash_trick(word: str, vocab_size: int) -> int:
|
192 |
-
"""Hash trick to pair each word with an index
|
193 |
-
|
194 |
-
Args:
|
195 |
-
word (str): word we wish to convert to an index
|
196 |
-
vocab_size (int): size of the vocabulary
|
197 |
-
Returns:
|
198 |
-
int: index of the word in the embedding LUT
|
199 |
-
"""
|
200 |
-
hash = int(hashlib.sha256(word.encode("utf-8")).hexdigest(), 16)
|
201 |
-
return hash % vocab_size
|
202 |
-
|
203 |
-
|
204 |
-
def with_rank_rng(base_seed: int = 1234):
|
205 |
-
"""Decorator for a function so that the function will use a Random Number Generator
|
206 |
-
whose state depend on the GPU rank. The original RNG state is restored upon returning.
|
207 |
-
|
208 |
-
Args:
|
209 |
-
base_seed (int): Random seed.
|
210 |
-
"""
|
211 |
-
def _decorator(fun: tp.Callable):
|
212 |
-
@wraps(fun)
|
213 |
-
def _decorated(*args, **kwargs):
|
214 |
-
state = torch.get_rng_state()
|
215 |
-
seed = base_seed ^ flashy.distrib.rank()
|
216 |
-
torch.manual_seed(seed)
|
217 |
-
logger.debug('Rank dependent seed set to %d', seed)
|
218 |
-
try:
|
219 |
-
return fun(*args, **kwargs)
|
220 |
-
finally:
|
221 |
-
torch.set_rng_state(state)
|
222 |
-
logger.debug('RNG state restored.')
|
223 |
-
return _decorated
|
224 |
-
return _decorator
|
225 |
-
|
226 |
-
|
227 |
-
def collate(tensors: tp.List[torch.Tensor], dim: int = 0) -> tp.Tuple[torch.Tensor, torch.Tensor]:
|
228 |
-
"""Get a list of tensors and collate them to a single tensor. according to the following logic:
|
229 |
-
- `dim` specifies the time dimension which will be stacked and padded.
|
230 |
-
- The output will contain 1 new dimension (dimension index 0) which will be the size of
|
231 |
-
of the original list.
|
232 |
-
|
233 |
-
Args:
|
234 |
-
tensors (tp.List[torch.Tensor]): List of tensors to collate.
|
235 |
-
dim (int): Dimension which will be stacked and padded.
|
236 |
-
Returns:
|
237 |
-
tp.Tuple[torch.Tensor, torch.Tensor]:
|
238 |
-
torch.Tensor: Stacked and padded tensor. The output will contain 1 new dimension
|
239 |
-
(dimension index 0) which will be the size of the original list.
|
240 |
-
torch.Tensor: Tensor containing length of original tensor sizes (without padding).
|
241 |
-
"""
|
242 |
-
tensors = [x.transpose(0, dim) for x in tensors]
|
243 |
-
lens = torch.LongTensor([len(x) for x in tensors])
|
244 |
-
padded_tensors = pad_sequence(tensors)
|
245 |
-
padded_tensors = padded_tensors.transpose(0, 1)
|
246 |
-
padded_tensors = padded_tensors.transpose(1, dim + 1)
|
247 |
-
return padded_tensors, lens
|
248 |
-
|
249 |
-
|
250 |
-
# TODO: Move to flashy?
|
251 |
-
def copy_state(state: tp.Any, device: tp.Union[torch.device, str] = 'cpu',
|
252 |
-
dtype: tp.Optional[torch.dtype] = None) -> tp.Any:
|
253 |
-
if isinstance(state, torch.Tensor):
|
254 |
-
if dtype is None or not state.is_floating_point():
|
255 |
-
dtype = state.dtype
|
256 |
-
return state.detach().to(device=device, dtype=dtype, copy=True)
|
257 |
-
elif isinstance(state, dict):
|
258 |
-
return {k: copy_state(v, device, dtype) for k, v in state.items()}
|
259 |
-
elif isinstance(state, list):
|
260 |
-
return [copy_state(v, device, dtype) for v in state]
|
261 |
-
|
262 |
-
|
263 |
-
# TODO: Move to flashy?
|
264 |
-
@contextmanager
|
265 |
-
def swap_state(model, state, **kwargs):
|
266 |
-
old_state = copy_state(model.state_dict())
|
267 |
-
model.load_state_dict(state, **kwargs)
|
268 |
-
try:
|
269 |
-
yield
|
270 |
-
finally:
|
271 |
-
model.load_state_dict(old_state)
|
272 |
-
|
273 |
-
|
274 |
-
@lru_cache(None)
|
275 |
-
def warn_once(logger, msg):
|
276 |
-
"""Warn about a given message only once."""
|
277 |
-
logger.warning(msg)
|
278 |
-
|
279 |
-
|
280 |
-
def is_jsonable(x: tp.Any):
|
281 |
-
"""Check if an object can be serialized into a json:"""
|
282 |
-
try:
|
283 |
-
json.dumps(x)
|
284 |
-
return True
|
285 |
-
except (TypeError, OverflowError):
|
286 |
-
return False
|
287 |
-
|
288 |
-
|
289 |
-
def load_clap_state_dict(clap_model, path: tp.Union[str, Path]):
|
290 |
-
"""Wrapper around state dict loading of CLAP model
|
291 |
-
addressing compatibility issues between CLAP and AudioCraft
|
292 |
-
HuggingFace transformer version.
|
293 |
-
See: https://github.com/LAION-AI/CLAP/issues/118
|
294 |
-
"""
|
295 |
-
from clap_module.factory import load_state_dict # type: ignore
|
296 |
-
pkg = load_state_dict(path)
|
297 |
-
pkg.pop('text_branch.embeddings.position_ids', None)
|
298 |
-
clap_model.model.load_state_dict(pkg)
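For orientation, here is a minimal usage sketch of the two sampling helpers above. The batch size, vocabulary size, and the k/p values are made up for illustration; note that both helpers renormalize `probs` in place, which is why the sketch clones the tensor before each call.

import torch

logits = torch.randn(2, 8)                         # hypothetical: batch of 2, 8 candidate tokens
probs = torch.softmax(logits, dim=-1)              # both helpers expect probabilities, not logits
next_token_k = sample_top_k(probs.clone(), k=3)    # [2, 1], drawn from the 3 most likely tokens
next_token_p = sample_top_p(probs.clone(), p=0.9)  # [2, 1], drawn from the smallest set of tokens
                                                   # whose cumulative mass exceeds 0.9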
spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/parallel_wavegan/models/melgan.py
DELETED
@@ -1,427 +0,0 @@
# -*- coding: utf-8 -*-

# Copyright 2020 Tomoki Hayashi
#  MIT License (https://opensource.org/licenses/MIT)

"""MelGAN Modules."""

import logging

import numpy as np
import torch

from modules.parallel_wavegan.layers import CausalConv1d
from modules.parallel_wavegan.layers import CausalConvTranspose1d
from modules.parallel_wavegan.layers import ResidualStack


class MelGANGenerator(torch.nn.Module):
    """MelGAN generator module."""

    def __init__(self,
                 in_channels=80,
                 out_channels=1,
                 kernel_size=7,
                 channels=512,
                 bias=True,
                 upsample_scales=[8, 8, 2, 2],
                 stack_kernel_size=3,
                 stacks=3,
                 nonlinear_activation="LeakyReLU",
                 nonlinear_activation_params={"negative_slope": 0.2},
                 pad="ReflectionPad1d",
                 pad_params={},
                 use_final_nonlinear_activation=True,
                 use_weight_norm=True,
                 use_causal_conv=False,
                 ):
        """Initialize MelGANGenerator module.

        Args:
            in_channels (int): Number of input channels.
            out_channels (int): Number of output channels.
            kernel_size (int): Kernel size of initial and final conv layer.
            channels (int): Initial number of channels for conv layer.
            bias (bool): Whether to add bias parameter in convolution layers.
            upsample_scales (list): List of upsampling scales.
            stack_kernel_size (int): Kernel size of dilated conv layers in residual stack.
            stacks (int): Number of stacks in a single residual stack.
            nonlinear_activation (str): Activation function module name.
            nonlinear_activation_params (dict): Hyperparameters for activation function.
            pad (str): Padding function module name before dilated convolution layer.
            pad_params (dict): Hyperparameters for padding function.
            use_final_nonlinear_activation (bool): Whether to apply a final nonlinear activation (Tanh).
            use_weight_norm (bool): Whether to use weight norm.
                If set to true, it will be applied to all of the conv layers.
            use_causal_conv (bool): Whether to use causal convolution.

        """
        super(MelGANGenerator, self).__init__()

        # check hyper parameters are valid
        assert channels >= np.prod(upsample_scales)
        assert channels % (2 ** len(upsample_scales)) == 0
        if not use_causal_conv:
            assert (kernel_size - 1) % 2 == 0, "Not support even number kernel size."

        # add initial layer
        layers = []
        if not use_causal_conv:
            layers += [
                getattr(torch.nn, pad)((kernel_size - 1) // 2, **pad_params),
                torch.nn.Conv1d(in_channels, channels, kernel_size, bias=bias),
            ]
        else:
            layers += [
                CausalConv1d(in_channels, channels, kernel_size,
                             bias=bias, pad=pad, pad_params=pad_params),
            ]

        for i, upsample_scale in enumerate(upsample_scales):
            # add upsampling layer
            layers += [getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params)]
            if not use_causal_conv:
                layers += [
                    torch.nn.ConvTranspose1d(
                        channels // (2 ** i),
                        channels // (2 ** (i + 1)),
                        upsample_scale * 2,
                        stride=upsample_scale,
                        padding=upsample_scale // 2 + upsample_scale % 2,
                        output_padding=upsample_scale % 2,
                        bias=bias,
                    )
                ]
            else:
                layers += [
                    CausalConvTranspose1d(
                        channels // (2 ** i),
                        channels // (2 ** (i + 1)),
                        upsample_scale * 2,
                        stride=upsample_scale,
                        bias=bias,
                    )
                ]

            # add residual stack
            for j in range(stacks):
                layers += [
                    ResidualStack(
                        kernel_size=stack_kernel_size,
                        channels=channels // (2 ** (i + 1)),
                        dilation=stack_kernel_size ** j,
                        bias=bias,
                        nonlinear_activation=nonlinear_activation,
                        nonlinear_activation_params=nonlinear_activation_params,
                        pad=pad,
                        pad_params=pad_params,
                        use_causal_conv=use_causal_conv,
                    )
                ]

        # add final layer
        layers += [getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params)]
        if not use_causal_conv:
            layers += [
                getattr(torch.nn, pad)((kernel_size - 1) // 2, **pad_params),
                torch.nn.Conv1d(channels // (2 ** (i + 1)), out_channels, kernel_size, bias=bias),
            ]
        else:
            layers += [
                CausalConv1d(channels // (2 ** (i + 1)), out_channels, kernel_size,
                             bias=bias, pad=pad, pad_params=pad_params),
            ]
        if use_final_nonlinear_activation:
            layers += [torch.nn.Tanh()]

        # define the model as a single function
        self.melgan = torch.nn.Sequential(*layers)

        # apply weight norm
        if use_weight_norm:
            self.apply_weight_norm()

        # reset parameters
        self.reset_parameters()

    def forward(self, c):
        """Calculate forward propagation.

        Args:
            c (Tensor): Input tensor (B, channels, T).

        Returns:
            Tensor: Output tensor (B, 1, T * prod(upsample_scales)).

        """
        return self.melgan(c)

    def remove_weight_norm(self):
        """Remove weight normalization module from all of the layers."""
        def _remove_weight_norm(m):
            try:
                logging.debug(f"Weight norm is removed from {m}.")
                torch.nn.utils.remove_weight_norm(m)
            except ValueError:  # this module didn't have weight norm
                return

        self.apply(_remove_weight_norm)

    def apply_weight_norm(self):
        """Apply weight normalization module to all of the layers."""
        def _apply_weight_norm(m):
            if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.ConvTranspose1d):
                torch.nn.utils.weight_norm(m)
                logging.debug(f"Weight norm is applied to {m}.")

        self.apply(_apply_weight_norm)

    def reset_parameters(self):
        """Reset parameters.

        This initialization follows official implementation manner.
        https://github.com/descriptinc/melgan-neurips/blob/master/spec2wav/modules.py

        """
        def _reset_parameters(m):
            if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.ConvTranspose1d):
                m.weight.data.normal_(0.0, 0.02)
                logging.debug(f"Reset parameters in {m}.")

        self.apply(_reset_parameters)


class MelGANDiscriminator(torch.nn.Module):
    """MelGAN discriminator module."""

    def __init__(self,
                 in_channels=1,
                 out_channels=1,
                 kernel_sizes=[5, 3],
                 channels=16,
                 max_downsample_channels=1024,
                 bias=True,
                 downsample_scales=[4, 4, 4, 4],
                 nonlinear_activation="LeakyReLU",
                 nonlinear_activation_params={"negative_slope": 0.2},
                 pad="ReflectionPad1d",
                 pad_params={},
                 ):
        """Initialize MelGAN discriminator module.

        Args:
            in_channels (int): Number of input channels.
            out_channels (int): Number of output channels.
            kernel_sizes (list): List of two kernel sizes. The prod will be used for the first conv layer,
                and the first and the second kernel sizes will be used for the last two layers.
                For example if kernel_sizes = [5, 3], the first layer kernel size will be 5 * 3 = 15,
                the last two layers' kernel size will be 5 and 3, respectively.
            channels (int): Initial number of channels for conv layer.
            max_downsample_channels (int): Maximum number of channels for downsampling layers.
            bias (bool): Whether to add bias parameter in convolution layers.
            downsample_scales (list): List of downsampling scales.
            nonlinear_activation (str): Activation function module name.
            nonlinear_activation_params (dict): Hyperparameters for activation function.
            pad (str): Padding function module name before dilated convolution layer.
            pad_params (dict): Hyperparameters for padding function.

        """
        super(MelGANDiscriminator, self).__init__()
        self.layers = torch.nn.ModuleList()

        # check kernel size is valid
        assert len(kernel_sizes) == 2
        assert kernel_sizes[0] % 2 == 1
        assert kernel_sizes[1] % 2 == 1

        # add first layer
        self.layers += [
            torch.nn.Sequential(
                getattr(torch.nn, pad)((np.prod(kernel_sizes) - 1) // 2, **pad_params),
                torch.nn.Conv1d(in_channels, channels, np.prod(kernel_sizes), bias=bias),
                getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params),
            )
        ]

        # add downsample layers
        in_chs = channels
        for downsample_scale in downsample_scales:
            out_chs = min(in_chs * downsample_scale, max_downsample_channels)
            self.layers += [
                torch.nn.Sequential(
                    torch.nn.Conv1d(
                        in_chs, out_chs,
                        kernel_size=downsample_scale * 10 + 1,
                        stride=downsample_scale,
                        padding=downsample_scale * 5,
                        groups=in_chs // 4,
                        bias=bias,
                    ),
                    getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params),
                )
            ]
            in_chs = out_chs

        # add final layers
        out_chs = min(in_chs * 2, max_downsample_channels)
        self.layers += [
            torch.nn.Sequential(
                torch.nn.Conv1d(
                    in_chs, out_chs, kernel_sizes[0],
                    padding=(kernel_sizes[0] - 1) // 2,
                    bias=bias,
                ),
                getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params),
            )
        ]
        self.layers += [
            torch.nn.Conv1d(
                out_chs, out_channels, kernel_sizes[1],
                padding=(kernel_sizes[1] - 1) // 2,
                bias=bias,
            ),
        ]

    def forward(self, x):
        """Calculate forward propagation.

        Args:
            x (Tensor): Input noise signal (B, 1, T).

        Returns:
            List: List of output tensors of each layer.

        """
        outs = []
        for f in self.layers:
            x = f(x)
            outs += [x]

        return outs


class MelGANMultiScaleDiscriminator(torch.nn.Module):
    """MelGAN multi-scale discriminator module."""

    def __init__(self,
                 in_channels=1,
                 out_channels=1,
                 scales=3,
                 downsample_pooling="AvgPool1d",
                 # follow the official implementation setting
                 downsample_pooling_params={
                     "kernel_size": 4,
                     "stride": 2,
                     "padding": 1,
                     "count_include_pad": False,
                 },
                 kernel_sizes=[5, 3],
                 channels=16,
                 max_downsample_channels=1024,
                 bias=True,
                 downsample_scales=[4, 4, 4, 4],
                 nonlinear_activation="LeakyReLU",
                 nonlinear_activation_params={"negative_slope": 0.2},
                 pad="ReflectionPad1d",
                 pad_params={},
                 use_weight_norm=True,
                 ):
        """Initialize MelGAN multi-scale discriminator module.

        Args:
            in_channels (int): Number of input channels.
            out_channels (int): Number of output channels.
            scales (int): Number of multi-scale discriminators.
            downsample_pooling (str): Pooling module name for downsampling of the inputs.
            downsample_pooling_params (dict): Parameters for the above pooling module.
            kernel_sizes (list): List of two kernel sizes. The prod will be used for the first conv layer,
                and the first and the second kernel sizes will be used for the last two layers.
            channels (int): Initial number of channels for conv layer.
            max_downsample_channels (int): Maximum number of channels for downsampling layers.
            bias (bool): Whether to add bias parameter in convolution layers.
            downsample_scales (list): List of downsampling scales.
            nonlinear_activation (str): Activation function module name.
            nonlinear_activation_params (dict): Hyperparameters for activation function.
            pad (str): Padding function module name before dilated convolution layer.
            pad_params (dict): Hyperparameters for padding function.
            use_weight_norm (bool): Whether to use weight norm.

        """
        super(MelGANMultiScaleDiscriminator, self).__init__()
        self.discriminators = torch.nn.ModuleList()

        # add discriminators
        for _ in range(scales):
            self.discriminators += [
                MelGANDiscriminator(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_sizes=kernel_sizes,
                    channels=channels,
                    max_downsample_channels=max_downsample_channels,
                    bias=bias,
                    downsample_scales=downsample_scales,
                    nonlinear_activation=nonlinear_activation,
                    nonlinear_activation_params=nonlinear_activation_params,
                    pad=pad,
                    pad_params=pad_params,
                )
            ]
        self.pooling = getattr(torch.nn, downsample_pooling)(**downsample_pooling_params)

        # apply weight norm
        if use_weight_norm:
            self.apply_weight_norm()

        # reset parameters
        self.reset_parameters()

    def forward(self, x):
        """Calculate forward propagation.

        Args:
            x (Tensor): Input noise signal (B, 1, T).

        Returns:
            List: List of list of each discriminator outputs, which consists of each layer output tensors.

        """
        outs = []
        for f in self.discriminators:
            outs += [f(x)]
            x = self.pooling(x)

        return outs

    def remove_weight_norm(self):
        """Remove weight normalization module from all of the layers."""
        def _remove_weight_norm(m):
            try:
                logging.debug(f"Weight norm is removed from {m}.")
                torch.nn.utils.remove_weight_norm(m)
            except ValueError:  # this module didn't have weight norm
                return

        self.apply(_remove_weight_norm)

    def apply_weight_norm(self):
        """Apply weight normalization module to all of the layers."""
        def _apply_weight_norm(m):
            if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.ConvTranspose1d):
                torch.nn.utils.weight_norm(m)
                logging.debug(f"Weight norm is applied to {m}.")

        self.apply(_apply_weight_norm)

    def reset_parameters(self):
        """Reset parameters.

        This initialization follows official implementation manner.
        https://github.com/descriptinc/melgan-neurips/blob/master/spec2wav/modules.py

        """
        def _reset_parameters(m):
            if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.ConvTranspose1d):
                m.weight.data.normal_(0.0, 0.02)
                logging.debug(f"Reset parameters in {m}.")

        self.apply(_reset_parameters)
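As a quick smoke test of the generator above (a sketch, assuming the `modules.parallel_wavegan.layers` imports resolve in your environment): with the default `upsample_scales=[8, 8, 2, 2]` each mel frame is upsampled by 8 * 8 * 2 * 2 = 256 samples, and since the initial and final convolutions preserve length, the output is exactly 256 times longer in time than the input.

import torch

model = MelGANGenerator()      # defaults: in_channels=80, upsample_scales=[8, 8, 2, 2]
mel = torch.randn(1, 80, 100)  # (B, in_channels, T): a hypothetical 100-frame mel-spectrogram
with torch.no_grad():
    wav = model(mel)           # (B, 1, T * prod(upsample_scales))
assert wav.shape == (1, 1, 100 * 256)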
spaces/AIGC-Audio/AudioGPT/text_to_speech/tasks/tts/fs.py
DELETED
@@ -1,196 +0,0 @@
import torch
import torch.distributions
import torch.nn.functional as F
import torch.optim
import torch.utils.data

from text_to_speech.modules.tts.fs import FastSpeech
from tasks.tts.dataset_utils import FastSpeechWordDataset
from tasks.tts.speech_base import SpeechBaseTask
from text_to_speech.utils.audio.align import mel2token_to_dur
from text_to_speech.utils.audio.pitch.utils import denorm_f0
from text_to_speech.utils.commons.hparams import hparams


class FastSpeechTask(SpeechBaseTask):
    def __init__(self):
        super().__init__()
        self.dataset_cls = FastSpeechWordDataset
        self.sil_ph = self.token_encoder.sil_phonemes()

    def build_tts_model(self):
        dict_size = len(self.token_encoder)
        self.model = FastSpeech(dict_size, hparams)

    def run_model(self, sample, infer=False, *args, **kwargs):
        txt_tokens = sample['txt_tokens']  # [B, T_t]
        spk_embed = sample.get('spk_embed')
        spk_id = sample.get('spk_ids')
        if not infer:
            target = sample['mels']  # [B, T_s, 80]
            mel2ph = sample['mel2ph']  # [B, T_s]
            f0 = sample.get('f0')
            uv = sample.get('uv')
            output = self.model(txt_tokens, mel2ph=mel2ph, spk_embed=spk_embed, spk_id=spk_id,
                                f0=f0, uv=uv, infer=False,
                                ph2word=sample['ph2word'],
                                graph_lst=sample.get('graph_lst'),
                                etypes_lst=sample.get('etypes_lst'),
                                bert_feats=sample.get("bert_feats"),
                                cl_feats=sample.get("cl_feats")
                                )
            losses = {}
            self.add_mel_loss(output['mel_out'], target, losses)
            self.add_dur_loss(output['dur'], mel2ph, txt_tokens, losses=losses)
            if hparams['use_pitch_embed']:
                self.add_pitch_loss(output, sample, losses)
            return losses, output
        else:
            use_gt_dur = kwargs.get('infer_use_gt_dur', hparams['use_gt_dur'])
            use_gt_f0 = kwargs.get('infer_use_gt_f0', hparams['use_gt_f0'])
            mel2ph, uv, f0 = None, None, None
            if use_gt_dur:
                mel2ph = sample['mel2ph']
            if use_gt_f0:
                f0 = sample['f0']
                uv = sample['uv']
            output = self.model(txt_tokens, mel2ph=mel2ph, spk_embed=spk_embed, spk_id=spk_id,
                                f0=f0, uv=uv, infer=True,
                                ph2word=sample['ph2word'],
                                graph_lst=sample.get('graph_lst'),
                                etypes_lst=sample.get('etypes_lst'),
                                bert_feats=sample.get("bert_feats"),
                                cl_feats=sample.get("cl_feats")
                                )
            return output

    def add_dur_loss(self, dur_pred, mel2ph, txt_tokens, losses=None):
        """

        :param dur_pred: [B, T], float, log scale
        :param mel2ph: [B, T]
        :param txt_tokens: [B, T]
        :param losses:
        :return:
        """
        B, T = txt_tokens.shape
        nonpadding = (txt_tokens != 0).float()
        dur_gt = mel2token_to_dur(mel2ph, T).float() * nonpadding
        is_sil = torch.zeros_like(txt_tokens).bool()
        for p in self.sil_ph:
            is_sil = is_sil | (txt_tokens == self.token_encoder.encode(p)[0])
        is_sil = is_sil.float()  # [B, T_txt]
        losses['pdur'] = F.mse_loss((dur_pred + 1).log(), (dur_gt + 1).log(), reduction='none')
        losses['pdur'] = (losses['pdur'] * nonpadding).sum() / nonpadding.sum()
        losses['pdur'] = losses['pdur'] * hparams['lambda_ph_dur']
        # use linear scale for sentence and word duration
        if hparams['lambda_word_dur'] > 0:
            word_id = (is_sil.cumsum(-1) * (1 - is_sil)).long()
            word_dur_p = dur_pred.new_zeros([B, word_id.max() + 1]).scatter_add(1, word_id, dur_pred)[:, 1:]
            word_dur_g = dur_gt.new_zeros([B, word_id.max() + 1]).scatter_add(1, word_id, dur_gt)[:, 1:]
            wdur_loss = F.mse_loss((word_dur_p + 1).log(), (word_dur_g + 1).log(), reduction='none')
            word_nonpadding = (word_dur_g > 0).float()
            wdur_loss = (wdur_loss * word_nonpadding).sum() / word_nonpadding.sum()
            losses['wdur'] = wdur_loss * hparams['lambda_word_dur']
        if hparams['lambda_sent_dur'] > 0:
            sent_dur_p = dur_pred.sum(-1)
            sent_dur_g = dur_gt.sum(-1)
            sdur_loss = F.mse_loss((sent_dur_p + 1).log(), (sent_dur_g + 1).log(), reduction='mean')
            losses['sdur'] = sdur_loss.mean() * hparams['lambda_sent_dur']

    def add_pitch_loss(self, output, sample, losses):
        mel2ph = sample['mel2ph']  # [B, T_s]
        f0 = sample['f0']
        uv = sample['uv']
        nonpadding = (mel2ph != 0).float() if hparams['pitch_type'] == 'frame' \
            else (sample['txt_tokens'] != 0).float()
        p_pred = output['pitch_pred']
        assert p_pred[..., 0].shape == f0.shape
        if hparams['use_uv'] and hparams['pitch_type'] == 'frame':
            assert p_pred[..., 1].shape == uv.shape, (p_pred.shape, uv.shape)
            losses['uv'] = (F.binary_cross_entropy_with_logits(
                p_pred[:, :, 1], uv, reduction='none') * nonpadding).sum() \
                           / nonpadding.sum() * hparams['lambda_uv']
            nonpadding = nonpadding * (uv == 0).float()
        f0_pred = p_pred[:, :, 0]
        losses['f0'] = (F.l1_loss(f0_pred, f0, reduction='none') * nonpadding).sum() \
                       / nonpadding.sum() * hparams['lambda_f0']

    def save_valid_result(self, sample, batch_idx, model_out):
        sr = hparams['audio_sample_rate']
        f0_gt = None
        mel_out = model_out['mel_out']
        if sample.get('f0') is not None:
            f0_gt = denorm_f0(sample['f0'][0].cpu(), sample['uv'][0].cpu())
        self.plot_mel(batch_idx, sample['mels'], mel_out, f0s=f0_gt)
        if self.global_step > 0:
            wav_pred = self.vocoder.spec2wav(mel_out[0].cpu(), f0=f0_gt)
            self.logger.add_audio(f'wav_val_{batch_idx}', wav_pred, self.global_step, sr)
            # with gt duration
            model_out = self.run_model(sample, infer=True, infer_use_gt_dur=True)
            dur_info = self.get_plot_dur_info(sample, model_out)
            del dur_info['dur_pred']
            wav_pred = self.vocoder.spec2wav(model_out['mel_out'][0].cpu(), f0=f0_gt)
            self.logger.add_audio(f'wav_gdur_{batch_idx}', wav_pred, self.global_step, sr)
            self.plot_mel(batch_idx, sample['mels'], model_out['mel_out'][0], f'mel_gdur_{batch_idx}',
                          dur_info=dur_info, f0s=f0_gt)

            # with pred duration
            if not hparams['use_gt_dur']:
                model_out = self.run_model(sample, infer=True, infer_use_gt_dur=False)
                dur_info = self.get_plot_dur_info(sample, model_out)
                self.plot_mel(batch_idx, sample['mels'], model_out['mel_out'][0], f'mel_pdur_{batch_idx}',
                              dur_info=dur_info, f0s=f0_gt)
                wav_pred = self.vocoder.spec2wav(model_out['mel_out'][0].cpu(), f0=f0_gt)
                self.logger.add_audio(f'wav_pdur_{batch_idx}', wav_pred, self.global_step, sr)
        # gt wav
        if self.global_step <= hparams['valid_infer_interval']:
            mel_gt = sample['mels'][0].cpu()
            wav_gt = self.vocoder.spec2wav(mel_gt, f0=f0_gt)
            self.logger.add_audio(f'wav_gt_{batch_idx}', wav_gt, self.global_step, sr)

    def get_plot_dur_info(self, sample, model_out):
        T_txt = sample['txt_tokens'].shape[1]
        dur_gt = mel2token_to_dur(sample['mel2ph'], T_txt)[0]
        dur_pred = model_out['dur'] if 'dur' in model_out else dur_gt
        txt = self.token_encoder.decode(sample['txt_tokens'][0].cpu().numpy())
        txt = txt.split(" ")
        return {'dur_gt': dur_gt, 'dur_pred': dur_pred, 'txt': txt}

    def test_step(self, sample, batch_idx):
        """

        :param sample:
        :param batch_idx:
        :return:
        """
        assert sample['txt_tokens'].shape[0] == 1, 'only support batch_size=1 in inference'
        outputs = self.run_model(sample, infer=True)
        text = sample['text'][0]
        item_name = sample['item_name'][0]
        tokens = sample['txt_tokens'][0].cpu().numpy()
        mel_gt = sample['mels'][0].cpu().numpy()
        mel_pred = outputs['mel_out'][0].cpu().numpy()
        mel2ph = sample['mel2ph'][0].cpu().numpy()
        mel2ph_pred = outputs['mel2ph'][0].cpu().numpy()
        str_phs = self.token_encoder.decode(tokens, strip_padding=True)
        base_fn = f'[{batch_idx:06d}][{item_name.replace("%", "_")}][%s]'
        if text is not None:
            base_fn += text.replace(":", "$3A")[:80]
        base_fn = base_fn.replace(' ', '_')
        gen_dir = self.gen_dir
        wav_pred = self.vocoder.spec2wav(mel_pred)
        self.saving_result_pool.add_job(self.save_result, args=[
            wav_pred, mel_pred, base_fn % 'P', gen_dir, str_phs, mel2ph_pred])
        if hparams['save_gt']:
            wav_gt = self.vocoder.spec2wav(mel_gt)
            self.saving_result_pool.add_job(self.save_result, args=[
                wav_gt, mel_gt, base_fn % 'G', gen_dir, str_phs, mel2ph])
        print(f"Pred_shape: {mel_pred.shape}, gt_shape: {mel_gt.shape}")
        return {
            'item_name': item_name,
            'text': text,
            'ph_tokens': self.token_encoder.decode(tokens.tolist()),
            'wav_fn_pred': base_fn % 'P',
            'wav_fn_gt': base_fn % 'G',
        }
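The least obvious step in `add_dur_loss` above is the word-level grouping: silence phones act as word delimiters, `cumsum` over the silence mask assigns a running word index to every non-silence phone, and `scatter_add` pools per-phone durations into per-word totals (bucket 0, which collects the silences themselves, is dropped). A minimal sketch with made-up tensors:

import torch

is_sil = torch.tensor([[1., 0., 0., 1., 0., 1.]])    # hypothetical silence mask, [B=1, T_txt=6]
dur = torch.tensor([[2., 3., 4., 1., 5., 2.]])       # hypothetical per-phone durations
word_id = (is_sil.cumsum(-1) * (1 - is_sil)).long()  # [[0, 1, 1, 0, 2, 0]]; 0 is the silence bucket
word_dur = dur.new_zeros([1, int(word_id.max()) + 1]).scatter_add(1, word_id, dur)[:, 1:]
# word_dur == [[7., 5.]]: phones at positions 1-2 form word 1 (3 + 4), position 4 forms word 2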
spaces/ALSv/FSW/README.md
DELETED
@@ -1,13 +0,0 @@
---
title: Apex_Face
emoji: 🖤
colorFrom: red
colorTo: blue
sdk: gradio
sdk_version: 3.41.2
app_file: app.py
pinned: true
license: bigcode-openrail-m
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-120e_deepfashion2_vest_256x192/td_hm_res50_4xb64-120e_deepfashion2_vest_256x192.py
DELETED
@@ -1,2861 +0,0 @@
default_scope = 'mmpose'
default_hooks = dict(
    timer=dict(type='IterTimerHook'),
    logger=dict(type='LoggerHook', interval=50),
    param_scheduler=dict(type='ParamSchedulerHook'),
    checkpoint=dict(type='CheckpointHook', interval=10, save_best='PCK', rule='greater'),
    sampler_seed=dict(type='DistSamplerSeedHook'),
    visualization=dict(type='PoseVisualizationHook', enable=False))
custom_hooks = [dict(type='SyncBuffersHook')]
env_cfg = dict(
    cudnn_benchmark=False,
    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
    dist_cfg=dict(backend='nccl'))
vis_backends = [dict(type='LocalVisBackend')]
visualizer = dict(
    type='PoseLocalVisualizer',
    vis_backends=[dict(type='LocalVisBackend'),
                  dict(type='WandbVisBackend')],
    name='visualizer')
log_processor = dict(type='LogProcessor', window_size=50, by_epoch=True, num_digits=6)
log_level = 'INFO'
load_from = None
resume = False
backend_args = dict(backend='local')
train_cfg = dict(by_epoch=True, max_epochs=120, val_interval=10)
val_cfg = dict()
test_cfg = dict()
colors = dict(
    sss=[255, 128, 0],
    lss=[255, 0, 128],
    sso=[128, 0, 255],
    lso=[0, 128, 255],
    vest=[0, 128, 128],
    sling=[0, 0, 128],
    shorts=[128, 128, 128],
    trousers=[128, 0, 128],
    skirt=[64, 128, 128],
    ssd=[64, 64, 128],
    lsd=[128, 64, 0],
    vd=[128, 64, 255],
    sd=[128, 64, 0])
dataset_info = dict(
    dataset_name='deepfashion2',
    paper_info=dict(
        author='Yuying Ge and Ruimao Zhang and Lingyun Wu and Xiaogang Wang and Xiaoou Tang and Ping Luo',
        title='DeepFashion2: A Versatile Benchmark for Detection, Pose Estimation, Segmentation and Re-Identification of Clothing Images',
        container='Proceedings of IEEE Conference on Computer Vision and Pattern Recognition (CVPR)',
        year='2019',
        homepage='https://github.com/switchablenorms/DeepFashion2'),
    keypoint_info=dict({
        0: dict(name='sss_kpt1', id=0, color=[255, 128, 0], type='', swap=''),
        1: dict(name='sss_kpt2', id=1, color=[255, 128, 0], type='', swap='sss_kpt6'),
        2: dict(name='sss_kpt3', id=2, color=[255, 128, 0], type='', swap='sss_kpt5'),
        3: dict(name='sss_kpt4', id=3, color=[255, 128, 0], type='', swap=''),
        4: dict(name='sss_kpt5', id=4, color=[255, 128, 0], type='', swap='sss_kpt3'),
        5: dict(name='sss_kpt6', id=5, color=[255, 128, 0], type='', swap='sss_kpt2'),
        6: dict(name='sss_kpt7', id=6, color=[255, 128, 0], type='', swap='sss_kpt25'),
        7: dict(name='sss_kpt8', id=7, color=[255, 128, 0], type='', swap='sss_kpt24'),
        8: dict(name='sss_kpt9', id=8, color=[255, 128, 0], type='', swap='sss_kpt23'),
        9: dict(name='sss_kpt10', id=9, color=[255, 128, 0], type='', swap='sss_kpt22'),
        10: dict(name='sss_kpt11', id=10, color=[255, 128, 0], type='', swap='sss_kpt21'),
        11: dict(name='sss_kpt12', id=11, color=[255, 128, 0], type='', swap='sss_kpt20'),
        12: dict(name='sss_kpt13', id=12, color=[255, 128, 0], type='', swap='sss_kpt19'),
        13: dict(name='sss_kpt14', id=13, color=[255, 128, 0], type='', swap='sss_kpt18'),
        14: dict(name='sss_kpt15', id=14, color=[255, 128, 0], type='', swap='sss_kpt17'),
        15: dict(name='sss_kpt16', id=15, color=[255, 128, 0], type='', swap=''),
        16: dict(name='sss_kpt17', id=16, color=[255, 128, 0], type='', swap='sss_kpt15'),
        17: dict(name='sss_kpt18', id=17, color=[255, 128, 0], type='', swap='sss_kpt14'),
        18: dict(name='sss_kpt19', id=18, color=[255, 128, 0], type='', swap='sss_kpt13'),
        19: dict(name='sss_kpt20', id=19, color=[255, 128, 0], type='', swap='sss_kpt12'),
        20: dict(name='sss_kpt21', id=20, color=[255, 128, 0], type='', swap='sss_kpt11'),
        21: dict(name='sss_kpt22', id=21, color=[255, 128, 0], type='', swap='sss_kpt10'),
        22: dict(name='sss_kpt23', id=22, color=[255, 128, 0], type='', swap='sss_kpt9'),
        23: dict(name='sss_kpt24', id=23, color=[255, 128, 0], type='', swap='sss_kpt8'),
        24: dict(name='sss_kpt25', id=24, color=[255, 128, 0], type='', swap='sss_kpt7'),
        25: dict(name='lss_kpt1', id=25, color=[255, 0, 128], type='', swap=''),
        26: dict(name='lss_kpt2', id=26, color=[255, 0, 128], type='', swap='lss_kpt6'),
        27: dict(name='lss_kpt3', id=27, color=[255, 0, 128], type='', swap='lss_kpt5'),
        28: dict(name='lss_kpt4', id=28, color=[255, 0, 128], type='', swap=''),
        29: dict(name='lss_kpt5', id=29, color=[255, 0, 128], type='', swap='lss_kpt3'),
        30: dict(name='lss_kpt6', id=30, color=[255, 0, 128], type='', swap='lss_kpt2'),
        31: dict(name='lss_kpt7', id=31, color=[255, 0, 128], type='', swap='lss_kpt33'),
        32: dict(name='lss_kpt8', id=32, color=[255, 0, 128], type='', swap='lss_kpt32'),
        33: dict(name='lss_kpt9', id=33, color=[255, 0, 128], type='', swap='lss_kpt31'),
        34: dict(name='lss_kpt10', id=34, color=[255, 0, 128], type='', swap='lss_kpt30'),
        35: dict(name='lss_kpt11', id=35, color=[255, 0, 128], type='', swap='lss_kpt29'),
        36: dict(name='lss_kpt12', id=36, color=[255, 0, 128], type='', swap='lss_kpt28'),
        37: dict(name='lss_kpt13', id=37, color=[255, 0, 128], type='', swap='lss_kpt27'),
        38: dict(name='lss_kpt14', id=38, color=[255, 0, 128], type='', swap='lss_kpt26'),
        39: dict(name='lss_kpt15', id=39, color=[255, 0, 128], type='', swap='lss_kpt25'),
        40: dict(name='lss_kpt16', id=40, color=[255, 0, 128], type='', swap='lss_kpt24'),
        41: dict(name='lss_kpt17', id=41, color=[255, 0, 128], type='', swap='lss_kpt23'),
        42: dict(name='lss_kpt18', id=42, color=[255, 0, 128], type='', swap='lss_kpt22'),
        43: dict(name='lss_kpt19', id=43, color=[255, 0, 128], type='', swap='lss_kpt21'),
        44: dict(name='lss_kpt20', id=44, color=[255, 0, 128], type='', swap=''),
        45: dict(name='lss_kpt21', id=45, color=[255, 0, 128], type='', swap='lss_kpt19'),
        46: dict(name='lss_kpt22', id=46, color=[255, 0, 128], type='', swap='lss_kpt18'),
        47: dict(name='lss_kpt23', id=47, color=[255, 0, 128], type='', swap='lss_kpt17'),
        48: dict(name='lss_kpt24', id=48, color=[255, 0, 128], type='', swap='lss_kpt16'),
        49: dict(name='lss_kpt25', id=49, color=[255, 0, 128], type='', swap='lss_kpt15'),
        50: dict(name='lss_kpt26', id=50, color=[255, 0, 128], type='', swap='lss_kpt14'),
        51: dict(name='lss_kpt27', id=51, color=[255, 0, 128], type='', swap='lss_kpt13'),
        52: dict(name='lss_kpt28', id=52, color=[255, 0, 128], type='', swap='lss_kpt12'),
        53: dict(name='lss_kpt29', id=53, color=[255, 0, 128], type='', swap='lss_kpt11'),
        54: dict(name='lss_kpt30', id=54, color=[255, 0, 128], type='', swap='lss_kpt10'),
        55: dict(name='lss_kpt31', id=55, color=[255, 0, 128], type='', swap='lss_kpt9'),
        56: dict(name='lss_kpt32', id=56, color=[255, 0, 128], type='', swap='lss_kpt8'),
        57: dict(name='lss_kpt33', id=57, color=[255, 0, 128], type='', swap='lss_kpt7'),
        58: dict(name='sso_kpt1', id=58, color=[128, 0, 255], type='', swap=''),
        59: dict(name='sso_kpt2', id=59, color=[128, 0, 255], type='', swap='sso_kpt26'),
        60: dict(name='sso_kpt3', id=60, color=[128, 0, 255], type='', swap='sso_kpt5'),
        61: dict(name='sso_kpt4', id=61, color=[128, 0, 255], type='', swap='sso_kpt6'),
        62: dict(name='sso_kpt5', id=62, color=[128, 0, 255], type='', swap='sso_kpt3'),
        63: dict(name='sso_kpt6', id=63, color=[128, 0, 255], type='', swap='sso_kpt4'),
        64: dict(name='sso_kpt7', id=64, color=[128, 0, 255], type='', swap='sso_kpt25'),
        65: dict(name='sso_kpt8', id=65, color=[128, 0, 255], type='', swap='sso_kpt24'),
        66: dict(name='sso_kpt9', id=66, color=[128, 0, 255], type='', swap='sso_kpt23'),
        67: dict(name='sso_kpt10', id=67, color=[128, 0, 255], type='', swap='sso_kpt22'),
        68: dict(name='sso_kpt11', id=68, color=[128, 0, 255], type='', swap='sso_kpt21'),
        69: dict(name='sso_kpt12', id=69, color=[128, 0, 255], type='', swap='sso_kpt20'),
        70: dict(name='sso_kpt13', id=70, color=[128, 0, 255], type='', swap='sso_kpt19'),
        71: dict(name='sso_kpt14', id=71, color=[128, 0, 255], type='', swap='sso_kpt18'),
        72: dict(name='sso_kpt15', id=72, color=[128, 0, 255], type='', swap='sso_kpt17'),
        73: dict(name='sso_kpt16', id=73, color=[128, 0, 255], type='', swap='sso_kpt29'),
        74: dict(name='sso_kpt17', id=74, color=[128, 0, 255], type='', swap='sso_kpt15'),
        75: dict(name='sso_kpt18', id=75, color=[128, 0, 255], type='', swap='sso_kpt14'),
        76: dict(name='sso_kpt19', id=76, color=[128, 0, 255], type='', swap='sso_kpt13'),
        77: dict(name='sso_kpt20', id=77, color=[128, 0, 255], type='', swap='sso_kpt12'),
        78: dict(name='sso_kpt21', id=78, color=[128, 0, 255], type='', swap='sso_kpt11'),
        79: dict(name='sso_kpt22', id=79, color=[128, 0, 255], type='', swap='sso_kpt10'),
        80: dict(name='sso_kpt23', id=80, color=[128, 0, 255], type='', swap='sso_kpt9'),
        81: dict(name='sso_kpt24', id=81, color=[128, 0, 255], type='', swap='sso_kpt8'),
        82: dict(name='sso_kpt25', id=82, color=[128, 0, 255], type='', swap='sso_kpt7'),
        83: dict(name='sso_kpt26', id=83, color=[128, 0, 255], type='', swap='sso_kpt2'),
        84: dict(name='sso_kpt27', id=84, color=[128, 0, 255], type='', swap='sso_kpt30'),
        85: dict(name='sso_kpt28', id=85, color=[128, 0, 255], type='', swap='sso_kpt31'),
        86: dict(name='sso_kpt29', id=86, color=[128, 0, 255], type='', swap='sso_kpt16'),
        87: dict(name='sso_kpt30', id=87, color=[128, 0, 255], type='', swap='sso_kpt27'),
        88: dict(name='sso_kpt31', id=88, color=[128, 0, 255], type='', swap='sso_kpt28'),
        89: dict(name='lso_kpt1', id=89, color=[0, 128, 255], type='', swap=''),
        90: dict(name='lso_kpt2', id=90, color=[0, 128, 255], type='', swap='lso_kpt6'),
        91: dict(name='lso_kpt3', id=91, color=[0, 128, 255], type='', swap='lso_kpt5'),
        92: dict(name='lso_kpt4', id=92, color=[0, 128, 255], type='', swap='lso_kpt34'),
        93: dict(name='lso_kpt5', id=93, color=[0, 128, 255], type='', swap='lso_kpt3'),
        94: dict(name='lso_kpt6', id=94, color=[0, 128, 255], type='', swap='lso_kpt2'),
        95: dict(name='lso_kpt7', id=95, color=[0, 128, 255], type='', swap='lso_kpt33'),
        96: dict(name='lso_kpt8', id=96, color=[0, 128, 255], type='', swap='lso_kpt32'),
        97: dict(name='lso_kpt9', id=97, color=[0, 128, 255], type='', swap='lso_kpt31'),
        98: dict(name='lso_kpt10', id=98, color=[0, 128, 255], type='', swap='lso_kpt30'),
        99: dict(name='lso_kpt11', id=99, color=[0, 128, 255], type='', swap='lso_kpt29'),
        100: dict(name='lso_kpt12', id=100, color=[0, 128, 255], type='', swap='lso_kpt28'),
        101: dict(name='lso_kpt13', id=101, color=[0, 128, 255], type='', swap='lso_kpt27'),
        102: dict(name='lso_kpt14', id=102, color=[0, 128, 255], type='', swap='lso_kpt26'),
        103: dict(name='lso_kpt15', id=103, color=[0, 128, 255], type='', swap='lso_kpt25'),
        104: dict(name='lso_kpt16', id=104, color=[0, 128, 255], type='', swap='lso_kpt24'),
        105: dict(name='lso_kpt17', id=105, color=[0, 128, 255], type='', swap='lso_kpt23'),
        106: dict(name='lso_kpt18', id=106, color=[0, 128, 255], type='', swap='lso_kpt22'),
        107: dict(name='lso_kpt19', id=107, color=[0, 128, 255], type='', swap='lso_kpt21'),
        108: dict(name='lso_kpt20', id=108, color=[0, 128, 255], type='', swap='lso_kpt37'),
        109: dict(name='lso_kpt21', id=109, color=[0, 128, 255], type='', swap='lso_kpt19'),
        110: dict(name='lso_kpt22', id=110, color=[0, 128, 255], type='', swap='lso_kpt18'),
        111: dict(name='lso_kpt23', id=111, color=[0, 128, 255], type='', swap='lso_kpt17'),
        112: dict(name='lso_kpt24', id=112, color=[0, 128, 255], type='', swap='lso_kpt16'),
        113: dict(name='lso_kpt25', id=113, color=[0, 128, 255], type='', swap='lso_kpt15'),
        114: dict(name='lso_kpt26', id=114, color=[0, 128, 255], type='', swap='lso_kpt14'),
        115: dict(name='lso_kpt27', id=115, color=[0, 128, 255], type='', swap='lso_kpt13'),
        116: dict(name='lso_kpt28', id=116, color=[0, 128, 255], type='', swap='lso_kpt12'),
        117: dict(name='lso_kpt29', id=117, color=[0, 128, 255], type='', swap='lso_kpt11'),
        118: dict(name='lso_kpt30', id=118, color=[0, 128, 255], type='', swap='lso_kpt10'),
        119: dict(name='lso_kpt31', id=119, color=[0, 128, 255], type='', swap='lso_kpt9'),
        120: dict(name='lso_kpt32', id=120, color=[0, 128, 255], type='', swap='lso_kpt8'),
        121: dict(name='lso_kpt33', id=121, color=[0, 128, 255], type='', swap='lso_kpt7'),
        122: dict(name='lso_kpt34', id=122, color=[0, 128, 255], type='', swap='lso_kpt4'),
        123: dict(name='lso_kpt35', id=123, color=[0, 128, 255], type='', swap='lso_kpt38'),
        124: dict(name='lso_kpt36', id=124, color=[0, 128, 255], type='', swap='lso_kpt39'),
        125: dict(name='lso_kpt37', id=125, color=[0, 128, 255], type='', swap='lso_kpt20'),
        126: dict(name='lso_kpt38', id=126, color=[0, 128, 255], type='', swap='lso_kpt35'),
        127: dict(name='lso_kpt39', id=127, color=[0, 128, 255], type='', swap='lso_kpt36'),
        128: dict(name='vest_kpt1', id=128, color=[0, 128, 128], type='', swap=''),
        129: dict(name='vest_kpt2', id=129, color=[0, 128, 128], type='', swap='vest_kpt6'),
        130: dict(name='vest_kpt3', id=130, color=[0, 128, 128], type='', swap='vest_kpt5'),
        131: dict(name='vest_kpt4', id=131, color=[0, 128, 128], type='', swap=''),
        132: dict(name='vest_kpt5', id=132, color=[0, 128, 128], type='', swap='vest_kpt3'),
        133: dict(name='vest_kpt6', id=133, color=[0, 128, 128], type='', swap='vest_kpt2'),
        134: dict(name='vest_kpt7', id=134, color=[0, 128, 128], type='', swap='vest_kpt15'),
        135: dict(name='vest_kpt8', id=135, color=[0, 128, 128],
|
956 |
-
type='',
|
957 |
-
swap='vest_kpt14'),
|
958 |
-
136:
|
959 |
-
dict(
|
960 |
-
name='vest_kpt9',
|
961 |
-
id=136,
|
962 |
-
color=[0, 128, 128],
|
963 |
-
type='',
|
964 |
-
swap='vest_kpt13'),
|
965 |
-
137:
|
966 |
-
dict(
|
967 |
-
name='vest_kpt10',
|
968 |
-
id=137,
|
969 |
-
color=[0, 128, 128],
|
970 |
-
type='',
|
971 |
-
swap='vest_kpt12'),
|
972 |
-
138:
|
973 |
-
dict(name='vest_kpt11', id=138, color=[0, 128, 128], type='', swap=''),
|
974 |
-
139:
|
975 |
-
dict(
|
976 |
-
name='vest_kpt12',
|
977 |
-
id=139,
|
978 |
-
color=[0, 128, 128],
|
979 |
-
type='',
|
980 |
-
swap='vest_kpt10'),
|
981 |
-
140:
|
982 |
-
dict(name='vest_kpt13', id=140, color=[0, 128, 128], type='', swap=''),
|
983 |
-
141:
|
984 |
-
dict(
|
985 |
-
name='vest_kpt14',
|
986 |
-
id=141,
|
987 |
-
color=[0, 128, 128],
|
988 |
-
type='',
|
989 |
-
swap='vest_kpt8'),
|
990 |
-
142:
|
991 |
-
dict(
|
992 |
-
name='vest_kpt15',
|
993 |
-
id=142,
|
994 |
-
color=[0, 128, 128],
|
995 |
-
type='',
|
996 |
-
swap='vest_kpt7'),
|
997 |
-
143:
|
998 |
-
dict(name='sling_kpt1', id=143, color=[0, 0, 128], type='', swap=''),
|
999 |
-
144:
|
1000 |
-
dict(
|
1001 |
-
name='sling_kpt2',
|
1002 |
-
id=144,
|
1003 |
-
color=[0, 0, 128],
|
1004 |
-
type='',
|
1005 |
-
swap='sling_kpt6'),
|
1006 |
-
145:
|
1007 |
-
dict(
|
1008 |
-
name='sling_kpt3',
|
1009 |
-
id=145,
|
1010 |
-
color=[0, 0, 128],
|
1011 |
-
type='',
|
1012 |
-
swap='sling_kpt5'),
|
1013 |
-
146:
|
1014 |
-
dict(name='sling_kpt4', id=146, color=[0, 0, 128], type='', swap=''),
|
1015 |
-
147:
|
1016 |
-
dict(
|
1017 |
-
name='sling_kpt5',
|
1018 |
-
id=147,
|
1019 |
-
color=[0, 0, 128],
|
1020 |
-
type='',
|
1021 |
-
swap='sling_kpt3'),
|
1022 |
-
148:
|
1023 |
-
dict(
|
1024 |
-
name='sling_kpt6',
|
1025 |
-
id=148,
|
1026 |
-
color=[0, 0, 128],
|
1027 |
-
type='',
|
1028 |
-
swap='sling_kpt2'),
|
1029 |
-
149:
|
1030 |
-
dict(
|
1031 |
-
name='sling_kpt7',
|
1032 |
-
id=149,
|
1033 |
-
color=[0, 0, 128],
|
1034 |
-
type='',
|
1035 |
-
swap='sling_kpt15'),
|
1036 |
-
150:
|
1037 |
-
dict(
|
1038 |
-
name='sling_kpt8',
|
1039 |
-
id=150,
|
1040 |
-
color=[0, 0, 128],
|
1041 |
-
type='',
|
1042 |
-
swap='sling_kpt14'),
|
1043 |
-
151:
|
1044 |
-
dict(
|
1045 |
-
name='sling_kpt9',
|
1046 |
-
id=151,
|
1047 |
-
color=[0, 0, 128],
|
1048 |
-
type='',
|
1049 |
-
swap='sling_kpt13'),
|
1050 |
-
152:
|
1051 |
-
dict(
|
1052 |
-
name='sling_kpt10',
|
1053 |
-
id=152,
|
1054 |
-
color=[0, 0, 128],
|
1055 |
-
type='',
|
1056 |
-
swap='sling_kpt12'),
|
1057 |
-
153:
|
1058 |
-
dict(name='sling_kpt11', id=153, color=[0, 0, 128], type='', swap=''),
|
1059 |
-
154:
|
1060 |
-
dict(
|
1061 |
-
name='sling_kpt12',
|
1062 |
-
id=154,
|
1063 |
-
color=[0, 0, 128],
|
1064 |
-
type='',
|
1065 |
-
swap='sling_kpt10'),
|
1066 |
-
155:
|
1067 |
-
dict(
|
1068 |
-
name='sling_kpt13',
|
1069 |
-
id=155,
|
1070 |
-
color=[0, 0, 128],
|
1071 |
-
type='',
|
1072 |
-
swap='sling_kpt9'),
|
1073 |
-
156:
|
1074 |
-
dict(
|
1075 |
-
name='sling_kpt14',
|
1076 |
-
id=156,
|
1077 |
-
color=[0, 0, 128],
|
1078 |
-
type='',
|
1079 |
-
swap='sling_kpt8'),
|
1080 |
-
157:
|
1081 |
-
dict(
|
1082 |
-
name='sling_kpt15',
|
1083 |
-
id=157,
|
1084 |
-
color=[0, 0, 128],
|
1085 |
-
type='',
|
1086 |
-
swap='sling_kpt7'),
|
1087 |
-
158:
|
1088 |
-
dict(
|
1089 |
-
name='shorts_kpt1',
|
1090 |
-
id=158,
|
1091 |
-
color=[128, 128, 128],
|
1092 |
-
type='',
|
1093 |
-
swap='shorts_kpt3'),
|
1094 |
-
159:
|
1095 |
-
dict(
|
1096 |
-
name='shorts_kpt2',
|
1097 |
-
id=159,
|
1098 |
-
color=[128, 128, 128],
|
1099 |
-
type='',
|
1100 |
-
swap=''),
|
1101 |
-
160:
|
1102 |
-
dict(
|
1103 |
-
name='shorts_kpt3',
|
1104 |
-
id=160,
|
1105 |
-
color=[128, 128, 128],
|
1106 |
-
type='',
|
1107 |
-
swap='shorts_kpt1'),
|
1108 |
-
161:
|
1109 |
-
dict(
|
1110 |
-
name='shorts_kpt4',
|
1111 |
-
id=161,
|
1112 |
-
color=[128, 128, 128],
|
1113 |
-
type='',
|
1114 |
-
swap='shorts_kpt10'),
|
1115 |
-
162:
|
1116 |
-
dict(
|
1117 |
-
name='shorts_kpt5',
|
1118 |
-
id=162,
|
1119 |
-
color=[128, 128, 128],
|
1120 |
-
type='',
|
1121 |
-
swap='shorts_kpt9'),
|
1122 |
-
163:
|
1123 |
-
dict(
|
1124 |
-
name='shorts_kpt6',
|
1125 |
-
id=163,
|
1126 |
-
color=[128, 128, 128],
|
1127 |
-
type='',
|
1128 |
-
swap='shorts_kpt8'),
|
1129 |
-
164:
|
1130 |
-
dict(
|
1131 |
-
name='shorts_kpt7',
|
1132 |
-
id=164,
|
1133 |
-
color=[128, 128, 128],
|
1134 |
-
type='',
|
1135 |
-
swap=''),
|
1136 |
-
165:
|
1137 |
-
dict(
|
1138 |
-
name='shorts_kpt8',
|
1139 |
-
id=165,
|
1140 |
-
color=[128, 128, 128],
|
1141 |
-
type='',
|
1142 |
-
swap='shorts_kpt6'),
|
1143 |
-
166:
|
1144 |
-
dict(
|
1145 |
-
name='shorts_kpt9',
|
1146 |
-
id=166,
|
1147 |
-
color=[128, 128, 128],
|
1148 |
-
type='',
|
1149 |
-
swap='shorts_kpt5'),
|
1150 |
-
167:
|
1151 |
-
dict(
|
1152 |
-
name='shorts_kpt10',
|
1153 |
-
id=167,
|
1154 |
-
color=[128, 128, 128],
|
1155 |
-
type='',
|
1156 |
-
swap='shorts_kpt4'),
|
1157 |
-
168:
|
1158 |
-
dict(
|
1159 |
-
name='trousers_kpt1',
|
1160 |
-
id=168,
|
1161 |
-
color=[128, 0, 128],
|
1162 |
-
type='',
|
1163 |
-
swap='trousers_kpt3'),
|
1164 |
-
169:
|
1165 |
-
dict(
|
1166 |
-
name='trousers_kpt2',
|
1167 |
-
id=169,
|
1168 |
-
color=[128, 0, 128],
|
1169 |
-
type='',
|
1170 |
-
swap=''),
|
1171 |
-
170:
|
1172 |
-
dict(
|
1173 |
-
name='trousers_kpt3',
|
1174 |
-
id=170,
|
1175 |
-
color=[128, 0, 128],
|
1176 |
-
type='',
|
1177 |
-
swap='trousers_kpt1'),
|
1178 |
-
171:
|
1179 |
-
dict(
|
1180 |
-
name='trousers_kpt4',
|
1181 |
-
id=171,
|
1182 |
-
color=[128, 0, 128],
|
1183 |
-
type='',
|
1184 |
-
swap='trousers_kpt14'),
|
1185 |
-
172:
|
1186 |
-
dict(
|
1187 |
-
name='trousers_kpt5',
|
1188 |
-
id=172,
|
1189 |
-
color=[128, 0, 128],
|
1190 |
-
type='',
|
1191 |
-
swap='trousers_kpt13'),
|
1192 |
-
173:
|
1193 |
-
dict(
|
1194 |
-
name='trousers_kpt6',
|
1195 |
-
id=173,
|
1196 |
-
color=[128, 0, 128],
|
1197 |
-
type='',
|
1198 |
-
swap='trousers_kpt12'),
|
1199 |
-
174:
|
1200 |
-
dict(
|
1201 |
-
name='trousers_kpt7',
|
1202 |
-
id=174,
|
1203 |
-
color=[128, 0, 128],
|
1204 |
-
type='',
|
1205 |
-
swap='trousers_kpt11'),
|
1206 |
-
175:
|
1207 |
-
dict(
|
1208 |
-
name='trousers_kpt8',
|
1209 |
-
id=175,
|
1210 |
-
color=[128, 0, 128],
|
1211 |
-
type='',
|
1212 |
-
swap='trousers_kpt10'),
|
1213 |
-
176:
|
1214 |
-
dict(
|
1215 |
-
name='trousers_kpt9',
|
1216 |
-
id=176,
|
1217 |
-
color=[128, 0, 128],
|
1218 |
-
type='',
|
1219 |
-
swap=''),
|
1220 |
-
177:
|
1221 |
-
dict(
|
1222 |
-
name='trousers_kpt10',
|
1223 |
-
id=177,
|
1224 |
-
color=[128, 0, 128],
|
1225 |
-
type='',
|
1226 |
-
swap='trousers_kpt8'),
|
1227 |
-
178:
|
1228 |
-
dict(
|
1229 |
-
name='trousers_kpt11',
|
1230 |
-
id=178,
|
1231 |
-
color=[128, 0, 128],
|
1232 |
-
type='',
|
1233 |
-
swap='trousers_kpt7'),
|
1234 |
-
179:
|
1235 |
-
dict(
|
1236 |
-
name='trousers_kpt12',
|
1237 |
-
id=179,
|
1238 |
-
color=[128, 0, 128],
|
1239 |
-
type='',
|
1240 |
-
swap='trousers_kpt6'),
|
1241 |
-
180:
|
1242 |
-
dict(
|
1243 |
-
name='trousers_kpt13',
|
1244 |
-
id=180,
|
1245 |
-
color=[128, 0, 128],
|
1246 |
-
type='',
|
1247 |
-
swap='trousers_kpt5'),
|
1248 |
-
181:
|
1249 |
-
dict(
|
1250 |
-
name='trousers_kpt14',
|
1251 |
-
id=181,
|
1252 |
-
color=[128, 0, 128],
|
1253 |
-
type='',
|
1254 |
-
swap='trousers_kpt4'),
|
1255 |
-
182:
|
1256 |
-
dict(
|
1257 |
-
name='skirt_kpt1',
|
1258 |
-
id=182,
|
1259 |
-
color=[64, 128, 128],
|
1260 |
-
type='',
|
1261 |
-
swap='skirt_kpt3'),
|
1262 |
-
183:
|
1263 |
-
dict(
|
1264 |
-
name='skirt_kpt2', id=183, color=[64, 128, 128], type='', swap=''),
|
1265 |
-
184:
|
1266 |
-
dict(
|
1267 |
-
name='skirt_kpt3',
|
1268 |
-
id=184,
|
1269 |
-
color=[64, 128, 128],
|
1270 |
-
type='',
|
1271 |
-
swap='skirt_kpt1'),
|
1272 |
-
185:
|
1273 |
-
dict(
|
1274 |
-
name='skirt_kpt4',
|
1275 |
-
id=185,
|
1276 |
-
color=[64, 128, 128],
|
1277 |
-
type='',
|
1278 |
-
swap='skirt_kpt8'),
|
1279 |
-
186:
|
1280 |
-
dict(
|
1281 |
-
name='skirt_kpt5',
|
1282 |
-
id=186,
|
1283 |
-
color=[64, 128, 128],
|
1284 |
-
type='',
|
1285 |
-
swap='skirt_kpt7'),
|
1286 |
-
187:
|
1287 |
-
dict(
|
1288 |
-
name='skirt_kpt6', id=187, color=[64, 128, 128], type='', swap=''),
|
1289 |
-
188:
|
1290 |
-
dict(
|
1291 |
-
name='skirt_kpt7',
|
1292 |
-
id=188,
|
1293 |
-
color=[64, 128, 128],
|
1294 |
-
type='',
|
1295 |
-
swap='skirt_kpt5'),
|
1296 |
-
189:
|
1297 |
-
dict(
|
1298 |
-
name='skirt_kpt8',
|
1299 |
-
id=189,
|
1300 |
-
color=[64, 128, 128],
|
1301 |
-
type='',
|
1302 |
-
swap='skirt_kpt4'),
|
1303 |
-
190:
|
1304 |
-
dict(name='ssd_kpt1', id=190, color=[64, 64, 128], type='', swap=''),
|
1305 |
-
191:
|
1306 |
-
dict(
|
1307 |
-
name='ssd_kpt2',
|
1308 |
-
id=191,
|
1309 |
-
color=[64, 64, 128],
|
1310 |
-
type='',
|
1311 |
-
swap='ssd_kpt6'),
|
1312 |
-
192:
|
1313 |
-
dict(
|
1314 |
-
name='ssd_kpt3',
|
1315 |
-
id=192,
|
1316 |
-
color=[64, 64, 128],
|
1317 |
-
type='',
|
1318 |
-
swap='ssd_kpt5'),
|
1319 |
-
193:
|
1320 |
-
dict(name='ssd_kpt4', id=193, color=[64, 64, 128], type='', swap=''),
|
1321 |
-
194:
|
1322 |
-
dict(
|
1323 |
-
name='ssd_kpt5',
|
1324 |
-
id=194,
|
1325 |
-
color=[64, 64, 128],
|
1326 |
-
type='',
|
1327 |
-
swap='ssd_kpt3'),
|
1328 |
-
195:
|
1329 |
-
dict(
|
1330 |
-
name='ssd_kpt6',
|
1331 |
-
id=195,
|
1332 |
-
color=[64, 64, 128],
|
1333 |
-
type='',
|
1334 |
-
swap='ssd_kpt2'),
|
1335 |
-
196:
|
1336 |
-
dict(
|
1337 |
-
name='ssd_kpt7',
|
1338 |
-
id=196,
|
1339 |
-
color=[64, 64, 128],
|
1340 |
-
type='',
|
1341 |
-
swap='ssd_kpt29'),
|
1342 |
-
197:
|
1343 |
-
dict(
|
1344 |
-
name='ssd_kpt8',
|
1345 |
-
id=197,
|
1346 |
-
color=[64, 64, 128],
|
1347 |
-
type='',
|
1348 |
-
swap='ssd_kpt28'),
|
1349 |
-
198:
|
1350 |
-
dict(
|
1351 |
-
name='ssd_kpt9',
|
1352 |
-
id=198,
|
1353 |
-
color=[64, 64, 128],
|
1354 |
-
type='',
|
1355 |
-
swap='ssd_kpt27'),
|
1356 |
-
199:
|
1357 |
-
dict(
|
1358 |
-
name='ssd_kpt10',
|
1359 |
-
id=199,
|
1360 |
-
color=[64, 64, 128],
|
1361 |
-
type='',
|
1362 |
-
swap='ssd_kpt26'),
|
1363 |
-
200:
|
1364 |
-
dict(
|
1365 |
-
name='ssd_kpt11',
|
1366 |
-
id=200,
|
1367 |
-
color=[64, 64, 128],
|
1368 |
-
type='',
|
1369 |
-
swap='ssd_kpt25'),
|
1370 |
-
201:
|
1371 |
-
dict(
|
1372 |
-
name='ssd_kpt12',
|
1373 |
-
id=201,
|
1374 |
-
color=[64, 64, 128],
|
1375 |
-
type='',
|
1376 |
-
swap='ssd_kpt24'),
|
1377 |
-
202:
|
1378 |
-
dict(
|
1379 |
-
name='ssd_kpt13',
|
1380 |
-
id=202,
|
1381 |
-
color=[64, 64, 128],
|
1382 |
-
type='',
|
1383 |
-
swap='ssd_kpt23'),
|
1384 |
-
203:
|
1385 |
-
dict(
|
1386 |
-
name='ssd_kpt14',
|
1387 |
-
id=203,
|
1388 |
-
color=[64, 64, 128],
|
1389 |
-
type='',
|
1390 |
-
swap='ssd_kpt22'),
|
1391 |
-
204:
|
1392 |
-
dict(
|
1393 |
-
name='ssd_kpt15',
|
1394 |
-
id=204,
|
1395 |
-
color=[64, 64, 128],
|
1396 |
-
type='',
|
1397 |
-
swap='ssd_kpt21'),
|
1398 |
-
205:
|
1399 |
-
dict(
|
1400 |
-
name='ssd_kpt16',
|
1401 |
-
id=205,
|
1402 |
-
color=[64, 64, 128],
|
1403 |
-
type='',
|
1404 |
-
swap='ssd_kpt20'),
|
1405 |
-
206:
|
1406 |
-
dict(
|
1407 |
-
name='ssd_kpt17',
|
1408 |
-
id=206,
|
1409 |
-
color=[64, 64, 128],
|
1410 |
-
type='',
|
1411 |
-
swap='ssd_kpt19'),
|
1412 |
-
207:
|
1413 |
-
dict(name='ssd_kpt18', id=207, color=[64, 64, 128], type='', swap=''),
|
1414 |
-
208:
|
1415 |
-
dict(
|
1416 |
-
name='ssd_kpt19',
|
1417 |
-
id=208,
|
1418 |
-
color=[64, 64, 128],
|
1419 |
-
type='',
|
1420 |
-
swap='ssd_kpt17'),
|
1421 |
-
209:
|
1422 |
-
dict(
|
1423 |
-
name='ssd_kpt20',
|
1424 |
-
id=209,
|
1425 |
-
color=[64, 64, 128],
|
1426 |
-
type='',
|
1427 |
-
swap='ssd_kpt16'),
|
1428 |
-
210:
|
1429 |
-
dict(
|
1430 |
-
name='ssd_kpt21',
|
1431 |
-
id=210,
|
1432 |
-
color=[64, 64, 128],
|
1433 |
-
type='',
|
1434 |
-
swap='ssd_kpt15'),
|
1435 |
-
211:
|
1436 |
-
dict(
|
1437 |
-
name='ssd_kpt22',
|
1438 |
-
id=211,
|
1439 |
-
color=[64, 64, 128],
|
1440 |
-
type='',
|
1441 |
-
swap='ssd_kpt14'),
|
1442 |
-
212:
|
1443 |
-
dict(
|
1444 |
-
name='ssd_kpt23',
|
1445 |
-
id=212,
|
1446 |
-
color=[64, 64, 128],
|
1447 |
-
type='',
|
1448 |
-
swap='ssd_kpt13'),
|
1449 |
-
213:
|
1450 |
-
dict(
|
1451 |
-
name='ssd_kpt24',
|
1452 |
-
id=213,
|
1453 |
-
color=[64, 64, 128],
|
1454 |
-
type='',
|
1455 |
-
swap='ssd_kpt12'),
|
1456 |
-
214:
|
1457 |
-
dict(
|
1458 |
-
name='ssd_kpt25',
|
1459 |
-
id=214,
|
1460 |
-
color=[64, 64, 128],
|
1461 |
-
type='',
|
1462 |
-
swap='ssd_kpt11'),
|
1463 |
-
215:
|
1464 |
-
dict(
|
1465 |
-
name='ssd_kpt26',
|
1466 |
-
id=215,
|
1467 |
-
color=[64, 64, 128],
|
1468 |
-
type='',
|
1469 |
-
swap='ssd_kpt10'),
|
1470 |
-
216:
|
1471 |
-
dict(
|
1472 |
-
name='ssd_kpt27',
|
1473 |
-
id=216,
|
1474 |
-
color=[64, 64, 128],
|
1475 |
-
type='',
|
1476 |
-
swap='ssd_kpt9'),
|
1477 |
-
217:
|
1478 |
-
dict(
|
1479 |
-
name='ssd_kpt28',
|
1480 |
-
id=217,
|
1481 |
-
color=[64, 64, 128],
|
1482 |
-
type='',
|
1483 |
-
swap='ssd_kpt8'),
|
1484 |
-
218:
|
1485 |
-
dict(
|
1486 |
-
name='ssd_kpt29',
|
1487 |
-
id=218,
|
1488 |
-
color=[64, 64, 128],
|
1489 |
-
type='',
|
1490 |
-
swap='ssd_kpt7'),
|
1491 |
-
219:
|
1492 |
-
dict(name='lsd_kpt1', id=219, color=[128, 64, 0], type='', swap=''),
|
1493 |
-
220:
|
1494 |
-
dict(
|
1495 |
-
name='lsd_kpt2',
|
1496 |
-
id=220,
|
1497 |
-
color=[128, 64, 0],
|
1498 |
-
type='',
|
1499 |
-
swap='lsd_kpt6'),
|
1500 |
-
221:
|
1501 |
-
dict(
|
1502 |
-
name='lsd_kpt3',
|
1503 |
-
id=221,
|
1504 |
-
color=[128, 64, 0],
|
1505 |
-
type='',
|
1506 |
-
swap='lsd_kpt5'),
|
1507 |
-
222:
|
1508 |
-
dict(name='lsd_kpt4', id=222, color=[128, 64, 0], type='', swap=''),
|
1509 |
-
223:
|
1510 |
-
dict(
|
1511 |
-
name='lsd_kpt5',
|
1512 |
-
id=223,
|
1513 |
-
color=[128, 64, 0],
|
1514 |
-
type='',
|
1515 |
-
swap='lsd_kpt3'),
|
1516 |
-
224:
|
1517 |
-
dict(
|
1518 |
-
name='lsd_kpt6',
|
1519 |
-
id=224,
|
1520 |
-
color=[128, 64, 0],
|
1521 |
-
type='',
|
1522 |
-
swap='lsd_kpt2'),
|
1523 |
-
225:
|
1524 |
-
dict(
|
1525 |
-
name='lsd_kpt7',
|
1526 |
-
id=225,
|
1527 |
-
color=[128, 64, 0],
|
1528 |
-
type='',
|
1529 |
-
swap='lsd_kpt37'),
|
1530 |
-
226:
|
1531 |
-
dict(
|
1532 |
-
name='lsd_kpt8',
|
1533 |
-
id=226,
|
1534 |
-
color=[128, 64, 0],
|
1535 |
-
type='',
|
1536 |
-
swap='lsd_kpt36'),
|
1537 |
-
227:
|
1538 |
-
dict(
|
1539 |
-
name='lsd_kpt9',
|
1540 |
-
id=227,
|
1541 |
-
color=[128, 64, 0],
|
1542 |
-
type='',
|
1543 |
-
swap='lsd_kpt35'),
|
1544 |
-
228:
|
1545 |
-
dict(
|
1546 |
-
name='lsd_kpt10',
|
1547 |
-
id=228,
|
1548 |
-
color=[128, 64, 0],
|
1549 |
-
type='',
|
1550 |
-
swap='lsd_kpt34'),
|
1551 |
-
229:
|
1552 |
-
dict(
|
1553 |
-
name='lsd_kpt11',
|
1554 |
-
id=229,
|
1555 |
-
color=[128, 64, 0],
|
1556 |
-
type='',
|
1557 |
-
swap='lsd_kpt33'),
|
1558 |
-
230:
|
1559 |
-
dict(
|
1560 |
-
name='lsd_kpt12',
|
1561 |
-
id=230,
|
1562 |
-
color=[128, 64, 0],
|
1563 |
-
type='',
|
1564 |
-
swap='lsd_kpt32'),
|
1565 |
-
231:
|
1566 |
-
dict(
|
1567 |
-
name='lsd_kpt13',
|
1568 |
-
id=231,
|
1569 |
-
color=[128, 64, 0],
|
1570 |
-
type='',
|
1571 |
-
swap='lsd_kpt31'),
|
1572 |
-
232:
|
1573 |
-
dict(
|
1574 |
-
name='lsd_kpt14',
|
1575 |
-
id=232,
|
1576 |
-
color=[128, 64, 0],
|
1577 |
-
type='',
|
1578 |
-
swap='lsd_kpt30'),
|
1579 |
-
233:
|
1580 |
-
dict(
|
1581 |
-
name='lsd_kpt15',
|
1582 |
-
id=233,
|
1583 |
-
color=[128, 64, 0],
|
1584 |
-
type='',
|
1585 |
-
swap='lsd_kpt29'),
|
1586 |
-
234:
|
1587 |
-
dict(
|
1588 |
-
name='lsd_kpt16',
|
1589 |
-
id=234,
|
1590 |
-
color=[128, 64, 0],
|
1591 |
-
type='',
|
1592 |
-
swap='lsd_kpt28'),
|
1593 |
-
235:
|
1594 |
-
dict(
|
1595 |
-
name='lsd_kpt17',
|
1596 |
-
id=235,
|
1597 |
-
color=[128, 64, 0],
|
1598 |
-
type='',
|
1599 |
-
swap='lsd_kpt27'),
|
1600 |
-
236:
|
1601 |
-
dict(
|
1602 |
-
name='lsd_kpt18',
|
1603 |
-
id=236,
|
1604 |
-
color=[128, 64, 0],
|
1605 |
-
type='',
|
1606 |
-
swap='lsd_kpt26'),
|
1607 |
-
237:
|
1608 |
-
dict(
|
1609 |
-
name='lsd_kpt19',
|
1610 |
-
id=237,
|
1611 |
-
color=[128, 64, 0],
|
1612 |
-
type='',
|
1613 |
-
swap='lsd_kpt25'),
|
1614 |
-
238:
|
1615 |
-
dict(
|
1616 |
-
name='lsd_kpt20',
|
1617 |
-
id=238,
|
1618 |
-
color=[128, 64, 0],
|
1619 |
-
type='',
|
1620 |
-
swap='lsd_kpt24'),
|
1621 |
-
239:
|
1622 |
-
dict(
|
1623 |
-
name='lsd_kpt21',
|
1624 |
-
id=239,
|
1625 |
-
color=[128, 64, 0],
|
1626 |
-
type='',
|
1627 |
-
swap='lsd_kpt23'),
|
1628 |
-
240:
|
1629 |
-
dict(name='lsd_kpt22', id=240, color=[128, 64, 0], type='', swap=''),
|
1630 |
-
241:
|
1631 |
-
dict(
|
1632 |
-
name='lsd_kpt23',
|
1633 |
-
id=241,
|
1634 |
-
color=[128, 64, 0],
|
1635 |
-
type='',
|
1636 |
-
swap='lsd_kpt21'),
|
1637 |
-
242:
|
1638 |
-
dict(
|
1639 |
-
name='lsd_kpt24',
|
1640 |
-
id=242,
|
1641 |
-
color=[128, 64, 0],
|
1642 |
-
type='',
|
1643 |
-
swap='lsd_kpt20'),
|
1644 |
-
243:
|
1645 |
-
dict(
|
1646 |
-
name='lsd_kpt25',
|
1647 |
-
id=243,
|
1648 |
-
color=[128, 64, 0],
|
1649 |
-
type='',
|
1650 |
-
swap='lsd_kpt19'),
|
1651 |
-
244:
|
1652 |
-
dict(
|
1653 |
-
name='lsd_kpt26',
|
1654 |
-
id=244,
|
1655 |
-
color=[128, 64, 0],
|
1656 |
-
type='',
|
1657 |
-
swap='lsd_kpt18'),
|
1658 |
-
245:
|
1659 |
-
dict(
|
1660 |
-
name='lsd_kpt27',
|
1661 |
-
id=245,
|
1662 |
-
color=[128, 64, 0],
|
1663 |
-
type='',
|
1664 |
-
swap='lsd_kpt17'),
|
1665 |
-
246:
|
1666 |
-
dict(
|
1667 |
-
name='lsd_kpt28',
|
1668 |
-
id=246,
|
1669 |
-
color=[128, 64, 0],
|
1670 |
-
type='',
|
1671 |
-
swap='lsd_kpt16'),
|
1672 |
-
247:
|
1673 |
-
dict(
|
1674 |
-
name='lsd_kpt29',
|
1675 |
-
id=247,
|
1676 |
-
color=[128, 64, 0],
|
1677 |
-
type='',
|
1678 |
-
swap='lsd_kpt15'),
|
1679 |
-
248:
|
1680 |
-
dict(
|
1681 |
-
name='lsd_kpt30',
|
1682 |
-
id=248,
|
1683 |
-
color=[128, 64, 0],
|
1684 |
-
type='',
|
1685 |
-
swap='lsd_kpt14'),
|
1686 |
-
249:
|
1687 |
-
dict(
|
1688 |
-
name='lsd_kpt31',
|
1689 |
-
id=249,
|
1690 |
-
color=[128, 64, 0],
|
1691 |
-
type='',
|
1692 |
-
swap='lsd_kpt13'),
|
1693 |
-
250:
|
1694 |
-
dict(
|
1695 |
-
name='lsd_kpt32',
|
1696 |
-
id=250,
|
1697 |
-
color=[128, 64, 0],
|
1698 |
-
type='',
|
1699 |
-
swap='lsd_kpt12'),
|
1700 |
-
251:
|
1701 |
-
dict(
|
1702 |
-
name='lsd_kpt33',
|
1703 |
-
id=251,
|
1704 |
-
color=[128, 64, 0],
|
1705 |
-
type='',
|
1706 |
-
swap='lsd_kpt11'),
|
1707 |
-
252:
|
1708 |
-
dict(
|
1709 |
-
name='lsd_kpt34',
|
1710 |
-
id=252,
|
1711 |
-
color=[128, 64, 0],
|
1712 |
-
type='',
|
1713 |
-
swap='lsd_kpt10'),
|
1714 |
-
253:
|
1715 |
-
dict(
|
1716 |
-
name='lsd_kpt35',
|
1717 |
-
id=253,
|
1718 |
-
color=[128, 64, 0],
|
1719 |
-
type='',
|
1720 |
-
swap='lsd_kpt9'),
|
1721 |
-
254:
|
1722 |
-
dict(
|
1723 |
-
name='lsd_kpt36',
|
1724 |
-
id=254,
|
1725 |
-
color=[128, 64, 0],
|
1726 |
-
type='',
|
1727 |
-
swap='lsd_kpt8'),
|
1728 |
-
255:
|
1729 |
-
dict(
|
1730 |
-
name='lsd_kpt37',
|
1731 |
-
id=255,
|
1732 |
-
color=[128, 64, 0],
|
1733 |
-
type='',
|
1734 |
-
swap='lsd_kpt7'),
|
1735 |
-
256:
|
1736 |
-
dict(name='vd_kpt1', id=256, color=[128, 64, 255], type='', swap=''),
|
1737 |
-
257:
|
1738 |
-
dict(
|
1739 |
-
name='vd_kpt2',
|
1740 |
-
id=257,
|
1741 |
-
color=[128, 64, 255],
|
1742 |
-
type='',
|
1743 |
-
swap='vd_kpt6'),
|
1744 |
-
258:
|
1745 |
-
dict(
|
1746 |
-
name='vd_kpt3',
|
1747 |
-
id=258,
|
1748 |
-
color=[128, 64, 255],
|
1749 |
-
type='',
|
1750 |
-
swap='vd_kpt5'),
|
1751 |
-
259:
|
1752 |
-
dict(name='vd_kpt4', id=259, color=[128, 64, 255], type='', swap=''),
|
1753 |
-
260:
|
1754 |
-
dict(
|
1755 |
-
name='vd_kpt5',
|
1756 |
-
id=260,
|
1757 |
-
color=[128, 64, 255],
|
1758 |
-
type='',
|
1759 |
-
swap='vd_kpt3'),
|
1760 |
-
261:
|
1761 |
-
dict(
|
1762 |
-
name='vd_kpt6',
|
1763 |
-
id=261,
|
1764 |
-
color=[128, 64, 255],
|
1765 |
-
type='',
|
1766 |
-
swap='vd_kpt2'),
|
1767 |
-
262:
|
1768 |
-
dict(
|
1769 |
-
name='vd_kpt7',
|
1770 |
-
id=262,
|
1771 |
-
color=[128, 64, 255],
|
1772 |
-
type='',
|
1773 |
-
swap='vd_kpt19'),
|
1774 |
-
263:
|
1775 |
-
dict(
|
1776 |
-
name='vd_kpt8',
|
1777 |
-
id=263,
|
1778 |
-
color=[128, 64, 255],
|
1779 |
-
type='',
|
1780 |
-
swap='vd_kpt18'),
|
1781 |
-
264:
|
1782 |
-
dict(
|
1783 |
-
name='vd_kpt9',
|
1784 |
-
id=264,
|
1785 |
-
color=[128, 64, 255],
|
1786 |
-
type='',
|
1787 |
-
swap='vd_kpt17'),
|
1788 |
-
265:
|
1789 |
-
dict(
|
1790 |
-
name='vd_kpt10',
|
1791 |
-
id=265,
|
1792 |
-
color=[128, 64, 255],
|
1793 |
-
type='',
|
1794 |
-
swap='vd_kpt16'),
|
1795 |
-
266:
|
1796 |
-
dict(
|
1797 |
-
name='vd_kpt11',
|
1798 |
-
id=266,
|
1799 |
-
color=[128, 64, 255],
|
1800 |
-
type='',
|
1801 |
-
swap='vd_kpt15'),
|
1802 |
-
267:
|
1803 |
-
dict(
|
1804 |
-
name='vd_kpt12',
|
1805 |
-
id=267,
|
1806 |
-
color=[128, 64, 255],
|
1807 |
-
type='',
|
1808 |
-
swap='vd_kpt14'),
|
1809 |
-
268:
|
1810 |
-
dict(name='vd_kpt13', id=268, color=[128, 64, 255], type='', swap=''),
|
1811 |
-
269:
|
1812 |
-
dict(
|
1813 |
-
name='vd_kpt14',
|
1814 |
-
id=269,
|
1815 |
-
color=[128, 64, 255],
|
1816 |
-
type='',
|
1817 |
-
swap='vd_kpt12'),
|
1818 |
-
270:
|
1819 |
-
dict(
|
1820 |
-
name='vd_kpt15',
|
1821 |
-
id=270,
|
1822 |
-
color=[128, 64, 255],
|
1823 |
-
type='',
|
1824 |
-
swap='vd_kpt11'),
|
1825 |
-
271:
|
1826 |
-
dict(
|
1827 |
-
name='vd_kpt16',
|
1828 |
-
id=271,
|
1829 |
-
color=[128, 64, 255],
|
1830 |
-
type='',
|
1831 |
-
swap='vd_kpt10'),
|
1832 |
-
272:
|
1833 |
-
dict(
|
1834 |
-
name='vd_kpt17',
|
1835 |
-
id=272,
|
1836 |
-
color=[128, 64, 255],
|
1837 |
-
type='',
|
1838 |
-
swap='vd_kpt9'),
|
1839 |
-
273:
|
1840 |
-
dict(
|
1841 |
-
name='vd_kpt18',
|
1842 |
-
id=273,
|
1843 |
-
color=[128, 64, 255],
|
1844 |
-
type='',
|
1845 |
-
swap='vd_kpt8'),
|
1846 |
-
274:
|
1847 |
-
dict(
|
1848 |
-
name='vd_kpt19',
|
1849 |
-
id=274,
|
1850 |
-
color=[128, 64, 255],
|
1851 |
-
type='',
|
1852 |
-
swap='vd_kpt7'),
|
1853 |
-
275:
|
1854 |
-
dict(name='sd_kpt1', id=275, color=[128, 64, 0], type='', swap=''),
|
1855 |
-
276:
|
1856 |
-
dict(
|
1857 |
-
name='sd_kpt2',
|
1858 |
-
id=276,
|
1859 |
-
color=[128, 64, 0],
|
1860 |
-
type='',
|
1861 |
-
swap='sd_kpt6'),
|
1862 |
-
277:
|
1863 |
-
dict(
|
1864 |
-
name='sd_kpt3',
|
1865 |
-
id=277,
|
1866 |
-
color=[128, 64, 0],
|
1867 |
-
type='',
|
1868 |
-
swap='sd_kpt5'),
|
1869 |
-
278:
|
1870 |
-
dict(name='sd_kpt4', id=278, color=[128, 64, 0], type='', swap=''),
|
1871 |
-
279:
|
1872 |
-
dict(
|
1873 |
-
name='sd_kpt5',
|
1874 |
-
id=279,
|
1875 |
-
color=[128, 64, 0],
|
1876 |
-
type='',
|
1877 |
-
swap='sd_kpt3'),
|
1878 |
-
280:
|
1879 |
-
dict(
|
1880 |
-
name='sd_kpt6',
|
1881 |
-
id=280,
|
1882 |
-
color=[128, 64, 0],
|
1883 |
-
type='',
|
1884 |
-
swap='sd_kpt2'),
|
1885 |
-
281:
|
1886 |
-
dict(
|
1887 |
-
name='sd_kpt7',
|
1888 |
-
id=281,
|
1889 |
-
color=[128, 64, 0],
|
1890 |
-
type='',
|
1891 |
-
swap='sd_kpt19'),
|
1892 |
-
282:
|
1893 |
-
dict(
|
1894 |
-
name='sd_kpt8',
|
1895 |
-
id=282,
|
1896 |
-
color=[128, 64, 0],
|
1897 |
-
type='',
|
1898 |
-
swap='sd_kpt18'),
|
1899 |
-
283:
|
1900 |
-
dict(
|
1901 |
-
name='sd_kpt9',
|
1902 |
-
id=283,
|
1903 |
-
color=[128, 64, 0],
|
1904 |
-
type='',
|
1905 |
-
swap='sd_kpt17'),
|
1906 |
-
284:
|
1907 |
-
dict(
|
1908 |
-
name='sd_kpt10',
|
1909 |
-
id=284,
|
1910 |
-
color=[128, 64, 0],
|
1911 |
-
type='',
|
1912 |
-
swap='sd_kpt16'),
|
1913 |
-
285:
|
1914 |
-
dict(
|
1915 |
-
name='sd_kpt11',
|
1916 |
-
id=285,
|
1917 |
-
color=[128, 64, 0],
|
1918 |
-
type='',
|
1919 |
-
swap='sd_kpt15'),
|
1920 |
-
286:
|
1921 |
-
dict(
|
1922 |
-
name='sd_kpt12',
|
1923 |
-
id=286,
|
1924 |
-
color=[128, 64, 0],
|
1925 |
-
type='',
|
1926 |
-
swap='sd_kpt14'),
|
1927 |
-
287:
|
1928 |
-
dict(name='sd_kpt13', id=287, color=[128, 64, 0], type='', swap=''),
|
1929 |
-
288:
|
1930 |
-
dict(
|
1931 |
-
name='sd_kpt14',
|
1932 |
-
id=288,
|
1933 |
-
color=[128, 64, 0],
|
1934 |
-
type='',
|
1935 |
-
swap='sd_kpt12'),
|
1936 |
-
289:
|
1937 |
-
dict(
|
1938 |
-
name='sd_kpt15',
|
1939 |
-
id=289,
|
1940 |
-
color=[128, 64, 0],
|
1941 |
-
type='',
|
1942 |
-
swap='sd_kpt11'),
|
1943 |
-
290:
|
1944 |
-
dict(
|
1945 |
-
name='sd_kpt16',
|
1946 |
-
id=290,
|
1947 |
-
color=[128, 64, 0],
|
1948 |
-
type='',
|
1949 |
-
swap='sd_kpt10'),
|
1950 |
-
291:
|
1951 |
-
dict(
|
1952 |
-
name='sd_kpt17',
|
1953 |
-
id=291,
|
1954 |
-
color=[128, 64, 0],
|
1955 |
-
type='',
|
1956 |
-
swap='sd_kpt9'),
|
1957 |
-
292:
|
1958 |
-
dict(
|
1959 |
-
name='sd_kpt18',
|
1960 |
-
id=292,
|
1961 |
-
color=[128, 64, 0],
|
1962 |
-
type='',
|
1963 |
-
swap='sd_kpt8'),
|
1964 |
-
293:
|
1965 |
-
dict(
|
1966 |
-
name='sd_kpt19',
|
1967 |
-
id=293,
|
1968 |
-
color=[128, 64, 0],
|
1969 |
-
type='',
|
1970 |
-
swap='sd_kpt7')
|
1971 |
-
}),
    skeleton_info=dict({
        0: dict(link=('sss_kpt1', 'sss_kpt2'), id=0, color=[255, 128, 0]),
        1: dict(link=('sss_kpt2', 'sss_kpt7'), id=1, color=[255, 128, 0]),
        2: dict(link=('sss_kpt7', 'sss_kpt8'), id=2, color=[255, 128, 0]),
        3: dict(link=('sss_kpt8', 'sss_kpt9'), id=3, color=[255, 128, 0]),
        4: dict(link=('sss_kpt9', 'sss_kpt10'), id=4, color=[255, 128, 0]),
        5: dict(link=('sss_kpt10', 'sss_kpt11'), id=5, color=[255, 128, 0]),
        6: dict(link=('sss_kpt11', 'sss_kpt12'), id=6, color=[255, 128, 0]),
        7: dict(link=('sss_kpt12', 'sss_kpt13'), id=7, color=[255, 128, 0]),
        8: dict(link=('sss_kpt13', 'sss_kpt14'), id=8, color=[255, 128, 0]),
        9: dict(link=('sss_kpt14', 'sss_kpt15'), id=9, color=[255, 128, 0]),
        10: dict(link=('sss_kpt15', 'sss_kpt16'), id=10, color=[255, 128, 0]),
        11: dict(link=('sss_kpt16', 'sss_kpt17'), id=11, color=[255, 128, 0]),
        12: dict(link=('sss_kpt17', 'sss_kpt18'), id=12, color=[255, 128, 0]),
        13: dict(link=('sss_kpt18', 'sss_kpt19'), id=13, color=[255, 128, 0]),
        14: dict(link=('sss_kpt19', 'sss_kpt20'), id=14, color=[255, 128, 0]),
        15: dict(link=('sss_kpt20', 'sss_kpt21'), id=15, color=[255, 128, 0]),
        16: dict(link=('sss_kpt21', 'sss_kpt22'), id=16, color=[255, 128, 0]),
        17: dict(link=('sss_kpt22', 'sss_kpt23'), id=17, color=[255, 128, 0]),
        18: dict(link=('sss_kpt23', 'sss_kpt24'), id=18, color=[255, 128, 0]),
        19: dict(link=('sss_kpt24', 'sss_kpt25'), id=19, color=[255, 128, 0]),
        20: dict(link=('sss_kpt25', 'sss_kpt6'), id=20, color=[255, 128, 0]),
        21: dict(link=('sss_kpt6', 'sss_kpt1'), id=21, color=[255, 128, 0]),
        22: dict(link=('sss_kpt2', 'sss_kpt3'), id=22, color=[255, 128, 0]),
        23: dict(link=('sss_kpt3', 'sss_kpt4'), id=23, color=[255, 128, 0]),
        24: dict(link=('sss_kpt4', 'sss_kpt5'), id=24, color=[255, 128, 0]),
        25: dict(link=('sss_kpt5', 'sss_kpt6'), id=25, color=[255, 128, 0]),
        26: dict(link=('lss_kpt1', 'lss_kpt2'), id=26, color=[255, 0, 128]),
        27: dict(link=('lss_kpt2', 'lss_kpt7'), id=27, color=[255, 0, 128]),
        28: dict(link=('lss_kpt7', 'lss_kpt8'), id=28, color=[255, 0, 128]),
        29: dict(link=('lss_kpt8', 'lss_kpt9'), id=29, color=[255, 0, 128]),
        30: dict(link=('lss_kpt9', 'lss_kpt10'), id=30, color=[255, 0, 128]),
        31: dict(link=('lss_kpt10', 'lss_kpt11'), id=31, color=[255, 0, 128]),
        32: dict(link=('lss_kpt11', 'lss_kpt12'), id=32, color=[255, 0, 128]),
        33: dict(link=('lss_kpt12', 'lss_kpt13'), id=33, color=[255, 0, 128]),
        34: dict(link=('lss_kpt13', 'lss_kpt14'), id=34, color=[255, 0, 128]),
        35: dict(link=('lss_kpt14', 'lss_kpt15'), id=35, color=[255, 0, 128]),
        36: dict(link=('lss_kpt15', 'lss_kpt16'), id=36, color=[255, 0, 128]),
        37: dict(link=('lss_kpt16', 'lss_kpt17'), id=37, color=[255, 0, 128]),
        38: dict(link=('lss_kpt17', 'lss_kpt18'), id=38, color=[255, 0, 128]),
        39: dict(link=('lss_kpt18', 'lss_kpt19'), id=39, color=[255, 0, 128]),
        40: dict(link=('lss_kpt19', 'lss_kpt20'), id=40, color=[255, 0, 128]),
        41: dict(link=('lss_kpt20', 'lss_kpt21'), id=41, color=[255, 0, 128]),
        42: dict(link=('lss_kpt21', 'lss_kpt22'), id=42, color=[255, 0, 128]),
        43: dict(link=('lss_kpt22', 'lss_kpt23'), id=43, color=[255, 0, 128]),
        44: dict(link=('lss_kpt23', 'lss_kpt24'), id=44, color=[255, 0, 128]),
        45: dict(link=('lss_kpt24', 'lss_kpt25'), id=45, color=[255, 0, 128]),
        46: dict(link=('lss_kpt25', 'lss_kpt26'), id=46, color=[255, 0, 128]),
        47: dict(link=('lss_kpt26', 'lss_kpt27'), id=47, color=[255, 0, 128]),
        48: dict(link=('lss_kpt27', 'lss_kpt28'), id=48, color=[255, 0, 128]),
        49: dict(link=('lss_kpt28', 'lss_kpt29'), id=49, color=[255, 0, 128]),
        50: dict(link=('lss_kpt29', 'lss_kpt30'), id=50, color=[255, 0, 128]),
        51: dict(link=('lss_kpt30', 'lss_kpt31'), id=51, color=[255, 0, 128]),
        52: dict(link=('lss_kpt31', 'lss_kpt32'), id=52, color=[255, 0, 128]),
        53: dict(link=('lss_kpt32', 'lss_kpt33'), id=53, color=[255, 0, 128]),
        54: dict(link=('lss_kpt33', 'lss_kpt6'), id=54, color=[255, 0, 128]),
        55: dict(link=('lss_kpt6', 'lss_kpt5'), id=55, color=[255, 0, 128]),
        56: dict(link=('lss_kpt5', 'lss_kpt4'), id=56, color=[255, 0, 128]),
        57: dict(link=('lss_kpt4', 'lss_kpt3'), id=57, color=[255, 0, 128]),
        58: dict(link=('lss_kpt3', 'lss_kpt2'), id=58, color=[255, 0, 128]),
        59: dict(link=('lss_kpt6', 'lss_kpt1'), id=59, color=[255, 0, 128]),
        60: dict(link=('sso_kpt1', 'sso_kpt4'), id=60, color=[128, 0, 255]),
        61: dict(link=('sso_kpt4', 'sso_kpt7'), id=61, color=[128, 0, 255]),
        62: dict(link=('sso_kpt7', 'sso_kpt8'), id=62, color=[128, 0, 255]),
        63: dict(link=('sso_kpt8', 'sso_kpt9'), id=63, color=[128, 0, 255]),
        64: dict(link=('sso_kpt9', 'sso_kpt10'), id=64, color=[128, 0, 255]),
        65: dict(link=('sso_kpt10', 'sso_kpt11'), id=65, color=[128, 0, 255]),
        66: dict(link=('sso_kpt11', 'sso_kpt12'), id=66, color=[128, 0, 255]),
        67: dict(link=('sso_kpt12', 'sso_kpt13'), id=67, color=[128, 0, 255]),
        68: dict(link=('sso_kpt13', 'sso_kpt14'), id=68, color=[128, 0, 255]),
        69: dict(link=('sso_kpt14', 'sso_kpt15'), id=69, color=[128, 0, 255]),
        70: dict(link=('sso_kpt15', 'sso_kpt16'), id=70, color=[128, 0, 255]),
        71: dict(link=('sso_kpt16', 'sso_kpt31'), id=71, color=[128, 0, 255]),
        72: dict(link=('sso_kpt31', 'sso_kpt30'), id=72, color=[128, 0, 255]),
        73: dict(link=('sso_kpt30', 'sso_kpt2'), id=73, color=[128, 0, 255]),
        74: dict(link=('sso_kpt2', 'sso_kpt3'), id=74, color=[128, 0, 255]),
        75: dict(link=('sso_kpt3', 'sso_kpt4'), id=75, color=[128, 0, 255]),
        76: dict(link=('sso_kpt1', 'sso_kpt6'), id=76, color=[128, 0, 255]),
        77: dict(link=('sso_kpt6', 'sso_kpt25'), id=77, color=[128, 0, 255]),
        78: dict(link=('sso_kpt25', 'sso_kpt24'), id=78, color=[128, 0, 255]),
        79: dict(link=('sso_kpt24', 'sso_kpt23'), id=79, color=[128, 0, 255]),
        80: dict(link=('sso_kpt23', 'sso_kpt22'), id=80, color=[128, 0, 255]),
        81: dict(link=('sso_kpt22', 'sso_kpt21'), id=81, color=[128, 0, 255]),
        82: dict(link=('sso_kpt21', 'sso_kpt20'), id=82, color=[128, 0, 255]),
        83: dict(link=('sso_kpt20', 'sso_kpt19'), id=83, color=[128, 0, 255]),
        84: dict(link=('sso_kpt19', 'sso_kpt18'), id=84, color=[128, 0, 255]),
        85: dict(link=('sso_kpt18', 'sso_kpt17'), id=85, color=[128, 0, 255]),
        86: dict(link=('sso_kpt17', 'sso_kpt29'), id=86, color=[128, 0, 255]),
        87: dict(link=('sso_kpt29', 'sso_kpt28'), id=87, color=[128, 0, 255]),
        88: dict(link=('sso_kpt28', 'sso_kpt27'), id=88, color=[128, 0, 255]),
        89: dict(link=('sso_kpt27', 'sso_kpt26'), id=89, color=[128, 0, 255]),
        90: dict(link=('sso_kpt26', 'sso_kpt5'), id=90, color=[128, 0, 255]),
        91: dict(link=('sso_kpt5', 'sso_kpt6'), id=91, color=[128, 0, 255]),
        92: dict(link=('lso_kpt1', 'lso_kpt2'), id=92, color=[0, 128, 255]),
        93: dict(link=('lso_kpt2', 'lso_kpt7'), id=93, color=[0, 128, 255]),
        94: dict(link=('lso_kpt7', 'lso_kpt8'), id=94, color=[0, 128, 255]),
        95: dict(link=('lso_kpt8', 'lso_kpt9'), id=95, color=[0, 128, 255]),
        96: dict(link=('lso_kpt9', 'lso_kpt10'), id=96, color=[0, 128, 255]),
        97: dict(link=('lso_kpt10', 'lso_kpt11'), id=97, color=[0, 128, 255]),
        98: dict(link=('lso_kpt11', 'lso_kpt12'), id=98, color=[0, 128, 255]),
        99: dict(link=('lso_kpt12', 'lso_kpt13'), id=99, color=[0, 128, 255]),
        100: dict(link=('lso_kpt13', 'lso_kpt14'), id=100, color=[0, 128, 255]),
        101: dict(link=('lso_kpt14', 'lso_kpt15'), id=101, color=[0, 128, 255]),
        102: dict(link=('lso_kpt15', 'lso_kpt16'), id=102, color=[0, 128, 255]),
        103: dict(link=('lso_kpt16', 'lso_kpt17'), id=103, color=[0, 128, 255]),
        104: dict(link=('lso_kpt17', 'lso_kpt18'), id=104, color=[0, 128, 255]),
        105: dict(link=('lso_kpt18', 'lso_kpt19'), id=105, color=[0, 128, 255]),
        106: dict(link=('lso_kpt19', 'lso_kpt20'), id=106, color=[0, 128, 255]),
        107: dict(link=('lso_kpt20', 'lso_kpt39'), id=107, color=[0, 128, 255]),
        108: dict(link=('lso_kpt39', 'lso_kpt38'), id=108, color=[0, 128, 255]),
        109: dict(link=('lso_kpt38', 'lso_kpt4'), id=109, color=[0, 128, 255]),
        110: dict(link=('lso_kpt4', 'lso_kpt3'), id=110, color=[0, 128, 255]),
        111: dict(link=('lso_kpt3', 'lso_kpt2'), id=111, color=[0, 128, 255]),
        112: dict(link=('lso_kpt1', 'lso_kpt6'), id=112, color=[0, 128, 255]),
        113: dict(link=('lso_kpt6', 'lso_kpt33'), id=113, color=[0, 128, 255]),
        114: dict(link=('lso_kpt33', 'lso_kpt32'), id=114, color=[0, 128, 255]),
        115: dict(link=('lso_kpt32', 'lso_kpt31'), id=115, color=[0, 128, 255]),
        116: dict(link=('lso_kpt31', 'lso_kpt30'), id=116, color=[0, 128, 255]),
        117: dict(link=('lso_kpt30', 'lso_kpt29'), id=117, color=[0, 128, 255]),
        118: dict(link=('lso_kpt29', 'lso_kpt28'), id=118, color=[0, 128, 255]),
        119: dict(link=('lso_kpt28', 'lso_kpt27'), id=119, color=[0, 128, 255]),
        120: dict(link=('lso_kpt27', 'lso_kpt26'), id=120, color=[0, 128, 255]),
        121: dict(link=('lso_kpt26', 'lso_kpt25'), id=121, color=[0, 128, 255]),
        122: dict(link=('lso_kpt25', 'lso_kpt24'), id=122, color=[0, 128, 255]),
        123: dict(link=('lso_kpt24', 'lso_kpt23'), id=123, color=[0, 128, 255]),
        124: dict(link=('lso_kpt23', 'lso_kpt22'), id=124, color=[0, 128, 255]),
        125: dict(link=('lso_kpt22', 'lso_kpt21'), id=125, color=[0, 128, 255]),
        126: dict(link=('lso_kpt21', 'lso_kpt37'), id=126, color=[0, 128, 255]),
        127: dict(link=('lso_kpt37', 'lso_kpt36'), id=127, color=[0, 128, 255]),
        128: dict(link=('lso_kpt36', 'lso_kpt35'), id=128, color=[0, 128, 255]),
        129: dict(link=('lso_kpt35', 'lso_kpt34'), id=129, color=[0, 128, 255]),
        130: dict(link=('lso_kpt34', 'lso_kpt5'), id=130, color=[0, 128, 255]),
        131: dict(link=('lso_kpt5', 'lso_kpt6'), id=131, color=[0, 128, 255]),
        132: dict(link=('vest_kpt1', 'vest_kpt2'), id=132, color=[0, 128, 128]),
        133: dict(link=('vest_kpt2', 'vest_kpt7'), id=133, color=[0, 128, 128]),
        134: dict(link=('vest_kpt7', 'vest_kpt8'), id=134, color=[0, 128, 128]),
        135: dict(link=('vest_kpt8', 'vest_kpt9'), id=135, color=[0, 128, 128]),
        136: dict(link=('vest_kpt9', 'vest_kpt10'), id=136, color=[0, 128, 128]),
        137: dict(link=('vest_kpt10', 'vest_kpt11'), id=137, color=[0, 128, 128]),
        138: dict(link=('vest_kpt11', 'vest_kpt12'), id=138, color=[0, 128, 128]),
        139: dict(link=('vest_kpt12', 'vest_kpt13'), id=139, color=[0, 128, 128]),
        140: dict(link=('vest_kpt13', 'vest_kpt14'), id=140, color=[0, 128, 128]),
        141: dict(link=('vest_kpt14', 'vest_kpt15'), id=141, color=[0, 128, 128]),
        142: dict(link=('vest_kpt15', 'vest_kpt6'), id=142, color=[0, 128, 128]),
        143: dict(link=('vest_kpt6', 'vest_kpt1'), id=143, color=[0, 128, 128]),
        144: dict(link=('vest_kpt2', 'vest_kpt3'), id=144, color=[0, 128, 128]),
        145: dict(link=('vest_kpt3', 'vest_kpt4'), id=145, color=[0, 128, 128]),
        146: dict(link=('vest_kpt4', 'vest_kpt5'), id=146, color=[0, 128, 128]),
        147: dict(link=('vest_kpt5', 'vest_kpt6'), id=147, color=[0, 128, 128]),
        148: dict(link=('sling_kpt1', 'sling_kpt2'), id=148, color=[0, 0, 128]),
        149: dict(link=('sling_kpt2', 'sling_kpt8'), id=149, color=[0, 0, 128]),
        150: dict(link=('sling_kpt8', 'sling_kpt9'), id=150, color=[0, 0, 128]),
        151: dict(link=('sling_kpt9', 'sling_kpt10'), id=151, color=[0, 0, 128]),
        152: dict(link=('sling_kpt10', 'sling_kpt11'), id=152, color=[0, 0, 128]),
        153: dict(link=('sling_kpt11', 'sling_kpt12'), id=153, color=[0, 0, 128]),
        154: dict(link=('sling_kpt12', 'sling_kpt13'), id=154, color=[0, 0, 128]),
        155: dict(link=('sling_kpt13', 'sling_kpt14'), id=155, color=[0, 0, 128]),
        156: dict(link=('sling_kpt14', 'sling_kpt6'), id=156, color=[0, 0, 128]),
        157: dict(link=('sling_kpt2', 'sling_kpt7'), id=157, color=[0, 0, 128]),
        158: dict(link=('sling_kpt6', 'sling_kpt15'), id=158, color=[0, 0, 128]),
        159: dict(link=('sling_kpt2', 'sling_kpt3'), id=159, color=[0, 0, 128]),
        160: dict(link=('sling_kpt3', 'sling_kpt4'), id=160, color=[0, 0, 128]),
        161: dict(link=('sling_kpt4', 'sling_kpt5'), id=161, color=[0, 0, 128]),
        162: dict(link=('sling_kpt5', 'sling_kpt6'), id=162, color=[0, 0, 128]),
        163: dict(link=('sling_kpt1', 'sling_kpt6'), id=163, color=[0, 0, 128]),
        164: dict(link=('shorts_kpt1', 'shorts_kpt4'), id=164, color=[128, 128, 128]),
        165: dict(link=('shorts_kpt4', 'shorts_kpt5'), id=165, color=[128, 128, 128]),
        166: dict(link=('shorts_kpt5', 'shorts_kpt6'), id=166, color=[128, 128, 128]),
        167: dict(link=('shorts_kpt6', 'shorts_kpt7'), id=167, color=[128, 128, 128]),
        168: dict(link=('shorts_kpt7', 'shorts_kpt8'), id=168, color=[128, 128, 128]),
        169: dict(link=('shorts_kpt8', 'shorts_kpt9'), id=169, color=[128, 128, 128]),
        170: dict(link=('shorts_kpt9', 'shorts_kpt10'), id=170, color=[128, 128, 128]),
        171: dict(link=('shorts_kpt10', 'shorts_kpt3'), id=171, color=[128, 128, 128]),
        172: dict(link=('shorts_kpt3', 'shorts_kpt2'), id=172, color=[128, 128, 128]),
        173: dict(link=('shorts_kpt2', 'shorts_kpt1'), id=173, color=[128, 128, 128]),
        174: dict(link=('trousers_kpt1', 'trousers_kpt4'), id=174, color=[128, 0, 128]),
        175: dict(link=('trousers_kpt4', 'trousers_kpt5'), id=175, color=[128, 0, 128]),
        176: dict(link=('trousers_kpt5', 'trousers_kpt6'), id=176, color=[128, 0, 128]),
        177: dict(link=('trousers_kpt6', 'trousers_kpt7'), id=177, color=[128, 0, 128]),
        178: dict(link=('trousers_kpt7', 'trousers_kpt8'), id=178, color=[128, 0, 128]),
        179: dict(link=('trousers_kpt8', 'trousers_kpt9'), id=179, color=[128, 0, 128]),
        180: dict(link=('trousers_kpt9', 'trousers_kpt10'), id=180, color=[128, 0, 128]),
        181: dict(link=('trousers_kpt10', 'trousers_kpt11'), id=181, color=[128, 0, 128]),
        182: dict(link=('trousers_kpt11', 'trousers_kpt12'), id=182, color=[128, 0, 128]),
        183: dict(link=('trousers_kpt12', 'trousers_kpt13'), id=183, color=[128, 0, 128]),
        184: dict(link=('trousers_kpt13', 'trousers_kpt14'), id=184, color=[128, 0, 128]),
        185: dict(link=('trousers_kpt14', 'trousers_kpt3'), id=185, color=[128, 0, 128]),
        186: dict(link=('trousers_kpt3', 'trousers_kpt2'), id=186, color=[128, 0, 128]),
        187: dict(link=('trousers_kpt2', 'trousers_kpt1'), id=187, color=[128, 0, 128]),
        188: dict(link=('skirt_kpt1', 'skirt_kpt4'), id=188, color=[64, 128, 128]),
        189: dict(link=('skirt_kpt4', 'skirt_kpt5'), id=189, color=[64, 128, 128]),
        190: dict(link=('skirt_kpt5', 'skirt_kpt6'), id=190, color=[64, 128, 128]),
        191: dict(link=('skirt_kpt6', 'skirt_kpt7'), id=191, color=[64, 128, 128]),
        192: dict(link=('skirt_kpt7', 'skirt_kpt8'), id=192, color=[64, 128, 128]),
        193: dict(link=('skirt_kpt8', 'skirt_kpt3'), id=193, color=[64, 128, 128]),
        194: dict(link=('skirt_kpt3', 'skirt_kpt2'), id=194, color=[64, 128, 128]),
        195: dict(link=('skirt_kpt2', 'skirt_kpt1'), id=195, color=[64, 128, 128]),
        196: dict(link=('ssd_kpt1', 'ssd_kpt2'), id=196, color=[64, 64, 128]),
        197: dict(link=('ssd_kpt2', 'ssd_kpt7'), id=197, color=[64, 64, 128]),
        198: dict(link=('ssd_kpt7', 'ssd_kpt8'), id=198, color=[64, 64, 128]),
        199: dict(link=('ssd_kpt8', 'ssd_kpt9'), id=199, color=[64, 64, 128]),
        200: dict(link=('ssd_kpt9', 'ssd_kpt10'), id=200, color=[64, 64, 128]),
        201: dict(link=('ssd_kpt10', 'ssd_kpt11'), id=201, color=[64, 64, 128]),
        202: dict(link=('ssd_kpt11', 'ssd_kpt12'), id=202, color=[64, 64, 128]),
        203: dict(link=('ssd_kpt12', 'ssd_kpt13'), id=203, color=[64, 64, 128]),
        204: dict(link=('ssd_kpt13', 'ssd_kpt14'), id=204, color=[64, 64, 128]),
        205: dict(link=('ssd_kpt14', 'ssd_kpt15'), id=205, color=[64, 64, 128]),
        206: dict(link=('ssd_kpt15', 'ssd_kpt16'), id=206, color=[64, 64, 128]),
        207: dict(link=('ssd_kpt16', 'ssd_kpt17'), id=207, color=[64, 64, 128]),
        208: dict(link=('ssd_kpt17', 'ssd_kpt18'), id=208, color=[64, 64, 128]),
        209: dict(link=('ssd_kpt18', 'ssd_kpt19'), id=209, color=[64, 64, 128]),
        210: dict(link=('ssd_kpt19', 'ssd_kpt20'), id=210, color=[64, 64, 128]),
        211: dict(link=('ssd_kpt20', 'ssd_kpt21'), id=211, color=[64, 64, 128]),
        212: dict(link=('ssd_kpt21', 'ssd_kpt22'), id=212, color=[64, 64, 128]),
        213: dict(link=('ssd_kpt22', 'ssd_kpt23'), id=213, color=[64, 64, 128]),
        214: dict(link=('ssd_kpt23', 'ssd_kpt24'), id=214, color=[64, 64, 128]),
        215: dict(link=('ssd_kpt24', 'ssd_kpt25'), id=215, color=[64, 64, 128]),
        216: dict(link=('ssd_kpt25', 'ssd_kpt26'), id=216, color=[64, 64, 128]),
        217: dict(link=('ssd_kpt26', 'ssd_kpt27'), id=217, color=[64, 64, 128]),
        218: dict(link=('ssd_kpt27', 'ssd_kpt28'), id=218, color=[64, 64, 128]),
        219: dict(link=('ssd_kpt28', 'ssd_kpt29'), id=219, color=[64, 64, 128]),
        220: dict(link=('ssd_kpt29', 'ssd_kpt6'), id=220, color=[64, 64, 128]),
        221: dict(link=('ssd_kpt6', 'ssd_kpt5'), id=221, color=[64, 64, 128]),
        222: dict(link=('ssd_kpt5', 'ssd_kpt4'), id=222, color=[64, 64, 128]),
        223: dict(link=('ssd_kpt4', 'ssd_kpt3'), id=223, color=[64, 64, 128]),
        224: dict(link=('ssd_kpt3', 'ssd_kpt2'), id=224, color=[64, 64, 128]),
        225: dict(link=('ssd_kpt6', 'ssd_kpt1'), id=225, color=[64, 64, 128]),
        226: dict(link=('lsd_kpt1', 'lsd_kpt2'), id=226, color=[128, 64, 0]),
        227: dict(link=('lsd_kpt2', 'lsd_kpt7'), id=227, color=[128, 64, 0]),
        228: dict(link=('lsd_kpt7', 'lsd_kpt8'), id=228, color=[128, 64, 0]),
        229: dict(link=('lsd_kpt8', 'lsd_kpt9'), id=229, color=[128, 64, 0]),
        230: dict(link=('lsd_kpt9', 'lsd_kpt10'), id=230, color=[128, 64, 0]),
        231: dict(link=('lsd_kpt10', 'lsd_kpt11'), id=231, color=[128, 64, 0]),
        232: dict(link=('lsd_kpt11', 'lsd_kpt12'), id=232, color=[128, 64, 0]),
        233: dict(link=('lsd_kpt12', 'lsd_kpt13'), id=233, color=[128, 64, 0]),
        234: dict(link=('lsd_kpt13', 'lsd_kpt14'), id=234, color=[128, 64, 0]),
        235: dict(link=('lsd_kpt14', 'lsd_kpt15'), id=235, color=[128, 64, 0]),
        236: dict(link=('lsd_kpt15', 'lsd_kpt16'), id=236, color=[128, 64, 0]),
        237: dict(link=('lsd_kpt16', 'lsd_kpt17'), id=237, color=[128, 64, 0]),
        238: dict(link=('lsd_kpt17', 'lsd_kpt18'), id=238, color=[128, 64, 0]),
        239: dict(link=('lsd_kpt18', 'lsd_kpt19'), id=239, color=[128, 64, 0]),
        240: dict(link=('lsd_kpt19', 'lsd_kpt20'), id=240, color=[128, 64, 0]),
        241: dict(link=('lsd_kpt20', 'lsd_kpt21'), id=241, color=[128, 64, 0]),
        242: dict(link=('lsd_kpt21', 'lsd_kpt22'), id=242, color=[128, 64, 0]),
        243: dict(link=('lsd_kpt22', 'lsd_kpt23'), id=243, color=[128, 64, 0]),
        244: dict(link=('lsd_kpt23', 'lsd_kpt24'), id=244, color=[128, 64, 0]),
        245: dict(link=('lsd_kpt24', 'lsd_kpt25'), id=245, color=[128, 64, 0]),
        246: dict(link=('lsd_kpt25', 'lsd_kpt26'), id=246, color=[128, 64, 0]),
        247: dict(link=('lsd_kpt26', 'lsd_kpt27'), id=247, color=[128, 64, 0]),
        248: dict(link=('lsd_kpt27', 'lsd_kpt28'), id=248, color=[128, 64, 0]),
        249: dict(link=('lsd_kpt28', 'lsd_kpt29'), id=249, color=[128, 64, 0]),
        250: dict(link=('lsd_kpt29', 'lsd_kpt30'), id=250, color=[128, 64, 0]),
        251: dict(link=('lsd_kpt30', 'lsd_kpt31'), id=251, color=[128, 64, 0]),
        252: dict(link=('lsd_kpt31', 'lsd_kpt32'), id=252, color=[128, 64, 0]),
        253: dict(link=('lsd_kpt32', 'lsd_kpt33'), id=253, color=[128, 64, 0]),
        254: dict(link=('lsd_kpt33', 'lsd_kpt34'), id=254, color=[128, 64, 0]),
        255: dict(link=('lsd_kpt34', 'lsd_kpt35'), id=255, color=[128, 64, 0]),
        256: dict(link=('lsd_kpt35', 'lsd_kpt36'), id=256, color=[128, 64, 0]),
        257: dict(link=('lsd_kpt36', 'lsd_kpt37'), id=257, color=[128, 64, 0]),
        258: dict(link=('lsd_kpt37', 'lsd_kpt6'), id=258, color=[128, 64, 0]),
        259: dict(link=('lsd_kpt6', 'lsd_kpt5'), id=259, color=[128, 64, 0]),
        260: dict(link=('lsd_kpt5', 'lsd_kpt4'), id=260, color=[128, 64, 0]),
        261: dict(link=('lsd_kpt4', 'lsd_kpt3'), id=261, color=[128, 64, 0]),
        262: dict(link=('lsd_kpt3', 'lsd_kpt2'), id=262, color=[128, 64, 0]),
        263: dict(link=('lsd_kpt6', 'lsd_kpt1'), id=263, color=[128, 64, 0]),
        264: dict(link=('vd_kpt1', 'vd_kpt2'), id=264, color=[128, 64, 255]),
        265: dict(link=('vd_kpt2', 'vd_kpt7'), id=265, color=[128, 64, 255]),
        266: dict(link=('vd_kpt7', 'vd_kpt8'), id=266, color=[128, 64, 255]),
        267: dict(link=('vd_kpt8', 'vd_kpt9'), id=267, color=[128, 64, 255]),
        268: dict(link=('vd_kpt9', 'vd_kpt10'), id=268, color=[128, 64, 255]),
        269: dict(link=('vd_kpt10', 'vd_kpt11'), id=269, color=[128, 64, 255]),
        270: dict(link=('vd_kpt11', 'vd_kpt12'), id=270, color=[128, 64, 255]),
        271: dict(link=('vd_kpt12', 'vd_kpt13'), id=271, color=[128, 64, 255]),
        272: dict(link=('vd_kpt13', 'vd_kpt14'), id=272, color=[128, 64, 255]),
        273: dict(link=('vd_kpt14', 'vd_kpt15'), id=273, color=[128, 64, 255]),
        274: dict(link=('vd_kpt15', 'vd_kpt16'), id=274, color=[128, 64, 255]),
        275: dict(link=('vd_kpt16', 'vd_kpt17'), id=275, color=[128, 64, 255]),
        276: dict(link=('vd_kpt17', 'vd_kpt18'), id=276, color=[128, 64, 255]),
        277: dict(link=('vd_kpt18', 'vd_kpt19'), id=277, color=[128, 64, 255]),
        278: dict(link=('vd_kpt19', 'vd_kpt6'), id=278, color=[128, 64, 255]),
        279: dict(link=('vd_kpt6', 'vd_kpt5'), id=279, color=[128, 64, 255]),
        280: dict(link=('vd_kpt5', 'vd_kpt4'), id=280, color=[128, 64, 255]),
        281: dict(link=('vd_kpt4', 'vd_kpt3'), id=281, color=[128, 64, 255]),
        282: dict(link=('vd_kpt3', 'vd_kpt2'), id=282, color=[128, 64, 255]),
        283: dict(link=('vd_kpt6', 'vd_kpt1'), id=283, color=[128, 64, 255]),
        284: dict(link=('sd_kpt1', 'sd_kpt2'), id=284, color=[128, 64, 0]),
        285: dict(link=('sd_kpt2', 'sd_kpt8'), id=285, color=[128, 64, 0]),
        286: dict(link=('sd_kpt8', 'sd_kpt9'), id=286, color=[128, 64, 0]),
        287: dict(link=('sd_kpt9', 'sd_kpt10'), id=287, color=[128, 64, 0]),
        288: dict(link=('sd_kpt10', 'sd_kpt11'), id=288, color=[128, 64, 0]),
        289: dict(link=('sd_kpt11', 'sd_kpt12'), id=289, color=[128, 64, 0]),
        290: dict(link=('sd_kpt12', 'sd_kpt13'), id=290, color=[128, 64, 0]),
        291: dict(link=('sd_kpt13', 'sd_kpt14'), id=291, color=[128, 64, 0]),
        292: dict(link=('sd_kpt14', 'sd_kpt15'), id=292, color=[128, 64, 0]),
        293: dict(link=('sd_kpt15', 'sd_kpt16'), id=293, color=[128, 64, 0]),
        294: dict(link=('sd_kpt16', 'sd_kpt17'), id=294, color=[128, 64, 0]),
        295: dict(link=('sd_kpt17', 'sd_kpt18'), id=295, color=[128, 64, 0]),
        296: dict(link=('sd_kpt18', 'sd_kpt6'), id=296, color=[128, 64, 0]),
        297: dict(link=('sd_kpt6', 'sd_kpt5'), id=297, color=[128, 64, 0]),
        298: dict(link=('sd_kpt5', 'sd_kpt4'), id=298, color=[128, 64, 0]),
        299: dict(link=('sd_kpt4', 'sd_kpt3'), id=299, color=[128, 64, 0]),
        300: dict(link=('sd_kpt3', 'sd_kpt2'), id=300, color=[128, 64, 0]),
        301: dict(link=('sd_kpt2', 'sd_kpt7'), id=301, color=[128, 64, 0]),
        302: dict(link=('sd_kpt6', 'sd_kpt19'), id=302, color=[128, 64, 0]),
        303: dict(link=('sd_kpt6', 'sd_kpt1'), id=303, color=[128, 64, 0])
    }),
|
2646 |
-
joint_weights=[
|
2647 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2648 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2649 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2650 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2651 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2652 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2653 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2654 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2655 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2656 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2657 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2658 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2659 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2660 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2661 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2662 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2663 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2664 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2665 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2666 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2667 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
|
2668 |
-
],
|
2669 |
-
sigmas=[])
|
2670 |
-
param_scheduler = [
|
2671 |
-
dict(
|
2672 |
-
type='LinearLR', begin=0, end=500, start_factor=0.001, by_epoch=False),
|
2673 |
-
dict(
|
2674 |
-
type='MultiStepLR',
|
2675 |
-
begin=0,
|
2676 |
-
end=120,
|
2677 |
-
milestones=[80, 100],
|
2678 |
-
gamma=0.1,
|
2679 |
-
by_epoch=True)
|
2680 |
-
]
|
2681 |
-
optim_wrapper = dict(optimizer=dict(type='Adam', lr=0.0005))
|
2682 |
-
auto_scale_lr = dict(base_batch_size=512)
|
2683 |
-
dataset_type = 'DeepFashion2Dataset'
|
2684 |
-
data_mode = 'topdown'
|
2685 |
-
data_root = 'data/deepfashion2/'
|
2686 |
-
codec = dict(
|
2687 |
-
type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2)
|
2688 |
-
train_pipeline = [
|
2689 |
-
dict(type='LoadImage'),
|
2690 |
-
dict(type='GetBBoxCenterScale'),
|
2691 |
-
dict(type='RandomFlip', direction='horizontal'),
|
2692 |
-
dict(
|
2693 |
-
type='RandomBBoxTransform',
|
2694 |
-
shift_prob=0,
|
2695 |
-
rotate_factor=60,
|
2696 |
-
scale_factor=(0.75, 1.25)),
|
2697 |
-
dict(type='TopdownAffine', input_size=(192, 256)),
|
2698 |
-
dict(
|
2699 |
-
type='GenerateTarget',
|
2700 |
-
encoder=dict(
|
2701 |
-
type='MSRAHeatmap',
|
2702 |
-
input_size=(192, 256),
|
2703 |
-
heatmap_size=(48, 64),
|
2704 |
-
sigma=2)),
|
2705 |
-
dict(type='PackPoseInputs')
|
2706 |
-
]
|
2707 |
-
val_pipeline = [
|
2708 |
-
dict(type='LoadImage', backend_args=dict(backend='local')),
|
2709 |
-
dict(type='GetBBoxCenterScale'),
|
2710 |
-
dict(type='TopdownAffine', input_size=(192, 256)),
|
2711 |
-
dict(type='PackPoseInputs')
|
2712 |
-
]
|
2713 |
-
train_dataloader = dict(
|
2714 |
-
batch_size=64,
|
2715 |
-
num_workers=6,
|
2716 |
-
persistent_workers=True,
|
2717 |
-
sampler=dict(type='DefaultSampler', shuffle=True),
|
2718 |
-
dataset=dict(
|
2719 |
-
type='DeepFashion2Dataset',
|
2720 |
-
data_root='data/deepfashion2/',
|
2721 |
-
data_mode='topdown',
|
2722 |
-
ann_file='train/deepfashion2_vest.json',
|
2723 |
-
data_prefix=dict(img='train/image/'),
|
2724 |
-
pipeline=[
|
2725 |
-
dict(type='LoadImage'),
|
2726 |
-
dict(type='GetBBoxCenterScale'),
|
2727 |
-
dict(type='RandomFlip', direction='horizontal'),
|
2728 |
-
dict(
|
2729 |
-
type='RandomBBoxTransform',
|
2730 |
-
shift_prob=0,
|
2731 |
-
rotate_factor=60,
|
2732 |
-
scale_factor=(0.75, 1.25)),
|
2733 |
-
dict(type='TopdownAffine', input_size=(192, 256)),
|
2734 |
-
dict(
|
2735 |
-
type='GenerateTarget',
|
2736 |
-
encoder=dict(
|
2737 |
-
type='MSRAHeatmap',
|
2738 |
-
input_size=(192, 256),
|
2739 |
-
heatmap_size=(48, 64),
|
2740 |
-
sigma=2)),
|
2741 |
-
dict(type='PackPoseInputs')
|
2742 |
-
]))
|
2743 |
-
val_dataloader = dict(
|
2744 |
-
batch_size=32,
|
2745 |
-
num_workers=6,
|
2746 |
-
persistent_workers=True,
|
2747 |
-
drop_last=False,
|
2748 |
-
sampler=dict(type='DefaultSampler', shuffle=False),
|
2749 |
-
dataset=dict(
|
2750 |
-
type='DeepFashion2Dataset',
|
2751 |
-
data_root='data/deepfashion2/',
|
2752 |
-
data_mode='topdown',
|
2753 |
-
ann_file='validation/deepfashion2_vest.json',
|
2754 |
-
data_prefix=dict(img='validation/image/'),
|
2755 |
-
test_mode=True,
|
2756 |
-
pipeline=[
|
2757 |
-
dict(type='LoadImage', backend_args=dict(backend='local')),
|
2758 |
-
dict(type='GetBBoxCenterScale'),
|
2759 |
-
dict(type='TopdownAffine', input_size=(192, 256)),
|
2760 |
-
dict(type='PackPoseInputs')
|
2761 |
-
]))
|
2762 |
-
test_dataloader = dict(
|
2763 |
-
batch_size=32,
|
2764 |
-
num_workers=6,
|
2765 |
-
persistent_workers=True,
|
2766 |
-
drop_last=False,
|
2767 |
-
sampler=dict(type='DefaultSampler', shuffle=False),
|
2768 |
-
dataset=dict(
|
2769 |
-
type='DeepFashion2Dataset',
|
2770 |
-
data_root='data/deepfashion2/',
|
2771 |
-
data_mode='topdown',
|
2772 |
-
ann_file='validation/deepfashion2_vest.json',
|
2773 |
-
data_prefix=dict(img='validation/image/'),
|
2774 |
-
test_mode=True,
|
2775 |
-
pipeline=[
|
2776 |
-
dict(type='LoadImage', backend_args=dict(backend='local')),
|
2777 |
-
dict(type='GetBBoxCenterScale'),
|
2778 |
-
dict(type='TopdownAffine', input_size=(192, 256)),
|
2779 |
-
dict(type='PackPoseInputs')
|
2780 |
-
]))
|
2781 |
-
channel_cfg = dict(
|
2782 |
-
num_output_channels=294,
|
2783 |
-
dataset_joints=294,
|
2784 |
-
dataset_channel=[[
|
2785 |
-
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
|
2786 |
-
20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
|
2787 |
-
38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
|
2788 |
-
56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
|
2789 |
-
74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
|
2790 |
-
92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
|
2791 |
-
108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
|
2792 |
-
122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135,
|
2793 |
-
136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
|
2794 |
-
150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163,
|
2795 |
-
164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
|
2796 |
-
178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
|
2797 |
-
192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205,
|
2798 |
-
206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
|
2799 |
-
220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233,
|
2800 |
-
234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247,
|
2801 |
-
248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261,
|
2802 |
-
262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275,
|
2803 |
-
276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289,
|
2804 |
-
290, 291, 292, 293
|
2805 |
-
]],
|
2806 |
-
inference_channel=[
|
2807 |
-
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
|
2808 |
-
20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
|
2809 |
-
38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
|
2810 |
-
56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
|
2811 |
-
74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
|
2812 |
-
92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
|
2813 |
-
108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
|
2814 |
-
122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135,
|
2815 |
-
136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
|
2816 |
-
150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163,
|
2817 |
-
164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
|
2818 |
-
178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
|
2819 |
-
192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205,
|
2820 |
-
206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
|
2821 |
-
220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233,
|
2822 |
-
234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247,
|
2823 |
-
248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261,
|
2824 |
-
262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275,
|
2825 |
-
276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289,
|
2826 |
-
290, 291, 292, 293
|
2827 |
-
])
|
2828 |
-
model = dict(
|
2829 |
-
type='TopdownPoseEstimator',
|
2830 |
-
data_preprocessor=dict(
|
2831 |
-
type='PoseDataPreprocessor',
|
2832 |
-
mean=[123.675, 116.28, 103.53],
|
2833 |
-
std=[58.395, 57.12, 57.375],
|
2834 |
-
bgr_to_rgb=True),
|
2835 |
-
backbone=dict(
|
2836 |
-
type='ResNet',
|
2837 |
-
depth=50,
|
2838 |
-
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
|
2839 |
-
head=dict(
|
2840 |
-
type='HeatmapHead',
|
2841 |
-
in_channels=2048,
|
2842 |
-
out_channels=294,
|
2843 |
-
loss=dict(type='KeypointMSELoss', use_target_weight=True),
|
2844 |
-
decoder=dict(
|
2845 |
-
type='MSRAHeatmap',
|
2846 |
-
input_size=(192, 256),
|
2847 |
-
heatmap_size=(48, 64),
|
2848 |
-
sigma=2)),
|
2849 |
-
test_cfg=dict(flip_test=True, flip_mode='heatmap', shift_heatmap=True))
|
2850 |
-
val_evaluator = [
|
2851 |
-
dict(type='PCKAccuracy', thr=0.2),
|
2852 |
-
dict(type='AUC'),
|
2853 |
-
dict(type='EPE')
|
2854 |
-
]
|
2855 |
-
test_evaluator = [
|
2856 |
-
dict(type='PCKAccuracy', thr=0.2),
|
2857 |
-
dict(type='AUC'),
|
2858 |
-
dict(type='EPE')
|
2859 |
-
]
|
2860 |
-
launcher = 'pytorch'
|
2861 |
-
work_dir = './work_dirs/td_hm_res50_4xb64-120e_deepfashion2_vest_256x192'
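Note (illustration, not part of the diff above): the deleted file is a dict-style MMPose 1.x config. A minimal sketch of how such a config is typically consumed through MMEngine follows; the config filename and work_dir overrides are placeholders, and the snippet assumes mmpose and mmengine are installed so their registries are populated.

# Minimal sketch, assuming MMPose 1.x with MMEngine installed.
# 'my_vest_config.py' is a hypothetical local copy of the deleted config file.
from mmengine.config import Config
from mmengine.runner import Runner

cfg = Config.fromfile('my_vest_config.py')
cfg.work_dir = './work_dirs/vest_demo'  # override the work_dir baked into the config
runner = Runner.from_cfg(cfg)           # builds model, dataloaders, optimizer and schedulers from the dict fields
runner.train()                          # runs the 120-epoch schedule defined by param_scheduler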
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/YAMLMake.d.ts
DELETED
@@ -1,15 +0,0 @@
-import Builders from './builders/Builders';
-export default YAMLMake;
-
-declare namespace YAMLMake {
-    type BuilderType = Builders.BuilderType;
-    type BuildersType = { [name: string]: BuilderType }
-}
-
-declare function YAMLMake(
-    scene: Phaser.Scene,
-    data: Object | string,
-    view?: Object | string,
-    styles?: Object | string,
-    customBuilders?: YAMLMake.BuildersType
-): Phaser.GameObjects.GameObject;
spaces/AiMimicry/sovits-models/data_utils.py
DELETED
@@ -1,155 +0,0 @@
-import time
-import os
-import random
-import numpy as np
-import torch
-import torch.utils.data
-
-import modules.commons as commons
-import utils
-from modules.mel_processing import spectrogram_torch, spec_to_mel_torch
-from utils import load_wav_to_torch, load_filepaths_and_text
-
-# import h5py
-
-
-"""Multi speaker version"""
-
-
-class TextAudioSpeakerLoader(torch.utils.data.Dataset):
-    """
-        1) loads audio, speaker_id, text pairs
-        2) normalizes text and converts them to sequences of integers
-        3) computes spectrograms from audio files.
-    """
-
-    def __init__(self, audiopaths, hparams, all_in_mem: bool = False):
-        self.audiopaths = load_filepaths_and_text(audiopaths)
-        self.max_wav_value = hparams.data.max_wav_value
-        self.sampling_rate = hparams.data.sampling_rate
-        self.filter_length = hparams.data.filter_length
-        self.hop_length = hparams.data.hop_length
-        self.win_length = hparams.data.win_length
-        self.sampling_rate = hparams.data.sampling_rate
-        self.use_sr = hparams.train.use_sr
-        self.spec_len = hparams.train.max_speclen
-        self.spk_map = hparams.spk
-
-        random.seed(1234)
-        random.shuffle(self.audiopaths)
-
-        self.all_in_mem = all_in_mem
-        if self.all_in_mem:
-            self.cache = [self.get_audio(p[0]) for p in self.audiopaths]
-
-    def get_audio(self, filename):
-        filename = filename.replace("\\", "/")
-        audio, sampling_rate = load_wav_to_torch(filename)
-        if sampling_rate != self.sampling_rate:
-            raise ValueError("{} SR doesn't match target {} SR".format(
-                sampling_rate, self.sampling_rate))
-        audio_norm = audio / self.max_wav_value
-        audio_norm = audio_norm.unsqueeze(0)
-        spec_filename = filename.replace(".wav", ".spec.pt")
-
-        # Ideally, all data generated after Mar 25 should have .spec.pt
-        if os.path.exists(spec_filename):
-            spec = torch.load(spec_filename)
-        else:
-            spec = spectrogram_torch(audio_norm, self.filter_length,
-                                     self.sampling_rate, self.hop_length, self.win_length,
-                                     center=False)
-            spec = torch.squeeze(spec, 0)
-            torch.save(spec, spec_filename)
-
-        spk = filename.split("/")[-2]
-        spk = torch.LongTensor([self.spk_map[spk]])
-
-        f0 = np.load(filename + ".f0.npy")
-        f0, uv = utils.interpolate_f0(f0)
-        f0 = torch.FloatTensor(f0)
-        uv = torch.FloatTensor(uv)
-
-        c = torch.load(filename+ ".soft.pt")
-        c = utils.repeat_expand_2d(c.squeeze(0), f0.shape[0])
-
-
-        lmin = min(c.size(-1), spec.size(-1))
-        assert abs(c.size(-1) - spec.size(-1)) < 3, (c.size(-1), spec.size(-1), f0.shape, filename)
-        assert abs(audio_norm.shape[1]-lmin * self.hop_length) < 3 * self.hop_length
-        spec, c, f0, uv = spec[:, :lmin], c[:, :lmin], f0[:lmin], uv[:lmin]
-        audio_norm = audio_norm[:, :lmin * self.hop_length]
-
-        return c, f0, spec, audio_norm, spk, uv
-
-    def random_slice(self, c, f0, spec, audio_norm, spk, uv):
-        # if spec.shape[1] < 30:
-        #     print("skip too short audio:", filename)
-        #     return None
-        if spec.shape[1] > 800:
-            start = random.randint(0, spec.shape[1]-800)
-            end = start + 790
-            spec, c, f0, uv = spec[:, start:end], c[:, start:end], f0[start:end], uv[start:end]
-            audio_norm = audio_norm[:, start * self.hop_length : end * self.hop_length]
-
-        return c, f0, spec, audio_norm, spk, uv
-
-    def __getitem__(self, index):
-        if self.all_in_mem:
-            return self.random_slice(*self.cache[index])
-        else:
-            return self.random_slice(*self.get_audio(self.audiopaths[index][0]))
-
-    def __len__(self):
-        return len(self.audiopaths)
-
-
-class TextAudioCollate:
-
-    def __call__(self, batch):
-        batch = [b for b in batch if b is not None]
-
-        input_lengths, ids_sorted_decreasing = torch.sort(
-            torch.LongTensor([x[0].shape[1] for x in batch]),
-            dim=0, descending=True)
-
-        max_c_len = max([x[0].size(1) for x in batch])
-        max_wav_len = max([x[3].size(1) for x in batch])
-
-        lengths = torch.LongTensor(len(batch))
-
-        c_padded = torch.FloatTensor(len(batch), batch[0][0].shape[0], max_c_len)
-        f0_padded = torch.FloatTensor(len(batch), max_c_len)
-        spec_padded = torch.FloatTensor(len(batch), batch[0][2].shape[0], max_c_len)
-        wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)
-        spkids = torch.LongTensor(len(batch), 1)
-        uv_padded = torch.FloatTensor(len(batch), max_c_len)
-
-        c_padded.zero_()
-        spec_padded.zero_()
-        f0_padded.zero_()
-        wav_padded.zero_()
-        uv_padded.zero_()
-
-        for i in range(len(ids_sorted_decreasing)):
-            row = batch[ids_sorted_decreasing[i]]
-
-            c = row[0]
-            c_padded[i, :, :c.size(1)] = c
-            lengths[i] = c.size(1)
-
-            f0 = row[1]
-            f0_padded[i, :f0.size(0)] = f0
-
-            spec = row[2]
-            spec_padded[i, :, :spec.size(1)] = spec
-
-            wav = row[3]
-            wav_padded[i, :, :wav.size(1)] = wav
-
-            spkids[i, 0] = row[4]
-
-            uv = row[5]
-            uv_padded[i, :uv.size(0)] = uv
-
-        return c_padded, f0_padded, spec_padded, wav_padded, spkids, lengths, uv_padded
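Note (illustration, not part of the diff): a rough sketch of how the deleted TextAudioSpeakerLoader / TextAudioCollate pair is usually wired into a PyTorch DataLoader in so-vits-svc-style training code. The filelist and config paths are placeholders, and utils.get_hparams_from_file is assumed to come from the same repository's utils module.

# Sketch only; paths are hypothetical and hparams loading assumes the repo's utils module.
import torch.utils.data
import utils
from data_utils import TextAudioSpeakerLoader, TextAudioCollate

hps = utils.get_hparams_from_file('configs/config.json')  # hypothetical config path
train_set = TextAudioSpeakerLoader('filelists/train.txt', hps)
loader = torch.utils.data.DataLoader(
    train_set, batch_size=8, shuffle=False, num_workers=2,
    collate_fn=TextAudioCollate())                        # pads c/f0/spec/wav/uv to the batch max length
c, f0, spec, wav, spk, lengths, uv = next(iter(loader))   # order matches TextAudioCollate.__call__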
spaces/Aki004/herta-so-vits/modules/enhancer.py
DELETED
@@ -1,105 +0,0 @@
-import numpy as np
-import torch
-import torch.nn.functional as F
-from vdecoder.nsf_hifigan.nvSTFT import STFT
-from vdecoder.nsf_hifigan.models import load_model
-from torchaudio.transforms import Resample
-
-class Enhancer:
-    def __init__(self, enhancer_type, enhancer_ckpt, device=None):
-        if device is None:
-            device = 'cuda' if torch.cuda.is_available() else 'cpu'
-        self.device = device
-
-        if enhancer_type == 'nsf-hifigan':
-            self.enhancer = NsfHifiGAN(enhancer_ckpt, device=self.device)
-        else:
-            raise ValueError(f" [x] Unknown enhancer: {enhancer_type}")
-
-        self.resample_kernel = {}
-        self.enhancer_sample_rate = self.enhancer.sample_rate()
-        self.enhancer_hop_size = self.enhancer.hop_size()
-
-    def enhance(self,
-                audio, # 1, T
-                sample_rate,
-                f0, # 1, n_frames, 1
-                hop_size,
-                adaptive_key = 0,
-                silence_front = 0
-                ):
-        # enhancer start time
-        start_frame = int(silence_front * sample_rate / hop_size)
-        real_silence_front = start_frame * hop_size / sample_rate
-        audio = audio[:, int(np.round(real_silence_front * sample_rate)) : ]
-        f0 = f0[: , start_frame :, :]
-
-        # adaptive parameters
-        adaptive_factor = 2 ** ( -adaptive_key / 12)
-        adaptive_sample_rate = 100 * int(np.round(self.enhancer_sample_rate / adaptive_factor / 100))
-        real_factor = self.enhancer_sample_rate / adaptive_sample_rate
-
-        # resample the ddsp output
-        if sample_rate == adaptive_sample_rate:
-            audio_res = audio
-        else:
-            key_str = str(sample_rate) + str(adaptive_sample_rate)
-            if key_str not in self.resample_kernel:
-                self.resample_kernel[key_str] = Resample(sample_rate, adaptive_sample_rate, lowpass_filter_width = 128).to(self.device)
-            audio_res = self.resample_kernel[key_str](audio)
-
-        n_frames = int(audio_res.size(-1) // self.enhancer_hop_size + 1)
-
-        # resample f0
-        f0_np = f0.squeeze(0).squeeze(-1).cpu().numpy()
-        f0_np *= real_factor
-        time_org = (hop_size / sample_rate) * np.arange(len(f0_np)) / real_factor
-        time_frame = (self.enhancer_hop_size / self.enhancer_sample_rate) * np.arange(n_frames)
-        f0_res = np.interp(time_frame, time_org, f0_np, left=f0_np[0], right=f0_np[-1])
-        f0_res = torch.from_numpy(f0_res).unsqueeze(0).float().to(self.device) # 1, n_frames
-
-        # enhance
-        enhanced_audio, enhancer_sample_rate = self.enhancer(audio_res, f0_res)
-
-        # resample the enhanced output
-        if adaptive_factor != 0:
-            key_str = str(adaptive_sample_rate) + str(enhancer_sample_rate)
-            if key_str not in self.resample_kernel:
-                self.resample_kernel[key_str] = Resample(adaptive_sample_rate, enhancer_sample_rate, lowpass_filter_width = 128).to(self.device)
-            enhanced_audio = self.resample_kernel[key_str](enhanced_audio)
-
-        # pad the silence frames
-        if start_frame > 0:
-            enhanced_audio = F.pad(enhanced_audio, (int(np.round(enhancer_sample_rate * real_silence_front)), 0))
-
-        return enhanced_audio, enhancer_sample_rate
-
-
-class NsfHifiGAN(torch.nn.Module):
-    def __init__(self, model_path, device=None):
-        super().__init__()
-        if device is None:
-            device = 'cuda' if torch.cuda.is_available() else 'cpu'
-        self.device = device
-        print('| Load HifiGAN: ', model_path)
-        self.model, self.h = load_model(model_path, device=self.device)
-
-    def sample_rate(self):
-        return self.h.sampling_rate
-
-    def hop_size(self):
-        return self.h.hop_size
-
-    def forward(self, audio, f0):
-        stft = STFT(
-            self.h.sampling_rate,
-            self.h.num_mels,
-            self.h.n_fft,
-            self.h.win_size,
-            self.h.hop_size,
-            self.h.fmin,
-            self.h.fmax)
-        with torch.no_grad():
-            mel = stft.get_mel(audio)
-            enhanced_audio = self.model(mel, f0[:,:mel.size(-1)]).view(-1)
-        return enhanced_audio, self.h.sampling_rate
spaces/Al-Chan/Vits_League_of_Legends_Yuumi_TTS/mel_processing.py
DELETED
@@ -1,112 +0,0 @@
-import math
-import os
-import random
-import torch
-from torch import nn
-import torch.nn.functional as F
-import torch.utils.data
-import numpy as np
-import librosa
-import librosa.util as librosa_util
-from librosa.util import normalize, pad_center, tiny
-from scipy.signal import get_window
-from scipy.io.wavfile import read
-from librosa.filters import mel as librosa_mel_fn
-
-MAX_WAV_VALUE = 32768.0
-
-
-def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
-    """
-    PARAMS
-    ------
-    C: compression factor
-    """
-    return torch.log(torch.clamp(x, min=clip_val) * C)
-
-
-def dynamic_range_decompression_torch(x, C=1):
-    """
-    PARAMS
-    ------
-    C: compression factor used to compress
-    """
-    return torch.exp(x) / C
-
-
-def spectral_normalize_torch(magnitudes):
-    output = dynamic_range_compression_torch(magnitudes)
-    return output
-
-
-def spectral_de_normalize_torch(magnitudes):
-    output = dynamic_range_decompression_torch(magnitudes)
-    return output
-
-
-mel_basis = {}
-hann_window = {}
-
-
-def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False):
-    if torch.min(y) < -1.:
-        print('min value is ', torch.min(y))
-    if torch.max(y) > 1.:
-        print('max value is ', torch.max(y))
-
-    global hann_window
-    dtype_device = str(y.dtype) + '_' + str(y.device)
-    wnsize_dtype_device = str(win_size) + '_' + dtype_device
-    if wnsize_dtype_device not in hann_window:
-        hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
-
-    y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
-    y = y.squeeze(1)
-
-    spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
-                      center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)
-
-    spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
-    return spec
-
-
-def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):
-    global mel_basis
-    dtype_device = str(spec.dtype) + '_' + str(spec.device)
-    fmax_dtype_device = str(fmax) + '_' + dtype_device
-    if fmax_dtype_device not in mel_basis:
-        mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
-        mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device)
-    spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
-    spec = spectral_normalize_torch(spec)
-    return spec
-
-
-def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
-    if torch.min(y) < -1.:
-        print('min value is ', torch.min(y))
-    if torch.max(y) > 1.:
-        print('max value is ', torch.max(y))
-
-    global mel_basis, hann_window
-    dtype_device = str(y.dtype) + '_' + str(y.device)
-    fmax_dtype_device = str(fmax) + '_' + dtype_device
-    wnsize_dtype_device = str(win_size) + '_' + dtype_device
-    if fmax_dtype_device not in mel_basis:
-        mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
-        mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device)
-    if wnsize_dtype_device not in hann_window:
-        hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
-
-    y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
-    y = y.squeeze(1)
-
-    spec = torch.stft(y.float(), n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
-                      center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)
-
-    spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
-
-    spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
-    spec = spectral_normalize_torch(spec)
-
-    return spec
spaces/Alesmikes/Elvirespeak/README.md
DELETED
@@ -1,13 +0,0 @@
----
-title: QnA
-emoji: 📈
-colorFrom: indigo
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.24.1
-app_file: app.py
-pinned: false
-duplicated_from: GenAIDemo/economic-forecast
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/AlexWang/lama/saicinpainting/training/trainers/default.py
DELETED
@@ -1,175 +0,0 @@
-import logging
-
-import torch
-import torch.nn.functional as F
-from omegaconf import OmegaConf
-
-from saicinpainting.training.data.datasets import make_constant_area_crop_params
-from saicinpainting.training.losses.distance_weighting import make_mask_distance_weighter
-from saicinpainting.training.losses.feature_matching import feature_matching_loss, masked_l1_loss
-from saicinpainting.training.modules.fake_fakes import FakeFakesGenerator
-from saicinpainting.training.trainers.base import BaseInpaintingTrainingModule, make_multiscale_noise
-from saicinpainting.utils import add_prefix_to_keys, get_ramp
-
-LOGGER = logging.getLogger(__name__)
-
-
-def make_constant_area_crop_batch(batch, **kwargs):
-    crop_y, crop_x, crop_height, crop_width = make_constant_area_crop_params(img_height=batch['image'].shape[2],
-                                                                             img_width=batch['image'].shape[3],
-                                                                             **kwargs)
-    batch['image'] = batch['image'][:, :, crop_y : crop_y + crop_height, crop_x : crop_x + crop_width]
-    batch['mask'] = batch['mask'][:, :, crop_y: crop_y + crop_height, crop_x: crop_x + crop_width]
-    return batch
-
-
-class DefaultInpaintingTrainingModule(BaseInpaintingTrainingModule):
-    def __init__(self, *args, concat_mask=True, rescale_scheduler_kwargs=None, image_to_discriminator='predicted_image',
-                 add_noise_kwargs=None, noise_fill_hole=False, const_area_crop_kwargs=None,
-                 distance_weighter_kwargs=None, distance_weighted_mask_for_discr=False,
-                 fake_fakes_proba=0, fake_fakes_generator_kwargs=None,
-                 **kwargs):
-        super().__init__(*args, **kwargs)
-        self.concat_mask = concat_mask
-        self.rescale_size_getter = get_ramp(**rescale_scheduler_kwargs) if rescale_scheduler_kwargs is not None else None
-        self.image_to_discriminator = image_to_discriminator
-        self.add_noise_kwargs = add_noise_kwargs
-        self.noise_fill_hole = noise_fill_hole
-        self.const_area_crop_kwargs = const_area_crop_kwargs
-        self.refine_mask_for_losses = make_mask_distance_weighter(**distance_weighter_kwargs) \
-            if distance_weighter_kwargs is not None else None
-        self.distance_weighted_mask_for_discr = distance_weighted_mask_for_discr
-
-        self.fake_fakes_proba = fake_fakes_proba
-        if self.fake_fakes_proba > 1e-3:
-            self.fake_fakes_gen = FakeFakesGenerator(**(fake_fakes_generator_kwargs or {}))
-
-    def forward(self, batch):
-        if self.training and self.rescale_size_getter is not None:
-            cur_size = self.rescale_size_getter(self.global_step)
-            batch['image'] = F.interpolate(batch['image'], size=cur_size, mode='bilinear', align_corners=False)
-            batch['mask'] = F.interpolate(batch['mask'], size=cur_size, mode='nearest')
-
-        if self.training and self.const_area_crop_kwargs is not None:
-            batch = make_constant_area_crop_batch(batch, **self.const_area_crop_kwargs)
-
-        img = batch['image']
-        mask = batch['mask']
-
-        masked_img = img * (1 - mask)
-
-        if self.add_noise_kwargs is not None:
-            noise = make_multiscale_noise(masked_img, **self.add_noise_kwargs)
-            if self.noise_fill_hole:
-                masked_img = masked_img + mask * noise[:, :masked_img.shape[1]]
-            masked_img = torch.cat([masked_img, noise], dim=1)
-
-        if self.concat_mask:
-            masked_img = torch.cat([masked_img, mask], dim=1)
-
-        batch['predicted_image'] = self.generator(masked_img)
-        batch['inpainted'] = mask * batch['predicted_image'] + (1 - mask) * batch['image']
-
-        if self.fake_fakes_proba > 1e-3:
-            if self.training and torch.rand(1).item() < self.fake_fakes_proba:
-                batch['fake_fakes'], batch['fake_fakes_masks'] = self.fake_fakes_gen(img, mask)
-                batch['use_fake_fakes'] = True
-            else:
-                batch['fake_fakes'] = torch.zeros_like(img)
-                batch['fake_fakes_masks'] = torch.zeros_like(mask)
-                batch['use_fake_fakes'] = False
-
-        batch['mask_for_losses'] = self.refine_mask_for_losses(img, batch['predicted_image'], mask) \
-            if self.refine_mask_for_losses is not None and self.training \
-            else mask
-
-        return batch
-
-    def generator_loss(self, batch):
-        img = batch['image']
-        predicted_img = batch[self.image_to_discriminator]
-        original_mask = batch['mask']
-        supervised_mask = batch['mask_for_losses']
-
-        # L1
-        l1_value = masked_l1_loss(predicted_img, img, supervised_mask,
-                                  self.config.losses.l1.weight_known,
-                                  self.config.losses.l1.weight_missing)
-
-        total_loss = l1_value
-        metrics = dict(gen_l1=l1_value)
-
-        # vgg-based perceptual loss
-        if self.config.losses.perceptual.weight > 0:
-            pl_value = self.loss_pl(predicted_img, img, mask=supervised_mask).sum() * self.config.losses.perceptual.weight
-            total_loss = total_loss + pl_value
-            metrics['gen_pl'] = pl_value
-
-        # discriminator
-        # adversarial_loss calls backward by itself
-        mask_for_discr = supervised_mask if self.distance_weighted_mask_for_discr else original_mask
-        self.adversarial_loss.pre_generator_step(real_batch=img, fake_batch=predicted_img,
-                                                 generator=self.generator, discriminator=self.discriminator)
-        discr_real_pred, discr_real_features = self.discriminator(img)
-        discr_fake_pred, discr_fake_features = self.discriminator(predicted_img)
-        adv_gen_loss, adv_metrics = self.adversarial_loss.generator_loss(real_batch=img,
-                                                                         fake_batch=predicted_img,
-                                                                         discr_real_pred=discr_real_pred,
-                                                                         discr_fake_pred=discr_fake_pred,
-                                                                         mask=mask_for_discr)
-        total_loss = total_loss + adv_gen_loss
-        metrics['gen_adv'] = adv_gen_loss
-        metrics.update(add_prefix_to_keys(adv_metrics, 'adv_'))
-
-        # feature matching
-        if self.config.losses.feature_matching.weight > 0:
-            need_mask_in_fm = OmegaConf.to_container(self.config.losses.feature_matching).get('pass_mask', False)
-            mask_for_fm = supervised_mask if need_mask_in_fm else None
-            fm_value = feature_matching_loss(discr_fake_features, discr_real_features,
-                                             mask=mask_for_fm) * self.config.losses.feature_matching.weight
-            total_loss = total_loss + fm_value
-            metrics['gen_fm'] = fm_value
-
-        if self.loss_resnet_pl is not None:
-            resnet_pl_value = self.loss_resnet_pl(predicted_img, img)
-            total_loss = total_loss + resnet_pl_value
-            metrics['gen_resnet_pl'] = resnet_pl_value
-
-        return total_loss, metrics
-
-    def discriminator_loss(self, batch):
-        total_loss = 0
-        metrics = {}
-
-        predicted_img = batch[self.image_to_discriminator].detach()
-        self.adversarial_loss.pre_discriminator_step(real_batch=batch['image'], fake_batch=predicted_img,
-                                                     generator=self.generator, discriminator=self.discriminator)
-        discr_real_pred, discr_real_features = self.discriminator(batch['image'])
-        discr_fake_pred, discr_fake_features = self.discriminator(predicted_img)
-        adv_discr_loss, adv_metrics = self.adversarial_loss.discriminator_loss(real_batch=batch['image'],
-                                                                               fake_batch=predicted_img,
-                                                                               discr_real_pred=discr_real_pred,
-                                                                               discr_fake_pred=discr_fake_pred,
-                                                                               mask=batch['mask'])
-        total_loss = total_loss + adv_discr_loss
-        metrics['discr_adv'] = adv_discr_loss
-        metrics.update(add_prefix_to_keys(adv_metrics, 'adv_'))
-
-
-        if batch.get('use_fake_fakes', False):
-            fake_fakes = batch['fake_fakes']
-            self.adversarial_loss.pre_discriminator_step(real_batch=batch['image'], fake_batch=fake_fakes,
-                                                         generator=self.generator, discriminator=self.discriminator)
-            discr_fake_fakes_pred, _ = self.discriminator(fake_fakes)
-            fake_fakes_adv_discr_loss, fake_fakes_adv_metrics = self.adversarial_loss.discriminator_loss(
-                real_batch=batch['image'],
-                fake_batch=fake_fakes,
-                discr_real_pred=discr_real_pred,
-                discr_fake_pred=discr_fake_fakes_pred,
-                mask=batch['mask']
-            )
-            total_loss = total_loss + fake_fakes_adv_discr_loss
-            metrics['discr_adv_fake_fakes'] = fake_fakes_adv_discr_loss
-            metrics.update(add_prefix_to_keys(fake_fakes_adv_metrics, 'adv_'))
-
-        return total_loss, metrics
spaces/Alycer/VITS-Umamusume-voice-synthesizer/modules.py
DELETED
@@ -1,387 +0,0 @@
-import math
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-from torch.nn import Conv1d
-from torch.nn.utils import weight_norm, remove_weight_norm
-
-import commons
-from commons import init_weights, get_padding
-from transforms import piecewise_rational_quadratic_transform
-
-
-LRELU_SLOPE = 0.1
-
-
-class LayerNorm(nn.Module):
-  def __init__(self, channels, eps=1e-5):
-    super().__init__()
-    self.channels = channels
-    self.eps = eps
-
-    self.gamma = nn.Parameter(torch.ones(channels))
-    self.beta = nn.Parameter(torch.zeros(channels))
-
-  def forward(self, x):
-    x = x.transpose(1, -1)
-    x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
-    return x.transpose(1, -1)
-
-
-class ConvReluNorm(nn.Module):
-  def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
-    super().__init__()
-    self.in_channels = in_channels
-    self.hidden_channels = hidden_channels
-    self.out_channels = out_channels
-    self.kernel_size = kernel_size
-    self.n_layers = n_layers
-    self.p_dropout = p_dropout
-    assert n_layers > 1, "Number of layers should be larger than 0."
-
-    self.conv_layers = nn.ModuleList()
-    self.norm_layers = nn.ModuleList()
-    self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2))
-    self.norm_layers.append(LayerNorm(hidden_channels))
-    self.relu_drop = nn.Sequential(
-        nn.ReLU(),
-        nn.Dropout(p_dropout))
-    for _ in range(n_layers-1):
-      self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2))
-      self.norm_layers.append(LayerNorm(hidden_channels))
-    self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
-    self.proj.weight.data.zero_()
-    self.proj.bias.data.zero_()
-
-  def forward(self, x, x_mask):
-    x_org = x
-    for i in range(self.n_layers):
-      x = self.conv_layers[i](x * x_mask)
-      x = self.norm_layers[i](x)
-      x = self.relu_drop(x)
-    x = x_org + self.proj(x)
-    return x * x_mask
-
-
-class DDSConv(nn.Module):
-  """
-  Dialted and Depth-Separable Convolution
-  """
-  def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
-    super().__init__()
-    self.channels = channels
-    self.kernel_size = kernel_size
-    self.n_layers = n_layers
-    self.p_dropout = p_dropout
-
-    self.drop = nn.Dropout(p_dropout)
-    self.convs_sep = nn.ModuleList()
-    self.convs_1x1 = nn.ModuleList()
-    self.norms_1 = nn.ModuleList()
-    self.norms_2 = nn.ModuleList()
-    for i in range(n_layers):
-      dilation = kernel_size ** i
-      padding = (kernel_size * dilation - dilation) // 2
-      self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size,
-          groups=channels, dilation=dilation, padding=padding
-      ))
-      self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
-      self.norms_1.append(LayerNorm(channels))
-      self.norms_2.append(LayerNorm(channels))
-
-  def forward(self, x, x_mask, g=None):
-    if g is not None:
-      x = x + g
-    for i in range(self.n_layers):
-      y = self.convs_sep[i](x * x_mask)
-      y = self.norms_1[i](y)
-      y = F.gelu(y)
-      y = self.convs_1x1[i](y)
-      y = self.norms_2[i](y)
-      y = F.gelu(y)
-      y = self.drop(y)
-      x = x + y
-    return x * x_mask
-
-
-class WN(torch.nn.Module):
-  def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
-    super(WN, self).__init__()
-    assert(kernel_size % 2 == 1)
-    self.hidden_channels =hidden_channels
-    self.kernel_size = kernel_size,
-    self.dilation_rate = dilation_rate
-    self.n_layers = n_layers
-    self.gin_channels = gin_channels
-    self.p_dropout = p_dropout
-
-    self.in_layers = torch.nn.ModuleList()
-    self.res_skip_layers = torch.nn.ModuleList()
-    self.drop = nn.Dropout(p_dropout)
-
-    if gin_channels != 0:
-      cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1)
-      self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
-
-    for i in range(n_layers):
-      dilation = dilation_rate ** i
-      padding = int((kernel_size * dilation - dilation) / 2)
-      in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size,
-                                 dilation=dilation, padding=padding)
-      in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
-      self.in_layers.append(in_layer)
-
-      # last one is not necessary
-      if i < n_layers - 1:
-        res_skip_channels = 2 * hidden_channels
-      else:
-        res_skip_channels = hidden_channels
-
-      res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
-      res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
-      self.res_skip_layers.append(res_skip_layer)
-
-  def forward(self, x, x_mask, g=None, **kwargs):
-    output = torch.zeros_like(x)
-    n_channels_tensor = torch.IntTensor([self.hidden_channels])
-
-    if g is not None:
-      g = self.cond_layer(g)
-
-    for i in range(self.n_layers):
-      x_in = self.in_layers[i](x)
-      if g is not None:
-        cond_offset = i * 2 * self.hidden_channels
-        g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:]
-      else:
-        g_l = torch.zeros_like(x_in)
-
-      acts = commons.fused_add_tanh_sigmoid_multiply(
-          x_in,
-          g_l,
-          n_channels_tensor)
-      acts = self.drop(acts)
-
-      res_skip_acts = self.res_skip_layers[i](acts)
-      if i < self.n_layers - 1:
-        res_acts = res_skip_acts[:,:self.hidden_channels,:]
-        x = (x + res_acts) * x_mask
-        output = output + res_skip_acts[:,self.hidden_channels:,:]
-      else:
-        output = output + res_skip_acts
-    return output * x_mask
-
-  def remove_weight_norm(self):
-    if self.gin_channels != 0:
-      torch.nn.utils.remove_weight_norm(self.cond_layer)
-    for l in self.in_layers:
-      torch.nn.utils.remove_weight_norm(l)
-    for l in self.res_skip_layers:
-      torch.nn.utils.remove_weight_norm(l)
-
-
-class ResBlock1(torch.nn.Module):
-  def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
-    super(ResBlock1, self).__init__()
-    self.convs1 = nn.ModuleList([
-        weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
-                           padding=get_padding(kernel_size, dilation[0]))),
-        weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
-                           padding=get_padding(kernel_size, dilation[1]))),
-        weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
-                           padding=get_padding(kernel_size, dilation[2])))
-    ])
-    self.convs1.apply(init_weights)
-
-    self.convs2 = nn.ModuleList([
-        weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
-                           padding=get_padding(kernel_size, 1))),
-        weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
-                           padding=get_padding(kernel_size, 1))),
-        weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
-                           padding=get_padding(kernel_size, 1)))
-    ])
-    self.convs2.apply(init_weights)
-
-  def forward(self, x, x_mask=None):
-    for c1, c2 in zip(self.convs1, self.convs2):
-      xt = F.leaky_relu(x, LRELU_SLOPE)
-      if x_mask is not None:
-        xt = xt * x_mask
-      xt = c1(xt)
-      xt = F.leaky_relu(xt, LRELU_SLOPE)
-      if x_mask is not None:
-        xt = xt * x_mask
-      xt = c2(xt)
-      x = xt + x
-    if x_mask is not None:
-      x = x * x_mask
-    return x
-
-  def remove_weight_norm(self):
-    for l in self.convs1:
-      remove_weight_norm(l)
-    for l in self.convs2:
-      remove_weight_norm(l)
-
-
-class ResBlock2(torch.nn.Module):
-  def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
-    super(ResBlock2, self).__init__()
-    self.convs = nn.ModuleList([
-        weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
-                           padding=get_padding(kernel_size, dilation[0]))),
-        weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
-                           padding=get_padding(kernel_size, dilation[1])))
-    ])
-    self.convs.apply(init_weights)
-
-  def forward(self, x, x_mask=None):
-    for c in self.convs:
-      xt = F.leaky_relu(x, LRELU_SLOPE)
-      if x_mask is not None:
-        xt = xt * x_mask
-      xt = c(xt)
-      x = xt + x
-    if x_mask is not None:
-      x = x * x_mask
-    return x
-
-  def remove_weight_norm(self):
-    for l in self.convs:
-      remove_weight_norm(l)
-
-
-class Log(nn.Module):
-  def forward(self, x, x_mask, reverse=False, **kwargs):
-    if not reverse:
-      y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
-      logdet = torch.sum(-y, [1, 2])
-      return y, logdet
-    else:
-      x = torch.exp(x) * x_mask
-      return x
-
-
-class Flip(nn.Module):
-  def forward(self, x, *args, reverse=False, **kwargs):
-    x = torch.flip(x, [1])
-    if not reverse:
-      logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
-      return x, logdet
-    else:
-      return x
-
-
-class ElementwiseAffine(nn.Module):
-  def __init__(self, channels):
|
279 |
-
super().__init__()
|
280 |
-
self.channels = channels
|
281 |
-
self.m = nn.Parameter(torch.zeros(channels,1))
|
282 |
-
self.logs = nn.Parameter(torch.zeros(channels,1))
|
283 |
-
|
284 |
-
def forward(self, x, x_mask, reverse=False, **kwargs):
|
285 |
-
if not reverse:
|
286 |
-
y = self.m + torch.exp(self.logs) * x
|
287 |
-
y = y * x_mask
|
288 |
-
logdet = torch.sum(self.logs * x_mask, [1,2])
|
289 |
-
return y, logdet
|
290 |
-
else:
|
291 |
-
x = (x - self.m) * torch.exp(-self.logs) * x_mask
|
292 |
-
return x
|
293 |
-
|
294 |
-
|
295 |
-
class ResidualCouplingLayer(nn.Module):
|
296 |
-
def __init__(self,
|
297 |
-
channels,
|
298 |
-
hidden_channels,
|
299 |
-
kernel_size,
|
300 |
-
dilation_rate,
|
301 |
-
n_layers,
|
302 |
-
p_dropout=0,
|
303 |
-
gin_channels=0,
|
304 |
-
mean_only=False):
|
305 |
-
assert channels % 2 == 0, "channels should be divisible by 2"
|
306 |
-
super().__init__()
|
307 |
-
self.channels = channels
|
308 |
-
self.hidden_channels = hidden_channels
|
309 |
-
self.kernel_size = kernel_size
|
310 |
-
self.dilation_rate = dilation_rate
|
311 |
-
self.n_layers = n_layers
|
312 |
-
self.half_channels = channels // 2
|
313 |
-
self.mean_only = mean_only
|
314 |
-
|
315 |
-
self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
|
316 |
-
self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels)
|
317 |
-
self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
|
318 |
-
self.post.weight.data.zero_()
|
319 |
-
self.post.bias.data.zero_()
|
320 |
-
|
321 |
-
def forward(self, x, x_mask, g=None, reverse=False):
|
322 |
-
x0, x1 = torch.split(x, [self.half_channels]*2, 1)
|
323 |
-
h = self.pre(x0) * x_mask
|
324 |
-
h = self.enc(h, x_mask, g=g)
|
325 |
-
stats = self.post(h) * x_mask
|
326 |
-
if not self.mean_only:
|
327 |
-
m, logs = torch.split(stats, [self.half_channels]*2, 1)
|
328 |
-
else:
|
329 |
-
m = stats
|
330 |
-
logs = torch.zeros_like(m)
|
331 |
-
|
332 |
-
if not reverse:
|
333 |
-
x1 = m + x1 * torch.exp(logs) * x_mask
|
334 |
-
x = torch.cat([x0, x1], 1)
|
335 |
-
logdet = torch.sum(logs, [1,2])
|
336 |
-
return x, logdet
|
337 |
-
else:
|
338 |
-
x1 = (x1 - m) * torch.exp(-logs) * x_mask
|
339 |
-
x = torch.cat([x0, x1], 1)
|
340 |
-
return x
|
341 |
-
|
342 |
-
|
343 |
-
class ConvFlow(nn.Module):
|
344 |
-
def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0):
|
345 |
-
super().__init__()
|
346 |
-
self.in_channels = in_channels
|
347 |
-
self.filter_channels = filter_channels
|
348 |
-
self.kernel_size = kernel_size
|
349 |
-
self.n_layers = n_layers
|
350 |
-
self.num_bins = num_bins
|
351 |
-
self.tail_bound = tail_bound
|
352 |
-
self.half_channels = in_channels // 2
|
353 |
-
|
354 |
-
self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
|
355 |
-
self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.)
|
356 |
-
self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1)
|
357 |
-
self.proj.weight.data.zero_()
|
358 |
-
self.proj.bias.data.zero_()
|
359 |
-
|
360 |
-
def forward(self, x, x_mask, g=None, reverse=False):
|
361 |
-
x0, x1 = torch.split(x, [self.half_channels]*2, 1)
|
362 |
-
h = self.pre(x0)
|
363 |
-
h = self.convs(h, x_mask, g=g)
|
364 |
-
h = self.proj(h) * x_mask
|
365 |
-
|
366 |
-
b, c, t = x0.shape
|
367 |
-
h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?]
|
368 |
-
|
369 |
-
unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels)
|
370 |
-
unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels)
|
371 |
-
unnormalized_derivatives = h[..., 2 * self.num_bins:]
|
372 |
-
|
373 |
-
x1, logabsdet = piecewise_rational_quadratic_transform(x1,
|
374 |
-
unnormalized_widths,
|
375 |
-
unnormalized_heights,
|
376 |
-
unnormalized_derivatives,
|
377 |
-
inverse=reverse,
|
378 |
-
tails='linear',
|
379 |
-
tail_bound=self.tail_bound
|
380 |
-
)
|
381 |
-
|
382 |
-
x = torch.cat([x0, x1], 1) * x_mask
|
383 |
-
logdet = torch.sum(logabsdet * x_mask, [1,2])
|
384 |
-
if not reverse:
|
385 |
-
return x, logdet
|
386 |
-
else:
|
387 |
-
return x
|
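For readers skimming the diff: the flow modules above (Log, Flip, ElementwiseAffine, ResidualCouplingLayer, ConvFlow) all share the same forward signature, returning (y, logdet) in the forward direction and the inverted tensor in reverse. A minimal sketch of round-tripping a ResidualCouplingLayer, assuming the deleted modules.py and its commons/transforms dependencies are importable as `modules` (the import path is an assumption, not part of the diff):

import torch
import modules  # hypothetical import of the deleted file

# Toy shapes: batch of 2, 4 channels (must be even), 50 frames.
x = torch.randn(2, 4, 50)
x_mask = torch.ones(2, 1, 50)

layer = modules.ResidualCouplingLayer(channels=4, hidden_channels=8,
                                      kernel_size=5, dilation_rate=1, n_layers=2)
y, logdet = layer(x, x_mask)            # forward pass also returns the log-determinant
x_rec = layer(y, x_mask, reverse=True)  # reverse pass inverts it

# Only the second half of the channels is transformed, and the statistics
# depend only on the untouched half, so the round trip should match the
# input up to numerical error.
assert torch.allclose(x, x_rec, atol=1e-5)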
spaces/Amrrs/DragGan-Inversion/stylegan_human/training/augment.py
DELETED
@@ -1,562 +0,0 @@
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.

"""Augmentation pipeline from the paper
"Training Generative Adversarial Networks with Limited Data".
Matches the original implementation by Karras et al. at
https://github.com/NVlabs/stylegan2-ada/blob/main/training/augment.py"""

import numpy as np
import scipy.signal
import torch
from torch_utils import persistence
from torch_utils import misc
from torch_utils.ops import upfirdn2d
from torch_utils.ops import grid_sample_gradfix
from torch_utils.ops import conv2d_gradfix

# ----------------------------------------------------------------------------
# Coefficients of various wavelet decomposition low-pass filters.

wavelets = {
    'haar': [0.7071067811865476, 0.7071067811865476],
    'db1': [0.7071067811865476, 0.7071067811865476],
    'db2': [-0.12940952255092145, 0.22414386804185735, 0.836516303737469, 0.48296291314469025],
    'db3': [0.035226291882100656, -0.08544127388224149, -0.13501102001039084, 0.4598775021193313, 0.8068915093133388, 0.3326705529509569],
    'db4': [-0.010597401784997278, 0.032883011666982945, 0.030841381835986965, -0.18703481171888114, -0.02798376941698385, 0.6308807679295904, 0.7148465705525415, 0.23037781330885523],
    'db5': [0.003335725285001549, -0.012580751999015526, -0.006241490213011705, 0.07757149384006515, -0.03224486958502952, -0.24229488706619015, 0.13842814590110342, 0.7243085284385744, 0.6038292697974729, 0.160102397974125],
    'db6': [-0.00107730108499558, 0.004777257511010651, 0.0005538422009938016, -0.031582039318031156, 0.02752286553001629, 0.09750160558707936, -0.12976686756709563, -0.22626469396516913, 0.3152503517092432, 0.7511339080215775, 0.4946238903983854, 0.11154074335008017],
    'db7': [0.0003537138000010399, -0.0018016407039998328, 0.00042957797300470274, 0.012550998556013784, -0.01657454163101562, -0.03802993693503463, 0.0806126091510659, 0.07130921926705004, -0.22403618499416572, -0.14390600392910627, 0.4697822874053586, 0.7291320908465551, 0.39653931948230575, 0.07785205408506236],
    'db8': [-0.00011747678400228192, 0.0006754494059985568, -0.0003917403729959771, -0.00487035299301066, 0.008746094047015655, 0.013981027917015516, -0.04408825393106472, -0.01736930100202211, 0.128747426620186, 0.00047248457399797254, -0.2840155429624281, -0.015829105256023893, 0.5853546836548691, 0.6756307362980128, 0.3128715909144659, 0.05441584224308161],
    'sym2': [-0.12940952255092145, 0.22414386804185735, 0.836516303737469, 0.48296291314469025],
    'sym3': [0.035226291882100656, -0.08544127388224149, -0.13501102001039084, 0.4598775021193313, 0.8068915093133388, 0.3326705529509569],
    'sym4': [-0.07576571478927333, -0.02963552764599851, 0.49761866763201545, 0.8037387518059161, 0.29785779560527736, -0.09921954357684722, -0.012603967262037833, 0.0322231006040427],
    'sym5': [0.027333068345077982, 0.029519490925774643, -0.039134249302383094, 0.1993975339773936, 0.7234076904024206, 0.6339789634582119, 0.01660210576452232, -0.17532808990845047, -0.021101834024758855, 0.019538882735286728],
    'sym6': [0.015404109327027373, 0.0034907120842174702, -0.11799011114819057, -0.048311742585633, 0.4910559419267466, 0.787641141030194, 0.3379294217276218, -0.07263752278646252, -0.021060292512300564, 0.04472490177066578, 0.0017677118642428036, -0.007800708325034148],
    'sym7': [0.002681814568257878, -0.0010473848886829163, -0.01263630340325193, 0.03051551316596357, 0.0678926935013727, -0.049552834937127255, 0.017441255086855827, 0.5361019170917628, 0.767764317003164, 0.2886296317515146, -0.14004724044296152, -0.10780823770381774, 0.004010244871533663, 0.010268176708511255],
    'sym8': [-0.0033824159510061256, -0.0005421323317911481, 0.03169508781149298, 0.007607487324917605, -0.1432942383508097, -0.061273359067658524, 0.4813596512583722, 0.7771857517005235, 0.3644418948353314, -0.05194583810770904, -0.027219029917056003, 0.049137179673607506, 0.003808752013890615, -0.01495225833704823, -0.0003029205147213668, 0.0018899503327594609],
}

# ----------------------------------------------------------------------------
# Helpers for constructing transformation matrices.


def matrix(*rows, device=None):
    assert all(len(row) == len(rows[0]) for row in rows)
    elems = [x for row in rows for x in row]
    ref = [x for x in elems if isinstance(x, torch.Tensor)]
    if len(ref) == 0:
        return misc.constant(np.asarray(rows), device=device)
    assert device is None or device == ref[0].device
    elems = [x if isinstance(x, torch.Tensor) else misc.constant(
        x, shape=ref[0].shape, device=ref[0].device) for x in elems]
    return torch.stack(elems, dim=-1).reshape(ref[0].shape + (len(rows), -1))


def translate2d(tx, ty, **kwargs):
    return matrix(
        [1, 0, tx],
        [0, 1, ty],
        [0, 0, 1],
        **kwargs)


def translate3d(tx, ty, tz, **kwargs):
    return matrix(
        [1, 0, 0, tx],
        [0, 1, 0, ty],
        [0, 0, 1, tz],
        [0, 0, 0, 1],
        **kwargs)


def scale2d(sx, sy, **kwargs):
    return matrix(
        [sx, 0, 0],
        [0, sy, 0],
        [0, 0, 1],
        **kwargs)


def scale3d(sx, sy, sz, **kwargs):
    return matrix(
        [sx, 0, 0, 0],
        [0, sy, 0, 0],
        [0, 0, sz, 0],
        [0, 0, 0, 1],
        **kwargs)


def rotate2d(theta, **kwargs):
    return matrix(
        [torch.cos(theta), torch.sin(-theta), 0],
        [torch.sin(theta), torch.cos(theta), 0],
        [0, 0, 1],
        **kwargs)


def rotate3d(v, theta, **kwargs):
    vx = v[..., 0]
    vy = v[..., 1]
    vz = v[..., 2]
    s = torch.sin(theta)
    c = torch.cos(theta)
    cc = 1 - c
    return matrix(
        [vx*vx*cc+c, vx*vy*cc-vz*s, vx*vz*cc+vy*s, 0],
        [vy*vx*cc+vz*s, vy*vy*cc+c, vy*vz*cc-vx*s, 0],
        [vz*vx*cc-vy*s, vz*vy*cc+vx*s, vz*vz*cc+c, 0],
        [0, 0, 0, 1],
        **kwargs)


def translate2d_inv(tx, ty, **kwargs):
    return translate2d(-tx, -ty, **kwargs)


def scale2d_inv(sx, sy, **kwargs):
    return scale2d(1 / sx, 1 / sy, **kwargs)


def rotate2d_inv(theta, **kwargs):
    return rotate2d(-theta, **kwargs)

# ----------------------------------------------------------------------------
# Versatile image augmentation pipeline from the paper
# "Training Generative Adversarial Networks with Limited Data".
#
# All augmentations are disabled by default; individual augmentations can
# be enabled by setting their probability multipliers to 1.


@persistence.persistent_class
class AugmentPipe(torch.nn.Module):
    def __init__(self,
                 xflip=0, rotate90=0, xint=0, xint_max=0.125,
                 scale=0, rotate=0, aniso=0, xfrac=0, scale_std=0.2, rotate_max=1, aniso_std=0.2, xfrac_std=0.125,
                 brightness=0, contrast=0, lumaflip=0, hue=0, saturation=0, brightness_std=0.2, contrast_std=0.5, hue_max=1, saturation_std=1,
                 imgfilter=0, imgfilter_bands=[1, 1, 1, 1], imgfilter_std=1,
                 noise=0, cutout=0, noise_std=0.1, cutout_size=0.5,
                 ):
        super().__init__()
        # Overall multiplier for augmentation probability.
        self.register_buffer('p', torch.ones([]))

        # Pixel blitting.
        # Probability multiplier for x-flip.
        self.xflip = float(xflip)
        # Probability multiplier for 90 degree rotations.
        self.rotate90 = float(rotate90)
        # Probability multiplier for integer translation.
        self.xint = float(xint)
        # Range of integer translation, relative to image dimensions.
        self.xint_max = float(xint_max)

        # General geometric transformations.
        # Probability multiplier for isotropic scaling.
        self.scale = float(scale)
        # Probability multiplier for arbitrary rotation.
        self.rotate = float(rotate)
        # Probability multiplier for anisotropic scaling.
        self.aniso = float(aniso)
        # Probability multiplier for fractional translation.
        self.xfrac = float(xfrac)
        # Log2 standard deviation of isotropic scaling.
        self.scale_std = float(scale_std)
        # Range of arbitrary rotation, 1 = full circle.
        self.rotate_max = float(rotate_max)
        # Log2 standard deviation of anisotropic scaling.
        self.aniso_std = float(aniso_std)
        # Standard deviation of fractional translation, relative to image dimensions.
        self.xfrac_std = float(xfrac_std)

        # Color transformations.
        # Probability multiplier for brightness.
        self.brightness = float(brightness)
        # Probability multiplier for contrast.
        self.contrast = float(contrast)
        # Probability multiplier for luma flip.
        self.lumaflip = float(lumaflip)
        # Probability multiplier for hue rotation.
        self.hue = float(hue)
        # Probability multiplier for saturation.
        self.saturation = float(saturation)
        # Standard deviation of brightness.
        self.brightness_std = float(brightness_std)
        # Log2 standard deviation of contrast.
        self.contrast_std = float(contrast_std)
        # Range of hue rotation, 1 = full circle.
        self.hue_max = float(hue_max)
        # Log2 standard deviation of saturation.
        self.saturation_std = float(saturation_std)

        # Image-space filtering.
        # Probability multiplier for image-space filtering.
        self.imgfilter = float(imgfilter)
        # Probability multipliers for individual frequency bands.
        self.imgfilter_bands = list(imgfilter_bands)
        # Log2 standard deviation of image-space filter amplification.
        self.imgfilter_std = float(imgfilter_std)

        # Image-space corruptions.
        # Probability multiplier for additive RGB noise.
        self.noise = float(noise)
        # Probability multiplier for cutout.
        self.cutout = float(cutout)
        # Standard deviation of additive RGB noise.
        self.noise_std = float(noise_std)
        # Size of the cutout rectangle, relative to image dimensions.
        self.cutout_size = float(cutout_size)

        # Setup orthogonal lowpass filter for geometric augmentations.
        self.register_buffer(
            'Hz_geom', upfirdn2d.setup_filter(wavelets['sym6']))

        # Construct filter bank for image-space filtering.
        Hz_lo = np.asarray(wavelets['sym2'])  # H(z)
        Hz_hi = Hz_lo * ((-1) ** np.arange(Hz_lo.size))  # H(-z)
        Hz_lo2 = np.convolve(Hz_lo, Hz_lo[::-1]) / 2  # H(z) * H(z^-1) / 2
        Hz_hi2 = np.convolve(Hz_hi, Hz_hi[::-1]) / 2  # H(-z) * H(-z^-1) / 2
        Hz_fbank = np.eye(4, 1)  # Bandpass(H(z), b_i)
        for i in range(1, Hz_fbank.shape[0]):
            Hz_fbank = np.dstack([Hz_fbank, np.zeros_like(Hz_fbank)]).reshape(
                Hz_fbank.shape[0], -1)[:, :-1]
            Hz_fbank = scipy.signal.convolve(Hz_fbank, [Hz_lo2])
            Hz_fbank[i, (Hz_fbank.shape[1] - Hz_hi2.size) //
                     2: (Hz_fbank.shape[1] + Hz_hi2.size) // 2] += Hz_hi2
        self.register_buffer('Hz_fbank', torch.as_tensor(
            Hz_fbank, dtype=torch.float32))

    def forward(self, images, debug_percentile=None):
        assert isinstance(images, torch.Tensor) and images.ndim == 4
        batch_size, num_channels, height, width = images.shape
        device = images.device
        if debug_percentile is not None:
            debug_percentile = torch.as_tensor(
                debug_percentile, dtype=torch.float32, device=device)

        # -------------------------------------
        # Select parameters for pixel blitting.
        # -------------------------------------

        # Initialize inverse homogeneous 2D transform: G_inv @ pixel_out ==> pixel_in
        I_3 = torch.eye(3, device=device)
        G_inv = I_3

        # Apply x-flip with probability (xflip * strength).
        if self.xflip > 0:
            i = torch.floor(torch.rand([batch_size], device=device) * 2)
            i = torch.where(torch.rand(
                [batch_size], device=device) < self.xflip * self.p, i, torch.zeros_like(i))
            if debug_percentile is not None:
                i = torch.full_like(i, torch.floor(debug_percentile * 2))
            G_inv = G_inv @ scale2d_inv(1 - 2 * i, 1)

        # Apply 90 degree rotations with probability (rotate90 * strength).
        if self.rotate90 > 0:
            i = torch.floor(torch.rand([batch_size], device=device) * 4)
            i = torch.where(torch.rand(
                [batch_size], device=device) < self.rotate90 * self.p, i, torch.zeros_like(i))
            if debug_percentile is not None:
                i = torch.full_like(i, torch.floor(debug_percentile * 4))
            G_inv = G_inv @ rotate2d_inv(-np.pi / 2 * i)

        # Apply integer translation with probability (xint * strength).
        if self.xint > 0:
            t = (torch.rand([batch_size, 2], device=device)
                 * 2 - 1) * self.xint_max
            t = torch.where(torch.rand(
                [batch_size, 1], device=device) < self.xint * self.p, t, torch.zeros_like(t))
            if debug_percentile is not None:
                t = torch.full_like(
                    t, (debug_percentile * 2 - 1) * self.xint_max)
            G_inv = G_inv @ translate2d_inv(torch.round(
                t[:, 0] * width), torch.round(t[:, 1] * height))

        # --------------------------------------------------------
        # Select parameters for general geometric transformations.
        # --------------------------------------------------------

        # Apply isotropic scaling with probability (scale * strength).
        if self.scale > 0:
            s = torch.exp2(torch.randn(
                [batch_size], device=device) * self.scale_std)
            s = torch.where(torch.rand(
                [batch_size], device=device) < self.scale * self.p, s, torch.ones_like(s))
            if debug_percentile is not None:
                s = torch.full_like(s, torch.exp2(torch.erfinv(
                    debug_percentile * 2 - 1) * self.scale_std))
            G_inv = G_inv @ scale2d_inv(s, s)

        # Apply pre-rotation with probability p_rot.
        # P(pre OR post) = p
        p_rot = 1 - torch.sqrt((1 - self.rotate * self.p).clamp(0, 1))
        if self.rotate > 0:
            theta = (torch.rand([batch_size], device=device)
                     * 2 - 1) * np.pi * self.rotate_max
            theta = torch.where(torch.rand(
                [batch_size], device=device) < p_rot, theta, torch.zeros_like(theta))
            if debug_percentile is not None:
                theta = torch.full_like(
                    theta, (debug_percentile * 2 - 1) * np.pi * self.rotate_max)
            G_inv = G_inv @ rotate2d_inv(-theta)  # Before anisotropic scaling.

        # Apply anisotropic scaling with probability (aniso * strength).
        if self.aniso > 0:
            s = torch.exp2(torch.randn(
                [batch_size], device=device) * self.aniso_std)
            s = torch.where(torch.rand(
                [batch_size], device=device) < self.aniso * self.p, s, torch.ones_like(s))
            if debug_percentile is not None:
                s = torch.full_like(s, torch.exp2(torch.erfinv(
                    debug_percentile * 2 - 1) * self.aniso_std))
            G_inv = G_inv @ scale2d_inv(s, 1 / s)

        # Apply post-rotation with probability p_rot.
        if self.rotate > 0:
            theta = (torch.rand([batch_size], device=device)
                     * 2 - 1) * np.pi * self.rotate_max
            theta = torch.where(torch.rand(
                [batch_size], device=device) < p_rot, theta, torch.zeros_like(theta))
            if debug_percentile is not None:
                theta = torch.zeros_like(theta)
            G_inv = G_inv @ rotate2d_inv(-theta)  # After anisotropic scaling.

        # Apply fractional translation with probability (xfrac * strength).
        if self.xfrac > 0:
            t = torch.randn([batch_size, 2], device=device) * self.xfrac_std
            t = torch.where(torch.rand(
                [batch_size, 1], device=device) < self.xfrac * self.p, t, torch.zeros_like(t))
            if debug_percentile is not None:
                t = torch.full_like(t, torch.erfinv(
                    debug_percentile * 2 - 1) * self.xfrac_std)
            G_inv = G_inv @ translate2d_inv(t[:, 0] * width, t[:, 1] * height)

        # ----------------------------------
        # Execute geometric transformations.
        # ----------------------------------

        # Execute if the transform is not identity.
        if G_inv is not I_3:

            # Calculate padding.
            cx = (width - 1) / 2
            cy = (height - 1) / 2
            cp = matrix([-cx, -cy, 1], [cx, -cy, 1], [cx, cy, 1],
                        [-cx, cy, 1], device=device)  # [idx, xyz]
            cp = G_inv @ cp.t()  # [batch, xyz, idx]
            Hz_pad = self.Hz_geom.shape[0] // 4
            margin = cp[:, :2, :].permute(
                1, 0, 2).flatten(1)  # [xy, batch * idx]
            # [x0, y0, x1, y1]
            margin = torch.cat([-margin, margin]).max(dim=1).values
            margin = margin + \
                misc.constant([Hz_pad * 2 - cx, Hz_pad * 2 - cy]
                              * 2, device=device)
            margin = margin.max(misc.constant([0, 0] * 2, device=device))
            margin = margin.min(misc.constant(
                [width - 1, height - 1] * 2, device=device))
            mx0, my0, mx1, my1 = margin.ceil().to(torch.int32)

            # Pad image and adjust origin.
            images = torch.nn.functional.pad(
                input=images, pad=[mx0, mx1, my0, my1], mode='reflect')
            G_inv = translate2d((mx0 - mx1) / 2, (my0 - my1) / 2) @ G_inv

            # Upsample.
            images = upfirdn2d.upsample2d(x=images, f=self.Hz_geom, up=2)
            G_inv = scale2d(
                2, 2, device=device) @ G_inv @ scale2d_inv(2, 2, device=device)
            G_inv = translate2d(-0.5, -0.5,
                                device=device) @ G_inv @ translate2d_inv(-0.5, -0.5, device=device)

            # Execute transformation.
            shape = [batch_size, num_channels,
                     (height + Hz_pad * 2) * 2, (width + Hz_pad * 2) * 2]
            G_inv = scale2d(2 / images.shape[3], 2 / images.shape[2], device=device) @ G_inv @ scale2d_inv(
                2 / shape[3], 2 / shape[2], device=device)
            grid = torch.nn.functional.affine_grid(
                theta=G_inv[:, :2, :], size=shape, align_corners=False)
            images = grid_sample_gradfix.grid_sample(images, grid)

            # Downsample and crop.
            images = upfirdn2d.downsample2d(
                x=images, f=self.Hz_geom, down=2, padding=-Hz_pad * 2, flip_filter=True)

        # --------------------------------------------
        # Select parameters for color transformations.
        # --------------------------------------------

        # Initialize homogeneous 3D transformation matrix: C @ color_in ==> color_out
        I_4 = torch.eye(4, device=device)
        C = I_4

        # Apply brightness with probability (brightness * strength).
        if self.brightness > 0:
            b = torch.randn([batch_size], device=device) * self.brightness_std
            b = torch.where(torch.rand(
                [batch_size], device=device) < self.brightness * self.p, b, torch.zeros_like(b))
            if debug_percentile is not None:
                b = torch.full_like(b, torch.erfinv(
                    debug_percentile * 2 - 1) * self.brightness_std)
            C = translate3d(b, b, b) @ C

        # Apply contrast with probability (contrast * strength).
        if self.contrast > 0:
            c = torch.exp2(torch.randn(
                [batch_size], device=device) * self.contrast_std)
            c = torch.where(torch.rand(
                [batch_size], device=device) < self.contrast * self.p, c, torch.ones_like(c))
            if debug_percentile is not None:
                c = torch.full_like(c, torch.exp2(torch.erfinv(
                    debug_percentile * 2 - 1) * self.contrast_std))
            C = scale3d(c, c, c) @ C

        # Apply luma flip with probability (lumaflip * strength).
        # Luma axis.
        v = misc.constant(np.asarray([1, 1, 1, 0]) / np.sqrt(3), device=device)
        if self.lumaflip > 0:
            i = torch.floor(torch.rand([batch_size, 1, 1], device=device) * 2)
            i = torch.where(torch.rand(
                [batch_size, 1, 1], device=device) < self.lumaflip * self.p, i, torch.zeros_like(i))
            if debug_percentile is not None:
                i = torch.full_like(i, torch.floor(debug_percentile * 2))
            C = (I_4 - 2 * v.ger(v) * i) @ C  # Householder reflection.

        # Apply hue rotation with probability (hue * strength).
        if self.hue > 0 and num_channels > 1:
            theta = (torch.rand([batch_size], device=device)
                     * 2 - 1) * np.pi * self.hue_max
            theta = torch.where(torch.rand(
                [batch_size], device=device) < self.hue * self.p, theta, torch.zeros_like(theta))
            if debug_percentile is not None:
                theta = torch.full_like(
                    theta, (debug_percentile * 2 - 1) * np.pi * self.hue_max)
            C = rotate3d(v, theta) @ C  # Rotate around v.

        # Apply saturation with probability (saturation * strength).
        if self.saturation > 0 and num_channels > 1:
            s = torch.exp2(torch.randn(
                [batch_size, 1, 1], device=device) * self.saturation_std)
            s = torch.where(torch.rand(
                [batch_size, 1, 1], device=device) < self.saturation * self.p, s, torch.ones_like(s))
            if debug_percentile is not None:
                s = torch.full_like(s, torch.exp2(torch.erfinv(
                    debug_percentile * 2 - 1) * self.saturation_std))
            C = (v.ger(v) + (I_4 - v.ger(v)) * s) @ C

        # ------------------------------
        # Execute color transformations.
        # ------------------------------

        # Execute if the transform is not identity.
        if C is not I_4:
            images = images.reshape([batch_size, num_channels, height * width])
            if num_channels == 3:
                images = C[:, :3, :3] @ images + C[:, :3, 3:]
            elif num_channels == 1:
                C = C[:, :3, :].mean(dim=1, keepdims=True)
                images = images * \
                    C[:, :, :3].sum(dim=2, keepdims=True) + C[:, :, 3:]
            else:
                raise ValueError(
                    'Image must be RGB (3 channels) or L (1 channel)')
            images = images.reshape([batch_size, num_channels, height, width])

        # ----------------------
        # Image-space filtering.
        # ----------------------

        if self.imgfilter > 0:
            num_bands = self.Hz_fbank.shape[0]
            assert len(self.imgfilter_bands) == num_bands
            # Expected power spectrum (1/f).
            expected_power = misc.constant(
                np.array([10, 1, 1, 1]) / 13, device=device)

            # Apply amplification for each band with probability (imgfilter * strength * band_strength).
            # Global gain vector (identity).
            g = torch.ones([batch_size, num_bands], device=device)
            for i, band_strength in enumerate(self.imgfilter_bands):
                t_i = torch.exp2(torch.randn(
                    [batch_size], device=device) * self.imgfilter_std)
                t_i = torch.where(torch.rand(
                    [batch_size], device=device) < self.imgfilter * self.p * band_strength, t_i, torch.ones_like(t_i))
                if debug_percentile is not None:
                    t_i = torch.full_like(t_i, torch.exp2(torch.erfinv(
                        debug_percentile * 2 - 1) * self.imgfilter_std)) if band_strength > 0 else torch.ones_like(t_i)
                # Temporary gain vector.
                t = torch.ones([batch_size, num_bands], device=device)
                # Replace i'th element.
                t[:, i] = t_i
                # Normalize power.
                t = t / (expected_power * t.square()
                         ).sum(dim=-1, keepdims=True).sqrt()
                # Accumulate into global gain.
                g = g * t

            # Construct combined amplification filter.
            # [batch, tap]
            Hz_prime = g @ self.Hz_fbank
            Hz_prime = Hz_prime.unsqueeze(1).repeat(
                [1, num_channels, 1])  # [batch, channels, tap]
            # [batch * channels, 1, tap]
            Hz_prime = Hz_prime.reshape([batch_size * num_channels, 1, -1])

            # Apply filter.
            p = self.Hz_fbank.shape[1] // 2
            images = images.reshape(
                [1, batch_size * num_channels, height, width])
            images = torch.nn.functional.pad(
                input=images, pad=[p, p, p, p], mode='reflect')
            images = conv2d_gradfix.conv2d(
                input=images, weight=Hz_prime.unsqueeze(2), groups=batch_size * num_channels)
            images = conv2d_gradfix.conv2d(
                input=images, weight=Hz_prime.unsqueeze(3), groups=batch_size * num_channels)
            images = images.reshape([batch_size, num_channels, height, width])

        # ------------------------
        # Image-space corruptions.
        # ------------------------

        # Apply additive RGB noise with probability (noise * strength).
        if self.noise > 0:
            sigma = torch.randn([batch_size, 1, 1, 1],
                                device=device).abs() * self.noise_std
            sigma = torch.where(torch.rand(
                [batch_size, 1, 1, 1], device=device) < self.noise * self.p, sigma, torch.zeros_like(sigma))
            if debug_percentile is not None:
                sigma = torch.full_like(sigma, torch.erfinv(
                    debug_percentile) * self.noise_std)
            images = images + \
                torch.randn([batch_size, num_channels, height,
                             width], device=device) * sigma

        # Apply cutout with probability (cutout * strength).
        if self.cutout > 0:
            size = torch.full([batch_size, 2, 1, 1, 1],
                              self.cutout_size, device=device)
            size = torch.where(torch.rand(
                [batch_size, 1, 1, 1, 1], device=device) < self.cutout * self.p, size, torch.zeros_like(size))
            center = torch.rand([batch_size, 2, 1, 1, 1], device=device)
            if debug_percentile is not None:
                size = torch.full_like(size, self.cutout_size)
                center = torch.full_like(center, debug_percentile)
            coord_x = torch.arange(width, device=device).reshape([1, 1, 1, -1])
            coord_y = torch.arange(
                height, device=device).reshape([1, 1, -1, 1])
            mask_x = (((coord_x + 0.5) / width -
                       center[:, 0]).abs() >= size[:, 0] / 2)
            mask_y = (((coord_y + 0.5) / height -
                       center[:, 1]).abs() >= size[:, 1] / 2)
            mask = torch.logical_or(mask_x, mask_y).to(torch.float32)
            images = images * mask

        return images

# ----------------------------------------------------------------------------
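The pipeline above is driven entirely by the per-augmentation probability multipliers passed to the constructor and the global strength buffer `p` (the quantity ADA adjusts during training). A minimal sketch of enabling a few augmentations, assuming NVIDIA's `torch_utils` package that this file imports is on the path and the file itself is importable as `training.augment` (both import paths are assumptions):

import torch
from training.augment import AugmentPipe  # hypothetical import of the deleted file

# Enable x-flips, arbitrary rotations, and brightness jitter; everything
# else stays disabled because its multiplier is 0.
pipe = AugmentPipe(xflip=1, rotate=1, brightness=1)
pipe.p.copy_(torch.as_tensor(0.6))  # overall augmentation strength, e.g. set by ADA

images = torch.randn(4, 3, 64, 64)  # NCHW batch, roughly [-1, 1] range
augmented = pipe(images)
assert augmented.shape == images.shape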
spaces/An-619/FastSAM/utils/tools.py
DELETED
@@ -1,442 +0,0 @@
|
|
1 |
-
import numpy as np
|
2 |
-
from PIL import Image
|
3 |
-
import matplotlib.pyplot as plt
|
4 |
-
import cv2
|
5 |
-
import torch
|
6 |
-
import os
|
7 |
-
import sys
|
8 |
-
import clip
|
9 |
-
|
10 |
-
|
11 |
-
def convert_box_xywh_to_xyxy(box):
|
12 |
-
if len(box) == 4:
|
13 |
-
return [box[0], box[1], box[0] + box[2], box[1] + box[3]]
|
14 |
-
else:
|
15 |
-
result = []
|
16 |
-
for b in box:
|
17 |
-
b = convert_box_xywh_to_xyxy(b)
|
18 |
-
result.append(b)
|
19 |
-
return result
|
20 |
-
|
21 |
-
|
22 |
-
def segment_image(image, bbox):
|
23 |
-
image_array = np.array(image)
|
24 |
-
segmented_image_array = np.zeros_like(image_array)
|
25 |
-
x1, y1, x2, y2 = bbox
|
26 |
-
segmented_image_array[y1:y2, x1:x2] = image_array[y1:y2, x1:x2]
|
27 |
-
segmented_image = Image.fromarray(segmented_image_array)
|
28 |
-
black_image = Image.new("RGB", image.size, (255, 255, 255))
|
29 |
-
# transparency_mask = np.zeros_like((), dtype=np.uint8)
|
30 |
-
transparency_mask = np.zeros(
|
31 |
-
(image_array.shape[0], image_array.shape[1]), dtype=np.uint8
|
32 |
-
)
|
33 |
-
transparency_mask[y1:y2, x1:x2] = 255
|
34 |
-
transparency_mask_image = Image.fromarray(transparency_mask, mode="L")
|
35 |
-
black_image.paste(segmented_image, mask=transparency_mask_image)
|
36 |
-
return black_image
|
37 |
-
|
38 |
-
|
39 |
-
def format_results(result, filter=0):
|
40 |
-
annotations = []
|
41 |
-
n = len(result.masks.data)
|
42 |
-
for i in range(n):
|
43 |
-
annotation = {}
|
44 |
-
mask = result.masks.data[i] == 1.0
|
45 |
-
|
46 |
-
if torch.sum(mask) < filter:
|
47 |
-
continue
|
48 |
-
annotation["id"] = i
|
49 |
-
annotation["segmentation"] = mask.cpu().numpy()
|
50 |
-
annotation["bbox"] = result.boxes.data[i]
|
51 |
-
annotation["score"] = result.boxes.conf[i]
|
52 |
-
annotation["area"] = annotation["segmentation"].sum()
|
53 |
-
annotations.append(annotation)
|
54 |
-
return annotations
|
55 |
-
|
56 |
-
|
57 |
-
def filter_masks(annotations): # filter the overlap mask
|
58 |
-
annotations.sort(key=lambda x: x["area"], reverse=True)
|
59 |
-
to_remove = set()
|
60 |
-
for i in range(0, len(annotations)):
|
61 |
-
a = annotations[i]
|
62 |
-
for j in range(i + 1, len(annotations)):
|
63 |
-
b = annotations[j]
|
64 |
-
if i != j and j not in to_remove:
|
65 |
-
# check if
|
66 |
-
if b["area"] < a["area"]:
|
67 |
-
if (a["segmentation"] & b["segmentation"]).sum() / b[
|
68 |
-
"segmentation"
|
69 |
-
].sum() > 0.8:
|
70 |
-
to_remove.add(j)
|
71 |
-
|
72 |
-
return [a for i, a in enumerate(annotations) if i not in to_remove], to_remove
|
73 |
-
|
74 |
-
|
75 |
-
def get_bbox_from_mask(mask):
|
76 |
-
mask = mask.astype(np.uint8)
|
77 |
-
contours, hierarchy = cv2.findContours(
|
78 |
-
mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
|
79 |
-
)
|
80 |
-
x1, y1, w, h = cv2.boundingRect(contours[0])
|
81 |
-
x2, y2 = x1 + w, y1 + h
|
82 |
-
if len(contours) > 1:
|
83 |
-
for b in contours:
|
84 |
-
x_t, y_t, w_t, h_t = cv2.boundingRect(b)
|
85 |
-
# 将多个bbox合并成一个
|
86 |
-
x1 = min(x1, x_t)
|
87 |
-
y1 = min(y1, y_t)
|
88 |
-
x2 = max(x2, x_t + w_t)
|
89 |
-
y2 = max(y2, y_t + h_t)
|
90 |
-
h = y2 - y1
|
91 |
-
w = x2 - x1
|
92 |
-
return [x1, y1, x2, y2]
|
93 |
-
|
94 |
-
|
95 |
-
def fast_process(
|
96 |
-
annotations, args, mask_random_color, bbox=None, points=None, edges=False
|
97 |
-
):
|
98 |
-
if isinstance(annotations[0], dict):
|
99 |
-
annotations = [annotation["segmentation"] for annotation in annotations]
|
100 |
-
result_name = os.path.basename(args.img_path)
|
101 |
-
image = cv2.imread(args.img_path)
|
102 |
-
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
|
103 |
-
original_h = image.shape[0]
|
104 |
-
original_w = image.shape[1]
|
105 |
-
if sys.platform == "darwin":
|
106 |
-
plt.switch_backend("TkAgg")
|
107 |
-
plt.figure(figsize=(original_w/100, original_h/100))
|
108 |
-
# Add subplot with no margin.
|
109 |
-
plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)
|
110 |
-
plt.margins(0, 0)
|
111 |
-
plt.gca().xaxis.set_major_locator(plt.NullLocator())
|
112 |
-
plt.gca().yaxis.set_major_locator(plt.NullLocator())
|
113 |
-
plt.imshow(image)
|
114 |
-
if args.better_quality == True:
|
115 |
-
if isinstance(annotations[0], torch.Tensor):
|
116 |
-
annotations = np.array(annotations.cpu())
|
117 |
-
for i, mask in enumerate(annotations):
|
118 |
-
mask = cv2.morphologyEx(
|
119 |
-
mask.astype(np.uint8), cv2.MORPH_CLOSE, np.ones((3, 3), np.uint8)
|
120 |
-
)
|
121 |
-
annotations[i] = cv2.morphologyEx(
|
122 |
-
mask.astype(np.uint8), cv2.MORPH_OPEN, np.ones((8, 8), np.uint8)
|
123 |
-
)
|
124 |
-
if args.device == "cpu":
|
125 |
-
annotations = np.array(annotations)
|
126 |
-
fast_show_mask(
|
127 |
-
annotations,
|
128 |
-
plt.gca(),
|
129 |
-
random_color=mask_random_color,
|
130 |
-
bbox=bbox,
|
131 |
-
points=points,
|
132 |
-
point_label=args.point_label,
|
133 |
-
retinamask=args.retina,
|
134 |
-
target_height=original_h,
|
135 |
-
target_width=original_w,
|
136 |
-
)
|
137 |
-
else:
|
138 |
-
if isinstance(annotations[0], np.ndarray):
|
139 |
-
annotations = torch.from_numpy(annotations)
|
140 |
-
fast_show_mask_gpu(
|
141 |
-
annotations,
|
142 |
-
plt.gca(),
|
143 |
-
random_color=args.randomcolor,
|
144 |
-
bbox=bbox,
|
145 |
-
points=points,
|
146 |
-
point_label=args.point_label,
|
147 |
-
retinamask=args.retina,
|
148 |
-
target_height=original_h,
|
149 |
-
target_width=original_w,
|
150 |
-
)
|
151 |
-
if isinstance(annotations, torch.Tensor):
|
152 |
-
annotations = annotations.cpu().numpy()
|
153 |
-
if args.withContours == True:
|
154 |
-
contour_all = []
|
155 |
-
temp = np.zeros((original_h, original_w, 1))
|
156 |
-
for i, mask in enumerate(annotations):
|
157 |
-
if type(mask) == dict:
|
158 |
-
mask = mask["segmentation"]
|
159 |
-
annotation = mask.astype(np.uint8)
|
160 |
-
if args.retina == False:
|
161 |
-
annotation = cv2.resize(
|
162 |
-
annotation,
|
163 |
-
(original_w, original_h),
|
164 |
-
interpolation=cv2.INTER_NEAREST,
|
165 |
-
)
|
166 |
-
contours, hierarchy = cv2.findContours(
|
167 |
-
annotation, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
|
168 |
-
)
|
169 |
-
for contour in contours:
|
170 |
-
contour_all.append(contour)
|
171 |
-
cv2.drawContours(temp, contour_all, -1, (255, 255, 255), 2)
|
172 |
-
color = np.array([0 / 255, 0 / 255, 255 / 255, 0.8])
|
173 |
-
contour_mask = temp / 255 * color.reshape(1, 1, -1)
|
174 |
-
plt.imshow(contour_mask)
|
175 |
-
|
176 |
-
save_path = args.output
|
177 |
-
if not os.path.exists(save_path):
|
178 |
-
os.makedirs(save_path)
|
179 |
-
plt.axis("off")
|
180 |
-
fig = plt.gcf()
|
181 |
-
plt.draw()
|
182 |
-
|
183 |
-
try:
|
184 |
-
buf = fig.canvas.tostring_rgb()
|
185 |
-
except AttributeError:
|
186 |
-
fig.canvas.draw()
|
187 |
-
buf = fig.canvas.tostring_rgb()
|
188 |
-
|
189 |
-
cols, rows = fig.canvas.get_width_height()
|
190 |
-
img_array = np.fromstring(buf, dtype=np.uint8).reshape(rows, cols, 3)
|
191 |
-
cv2.imwrite(os.path.join(save_path, result_name), cv2.cvtColor(img_array, cv2.COLOR_RGB2BGR))
|
192 |
-
|
193 |
-
|
194 |
-
# CPU post process
|
195 |
-
def fast_show_mask(
|
196 |
-
annotation,
|
197 |
-
ax,
|
198 |
-
random_color=False,
|
199 |
-
bbox=None,
|
200 |
-
points=None,
|
201 |
-
point_label=None,
|
202 |
-
retinamask=True,
|
203 |
-
target_height=960,
|
204 |
-
target_width=960,
|
205 |
-
):
|
206 |
-
msak_sum = annotation.shape[0]
|
207 |
-
height = annotation.shape[1]
|
208 |
-
weight = annotation.shape[2]
|
209 |
-
# 将annotation 按照面积 排序
|
210 |
-
areas = np.sum(annotation, axis=(1, 2))
|
211 |
-
sorted_indices = np.argsort(areas)
|
212 |
-
annotation = annotation[sorted_indices]
|
213 |
-
|
214 |
-
index = (annotation != 0).argmax(axis=0)
|
215 |
-
if random_color == True:
|
216 |
-
color = np.random.random((msak_sum, 1, 1, 3))
|
217 |
-
else:
|
218 |
-
color = np.ones((msak_sum, 1, 1, 3)) * np.array(
|
219 |
-
[30 / 255, 144 / 255, 255 / 255]
|
220 |
-
)
|
221 |
-
transparency = np.ones((msak_sum, 1, 1, 1)) * 0.6
|
222 |
-
visual = np.concatenate([color, transparency], axis=-1)
|
223 |
-
mask_image = np.expand_dims(annotation, -1) * visual
|
224 |
-
|
225 |
-
show = np.zeros((height, weight, 4))
|
226 |
-
h_indices, w_indices = np.meshgrid(
|
227 |
-
np.arange(height), np.arange(weight), indexing="ij"
|
228 |
-
)
|
229 |
-
indices = (index[h_indices, w_indices], h_indices, w_indices, slice(None))
|
230 |
-
# 使用向量化索引更新show的值
|
231 |
-
show[h_indices, w_indices, :] = mask_image[indices]
|
232 |
-
if bbox is not None:
|
233 |
-
x1, y1, x2, y2 = bbox
|
234 |
-
ax.add_patch(
|
235 |
-
plt.Rectangle(
|
236 |
-
(x1, y1), x2 - x1, y2 - y1, fill=False, edgecolor="b", linewidth=1
|
237 |
-
)
|
238 |
-
)
|
239 |
-
# draw point
|
240 |
-
if points is not None:
|
241 |
-
plt.scatter(
|
242 |
-
[point[0] for i, point in enumerate(points) if point_label[i] == 1],
|
243 |
-
[point[1] for i, point in enumerate(points) if point_label[i] == 1],
|
244 |
-
s=20,
|
245 |
-
c="y",
|
246 |
-
)
|
247 |
-
plt.scatter(
|
248 |
-
[point[0] for i, point in enumerate(points) if point_label[i] == 0],
|
249 |
-
[point[1] for i, point in enumerate(points) if point_label[i] == 0],
|
250 |
-
s=20,
|
251 |
-
c="m",
|
252 |
-
)
|
253 |
-
|
254 |
-
if retinamask == False:
|
255 |
-
show = cv2.resize(
|
256 |
-
show, (target_width, target_height), interpolation=cv2.INTER_NEAREST
|
257 |
-
)
|
258 |
-
ax.imshow(show)
|
259 |
-
|
260 |
-
|
261 |
-
def fast_show_mask_gpu(
|
262 |
-
annotation,
|
263 |
-
ax,
|
264 |
-
random_color=False,
|
265 |
-
bbox=None,
|
266 |
-
points=None,
|
267 |
-
point_label=None,
|
268 |
-
retinamask=True,
|
269 |
-
target_height=960,
|
270 |
-
target_width=960,
|
271 |
-
):
|
272 |
-
msak_sum = annotation.shape[0]
|
273 |
-
height = annotation.shape[1]
|
274 |
-
weight = annotation.shape[2]
|
275 |
-
areas = torch.sum(annotation, dim=(1, 2))
|
276 |
-
sorted_indices = torch.argsort(areas, descending=False)
|
277 |
-
annotation = annotation[sorted_indices]
|
278 |
-
# 找每个位置第一个非零值下标
|
279 |
-
index = (annotation != 0).to(torch.long).argmax(dim=0)
|
280 |
-
if random_color == True:
|
281 |
-
color = torch.rand((msak_sum, 1, 1, 3)).to(annotation.device)
|
282 |
-
else:
|
283 |
-
color = torch.ones((msak_sum, 1, 1, 3)).to(annotation.device) * torch.tensor(
|
284 |
-
[30 / 255, 144 / 255, 255 / 255]
|
285 |
-
).to(annotation.device)
|
286 |
-
transparency = torch.ones((msak_sum, 1, 1, 1)).to(annotation.device) * 0.6
|
287 |
-
visual = torch.cat([color, transparency], dim=-1)
|
288 |
-
mask_image = torch.unsqueeze(annotation, -1) * visual
|
289 |
-
# 按index取数,index指每个位置选哪个batch的数,把mask_image转成一个batch的形式
|
290 |
-
show = torch.zeros((height, weight, 4)).to(annotation.device)
|
291 |
-
h_indices, w_indices = torch.meshgrid(
|
292 |
-
torch.arange(height), torch.arange(weight), indexing="ij"
|
293 |
-
)
|
294 |
-
indices = (index[h_indices, w_indices], h_indices, w_indices, slice(None))
|
295 |
-
# 使用向量化索引更新show的值
|
296 |
-
show[h_indices, w_indices, :] = mask_image[indices]
|
297 |
-
show_cpu = show.cpu().numpy()
|
298 |
-
if bbox is not None:
|
299 |
-
x1, y1, x2, y2 = bbox
|
300 |
-
ax.add_patch(
|
301 |
-
plt.Rectangle(
|
302 |
-
(x1, y1), x2 - x1, y2 - y1, fill=False, edgecolor="b", linewidth=1
|
303 |
-
)
|
304 |
-
)
|
305 |
-
# draw point
|
306 |
-
if points is not None:
|
307 |
-
plt.scatter(
|
308 |
-
[point[0] for i, point in enumerate(points) if point_label[i] == 1],
|
309 |
-
[point[1] for i, point in enumerate(points) if point_label[i] == 1],
|
310 |
-
s=20,
|
311 |
-
c="y",
|
312 |
-
)
|
313 |
-
plt.scatter(
|
314 |
-
[point[0] for i, point in enumerate(points) if point_label[i] == 0],
|
315 |
-
[point[1] for i, point in enumerate(points) if point_label[i] == 0],
|
316 |
-
s=20,
|
317 |
-
c="m",
|
318 |
-
)
|
319 |
-
if retinamask == False:
|
320 |
-
show_cpu = cv2.resize(
|
321 |
-
show_cpu, (target_width, target_height), interpolation=cv2.INTER_NEAREST
|
322 |
-
)
|
323 |
-
ax.imshow(show_cpu)
|
324 |
-
|
325 |
-
|
326 |
-
# clip
|
327 |
-
@torch.no_grad()
|
328 |
-
def retriev(
|
329 |
-
model, preprocess, elements: [Image.Image], search_text: str, device
|
330 |
-
):
|
331 |
-
preprocessed_images = [preprocess(image).to(device) for image in elements]
|
332 |
-
tokenized_text = clip.tokenize([search_text]).to(device)
|
333 |
-
stacked_images = torch.stack(preprocessed_images)
|
334 |
-
image_features = model.encode_image(stacked_images)
|
335 |
-
text_features = model.encode_text(tokenized_text)
|
336 |
-
image_features /= image_features.norm(dim=-1, keepdim=True)
|
337 |
-
text_features /= text_features.norm(dim=-1, keepdim=True)
|
338 |
-
probs = 100.0 * image_features @ text_features.T
|
339 |
-
return probs[:, 0].softmax(dim=0)
|
340 |
-
|
341 |
-
|
342 |
-
def crop_image(annotations, image_like):
|
343 |
-
if isinstance(image_like, str):
|
344 |
-
image = Image.open(image_like)
|
345 |
-
else:
|
346 |
-
image = image_like
|
347 |
-
ori_w, ori_h = image.size
|
348 |
-
mask_h, mask_w = annotations[0]["segmentation"].shape
|
349 |
-
if ori_w != mask_w or ori_h != mask_h:
|
350 |
-
image = image.resize((mask_w, mask_h))
|
351 |
-
cropped_boxes = []
|
352 |
-
cropped_images = []
|
353 |
-
not_crop = []
|
354 |
-
origin_id = []
|
355 |
-
for _, mask in enumerate(annotations):
|
356 |
-
if np.sum(mask["segmentation"]) <= 100:
|
357 |
-
continue
|
358 |
-
        origin_id.append(_)
        bbox = get_bbox_from_mask(mask["segmentation"])  # bbox of the mask
        cropped_boxes.append(segment_image(image, bbox))  # save the cropped image
        # cropped_boxes.append(segment_image(image, mask["segmentation"]))
        cropped_images.append(bbox)  # save the bbox of the cropped image

    return cropped_boxes, cropped_images, not_crop, origin_id, annotations


def box_prompt(masks, bbox, target_height, target_width):
    h = masks.shape[1]
    w = masks.shape[2]
    if h != target_height or w != target_width:
        bbox = [
            int(bbox[0] * w / target_width),
            int(bbox[1] * h / target_height),
            int(bbox[2] * w / target_width),
            int(bbox[3] * h / target_height),
        ]
    bbox[0] = round(bbox[0]) if round(bbox[0]) > 0 else 0
    bbox[1] = round(bbox[1]) if round(bbox[1]) > 0 else 0
    bbox[2] = round(bbox[2]) if round(bbox[2]) < w else w
    bbox[3] = round(bbox[3]) if round(bbox[3]) < h else h

    # IoUs = torch.zeros(len(masks), dtype=torch.float32)
    bbox_area = (bbox[3] - bbox[1]) * (bbox[2] - bbox[0])

    masks_area = torch.sum(masks[:, bbox[1] : bbox[3], bbox[0] : bbox[2]], dim=(1, 2))
    orig_masks_area = torch.sum(masks, dim=(1, 2))

    union = bbox_area + orig_masks_area - masks_area
    IoUs = masks_area / union
    max_iou_index = torch.argmax(IoUs)

    return masks[max_iou_index].cpu().numpy(), max_iou_index


def point_prompt(masks, points, point_label, target_height, target_width):  # numpy processing
    h = masks[0]["segmentation"].shape[0]
    w = masks[0]["segmentation"].shape[1]
    if h != target_height or w != target_width:
        points = [
            [int(point[0] * w / target_width), int(point[1] * h / target_height)]
            for point in points
        ]
    onemask = np.zeros((h, w))
    masks = sorted(masks, key=lambda x: x['area'], reverse=True)
    for annotation in masks:
        if type(annotation) == dict:
            mask = annotation['segmentation']
        else:
            mask = annotation
        for i, point in enumerate(points):
            if mask[point[1], point[0]] == 1 and point_label[i] == 1:
                onemask[mask] = 1
            if mask[point[1], point[0]] == 1 and point_label[i] == 0:
                onemask[mask] = 0
    onemask = onemask >= 1
    return onemask, 0


def text_prompt(annotations, text, img_path, device, wider=False, threshold=0.9):
    cropped_boxes, cropped_images, not_crop, origin_id, annotations_ = crop_image(
        annotations, img_path
    )
    clip_model, preprocess = clip.load("./weights/CLIP_ViT_B_32.pt", device=device)
    scores = retriev(
        clip_model, preprocess, cropped_boxes, text, device=device
    )
    max_idx = scores.argsort()
    max_idx = max_idx[-1]
    max_idx = origin_id[int(max_idx)]

    # find the biggest mask which contains the mask with max score
    if wider:
        mask0 = annotations_[max_idx]["segmentation"]
        area0 = np.sum(mask0)
        areas = [(i, np.sum(mask["segmentation"])) for i, mask in enumerate(annotations_) if i in origin_id]
        areas = sorted(areas, key=lambda area: area[1], reverse=True)
        indices = [area[0] for area in areas]
        for index in indices:
            if index == max_idx or np.sum(annotations_[index]["segmentation"] & mask0) / area0 > threshold:
                max_idx = index
                break

    return annotations_[max_idx]["segmentation"], max_idx
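To make the IoU selection in box_prompt concrete, here is a small self-contained sketch with two synthetic masks; the tensor shapes and the example box are invented for illustration and are not part of the deleted file.

import torch

# Two 8x8 binary masks: mask 0 covers only the top-left quadrant, mask 1 covers everything.
masks = torch.zeros((2, 8, 8))
masks[0, 0:4, 0:4] = 1
masks[1, :, :] = 1

# A box around the top-left quadrant should select mask 0.
bbox = [0, 0, 4, 4]
bbox_area = (bbox[3] - bbox[1]) * (bbox[2] - bbox[0])                           # 16
masks_area = torch.sum(masks[:, bbox[1]:bbox[3], bbox[0]:bbox[2]], dim=(1, 2))  # [16., 16.]
orig_masks_area = torch.sum(masks, dim=(1, 2))                                  # [16., 64.]
ious = masks_area / (bbox_area + orig_masks_area - masks_area)                  # [1.00, 0.25]
print(torch.argmax(ious).item())  # 0 -> the quadrant-sized mask wins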
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_pix2pix_zero.py
DELETED
@@ -1,622 +0,0 @@
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import random
import tempfile
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMInverseScheduler,
    DDIMScheduler,
    DDPMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    StableDiffusionPix2PixZeroPipeline,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, load_pt, require_torch_gpu, skip_mps

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
    TEXT_TO_IMAGE_IMAGE_PARAMS,
)
from ..test_pipelines_common import (
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
    assert_mean_pixel_difference,
)


enable_full_determinism()


@skip_mps
class StableDiffusionPix2PixZeroPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPix2PixZeroPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"image"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    @classmethod
    def setUpClass(cls):
        cls.source_embeds = load_pt(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/pix2pix/src_emb_0.pt"
        )

        cls.target_embeds = load_pt(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/pix2pix/tgt_emb_0.pt"
        )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler()
        inverse_scheduler = DDIMInverseScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
            "inverse_scheduler": inverse_scheduler,
            "caption_generator": None,
            "caption_processor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        generator = torch.manual_seed(seed)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "cross_attention_guidance_amount": 0.15,
            "source_embeds": self.source_embeds,
            "target_embeds": self.target_embeds,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_inversion_inputs(self, device, seed=0):
        dummy_image = floats_tensor((2, 3, 32, 32), rng=random.Random(seed)).to(torch_device)
        dummy_image = dummy_image / 2 + 0.5
        generator = torch.manual_seed(seed)

        inputs = {
            "prompt": [
                "A painting of a squirrel eating a burger",
                "A painting of a burger eating a squirrel",
            ],
            "image": dummy_image.cpu(),
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "generator": generator,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_inversion_inputs_by_type(self, device, seed=0, input_image_type="pt", output_type="np"):
        inputs = self.get_dummy_inversion_inputs(device, seed)

        if input_image_type == "pt":
            image = inputs["image"]
        elif input_image_type == "np":
            image = VaeImageProcessor.pt_to_numpy(inputs["image"])
        elif input_image_type == "pil":
            image = VaeImageProcessor.pt_to_numpy(inputs["image"])
            image = VaeImageProcessor.numpy_to_pil(image)
        else:
            raise ValueError(f"unsupported input_image_type {input_image_type}")

        inputs["image"] = image
        inputs["output_type"] = output_type

        return inputs

    def test_save_load_optional_components(self):
        if not hasattr(self.pipeline_class, "_optional_components"):
            return

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(output - output_loaded).max()
        self.assertLess(max_diff, 1e-4)

    def test_stable_diffusion_pix2pix_zero_inversion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPix2PixZeroPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        inputs["image"] = inputs["image"][:1]
        inputs["prompt"] = inputs["prompt"][:1]
        image = sd_pipe.invert(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4823, 0.4783, 0.5638, 0.5201, 0.5247, 0.5644, 0.5029, 0.5404, 0.5062])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_zero_inversion_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPix2PixZeroPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = sd_pipe.invert(**inputs).images
        image_slice = image[1, -3:, -3:, -1]
        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.6446, 0.5232, 0.4914, 0.4441, 0.4654, 0.5546, 0.4650, 0.4938, 0.5044])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_zero_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPix2PixZeroPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4863, 0.5053, 0.5033, 0.4007, 0.3571, 0.4768, 0.5176, 0.5277, 0.4940])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_zero_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPix2PixZeroPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5177, 0.5097, 0.5047, 0.4076, 0.3667, 0.4767, 0.5238, 0.5307, 0.4958])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_zero_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionPix2PixZeroPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5421, 0.5525, 0.6085, 0.5279, 0.4658, 0.5317, 0.4418, 0.4815, 0.5132])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_zero_ddpm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = DDPMScheduler()
        sd_pipe = StableDiffusionPix2PixZeroPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4861, 0.5053, 0.5038, 0.3994, 0.3562, 0.4768, 0.5172, 0.5280, 0.4938])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_zero_inversion_pt_np_pil_outputs_equivalent(self):
        device = torch_device
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPix2PixZeroPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        output_pt = sd_pipe.invert(**self.get_dummy_inversion_inputs_by_type(device, output_type="pt")).images
        output_np = sd_pipe.invert(**self.get_dummy_inversion_inputs_by_type(device, output_type="np")).images
        output_pil = sd_pipe.invert(**self.get_dummy_inversion_inputs_by_type(device, output_type="pil")).images

        max_diff = np.abs(output_pt.cpu().numpy().transpose(0, 2, 3, 1) - output_np).max()
        self.assertLess(max_diff, 1e-4, "`output_type=='pt'` generate different results from `output_type=='np'`")

        max_diff = np.abs(np.array(output_pil[0]) - (output_np[0] * 255).round()).max()
        self.assertLess(max_diff, 2.0, "`output_type=='pil'` generate different results from `output_type=='np'`")

    def test_stable_diffusion_pix2pix_zero_inversion_pt_np_pil_inputs_equivalent(self):
        device = torch_device
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPix2PixZeroPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        out_input_pt = sd_pipe.invert(**self.get_dummy_inversion_inputs_by_type(device, input_image_type="pt")).images
        out_input_np = sd_pipe.invert(**self.get_dummy_inversion_inputs_by_type(device, input_image_type="np")).images
        out_input_pil = sd_pipe.invert(
            **self.get_dummy_inversion_inputs_by_type(device, input_image_type="pil")
        ).images

        max_diff = np.abs(out_input_pt - out_input_np).max()
        self.assertLess(max_diff, 1e-4, "`input_type=='pt'` generate different result from `input_type=='np'`")

        assert_mean_pixel_difference(out_input_pil, out_input_np, expected_max_diff=1)

    # Non-determinism caused by the scheduler optimizing the latent inputs during inference
    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()


@slow
@require_torch_gpu
class StableDiffusionPix2PixZeroPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls):
        cls.source_embeds = load_pt(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/pix2pix/cat.pt"
        )

        cls.target_embeds = load_pt(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/pix2pix/dog.pt"
        )

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)

        inputs = {
            "prompt": "turn him into a cyborg",
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "cross_attention_guidance_amount": 0.15,
            "source_embeds": self.source_embeds,
            "target_embeds": self.target_embeds,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_pix2pix_zero_default(self):
        pipe = StableDiffusionPix2PixZeroPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5742, 0.5757, 0.5747, 0.5781, 0.5688, 0.5713, 0.5742, 0.5664, 0.5747])

        assert np.abs(expected_slice - image_slice).max() < 5e-2

    def test_stable_diffusion_pix2pix_zero_k_lms(self):
        pipe = StableDiffusionPix2PixZeroPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6367, 0.5459, 0.5146, 0.5479, 0.4905, 0.4753, 0.4961, 0.4629, 0.4624])

        assert np.abs(expected_slice - image_slice).max() < 5e-2

    def test_stable_diffusion_pix2pix_zero_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([0.1345, 0.268, 0.1539, 0.0726, 0.0959, 0.2261, -0.2673, 0.0277, -0.2062])

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([0.1393, 0.2637, 0.1617, 0.0724, 0.0987, 0.2271, -0.2666, 0.0299, -0.2104])

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        pipe = StableDiffusionPix2PixZeroPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableDiffusionPix2PixZeroPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 8.2 GB is allocated
        assert mem_bytes < 8.2 * 10**9


@slow
@require_torch_gpu
class InversionPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls):
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/pix2pix/cat_6.png"
        )

        raw_image = raw_image.convert("RGB").resize((512, 512))

        cls.raw_image = raw_image

    def test_stable_diffusion_pix2pix_inversion(self):
        pipe = StableDiffusionPix2PixZeroPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)

        caption = "a photography of a cat with flowers"
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe.invert(caption, image=self.raw_image, generator=generator, num_inference_steps=10)
        inv_latents = output[0]

        image_slice = inv_latents[0, -3:, -3:, -1].flatten()

        assert inv_latents.shape == (1, 4, 64, 64)
        expected_slice = np.array([0.8447, -0.0730, 0.7588, -1.2070, -0.4678, 0.1511, -0.8555, 1.1816, -0.7666])

        assert np.abs(expected_slice - image_slice.cpu().numpy()).max() < 5e-2

    def test_stable_diffusion_2_pix2pix_inversion(self):
        pipe = StableDiffusionPix2PixZeroPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)

        caption = "a photography of a cat with flowers"
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe.invert(caption, image=self.raw_image, generator=generator, num_inference_steps=10)
        inv_latents = output[0]

        image_slice = inv_latents[0, -3:, -3:, -1].flatten()

        assert inv_latents.shape == (1, 4, 64, 64)
        expected_slice = np.array([0.8970, -0.1611, 0.4766, -1.1162, -0.5923, 0.1050, -0.9678, 1.0537, -0.6050])

        assert np.abs(expected_slice - image_slice.cpu().numpy()).max() < 5e-2

    def test_stable_diffusion_pix2pix_full(self):
        # numpy array of https://huggingface.co/datasets/hf-internal-testing/diffusers-images/blob/main/pix2pix/dog.png
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/pix2pix/dog.npy"
        )

        pipe = StableDiffusionPix2PixZeroPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)

        caption = "a photography of a cat with flowers"
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe.invert(caption, image=self.raw_image, generator=generator)
        inv_latents = output[0]

        source_prompts = 4 * ["a cat sitting on the street", "a cat playing in the field", "a face of a cat"]
        target_prompts = 4 * ["a dog sitting on the street", "a dog playing in the field", "a face of a dog"]

        source_embeds = pipe.get_embeds(source_prompts)
        target_embeds = pipe.get_embeds(target_prompts)

        image = pipe(
            caption,
            source_embeds=source_embeds,
            target_embeds=target_embeds,
            num_inference_steps=50,
            cross_attention_guidance_amount=0.15,
            generator=generator,
            latents=inv_latents,
            negative_prompt=caption,
            output_type="np",
        ).images

        max_diff = np.abs(expected_image - image).mean()
        assert max_diff < 0.05

    def test_stable_diffusion_2_pix2pix_full(self):
        # numpy array of https://huggingface.co/datasets/hf-internal-testing/diffusers-images/blob/main/pix2pix/dog_2.png
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/pix2pix/dog_2.npy"
        )

        pipe = StableDiffusionPix2PixZeroPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)

        caption = "a photography of a cat with flowers"
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe.invert(caption, image=self.raw_image, generator=generator)
        inv_latents = output[0]

        source_prompts = 4 * ["a cat sitting on the street", "a cat playing in the field", "a face of a cat"]
        target_prompts = 4 * ["a dog sitting on the street", "a dog playing in the field", "a face of a dog"]

        source_embeds = pipe.get_embeds(source_prompts)
        target_embeds = pipe.get_embeds(target_prompts)

        image = pipe(
            caption,
            source_embeds=source_embeds,
            target_embeds=target_embeds,
            num_inference_steps=125,
            cross_attention_guidance_amount=0.015,
            generator=generator,
            latents=inv_latents,
            negative_prompt=caption,
            output_type="np",
        ).images

        mean_diff = np.abs(expected_image - image).mean()
        assert mean_diff < 0.25
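The slow tests above double as end-to-end documentation for the pipeline. The following sketch is distilled from them; the model ID, image URL, caption, and parameters are copied verbatim from the deleted tests, a CUDA GPU with fp16 is assumed, and a single source/target prompt is used here for brevity where the tests average over several.

import torch
from diffusers import DDIMInverseScheduler, DDIMScheduler, StableDiffusionPix2PixZeroPipeline
from diffusers.utils.testing_utils import load_image

pipe = StableDiffusionPix2PixZeroPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
)
pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()

raw_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/pix2pix/cat_6.png"
).convert("RGB").resize((512, 512))

# Step 1: invert the source image into latents under a caption.
caption = "a photography of a cat with flowers"
inv_latents = pipe.invert(caption, image=raw_image, generator=torch.manual_seed(0))[0]

# Step 2: build the edit direction from source/target prompt embeddings.
source_embeds = pipe.get_embeds(["a cat sitting on the street"])
target_embeds = pipe.get_embeds(["a dog sitting on the street"])

# Step 3: regenerate from the inverted latents, steering cat -> dog.
image = pipe(
    caption,
    source_embeds=source_embeds,
    target_embeds=target_embeds,
    num_inference_steps=50,
    cross_attention_guidance_amount=0.15,
    generator=torch.manual_seed(0),
    latents=inv_latents,
    negative_prompt=caption,
    output_type="np",
).images[0]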
spaces/Andy1621/IAT_enhancement/model/__init__.py
DELETED
@@ -1 +0,0 @@
from .IAT import IAT
spaces/Andy1621/uniformer_image_detection/configs/hrnet/htc_hrnetv2p_w18_20e_coco.py
DELETED
@@ -1,9 +0,0 @@
_base_ = './htc_hrnetv2p_w32_20e_coco.py'
model = dict(
    pretrained='open-mmlab://msra/hrnetv2_w18',
    backbone=dict(
        extra=dict(
            stage2=dict(num_channels=(18, 36)),
            stage3=dict(num_channels=(18, 36, 72)),
            stage4=dict(num_channels=(18, 36, 72, 144)))),
    neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256))
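The config above only overrides the channel widths of the w32 base via `_base_` inheritance. A minimal sketch of how such a file resolves, assuming the mmcv 1.x Config API that these mmdetection configs were written against:

from mmcv import Config

# The _base_ file is loaded first, then this file's keys are merged on top,
# so only the stage channel widths and the neck's in_channels differ from w32.
cfg = Config.fromfile('configs/hrnet/htc_hrnetv2p_w18_20e_coco.py')
print(cfg.model.neck.in_channels)  # [18, 36, 72, 144]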
spaces/Andy1621/uniformer_image_detection/configs/pisa/pisa_mask_rcnn_r50_fpn_1x_coco.py
DELETED
@@ -1,30 +0,0 @@
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'

model = dict(
    roi_head=dict(
        type='PISARoIHead',
        bbox_head=dict(
            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))),
    train_cfg=dict(
        rpn_proposal=dict(
            nms_pre=2000,
            max_per_img=2000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            sampler=dict(
                type='ScoreHLRSampler',
                num=512,
                pos_fraction=0.25,
                neg_pos_ub=-1,
                add_gt_as_proposals=True,
                k=0.5,
                bias=0.),
            isr=dict(k=2, bias=0),
            carl=dict(k=1, bias=0.2))),
    test_cfg=dict(
        rpn=dict(
            nms_pre=2000,
            max_per_img=2000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0)))
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/api-examples/api-example-model.py
DELETED
@@ -1,176 +0,0 @@
#!/usr/bin/env python3

import requests

HOST = '0.0.0.0:5000'


def generate(prompt, tokens=200):
    request = {'prompt': prompt, 'max_new_tokens': tokens}
    response = requests.post(f'http://{HOST}/api/v1/generate', json=request)

    if response.status_code == 200:
        return response.json()['results'][0]['text']


def model_api(request):
    response = requests.post(f'http://{HOST}/api/v1/model', json=request)
    return response.json()


# print some common settings
def print_basic_model_info(response):
    basic_settings = ['truncation_length', 'instruction_template']
    print("Model: ", response['result']['model_name'])
    print("Lora(s): ", response['result']['lora_names'])
    for setting in basic_settings:
        print(setting, "=", response['result']['shared.settings'][setting])


# model info
def model_info():
    response = model_api({'action': 'info'})
    print_basic_model_info(response)


# simple loader
def model_load(model_name):
    return model_api({'action': 'load', 'model_name': model_name})


# complex loader
def complex_model_load(model):

    def guess_groupsize(model_name):
        if '1024g' in model_name:
            return 1024
        elif '128g' in model_name:
            return 128
        elif '32g' in model_name:
            return 32
        else:
            return -1

    req = {
        'action': 'load',
        'model_name': model,
        'args': {
            'loader': 'AutoGPTQ',

            'bf16': False,
            'load_in_8bit': False,
            'groupsize': 0,
            'wbits': 0,

            # llama.cpp
            'threads': 0,
            'n_batch': 512,
            'no_mmap': False,
            'mlock': False,
            'cache_capacity': None,
            'n_gpu_layers': 0,
            'n_ctx': 2048,

            # RWKV
            'rwkv_strategy': None,
            'rwkv_cuda_on': False,

            # b&b 4-bit
            # 'load_in_4bit': False,
            # 'compute_dtype': 'float16',
            # 'quant_type': 'nf4',
            # 'use_double_quant': False,

            # "cpu": false,
            # "auto_devices": false,
            # "gpu_memory": null,
            # "cpu_memory": null,
            # "disk": false,
            # "disk_cache_dir": "cache",
        },
    }

    model = model.lower()

    if '4bit' in model or 'gptq' in model or 'int4' in model:
        req['args']['wbits'] = 4
        req['args']['groupsize'] = guess_groupsize(model)
    elif '3bit' in model:
        req['args']['wbits'] = 3
        req['args']['groupsize'] = guess_groupsize(model)
    else:
        req['args']['gptq_for_llama'] = False

    if '8bit' in model:
        req['args']['load_in_8bit'] = True
    elif '-hf' in model or 'fp16' in model:
        if '7b' in model:
            req['args']['bf16'] = True  # for 24GB
        elif '13b' in model:
            req['args']['load_in_8bit'] = True  # for 24GB
    elif 'gguf' in model:
        # req['args']['threads'] = 16
        if '7b' in model:
            req['args']['n_gpu_layers'] = 100
        elif '13b' in model:
            req['args']['n_gpu_layers'] = 100
        elif '30b' in model or '33b' in model:
            req['args']['n_gpu_layers'] = 59  # 24GB
        elif '65b' in model:
            req['args']['n_gpu_layers'] = 42  # 24GB
    elif 'rwkv' in model:
        req['args']['rwkv_cuda_on'] = True
        if '14b' in model:
            req['args']['rwkv_strategy'] = 'cuda f16i8'  # 24GB
        else:
            req['args']['rwkv_strategy'] = 'cuda f16'  # 24GB

    return model_api(req)


if __name__ == '__main__':
    for model in model_api({'action': 'list'})['result']:
        try:
            resp = complex_model_load(model)

            if 'error' in resp:
                print(f"❌ {model} FAIL Error: {resp['error']['message']}")
                continue
            else:
                print_basic_model_info(resp)

            ans = generate("0,1,1,2,3,5,8,13,", tokens=2)

            if '21' in ans:
                print(f"✅ {model} PASS ({ans})")
            else:
                print(f"❌ {model} FAIL ({ans})")

        except Exception as e:
            print(f"❌ {model} FAIL Exception: {repr(e)}")


# 0,1,1,2,3,5,8,13, is the fibonacci sequence, the next number is 21.
# Some results below.
""" $ ./model-api-example.py
Model: 4bit_gpt4-x-alpaca-13b-native-4bit-128g-cuda
Lora(s): []
truncation_length = 2048
instruction_template = Alpaca
✅ 4bit_gpt4-x-alpaca-13b-native-4bit-128g-cuda PASS (21)
Model: 4bit_WizardLM-13B-Uncensored-4bit-128g
Lora(s): []
truncation_length = 2048
instruction_template = WizardLM
✅ 4bit_WizardLM-13B-Uncensored-4bit-128g PASS (21)
Model: Aeala_VicUnlocked-alpaca-30b-4bit
Lora(s): []
truncation_length = 2048
instruction_template = Alpaca
✅ Aeala_VicUnlocked-alpaca-30b-4bit PASS (21)
Model: alpaca-30b-4bit
Lora(s): []
truncation_length = 2048
instruction_template = Alpaca
✅ alpaca-30b-4bit PASS (21)
"""
spaces/Anni123/AuRoRA/demo_utils.py
DELETED
@@ -1,35 +0,0 @@
import os
import json

def self_construction(datatype):
    demo_dir = "./demo_pool/{datatype}_demo".format(datatype=datatype)

    data_dir = "./data_pool/{datatype}".format(datatype=datatype)
    if os.path.exists(demo_dir):
        print(demo_dir)
    if os.path.exists(data_dir):
        with open(data_dir, 'r') as f:
            for line in f.readlines():
                pass  # the deleted file broke off mid-statement here; the loop body was never written

self_construction('strategyqa')

single_data = {
    'question': "asfawreg",
    'datatype': "dfawds",
    'base_ans': "",
    'base_cots': "",
    'adapter_ans': "",
    'revised_cots': "",
    'retrieved_knowledge': "",
    'feedback': ""
}

data_dir = "./data_pool/{datatype}".format(datatype="test")
# with open(data_dir, 'a') as f:
#     data_json = json.dumps(single_data)
#     f.write(data_json + "\n")

with open(data_dir, 'r') as f:
    for line in f.readlines():
        data_dict = json.loads(line)
        print(type(data_dict))
spaces/ArdaSaygan/PollGeneratorApp/utils.py
DELETED
@@ -1,57 +0,0 @@
import openai

openai.api_key = "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"

class GPTCompletion:
    def __init__(
        self,
        system="You are a helpful AI assistant",
        model="gpt-3.5-turbo",
        temperature=1.0,
        top_p=1.0,
        n=1,
        stream=False,
        stop=None,
        max_tokens=256,
        presence_penalty=0.0,
        frequency_penalty=0.0,
        logit_bias={}
    ):
        self.system = system
        self.model = model
        self.messages = [{"role": "system", "content": f"{self.system}"}]
        self.temperature = temperature
        self.top_p = top_p
        self.n = n
        self.stream = stream
        self.stop = stop
        self.max_tokens = max_tokens
        self.presence_penalty = presence_penalty
        self.frequency_penalty = frequency_penalty
        self.logit_bias = logit_bias

    def chatComplete(self, chatHistory, newMessage, firstMessage=""):
        self.messages.append({"role": "user", "content": f"{firstMessage}"})
        for i in range(len(chatHistory)):
            self.messages.append({"role": "user", "content": f"{chatHistory[i][0]}"})
            self.messages.append({"role": "assistant", "content": f"{chatHistory[i][1]}"})

        self.messages.append({"role": "user", "content": f"{newMessage}"})

        response = openai.ChatCompletion.create(
            model=self.model,
            messages=self.messages,
            temperature=self.temperature,
            top_p=self.top_p,
            n=self.n,
            stream=self.stream,
            stop=self.stop,
            max_tokens=self.max_tokens,
            presence_penalty=self.presence_penalty,
            frequency_penalty=self.frequency_penalty,
            logit_bias=self.logit_bias
        )

        return response["choices"][0].message["content"].strip()
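A minimal usage sketch for the class above. The (user, assistant) pair format for chatHistory is inferred from the loop in chatComplete, and the prompts are invented for illustration; the legacy openai.ChatCompletion API the class targets is assumed to be available.

completion = GPTCompletion(system="You are a helpful poll-creating assistant")

# Each history entry is a (user message, assistant reply) pair.
history = [
    ("Create a poll about lunch options.", "Sure - which options should it include?"),
]
answer = completion.chatComplete(history, "Pizza, sushi and salad, please.")
print(answer)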
spaces/AriusXi/CodeGenerator/app.py
DELETED
@@ -1,17 +0,0 @@
from transformers import AutoTokenizer, AutoModelForCausalLM
import gradio as grad

codegen_tkn = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
mdl = AutoModelForCausalLM.from_pretrained("Salesforce/codegen-350M-mono")

def codegen(intent):
    # give input as text which reflects intent of the program, e.g.
    # "write a function which takes 2 numbers as input and returns the larger of the two"
    input_ids = codegen_tkn(intent, return_tensors="pt").input_ids

    gen_ids = mdl.generate(input_ids, max_length=128)
    response = codegen_tkn.decode(gen_ids[0], skip_special_tokens=True)
    return response

output = grad.Textbox(lines=1, label="Generated Python Code", placeholder="")
inp = grad.Textbox(lines=1, label="Place your intent here")
grad.Interface(codegen, inputs=inp, outputs=output).launch()
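The codegen helper can also be exercised without the Gradio UI; a quick sketch using the example intent from the comment in the deleted file (the model download on first call is assumed):

# Prints CodeGen's completion for the intent, which should include a Python function body.
print(codegen("write a function which takes 2 numbers as input and returns the larger of the two"))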
spaces/Arnx/MusicGenXvAKN/CHANGELOG.md
DELETED
@@ -1,23 +0,0 @@
# Changelog

All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).

## [0.0.2a] - TBD

Improved demo, fixed top p (thanks @jnordberg).

Compressor tanh on output to avoid clipping with some style (especially piano).
Now repeating the conditioning periodically if it is too short.

More options when launching Gradio app locally (thanks @ashleykleynhans).

Testing out PyTorch 2.0 memory efficient attention.

Added extended generation (infinite length) by slowly moving the windows.
Note that other implementations exist: https://github.com/camenduru/MusicGen-colab.

## [0.0.1] - 2023-06-09

Initial release, with model evaluation only.
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/filters/__init__.py
DELETED
@@ -1,940 +0,0 @@
"""
    pygments.filters
    ~~~~~~~~~~~~~~~~

    Module containing filter lookup functions and default
    filters.

    :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

import re

from pip._vendor.pygments.token import String, Comment, Keyword, Name, Error, Whitespace, \
    string_to_tokentype
from pip._vendor.pygments.filter import Filter
from pip._vendor.pygments.util import get_list_opt, get_int_opt, get_bool_opt, \
    get_choice_opt, ClassNotFound, OptionError
from pip._vendor.pygments.plugin import find_plugin_filters


def find_filter_class(filtername):
    """Lookup a filter by name. Return None if not found."""
    if filtername in FILTERS:
        return FILTERS[filtername]
    for name, cls in find_plugin_filters():
        if name == filtername:
            return cls
    return None


def get_filter_by_name(filtername, **options):
    """Return an instantiated filter.

    Options are passed to the filter initializer if wanted.
    Raise a ClassNotFound if not found.
    """
    cls = find_filter_class(filtername)
    if cls:
        return cls(**options)
    else:
        raise ClassNotFound('filter %r not found' % filtername)


def get_all_filters():
    """Return a generator of all filter names."""
    yield from FILTERS
    for name, _ in find_plugin_filters():
        yield name


def _replace_special(ttype, value, regex, specialttype,
                     replacefunc=lambda x: x):
    last = 0
    for match in regex.finditer(value):
        start, end = match.start(), match.end()
        if start != last:
            yield ttype, value[last:start]
        yield specialttype, replacefunc(value[start:end])
        last = end
    if last != len(value):
        yield ttype, value[last:]


class CodeTagFilter(Filter):
    """Highlight special code tags in comments and docstrings.

    Options accepted:

    `codetags` : list of strings
        A list of strings that are flagged as code tags. The default is to
        highlight ``XXX``, ``TODO``, ``FIXME``, ``BUG`` and ``NOTE``.

    .. versionchanged:: 2.13
        Now recognizes ``FIXME`` by default.
    """

    def __init__(self, **options):
        Filter.__init__(self, **options)
        tags = get_list_opt(options, 'codetags',
                            ['XXX', 'TODO', 'FIXME', 'BUG', 'NOTE'])
        self.tag_re = re.compile(r'\b(%s)\b' % '|'.join([
            re.escape(tag) for tag in tags if tag
        ]))

    def filter(self, lexer, stream):
        regex = self.tag_re
        for ttype, value in stream:
            if ttype in String.Doc or \
               ttype in Comment and \
               ttype not in Comment.Preproc:
                yield from _replace_special(ttype, value, regex, Comment.Special)
            else:
                yield ttype, value


class SymbolFilter(Filter):
    """Convert mathematical symbols such as \\<longrightarrow> in Isabelle
    or \\longrightarrow in LaTeX into Unicode characters.

    This is mostly useful for HTML or console output when you want to
    approximate the source rendering you'd see in an IDE.

    Options accepted:

    `lang` : string
        The symbol language. Must be one of ``'isabelle'`` or
        ``'latex'``. The default is ``'isabelle'``.
    """

    latex_symbols = {
        '\\alpha' : '\U000003b1',
        '\\beta' : '\U000003b2',
        '\\gamma' : '\U000003b3',
        '\\delta' : '\U000003b4',
        '\\varepsilon' : '\U000003b5',
        '\\zeta' : '\U000003b6',
        '\\eta' : '\U000003b7',
        '\\vartheta' : '\U000003b8',
        '\\iota' : '\U000003b9',
        '\\kappa' : '\U000003ba',
        '\\lambda' : '\U000003bb',
        '\\mu' : '\U000003bc',
        '\\nu' : '\U000003bd',
        '\\xi' : '\U000003be',
        '\\pi' : '\U000003c0',
        '\\varrho' : '\U000003c1',
        '\\sigma' : '\U000003c3',
        '\\tau' : '\U000003c4',
        '\\upsilon' : '\U000003c5',
        '\\varphi' : '\U000003c6',
        '\\chi' : '\U000003c7',
        '\\psi' : '\U000003c8',
        '\\omega' : '\U000003c9',
        '\\Gamma' : '\U00000393',
        '\\Delta' : '\U00000394',
        '\\Theta' : '\U00000398',
        '\\Lambda' : '\U0000039b',
        '\\Xi' : '\U0000039e',
        '\\Pi' : '\U000003a0',
        '\\Sigma' : '\U000003a3',
        '\\Upsilon' : '\U000003a5',
        '\\Phi' : '\U000003a6',
        '\\Psi' : '\U000003a8',
        '\\Omega' : '\U000003a9',
        '\\leftarrow' : '\U00002190',
        '\\longleftarrow' : '\U000027f5',
        '\\rightarrow' : '\U00002192',
        '\\longrightarrow' : '\U000027f6',
        '\\Leftarrow' : '\U000021d0',
        '\\Longleftarrow' : '\U000027f8',
        '\\Rightarrow' : '\U000021d2',
        '\\Longrightarrow' : '\U000027f9',
        '\\leftrightarrow' : '\U00002194',
        '\\longleftrightarrow' : '\U000027f7',
        '\\Leftrightarrow' : '\U000021d4',
        '\\Longleftrightarrow' : '\U000027fa',
        '\\mapsto' : '\U000021a6',
        '\\longmapsto' : '\U000027fc',
        '\\relbar' : '\U00002500',
        '\\Relbar' : '\U00002550',
        '\\hookleftarrow' : '\U000021a9',
        '\\hookrightarrow' : '\U000021aa',
        '\\leftharpoondown' : '\U000021bd',
        '\\rightharpoondown' : '\U000021c1',
        '\\leftharpoonup' : '\U000021bc',
        '\\rightharpoonup' : '\U000021c0',
        '\\rightleftharpoons' : '\U000021cc',
        '\\leadsto' : '\U0000219d',
        '\\downharpoonleft' : '\U000021c3',
        '\\downharpoonright' : '\U000021c2',
        '\\upharpoonleft' : '\U000021bf',
        '\\upharpoonright' : '\U000021be',
        '\\restriction' : '\U000021be',
        '\\uparrow' : '\U00002191',
        '\\Uparrow' : '\U000021d1',
        '\\downarrow' : '\U00002193',
        '\\Downarrow' : '\U000021d3',
        '\\updownarrow' : '\U00002195',
        '\\Updownarrow' : '\U000021d5',
        '\\langle' : '\U000027e8',
        '\\rangle' : '\U000027e9',
        '\\lceil' : '\U00002308',
        '\\rceil' : '\U00002309',
        '\\lfloor' : '\U0000230a',
        '\\rfloor' : '\U0000230b',
        '\\flqq' : '\U000000ab',
        '\\frqq' : '\U000000bb',
        '\\bot' : '\U000022a5',
        '\\top' : '\U000022a4',
        '\\wedge' : '\U00002227',
        '\\bigwedge' : '\U000022c0',
        '\\vee' : '\U00002228',
        '\\bigvee' : '\U000022c1',
        '\\forall' : '\U00002200',
        '\\exists' : '\U00002203',
        '\\nexists' : '\U00002204',
        '\\neg' : '\U000000ac',
        '\\Box' : '\U000025a1',
        '\\Diamond' : '\U000025c7',
        '\\vdash' : '\U000022a2',
        '\\models' : '\U000022a8',
        '\\dashv' : '\U000022a3',
        '\\surd' : '\U0000221a',
        '\\le' : '\U00002264',
        '\\ge' : '\U00002265',
        '\\ll' : '\U0000226a',
        '\\gg' : '\U0000226b',
        '\\lesssim' : '\U00002272',
        '\\gtrsim' : '\U00002273',
        '\\lessapprox' : '\U00002a85',
        '\\gtrapprox' : '\U00002a86',
        '\\in' : '\U00002208',
        '\\notin' : '\U00002209',
        '\\subset' : '\U00002282',
        '\\supset' : '\U00002283',
        '\\subseteq' : '\U00002286',
        '\\supseteq' : '\U00002287',
        '\\sqsubset' : '\U0000228f',
        '\\sqsupset' : '\U00002290',
        '\\sqsubseteq' : '\U00002291',
        '\\sqsupseteq' : '\U00002292',
        '\\cap' : '\U00002229',
        '\\bigcap' : '\U000022c2',
        '\\cup' : '\U0000222a',
        '\\bigcup' : '\U000022c3',
        '\\sqcup' : '\U00002294',
        '\\bigsqcup' : '\U00002a06',
        '\\sqcap' : '\U00002293',
        '\\Bigsqcap' : '\U00002a05',
        '\\setminus' : '\U00002216',
        '\\propto' : '\U0000221d',
        '\\uplus' : '\U0000228e',
        '\\bigplus' : '\U00002a04',
        '\\sim' : '\U0000223c',
        '\\doteq' : '\U00002250',
        '\\simeq' : '\U00002243',
        '\\approx' : '\U00002248',
        '\\asymp' : '\U0000224d',
        '\\cong' : '\U00002245',
        '\\equiv' : '\U00002261',
        '\\Join' : '\U000022c8',
        '\\bowtie' : '\U00002a1d',
        '\\prec' : '\U0000227a',
        '\\succ' : '\U0000227b',
        '\\preceq' : '\U0000227c',
        '\\succeq' : '\U0000227d',
        '\\parallel' : '\U00002225',
        '\\mid' : '\U000000a6',
        '\\pm' : '\U000000b1',
        '\\mp' : '\U00002213',
        '\\times' : '\U000000d7',
        '\\div' : '\U000000f7',
        '\\cdot' : '\U000022c5',
        '\\star' : '\U000022c6',
        '\\circ' : '\U00002218',
        '\\dagger' : '\U00002020',
        '\\ddagger' : '\U00002021',
        '\\lhd' : '\U000022b2',
        '\\rhd' : '\U000022b3',
        '\\unlhd' : '\U000022b4',
        '\\unrhd' : '\U000022b5',
        '\\triangleleft' : '\U000025c3',
        '\\triangleright' : '\U000025b9',
        '\\triangle' : '\U000025b3',
        '\\triangleq' : '\U0000225c',
        '\\oplus' : '\U00002295',
        '\\bigoplus' : '\U00002a01',
        '\\otimes' : '\U00002297',
        '\\bigotimes' : '\U00002a02',
        '\\odot' : '\U00002299',
        '\\bigodot' : '\U00002a00',
        '\\ominus' : '\U00002296',
        '\\oslash' : '\U00002298',
        '\\dots' : '\U00002026',
        '\\cdots' : '\U000022ef',
        '\\sum' : '\U00002211',
        '\\prod' : '\U0000220f',
        '\\coprod' : '\U00002210',
        '\\infty' : '\U0000221e',
        '\\int' : '\U0000222b',
        '\\oint' : '\U0000222e',
        '\\clubsuit' : '\U00002663',
        '\\diamondsuit' : '\U00002662',
        '\\heartsuit' : '\U00002661',
        '\\spadesuit' : '\U00002660',
        '\\aleph' : '\U00002135',
        '\\emptyset' : '\U00002205',
        '\\nabla' : '\U00002207',
        '\\partial' : '\U00002202',
        '\\flat' : '\U0000266d',
        '\\natural' : '\U0000266e',
        '\\sharp' : '\U0000266f',
        '\\angle' : '\U00002220',
        '\\copyright' : '\U000000a9',
        '\\textregistered' : '\U000000ae',
        '\\textonequarter' : '\U000000bc',
        '\\textonehalf' : '\U000000bd',
        '\\textthreequarters' : '\U000000be',
        '\\textordfeminine' : '\U000000aa',
        '\\textordmasculine' : '\U000000ba',
        '\\euro' : '\U000020ac',
        '\\pounds' : '\U000000a3',
        '\\yen' : '\U000000a5',
        '\\textcent' : '\U000000a2',
        '\\textcurrency' : '\U000000a4',
        '\\textdegree' : '\U000000b0',
    }

    isabelle_symbols = {
        '\\<zero>' : '\U0001d7ec',
        '\\<one>' : '\U0001d7ed',
        '\\<two>' : '\U0001d7ee',
        '\\<three>' : '\U0001d7ef',
        '\\<four>' : '\U0001d7f0',
        '\\<five>' : '\U0001d7f1',
        '\\<six>' : '\U0001d7f2',
        '\\<seven>' : '\U0001d7f3',
        '\\<eight>' : '\U0001d7f4',
        '\\<nine>' : '\U0001d7f5',
        '\\<A>' : '\U0001d49c',
        '\\<B>' : '\U0000212c',
        '\\<C>' : '\U0001d49e',
        '\\<D>' : '\U0001d49f',
        '\\<E>' : '\U00002130',
        '\\<F>' : '\U00002131',
        '\\<G>' : '\U0001d4a2',
        '\\<H>' : '\U0000210b',
        '\\<I>' : '\U00002110',
        '\\<J>' : '\U0001d4a5',
        '\\<K>' : '\U0001d4a6',
        '\\<L>' : '\U00002112',
        '\\<M>' : '\U00002133',
        '\\<N>' : '\U0001d4a9',
        '\\<O>' : '\U0001d4aa',
        '\\<P>' : '\U0001d4ab',
        '\\<Q>' : '\U0001d4ac',
        '\\<R>' : '\U0000211b',
        '\\<S>' : '\U0001d4ae',
        '\\<T>' : '\U0001d4af',
        '\\<U>' : '\U0001d4b0',
        '\\<V>' : '\U0001d4b1',
        '\\<W>' : '\U0001d4b2',
        '\\<X>' : '\U0001d4b3',
        '\\<Y>' : '\U0001d4b4',
        '\\<Z>' : '\U0001d4b5',
        '\\<a>' : '\U0001d5ba',
        '\\<b>' : '\U0001d5bb',
        '\\<c>' : '\U0001d5bc',
        '\\<d>' : '\U0001d5bd',
        '\\<e>' : '\U0001d5be',
        '\\<f>' : '\U0001d5bf',
        '\\<g>' : '\U0001d5c0',
        '\\<h>' : '\U0001d5c1',
        '\\<i>' : '\U0001d5c2',
        '\\<j>' : '\U0001d5c3',
        '\\<k>' : '\U0001d5c4',
        '\\<l>' : '\U0001d5c5',
        '\\<m>' : '\U0001d5c6',
        '\\<n>' : '\U0001d5c7',
        '\\<o>' : '\U0001d5c8',
        '\\<p>' : '\U0001d5c9',
        '\\<q>' : '\U0001d5ca',
        '\\<r>' : '\U0001d5cb',
        '\\<s>' : '\U0001d5cc',
        '\\<t>' : '\U0001d5cd',
        '\\<u>' : '\U0001d5ce',
        '\\<v>' : '\U0001d5cf',
        '\\<w>' : '\U0001d5d0',
        '\\<x>' : '\U0001d5d1',
        '\\<y>' : '\U0001d5d2',
        '\\<z>' : '\U0001d5d3',
        '\\<AA>' : '\U0001d504',
        '\\<BB>' : '\U0001d505',
        '\\<CC>' : '\U0000212d',
        '\\<DD>' : '\U0001d507',
        '\\<EE>' : '\U0001d508',
        '\\<FF>' : '\U0001d509',
        '\\<GG>' : '\U0001d50a',
        '\\<HH>' : '\U0000210c',
        '\\<II>' : '\U00002111',
        '\\<JJ>' : '\U0001d50d',
        '\\<KK>' : '\U0001d50e',
        '\\<LL>' : '\U0001d50f',
        '\\<MM>' : '\U0001d510',
        '\\<NN>' : '\U0001d511',
        '\\<OO>' : '\U0001d512',
        '\\<PP>' : '\U0001d513',
        '\\<QQ>' : '\U0001d514',
        '\\<RR>' : '\U0000211c',
        '\\<SS>' : '\U0001d516',
        '\\<TT>' : '\U0001d517',
        '\\<UU>' : '\U0001d518',
        '\\<VV>' : '\U0001d519',
        '\\<WW>' : '\U0001d51a',
|
396 |
-
'\\<XX>' : '\U0001d51b',
|
397 |
-
'\\<YY>' : '\U0001d51c',
|
398 |
-
'\\<ZZ>' : '\U00002128',
|
399 |
-
'\\<aa>' : '\U0001d51e',
|
400 |
-
'\\<bb>' : '\U0001d51f',
|
401 |
-
'\\<cc>' : '\U0001d520',
|
402 |
-
'\\<dd>' : '\U0001d521',
|
403 |
-
'\\<ee>' : '\U0001d522',
|
404 |
-
'\\<ff>' : '\U0001d523',
|
405 |
-
'\\<gg>' : '\U0001d524',
|
406 |
-
'\\<hh>' : '\U0001d525',
|
407 |
-
'\\<ii>' : '\U0001d526',
|
408 |
-
'\\<jj>' : '\U0001d527',
|
409 |
-
'\\<kk>' : '\U0001d528',
|
410 |
-
'\\<ll>' : '\U0001d529',
|
411 |
-
'\\<mm>' : '\U0001d52a',
|
412 |
-
'\\<nn>' : '\U0001d52b',
|
413 |
-
'\\<oo>' : '\U0001d52c',
|
414 |
-
'\\<pp>' : '\U0001d52d',
|
415 |
-
'\\<qq>' : '\U0001d52e',
|
416 |
-
'\\<rr>' : '\U0001d52f',
|
417 |
-
'\\<ss>' : '\U0001d530',
|
418 |
-
'\\<tt>' : '\U0001d531',
|
419 |
-
'\\<uu>' : '\U0001d532',
|
420 |
-
'\\<vv>' : '\U0001d533',
|
421 |
-
'\\<ww>' : '\U0001d534',
|
422 |
-
'\\<xx>' : '\U0001d535',
|
423 |
-
'\\<yy>' : '\U0001d536',
|
424 |
-
'\\<zz>' : '\U0001d537',
|
425 |
-
'\\<alpha>' : '\U000003b1',
|
426 |
-
'\\<beta>' : '\U000003b2',
|
427 |
-
'\\<gamma>' : '\U000003b3',
|
428 |
-
'\\<delta>' : '\U000003b4',
|
429 |
-
'\\<epsilon>' : '\U000003b5',
|
430 |
-
'\\<zeta>' : '\U000003b6',
|
431 |
-
'\\<eta>' : '\U000003b7',
|
432 |
-
'\\<theta>' : '\U000003b8',
|
433 |
-
'\\<iota>' : '\U000003b9',
|
434 |
-
'\\<kappa>' : '\U000003ba',
|
435 |
-
'\\<lambda>' : '\U000003bb',
|
436 |
-
'\\<mu>' : '\U000003bc',
|
437 |
-
'\\<nu>' : '\U000003bd',
|
438 |
-
'\\<xi>' : '\U000003be',
|
439 |
-
'\\<pi>' : '\U000003c0',
|
440 |
-
'\\<rho>' : '\U000003c1',
|
441 |
-
'\\<sigma>' : '\U000003c3',
|
442 |
-
'\\<tau>' : '\U000003c4',
|
443 |
-
'\\<upsilon>' : '\U000003c5',
|
444 |
-
'\\<phi>' : '\U000003c6',
|
445 |
-
'\\<chi>' : '\U000003c7',
|
446 |
-
'\\<psi>' : '\U000003c8',
|
447 |
-
'\\<omega>' : '\U000003c9',
|
448 |
-
'\\<Gamma>' : '\U00000393',
|
449 |
-
'\\<Delta>' : '\U00000394',
|
450 |
-
'\\<Theta>' : '\U00000398',
|
451 |
-
'\\<Lambda>' : '\U0000039b',
|
452 |
-
'\\<Xi>' : '\U0000039e',
|
453 |
-
'\\<Pi>' : '\U000003a0',
|
454 |
-
'\\<Sigma>' : '\U000003a3',
|
455 |
-
'\\<Upsilon>' : '\U000003a5',
|
456 |
-
'\\<Phi>' : '\U000003a6',
|
457 |
-
'\\<Psi>' : '\U000003a8',
|
458 |
-
'\\<Omega>' : '\U000003a9',
|
459 |
-
'\\<bool>' : '\U0001d539',
|
460 |
-
'\\<complex>' : '\U00002102',
|
461 |
-
'\\<nat>' : '\U00002115',
|
462 |
-
'\\<rat>' : '\U0000211a',
|
463 |
-
'\\<real>' : '\U0000211d',
|
464 |
-
'\\<int>' : '\U00002124',
|
465 |
-
'\\<leftarrow>' : '\U00002190',
|
466 |
-
'\\<longleftarrow>' : '\U000027f5',
|
467 |
-
'\\<rightarrow>' : '\U00002192',
|
468 |
-
'\\<longrightarrow>' : '\U000027f6',
|
469 |
-
'\\<Leftarrow>' : '\U000021d0',
|
470 |
-
'\\<Longleftarrow>' : '\U000027f8',
|
471 |
-
'\\<Rightarrow>' : '\U000021d2',
|
472 |
-
'\\<Longrightarrow>' : '\U000027f9',
|
473 |
-
'\\<leftrightarrow>' : '\U00002194',
|
474 |
-
'\\<longleftrightarrow>' : '\U000027f7',
|
475 |
-
'\\<Leftrightarrow>' : '\U000021d4',
|
476 |
-
'\\<Longleftrightarrow>' : '\U000027fa',
|
477 |
-
'\\<mapsto>' : '\U000021a6',
|
478 |
-
'\\<longmapsto>' : '\U000027fc',
|
479 |
-
'\\<midarrow>' : '\U00002500',
|
480 |
-
'\\<Midarrow>' : '\U00002550',
|
481 |
-
'\\<hookleftarrow>' : '\U000021a9',
|
482 |
-
'\\<hookrightarrow>' : '\U000021aa',
|
483 |
-
'\\<leftharpoondown>' : '\U000021bd',
|
484 |
-
'\\<rightharpoondown>' : '\U000021c1',
|
485 |
-
'\\<leftharpoonup>' : '\U000021bc',
|
486 |
-
'\\<rightharpoonup>' : '\U000021c0',
|
487 |
-
'\\<rightleftharpoons>' : '\U000021cc',
|
488 |
-
'\\<leadsto>' : '\U0000219d',
|
489 |
-
'\\<downharpoonleft>' : '\U000021c3',
|
490 |
-
'\\<downharpoonright>' : '\U000021c2',
|
491 |
-
'\\<upharpoonleft>' : '\U000021bf',
|
492 |
-
'\\<upharpoonright>' : '\U000021be',
|
493 |
-
'\\<restriction>' : '\U000021be',
|
494 |
-
'\\<Colon>' : '\U00002237',
|
495 |
-
'\\<up>' : '\U00002191',
|
496 |
-
'\\<Up>' : '\U000021d1',
|
497 |
-
'\\<down>' : '\U00002193',
|
498 |
-
'\\<Down>' : '\U000021d3',
|
499 |
-
'\\<updown>' : '\U00002195',
|
500 |
-
'\\<Updown>' : '\U000021d5',
|
501 |
-
'\\<langle>' : '\U000027e8',
|
502 |
-
'\\<rangle>' : '\U000027e9',
|
503 |
-
'\\<lceil>' : '\U00002308',
|
504 |
-
'\\<rceil>' : '\U00002309',
|
505 |
-
'\\<lfloor>' : '\U0000230a',
|
506 |
-
'\\<rfloor>' : '\U0000230b',
|
507 |
-
'\\<lparr>' : '\U00002987',
|
508 |
-
'\\<rparr>' : '\U00002988',
|
509 |
-
'\\<lbrakk>' : '\U000027e6',
|
510 |
-
'\\<rbrakk>' : '\U000027e7',
|
511 |
-
'\\<lbrace>' : '\U00002983',
|
512 |
-
'\\<rbrace>' : '\U00002984',
|
513 |
-
'\\<guillemotleft>' : '\U000000ab',
|
514 |
-
'\\<guillemotright>' : '\U000000bb',
|
515 |
-
'\\<bottom>' : '\U000022a5',
|
516 |
-
'\\<top>' : '\U000022a4',
|
517 |
-
'\\<and>' : '\U00002227',
|
518 |
-
'\\<And>' : '\U000022c0',
|
519 |
-
'\\<or>' : '\U00002228',
|
520 |
-
'\\<Or>' : '\U000022c1',
|
521 |
-
'\\<forall>' : '\U00002200',
|
522 |
-
'\\<exists>' : '\U00002203',
|
523 |
-
'\\<nexists>' : '\U00002204',
|
524 |
-
'\\<not>' : '\U000000ac',
|
525 |
-
'\\<box>' : '\U000025a1',
|
526 |
-
'\\<diamond>' : '\U000025c7',
|
527 |
-
'\\<turnstile>' : '\U000022a2',
|
528 |
-
'\\<Turnstile>' : '\U000022a8',
|
529 |
-
'\\<tturnstile>' : '\U000022a9',
|
530 |
-
'\\<TTurnstile>' : '\U000022ab',
|
531 |
-
'\\<stileturn>' : '\U000022a3',
|
532 |
-
'\\<surd>' : '\U0000221a',
|
533 |
-
'\\<le>' : '\U00002264',
|
534 |
-
'\\<ge>' : '\U00002265',
|
535 |
-
'\\<lless>' : '\U0000226a',
|
536 |
-
'\\<ggreater>' : '\U0000226b',
|
537 |
-
'\\<lesssim>' : '\U00002272',
|
538 |
-
'\\<greatersim>' : '\U00002273',
|
539 |
-
'\\<lessapprox>' : '\U00002a85',
|
540 |
-
'\\<greaterapprox>' : '\U00002a86',
|
541 |
-
'\\<in>' : '\U00002208',
|
542 |
-
'\\<notin>' : '\U00002209',
|
543 |
-
'\\<subset>' : '\U00002282',
|
544 |
-
'\\<supset>' : '\U00002283',
|
545 |
-
'\\<subseteq>' : '\U00002286',
|
546 |
-
'\\<supseteq>' : '\U00002287',
|
547 |
-
'\\<sqsubset>' : '\U0000228f',
|
548 |
-
'\\<sqsupset>' : '\U00002290',
|
549 |
-
'\\<sqsubseteq>' : '\U00002291',
|
550 |
-
'\\<sqsupseteq>' : '\U00002292',
|
551 |
-
'\\<inter>' : '\U00002229',
|
552 |
-
'\\<Inter>' : '\U000022c2',
|
553 |
-
'\\<union>' : '\U0000222a',
|
554 |
-
'\\<Union>' : '\U000022c3',
|
555 |
-
'\\<squnion>' : '\U00002294',
|
556 |
-
'\\<Squnion>' : '\U00002a06',
|
557 |
-
'\\<sqinter>' : '\U00002293',
|
558 |
-
'\\<Sqinter>' : '\U00002a05',
|
559 |
-
'\\<setminus>' : '\U00002216',
|
560 |
-
'\\<propto>' : '\U0000221d',
|
561 |
-
'\\<uplus>' : '\U0000228e',
|
562 |
-
'\\<Uplus>' : '\U00002a04',
|
563 |
-
'\\<noteq>' : '\U00002260',
|
564 |
-
'\\<sim>' : '\U0000223c',
|
565 |
-
'\\<doteq>' : '\U00002250',
|
566 |
-
'\\<simeq>' : '\U00002243',
|
567 |
-
'\\<approx>' : '\U00002248',
|
568 |
-
'\\<asymp>' : '\U0000224d',
|
569 |
-
'\\<cong>' : '\U00002245',
|
570 |
-
'\\<smile>' : '\U00002323',
|
571 |
-
'\\<equiv>' : '\U00002261',
|
572 |
-
'\\<frown>' : '\U00002322',
|
573 |
-
'\\<Join>' : '\U000022c8',
|
574 |
-
'\\<bowtie>' : '\U00002a1d',
|
575 |
-
'\\<prec>' : '\U0000227a',
|
576 |
-
'\\<succ>' : '\U0000227b',
|
577 |
-
'\\<preceq>' : '\U0000227c',
|
578 |
-
'\\<succeq>' : '\U0000227d',
|
579 |
-
'\\<parallel>' : '\U00002225',
|
580 |
-
'\\<bar>' : '\U000000a6',
|
581 |
-
'\\<plusminus>' : '\U000000b1',
|
582 |
-
'\\<minusplus>' : '\U00002213',
|
583 |
-
'\\<times>' : '\U000000d7',
|
584 |
-
'\\<div>' : '\U000000f7',
|
585 |
-
'\\<cdot>' : '\U000022c5',
|
586 |
-
'\\<star>' : '\U000022c6',
|
587 |
-
'\\<bullet>' : '\U00002219',
|
588 |
-
'\\<circ>' : '\U00002218',
|
589 |
-
'\\<dagger>' : '\U00002020',
|
590 |
-
'\\<ddagger>' : '\U00002021',
|
591 |
-
'\\<lhd>' : '\U000022b2',
|
592 |
-
'\\<rhd>' : '\U000022b3',
|
593 |
-
'\\<unlhd>' : '\U000022b4',
|
594 |
-
'\\<unrhd>' : '\U000022b5',
|
595 |
-
'\\<triangleleft>' : '\U000025c3',
|
596 |
-
'\\<triangleright>' : '\U000025b9',
|
597 |
-
'\\<triangle>' : '\U000025b3',
|
598 |
-
'\\<triangleq>' : '\U0000225c',
|
599 |
-
'\\<oplus>' : '\U00002295',
|
600 |
-
'\\<Oplus>' : '\U00002a01',
|
601 |
-
'\\<otimes>' : '\U00002297',
|
602 |
-
'\\<Otimes>' : '\U00002a02',
|
603 |
-
'\\<odot>' : '\U00002299',
|
604 |
-
'\\<Odot>' : '\U00002a00',
|
605 |
-
'\\<ominus>' : '\U00002296',
|
606 |
-
'\\<oslash>' : '\U00002298',
|
607 |
-
'\\<dots>' : '\U00002026',
|
608 |
-
'\\<cdots>' : '\U000022ef',
|
609 |
-
'\\<Sum>' : '\U00002211',
|
610 |
-
'\\<Prod>' : '\U0000220f',
|
611 |
-
'\\<Coprod>' : '\U00002210',
|
612 |
-
'\\<infinity>' : '\U0000221e',
|
613 |
-
'\\<integral>' : '\U0000222b',
|
614 |
-
'\\<ointegral>' : '\U0000222e',
|
615 |
-
'\\<clubsuit>' : '\U00002663',
|
616 |
-
'\\<diamondsuit>' : '\U00002662',
|
617 |
-
'\\<heartsuit>' : '\U00002661',
|
618 |
-
'\\<spadesuit>' : '\U00002660',
|
619 |
-
'\\<aleph>' : '\U00002135',
|
620 |
-
'\\<emptyset>' : '\U00002205',
|
621 |
-
'\\<nabla>' : '\U00002207',
|
622 |
-
'\\<partial>' : '\U00002202',
|
623 |
-
'\\<flat>' : '\U0000266d',
|
624 |
-
'\\<natural>' : '\U0000266e',
|
625 |
-
'\\<sharp>' : '\U0000266f',
|
626 |
-
'\\<angle>' : '\U00002220',
|
627 |
-
'\\<copyright>' : '\U000000a9',
|
628 |
-
'\\<registered>' : '\U000000ae',
|
629 |
-
'\\<hyphen>' : '\U000000ad',
|
630 |
-
'\\<inverse>' : '\U000000af',
|
631 |
-
'\\<onequarter>' : '\U000000bc',
|
632 |
-
'\\<onehalf>' : '\U000000bd',
|
633 |
-
'\\<threequarters>' : '\U000000be',
|
634 |
-
'\\<ordfeminine>' : '\U000000aa',
|
635 |
-
'\\<ordmasculine>' : '\U000000ba',
|
636 |
-
'\\<section>' : '\U000000a7',
|
637 |
-
'\\<paragraph>' : '\U000000b6',
|
638 |
-
'\\<exclamdown>' : '\U000000a1',
|
639 |
-
'\\<questiondown>' : '\U000000bf',
|
640 |
-
'\\<euro>' : '\U000020ac',
|
641 |
-
'\\<pounds>' : '\U000000a3',
|
642 |
-
'\\<yen>' : '\U000000a5',
|
643 |
-
'\\<cent>' : '\U000000a2',
|
644 |
-
'\\<currency>' : '\U000000a4',
|
645 |
-
'\\<degree>' : '\U000000b0',
|
646 |
-
'\\<amalg>' : '\U00002a3f',
|
647 |
-
'\\<mho>' : '\U00002127',
|
648 |
-
'\\<lozenge>' : '\U000025ca',
|
649 |
-
'\\<wp>' : '\U00002118',
|
650 |
-
'\\<wrong>' : '\U00002240',
|
651 |
-
'\\<struct>' : '\U000022c4',
|
652 |
-
'\\<acute>' : '\U000000b4',
|
653 |
-
'\\<index>' : '\U00000131',
|
654 |
-
'\\<dieresis>' : '\U000000a8',
|
655 |
-
'\\<cedilla>' : '\U000000b8',
|
656 |
-
'\\<hungarumlaut>' : '\U000002dd',
|
657 |
-
'\\<some>' : '\U000003f5',
|
658 |
-
'\\<newline>' : '\U000023ce',
|
659 |
-
'\\<open>' : '\U00002039',
|
660 |
-
'\\<close>' : '\U0000203a',
|
661 |
-
'\\<here>' : '\U00002302',
|
662 |
-
'\\<^sub>' : '\U000021e9',
|
663 |
-
'\\<^sup>' : '\U000021e7',
|
664 |
-
'\\<^bold>' : '\U00002759',
|
665 |
-
'\\<^bsub>' : '\U000021d8',
|
666 |
-
'\\<^esub>' : '\U000021d9',
|
667 |
-
'\\<^bsup>' : '\U000021d7',
|
668 |
-
'\\<^esup>' : '\U000021d6',
|
669 |
-
}
|
670 |
-
|
671 |
-
lang_map = {'isabelle' : isabelle_symbols, 'latex' : latex_symbols}
|
672 |
-
|
673 |
-
def __init__(self, **options):
|
674 |
-
Filter.__init__(self, **options)
|
675 |
-
lang = get_choice_opt(options, 'lang',
|
676 |
-
['isabelle', 'latex'], 'isabelle')
|
677 |
-
self.symbols = self.lang_map[lang]
|
678 |
-
|
679 |
-
def filter(self, lexer, stream):
|
680 |
-
for ttype, value in stream:
|
681 |
-
if value in self.symbols:
|
682 |
-
yield ttype, self.symbols[value]
|
683 |
-
else:
|
684 |
-
yield ttype, value
|
685 |
-
|
686 |
-
|
687 |
-
class KeywordCaseFilter(Filter):
|
688 |
-
"""Convert keywords to lowercase or uppercase or capitalize them, which
|
689 |
-
means first letter uppercase, rest lowercase.
|
690 |
-
|
691 |
-
This can be useful e.g. if you highlight Pascal code and want to adapt the
|
692 |
-
code to your styleguide.
|
693 |
-
|
694 |
-
Options accepted:
|
695 |
-
|
696 |
-
`case` : string
|
697 |
-
The casing to convert keywords to. Must be one of ``'lower'``,
|
698 |
-
``'upper'`` or ``'capitalize'``. The default is ``'lower'``.
|
699 |
-
"""
|
700 |
-
|
701 |
-
def __init__(self, **options):
|
702 |
-
Filter.__init__(self, **options)
|
703 |
-
case = get_choice_opt(options, 'case',
|
704 |
-
['lower', 'upper', 'capitalize'], 'lower')
|
705 |
-
self.convert = getattr(str, case)
|
706 |
-
|
707 |
-
def filter(self, lexer, stream):
|
708 |
-
for ttype, value in stream:
|
709 |
-
if ttype in Keyword:
|
710 |
-
yield ttype, self.convert(value)
|
711 |
-
else:
|
712 |
-
yield ttype, value
|
713 |
-
|
714 |
-
|
715 |
-
class NameHighlightFilter(Filter):
|
716 |
-
"""Highlight a normal Name (and Name.*) token with a different token type.
|
717 |
-
|
718 |
-
Example::
|
719 |
-
|
720 |
-
filter = NameHighlightFilter(
|
721 |
-
names=['foo', 'bar', 'baz'],
|
722 |
-
tokentype=Name.Function,
|
723 |
-
)
|
724 |
-
|
725 |
-
This would highlight the names "foo", "bar" and "baz"
|
726 |
-
as functions. `Name.Function` is the default token type.
|
727 |
-
|
728 |
-
Options accepted:
|
729 |
-
|
730 |
-
`names` : list of strings
|
731 |
-
A list of names that should be given the different token type.
|
732 |
-
There is no default.
|
733 |
-
`tokentype` : TokenType or string
|
734 |
-
A token type or a string containing a token type name that is
|
735 |
-
used for highlighting the strings in `names`. The default is
|
736 |
-
`Name.Function`.
|
737 |
-
"""
|
738 |
-
|
739 |
-
def __init__(self, **options):
|
740 |
-
Filter.__init__(self, **options)
|
741 |
-
self.names = set(get_list_opt(options, 'names', []))
|
742 |
-
tokentype = options.get('tokentype')
|
743 |
-
if tokentype:
|
744 |
-
self.tokentype = string_to_tokentype(tokentype)
|
745 |
-
else:
|
746 |
-
self.tokentype = Name.Function
|
747 |
-
|
748 |
-
def filter(self, lexer, stream):
|
749 |
-
for ttype, value in stream:
|
750 |
-
if ttype in Name and value in self.names:
|
751 |
-
yield self.tokentype, value
|
752 |
-
else:
|
753 |
-
yield ttype, value
|
754 |
-
|
755 |
-
|
756 |
-
class ErrorToken(Exception):
|
757 |
-
pass
|
758 |
-
|
759 |
-
|
760 |
-
class RaiseOnErrorTokenFilter(Filter):
|
761 |
-
"""Raise an exception when the lexer generates an error token.
|
762 |
-
|
763 |
-
Options accepted:
|
764 |
-
|
765 |
-
`excclass` : Exception class
|
766 |
-
The exception class to raise.
|
767 |
-
The default is `pygments.filters.ErrorToken`.
|
768 |
-
|
769 |
-
.. versionadded:: 0.8
|
770 |
-
"""
|
771 |
-
|
772 |
-
def __init__(self, **options):
|
773 |
-
Filter.__init__(self, **options)
|
774 |
-
self.exception = options.get('excclass', ErrorToken)
|
775 |
-
try:
|
776 |
-
# issubclass() will raise TypeError if first argument is not a class
|
777 |
-
if not issubclass(self.exception, Exception):
|
778 |
-
raise TypeError
|
779 |
-
except TypeError:
|
780 |
-
raise OptionError('excclass option is not an exception class')
|
781 |
-
|
782 |
-
def filter(self, lexer, stream):
|
783 |
-
for ttype, value in stream:
|
784 |
-
if ttype is Error:
|
785 |
-
raise self.exception(value)
|
786 |
-
yield ttype, value
|
787 |
-
|
788 |
-
|
789 |
-
class VisibleWhitespaceFilter(Filter):
|
790 |
-
"""Convert tabs, newlines and/or spaces to visible characters.
|
791 |
-
|
792 |
-
Options accepted:
|
793 |
-
|
794 |
-
`spaces` : string or bool
|
795 |
-
If this is a one-character string, spaces will be replaces by this string.
|
796 |
-
If it is another true value, spaces will be replaced by ``·`` (unicode
|
797 |
-
MIDDLE DOT). If it is a false value, spaces will not be replaced. The
|
798 |
-
default is ``False``.
|
799 |
-
`tabs` : string or bool
|
800 |
-
The same as for `spaces`, but the default replacement character is ``»``
|
801 |
-
(unicode RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK). The default value
|
802 |
-
is ``False``. Note: this will not work if the `tabsize` option for the
|
803 |
-
lexer is nonzero, as tabs will already have been expanded then.
|
804 |
-
`tabsize` : int
|
805 |
-
If tabs are to be replaced by this filter (see the `tabs` option), this
|
806 |
-
is the total number of characters that a tab should be expanded to.
|
807 |
-
The default is ``8``.
|
808 |
-
`newlines` : string or bool
|
809 |
-
The same as for `spaces`, but the default replacement character is ``¶``
|
810 |
-
(unicode PILCROW SIGN). The default value is ``False``.
|
811 |
-
`wstokentype` : bool
|
812 |
-
If true, give whitespace the special `Whitespace` token type. This allows
|
813 |
-
styling the visible whitespace differently (e.g. greyed out), but it can
|
814 |
-
disrupt background colors. The default is ``True``.
|
815 |
-
|
816 |
-
.. versionadded:: 0.8
|
817 |
-
"""
|
818 |
-
|
819 |
-
def __init__(self, **options):
|
820 |
-
Filter.__init__(self, **options)
|
821 |
-
for name, default in [('spaces', '·'),
|
822 |
-
('tabs', '»'),
|
823 |
-
('newlines', '¶')]:
|
824 |
-
opt = options.get(name, False)
|
825 |
-
if isinstance(opt, str) and len(opt) == 1:
|
826 |
-
setattr(self, name, opt)
|
827 |
-
else:
|
828 |
-
setattr(self, name, (opt and default or ''))
|
829 |
-
tabsize = get_int_opt(options, 'tabsize', 8)
|
830 |
-
if self.tabs:
|
831 |
-
self.tabs += ' ' * (tabsize - 1)
|
832 |
-
if self.newlines:
|
833 |
-
self.newlines += '\n'
|
834 |
-
self.wstt = get_bool_opt(options, 'wstokentype', True)
|
835 |
-
|
836 |
-
def filter(self, lexer, stream):
|
837 |
-
if self.wstt:
|
838 |
-
spaces = self.spaces or ' '
|
839 |
-
tabs = self.tabs or '\t'
|
840 |
-
newlines = self.newlines or '\n'
|
841 |
-
regex = re.compile(r'\s')
|
842 |
-
|
843 |
-
def replacefunc(wschar):
|
844 |
-
if wschar == ' ':
|
845 |
-
return spaces
|
846 |
-
elif wschar == '\t':
|
847 |
-
return tabs
|
848 |
-
elif wschar == '\n':
|
849 |
-
return newlines
|
850 |
-
return wschar
|
851 |
-
|
852 |
-
for ttype, value in stream:
|
853 |
-
yield from _replace_special(ttype, value, regex, Whitespace,
|
854 |
-
replacefunc)
|
855 |
-
else:
|
856 |
-
spaces, tabs, newlines = self.spaces, self.tabs, self.newlines
|
857 |
-
# simpler processing
|
858 |
-
for ttype, value in stream:
|
859 |
-
if spaces:
|
860 |
-
value = value.replace(' ', spaces)
|
861 |
-
if tabs:
|
862 |
-
value = value.replace('\t', tabs)
|
863 |
-
if newlines:
|
864 |
-
value = value.replace('\n', newlines)
|
865 |
-
yield ttype, value
|
866 |
-
|
867 |
-
|
868 |
-
class GobbleFilter(Filter):
|
869 |
-
"""Gobbles source code lines (eats initial characters).
|
870 |
-
|
871 |
-
This filter drops the first ``n`` characters off every line of code. This
|
872 |
-
may be useful when the source code fed to the lexer is indented by a fixed
|
873 |
-
amount of space that isn't desired in the output.
|
874 |
-
|
875 |
-
Options accepted:
|
876 |
-
|
877 |
-
`n` : int
|
878 |
-
The number of characters to gobble.
|
879 |
-
|
880 |
-
.. versionadded:: 1.2
|
881 |
-
"""
|
882 |
-
def __init__(self, **options):
|
883 |
-
Filter.__init__(self, **options)
|
884 |
-
self.n = get_int_opt(options, 'n', 0)
|
885 |
-
|
886 |
-
def gobble(self, value, left):
|
887 |
-
if left < len(value):
|
888 |
-
return value[left:], 0
|
889 |
-
else:
|
890 |
-
return '', left - len(value)
|
891 |
-
|
892 |
-
def filter(self, lexer, stream):
|
893 |
-
n = self.n
|
894 |
-
left = n # How many characters left to gobble.
|
895 |
-
for ttype, value in stream:
|
896 |
-
# Remove ``left`` tokens from first line, ``n`` from all others.
|
897 |
-
parts = value.split('\n')
|
898 |
-
(parts[0], left) = self.gobble(parts[0], left)
|
899 |
-
for i in range(1, len(parts)):
|
900 |
-
(parts[i], left) = self.gobble(parts[i], n)
|
901 |
-
value = '\n'.join(parts)
|
902 |
-
|
903 |
-
if value != '':
|
904 |
-
yield ttype, value
|
905 |
-
|
906 |
-
|
907 |
-
class TokenMergeFilter(Filter):
|
908 |
-
"""Merges consecutive tokens with the same token type in the output
|
909 |
-
stream of a lexer.
|
910 |
-
|
911 |
-
.. versionadded:: 1.2
|
912 |
-
"""
|
913 |
-
def __init__(self, **options):
|
914 |
-
Filter.__init__(self, **options)
|
915 |
-
|
916 |
-
def filter(self, lexer, stream):
|
917 |
-
current_type = None
|
918 |
-
current_value = None
|
919 |
-
for ttype, value in stream:
|
920 |
-
if ttype is current_type:
|
921 |
-
current_value += value
|
922 |
-
else:
|
923 |
-
if current_type is not None:
|
924 |
-
yield current_type, current_value
|
925 |
-
current_type = ttype
|
926 |
-
current_value = value
|
927 |
-
if current_type is not None:
|
928 |
-
yield current_type, current_value
|
929 |
-
|
930 |
-
|
931 |
-
FILTERS = {
|
932 |
-
'codetagify': CodeTagFilter,
|
933 |
-
'keywordcase': KeywordCaseFilter,
|
934 |
-
'highlight': NameHighlightFilter,
|
935 |
-
'raiseonerror': RaiseOnErrorTokenFilter,
|
936 |
-
'whitespace': VisibleWhitespaceFilter,
|
937 |
-
'gobble': GobbleFilter,
|
938 |
-
'tokenmerge': TokenMergeFilter,
|
939 |
-
'symbols': SymbolFilter,
|
940 |
-
}
|
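These filters are registered by name in the FILTERS dict above, so they are normally attached to a lexer through the public Pygments API rather than instantiated directly. A minimal sketch of exercising two of them, assuming a standard Pygments install; the sample source string is illustrative:

import code:

    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    from pygments.lexers import PythonLexer

    lexer = PythonLexer()
    lexer.add_filter('keywordcase', case='upper')  # KeywordCaseFilter: def -> DEF, return -> RETURN
    lexer.add_filter('whitespace', spaces=True)    # VisibleWhitespaceFilter: spaces shown as middle dots

    print(highlight("def f(x):\n    return x\n", lexer, TerminalFormatter()))

The same add_filter call accepts the other registry names ('symbols', 'gobble', 'tokenmerge', ...) with the options documented in the docstrings above.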
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/_loop.py
DELETED
@@ -1,43 +0,0 @@
-from typing import Iterable, Tuple, TypeVar
-
-T = TypeVar("T")
-
-
-def loop_first(values: Iterable[T]) -> Iterable[Tuple[bool, T]]:
-    """Iterate and generate a tuple with a flag for first value."""
-    iter_values = iter(values)
-    try:
-        value = next(iter_values)
-    except StopIteration:
-        return
-    yield True, value
-    for value in iter_values:
-        yield False, value
-
-
-def loop_last(values: Iterable[T]) -> Iterable[Tuple[bool, T]]:
-    """Iterate and generate a tuple with a flag for last value."""
-    iter_values = iter(values)
-    try:
-        previous_value = next(iter_values)
-    except StopIteration:
-        return
-    for value in iter_values:
-        yield False, previous_value
-        previous_value = value
-    yield True, previous_value
-
-
-def loop_first_last(values: Iterable[T]) -> Iterable[Tuple[bool, bool, T]]:
-    """Iterate and generate a tuple with a flag for first and last value."""
-    iter_values = iter(values)
-    try:
-        previous_value = next(iter_values)
-    except StopIteration:
-        return
-    first = True
-    for value in iter_values:
-        yield first, False, previous_value
-        first = False
-        previous_value = value
-    yield first, True, previous_value
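These helpers are plain generators, so they are easy to demonstrate on their own. A quick sketch of the flags they yield, assuming a standalone rich install rather than pip's vendored copy:

    from rich._loop import loop_first, loop_last, loop_first_last

    print(list(loop_first("abc")))       # [(True, 'a'), (False, 'b'), (False, 'c')]
    print(list(loop_last("abc")))        # [(False, 'a'), (False, 'b'), (True, 'c')]
    print(list(loop_first_last("abc")))  # [(True, False, 'a'), (False, False, 'b'), (False, True, 'c')]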
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/evaluation/cityscapes_evaluation.py
DELETED
@@ -1,194 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import glob
-import logging
-import numpy as np
-import os
-import tempfile
-from collections import OrderedDict
-import torch
-from PIL import Image
-
-from detectron2.data import MetadataCatalog
-from detectron2.utils import comm
-from detectron2.utils.file_io import PathManager
-
-from .evaluator import DatasetEvaluator
-
-
-class CityscapesEvaluator(DatasetEvaluator):
-    """
-    Base class for evaluation using cityscapes API.
-    """
-
-    def __init__(self, dataset_name):
-        """
-        Args:
-            dataset_name (str): the name of the dataset.
-                It must have the following metadata associated with it:
-                "thing_classes", "gt_dir".
-        """
-        self._metadata = MetadataCatalog.get(dataset_name)
-        self._cpu_device = torch.device("cpu")
-        self._logger = logging.getLogger(__name__)
-
-    def reset(self):
-        self._working_dir = tempfile.TemporaryDirectory(prefix="cityscapes_eval_")
-        self._temp_dir = self._working_dir.name
-        # All workers will write to the same results directory
-        # TODO this does not work in distributed training
-        self._temp_dir = comm.all_gather(self._temp_dir)[0]
-        if self._temp_dir != self._working_dir.name:
-            self._working_dir.cleanup()
-        self._logger.info(
-            "Writing cityscapes results to temporary directory {} ...".format(self._temp_dir)
-        )
-
-
-class CityscapesInstanceEvaluator(CityscapesEvaluator):
-    """
-    Evaluate instance segmentation results on cityscapes dataset using cityscapes API.
-
-    Note:
-        * It does not work in multi-machine distributed training.
-        * It contains a synchronization, therefore has to be used on all ranks.
-        * Only the main process runs evaluation.
-    """
-
-    def process(self, inputs, outputs):
-        from cityscapesscripts.helpers.labels import name2label
-
-        for input, output in zip(inputs, outputs):
-            file_name = input["file_name"]
-            basename = os.path.splitext(os.path.basename(file_name))[0]
-            pred_txt = os.path.join(self._temp_dir, basename + "_pred.txt")
-
-            if "instances" in output:
-                output = output["instances"].to(self._cpu_device)
-                num_instances = len(output)
-                with open(pred_txt, "w") as fout:
-                    for i in range(num_instances):
-                        pred_class = output.pred_classes[i]
-                        classes = self._metadata.thing_classes[pred_class]
-                        class_id = name2label[classes].id
-                        score = output.scores[i]
-                        mask = output.pred_masks[i].numpy().astype("uint8")
-                        png_filename = os.path.join(
-                            self._temp_dir, basename + "_{}_{}.png".format(i, classes)
-                        )
-
-                        Image.fromarray(mask * 255).save(png_filename)
-                        fout.write(
-                            "{} {} {}\n".format(os.path.basename(png_filename), class_id, score)
-                        )
-            else:
-                # Cityscapes requires a prediction file for every ground truth image.
-                with open(pred_txt, "w") as fout:
-                    pass
-
-    def evaluate(self):
-        """
-        Returns:
-            dict: has a key "segm", whose value is a dict of "AP" and "AP50".
-        """
-        comm.synchronize()
-        if comm.get_rank() > 0:
-            return
-        import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling as cityscapes_eval
-
-        self._logger.info("Evaluating results under {} ...".format(self._temp_dir))
-
-        # set some global states in cityscapes evaluation API, before evaluating
-        cityscapes_eval.args.predictionPath = os.path.abspath(self._temp_dir)
-        cityscapes_eval.args.predictionWalk = None
-        cityscapes_eval.args.JSONOutput = False
-        cityscapes_eval.args.colorized = False
-        cityscapes_eval.args.gtInstancesFile = os.path.join(self._temp_dir, "gtInstances.json")
-
-        # These lines are adopted from
-        # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalInstanceLevelSemanticLabeling.py # noqa
-        gt_dir = PathManager.get_local_path(self._metadata.gt_dir)
-        groundTruthImgList = glob.glob(os.path.join(gt_dir, "*", "*_gtFine_instanceIds.png"))
-        assert len(
-            groundTruthImgList
-        ), "Cannot find any ground truth images to use for evaluation. Searched for: {}".format(
-            cityscapes_eval.args.groundTruthSearch
-        )
-        predictionImgList = []
-        for gt in groundTruthImgList:
-            predictionImgList.append(cityscapes_eval.getPrediction(gt, cityscapes_eval.args))
-        results = cityscapes_eval.evaluateImgLists(
-            predictionImgList, groundTruthImgList, cityscapes_eval.args
-        )["averages"]
-
-        ret = OrderedDict()
-        ret["segm"] = {"AP": results["allAp"] * 100, "AP50": results["allAp50%"] * 100}
-        self._working_dir.cleanup()
-        return ret
-
-
-class CityscapesSemSegEvaluator(CityscapesEvaluator):
-    """
-    Evaluate semantic segmentation results on cityscapes dataset using cityscapes API.
-
-    Note:
-        * It does not work in multi-machine distributed training.
-        * It contains a synchronization, therefore has to be used on all ranks.
-        * Only the main process runs evaluation.
-    """
-
-    def process(self, inputs, outputs):
-        from cityscapesscripts.helpers.labels import trainId2label
-
-        for input, output in zip(inputs, outputs):
-            file_name = input["file_name"]
-            basename = os.path.splitext(os.path.basename(file_name))[0]
-            pred_filename = os.path.join(self._temp_dir, basename + "_pred.png")
-
-            output = output["sem_seg"].argmax(dim=0).to(self._cpu_device).numpy()
-            pred = 255 * np.ones(output.shape, dtype=np.uint8)
-            for train_id, label in trainId2label.items():
-                if label.ignoreInEval:
-                    continue
-                pred[output == train_id] = label.id
-            Image.fromarray(pred).save(pred_filename)
-
-    def evaluate(self):
-        comm.synchronize()
-        if comm.get_rank() > 0:
-            return
-        # Load the Cityscapes eval script *after* setting the required env var,
-        # since the script reads CITYSCAPES_DATASET into global variables at load time.
-        import cityscapesscripts.evaluation.evalPixelLevelSemanticLabeling as cityscapes_eval
-
-        self._logger.info("Evaluating results under {} ...".format(self._temp_dir))
-
-        # set some global states in cityscapes evaluation API, before evaluating
-        cityscapes_eval.args.predictionPath = os.path.abspath(self._temp_dir)
-        cityscapes_eval.args.predictionWalk = None
-        cityscapes_eval.args.JSONOutput = False
-        cityscapes_eval.args.colorized = False
-
-        # These lines are adopted from
-        # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalPixelLevelSemanticLabeling.py # noqa
-        gt_dir = PathManager.get_local_path(self._metadata.gt_dir)
-        groundTruthImgList = glob.glob(os.path.join(gt_dir, "*", "*_gtFine_labelIds.png"))
-        assert len(
-            groundTruthImgList
-        ), "Cannot find any ground truth images to use for evaluation. Searched for: {}".format(
-            cityscapes_eval.args.groundTruthSearch
-        )
-        predictionImgList = []
-        for gt in groundTruthImgList:
-            predictionImgList.append(cityscapes_eval.getPrediction(cityscapes_eval.args, gt))
-        results = cityscapes_eval.evaluateImgLists(
-            predictionImgList, groundTruthImgList, cityscapes_eval.args
-        )
-        ret = OrderedDict()
-        ret["sem_seg"] = {
-            "IoU": 100.0 * results["averageScoreClasses"],
-            "iIoU": 100.0 * results["averageScoreInstClasses"],
-            "IoU_sup": 100.0 * results["averageScoreCategories"],
-            "iIoU_sup": 100.0 * results["averageScoreInstCategories"],
-        }
-        self._working_dir.cleanup()
-        return ret
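In detectron2 these evaluators plug into the generic evaluation loop. A rough sketch, assuming the cityscapesscripts package is installed and that cfg and model have already been built elsewhere; both are placeholders here, not part of this file:

    from detectron2.data import build_detection_test_loader
    from detectron2.evaluation import inference_on_dataset

    # "cityscapes_fine_instance_seg_val" is one of detectron2's builtin dataset names.
    evaluator = CityscapesInstanceEvaluator("cityscapes_fine_instance_seg_val")
    val_loader = build_detection_test_loader(cfg, "cityscapes_fine_instance_seg_val")  # cfg: assumed config
    results = inference_on_dataset(model, val_loader, evaluator)  # model: assumed trained model
    print(results)  # {'segm': {'AP': ..., 'AP50': ...}}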
spaces/Banbri/zcvzcv/src/app/globals.css
DELETED
@@ -1,39 +0,0 @@
-@tailwind base;
-@tailwind components;
-@tailwind utilities;
-
-:root {
-  --foreground-rgb: 0, 0, 0;
-  --background-start-rgb: 214, 219, 220;
-  --background-end-rgb: 255, 255, 255;
-}
-
-@media (prefers-color-scheme: dark) {
-  :root {
-    --foreground-rgb: 255, 255, 255;
-    --background-start-rgb: 0, 0, 0;
-    --background-end-rgb: 0, 0, 0;
-  }
-}
-
-body {
-  color: rgb(var(--foreground-rgb));
-  background: linear-gradient(
-      to bottom,
-      transparent,
-      rgb(var(--background-end-rgb))
-    )
-    rgb(var(--background-start-rgb));
-}
-
-
-/* this is the trick to bypass the style={{}} attribute when printing */
-@media print {
-  .comic-page[style] { width: 100vw !important; }
-}
-
-
-.render-to-image .comic-panel {
-  height: auto !important;
-  /* max-width: fit-content !important; */
-}
spaces/Banbri/zcvzcv/src/app/queries/getStyle.ts
DELETED
@@ -1,52 +0,0 @@
-import { createLlamaPrompt } from "@/lib/createLlamaPrompt"
-
-import { predict } from "./predict"
-import { Preset } from "../engine/presets"
-
-export const getStory = async ({
-  preset,
-  prompt = "",
-}: {
-  preset: Preset;
-  prompt: string;
-}) => {
-
-  const query = createLlamaPrompt([
-    {
-      role: "system",
-      content: [
-        `You are a comic book author specialized in ${preset.llmPrompt}`,
-        `You are going to be asked to write a comic book page, your mission is to answer a JSON array containing 4 items, to describe the page (one item per panel).`,
-        `Each array item should be a comic book panel caption the describe the environment, era, characters, objects, textures, lighting.`,
-        `Be brief in your caption don't add your own comments. Be straight to the point, and never reply things like "Sure, I can.." etc.`
-      ].filter(item => item).join("\n")
-    },
-    {
-      role: "user",
-      content: `The story is: ${prompt}`,
-    }
-  ])
-
-
-  let result = ""
-  try {
-    result = `${await predict(query) || ""}`.trim()
-    if (!result.length) {
-      throw new Error("empty result!")
-    }
-  } catch (err) {
-    console.log(`prediction of the story failed, trying again..`)
-    try {
-      result = `${await predict(query+".") || ""}`.trim()
-      if (!result.length) {
-        throw new Error("empty result!")
-      }
-    } catch (err) {
-      console.error(`prediction of the story failed again!`)
-      throw new Error(`failed to generate the story ${err}`)
-    }
-  }
-
-  const tmp = result // result.split("Caption:").pop() || result
-  return tmp.replaceAll("\n", ", ")
-}
spaces/Bart92/RVC_HF/utils/backups.py
DELETED
@@ -1,141 +0,0 @@
-import os
-import shutil
-import hashlib
-import time
-import base64
-
-
-
-
-LOGS_FOLDER = '/content/Applio-RVC-Fork/logs'
-WEIGHTS_FOLDER = '/content/Applio-RVC-Fork/weights'
-GOOGLE_DRIVE_PATH = '/content/drive/MyDrive/RVC_Backup'
-
-def import_google_drive_backup():
-    print("Importing Google Drive backup...")
-    weights_exist = False
-    for root, dirs, files in os.walk(GOOGLE_DRIVE_PATH):
-        for filename in files:
-            filepath = os.path.join(root, filename)
-            if os.path.isfile(filepath) and not filepath.startswith(os.path.join(GOOGLE_DRIVE_PATH, 'weights')):
-                backup_filepath = os.path.join(LOGS_FOLDER, os.path.relpath(filepath, GOOGLE_DRIVE_PATH))
-                backup_folderpath = os.path.dirname(backup_filepath)
-                if not os.path.exists(backup_folderpath):
-                    os.makedirs(backup_folderpath)
-                    print(f'Created backup folder: {backup_folderpath}', flush=True)
-                shutil.copy2(filepath, backup_filepath)  # copy file with metadata
-                print(f'Imported file from Google Drive backup: {filename}')
-            elif filepath.startswith(os.path.join(GOOGLE_DRIVE_PATH, 'weights')) and filename.endswith('.pth'):
-                weights_exist = True
-                weights_filepath = os.path.join(WEIGHTS_FOLDER, os.path.relpath(filepath, os.path.join(GOOGLE_DRIVE_PATH, 'weights')))
-                weights_folderpath = os.path.dirname(weights_filepath)
-                if not os.path.exists(weights_folderpath):
-                    os.makedirs(weights_folderpath)
-                    print(f'Created weights folder: {weights_folderpath}', flush=True)
-                shutil.copy2(filepath, weights_filepath)  # copy file with metadata
-                print(f'Imported file from weights: {filename}')
-    if weights_exist:
-        print("Copied weights from Google Drive backup to local weights folder.")
-    else:
-        print("No weights found in Google Drive backup.")
-    print("Google Drive backup import completed.")
-
-def get_md5_hash(file_path):
-    hash_md5 = hashlib.md5()
-    with open(file_path, "rb") as f:
-        for chunk in iter(lambda: f.read(4096), b""):
-            hash_md5.update(chunk)
-    return hash_md5.hexdigest()
-
-def copy_weights_folder_to_drive():
-    destination_folder = os.path.join(GOOGLE_DRIVE_PATH, 'weights')
-    try:
-        if not os.path.exists(destination_folder):
-            os.makedirs(destination_folder)
-
-        num_copied = 0
-        for filename in os.listdir(WEIGHTS_FOLDER):
-            if filename.endswith('.pth'):
-                source_file = os.path.join(WEIGHTS_FOLDER, filename)
-                destination_file = os.path.join(destination_folder, filename)
-                if not os.path.exists(destination_file):
-                    shutil.copy2(source_file, destination_file)
-                    num_copied += 1
-                    print(f"Copied {filename} to Google Drive!")
-
-        if num_copied == 0:
-            print("No new finished models found for copying.")
-        else:
-            print(f"Finished copying {num_copied} files to Google Drive!")
-
-    except Exception as e:
-        print(f"An error occurred while copying weights: {str(e)}")
-        # You can log the error or take appropriate actions here.
-
-def backup_files():
-    print("\nStarting backup loop...")
-    last_backup_timestamps_path = os.path.join(LOGS_FOLDER, 'last_backup_timestamps.txt')
-    fully_updated = False  # boolean to track if all files are up to date
-
-    while True:
-        try:
-            updated = False  # flag to check if any files were updated
-            last_backup_timestamps = {}
-
-            try:
-                with open(last_backup_timestamps_path, 'r') as f:
-                    last_backup_timestamps = dict(line.strip().split(':') for line in f)
-            except FileNotFoundError:
-                pass  # File does not exist yet, which is fine
-
-            for root, dirs, files in os.walk(LOGS_FOLDER):
-                for filename in files:
-                    if filename != 'last_backup_timestamps.txt':
-                        filepath = os.path.join(root, filename)
-                        if os.path.isfile(filepath):
-                            backup_filepath = os.path.join(GOOGLE_DRIVE_PATH, os.path.relpath(filepath, LOGS_FOLDER))
-                            backup_folderpath = os.path.dirname(backup_filepath)
-                            if not os.path.exists(backup_folderpath):
-                                os.makedirs(backup_folderpath)
-                                print(f'Created backup folder: {backup_folderpath}', flush=True)
-                            # check if file has changed since last backup
-                            last_backup_timestamp = last_backup_timestamps.get(filepath)
-                            current_timestamp = os.path.getmtime(filepath)
-                            if last_backup_timestamp is None or float(last_backup_timestamp) < current_timestamp:
-                                shutil.copy2(filepath, backup_filepath)  # copy file with metadata
-                                last_backup_timestamps[filepath] = str(current_timestamp)  # update last backup timestamp
-                                if last_backup_timestamp is None:
-                                    print(f'Backed up file: {filename}')
-                                else:
-                                    print(f'Updating backed up file: {filename}')
-                                updated = True
-                                fully_updated = False  # if a file is updated, all files are not up to date
-
-            # check if any files were deleted in Colab and delete them from the backup drive
-            for filepath in list(last_backup_timestamps.keys()):
-                if not os.path.exists(filepath):
-                    backup_filepath = os.path.join(GOOGLE_DRIVE_PATH, os.path.relpath(filepath, LOGS_FOLDER))
-                    if os.path.exists(backup_filepath):
-                        os.remove(backup_filepath)
-                        print(f'Deleted file: {filepath}')
-                    del last_backup_timestamps[filepath]
-                    updated = True
-                    fully_updated = False  # if a file is deleted, all files are not up to date
-
-            if not updated and not fully_updated:
-                print("Files are up to date.")
-                fully_updated = True  # if all files are up to date, set the boolean to True
-                copy_weights_folder_to_drive()
-                sleep_time = 15
-            else:
-                sleep_time = 0.1
-
-            with open(last_backup_timestamps_path, 'w') as f:
-                for filepath, timestamp in last_backup_timestamps.items():
-                    f.write(f'{filepath}:{timestamp}\n')
-
-            time.sleep(sleep_time)  # wait for 15 seconds before checking again, or 0.1s if not fully up to date to speed up backups
-
-        except Exception as e:
-            print(f"An error occurred: {str(e)}")
-            # You can log the error or take appropriate actions here.
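Since backup_files() loops forever, a notebook that uses this module would typically start it on a background thread. A minimal sketch, assuming Drive is mounted and the hard-coded folders above exist; the checkpoint path in the comment is illustrative, not taken from this file:

    import threading

    from utils.backups import backup_files, get_md5_hash

    threading.Thread(target=backup_files, daemon=True).start()  # runs until the runtime stops

    # get_md5_hash works independently, e.g. to verify a copied checkpoint:
    # print(get_md5_hash('/content/Applio-RVC-Fork/weights/my_model.pth'))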
spaces/BernardoOlisan/vqganclip/taming-transformers/taming/modules/losses/lpips.py
DELETED
@@ -1,123 +0,0 @@
-"""Stripped version of https://github.com/richzhang/PerceptualSimilarity/tree/master/models"""
-
-import torch
-import torch.nn as nn
-from torchvision import models
-from collections import namedtuple
-
-from taming.util import get_ckpt_path
-
-
-class LPIPS(nn.Module):
-    # Learned perceptual metric
-    def __init__(self, use_dropout=True):
-        super().__init__()
-        self.scaling_layer = ScalingLayer()
-        self.chns = [64, 128, 256, 512, 512]  # vgg16 features
-        self.net = vgg16(pretrained=True, requires_grad=False)
-        self.lin0 = NetLinLayer(self.chns[0], use_dropout=use_dropout)
-        self.lin1 = NetLinLayer(self.chns[1], use_dropout=use_dropout)
-        self.lin2 = NetLinLayer(self.chns[2], use_dropout=use_dropout)
-        self.lin3 = NetLinLayer(self.chns[3], use_dropout=use_dropout)
-        self.lin4 = NetLinLayer(self.chns[4], use_dropout=use_dropout)
-        self.load_from_pretrained()
-        for param in self.parameters():
-            param.requires_grad = False
-
-    def load_from_pretrained(self, name="vgg_lpips"):
-        ckpt = get_ckpt_path(name, "taming/modules/autoencoder/lpips")
-        self.load_state_dict(torch.load(ckpt, map_location=torch.device("cpu")), strict=False)
-        print("loaded pretrained LPIPS loss from {}".format(ckpt))
-
-    @classmethod
-    def from_pretrained(cls, name="vgg_lpips"):
-        if name != "vgg_lpips":  # compare strings by value; "is not" checks identity
-            raise NotImplementedError
-        model = cls()
-        ckpt = get_ckpt_path(name)
-        model.load_state_dict(torch.load(ckpt, map_location=torch.device("cpu")), strict=False)
-        return model
-
-    def forward(self, input, target):
-        in0_input, in1_input = (self.scaling_layer(input), self.scaling_layer(target))
-        outs0, outs1 = self.net(in0_input), self.net(in1_input)
-        feats0, feats1, diffs = {}, {}, {}
-        lins = [self.lin0, self.lin1, self.lin2, self.lin3, self.lin4]
-        for kk in range(len(self.chns)):
-            feats0[kk], feats1[kk] = normalize_tensor(outs0[kk]), normalize_tensor(outs1[kk])
-            diffs[kk] = (feats0[kk] - feats1[kk]) ** 2
-
-        res = [spatial_average(lins[kk].model(diffs[kk]), keepdim=True) for kk in range(len(self.chns))]
-        val = res[0]
-        for l in range(1, len(self.chns)):
-            val += res[l]
-        return val
-
-
-class ScalingLayer(nn.Module):
-    def __init__(self):
-        super(ScalingLayer, self).__init__()
-        self.register_buffer('shift', torch.Tensor([-.030, -.088, -.188])[None, :, None, None])
-        self.register_buffer('scale', torch.Tensor([.458, .448, .450])[None, :, None, None])
-
-    def forward(self, inp):
-        return (inp - self.shift) / self.scale
-
-
-class NetLinLayer(nn.Module):
-    """ A single linear layer which does a 1x1 conv """
-    def __init__(self, chn_in, chn_out=1, use_dropout=False):
-        super(NetLinLayer, self).__init__()
-        layers = [nn.Dropout(), ] if (use_dropout) else []
-        layers += [nn.Conv2d(chn_in, chn_out, 1, stride=1, padding=0, bias=False), ]
-        self.model = nn.Sequential(*layers)
-
-
-class vgg16(torch.nn.Module):
-    def __init__(self, requires_grad=False, pretrained=True):
-        super(vgg16, self).__init__()
-        vgg_pretrained_features = models.vgg16(pretrained=pretrained).features
-        self.slice1 = torch.nn.Sequential()
-        self.slice2 = torch.nn.Sequential()
-        self.slice3 = torch.nn.Sequential()
-        self.slice4 = torch.nn.Sequential()
-        self.slice5 = torch.nn.Sequential()
-        self.N_slices = 5
-        for x in range(4):
-            self.slice1.add_module(str(x), vgg_pretrained_features[x])
-        for x in range(4, 9):
-            self.slice2.add_module(str(x), vgg_pretrained_features[x])
-        for x in range(9, 16):
-            self.slice3.add_module(str(x), vgg_pretrained_features[x])
-        for x in range(16, 23):
-            self.slice4.add_module(str(x), vgg_pretrained_features[x])
-        for x in range(23, 30):
-            self.slice5.add_module(str(x), vgg_pretrained_features[x])
-        if not requires_grad:
-            for param in self.parameters():
-                param.requires_grad = False
-
-    def forward(self, X):
-        h = self.slice1(X)
-        h_relu1_2 = h
-        h = self.slice2(h)
-        h_relu2_2 = h
-        h = self.slice3(h)
-        h_relu3_3 = h
-        h = self.slice4(h)
-        h_relu4_3 = h
-        h = self.slice5(h)
-        h_relu5_3 = h
-        vgg_outputs = namedtuple("VggOutputs", ['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3', 'relu5_3'])
-        out = vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3, h_relu5_3)
-        return out
-
-
-def normalize_tensor(x, eps=1e-10):
-    norm_factor = torch.sqrt(torch.sum(x**2, dim=1, keepdim=True))
-    return x / (norm_factor + eps)
-
-
-def spatial_average(x, keepdim=True):
-    return x.mean([2, 3], keepdim=keepdim)
-
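In taming-transformers this metric is used as a frozen loss module on image batches scaled to [-1, 1]. A sketch with random stand-in tensors; the constructor downloads the "vgg_lpips" checkpoint through get_ckpt_path on first use:

    import torch

    from taming.modules.losses.lpips import LPIPS

    perceptual = LPIPS().eval()
    x = torch.rand(1, 3, 256, 256) * 2 - 1  # stand-in "real" batch in [-1, 1]
    y = torch.rand(1, 3, 256, 256) * 2 - 1  # stand-in "reconstruction"
    with torch.no_grad():
        d = perceptual(x, y)  # shape (1, 1, 1, 1); larger means perceptually farther apart
    print(d.item())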
spaces/Big-Web/MMSD/env/Lib/site-packages/boto3/dynamodb/conditions.py
DELETED
@@ -1,462 +0,0 @@
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# https://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import re
from collections import namedtuple

from boto3.exceptions import (
    DynamoDBNeedsConditionError,
    DynamoDBNeedsKeyConditionError,
    DynamoDBOperationNotSupportedError,
)

ATTR_NAME_REGEX = re.compile(r'[^.\[\]]+(?![^\[]*\])')


class ConditionBase:

    expression_format = ''
    expression_operator = ''
    has_grouped_values = False

    def __init__(self, *values):
        self._values = values

    def __and__(self, other):
        if not isinstance(other, ConditionBase):
            raise DynamoDBOperationNotSupportedError('AND', other)
        return And(self, other)

    def __or__(self, other):
        if not isinstance(other, ConditionBase):
            raise DynamoDBOperationNotSupportedError('OR', other)
        return Or(self, other)

    def __invert__(self):
        return Not(self)

    def get_expression(self):
        return {
            'format': self.expression_format,
            'operator': self.expression_operator,
            'values': self._values,
        }

    def __eq__(self, other):
        if isinstance(other, type(self)):
            if self._values == other._values:
                return True
        return False

    def __ne__(self, other):
        return not self.__eq__(other)


class AttributeBase:
    def __init__(self, name):
        self.name = name

    def __and__(self, value):
        raise DynamoDBOperationNotSupportedError('AND', self)

    def __or__(self, value):
        raise DynamoDBOperationNotSupportedError('OR', self)

    def __invert__(self):
        raise DynamoDBOperationNotSupportedError('NOT', self)

    def eq(self, value):
        """Creates a condition where the attribute is equal to the value.

        :param value: The value that the attribute is equal to.
        """
        return Equals(self, value)

    def lt(self, value):
        """Creates a condition where the attribute is less than the value.

        :param value: The value that the attribute is less than.
        """
        return LessThan(self, value)

    def lte(self, value):
        """Creates a condition where the attribute is less than or equal to the
        value.

        :param value: The value that the attribute is less than or equal to.
        """
        return LessThanEquals(self, value)

    def gt(self, value):
        """Creates a condition where the attribute is greater than the value.

        :param value: The value that the attribute is greater than.
        """
        return GreaterThan(self, value)

    def gte(self, value):
        """Creates a condition where the attribute is greater than or equal to
        the value.

        :param value: The value that the attribute is greater than or equal to.
        """
        return GreaterThanEquals(self, value)

    def begins_with(self, value):
        """Creates a condition where the attribute begins with the value.

        :param value: The value that the attribute begins with.
        """
        return BeginsWith(self, value)

    def between(self, low_value, high_value):
        """Creates a condition where the attribute is greater than or equal
        to the low value and less than or equal to the high value.

        :param low_value: The value that the attribute is greater than or equal to.
        :param high_value: The value that the attribute is less than or equal to.
        """
        return Between(self, low_value, high_value)

    def __eq__(self, other):
        return isinstance(other, type(self)) and self.name == other.name

    def __ne__(self, other):
        return not self.__eq__(other)


class ConditionAttributeBase(ConditionBase, AttributeBase):
    """This base class is for conditions that can have attribute methods.

    One example is the Size condition. To complete a condition, you need
    to apply another AttributeBase method like eq().
    """

    def __init__(self, *values):
        ConditionBase.__init__(self, *values)
        # This is assuming the first value to the condition is the attribute
        # in which can be used to generate its attribute base.
        AttributeBase.__init__(self, values[0].name)

    def __eq__(self, other):
        return ConditionBase.__eq__(self, other) and AttributeBase.__eq__(
            self, other
        )

    def __ne__(self, other):
        return not self.__eq__(other)


class ComparisonCondition(ConditionBase):
    expression_format = '{0} {operator} {1}'


class Equals(ComparisonCondition):
    expression_operator = '='


class NotEquals(ComparisonCondition):
    expression_operator = '<>'


class LessThan(ComparisonCondition):
    expression_operator = '<'


class LessThanEquals(ComparisonCondition):
    expression_operator = '<='


class GreaterThan(ComparisonCondition):
    expression_operator = '>'


class GreaterThanEquals(ComparisonCondition):
    expression_operator = '>='


class In(ComparisonCondition):
    expression_operator = 'IN'
    has_grouped_values = True


class Between(ConditionBase):
    expression_operator = 'BETWEEN'
    expression_format = '{0} {operator} {1} AND {2}'


class BeginsWith(ConditionBase):
    expression_operator = 'begins_with'
    expression_format = '{operator}({0}, {1})'


class Contains(ConditionBase):
    expression_operator = 'contains'
    expression_format = '{operator}({0}, {1})'


class Size(ConditionAttributeBase):
    expression_operator = 'size'
    expression_format = '{operator}({0})'


class AttributeType(ConditionBase):
    expression_operator = 'attribute_type'
    expression_format = '{operator}({0}, {1})'


class AttributeExists(ConditionBase):
    expression_operator = 'attribute_exists'
    expression_format = '{operator}({0})'


class AttributeNotExists(ConditionBase):
    expression_operator = 'attribute_not_exists'
    expression_format = '{operator}({0})'


class And(ConditionBase):
    expression_operator = 'AND'
    expression_format = '({0} {operator} {1})'


class Or(ConditionBase):
    expression_operator = 'OR'
    expression_format = '({0} {operator} {1})'


class Not(ConditionBase):
    expression_operator = 'NOT'
    expression_format = '({operator} {0})'


class Key(AttributeBase):
    pass


class Attr(AttributeBase):
    """Represents an DynamoDB item's attribute."""

    def ne(self, value):
        """Creates a condition where the attribute is not equal to the value

        :param value: The value that the attribute is not equal to.
        """
        return NotEquals(self, value)

    def is_in(self, value):
        """Creates a condition where the attribute is in the value,

        :type value: list
        :param value: The value that the attribute is in.
        """
        return In(self, value)

    def exists(self):
        """Creates a condition where the attribute exists."""
        return AttributeExists(self)

    def not_exists(self):
        """Creates a condition where the attribute does not exist."""
        return AttributeNotExists(self)

    def contains(self, value):
        """Creates a condition where the attribute contains the value.

        :param value: The value the attribute contains.
        """
        return Contains(self, value)

    def size(self):
        """Creates a condition for the attribute size.

        Note another AttributeBase method must be called on the returned
        size condition to be a valid DynamoDB condition.
        """
        return Size(self)

    def attribute_type(self, value):
        """Creates a condition for the attribute type.

        :param value: The type of the attribute.
        """
        return AttributeType(self, value)


BuiltConditionExpression = namedtuple(
    'BuiltConditionExpression',
    [
        'condition_expression',
        'attribute_name_placeholders',
        'attribute_value_placeholders',
    ],
)


class ConditionExpressionBuilder:
    """This class is used to build condition expressions with placeholders"""

    def __init__(self):
        self._name_count = 0
        self._value_count = 0
        self._name_placeholder = 'n'
        self._value_placeholder = 'v'

    def _get_name_placeholder(self):
        return '#' + self._name_placeholder + str(self._name_count)

    def _get_value_placeholder(self):
        return ':' + self._value_placeholder + str(self._value_count)

    def reset(self):
        """Resets the placeholder name and values"""
        self._name_count = 0
        self._value_count = 0

    def build_expression(self, condition, is_key_condition=False):
        """Builds the condition expression and the dictionary of placeholders.

        :type condition: ConditionBase
        :param condition: A condition to be built into a condition expression
            string with any necessary placeholders.

        :type is_key_condition: Boolean
        :param is_key_condition: True if the expression is for a
            KeyConditionExpression. False otherwise.

        :rtype: (string, dict, dict)
        :returns: Will return a string representing the condition with
            placeholders inserted where necessary, a dictionary of
            placeholders for attribute names, and a dictionary of
            placeholders for attribute values. Here is a sample return value:

            ('#n0 = :v0', {'#n0': 'myattribute'}, {':v1': 'myvalue'})
        """
        if not isinstance(condition, ConditionBase):
            raise DynamoDBNeedsConditionError(condition)
        attribute_name_placeholders = {}
        attribute_value_placeholders = {}
        condition_expression = self._build_expression(
            condition,
            attribute_name_placeholders,
            attribute_value_placeholders,
            is_key_condition=is_key_condition,
        )
        return BuiltConditionExpression(
            condition_expression=condition_expression,
            attribute_name_placeholders=attribute_name_placeholders,
            attribute_value_placeholders=attribute_value_placeholders,
        )

    def _build_expression(
        self,
        condition,
        attribute_name_placeholders,
        attribute_value_placeholders,
        is_key_condition,
    ):
        expression_dict = condition.get_expression()
        replaced_values = []
        for value in expression_dict['values']:
            # Build the necessary placeholders for that value.
            # Placeholders are built for both attribute names and values.
            replaced_value = self._build_expression_component(
                value,
                attribute_name_placeholders,
                attribute_value_placeholders,
                condition.has_grouped_values,
                is_key_condition,
            )
            replaced_values.append(replaced_value)
        # Fill out the expression using the operator and the
        # values that have been replaced with placeholders.
        return expression_dict['format'].format(
            *replaced_values, operator=expression_dict['operator']
        )

    def _build_expression_component(
        self,
        value,
        attribute_name_placeholders,
        attribute_value_placeholders,
        has_grouped_values,
        is_key_condition,
    ):
        # Continue to recurse if the value is a ConditionBase in order
        # to extract out all parts of the expression.
        if isinstance(value, ConditionBase):
            return self._build_expression(
                value,
                attribute_name_placeholders,
                attribute_value_placeholders,
                is_key_condition,
            )
        # If it is not a ConditionBase, we can recurse no further.
        # So we check if it is an attribute and add placeholders for
        # its name
        elif isinstance(value, AttributeBase):
            if is_key_condition and not isinstance(value, Key):
                raise DynamoDBNeedsKeyConditionError(
                    f'Attribute object {value.name} is of type {type(value)}. '
                    f'KeyConditionExpression only supports Attribute objects '
                    f'of type Key'
                )
            return self._build_name_placeholder(
                value, attribute_name_placeholders
            )
        # If it is anything else, we treat it as a value and thus placeholders
        # are needed for the value.
        else:
            return self._build_value_placeholder(
                value, attribute_value_placeholders, has_grouped_values
            )

    def _build_name_placeholder(self, value, attribute_name_placeholders):
        attribute_name = value.name
        # Figure out which parts of the attribute name that needs replacement.
        attribute_name_parts = ATTR_NAME_REGEX.findall(attribute_name)

        # Add a temporary placeholder for each of these parts.
        placeholder_format = ATTR_NAME_REGEX.sub('%s', attribute_name)
        str_format_args = []
        for part in attribute_name_parts:
            name_placeholder = self._get_name_placeholder()
            self._name_count += 1
            str_format_args.append(name_placeholder)
            # Add the placeholder and value to dictionary of name placeholders.
            attribute_name_placeholders[name_placeholder] = part
        # Replace the temporary placeholders with the designated placeholders.
        return placeholder_format % tuple(str_format_args)

    def _build_value_placeholder(
        self, value, attribute_value_placeholders, has_grouped_values=False
    ):
        # If the values are grouped, we need to add a placeholder for
        # each element inside of the actual value.
        if has_grouped_values:
            placeholder_list = []
            for v in value:
                value_placeholder = self._get_value_placeholder()
                self._value_count += 1
                placeholder_list.append(value_placeholder)
                attribute_value_placeholders[value_placeholder] = v
            # Assuming the values are grouped by parenthesis.
            # IN is the currently the only one that uses this so it maybe
            # needed to be changed in future.
            return '(' + ', '.join(placeholder_list) + ')'
        # Otherwise, treat the value as a single value that needs only
        # one placeholder.
        else:
            value_placeholder = self._get_value_placeholder()
            self._value_count += 1
            attribute_value_placeholders[value_placeholder] = value
            return value_placeholder
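
The condition classes above are the public `Key`/`Attr` API that boto3 users compose with `&`, `|` and `~`; `ConditionExpressionBuilder` then turns the composed tree into an expression string plus placeholder maps. A short illustration (table and attribute names hypothetical):

import boto3
from boto3.dynamodb.conditions import Attr, ConditionExpressionBuilder, Key

# Composing conditions with the operator overloads defined above
table = boto3.resource('dynamodb').Table('my-table')  # hypothetical table
response = table.query(
    KeyConditionExpression=Key('pk').eq('user#123') & Key('sk').begins_with('order#'),
    FilterExpression=Attr('status').ne('cancelled') & Attr('total').gt(100),
)

# What the builder produces under the hood for a dotted attribute name
built = ConditionExpressionBuilder().build_expression(Attr('info.rating').gte(7))
# built.condition_expression         == '#n0.#n1 >= :v0'
# built.attribute_name_placeholders  == {'#n0': 'info', '#n1': 'rating'}
# built.attribute_value_placeholders == {':v0': 7}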
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/__init__.py
DELETED
@@ -1,247 +0,0 @@
"""Extensions to the 'distutils' for large or complex distributions"""

import functools
import os
import re
import warnings

import _distutils_hack.override  # noqa: F401

import distutils.core
from distutils.errors import DistutilsOptionError
from distutils.util import convert_path as _convert_path

from ._deprecation_warning import SetuptoolsDeprecationWarning

import setuptools.version
from setuptools.extension import Extension
from setuptools.dist import Distribution
from setuptools.depends import Require
from setuptools.discovery import PackageFinder, PEP420PackageFinder
from . import monkey
from . import logging


__all__ = [
    'setup',
    'Distribution',
    'Command',
    'Extension',
    'Require',
    'SetuptoolsDeprecationWarning',
    'find_packages',
    'find_namespace_packages',
]

__version__ = setuptools.version.__version__

bootstrap_install_from = None


find_packages = PackageFinder.find
find_namespace_packages = PEP420PackageFinder.find


def _install_setup_requires(attrs):
    # Note: do not use `setuptools.Distribution` directly, as
    # our PEP 517 backend patch `distutils.core.Distribution`.
    class MinimalDistribution(distutils.core.Distribution):
        """
        A minimal version of a distribution for supporting the
        fetch_build_eggs interface.
        """

        def __init__(self, attrs):
            _incl = 'dependency_links', 'setup_requires'
            filtered = {k: attrs[k] for k in set(_incl) & set(attrs)}
            super().__init__(filtered)
            # Prevent accidentally triggering discovery with incomplete set of attrs
            self.set_defaults._disable()

        def _get_project_config_files(self, filenames=None):
            """Ignore ``pyproject.toml``, they are not related to setup_requires"""
            try:
                cfg, toml = super()._split_standard_project_metadata(filenames)
                return cfg, ()
            except Exception:
                return filenames, ()

        def finalize_options(self):
            """
            Disable finalize_options to avoid building the working set.
            Ref #2158.
            """

    dist = MinimalDistribution(attrs)

    # Honor setup.cfg's options.
    dist.parse_config_files(ignore_option_errors=True)
    if dist.setup_requires:
        dist.fetch_build_eggs(dist.setup_requires)


def setup(**attrs):
    # Make sure we have any requirements needed to interpret 'attrs'.
    logging.configure()
    _install_setup_requires(attrs)
    return distutils.core.setup(**attrs)


setup.__doc__ = distutils.core.setup.__doc__


_Command = monkey.get_unpatched(distutils.core.Command)


class Command(_Command):
    """
    Setuptools internal actions are organized using a *command design pattern*.
    This means that each action (or group of closely related actions) executed during
    the build should be implemented as a ``Command`` subclass.

    These commands are abstractions and do not necessarily correspond to a command that
    can (or should) be executed via a terminal, in a CLI fashion (although historically
    they would).

    When creating a new command from scratch, custom defined classes **SHOULD** inherit
    from ``setuptools.Command`` and implement a few mandatory methods.
    Between these mandatory methods, are listed:

    .. method:: initialize_options(self)

        Set or (reset) all options/attributes/caches used by the command
        to their default values. Note that these values may be overwritten during
        the build.

    .. method:: finalize_options(self)

        Set final values for all options/attributes used by the command.
        Most of the time, each option/attribute/cache should only be set if it does not
        have any value yet (e.g. ``if self.attr is None: self.attr = val``).

    .. method:: run(self)

        Execute the actions intended by the command.
        (Side effects **SHOULD** only take place when ``run`` is executed,
        for example, creating new files or writing to the terminal output).

    A useful analogy for command classes is to think of them as subroutines with local
    variables called "options". The options are "declared" in ``initialize_options()``
    and "defined" (given their final values, aka "finalized") in ``finalize_options()``,
    both of which must be defined by every command class. The "body" of the subroutine,
    (where it does all the work) is the ``run()`` method.
    Between ``initialize_options()`` and ``finalize_options()``, ``setuptools`` may set
    the values for options/attributes based on user's input (or circumstance),
    which means that the implementation should be careful to not overwrite values in
    ``finalize_options`` unless necessary.

    Please note that other commands (or other parts of setuptools) may also overwrite
    the values of the command's options/attributes multiple times during the build
    process.
    Therefore it is important to consistently implement ``initialize_options()`` and
    ``finalize_options()``. For example, all derived attributes (or attributes that
    depend on the value of other attributes) **SHOULD** be recomputed in
    ``finalize_options``.

    When overwriting existing commands, custom defined classes **MUST** abide by the
    same APIs implemented by the original class. They also **SHOULD** inherit from the
    original class.
    """

    command_consumes_arguments = False

    def __init__(self, dist, **kw):
        """
        Construct the command for dist, updating
        vars(self) with any keyword parameters.
        """
        super().__init__(dist)
        vars(self).update(kw)

    def _ensure_stringlike(self, option, what, default=None):
        val = getattr(self, option)
        if val is None:
            setattr(self, option, default)
            return default
        elif not isinstance(val, str):
            raise DistutilsOptionError(
                "'%s' must be a %s (got `%s`)" % (option, what, val)
            )
        return val

    def ensure_string_list(self, option):
        r"""Ensure that 'option' is a list of strings. If 'option' is
        currently a string, we split it either on /,\s*/ or /\s+/, so
        "foo bar baz", "foo,bar,baz", and "foo, bar baz" all become
        ["foo", "bar", "baz"].

        ..
           TODO: This method seems to be similar to the one in ``distutils.cmd``
           Probably it is just here for backward compatibility with old Python versions?

        :meta private:
        """
        val = getattr(self, option)
        if val is None:
            return
        elif isinstance(val, str):
            setattr(self, option, re.split(r',\s*|\s+', val))
        else:
            if isinstance(val, list):
                ok = all(isinstance(v, str) for v in val)
            else:
                ok = False
            if not ok:
                raise DistutilsOptionError(
                    "'%s' must be a list of strings (got %r)" % (option, val)
                )

    def reinitialize_command(self, command, reinit_subcommands=0, **kw):
        cmd = _Command.reinitialize_command(self, command, reinit_subcommands)
        vars(cmd).update(kw)
        return cmd


def _find_all_simple(path):
    """
    Find all files under 'path'
    """
    results = (
        os.path.join(base, file)
        for base, dirs, files in os.walk(path, followlinks=True)
        for file in files
    )
    return filter(os.path.isfile, results)


def findall(dir=os.curdir):
    """
    Find all files under 'dir' and return the list of full filenames.
    Unless dir is '.', return full filenames with dir prepended.
    """
    files = _find_all_simple(dir)
    if dir == os.curdir:
        make_rel = functools.partial(os.path.relpath, start=dir)
        files = map(make_rel, files)
    return list(files)


@functools.wraps(_convert_path)
def convert_path(pathname):
    from inspect import cleandoc

    msg = """
    The function `convert_path` is considered internal and not part of the public API.
    Its direct usage by 3rd-party packages is considered deprecated and the function
    may be removed in the future.
    """
    warnings.warn(cleandoc(msg), SetuptoolsDeprecationWarning)
    return _convert_path(pathname)


class sic(str):
    """Treat this string as-is (https://en.wikipedia.org/wiki/Sic)"""


# Apply monkey patches
monkey.patch_all()
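
The `Command` docstring above describes the initialize/finalize/run contract; a minimal subclass following that contract might look like this (the command itself is a hypothetical example, not part of setuptools):

from setuptools import Command

class clean_docs(Command):
    """Hypothetical command: remove built documentation."""

    description = "remove built documentation"
    user_options = [('build-dir=', None, 'directory to remove')]

    def initialize_options(self):
        # declare every option with a default value
        self.build_dir = None

    def finalize_options(self):
        # only fill in values that are still unset
        if self.build_dir is None:
            self.build_dir = 'docs/_build'

    def run(self):
        # side effects belong here, not in the two methods above
        import shutil
        shutil.rmtree(self.build_dir, ignore_errors=True)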
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/importlib_metadata/_itertools.py
DELETED
@@ -1,73 +0,0 @@
from itertools import filterfalse


def unique_everseen(iterable, key=None):
    "List unique elements, preserving order. Remember all elements ever seen."
    # unique_everseen('AAAABBBCCDAABBB') --> A B C D
    # unique_everseen('ABBCcAD', str.lower) --> A B C D
    seen = set()
    seen_add = seen.add
    if key is None:
        for element in filterfalse(seen.__contains__, iterable):
            seen_add(element)
            yield element
    else:
        for element in iterable:
            k = key(element)
            if k not in seen:
                seen_add(k)
                yield element


# copied from more_itertools 8.8
def always_iterable(obj, base_type=(str, bytes)):
    """If *obj* is iterable, return an iterator over its items::

        >>> obj = (1, 2, 3)
        >>> list(always_iterable(obj))
        [1, 2, 3]

    If *obj* is not iterable, return a one-item iterable containing *obj*::

        >>> obj = 1
        >>> list(always_iterable(obj))
        [1]

    If *obj* is ``None``, return an empty iterable:

        >>> obj = None
        >>> list(always_iterable(None))
        []

    By default, binary and text strings are not considered iterable::

        >>> obj = 'foo'
        >>> list(always_iterable(obj))
        ['foo']

    If *base_type* is set, objects for which ``isinstance(obj, base_type)``
    returns ``True`` won't be considered iterable.

        >>> obj = {'a': 1}
        >>> list(always_iterable(obj))  # Iterate over the dict's keys
        ['a']
        >>> list(always_iterable(obj, base_type=dict))  # Treat dicts as a unit
        [{'a': 1}]

    Set *base_type* to ``None`` to avoid any special handling and treat objects
    Python considers iterable as iterable:

        >>> obj = 'foo'
        >>> list(always_iterable(obj, base_type=None))
        ['f', 'o', 'o']
    """
    if obj is None:
        return iter(())

    if (base_type is not None) and isinstance(obj, base_type):
        return iter((obj,))

    try:
        return iter(obj)
    except TypeError:
        return iter((obj,))
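
The comment examples in `unique_everseen` translate directly to the following calls (each result follows from the code above):

list(unique_everseen('AAAABBBCCDAABBB'))         # ['A', 'B', 'C', 'D']
list(unique_everseen('ABBCcAD', key=str.lower))  # ['A', 'B', 'C', 'D']
list(always_iterable('foo'))                     # ['foo'] (strings stay atomic)
list(always_iterable(None))                      # []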
spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/filepost.py
DELETED
@@ -1,98 +0,0 @@
from __future__ import absolute_import

import binascii
import codecs
import os
from io import BytesIO

from .fields import RequestField
from .packages import six
from .packages.six import b

writer = codecs.lookup("utf-8")[3]


def choose_boundary():
    """
    Our embarrassingly-simple replacement for mimetools.choose_boundary.
    """
    boundary = binascii.hexlify(os.urandom(16))
    if not six.PY2:
        boundary = boundary.decode("ascii")
    return boundary


def iter_field_objects(fields):
    """
    Iterate over fields.

    Supports list of (k, v) tuples and dicts, and lists of
    :class:`~urllib3.fields.RequestField`.

    """
    if isinstance(fields, dict):
        i = six.iteritems(fields)
    else:
        i = iter(fields)

    for field in i:
        if isinstance(field, RequestField):
            yield field
        else:
            yield RequestField.from_tuples(*field)


def iter_fields(fields):
    """
    .. deprecated:: 1.6

    Iterate over fields.

    The addition of :class:`~urllib3.fields.RequestField` makes this function
    obsolete. Instead, use :func:`iter_field_objects`, which returns
    :class:`~urllib3.fields.RequestField` objects.

    Supports list of (k, v) tuples and dicts.
    """
    if isinstance(fields, dict):
        return ((k, v) for k, v in six.iteritems(fields))

    return ((k, v) for k, v in fields)


def encode_multipart_formdata(fields, boundary=None):
    """
    Encode a dictionary of ``fields`` using the multipart/form-data MIME format.

    :param fields:
        Dictionary of fields or list of (key, :class:`~urllib3.fields.RequestField`).

    :param boundary:
        If not specified, then a random boundary will be generated using
        :func:`urllib3.filepost.choose_boundary`.
    """
    body = BytesIO()
    if boundary is None:
        boundary = choose_boundary()

    for field in iter_field_objects(fields):
        body.write(b("--%s\r\n" % (boundary)))

        writer(body).write(field.render_headers())
        data = field.data

        if isinstance(data, int):
            data = str(data)  # Backwards compatibility

        if isinstance(data, six.text_type):
            writer(body).write(data)
        else:
            body.write(data)

        body.write(b"\r\n")

    body.write(b("--%s--\r\n" % (boundary)))

    content_type = str("multipart/form-data; boundary=%s" % boundary)

    return body.getvalue(), content_type
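
`encode_multipart_formdata` accepts both plain key/value fields and `(filename, data, mime_type)` tuples via `RequestField.from_tuples`; a small usage sketch (field names hypothetical):

from urllib3.filepost import encode_multipart_formdata

fields = {
    'comment': 'hello',                                        # plain form field
    'attachment': ('report.txt', b'contents', 'text/plain'),   # file upload
}
body, content_type = encode_multipart_formdata(fields)
# content_type is e.g. 'multipart/form-data; boundary=<32 hex chars>'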
spaces/BlitzenPrancer/TheBloke-guanaco-65B-HF/README.md
DELETED
@@ -1,12 +0,0 @@
---
title: TheBloke Guanaco 65B HF
emoji: 🐨
colorFrom: yellow
colorTo: indigo
sdk: gradio
sdk_version: 3.33.1
app_file: app.py
pinned: false
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/CNXT/GPTx/app.py
DELETED
@@ -1,3 +0,0 @@
import gradio as gr

gr.Interface.load("models/gpt2").launch()
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/config/__init__.py
DELETED
@@ -1,13 +0,0 @@
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from .compat import downgrade_config, upgrade_config
from .config import CfgNode, get_cfg, global_cfg, set_global_cfg, configurable

__all__ = [
    "CfgNode",
    "get_cfg",
    "global_cfg",
    "set_global_cfg",
    "downgrade_config",
    "upgrade_config",
    "configurable",
]
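
These re-exports form detectron2's config entry point; typical usage of `get_cfg` looks roughly like this (config file and checkpoint paths are hypothetical):

from detectron2.config import get_cfg

cfg = get_cfg()                               # CfgNode populated with detectron2's defaults
cfg.merge_from_file('configs/my_model.yaml')  # hypothetical config file
cfg.MODEL.WEIGHTS = 'model_final.pth'         # hypothetical checkpoint
cfg.freeze()                                  # make the node read-only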
spaces/CVPR/LIVE/shape.h
DELETED
@@ -1,169 +0,0 @@
#pragma once

#include "diffvg.h"
#include "color.h"
#include "ptr.h"
#include "vector.h"
#include "matrix.h"

enum class ShapeType {
    Circle,
    Ellipse,
    Path,
    Rect
};

struct Circle {
    float radius;
    Vector2f center;

    ptr<void> get_ptr() {
        return ptr<void>(this);
    }
};

struct Ellipse {
    Vector2f radius;
    Vector2f center;

    ptr<void> get_ptr() {
        return ptr<void>(this);
    }
};

struct Path {
    Path(ptr<int> num_control_points,
         ptr<float> points,
         ptr<float> thickness,
         int num_base_points,
         int num_points,
         bool is_closed,
         bool use_distance_approx) :
        num_control_points(num_control_points.get()),
        points(points.get()),
        thickness(thickness.get()),
        num_base_points(num_base_points),
        num_points(num_points),
        is_closed(is_closed),
        use_distance_approx(use_distance_approx) {}

    int *num_control_points;
    float *points;
    float *thickness;
    int num_base_points;
    int num_points;
    bool is_closed;
    bool use_distance_approx;

    bool has_thickness() const {
        return thickness != nullptr;
    }
    void copy_to(ptr<float> points, ptr<float> thickness) const;

    ptr<void> get_ptr() {
        return ptr<void>(this);
    }
};

struct Rect {
    Vector2f p_min;
    Vector2f p_max;

    ptr<void> get_ptr() {
        return ptr<void>(this);
    }
};

struct Shape {
    Shape() {}
    Shape(const ShapeType &type,
          ptr<void> shape_ptr,
          float stroke_width)
        : type(type), ptr(shape_ptr.get()), stroke_width(stroke_width) {}

    Circle as_circle() const {
        return *(Circle*)ptr;
    }

    Ellipse as_ellipse() const {
        return *(Ellipse*)ptr;
    }

    Path as_path() const {
        return *(Path*)ptr;
    }

    Rect as_rect() const {
        return *(Rect*)ptr;
    }

    ShapeType type;
    void *ptr;
    float stroke_width;
};

struct ShapeGroup {
    ShapeGroup() {}
    ShapeGroup(ptr<int> shape_ids,
               int num_shapes,
               const ColorType &fill_color_type,
               ptr<void> fill_color,
               const ColorType &stroke_color_type,
               ptr<void> stroke_color,
               bool use_even_odd_rule,
               ptr<float> shape_to_canvas)
        : shape_ids(shape_ids.get()),
          num_shapes(num_shapes),
          fill_color_type(fill_color_type),
          fill_color(fill_color.get()),
          stroke_color_type(stroke_color_type),
          stroke_color(stroke_color.get()),
          use_even_odd_rule(use_even_odd_rule),
          shape_to_canvas(shape_to_canvas.get()) {
        canvas_to_shape = inverse(this->shape_to_canvas);
    }

    bool has_fill_color() const {
        return fill_color != nullptr;
    }

    Constant fill_color_as_constant() const {
        return *(Constant*)fill_color;
    }

    LinearGradient fill_color_as_linear_gradient() const {
        return *(LinearGradient*)fill_color;
    }

    RadialGradient fill_color_as_radial_gradient() const {
        return *(RadialGradient*)fill_color;
    }

    bool has_stroke_color() const {
        return stroke_color != nullptr;
    }

    Constant stroke_color_as_constant() const {
        return *(Constant*)stroke_color;
    }

    LinearGradient stroke_color_as_linear_gradient() const {
        return *(LinearGradient*)stroke_color;
    }

    RadialGradient stroke_color_as_radial_gradient() const {
        return *(RadialGradient*)stroke_color;
    }

    void copy_to(ptr<float> shape_to_canvas) const;

    int *shape_ids;
    int num_shapes;
    ColorType fill_color_type;
    void *fill_color;
    ColorType stroke_color_type;
    void *stroke_color;
    bool use_even_odd_rule;
    Matrix3x3f canvas_to_shape;
    Matrix3x3f shape_to_canvas;
};
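
These structs back diffvg's Python bindings; on the Python side a scene is assembled from shapes and `ShapeGroup`s roughly as follows (a sketch assuming the pydiffvg API that wraps these structs; argument order and defaults may differ across versions):

import torch
import pydiffvg

# One filled circle, referenced by index from a shape group
circle = pydiffvg.Circle(radius=torch.tensor(40.0),
                         center=torch.tensor([128.0, 128.0]))
group = pydiffvg.ShapeGroup(shape_ids=torch.tensor([0]),
                            fill_color=torch.tensor([0.3, 0.6, 0.3, 1.0]))
scene_args = pydiffvg.RenderFunction.serialize_scene(256, 256, [circle], [group])
img = pydiffvg.RenderFunction.apply(256, 256, 2, 2, 0, None, *scene_args)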
spaces/CVPR/LIVE/solve.h
DELETED
@@ -1,59 +0,0 @@
#pragma once

#include "diffvg.h"

template <typename T>
DEVICE
inline bool solve_quadratic(T a, T b, T c, T *t0, T *t1) {
    // From https://github.com/mmp/pbrt-v3/blob/master/src/core/pbrt.h#L419
    T discrim = square(b) - 4 * a * c;
    if (discrim < 0) {
        return false;
    }
    T root_discrim = sqrt(discrim);

    T q;
    if (b < 0) {
        q = -0.5f * (b - root_discrim);
    } else {
        q = -0.5f * (b + root_discrim);
    }
    *t0 = q / a;
    *t1 = c / q;
    if (*t0 > *t1) {
        swap_(*t0, *t1);
    }
    return true;
}

template <typename T>
DEVICE
inline int solve_cubic(T a, T b, T c, T d, T t[3]) {
    if (fabs(a) < 1e-6f) {
        if (solve_quadratic(b, c, d, &t[0], &t[1])) {
            return 2;
        } else {
            return 0;
        }
    }
    // normalize cubic equation
    b /= a;
    c /= a;
    d /= a;
    T Q = (b * b - 3 * c) / 9.f;
    T R = (2 * b * b * b - 9 * b * c + 27 * d) / 54.f;
    if (R * R < Q * Q * Q) {
        // 3 real roots
        T theta = acos(R / sqrt(Q * Q * Q));
        t[0] = -2.f * sqrt(Q) * cos(theta / 3.f) - b / 3.f;
        t[1] = -2.f * sqrt(Q) * cos((theta + 2.f * T(M_PI)) / 3.f) - b / 3.f;
        t[2] = -2.f * sqrt(Q) * cos((theta - 2.f * T(M_PI)) / 3.f) - b / 3.f;
        return 3;
    } else {
        T A = R > 0 ? -pow(R + sqrt(R * R - Q * Q * Q), T(1./3.)):
                       pow(-R + sqrt(R * R - Q * Q * Q), T(1./3.));
        T B = fabs(A) > 1e-6f ? Q / A : T(0);
        t[0] = (A + B) - b / T(3);
        return 1;
    }
}
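
The three-real-root branch of `solve_cubic` is the standard trigonometric method: normalize by `a`, form Q and R, and when R² < Q³ recover the roots from arccos. A quick numeric cross-check of that formula against a generic root finder (Python used for convenience):

import numpy as np

# (t - 1)(t - 2)(t - 3) = t^3 - 6 t^2 + 11 t - 6 has R^2 < Q^3,
# so the trigonometric branch applies.
b, c, d = -6.0, 11.0, -6.0                     # already normalized (a = 1)
Q = (b * b - 3 * c) / 9.0                      # Q = 1/3
R = (2 * b**3 - 9 * b * c + 27 * d) / 54.0     # R = 0
theta = np.arccos(R / np.sqrt(Q**3))
roots = [-2.0 * np.sqrt(Q) * np.cos((theta + k * 2.0 * np.pi) / 3.0) - b / 3.0
         for k in (0, 1, -1)]                  # same three branches as solve_cubic
print(sorted(roots))                           # ~[1.0, 2.0, 3.0]
print(sorted(np.roots([1.0, b, c, d]).real))   # matches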