parquet-converter committed
Commit 23db94e · 1 Parent(s): 9360e7d

Update parquet files (step 38 of 296)
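For context, the parquet-converter bot rewrites a repository's dataset files into Apache Parquet, a columnar format that is cheaper to stream and query. The snippet below is a minimal, illustrative sketch of such a conversion step only, not the bot's actual implementation; it assumes pandas and pyarrow are installed, and the file names are hypothetical.

```python
# Illustrative sketch of a Parquet conversion step (not the
# parquet-converter bot's actual code).
# Assumes pandas + pyarrow are installed; file names are hypothetical.
import pandas as pd

df = pd.read_csv("train.csv")                     # load a source split
df.to_parquet("train.parquet", engine="pyarrow")  # write it back as columnar Parquet
```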

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/Wake Up Sid 720p Dvdrip Torrent.md +0 -74
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Office 365 Offline Installer for Free and Install it on Your PC or Mac.md +0 -30
  3. spaces/1gistliPinn/ChatGPT4/Examples/21 Jump Street 720p Yify 208.md +0 -9
  4. spaces/1gistliPinn/ChatGPT4/Examples/Biologija Pries Egzamina Knyga Pdf 105.md +0 -8
  5. spaces/1gistliPinn/ChatGPT4/Examples/Contaplus Elite 2012 Keygen Free TOP.md +0 -6
  6. spaces/1gistliPinn/ChatGPT4/Examples/Decipher Backup Repair Keygen !!HOT!! Generator.md +0 -6
  7. spaces/1gistliPinn/ChatGPT4/Examples/Diptrace Full Version Free Download Crack REPACK.md +0 -29
  8. spaces/1gistliPinn/ChatGPT4/Examples/FileMenu Tools 7.7.0.0 With Crack (Latest) FREE.md +0 -6
  9. spaces/1gistliPinn/ChatGPT4/Examples/Flatiron 3ds Max 2012 Torrent.md +0 -72
  10. spaces/1phancelerku/anime-remove-background/Azrbaycan thsil sisteminin kurikulum az sndi Niy vacibdir v nec ilyir?.md +0 -163
  11. spaces/1phancelerku/anime-remove-background/Castle Clash Mod Apk 2022 Enjoy the Best Features of the Game with No Ads.md +0 -145
  12. spaces/1phancelerku/anime-remove-background/Download Ludo for PC and Challenge Your Friends Online.md +0 -153
  13. spaces/2023Liu2023/bingo/tailwind.config.js +0 -48
  14. spaces/232labs/VToonify/vtoonify/model/raft/train_standard.sh +0 -6
  15. spaces/AHzizi/WaifuVoiceGen/modules.py +0 -388
  16. spaces/AIFILMS/generate_human_motion/pyrender/pyrender/offscreen.py +0 -160
  17. spaces/AIFILMS/generate_human_motion/pyrender/pyrender/texture.py +0 -259
  18. spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/commons/rel_transformer_history.py +0 -628
  19. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb8-150e_deepfashion2_short_sleeved_outwear_256x192/td_hm_res50_4xb8-150e_deepfashion2_short_sleeved_outwear_256x192.py +0 -2861
  20. spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/server/websearch/generateQuery.ts +0 -13
  21. spaces/Adapter/T2I-Adapter/ldm/inference_base.py +0 -282
  22. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/CreateSpace.js +0 -10
  23. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/slider/PercentToPosition.js +0 -13
  24. spaces/Akmyradov/TurkmenTTSweSTT/asr.py +0 -41
  25. spaces/Alichuan/VITS-Umamusume-voice-synthesizer/text/ngu_dialect.py +0 -30
  26. spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/mapper/training/__init__.py +0 -0
  27. spaces/Amrrs/DragGan-Inversion/PTI/torch_utils/ops/upfirdn2d.h +0 -59
  28. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/altdiffusion/__init__.py +0 -0
  29. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/controlnet/test_flax_controlnet.py +0 -127
  30. spaces/Andy1621/uniformer_image_detection/configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py +0 -8
  31. spaces/Andy1621/uniformer_image_detection/configs/vfnet/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py +0 -16
  32. spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r101-d8_480x480_40k_pascal_context_59.py +0 -2
  33. spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/Training_PRO/train_utils.py +0 -279
  34. spaces/ArtGAN/Segment-Anything-Video/app.py +0 -319
  35. spaces/Arulkumar03/GroundingDINO_SOTA_Zero_Shot_Model/groundingdino/config/GroundingDINO_SwinT_OGC.py +0 -43
  36. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/models/direct_url.py +0 -237
  37. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/resolvelib/structs.py +0 -170
  38. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/webencodings/mklabels.py +0 -59
  39. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/config/__init__.py +0 -35
  40. spaces/Avinash-12035/MyGenAIChatBot/app.py +0 -34
  41. spaces/Benson/text-generation/Examples/Anime Avatar.md +0 -101
  42. spaces/Big-Web/MMSD/env/Lib/site-packages/boto3/docs/service.py +0 -199
  43. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/exceptions.py +0 -323
  44. spaces/BramVanroy/mateo-demo/README.md +0 -12
  45. spaces/Burcin/ExtractiveSummarizer/app.py +0 -118
  46. spaces/CVPR/LIVE/edge_query.h +0 -7
  47. spaces/CVPR/LIVE/thrust/thrust/system/detail/sequential/general_copy.h +0 -147
  48. spaces/CVPR/regionclip-demo/setup.py +0 -247
  49. spaces/Chintan-Donda/KKMS-KSSW-HF/src/kkms_kssw.py +0 -77
  50. spaces/ChrisPreston/diff-svc_minato_aqua/modules/diff/diffusion.py +0 -312
spaces/1acneusushi/gradio-2dmoleculeeditor/Wake Up Sid 720p Dvdrip Torrent.md DELETED
@@ -1,74 +0,0 @@
- ## Wake Up Sid 720p Dvdrip Torrent
-
- **CLICK HERE ⚙⚙⚙ [https://jinyurl.com/2tA06v](https://jinyurl.com/2tA06v)**
-
- # How to Download Wake Up Sid (2009) in High Quality
-
- Wake Up Sid is a 2009 Indian comedy-drama film directed by Ayan Mukerji and starring Ranbir Kapoor and Konkona Sen Sharma. The film tells the story of Sid Mehra, a spoiled and aimless young man who finds his true calling after meeting Aisha, an aspiring writer from Calcutta.
-
- If you want to watch this movie in high quality, you can download it from torrent sites using a VPN service. A VPN service will protect your privacy and security by encrypting your traffic and hiding your IP address from your ISP and government agencies. Here are the steps to download Wake Up Sid (2009) in 720p or 1080p Blu-ray quality:
-
- 1. Download and install a VPN service on your device. We recommend Hide VPN as it is fast, reliable and affordable.
- 2. Connect to a VPN server in a country where torrenting is legal, such as Switzerland or the Netherlands.
- 3. Go to a torrent site that has Wake Up Sid (2009) available for download. We recommend YTS.mx or YTS.rs as they have high-quality torrents and subtitles.
- 4. Search for Wake Up Sid (2009) and choose the desired quality (720p or 1080p). Click on the download button or magnet link to start the download.
- 5. Open the torrent file with your preferred torrent client and wait for the download to finish.
- 6. Enjoy watching Wake Up Sid (2009) in high quality!
-
- Note: Downloading torrents is risky and may expose you to legal issues. We do not condone or encourage piracy and advise you to respect the copyrights of the creators. Please use this guide at your own risk.
-
- Wake Up Sid (2009) is a refreshing and realistic portrayal of urban youth in India. The film explores the themes of friendship, love, family, career and self-discovery through the eyes of Sid, who undergoes a transformation from a lazy and irresponsible boy to a mature and responsible man. The film also showcases the vibrant and cosmopolitan city of Mumbai, which serves as a backdrop for Sid's journey.
-
- The film received positive reviews from critics and audiences alike. It was praised for its direction, screenplay, performances, music and cinematography. It was also a commercial success, grossing over ₹750 million worldwide. It won several awards and nominations, including three Filmfare Awards for Best Debut Director, Best Supporting Actress and Best Story.
-
- Wake Up Sid (2009) is a must-watch for anyone who loves a good coming-of-age story with a touch of humor and romance. It is a film that will make you laugh, cry and cheer for Sid as he wakes up to his true potential. You can download it from torrent sites in high quality using a VPN service and enjoy it on your device.
-
- In conclusion, Wake Up Sid (2009) is a brilliant and engaging film that will appeal to anyone who loves a good story with relatable characters and realistic situations. The film is a perfect example of how a simple and honest story can touch the hearts of millions of viewers. If you want to watch this film in high quality, you can download it from torrent sites using a VPN service and enjoy it on your device. Wake Up Sid (2009) is a film that will make you wake up to life and its possibilities.
-
- 145887f19f
 
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Office 365 Offline Installer for Free and Install it on Your PC or Mac.md DELETED
@@ -1,30 +0,0 @@
- <br />
- <h1>How to Download Office 365 Offline Installer for Free</h1>
- <p>Office 365 is a subscription-based service that offers various Microsoft products such as Word, Excel, PowerPoint, Outlook, OneNote and more. You can access these products online or install them on your PC or Mac. However, if you have a slow or unreliable internet connection, you might want to download the Office 365 offline installer for free and install it on your device without any interruptions.</p>
- <h2>What is Office 365 Offline Installer?</h2>
- <p>Office 365 offline installer is a file that contains all the necessary components to install Office 365 on your PC or Mac without an internet connection. You can download this file from your Microsoft account portal and save it to a USB drive or a disc. You can then use this file to install Office 365 on any device that meets the system requirements.</p>
- <h2>office 365 offline installer free</h2><br /><p><b><b>Download Zip</b> &middot;&middot;&middot;&middot;&middot; <a href="https://byltly.com/2uKzCq">https://byltly.com/2uKzCq</a></b></p><br /><br />
- <h2>How to Download Office 365 Offline Installer for Free?</h2>
- <p>To download the Office 365 offline installer for free, you will need to have an active Office 365 subscription and a Microsoft account. You will also need to be connected to the internet to download this file, but once that's done, you can install Office 365 offline on your device at your convenience. Here are the steps to follow:</p>
- <ol>
- <li>Go to <a href="https://www.office.com">www.office.com</a> and sign in with the Microsoft account associated with your Office 365 subscription.</li>
- <li>Select Install Office from the home page.</li>
- <li>In the Download and install window, select Other options.</li>
- <li>Check the box Download an offline installer and select the language you want to install Office 365 in.</li>
- <li>Select Download and choose a location to save the file.</li>
- <li>Wait for the download to complete. The file size may vary depending on your subscription plan and language.</li>
- </ol>
- <h2>How to Install Office 365 Offline?</h2>
- <p>After you have downloaded the Office 365 offline installer file, you can install it on your PC or Mac by following these steps:</p>
- <ol>
- <li>Locate the file you downloaded and double-click it to open it.</li>
- <li>A new virtual drive will appear in your directory, for example (D:). This drive contains the Office 365 installation files.</li>
- <li>Select the Office folder from the virtual drive and then double-click either Setup32.exe to install the 32-bit version of Office 365, or Setup64.exe to install the 64-bit version.</li>
- <li>Follow the on-screen instructions to complete the installation.</li>
- <li>Activate Office 365 by signing in with your Microsoft account when prompted.</li>
- </ol>
- <h2>Conclusion</h2>
- <p>Office 365 offline installer is a convenient way to install Office 365 on your PC or Mac without an internet connection. You can download this file for free from your Microsoft account portal and use it to install Office 365 on any device that meets the system requirements. You can also save this file to a USB drive or a disc for later use. Enjoy using Office 365 offline!</p>
- <p></p> ddb901b051<br />
- <br />
- <br />
 
spaces/1gistliPinn/ChatGPT4/Examples/21 Jump Street 720p Yify 208.md DELETED
@@ -1,9 +0,0 @@
- <h2>21 jump street 720p yify 208</h2><br /><p><b><b>DOWNLOAD</b> >>>>> <a href="https://imgfil.com/2uy1Eb">https://imgfil.com/2uy1Eb</a></b></p><br /><br />
-
- If a teenager is in a position where they cannot support a child, adoption may be a natural alternative. - Adoption allows a couple to adopt a child, thereby giving them ... As with any form of adoption, it can be difficult to adopt.
- However, if you are in a situation where you cannot afford to raise a child, why not consider adoption?
- There are many pros and cons to consider before making a final decision.
- If you are thinking about adopting, consider the following three points to see if it is worth your effort 8a78ff9644<br />
- <br />
- <br />
- <p></p>
 
spaces/1gistliPinn/ChatGPT4/Examples/Biologija Pries Egzamina Knyga Pdf 105.md DELETED
@@ -1,8 +0,0 @@
- <h2>biologija pries egzamina knyga pdf 105</h2><br /><p><b><b>Download Zip</b> &#8250;&#8250;&#8250; <a href="https://imgfil.com/2uxXgh">https://imgfil.com/2uxXgh</a></b></p><br /><br />
-
- February 13, 2018 — mentariu-literar df-105-gavrkar . -dream book-pdf -ru.
- Dream Interpretation - dream interpretation - interpretation of dreams, Miller's dream book, Freud's dream book, Vanga's dream book, Nostradamus' dream book, Jung's dream book, Loff's dream book, David Loff's dream book, Freud's dream book, Miller's dream book, Juno's dream book, Freud's dream book, Vanga's dream book, Nostradamus' dream book, Jung's dream book, dream book.
- Dream Interpretation - online for free, interpretation of dreams, Miller's dream book, Freud's dream book, Vanga's dream book, Nostradamus' dream book, Jung's dream book, Loff's dream book, David Loff's dream book, Freud's dream book, Miller's dream book, Juno's dream book, dream book. 8a78ff9644<br />
- <br />
- <br />
- <p></p>
 
spaces/1gistliPinn/ChatGPT4/Examples/Contaplus Elite 2012 Keygen Free TOP.md DELETED
@@ -1,6 +0,0 @@
- <h2>contaplus elite 2012 keygen free</h2><br /><p><b><b>Download</b> &#9913;&#9913;&#9913; <a href="https://imgfil.com/2uxZ22">https://imgfil.com/2uxZ22</a></b></p><br /><br />
- <br />
- Free Download contaplus keygen, keygen contaplus elite 2012, keygen contaplus 2012, contaplus crack keygen, contaplus flex 2015 keygen, ... 4d29de3e1b<br />
- <br />
- <br />
- <p></p>
 
spaces/1gistliPinn/ChatGPT4/Examples/Decipher Backup Repair Keygen !!HOT!! Generator.md DELETED
@@ -1,6 +0,0 @@
- <h2>decipher backup repair keygen generator</h2><br /><p><b><b>Download Zip</b> &#10022; <a href="https://imgfil.com/2uxYZa">https://imgfil.com/2uxYZa</a></b></p><br /><br />
- <br />
- Decipher Textmessage License Code Mac Keygen; DECIPHER BACKUP REPAIR; Decipher TextMessage ... We provide RV generator repair and installation. 1fdad05405<br />
- <br />
- <br />
- <p></p>
 
spaces/1gistliPinn/ChatGPT4/Examples/Diptrace Full Version Free Download Crack REPACK.md DELETED
@@ -1,29 +0,0 @@
- <br />
- Here is what I created:
-
- <h1>How to Download and Install Diptrace Full Version for Free with Crack</h1>
- <p>Diptrace is a powerful and easy-to-use software for designing and simulating printed circuit boards (PCBs). It offers a comprehensive set of features, such as schematic capture, PCB layout, 3D modeling, autorouting, verification, and export. However, the full version of Diptrace is not free and requires a license key to activate.</p>
- <h2>Diptrace full version free download crack</h2><br /><p><b><b>Download Zip</b> &#9675;&#9675;&#9675; <a href="https://imgfil.com/2uy21T">https://imgfil.com/2uy21T</a></b></p><br /><br />
- <p>If you want to use Diptrace for free without paying for a license, you might be tempted to look for a crack or a patch that can bypass the activation process. However, this is not a good idea for several reasons. First of all, downloading and installing a crack or a patch from an unknown source can expose your computer to malware and viruses that can harm your system and compromise your data. Second, using a cracked or patched version of Diptrace can cause errors and bugs that can affect the performance and quality of your PCB designs. Third, using a cracked or patched version of Diptrace is illegal and unethical, as it violates the terms and conditions of the software and infringes the intellectual property rights of the developers.</p>
- <p>Therefore, the best way to use Diptrace for free is to download and install the official trial version from the official website. The trial version allows you to use all the features of Diptrace for 30 days without any limitations. After 30 days, you can either purchase a license key to continue using the full version or switch to the freeware version. The freeware version has some restrictions on the number of pins and signal layers, but it still allows you to design and simulate simple PCBs for personal or educational purposes.</p>
- <p>To download and install Diptrace full version for free with the trial option, follow these steps:</p>
- <ol>
- <li>Go to <a href="https://diptrace.com/download/download-diptrace/">https://diptrace.com/download/download-diptrace/</a> and click on the "Download" button for your operating system.</li>
- <li>Save the installation file on your computer and run it as an administrator.</li>
- <li>Follow the instructions on the screen to complete the installation process.</li>
- <li>Launch Diptrace and enter your name and email address to register for the trial option.</li>
- <li>Enjoy using Diptrace full version for free for 30 days.</li>
- </ol>
- <p>I hope this helps you with your PCB design project. If you have any questions or feedback, please let me know.</p>
- <p></p>
- Here is what I created:
-
- <p>In this article, I will show you some tips and tricks to improve your PCB design skills using Diptrace. Whether you are a beginner or an expert, you can always learn something new and enhance your productivity and creativity with Diptrace.</p>
- <h2>Tip 1: Use the built-in libraries and components</h2>
- <p>Diptrace comes with a large collection of libraries and components that you can use for your PCB design project. You can access them from the "Library" menu in the schematic or PCB editor. You can also search for a specific component by name, type, or category using the "Find Component" tool. You can also add your own custom components or import them from other sources using the "Component Editor". By using the built-in libraries and components, you can save time and avoid errors in your design.</p>
- <h2>Tip 2: Use the autorouter and manual routing tools</h2>
- <p>Diptrace offers both an autorouter and manual routing tools to help you connect the components on your PCB. The autorouter can automatically route all or some of the nets on your PCB according to your settings and preferences. You can access the autorouter from the "Route" menu in the PCB editor. You can also use the manual routing tools to draw traces, vias, arcs, polygons, and other shapes on your PCB. You can access the manual routing tools from the toolbar or the "Route" menu in the PCB editor. By using the autorouter and manual routing tools, you can optimize your PCB layout and reduce noise and interference.</p>
- <h2>Tip 3: Use the verification and export tools</h2>
- <p>Diptrace also provides verification and export tools to help you check and finalize your PCB design. The verification tools can detect and highlight any errors or warnings on your schematic or PCB, such as unconnected pins, overlapping objects, clearance violations, etc. You can access the verification tools from the "Verification" menu in the schematic or PCB editor. The export tools can generate various output files for your PCB design, such as Gerber files, drill files, netlist files, bill of materials (BOM), etc. You can access the export tools from the "File" menu in the schematic or PCB editor. By using the verification and export tools, you can ensure that your PCB design is error-free and ready for fabrication.</p> d5da3c52bf<br />
- <br />
- <br />
 
spaces/1gistliPinn/ChatGPT4/Examples/FileMenu Tools 7.7.0.0 With Crack (Latest) FREE.md DELETED
@@ -1,6 +0,0 @@
- <h2>FileMenu Tools 7.7.0.0 With Crack (Latest)</h2><br /><p><b><b>DOWNLOAD</b> --->>> <a href="https://imgfil.com/2uy131">https://imgfil.com/2uy131</a></b></p><br /><br />
- <br />
- First Download FileMenu Tools Crack from below Links. If You are ... Download FileMenu Tools 7.7.0.0 Multilingual [Latest] from our software library. FileMenu ... 1fdad05405<br />
- <br />
- <br />
- <p></p>
 
spaces/1gistliPinn/ChatGPT4/Examples/Flatiron 3ds Max 2012 Torrent.md DELETED
@@ -1,72 +0,0 @@
- <br />
- <h1>Flatiron 3ds Max 2012 Torrent: A Guide to 3D Texture Baking</h1>
-
- <p>If you are looking for a plugin that can help you bake full scenes or selections of objects into a single UV map in 3ds Max 2012, you might want to check out Flatiron 3ds Max 2012 Torrent. Flatiron is a four-step Render To Texture plugin based on Unwrella's high-quality automated unwrapping technology. It is a fast, simple, and yet completely configurable automatic unwrapping and baking solution that can greatly speed up the process of baking complex scenes.</p>
- <h2>Flatiron 3ds Max 2012 Torrent</h2><br /><p><b><b>Download File</b> &#10040; <a href="https://imgfil.com/2uxYak">https://imgfil.com/2uxYak</a></b></p><br /><br />
-
- <h2>What are the benefits of using Flatiron 3ds Max 2012 Torrent?</h2>
-
- <p>Flatiron 3ds Max 2012 Torrent can help you create realistic and detailed textures for your 3D models without spending too much time and resources on rendering. Some of the benefits of using Flatiron are:</p>
-
- <ul>
- <li>It can handle thousands of objects at once, making it ideal for real-time game levels, architectural scenes, industrial design and more.</li>
- <li>It can bake any additional shaders, such as diffuse, lightmaps, shadowmaps, global illumination maps, etc. into one texture.</li>
- <li>It can automatically generate optimal UV layouts for each object or group of objects, minimizing distortion and seams.</li>
- <li>It can support multiple texture resolutions and formats, such as JPG, PNG, TGA, BMP, etc.</li>
- <li>It can work with any render engine that supports Render To Texture functionality in 3ds Max 2012.</li>
- </ul>
-
- <h2>How to download and install Flatiron 3ds Max 2012 Torrent?</h2>
-
- <p>If you want to try out Flatiron 3ds Max 2012 Torrent, you can follow these steps:</p>
-
- <ol>
- <li>Download the Flatiron 3ds Max 2012 Torrent file from a reliable source. Make sure you have a torrent client installed on your computer.</li>
- <li>Extract the ZIP file to a folder on your hard drive.</li>
- <li>Run the setup.exe file and follow the instructions to install Flatiron on your computer.</li>
- <li>Copy the crack file from the crack folder and paste it into the installation directory of Flatiron.</li>
- <li>Launch 3ds Max 2012 and activate Flatiron from the plugin manager.</li>
- </ol>
-
- <h2>How to use Flatiron 3ds Max 2012 Torrent?</h2>
-
- <p>Using Flatiron 3ds Max 2012 Torrent is very easy and straightforward. You just need to follow these four steps:</p>
-
- <ol>
- <li>Select the objects or groups of objects that you want to bake into a single UV map.</li>
- <li>Open the Flatiron dialog from the Utilities panel or the Quad menu.</li>
- <li>Choose the texture resolution, format and output folder for your baked texture.</li>
- <li>Click on Start Baking and wait for Flatiron to do its magic.</li>
- </ol>
-
- <p>You can also adjust some advanced settings in Flatiron, such as padding, margin, overlap, smoothing groups, etc. to fine-tune your results. You can also preview your baked texture in the viewport or open it in an image editor for further editing.</p>
- <p></p>
- <h2>Where can you find Flatiron 3ds Max 2012 Torrent tutorials?</h2>
-
- <p>If you want to learn more about how to use Flatiron 3ds Max 2012 Torrent effectively, you can find some helpful tutorials online. Here are some of the best sources for Flatiron tutorials:</p>
-
- <ul>
- <li>The official Flatiron website has a comprehensive user manual that covers all the features and settings of the plugin. You can also find some video tutorials that demonstrate how to use Flatiron for different scenarios and projects.</li>
- <li>The CG Persia website has a torrent download link for Flatiron 3ds Max 2012 Torrent that also includes a video tutorial on how to bake a canalization scene using Flatiron. You can learn some tips and tricks on how to optimize your UV layout and texture quality with Flatiron.</li>
- <li>The YouTube channel of 3d-io games & video production GmbH has several videos that showcase the capabilities and benefits of Flatiron. You can see how Flatiron can handle complex scenes with thousands of objects, how it can bake multiple shaders into one texture, and how it can work with different render engines.</li>
- </ul>
-
- <h2>What are some alternatives to Flatiron 3ds Max 2012 Torrent?</h2>
-
- <p>Flatiron 3ds Max 2012 Torrent is not the only plugin that can help you with 3D texture baking in 3ds Max 2012. There are some other plugins that offer similar or different features and functions for texture baking. Some of the most popular alternatives to Flatiron are:</p>
-
- <ul>
- <li>Unwrella: This is another plugin from 3d-io that is based on the same unwrapping technology as Flatiron. However, Unwrella focuses more on creating optimal UV layouts for each object or group of objects, rather than baking them into a single UV map. Unwrella can also work with any 3D software that supports OBJ export.</li>
- <li>Render To Texture: This is a built-in feature in 3ds Max that allows you to bake textures from any render engine that supports Render To Texture functionality. You can customize your baking settings, such as resolution, format, padding, etc. and preview your results in the viewport.</li>
- <li>BakeMyScan: This is a free plugin that can help you bake high-poly models into low-poly models with textures. It can also optimize your mesh topology and reduce your polygon count. BakeMyScan can work with any render engine that supports Render To Texture functionality.</li>
- </ul>
-
- <h2>Conclusion</h2>
-
- <p>Flatiron 3ds Max 2012 Torrent is a powerful and versatile plugin that can help you create stunning textures for your 3D models in a matter of minutes. It can handle complex scenes with ease and produce high-quality results with minimal effort. If you are looking for a plugin that can simplify and speed up your texture baking workflow in 3ds Max 2012, you should definitely give Flatiron a try.</p> 3cee63e6c2<br />
- <br />
- <br />
 
spaces/1phancelerku/anime-remove-background/Azrbaycan thsil sisteminin kurikulum az sndi Niy vacibdir v nec ilyir?.md DELETED
@@ -1,163 +0,0 @@
- <br />
- <h1>Kurikulum az: What is it and why is it important?</h1>
- <p>Kurikulum az is a term that refers to the modern curriculum model that has been implemented in Azerbaijan since 2016. It is based on the principles of student-centered, competency-based, and outcome-oriented education. It aims to provide students with the knowledge, skills, values, and attitudes that they need to succeed in the 21st century. But what exactly is kurikulum az and why is it important for the development of education in Azerbaijan? In this article, we will explore the meaning, structure, content, benefits, and challenges of kurikulum az.</p>
- <h2>Introduction</h2>
- <p>Kurikulum az is derived from the word "curriculum", which means "a course of study". However, kurikulum az is more than just a list of subjects and topics that students have to learn. It is a comprehensive framework that defines the purpose, content, process, assessment, and evaluation of education in Azerbaijan. It covers all levels of education from preschool to higher education. It also reflects the national identity, culture, values, and aspirations of Azerbaijan.</p>
- <h2>kurikulum az</h2><br /><p><b><b>Download File</b> &#10040; <a href="https://jinyurl.com/2uNKmj">https://jinyurl.com/2uNKmj</a></b></p><br /><br />
- <p>The main goals of kurikulum az are to:</p>
- <ul>
- <li>Ensure that students acquire the essential knowledge and skills that are relevant to their personal, social, and professional development</li>
- <li>Develop students' key competencies such as critical thinking, creativity, communication, collaboration, digital literacy, civic literacy, etc.</li>
- <li>Foster students' lifelong learning habits and attitudes such as curiosity, initiative, responsibility, self-regulation, etc.</li>
- <li>Prepare students for the challenges and opportunities of the globalized world</li>
- </ul>
- <p>The main principles of kurikulum az are:</p>
- <ul>
- <li>Student-centeredness: Kurikulum az puts the needs, interests, abilities, and preferences of students at the center of education. It allows students to have more choice, voice, and agency in their learning. It also encourages students to learn by doing, discovering, solving problems, and creating products.</li>
- <li>Competency-basedness: Kurikulum az focuses on developing students' competencies rather than memorizing facts. Competencies are complex combinations of knowledge, skills, values, and attitudes that enable students to perform tasks effectively in various contexts. Kurikulum az defines eight key competencies that students should master by the end of their education.</li>
- <li>Outcome-orientedness: Kurikulum az defines clear and measurable learning outcomes for each subject and course. Learning outcomes are statements that describe what students should know, be able to do, and value as a result of their learning. Learning outcomes guide the teaching, learning, and assessment processes in kurikulum az.</li>
- </ul>
- <p>Kurikulum az is different from a traditional curriculum in several ways. For example:</p>
- <ul>
- <li>Kurikulum az is more flexible and adaptable to the changing needs and demands of society and the economy</li>
- <li>Kurikulum az is more integrated and interdisciplinary across subjects and courses</li>
- <li>Kurikulum az is more interactive and collaborative among students and teachers</li>
- <li>Kurikulum az is more diverse and inclusive of different learners' backgrounds, abilities, styles, and preferences</li>
- </ul>
- <h2>The structure and content of kurikulum az</h2>
- <p>Kurikulum az is organized into four sub-levels of general education: preschool, primary, basic, and secondary. Each sub-level has its own specific objectives, content standards, and learning outcomes. The table below shows the duration, age range, and main subjects of each sub-level.</p>
- <table>
- <tr><th>Sub-level</th><th>Duration</th><th>Age range</th><th>Main subjects</th></tr>
- <tr><td>Preschool</td><td>1-2 years</td><td>3-5 years</td><td>Language and communication, mathematics, natural sciences, social sciences, arts, physical education</td></tr>
- <tr><td>Primary</td><td>4 years</td><td>6-9 years</td><td>Azerbaijani language and literature, mathematics, natural sciences, social sciences, foreign language, arts, physical education, ethics and religion</td></tr>
- <tr><td>Basic</td><td>5 years</td><td>10-14 years</td><td>Azerbaijani language and literature, mathematics, natural sciences, social sciences, foreign language, arts, physical education, ethics and religion, information and communication technologies, elective courses</td></tr>
- <tr><td>Secondary</td><td>2 years</td><td>15-16 years</td><td>Azerbaijani language and literature, mathematics, natural sciences, social sciences, foreign language, arts, physical education, ethics and religion, information and communication technologies, elective courses</td></tr>
- </table>
- <p>Kurikulum az defines eight key competencies that students should develop throughout their general education. These competencies are:</p>
- <ol>
- <li>Linguistic competence: The ability to communicate effectively in oral and written forms in Azerbaijani and foreign languages.</li>
- <li>Mathematical competence: The ability to use mathematical concepts, procedures, and reasoning to solve problems in various contexts.</li>
- <li>Natural-scientific competence: The ability to understand and apply scientific concepts, methods, and processes to explain natural phenomena and human interactions with the environment.</li>
- <li>Social-scientific competence: The ability to understand and analyze social, historical, cultural, political, economic, and geographic aspects of human societies and their diversity.</li>
- <li>Digital competence: The ability to use information and communication technologies to access, create, process, store, share, and evaluate information.</li>
- <li>Civic competence: The ability to participate actively and responsibly in democratic processes and civic life at local, national, and global levels.</li>
- <li>Cultural competence: The ability to appreciate and respect one's own and others' cultural identities, values, beliefs, traditions, and expressions.</li>
- <li>Personal competence: The ability to manage one's own learning, emotions, health, well-being, relationships, and career development.</li>
- </ol>
- <p>Kurikulum az also specifies the content standards and learning outcomes for each subject and course. Content standards describe the essential knowledge and skills that students should acquire in each subject area. Learning outcomes describe the expected achievements of students at the end of each sub-level of general education. For example:</p>
- <p>kurikulum Azerbaijani language<br />
- kurikulum Azerbaijani literature<br />
- kurikulum Azerbaijani history<br />
- kurikulum Azerbaijani geography<br />
- kurikulum Azerbaijani culture<br />
- kurikulum az portal<br />
- kurikulum az personal account<br />
- kurikulum az arti edu<br />
- kurikulum az mathematics<br />
- kurikulum az physics<br />
- kurikulum az chemistry<br />
- kurikulum az biology<br />
- kurikulum az English language<br />
- kurikulum az Russian language<br />
- kurikulum az German language<br />
- kurikulum az French language<br />
- kurikulum az Turkish language<br />
- kurikulum az philosophy<br />
- kurikulum az psychology<br />
- kurikulum az social sciences<br />
- kurikulum az law<br />
- kurikulum az economics<br />
- kurikulum az informatics<br />
- kurikulum az technology<br />
- kurikulum az music<br />
- kurikulum az drawing and ornamentation<br />
- kurikulum az physical education<br />
- kurikulum az civil defense<br />
- kurikulum az medicine and health<br />
- kurikulum az ecology and natural resources<br />
- kurikulum az lectures and presentations<br />
- kurikulum az tests and questions<br />
- kurikulum az exams and assessments<br />
- kurikulum az methodology and pedagogical technologies<br />
- kurikulum az education standards and programs<br />
- kurikulum az medical prevention and legal protection<br />
- kurikulum az the Turkic world and the international community<br />
- kurikulum az global problems and development prospects<br />
- kurikulum az innovation and creativity<br />
- kurikulum az leadership and management<br />
- kurikulum az communication and public benefit<br />
- kurikulum az ethics and the religious environment<br />
- kurikulum az entrepreneurship and career planning<br />
- kurikulum az media and information literacy<br />
- kurikulum az language learning strategies<br />
- kurikulum az rewards and motivation<br />
- kurikulum az learning methods and styles<br />
- kurikulum az tutoring and mentoring<br />
- kurikulum az design for teaching</p>
- <ul>
- <li>The content standard for Azerbaijani language and literature in primary education is: "Students will develop their linguistic competence in Azerbaijani language by listening, speaking, reading, and writing in various situations and contexts. They will also develop their literary competence by exploring and appreciating different genres and forms of Azerbaijani literature."</li>
- <li>The learning outcome for Azerbaijani language and literature in primary education is: "By the end of primary education, students will be able to communicate effectively in oral and written forms in Azerbaijani language using appropriate vocabulary, grammar, and style. They will also be able to analyze and interpret different texts and works of Azerbaijani literature using basic literary concepts and techniques."</li>
- </ul>
- <h2>The benefits and challenges of kurikulum az</h2>
- <p>Kurikulum az has many benefits for the improvement of the quality and relevance of education in Azerbaijan. Some of these benefits are:</p>
- <ul>
- <li>Kurikulum az helps students to develop the competencies and skills that are in high demand in the modern world, such as critical thinking, creativity, communication, collaboration, digital literacy, civic literacy, etc.</li>
- <li>Kurikulum az enables students to learn in a more meaningful and engaging way, by connecting their learning to real-life situations, problems, and contexts.</li>
- <li>Kurikulum az empowers students to take more responsibility and ownership of their learning, by giving them more choice, voice, and agency in their learning process.</li>
- <li>Kurikulum az supports teachers to adopt more effective and innovative teaching methods, such as inquiry-based learning, project-based learning, cooperative learning, etc.</li>
- <li>Kurikulum az involves parents and other stakeholders in the education system, by encouraging their participation and feedback in the curriculum development, implementation, and evaluation.</li>
- <li>Kurikulum az reflects and promotes the national identity, culture, values, and aspirations of Azerbaijan, by integrating them into the curriculum content and outcomes.</li>
- </ul>
- <p>However, kurikulum az also faces some challenges and difficulties in its implementation and evaluation. Some of these challenges are:</p>
- <ul>
- <li>Kurikulum az requires a lot of resources and support for its successful implementation, such as adequate funding, infrastructure, equipment, materials, training, etc.</li>
- <li>Kurikulum az demands a lot of changes and adjustments from the teachers, students, parents, and other actors in the education system, such as new roles, responsibilities, expectations, attitudes, behaviors, etc.</li>
- <li>Kurikulum az poses a lot of questions and uncertainties about its effectiveness and impact on the students' learning outcomes and achievements, such as how to measure, monitor, assess, and evaluate them.</li>
- </ul>
- <h2>Conclusion</h2>
- <h3>A summary of the main points and a call to action</h3>
- <p>In conclusion, kurikulum az is a modern curriculum model that aims to provide students with the knowledge, skills, values, and attitudes that they need to succeed in the 21st century. It is based on the principles of student-centeredness, competency-basedness, and outcome-orientedness. It covers all levels of general education from preschool to higher education. It defines eight key competencies that students should develop throughout their education. It also specifies the content standards and learning outcomes for each subject and course. Kurikulum az has many benefits for the improvement of the quality and relevance of education in Azerbaijan. However, it also faces some challenges and difficulties in its implementation and evaluation. Therefore, it is important to provide continuous support and feedback to all the stakeholders involved in kurikulum az and to monitor and improve its effectiveness and impact on the students' learning outcomes and achievements.</p>
- <p>Do you have any questions or comments about kurikulum az? If so, please share them with us in the comment section below. We would love to hear from you!</p>
- <h2>FAQs</h2>
- <p>Here are some frequently asked questions and answers about kurikulum az:</p>
- <ol>
- <li><b>What is the difference between kurikulum az and derslik?</b></li>
- <p>Derslik is a term that refers to the textbooks that are used in schools. Kurikulum az is a term that refers to the curriculum model that guides the teaching, learning, and assessment processes in schools. Derslik is one of the tools that supports kurikulum az, but it is not the only one. Kurikulum az also uses other tools such as teacher guides, student workbooks, digital resources, etc.</p>
- <li><b>How can I access kurikulum az online?</b></li>
- <p>You can access kurikulum az online through the official website of the Ministry of Education of Azerbaijan: <a href="">www.edu.gov.az</a>. There you can find all the information, documents, and resources related to kurikulum az.</p>
- <li><b>How can I give feedback or suggestions about kurikulum az?</b></li>
- <p>You can give feedback or suggestions about kurikulum az through various channels such as email, phone, social media, or online surveys. You can also contact your local education authorities or school administration for any issues or concerns related to kurikulum az.</p>
- <li><b>How can I get involved or participate in kurikulum az?</b></li>
- <p>You can get involved or participate in kurikulum az by taking an active role in your own or your child's education. For example, you can:</p>
- <ul>
- <li>Read and understand the goals, principles, content standards, and learning outcomes of kurikulum az</li>
- <li>Support and encourage your child's learning at home and at school</li>
- <li>Communicate and cooperate with your child's teachers and school administration</li>
- <li>Participate in school events, activities, and decision-making processes</li>
- <li>Join or form parent-teacher associations or other community groups that support education</li>
- <li>Volunteer or donate to educational initiatives or projects</li>
- </ul>
- <li><b>What are some examples of good practices or success stories of kurikulum az?</b></li>
- <p>There are many examples of good practices or success stories of kurikulum az that showcase the positive impact of kurikulum az on students, teachers, schools, and society. For example:</p>
- <ul>
- <li>Some schools have implemented innovative projects that integrate kurikulum az with local needs and resources, such as environmental education, cultural heritage, social entrepreneurship, etc.</li>
- <li>Some teachers have adopted new pedagogical methods that enhance student engagement, motivation, and achievement, such as gamification, flipped classroom, blended learning, etc.</li>
- <li>Some students have demonstrated outstanding performance and achievements in national and international competitions, assessments, and exhibitions, such as Olympiads, PISA, STEM Expo, etc.</li>
- <li>Some parents and communities have expressed their satisfaction and appreciation for the quality and relevance of education provided by kurikulum az.</li>
- </ul></p> 401be4b1e0<br />
- <br />
- <br />
 
spaces/1phancelerku/anime-remove-background/Castle Clash Mod Apk 2022 Enjoy the Best Features of the Game with No Ads.md DELETED
@@ -1,145 +0,0 @@
1
-
2
- <h1>Castle Clash Mod Apk 2022: A Guide for Beginners</h1>
3
- <p>Are you looking for a fun and exciting strategy game that will keep you hooked for hours? Do you want to experience the thrill of building your own castle, commanding your own army, and conquering your enemies? If yes, then you should try Castle Clash Mod Apk 2022, the latest version of the popular mobile game that has millions of fans around the world.</p>
4
- <h2>castle clash mod apk 2022</h2><br /><p><b><b>Download File</b> &mdash; <a href="https://jinyurl.com/2uNLct">https://jinyurl.com/2uNLct</a></b></p><br /><br />
5
- <p>In this article, we will tell you everything you need to know about Castle Clash Mod Apk 2022, including what it is, how to download and install it, how to play it, and how to get unlimited money and gems in the game. By the end of this article, you will be ready to join the epic adventure of Castle Clash Mod Apk 2022 and become a world ruler.</p>
6
- <h2>What is Castle Clash?</h2>
7
- <h3>A brief introduction to the game and its features</h3>
8
- <p>Castle Clash is a free-to-play mobile strategy game from Playrix that was released in 2013. It is one of the most popular games in the genre, with over 100 million downloads on Google Play Store alone. The game is available for both Android and iOS devices.</p>
9
- <p>Castle Clash is a game where you can create your own kingdom, recruit and train your own troops, build and upgrade your own buildings, and fight against other players or computer-controlled enemies. You can choose from ten different medieval lords, each with their own unique troops and buildings. You can also join or create guilds, participate in events, complete quests, and collect rewards.</p>
10
- <h3>How to download and install Castle Clash Mod Apk 2022</h3>
11
- <p>If you want to enjoy the game with more features and benefits, you can download and install Castle Clash Mod Apk 2022, which is a modified version of the original game that gives you access to unlimited money and gems, as well as other perks. Here are the steps to download and install Castle Clash Mod Apk 2022:</p>
12
- <ol>
13
- <li>Go to or any other trusted website that offers Castle Clash Mod Apk 2022.</li>
14
- <li>Click on the download button and wait for the file to be downloaded on your device.</li>
15
- <li>Go to your device's settings and enable the installation of apps from unknown sources.</li>
16
- <li>Locate the downloaded file on your device and tap on it to start the installation process.</li>
17
- <li>Follow the instructions on the screen and wait for the installation to be completed.</li>
18
- <li>Launch the game and enjoy Castle Clash Mod Apk 2022.</li>
19
- </ol>
20
- <h3>What are the benefits of using Castle Clash Mod Apk 2022</h3>
21
- <p>There are many benefits of using Castle Clash Mod Apk 2022, such as:</p>
22
- <ul>
23
- <li>You can get unlimited money and gems in the game, which you can use to buy anything you want, such as troops, buildings, upgrades, items, etc.</li>
24
- <li>You can unlock all the lords, troops, buildings, and modes in the game without having to spend real money or wait for long hours.</li>
25
- <li>You can enjoy faster loading times, smoother gameplay, better graphics, and more stability in the game.</li>
26
- <li>You can have more fun and excitement in the game without any limitations or restrictions <h2>How to play Castle Clash Mod Apk 2022</h2>
27
- <h3>The basics of building your castle and army</h3>
28
- <p>Once you have installed Castle Clash Mod Apk 2022, you can start playing the game by creating your own castle and army. Here are some of the basic steps to follow:</p>
29
- <p>castle clash hack apk unlimited gems 2022<br />
30
- castle clash modded apk free download 2022<br />
31
- castle clash cheats apk latest version 2022<br />
32
- castle clash premium apk mod unlocked 2022<br />
33
- castle clash mod apk offline no root 2022<br />
34
- castle clash unlimited money apk mod 2022<br />
35
- castle clash hack tool apk no survey 2022<br />
36
- castle clash mod apk android 1 2022<br />
37
- castle clash mod apk revdl 2022<br />
38
- castle clash mod apk rexdl 2022<br />
39
- castle clash mod apk happymod 2022<br />
40
- castle clash mod apk an1 2022<br />
41
- castle clash mod apk platinmods 2022<br />
42
- castle clash mod apk blackmod 2022<br />
43
- castle clash mod apk ihackedit 2022<br />
44
- castle clash mod apk lenov.ru 2022<br />
45
- castle clash mod apk andropalace 2022<br />
46
- castle clash mod apk apkpure 2022<br />
47
- castle clash mod apk apkmody 2022<br />
48
- castle clash mod apk apknite 2022<br />
49
- castle clash mod apk mob.org 2022<br />
50
- castle clash mod apk mobpark 2022<br />
51
- castle clash mod apk android republic 2022<br />
52
- castle clash mod apk androidoyun.club 2022<br />
53
- castle clash mod apk android zone 2022<br />
54
- castle clash mod apk latest update 2022<br />
55
- castle clash mod apk new version 2022<br />
56
- castle clash mod apk full version 2022<br />
57
- castle clash mod apk pro version 2022<br />
58
- castle clash mod apk vip version 2022<br />
59
- castle clash mod apk mega mod 2022<br />
60
- castle clash mod apk god mode 2022<br />
61
- castle clash mod apk one hit kill 2022<br />
62
- castle clash mod apk unlimited everything 2022<br />
63
- castle clash mod apk all heroes unlocked 2022<br />
64
- castle clash mod apk all troops unlocked 2022<br />
65
- castle clash mod apk all weapons unlocked 2022<br />
66
- castle clash mod apk all modes unlocked 2022<br />
67
- castle clash mod apk all features unlocked 2022<br />
68
- castle clash mod apk all in one 2022<br />
69
- castle clash hack and slash mod apk 2022<br />
70
- castle clash strategy and tactics mod apk 2022<br />
71
- castle clash war and adventure mod apk 2022<br />
72
- castle clash fantasy and magic mod apk 2022<br />
73
- castle clash rpg and simulation mod apk 2022<br />
74
- castle clash online and offline mod apk 2022<br />
75
- castle clash multiplayer and singleplayer mod apk 2022<br />
76
- castle clash pvp and pve mod apk 2022<br />
77
- castle clash fun and addictive mod apk 2022<br />
78
- castle clash best and popular mod apk 2022</p>
79
- <ul>
80
- <li>Choose a lord that suits your playstyle and strategy. Each lord has different strengths and weaknesses, as well as different troops and buildings.</li>
81
- <li>Build your castle by placing various buildings, such as barracks, towers, walls, mines, vaults, etc. You can upgrade your buildings to make them stronger and more efficient.</li>
82
- <li>Recruit and train your troops by using the barracks. You can choose from different types of troops, such as infantry, archers, cavalry, mages, etc. You can also upgrade your troops to improve their skills and abilities.</li>
83
- <li>Defend your castle from enemy attacks by using your towers, walls, traps, heroes, etc. You can also use spells and items to boost your defense.</li>
84
- <li>Attack other players' castles or computer-controlled enemies by using your troops, heroes, spells, items, etc. You can also use strategies and tactics to overcome your opponents.</li>
85
- </ul>
86
- <h3>The different game modes and challenges</h3>
87
- <p>Castle Clash Mod Apk 2022 offers a variety of game modes and challenges that will test your skills and keep you entertained. Some of the game modes and challenges are:</p>
88
- <ul>
89
- <li>Arena: A mode where you can compete with other players in real-time battles and rank up in the leaderboard.</li>
90
- <li>Guild Wars: A mode where you can join or create a guild and fight with other guilds for glory and rewards.</li>
91
- <li>Dungeon: A mode where you can explore different dungeons and face various enemies and bosses.</li>
92
- <li>Raid: A mode where you can raid other players' castles and loot their resources.</li>
93
- <li>HBM: A mode where you can defend your castle from waves of enemies and earn rewards.</li>
94
- <li>Trial: A mode where you can challenge yourself with different scenarios and difficulties.</li>
95
- </ul>
96
- <h3>The best tips and tricks for winning battles and raids</h3>
97
- <p>If you want to win more battles and raids in Castle Clash Mod Apk 2022, you should follow these tips and tricks:</p>
98
- <ul>
99
- <li>Know your enemy: Before you attack or defend, you should scout your enemy's castle and troops and plan your strategy accordingly.</li>
100
- <li>Use the right troops: Depending on the situation, you should use the right troops for the job. For example, infantry are good for breaking walls, archers are good for sniping towers, cavalry are good for flanking enemies, etc.</li>
101
- <li>Use the right heroes: Heroes are powerful units that can turn the tide of battle. You should use the right heroes for the right roles. For example, some heroes are good for offense, some are good for defense, some are good for support, etc.</li>
102
- <li>Use the right spells and items: Spells and items are useful tools that can enhance your performance in battle. You should use the right spells and items for the right situations. For example, some spells and items can heal your units, some can damage your enemies, some can buff your allies, etc.</li>
103
- <li>Use the right strategies and tactics: Strategies and tactics are important factors that can determine the outcome of battle. You should use the right strategies and tactics for the right scenarios. For example, some strategies and tactics are good for attacking, some are good for defending, some are good for ambushes, etc.</li>
104
- </ul> <h2>How to get unlimited money and gems in Castle Clash Mod Apk 2022</h2>
105
- <h3>The advantages of having unlimited resources in the game</h3>
106
- <p>One of the main reasons why many players use Castle Clash Mod Apk 2022 is because it gives them unlimited money and gems in the game. Money and gems are the two main currencies in Castle Clash, and they are used for various purposes, such as:</p>
107
- <ul>
108
- <li>Buying and upgrading troops, buildings, heroes, spells, items, etc.</li>
109
- <li>Speeding up the construction and training time of your units and structures.</li>
110
- <li>Unlocking new lords, troops, buildings, and modes in the game.</li>
111
- <li>Participating in special events, quests, and rewards.</li>
112
- <li>Enhancing your gameplay experience and enjoyment.</li>
113
- </ul>
114
- <p>Having unlimited money and gems in the game can give you a huge advantage over other players who have to spend real money or wait for long hours to get them. You can have more fun and freedom in the game without any limitations or restrictions.</p>
115
- <h3>The methods of getting free money and gems in Castle Clash Mod Apk 2022</h3>
116
- <p>There are two main methods of getting free money and gems in Castle Clash Mod Apk 2022. They are:</p>
117
- <ul>
118
- <li>Using the modded version of the game: This is the easiest and most convenient method of getting unlimited money and gems in the game. All you have to do is download and install Castle Clash Mod Apk 2022 from a trusted website, such as , and launch the game. You will automatically get unlimited money and gems in your account, which you can use as you wish.</li>
119
- <li>Using online generators or hacks: This is another method of getting free money and gems in the game, but it is more risky and complicated. You have to use online tools or websites that claim to generate or hack money and gems for you, such as or . You have to enter your username or email, select the amount of money and gems you want, and complete some verification steps. Then, you will supposedly get the money and gems in your account.</li>
120
- </ul>
121
- <h3>The precautions and risks of using Castle Clash Mod Apk 2022</h3>
122
- <p>While using Castle Clash Mod Apk 2022 can be tempting and beneficial, it also comes with some precautions and risks that you should be aware of. Some of them are:</p>
123
- <ul>
124
- <li>You may get banned from the game: The developers of Castle Clash do not approve of using modded versions or hacks of the game, as they consider it cheating and unfair. They may detect your activity and ban your account from the game permanently.</li>
125
- <li>You may get viruses or malware on your device: Some websites or tools that offer Castle Clash Mod Apk 2022 or hacks may be malicious or fraudulent. They may contain viruses or malware that can harm your device or steal your personal information.</li>
126
- <li>You may lose your progress or data: Some modded versions or hacks of the game may not be compatible with the original version or updates of the game. They may cause errors or glitches that can corrupt your progress or data in the game.</li>
127
- <li>You may lose your interest or challenge in the game: Having unlimited money and gems in the game may make it too easy or boring for you. You may lose your interest or challenge in the game, as you will not have any goals or obstacles to overcome.</li>
128
- </ul>
129
- <h2>Conclusion</h2>
130
- <h3>A summary of the main points and a call to action</h3>
131
- <p>In conclusion, Castle Clash Mod Apk 2022 is a modified version of the original Castle Clash game that gives you unlimited money and gems in the game, as well as other features and benefits. It is a fun and exciting strategy game where you can build your own castle, recruit your own army, and fight against other players or enemies. You can download and install Castle Clash Mod Apk 2022 from a trusted website, or use online generators or hacks to get free money and gems in the game. However, you should also be careful of the precautions and risks of using Castle Clash Mod Apk 2022, such as getting banned from the game, getting viruses or malware on your device, losing your progress or data, or losing your interest or challenge in the game.</p>
132
- <p>If you are interested in trying out Castle Clash Mod Apk 2022, you can follow the steps we have provided in this article. We hope you have enjoyed this article and learned something new about Castle Clash Mod Apk 2022. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!</p>
133
- <h2>FAQs</h2>
134
- <h3>What is the difference between Castle Clash and Castle Clash Mod Apk 2022?</h3>
135
- <p>Castle Clash is the original version of the game, while Castle Clash Mod Apk 2022 is a modified version of the game that gives you unlimited money and gems, as well as other features and benefits.</p>
136
- <h3>Is Castle Clash Mod Apk 2022 safe to use?</h3>
137
- <p>Castle Clash Mod Apk 2022 is safe to use if you download and install it from a trusted website. However, you should also be aware of the precautions and risks of using it, such as getting banned from the game, getting viruses or malware on your device, losing your progress or data, or losing your interest or challenge in the game.</p>
138
- <h3>How can I update Castle Clash Mod Apk 2022?</h3>
139
- <p>You can update Castle Clash Mod Apk 2022 by visiting the same website where you downloaded and installed it, and downloading and installing the latest version of the mod. You should also backup your progress and data before updating, in case something goes wrong.</p>
140
- <h3>Can I play Castle Clash Mod Apk 2022 with my friends?</h3>
141
- <p>Yes, you can play Castle Clash Mod Apk 2022 with your friends, as long as they also have the same modded version of the game. You can join or create guilds, chat with other players, and cooperate or compete with them in various game modes and challenges.</p>
142
- <h3>Can I play Castle Clash Mod Apk 2022 offline?</h3>
143
- <p>No, you cannot play Castle Clash Mod Apk 2022 offline, as it requires an internet connection to run. You need to be online to access the game's servers, features, and content.</p>
spaces/1phancelerku/anime-remove-background/Download Ludo for PC and Challenge Your Friends Online.md DELETED
@@ -1,153 +0,0 @@
1
- <br />
2
- <h1>How to Download Ludo for PC and Enjoy Its Benefits</h1>
3
- <p>Ludo is one of the most popular board games in the world, especially in India, where it originated. It is a game that can be played by anyone, regardless of age or skill level. It is also a game that can offer many benefits, such as improving your cognitive abilities, social skills, and confidence. But did you know that you can also play Ludo on your PC? In this article, we will show you how to download Ludo for PC using an Android emulator, and what are the advantages of playing Ludo on PC with BlueStacks.</p>
4
- <h2>download ludo for pc</h2><br /><p><b><b>DOWNLOAD</b> &#10022;&#10022;&#10022; <a href="https://jinyurl.com/2uNJWx">https://jinyurl.com/2uNJWx</a></b></p><br /><br />
5
- <h2>What is Ludo and Why Should You Play It?</h2>
6
- <h3>Ludo is a classic board game that originated in India</h3>
7
- <p>Ludo is a board game that is played by two to four players. Each player has four tokens of the same color, which they have to move around the board according to the roll of a dice. The objective of the game is to be the first player to move all four tokens into their home triangle in the center of the board. Along the way, players can capture their opponents' tokens by landing on the same square as them, or block their path by forming a chain with their own tokens. The game is based on an ancient Indian game called Pachisi, which was played by kings and queens in medieval times.</p>
8
- <h3>Ludo is a fun and engaging game that can improve your skills and social connections</h3>
9
- <p>Ludo is not just a simple game that you play for entertainment. It is also a game that can help you develop various skills and qualities that are useful in life. For example, playing Ludo can help you:</p>
10
- <ul>
11
- <li>Develop your brain function by stimulating your logical thinking, problem-solving, analysis, and decision-making abilities.</li>
12
- <li>Give pleasure and relieve stress by providing a fun and relaxing activity that can distract you from your worries and challenges.</li>
13
- <li>Lower your blood pressure by reducing anxiety and tension that can affect your health.</li>
14
- <li>Avoid serious diseases by keeping your brain active and preventing cognitive decline.</li>
15
- <li>Strengthen your immune system by boosting your mood and happiness hormones.</li>
16
- <li>Improve your mind for strategy and tactics by planning your moves ahead and anticipating your opponents' actions.</li>
17
- <li>Have better relationships with friends and family by playing with them online or offline, communicating with them, and bonding with them over a shared interest.</li>
18
- <li>Instill a competitive spirit in yourself by challenging yourself and others to win the game.</li>
19
- <li>Escape from boredom and loneliness by playing with other players around the world, making new friends, and having fun conversations.</li>
20
- </ul>
21
- <p>As you can see, playing Ludo can have many positive effects on your mind, body, and soul. But how can you play Ludo on your PC? Let's find out in the next section.</p>
22
- <h2>How to Download Ludo for PC Using an Android Emulator</h2>
23
- <h3>An Android emulator is a software that allows you to run Android apps on your PC</h3>
24
- <p>If you want to play Ludo on your PC, you will need an Android emulator. An Android emulator is a software that mimics the Android operating system on your PC, allowing you to run Android apps and games on your computer. There are many Android emulators available online, but one of the best and most popular ones is BlueStacks.</p>
25
- <h3>You can use BlueStacks, a popular and reliable Android emulator, to download and play Ludo on your PC</h3>
26
- <p>BlueStacks is a free and easy-to-use Android emulator that has millions of users worldwide. It is compatible with Windows and Mac computers, and it supports a wide range of Android apps and games, including Ludo. With BlueStacks, you can download and play Ludo on your PC in just a few steps. Here's how:</p>
27
- <p>How to download ludo king on pc<br />
28
- Ludo game for pc free download<br />
29
- Ludo game for pc windows 10<br />
30
- Ludo game for pc online multiplayer<br />
31
- Ludo game for pc offline<br />
32
- Ludo game for pc with friends<br />
33
- Ludo game for pc bluestacks<br />
34
- Ludo game for pc emulator<br />
35
- Ludo game for pc full version<br />
36
- Ludo game for pc without internet<br />
37
- Best ludo game for pc 2023<br />
38
- Ludo club fun dice game for pc<br />
39
- Ludo star 2 game for pc<br />
40
- Ludo master new ludo game 2023 for pc<br />
41
- Ludo all star online classic board and dice game for pc<br />
42
- Ludo super classic board and dice game for pc<br />
43
- Ludo talent board and dice game for pc<br />
44
- Ludo dream classic board and dice game for pc<br />
45
- Ludo party board and dice game for pc<br />
46
- Ludo champ 2023 free new board and dice game for pc<br />
47
- Download ludo on pc with bluestacks emulator<br />
48
- Download ludo on pc with nox player emulator<br />
49
- Download ludo on pc with ld player emulator<br />
50
- Download ludo on pc with memu play emulator<br />
51
- Download ludo on pc with gameloop emulator<br />
52
- Download ludo on crazygames.com in browser<br />
53
- Download ludo king mod apk for pc<br />
54
- Download ludo king hack version for pc<br />
55
- Download ludo king unlimited money for pc<br />
56
- Download ludo king old version for pc<br />
57
- Download ludo king latest version for pc<br />
58
- Download ludo king update version for pc<br />
59
- Download ludo king offline mode for pc<br />
60
- Download ludo king voice chat feature for pc<br />
61
- Download ludo king theme change option for pc<br />
62
- Download ludo king cheats and tricks for pc<br />
63
- Download ludo king rules and tips for pc<br />
64
- Download ludo king tournament mode for pc<br />
65
- Download ludo king snake and ladder mode for pc<br />
66
- Download ludo king carrom mode for pc</p>
67
- <h4>How to install BlueStacks on your PC</h4>
68
- <ol>
69
- <li>Go to the official website of BlueStacks at bluestacks.com and click on the "Download BlueStacks" button.</li>
70
- <li>Wait for the download to finish and then run the installer file.</li>
71
- <li>Follow the instructions on the screen to complete the installation process.</li>
72
- <li>Launch BlueStacks on your PC and sign in with your Google account or create a new one.</li>
73
- </ol>
74
- <h4>How to access the Google Play Store and search for Ludo, Ludo King, or Ludo Club on BlueStacks</h4>
75
- <ol>
76
- <li>On the home screen of BlueStacks, click on the "Google Play" icon to open the Google Play Store.</li>
77
- <li>In the search bar, type "Ludo" and hit enter. You will see a list of Ludo games available for download.</li>
78
- <li>You can choose any Ludo game that you like, such as Ludo King or Ludo Club, which are some of the most popular and highly rated ones.</li>
79
- <li>Click on the game that you want to download and then click on the "Install" button.</li>
80
- <li>Wait for the installation to finish and then click on the "Open" button.</li>
81
- </ol>
82
- <h4>How to install and launch the Ludo game of your choice on BlueStacks</h4>
83
- <ol>
84
- <li>Once you have installed the Ludo game that you want to play, you will see its icon on the home screen of BlueStacks.</li>
85
- <li>Click on the icon to launch the game and start playing.</li>
86
- <li>You can adjust the settings of the game according to your preferences, such as the sound, language, graphics, etc.</li>
87
- <li>You can also customize your profile by choosing your name, avatar, color, etc.</li>
88
- <li>You can play Ludo in different modes, such as online multiplayer, local multiplayer, or against the computer.</li>
89
- </ol>
90
- <h2>Benefits of Playing Ludo on PC with BlueStacks</h2>
91
- <h3>You can enjoy a larger and better display of the game on your PC screen</h3>
92
- <p>One of the main benefits of playing Ludo on PC with BlueStacks is that you can enjoy a larger and better display of the game on your PC screen. You can see the board more clearly and appreciate the details more. You can also zoom in or out as you wish. Playing Ludo on a bigger screen can enhance your visual experience and make you feel more immersed in the game.</p>
93
- <h3>You can play with your friends and family online or offline, or against the computer</h3>
94
- <p>Another benefit of playing Ludo on PC with BlueStacks is that you can play with your friends and family online or offline, or against the computer. You can invite your friends or family members to join you in an online multiplayer mode, where you can chat with them and have fun together. You can also play with them offline by connecting your devices through Bluetooth or Wi-Fi. Alternatively, you can play against the computer in a single-player mode, where you can choose the difficulty level and practice your skills.</p>
95
- <h3>You can use various features and enhancements of BlueStacks to improve your gaming experience</h3>
96
- <p>A third benefit of playing Ludo on PC with BlueStacks is that you can use various features and enhancements of BlueStacks to improve your gaming experience. For example, you can use the following features of BlueStacks:</p>
97
- <ul>
98
- <li>Multi-instance: You can play multiple Ludo games at the same time on different windows, or play other games or apps while playing Ludo.</li>
99
- <li>Macro recorder: You can record and replay your actions in the game, such as rolling the dice, moving the tokens, etc.</li>
100
- <li>Keymapping: You can customize the keyboard and mouse controls for the game, such as assigning keys for different actions, changing the sensitivity, etc.</li>
101
- <li>Eco mode: You can lower the CPU and RAM usage of BlueStacks, which can improve the performance and speed of the game.</li>
102
- <li>Real-time translation: You can translate the text and voice chat in the game to any language that you want, which can help you communicate with other players from different countries.</li>
103
- </ul>
104
- <p>These are just some of the features that BlueStacks offers to enhance your gaming experience. You can explore more features and settings of BlueStacks by clicking on the menu icon on the top right corner of the emulator.</p>
105
- <h2>Conclusion</h2>
106
- <p>Ludo is a great game that can provide you with many benefits, such as improving your brain function, social skills, and happiness. But playing Ludo on PC with BlueStacks can make your gaming experience even better, as you can enjoy a larger and better display, play with your friends and family online or offline, or against the computer, and use various features and enhancements of BlueStacks to improve your performance and fun. So what are you waiting for? Download BlueStacks today and start playing Ludo on your PC!</p>
107
- <h2>FAQs</h2>
108
- <h3>What are some of the social benefits of playing Ludo?</h3>
109
- <p>Some of the social benefits of playing Ludo are:</p>
110
- <ul>
111
- <li>You can make new friends and connect with old ones by playing online with other players around the world.</li>
112
- <li>You can strengthen your bond with your family members by playing offline with them through Bluetooth or Wi-Fi.</li>
113
- <li>You can improve your communication and cooperation skills by chatting and working with your teammates in the game.</li>
114
- <li>You can learn about different cultures and languages by playing with people from different countries and using the real-time translation feature of BlueStacks.</li>
115
- </ul>
116
- <h3>What are some of the skills that you can develop by playing Ludo?</h3>
117
- <p>Some of the skills that you can develop by playing Ludo are:</p>
118
- <ul>
119
- <li>You can enhance your logical thinking, problem-solving, analysis, and decision-making abilities by planning your moves ahead and anticipating your opponents' actions.</li>
120
- <li>You can boost your memory, concentration, and attention span by keeping track of your tokens and dice rolls.</li>
121
- <li>You can increase your creativity and imagination by choosing different themes and avatars for the board and your profile.</li>
122
- <li>You can develop your strategy and tactics by using different methods and tricks to win the game.</li>
123
- </ul>
124
- <h3>How can you play Ludo online with other players around the world?</h3>
125
- <p>You can play Ludo online with other players around the world by following these steps:</p>
126
- <ol>
127
- <li>Launch the Ludo game that you have downloaded on BlueStacks.</li>
128
- <li>Select the online multiplayer mode from the main menu.</li>
129
- <li>Choose whether you want to play with two, three, or four players.</li>
130
- <li>Select whether you want to play with random players or invite your friends by sharing a code.</li>
131
- <li>Wait for the game to start and enjoy playing with other players around the world.</li>
132
- </ol>
133
- <h3>How can you change the theme of the board in Ludo?</h3>
134
- <p>You can change the theme of the board in Ludo by following these steps:</p>
135
- <ol>
136
- <li>Launch the Ludo game that you have downloaded on BlueStacks.</li>
137
- <li>Select the settings icon from the main menu.</li>
138
- <li>Select the theme option from the settings menu.</li>
139
- <li>Choose from various themes available for the board, such as nature, Egypt, disco, etc.</li>
140
- <li>Apply the theme that you like and enjoy playing on a different board.</li>
141
- </ol>
142
- <h3>How can you win the game of Ludo?</h3>
143
- <p>You can win the game of Ludo by following these tips:</p>
144
- <ul>
145
- <li>Roll the dice carefully and try to get a six as often as possible. A six will allow you to move a token out of your base or move an existing token six squares ahead. It will also give you another chance to roll again.</li>
146
- <li>Move your tokens strategically and try to capture your opponents' tokens by landing on the same square as them. This will send their tokens back to their base and delay their progress.</li>
147
- <li>Protect your tokens from being captured by forming a chain with two or more of your tokens on the same square. This will make them immune to capture by your opponents.</li>
148
- <li>Avoid landing on the star squares, as they are the most vulnerable to capture by your opponents. Instead, try to land on the safe squares, which are marked with a shield icon. These will protect your tokens from being captured.</li>
149
- <li>Move your tokens as fast as possible to reach your home triangle in the center of the board. Once you have moved all four of your tokens into your home triangle, you will win the game.</li>
150
- </ul>
151
- <p>I hope you enjoyed reading this article and learned how to download Ludo for PC and enjoy its benefits. Now, go ahead and try playing Ludo on your PC with BlueStacks and have fun!</p>
spaces/2023Liu2023/bingo/tailwind.config.js DELETED
@@ -1,48 +0,0 @@
1
- /** @type {import('tailwindcss').Config} */
2
- module.exports = {
3
- content: [
4
- './src/pages/**/*.{js,ts,jsx,tsx,mdx}',
5
- './src/components/**/*.{js,ts,jsx,tsx,mdx}',
6
- './src/app/**/*.{js,ts,jsx,tsx,mdx}',
7
- './src/ui/**/*.{js,ts,jsx,tsx,mdx}',
8
- ],
9
- "darkMode": "class",
10
- theme: {
11
- extend: {
12
- colors: {
13
- 'primary-blue': 'rgb(var(--color-primary-blue) / <alpha-value>)',
14
- secondary: 'rgb(var(--color-secondary) / <alpha-value>)',
15
- 'primary-background': 'rgb(var(--primary-background) / <alpha-value>)',
16
- 'primary-text': 'rgb(var(--primary-text) / <alpha-value>)',
17
- 'secondary-text': 'rgb(var(--secondary-text) / <alpha-value>)',
18
- 'light-text': 'rgb(var(--light-text) / <alpha-value>)',
19
- 'primary-border': 'rgb(var(--primary-border) / <alpha-value>)',
20
- },
21
- keyframes: {
22
- slideDownAndFade: {
23
- from: { opacity: 0, transform: 'translateY(-2px)' },
24
- to: { opacity: 1, transform: 'translateY(0)' },
25
- },
26
- slideLeftAndFade: {
27
- from: { opacity: 0, transform: 'translateX(2px)' },
28
- to: { opacity: 1, transform: 'translateX(0)' },
29
- },
30
- slideUpAndFade: {
31
- from: { opacity: 0, transform: 'translateY(2px)' },
32
- to: { opacity: 1, transform: 'translateY(0)' },
33
- },
34
- slideRightAndFade: {
35
- from: { opacity: 0, transform: 'translateX(2px)' },
36
- to: { opacity: 1, transform: 'translateX(0)' },
37
- },
38
- },
39
- animation: {
40
- slideDownAndFade: 'slideDownAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)',
41
- slideLeftAndFade: 'slideLeftAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)',
42
- slideUpAndFade: 'slideUpAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)',
43
- slideRightAndFade: 'slideRightAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)',
44
- },
45
- },
46
- },
47
- plugins: [require('@headlessui/tailwindcss'), require('tailwind-scrollbar')],
48
- }
spaces/232labs/VToonify/vtoonify/model/raft/train_standard.sh DELETED
@@ -1,6 +0,0 @@
1
- #!/bin/bash
2
- mkdir -p checkpoints
3
- python -u train.py --name raft-chairs --stage chairs --validation chairs --gpus 0 1 --num_steps 100000 --batch_size 10 --lr 0.0004 --image_size 368 496 --wdecay 0.0001
4
- python -u train.py --name raft-things --stage things --validation sintel --restore_ckpt checkpoints/raft-chairs.pth --gpus 0 1 --num_steps 100000 --batch_size 6 --lr 0.000125 --image_size 400 720 --wdecay 0.0001
5
- python -u train.py --name raft-sintel --stage sintel --validation sintel --restore_ckpt checkpoints/raft-things.pth --gpus 0 1 --num_steps 100000 --batch_size 6 --lr 0.000125 --image_size 368 768 --wdecay 0.00001 --gamma=0.85
6
- python -u train.py --name raft-kitti --stage kitti --validation kitti --restore_ckpt checkpoints/raft-sintel.pth --gpus 0 1 --num_steps 50000 --batch_size 6 --lr 0.0001 --image_size 288 960 --wdecay 0.00001 --gamma=0.85
spaces/AHzizi/WaifuVoiceGen/modules.py DELETED
@@ -1,388 +0,0 @@
1
- import math
2
- import numpy as np
3
- import torch
4
- from torch import nn
5
- from torch.nn import functional as F
6
-
7
- from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
8
- from torch.nn.utils import weight_norm, remove_weight_norm
9
-
10
- import commons
11
- from commons import init_weights, get_padding
12
- from transforms import piecewise_rational_quadratic_transform
13
-
14
-
15
- LRELU_SLOPE = 0.1
16
-
17
-
18
- class LayerNorm(nn.Module):
19
- def __init__(self, channels, eps=1e-5):
20
- super().__init__()
21
- self.channels = channels
22
- self.eps = eps
23
-
24
- self.gamma = nn.Parameter(torch.ones(channels))
25
- self.beta = nn.Parameter(torch.zeros(channels))
26
-
27
- def forward(self, x):
28
- x = x.transpose(1, -1)
29
- x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
30
- return x.transpose(1, -1)
31
-
32
-
33
- class ConvReluNorm(nn.Module):
34
- def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
35
- super().__init__()
36
- self.in_channels = in_channels
37
- self.hidden_channels = hidden_channels
38
- self.out_channels = out_channels
39
- self.kernel_size = kernel_size
40
- self.n_layers = n_layers
41
- self.p_dropout = p_dropout
42
- assert n_layers > 1, "Number of layers should be larger than 1."
43
-
44
- self.conv_layers = nn.ModuleList()
45
- self.norm_layers = nn.ModuleList()
46
- self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2))
47
- self.norm_layers.append(LayerNorm(hidden_channels))
48
- self.relu_drop = nn.Sequential(
49
- nn.ReLU(),
50
- nn.Dropout(p_dropout))
51
- for _ in range(n_layers-1):
52
- self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2))
53
- self.norm_layers.append(LayerNorm(hidden_channels))
54
- self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
55
- self.proj.weight.data.zero_()
56
- self.proj.bias.data.zero_()
57
-
58
- def forward(self, x, x_mask):
59
- x_org = x
60
- for i in range(self.n_layers):
61
- x = self.conv_layers[i](x * x_mask)
62
- x = self.norm_layers[i](x)
63
- x = self.relu_drop(x)
64
- x = x_org + self.proj(x)
65
- return x * x_mask
66
-
67
-
68
- class DDSConv(nn.Module):
69
- """
70
- Dilated and Depth-Separable Convolution
71
- """
72
- def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
73
- super().__init__()
74
- self.channels = channels
75
- self.kernel_size = kernel_size
76
- self.n_layers = n_layers
77
- self.p_dropout = p_dropout
78
-
79
- self.drop = nn.Dropout(p_dropout)
80
- self.convs_sep = nn.ModuleList()
81
- self.convs_1x1 = nn.ModuleList()
82
- self.norms_1 = nn.ModuleList()
83
- self.norms_2 = nn.ModuleList()
84
- for i in range(n_layers):
85
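- # Dilation grows exponentially with depth (kernel_size ** i); the matching padding keeps the sequence length unchanged at every layer.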
- dilation = kernel_size ** i
86
- padding = (kernel_size * dilation - dilation) // 2
87
- self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size,
88
- groups=channels, dilation=dilation, padding=padding
89
- ))
90
- self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
91
- self.norms_1.append(LayerNorm(channels))
92
- self.norms_2.append(LayerNorm(channels))
93
-
94
- def forward(self, x, x_mask, g=None):
95
- if g is not None:
96
- x = x + g
97
- for i in range(self.n_layers):
98
- y = self.convs_sep[i](x * x_mask)
99
- y = self.norms_1[i](y)
100
- y = F.gelu(y)
101
- y = self.convs_1x1[i](y)
102
- y = self.norms_2[i](y)
103
- y = F.gelu(y)
104
- y = self.drop(y)
105
- x = x + y
106
- return x * x_mask
107
-
108
-
109
- class WN(torch.nn.Module):
110
- def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
111
- super(WN, self).__init__()
112
- assert(kernel_size % 2 == 1)
113
- self.hidden_channels = hidden_channels
114
- self.kernel_size = kernel_size
115
- self.dilation_rate = dilation_rate
116
- self.n_layers = n_layers
117
- self.gin_channels = gin_channels
118
- self.p_dropout = p_dropout
119
-
120
- self.in_layers = torch.nn.ModuleList()
121
- self.res_skip_layers = torch.nn.ModuleList()
122
- self.drop = nn.Dropout(p_dropout)
123
-
124
- if gin_channels != 0:
125
- cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1)
126
- self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
127
-
128
- for i in range(n_layers):
129
- dilation = dilation_rate ** i
130
- padding = int((kernel_size * dilation - dilation) / 2)
131
- in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size,
132
- dilation=dilation, padding=padding)
133
- in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
134
- self.in_layers.append(in_layer)
135
-
136
- # the last layer needs no residual output, only the skip contribution
137
- if i < n_layers - 1:
138
- res_skip_channels = 2 * hidden_channels
139
- else:
140
- res_skip_channels = hidden_channels
141
-
142
- res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
143
- res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
144
- self.res_skip_layers.append(res_skip_layer)
145
-
146
- def forward(self, x, x_mask, g=None, **kwargs):
147
- output = torch.zeros_like(x)
148
- n_channels_tensor = torch.IntTensor([self.hidden_channels])
149
-
150
- if g is not None:
151
- g = self.cond_layer(g)
152
-
153
- for i in range(self.n_layers):
154
- x_in = self.in_layers[i](x)
155
- if g is not None:
156
- cond_offset = i * 2 * self.hidden_channels
157
- g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:]
158
- else:
159
- g_l = torch.zeros_like(x_in)
160
-
161
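- # WaveNet-style gated activation: the 2*hidden_channels are split in half, passed through tanh and sigmoid, and multiplied elementwise.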
- acts = commons.fused_add_tanh_sigmoid_multiply(
162
- x_in,
163
- g_l,
164
- n_channels_tensor)
165
- acts = self.drop(acts)
166
-
167
- res_skip_acts = self.res_skip_layers[i](acts)
168
- if i < self.n_layers - 1:
169
- res_acts = res_skip_acts[:,:self.hidden_channels,:]
170
- x = (x + res_acts) * x_mask
171
- output = output + res_skip_acts[:,self.hidden_channels:,:]
172
- else:
173
- output = output + res_skip_acts
174
- return output * x_mask
175
-
176
- def remove_weight_norm(self):
177
- if self.gin_channels != 0:
178
- torch.nn.utils.remove_weight_norm(self.cond_layer)
179
- for l in self.in_layers:
180
- torch.nn.utils.remove_weight_norm(l)
181
- for l in self.res_skip_layers:
182
- torch.nn.utils.remove_weight_norm(l)
183
-
184
-
185
- class ResBlock1(torch.nn.Module):
186
- def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
187
- super(ResBlock1, self).__init__()
188
- self.convs1 = nn.ModuleList([
189
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
190
- padding=get_padding(kernel_size, dilation[0]))),
191
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
192
- padding=get_padding(kernel_size, dilation[1]))),
193
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
194
- padding=get_padding(kernel_size, dilation[2])))
195
- ])
196
- self.convs1.apply(init_weights)
197
-
198
- self.convs2 = nn.ModuleList([
199
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
200
- padding=get_padding(kernel_size, 1))),
201
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
202
- padding=get_padding(kernel_size, 1))),
203
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
204
- padding=get_padding(kernel_size, 1)))
205
- ])
206
- self.convs2.apply(init_weights)
207
-
208
- def forward(self, x, x_mask=None):
209
- for c1, c2 in zip(self.convs1, self.convs2):
210
- xt = F.leaky_relu(x, LRELU_SLOPE)
211
- if x_mask is not None:
212
- xt = xt * x_mask
213
- xt = c1(xt)
214
- xt = F.leaky_relu(xt, LRELU_SLOPE)
215
- if x_mask is not None:
216
- xt = xt * x_mask
217
- xt = c2(xt)
218
- x = xt + x
219
- if x_mask is not None:
220
- x = x * x_mask
221
- return x
222
-
223
- def remove_weight_norm(self):
224
- for l in self.convs1:
225
- remove_weight_norm(l)
226
- for l in self.convs2:
227
- remove_weight_norm(l)
228
-
229
-
230
- class ResBlock2(torch.nn.Module):
231
- def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
232
- super(ResBlock2, self).__init__()
233
- self.convs = nn.ModuleList([
234
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
235
- padding=get_padding(kernel_size, dilation[0]))),
236
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
237
- padding=get_padding(kernel_size, dilation[1])))
238
- ])
239
- self.convs.apply(init_weights)
240
-
241
- def forward(self, x, x_mask=None):
242
- for c in self.convs:
243
- xt = F.leaky_relu(x, LRELU_SLOPE)
244
- if x_mask is not None:
245
- xt = xt * x_mask
246
- xt = c(xt)
247
- x = xt + x
248
- if x_mask is not None:
249
- x = x * x_mask
250
- return x
251
-
252
- def remove_weight_norm(self):
253
- for l in self.convs:
254
- remove_weight_norm(l)
255
-
256
-
257
- class Log(nn.Module):
258
- def forward(self, x, x_mask, reverse=False, **kwargs):
259
- if not reverse:
260
- y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
261
- logdet = torch.sum(-y, [1, 2])
262
- return y, logdet
263
- else:
264
- x = torch.exp(x) * x_mask
265
- return x
266
-
267
-
268
- class Flip(nn.Module):
269
- def forward(self, x, *args, reverse=False, **kwargs):
270
- x = torch.flip(x, [1])
271
- if not reverse:
272
- logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
273
- return x, logdet
274
- else:
275
- return x
276
-
277
-
278
- class ElementwiseAffine(nn.Module):
279
- def __init__(self, channels):
280
- super().__init__()
281
- self.channels = channels
282
- self.m = nn.Parameter(torch.zeros(channels,1))
283
- self.logs = nn.Parameter(torch.zeros(channels,1))
284
-
285
- def forward(self, x, x_mask, reverse=False, **kwargs):
286
- if not reverse:
287
- y = self.m + torch.exp(self.logs) * x
288
- y = y * x_mask
289
- logdet = torch.sum(self.logs * x_mask, [1,2])
290
- return y, logdet
291
- else:
292
- x = (x - self.m) * torch.exp(-self.logs) * x_mask
293
- return x
294
-
295
-
296
- class ResidualCouplingLayer(nn.Module):
297
- def __init__(self,
298
- channels,
299
- hidden_channels,
300
- kernel_size,
301
- dilation_rate,
302
- n_layers,
303
- p_dropout=0,
304
- gin_channels=0,
305
- mean_only=False):
306
- assert channels % 2 == 0, "channels should be divisible by 2"
307
- super().__init__()
308
- self.channels = channels
309
- self.hidden_channels = hidden_channels
310
- self.kernel_size = kernel_size
311
- self.dilation_rate = dilation_rate
312
- self.n_layers = n_layers
313
- self.half_channels = channels // 2
314
- self.mean_only = mean_only
315
-
316
- self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
317
- self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels)
318
- self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
319
- self.post.weight.data.zero_()
320
- self.post.bias.data.zero_()
321
-
322
- def forward(self, x, x_mask, g=None, reverse=False):
323
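- # Affine coupling: the first half x0 predicts a shift m and log-scale logs applied to the second half x1; the inverse pass simply undoes the affine map.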
- x0, x1 = torch.split(x, [self.half_channels]*2, 1)
324
- h = self.pre(x0) * x_mask
325
- h = self.enc(h, x_mask, g=g)
326
- stats = self.post(h) * x_mask
327
- if not self.mean_only:
328
- m, logs = torch.split(stats, [self.half_channels]*2, 1)
329
- else:
330
- m = stats
331
- logs = torch.zeros_like(m)
332
-
333
- if not reverse:
334
- x1 = m + x1 * torch.exp(logs) * x_mask
335
- x = torch.cat([x0, x1], 1)
336
- logdet = torch.sum(logs, [1,2])
337
- return x, logdet
338
- else:
339
- x1 = (x1 - m) * torch.exp(-logs) * x_mask
340
- x = torch.cat([x0, x1], 1)
341
- return x
342
-
343
-
344
- class ConvFlow(nn.Module):
345
- def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0):
346
- super().__init__()
347
- self.in_channels = in_channels
348
- self.filter_channels = filter_channels
349
- self.kernel_size = kernel_size
350
- self.n_layers = n_layers
351
- self.num_bins = num_bins
352
- self.tail_bound = tail_bound
353
- self.half_channels = in_channels // 2
354
-
355
- self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
356
- self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.)
357
- self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1)
358
- self.proj.weight.data.zero_()
359
- self.proj.bias.data.zero_()
360
-
361
- def forward(self, x, x_mask, g=None, reverse=False):
362
- x0, x1 = torch.split(x, [self.half_channels]*2, 1)
363
- h = self.pre(x0)
364
- h = self.convs(h, x_mask, g=g)
365
- h = self.proj(h) * x_mask
366
-
367
- b, c, t = x0.shape
368
- h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?]
369
-
370
- unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels)
371
- unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels)
372
- unnormalized_derivatives = h[..., 2 * self.num_bins:]
373
-
374
- x1, logabsdet = piecewise_rational_quadratic_transform(x1,
375
- unnormalized_widths,
376
- unnormalized_heights,
377
- unnormalized_derivatives,
378
- inverse=reverse,
379
- tails='linear',
380
- tail_bound=self.tail_bound
381
- )
382
-
383
- x = torch.cat([x0, x1], 1) * x_mask
384
- logdet = torch.sum(logabsdet * x_mask, [1,2])
385
- if not reverse:
386
- return x, logdet
387
- else:
388
- return x
spaces/AIFILMS/generate_human_motion/pyrender/pyrender/offscreen.py DELETED
@@ -1,160 +0,0 @@
1
- """Wrapper for offscreen rendering.
2
-
3
- Author: Matthew Matl
4
- """
5
- import os
6
-
7
- from .renderer import Renderer
8
- from .constants import RenderFlags
9
-
10
-
11
- class OffscreenRenderer(object):
12
- """A wrapper for offscreen rendering.
13
-
14
- Parameters
15
- ----------
16
- viewport_width : int
17
- The width of the main viewport, in pixels.
18
- viewport_height : int
19
- The height of the main viewport, in pixels.
20
- point_size : float
21
- The size of screen-space points in pixels.
22
- """
23
-
24
- def __init__(self, viewport_width, viewport_height, point_size=1.0):
25
- self.viewport_width = viewport_width
26
- self.viewport_height = viewport_height
27
- self.point_size = point_size
28
-
29
- self._platform = None
30
- self._renderer = None
31
- self._create()
32
-
33
- @property
34
- def viewport_width(self):
35
- """int : The width of the main viewport, in pixels.
36
- """
37
- return self._viewport_width
38
-
39
- @viewport_width.setter
40
- def viewport_width(self, value):
41
- self._viewport_width = int(value)
42
-
43
- @property
44
- def viewport_height(self):
45
- """int : The height of the main viewport, in pixels.
46
- """
47
- return self._viewport_height
48
-
49
- @viewport_height.setter
50
- def viewport_height(self, value):
51
- self._viewport_height = int(value)
52
-
53
- @property
54
- def point_size(self):
55
- """float : The pixel size of points in point clouds.
56
- """
57
- return self._point_size
58
-
59
- @point_size.setter
60
- def point_size(self, value):
61
- self._point_size = float(value)
62
-
63
- def render(self, scene, flags=RenderFlags.NONE, seg_node_map=None):
64
- """Render a scene with the given set of flags.
65
-
66
- Parameters
67
- ----------
68
- scene : :class:`Scene`
69
- A scene to render.
70
- flags : int
71
- A bitwise or of one or more flags from :class:`.RenderFlags`.
72
- seg_node_map : dict
73
- A map from :class:`.Node` objects to (3,) colors for each.
74
- If specified along with flags set to :attr:`.RenderFlags.SEG`,
75
- the color image will be a segmentation image.
76
-
77
- Returns
78
- -------
79
- color_im : (h, w, 3) uint8 or (h, w, 4) uint8
80
- The color buffer in RGB format, or in RGBA format if
81
- :attr:`.RenderFlags.RGBA` is set.
82
- Not returned if flags includes :attr:`.RenderFlags.DEPTH_ONLY`.
83
- depth_im : (h, w) float32
84
- The depth buffer in linear units.
85
- """
86
- self._platform.make_current()
87
- # If platform does not support dynamically-resizing framebuffers,
88
- # destroy it and restart it
89
- if (self._platform.viewport_height != self.viewport_height or
90
- self._platform.viewport_width != self.viewport_width):
91
- if not self._platform.supports_framebuffers():
92
- self.delete()
93
- self._create()
94
-
95
- self._platform.make_current()
96
- self._renderer.viewport_width = self.viewport_width
97
- self._renderer.viewport_height = self.viewport_height
98
- self._renderer.point_size = self.point_size
99
-
100
- if self._platform.supports_framebuffers():
101
- flags |= RenderFlags.OFFSCREEN
102
- retval = self._renderer.render(scene, flags, seg_node_map)
103
- else:
104
- self._renderer.render(scene, flags, seg_node_map)
105
- depth = self._renderer.read_depth_buf()
106
- if flags & RenderFlags.DEPTH_ONLY:
107
- retval = depth
108
- else:
109
- color = self._renderer.read_color_buf()
110
- retval = color, depth
111
-
112
- # Make the platform not current
113
- self._platform.make_uncurrent()
114
- return retval
115
-
116
- def delete(self):
117
- """Free all OpenGL resources.
118
- """
119
- self._platform.make_current()
120
- self._renderer.delete()
121
- self._platform.delete_context()
122
- del self._renderer
123
- del self._platform
124
- self._renderer = None
125
- self._platform = None
126
- import gc
127
- gc.collect()
128
-
129
- def _create(self):
130
- if 'PYOPENGL_PLATFORM' not in os.environ:
131
- from pyrender.platforms.pyglet_platform import PygletPlatform
132
- self._platform = PygletPlatform(self.viewport_width,
133
- self.viewport_height)
134
- elif os.environ['PYOPENGL_PLATFORM'] == 'egl':
135
- from pyrender.platforms import egl
136
- device_id = int(os.environ.get('EGL_DEVICE_ID', '0'))
137
- egl_device = egl.get_device_by_index(device_id)
138
- self._platform = egl.EGLPlatform(self.viewport_width,
139
- self.viewport_height,
140
- device=egl_device)
141
- elif os.environ['PYOPENGL_PLATFORM'] == 'osmesa':
142
- from pyrender.platforms.osmesa import OSMesaPlatform
143
- self._platform = OSMesaPlatform(self.viewport_width,
144
- self.viewport_height)
145
- else:
146
- raise ValueError('Unsupported PyOpenGL platform: {}'.format(
147
- os.environ['PYOPENGL_PLATFORM']
148
- ))
149
- self._platform.init_context()
150
- self._platform.make_current()
151
- self._renderer = Renderer(self.viewport_width, self.viewport_height)
152
-
153
- def __del__(self):
154
- try:
155
- self.delete()
156
- except Exception:
157
- pass
158
-
159
-
160
- __all__ = ['OffscreenRenderer']
spaces/AIFILMS/generate_human_motion/pyrender/pyrender/texture.py DELETED
@@ -1,259 +0,0 @@
1
- """Textures, conforming to the glTF 2.0 standards as specified in
2
- https://github.com/KhronosGroup/glTF/tree/master/specification/2.0#reference-texture
3
-
4
- Author: Matthew Matl
5
- """
6
- import numpy as np
7
-
8
- from OpenGL.GL import *
9
-
10
- from .utils import format_texture_source
11
- from .sampler import Sampler
12
-
13
-
14
- class Texture(object):
15
- """A texture and its sampler.
16
-
17
- Parameters
18
- ----------
19
- name : str, optional
20
- The user-defined name of this object.
21
- sampler : :class:`Sampler`
22
- The sampler used by this texture.
23
- source : (h,w,c) uint8 or (h,w,c) float or :class:`PIL.Image.Image`
24
- The image used by this texture. If None, the texture is created
25
- empty and width and height must be specified.
26
- source_channels : str
27
- Either `D`, `R`, `RG`, `GB`, `RGB`, or `RGBA`. Indicates the
28
- channels to extract from `source`. Any missing channels will be filled
29
- with `1.0`.
30
- width : int, optional
31
- For empty textures, the width of the texture buffer.
32
- height : int, optional
33
- For empty textures, the height of the texture buffer.
34
- tex_type : int
35
- Either GL_TEXTURE_2D or GL_TEXTURE_CUBE.
36
- data_format : int
37
- The data type of each channel, typically GL_UNSIGNED_BYTE or GL_FLOAT.
38
- """
39
-
40
- def __init__(self,
41
- name=None,
42
- sampler=None,
43
- source=None,
44
- source_channels=None,
45
- width=None,
46
- height=None,
47
- tex_type=GL_TEXTURE_2D,
48
- data_format=GL_UNSIGNED_BYTE):
49
- self.source_channels = source_channels
50
- self.name = name
51
- self.sampler = sampler
52
- self.source = source
53
- self.width = width
54
- self.height = height
55
- self.tex_type = tex_type
56
- self.data_format = data_format
57
-
58
- self._texid = None
59
- self._is_transparent = False
60
-
61
- @property
62
- def name(self):
63
- """str : The user-defined name of this object.
64
- """
65
- return self._name
66
-
67
- @name.setter
68
- def name(self, value):
69
- if value is not None:
70
- value = str(value)
71
- self._name = value
72
-
73
- @property
74
- def sampler(self):
75
- """:class:`Sampler` : The sampler used by this texture.
76
- """
77
- return self._sampler
78
-
79
- @sampler.setter
80
- def sampler(self, value):
81
- if value is None:
82
- value = Sampler()
83
- self._sampler = value
84
-
85
- @property
86
- def source(self):
87
- """(h,w,c) uint8 or float or :class:`PIL.Image.Image` : The image
88
- used in this texture.
89
- """
90
- return self._source
91
-
92
- @source.setter
93
- def source(self, value):
94
- if value is None:
95
- self._source = None
96
- else:
97
- self._source = format_texture_source(value, self.source_channels)
98
- self._is_transparent = False
99
-
100
- @property
101
- def source_channels(self):
102
- """str : The channels that were extracted from the original source.
103
- """
104
- return self._source_channels
105
-
106
- @source_channels.setter
107
- def source_channels(self, value):
108
- self._source_channels = value
109
-
110
- @property
111
- def width(self):
112
- """int : The width of the texture buffer.
113
- """
114
- return self._width
115
-
116
- @width.setter
117
- def width(self, value):
118
- self._width = value
119
-
120
- @property
121
- def height(self):
122
- """int : The height of the texture buffer.
123
- """
124
- return self._height
125
-
126
- @height.setter
127
- def height(self, value):
128
- self._height = value
129
-
130
- @property
131
- def tex_type(self):
132
- """int : The type of the texture.
133
- """
134
- return self._tex_type
135
-
136
- @tex_type.setter
137
- def tex_type(self, value):
138
- self._tex_type = value
139
-
140
- @property
141
- def data_format(self):
142
- """int : The format of the texture data.
143
- """
144
- return self._data_format
145
-
146
- @data_format.setter
147
- def data_format(self, value):
148
- self._data_format = value
149
-
150
- def is_transparent(self, cutoff=1.0):
151
- """bool : If True, the texture is partially transparent.
152
- """
153
- if self._is_transparent is None:
154
- self._is_transparent = False
155
- if self.source_channels == 'RGBA' and self.source is not None:
156
- if np.any(self.source[:,:,3] < cutoff):
157
- self._is_transparent = True
158
- return self._is_transparent
159
-
160
- def delete(self):
161
- """Remove this texture from the OpenGL context.
162
- """
163
- self._unbind()
164
- self._remove_from_context()
165
-
166
- ##################
167
- # OpenGL code
168
- ##################
169
- def _add_to_context(self):
170
- if self._texid is not None:
171
- raise ValueError('Texture already loaded into OpenGL context')
172
-
173
- fmt = GL_DEPTH_COMPONENT
174
- if self.source_channels == 'R':
175
- fmt = GL_RED
176
- elif self.source_channels == 'RG' or self.source_channels == 'GB':
177
- fmt = GL_RG
178
- elif self.source_channels == 'RGB':
179
- fmt = GL_RGB
180
- elif self.source_channels == 'RGBA':
181
- fmt = GL_RGBA
182
-
183
- # Generate the OpenGL texture
184
- self._texid = glGenTextures(1)
185
- glBindTexture(self.tex_type, self._texid)
186
-
187
- # Flip data for OpenGL buffer
188
- data = None
189
- width = self.width
190
- height = self.height
191
- if self.source is not None:
192
- data = np.ascontiguousarray(np.flip(self.source, axis=0).flatten())
193
- width = self.source.shape[1]
194
- height = self.source.shape[0]
195
-
196
- # Bind texture and generate mipmaps
197
- glTexImage2D(
198
- self.tex_type, 0, fmt, width, height, 0, fmt,
199
- self.data_format, data
200
- )
201
- if self.source is not None:
202
- glGenerateMipmap(self.tex_type)
203
-
204
- if self.sampler.magFilter is not None:
205
- glTexParameteri(
206
- self.tex_type, GL_TEXTURE_MAG_FILTER, self.sampler.magFilter
207
- )
208
- else:
209
- if self.source is not None:
210
- glTexParameteri(self.tex_type, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
211
- else:
212
- glTexParameteri(self.tex_type, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
213
- if self.sampler.minFilter is not None:
214
- glTexParameteri(
215
- self.tex_type, GL_TEXTURE_MIN_FILTER, self.sampler.minFilter
216
- )
217
- else:
218
- if self.source is not None:
219
- glTexParameteri(self.tex_type, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR)
220
- else:
221
- glTexParameteri(self.tex_type, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
222
-
223
- glTexParameteri(self.tex_type, GL_TEXTURE_WRAP_S, self.sampler.wrapS)
224
- glTexParameteri(self.tex_type, GL_TEXTURE_WRAP_T, self.sampler.wrapT)
225
- border_color = 255 * np.ones(4).astype(np.uint8)
226
- if self.data_format == GL_FLOAT:
227
- border_color = np.ones(4).astype(np.float32)
228
- glTexParameterfv(
229
- self.tex_type, GL_TEXTURE_BORDER_COLOR,
230
- border_color
231
- )
232
-
233
- # Unbind texture
234
- glBindTexture(self.tex_type, 0)
235
-
236
- def _remove_from_context(self):
237
- if self._texid is not None:
238
- # TODO OPENGL BUG?
239
- # glDeleteTextures(1, [self._texid])
240
- glDeleteTextures([self._texid])
241
- self._texid = None
242
-
243
- def _in_context(self):
244
- return self._texid is not None
245
-
246
- def _bind(self):
247
- # TODO HANDLE INDEXING INTO OTHER UV's
248
- glBindTexture(self.tex_type, self._texid)
249
-
250
- def _unbind(self):
251
- glBindTexture(self.tex_type, 0)
252
-
253
- def _bind_as_depth_attachment(self):
254
- glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT,
255
- self.tex_type, self._texid, 0)
256
-
257
- def _bind_as_color_attachment(self):
258
- glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
259
- self.tex_type, self._texid, 0)
spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/commons/rel_transformer_history.py DELETED
@@ -1,628 +0,0 @@
1
- import math
2
- import torch
3
- from torch import nn
4
- from torch.nn import functional as F
5
- from text_to_speech.utils.commons.hparams import hparams
6
- from text_to_speech.modules.commons.layers import Embedding
7
-
8
- import transformers
9
-
10
- def convert_pad_shape(pad_shape):
11
- l = pad_shape[::-1]
12
- pad_shape = [item for sublist in l for item in sublist]
13
- return pad_shape
14
-
15
-
16
- def shift_1d(x):
17
- x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
18
- return x
19
-
20
-
21
- def sequence_mask(length, max_length=None):
22
- if max_length is None:
23
- max_length = length.max()
24
- x = torch.arange(max_length, dtype=length.dtype, device=length.device)
25
- return x.unsqueeze(0) < length.unsqueeze(1)
26
-
27
-
28
- class Encoder(nn.Module):
29
- def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0.,
30
- window_size=None, block_length=None, pre_ln=False, **kwargs):
31
- super().__init__()
32
- self.hidden_channels = hidden_channels
33
- self.filter_channels = filter_channels
34
- self.n_heads = n_heads
35
- self.n_layers = n_layers
36
- self.kernel_size = kernel_size
37
- self.p_dropout = p_dropout
38
- self.window_size = window_size
39
- self.block_length = block_length
40
- self.pre_ln = pre_ln
41
-
42
- self.drop = nn.Dropout(p_dropout)
43
- self.attn_layers = nn.ModuleList()
44
- self.norm_layers_1 = nn.ModuleList()
45
- self.ffn_layers = nn.ModuleList()
46
- self.norm_layers_2 = nn.ModuleList()
47
- for i in range(self.n_layers):
48
- self.attn_layers.append(
49
- MultiHeadAttention(hidden_channels, hidden_channels, n_heads, window_size=window_size,
50
- p_dropout=p_dropout, block_length=block_length))
51
- self.norm_layers_1.append(LayerNorm(hidden_channels))
52
- self.ffn_layers.append(
53
- FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout))
54
- self.norm_layers_2.append(LayerNorm(hidden_channels))
55
- if pre_ln:
56
- self.last_ln = LayerNorm(hidden_channels)
57
-
58
- def forward(self, x, x_mask):
59
- attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
60
- for i in range(self.n_layers):
61
- x = x * x_mask
62
- x_ = x
63
- if self.pre_ln:
64
- x = self.norm_layers_1[i](x)
65
- y = self.attn_layers[i](x, x, attn_mask)
66
- y = self.drop(y)
67
- x = x_ + y
68
- if not self.pre_ln:
69
- x = self.norm_layers_1[i](x)
70
-
71
- x_ = x
72
- if self.pre_ln:
73
- x = self.norm_layers_2[i](x)
74
- y = self.ffn_layers[i](x, x_mask)
75
- y = self.drop(y)
76
- x = x_ + y
77
- if not self.pre_ln:
78
- x = self.norm_layers_2[i](x)
79
- if self.pre_ln:
80
- x = self.last_ln(x)
81
- x = x * x_mask
82
- return x
83
-
84
-
85
- class MultiHeadAttention(nn.Module):
86
- def __init__(self, channels, out_channels, n_heads, window_size=None, heads_share=True, p_dropout=0.,
87
- block_length=None, proximal_bias=False, proximal_init=False):
88
- super().__init__()
89
- assert channels % n_heads == 0
90
-
91
- self.channels = channels
92
- self.out_channels = out_channels
93
- self.n_heads = n_heads
94
- self.window_size = window_size
95
- self.heads_share = heads_share
96
- self.block_length = block_length
97
- self.proximal_bias = proximal_bias
98
- self.p_dropout = p_dropout
99
- self.attn = None
100
-
101
- self.k_channels = channels // n_heads
102
- self.conv_q = nn.Conv1d(channels, channels, 1)
103
- self.conv_k = nn.Conv1d(channels, channels, 1)
104
- self.conv_v = nn.Conv1d(channels, channels, 1)
105
- if window_size is not None:
106
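- # Learned relative-position embeddings for offsets in [-window_size, window_size] (2*window_size + 1 positions); a single table is shared across heads when heads_share is set.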
- n_heads_rel = 1 if heads_share else n_heads
107
- rel_stddev = self.k_channels ** -0.5
108
- self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
109
- self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
110
- self.conv_o = nn.Conv1d(channels, out_channels, 1)
111
- self.drop = nn.Dropout(p_dropout)
112
-
113
- nn.init.xavier_uniform_(self.conv_q.weight)
114
- nn.init.xavier_uniform_(self.conv_k.weight)
115
- if proximal_init:
116
- self.conv_k.weight.data.copy_(self.conv_q.weight.data)
117
- self.conv_k.bias.data.copy_(self.conv_q.bias.data)
118
- nn.init.xavier_uniform_(self.conv_v.weight)
119
-
120
- def forward(self, x, c, attn_mask=None):
121
- q = self.conv_q(x)
122
- k = self.conv_k(c)
123
- v = self.conv_v(c)
124
-
125
- x, self.attn = self.attention(q, k, v, mask=attn_mask)
126
-
127
- x = self.conv_o(x)
128
- return x
129
-
130
- def attention(self, query, key, value, mask=None):
131
- # reshape [b, d, t] -> [b, n_h, t, d_k]
132
- b, d, t_s, t_t = (*key.size(), query.size(2))
133
- query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
134
- key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
135
- value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
136
-
137
- scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self.k_channels)
138
- if self.window_size is not None:
139
- assert t_s == t_t, "Relative attention is only available for self-attention."
140
- key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
141
- rel_logits = self._matmul_with_relative_keys(query, key_relative_embeddings)
142
- rel_logits = self._relative_position_to_absolute_position(rel_logits)
143
- scores_local = rel_logits / math.sqrt(self.k_channels)
144
- scores = scores + scores_local
145
- if self.proximal_bias:
146
- assert t_s == t_t, "Proximal bias is only available for self-attention."
147
- scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
148
- if mask is not None:
149
- scores = scores.masked_fill(mask == 0, -1e4)
150
- if self.block_length is not None:
151
- block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
152
- scores = scores * block_mask + -1e4 * (1 - block_mask)
153
- p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
154
- p_attn = self.drop(p_attn)
155
- output = torch.matmul(p_attn, value)
156
- if self.window_size is not None:
157
- relative_weights = self._absolute_position_to_relative_position(p_attn)
158
- value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
159
- output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings)
160
- output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t]
161
- return output, p_attn
162
-
163
-     def _matmul_with_relative_values(self, x, y):
-         """
-         x: [b, h, l, m]
-         y: [h or 1, m, d]
-         ret: [b, h, l, d]
-         """
-         ret = torch.matmul(x, y.unsqueeze(0))
-         return ret
-
-     def _matmul_with_relative_keys(self, x, y):
-         """
-         x: [b, h, l, d]
-         y: [h or 1, m, d]
-         ret: [b, h, l, m]
-         """
-         ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
-         return ret
-
-     def _get_relative_embeddings(self, relative_embeddings, length):
-         max_relative_position = 2 * self.window_size + 1
-         # Pad first before slicing to avoid using cond ops.
-         pad_length = max(length - (self.window_size + 1), 0)
-         slice_start_position = max((self.window_size + 1) - length, 0)
-         slice_end_position = slice_start_position + 2 * length - 1
-         if pad_length > 0:
-             padded_relative_embeddings = F.pad(
-                 relative_embeddings,
-                 convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
-         else:
-             padded_relative_embeddings = relative_embeddings
-         used_relative_embeddings = padded_relative_embeddings[:, slice_start_position:slice_end_position]
-         return used_relative_embeddings
-
-     def _relative_position_to_absolute_position(self, x):
-         """
-         x: [b, h, l, 2*l-1]
-         ret: [b, h, l, l]
-         """
-         batch, heads, length, _ = x.size()
-         # Concat a column of padding to shift from relative to absolute indexing.
-         x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
-
-         # Concat extra elements so as to add up to shape (len + 1, 2 * len - 1).
-         x_flat = x.view([batch, heads, length * 2 * length])
-         x_flat = F.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]))
-
-         # Reshape and slice out the padded elements.
-         x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:, :, :length, length - 1:]
-         return x_final
-
-     def _absolute_position_to_relative_position(self, x):
-         """
-         x: [b, h, l, l]
-         ret: [b, h, l, 2*l-1]
-         """
-         batch, heads, length, _ = x.size()
-         # Pad along the column dimension.
-         x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]))
-         x_flat = x.view([batch, heads, length ** 2 + length * (length - 1)])
-         # Add zeros at the beginning that will skew the elements after the reshape.
-         x_flat = F.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
-         x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
-         return x_final
-
-     def _attention_bias_proximal(self, length):
-         """Bias for self-attention to encourage attention to close positions.
-         Args:
-             length: an integer scalar.
-         Returns:
-             a Tensor with shape [1, 1, length, length]
-         """
-         r = torch.arange(length, dtype=torch.float32)
-         diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
-         return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
-
-
- class FFN(nn.Module):
-     def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None):
-         super().__init__()
-         self.in_channels = in_channels
-         self.out_channels = out_channels
-         self.filter_channels = filter_channels
-         self.kernel_size = kernel_size
-         self.p_dropout = p_dropout
-         self.activation = activation
-
-         self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2)
-         self.conv_2 = nn.Conv1d(filter_channels, out_channels, 1)
-         self.drop = nn.Dropout(p_dropout)
-
-     def forward(self, x, x_mask):
-         x = self.conv_1(x * x_mask)
-         if self.activation == "gelu":
-             x = x * torch.sigmoid(1.702 * x)
-         else:
-             x = torch.relu(x)
-         x = self.drop(x)
-         x = self.conv_2(x * x_mask)
-         return x * x_mask
-
-
- class LayerNorm(nn.Module):
-     def __init__(self, channels, eps=1e-4):
-         super().__init__()
-         self.channels = channels
-         self.eps = eps
-
-         self.gamma = nn.Parameter(torch.ones(channels))
-         self.beta = nn.Parameter(torch.zeros(channels))
-
-     def forward(self, x):
-         n_dims = len(x.shape)
-         mean = torch.mean(x, 1, keepdim=True)
-         variance = torch.mean((x - mean) ** 2, 1, keepdim=True)
-
-         x = (x - mean) * torch.rsqrt(variance + self.eps)
-
-         shape = [1, -1] + [1] * (n_dims - 2)
-         x = x * self.gamma.view(*shape) + self.beta.view(*shape)
-         return x
-
-
- class ConvReluNorm(nn.Module):
-     def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
-         super().__init__()
-         self.in_channels = in_channels
-         self.hidden_channels = hidden_channels
-         self.out_channels = out_channels
-         self.kernel_size = kernel_size
-         self.n_layers = n_layers
-         self.p_dropout = p_dropout
-         assert n_layers > 1, "Number of layers should be larger than 1."
-
-         self.conv_layers = nn.ModuleList()
-         self.norm_layers = nn.ModuleList()
-         self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
-         self.norm_layers.append(LayerNorm(hidden_channels))
-         self.relu_drop = nn.Sequential(
-             nn.ReLU(),
-             nn.Dropout(p_dropout))
-         for _ in range(n_layers - 1):
-             self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
-             self.norm_layers.append(LayerNorm(hidden_channels))
-         self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
-         self.proj.weight.data.zero_()
-         self.proj.bias.data.zero_()
-
-     def forward(self, x, x_mask):
-         x_org = x
-         for i in range(self.n_layers):
-             x = self.conv_layers[i](x * x_mask)
-             x = self.norm_layers[i](x)
-             x = self.relu_drop(x)
-         x = x_org + self.proj(x)
-         return x * x_mask
-
-
- class RelTransformerEncoder(nn.Module):
-     def __init__(self,
-                  n_vocab,
-                  out_channels,
-                  hidden_channels,
-                  filter_channels,
-                  n_heads,
-                  n_layers,
-                  kernel_size,
-                  p_dropout=0.0,
-                  window_size=4,
-                  block_length=None,
-                  prenet=True,
-                  pre_ln=True,
-                  ):
-         super().__init__()
-
-         self.n_vocab = n_vocab
-         self.out_channels = out_channels
-         self.hidden_channels = hidden_channels
-         self.filter_channels = filter_channels
-         self.n_heads = n_heads
-         self.n_layers = n_layers
-         self.kernel_size = kernel_size
-         self.p_dropout = p_dropout
-         self.window_size = window_size
-         self.block_length = block_length
-         self.prenet = prenet
-         if n_vocab > 0:
-             self.emb = Embedding(n_vocab, hidden_channels, padding_idx=0)
-
-         if prenet:
-             self.pre = ConvReluNorm(hidden_channels, hidden_channels, hidden_channels,
-                                     kernel_size=5, n_layers=3, p_dropout=0)
-         self.encoder = Encoder(
-             hidden_channels,
-             filter_channels,
-             n_heads,
-             n_layers,
-             kernel_size,
-             p_dropout,
-             window_size=window_size,
-             block_length=block_length,
-             pre_ln=pre_ln,
-         )
-
-     def forward(self, x, x_mask=None):
-         if self.n_vocab > 0:
-             x_lengths = (x > 0).long().sum(-1)
-             x = self.emb(x) * math.sqrt(self.hidden_channels)  # [b, t, h]
-         else:
-             x_lengths = (x.abs().sum(-1) > 0).long().sum(-1)
-         x = torch.transpose(x, 1, -1)  # [b, h, t]
-         x_mask = torch.unsqueeze(sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
-
-         if self.prenet:
-             x = self.pre(x, x_mask)
-         x = self.encoder(x, x_mask)
-         return x.transpose(1, 2)
-
-
- def group_hidden_by_segs(h, seg_ids, max_len):
-     """
-     :param h: [B, T, H]
-     :param seg_ids: [B, T]
-     :return: h_gby_segs: [B, max_len, H]
-     """
-     B, T, H = h.shape
-     h_gby_segs = h.new_zeros([B, max_len + 1, H]).scatter_add_(1, seg_ids[:, :, None].repeat([1, 1, H]), h)
-     all_ones = h.new_ones(h.shape[:2])
-     cnt_gby_segs = h.new_zeros([B, max_len + 1]).scatter_add_(1, seg_ids, all_ones).contiguous()
-     h_gby_segs = h_gby_segs[:, 1:]
-     cnt_gby_segs = cnt_gby_segs[:, 1:]
-     h_gby_segs = h_gby_segs / torch.clamp(cnt_gby_segs[:, :, None], min=1)
-     return h_gby_segs
-
-
- def postprocess_word2ph(word_encoding, ph2word):
-     word_encoding = F.pad(word_encoding, [0, 0, 1, 0])
-     ph2word_ = ph2word[:, :, None].repeat([1, 1, word_encoding.shape[-1]])
-     out = torch.gather(word_encoding, 1, ph2word_)  # [B, T, H]
-     return out
-
-
- class Pooler(nn.Module):
-     """
-     Parameter-free poolers to get the sentence embedding.
-     'cls': [CLS] representation with BERT/RoBERTa's MLP pooler.
-     'cls_before_pooler': [CLS] representation without the original MLP pooler.
-     'avg': average of the last layer's hidden states at each token.
-     'avg_top2': average of the last two layers.
-     'avg_first_last': average of the first and the last layers.
-     """
-     def __init__(self, pooler_type):
-         super().__init__()
-         self.pooler_type = pooler_type
-         assert self.pooler_type in ["cls", "cls_before_pooler", "avg", "avg_top2", "avg_first_last"], \
-             "unrecognized pooling type %s" % self.pooler_type
-
-     def forward(self, attention_mask, outputs):
-         last_hidden = outputs.last_hidden_state
-         pooler_output = outputs.pooler_output
-         hidden_states = outputs.hidden_states
-
-         if self.pooler_type in ['cls_before_pooler', 'cls']:
-             return last_hidden[:, 0]
-         elif self.pooler_type == "avg":
-             return (last_hidden * attention_mask.unsqueeze(-1)).sum(1) / attention_mask.sum(-1).unsqueeze(-1)
-         elif self.pooler_type == "avg_first_last":
-             first_hidden = hidden_states[0]
-             last_hidden = hidden_states[-1]
-             pooled_result = ((first_hidden + last_hidden) / 2.0 * attention_mask.unsqueeze(-1)).sum(1) / attention_mask.sum(-1).unsqueeze(-1)
-             return pooled_result
-         elif self.pooler_type == "avg_top2":
-             second_last_hidden = hidden_states[-2]
-             last_hidden = hidden_states[-1]
-             pooled_result = ((last_hidden + second_last_hidden) / 2.0 * attention_mask.unsqueeze(-1)).sum(1) / attention_mask.sum(-1).unsqueeze(-1)
-             return pooled_result
-         else:
-             raise NotImplementedError
-
-
- class Similarity(nn.Module):
-     """
-     Dot product or cosine similarity.
-     """
-
-     def __init__(self, temp):
-         super().__init__()
-         self.temp = temp
-         self.cos = nn.CosineSimilarity(dim=-1)
-         self.record = None
-         self.pos_avg = 0.0
-         self.neg_avg = 0.0
-
-     def forward(self, x, y):
-         sim = self.cos(x, y)
-         self.record = sim.detach()  # e.g. [64, 64]
-         min_size = min(self.record.shape[0], self.record.shape[1])  # e.g. 64
-         num_item = self.record.shape[0] * self.record.shape[1]  # e.g. 4096
-         self.pos_avg = self.record.diag().sum() / min_size
-         if num_item - min_size == 0:
-             self.neg_avg = (self.record.sum() - self.record.diag().sum()) / 1
-             return sim / self.temp
-         if torch.any(torch.isnan(self.record)).item():
-             print("self.record contains NaN when computing neg_avg")
-         if torch.any(torch.isnan(self.record.diag())).item():
-             print("self.record.diag() contains NaN when computing neg_avg")
-         self.neg_avg = (self.record.sum() - self.record.diag().sum()) / (num_item - min_size)
-
-         return sim / self.temp
-
-
- class BertPredictionHeadTransform(nn.Module):
-     def __init__(self, hidden_size):
-         super().__init__()
-         self.dense = nn.Linear(hidden_size, hidden_size)
-         self.transform_act_fn = F.gelu
-         self.LayerNorm = nn.LayerNorm(hidden_size, eps=1e-12)
-
-     def forward(self, hidden_states):
-         hidden_states = self.dense(hidden_states)
-         hidden_states = self.transform_act_fn(hidden_states)
-         hidden_states = self.LayerNorm(hidden_states)
-         return hidden_states
-
-
- class BertLMPredictionHead(nn.Module):
-     def __init__(self, hid_dim, out_dim):
-         super().__init__()
-         self.transform = BertPredictionHeadTransform(hid_dim)
-         self.decoder = nn.Linear(hid_dim, out_dim, bias=False)
-         self.bias = nn.Parameter(torch.zeros(out_dim))
-         self.decoder.bias = self.bias
-
-     def forward(self, hidden_states):
-         hidden_states = self.transform(hidden_states)
-         hidden_states = self.decoder(hidden_states)
-         return hidden_states
-
-
- # V2_2: change add to concat; now supports finetuning BERT
- # (grad_bert=0.1 & trainable_block_idx=0).
- class BERTRelTransformerEncoder(nn.Module):
-     def __init__(self,
-                  n_vocab,
-                  out_channels,
-                  hidden_channels,
-                  filter_channels,
-                  n_heads,
-                  n_layers,
-                  kernel_size,
-                  p_dropout=0.0,
-                  window_size=4,
-                  block_length=None,
-                  prenet=True,
-                  pre_ln=True,
-                  ):
-         super().__init__()
-
-         self.n_vocab = n_vocab
-         self.out_channels = out_channels
-         self.hidden_channels = hidden_channels
-         self.filter_channels = filter_channels
-         self.n_heads = n_heads
-         self.n_layers = n_layers
-         self.kernel_size = kernel_size
-         self.p_dropout = p_dropout
-         self.window_size = window_size
-         self.block_length = block_length
-         self.prenet = prenet
-         if n_vocab > 0:
-             self.emb = Embedding(n_vocab, hidden_channels, padding_idx=0)
-
-         if prenet:
-             self.pre = ConvReluNorm(hidden_channels, hidden_channels, hidden_channels,
-                                     kernel_size=5, n_layers=3, p_dropout=0)
-         self.encoder1 = Encoder(
-             hidden_channels,
-             filter_channels,
-             n_heads,
-             n_layers // 2,
-             kernel_size,
-             p_dropout,
-             window_size=window_size,
-             block_length=block_length,
-             pre_ln=pre_ln,
-         )
-
-         self.encoder2 = Encoder(
-             hidden_channels,
-             filter_channels,
-             n_heads,
-             n_layers - n_layers // 2,
-             kernel_size,
-             p_dropout,
-             window_size=window_size,
-             block_length=block_length,
-             pre_ln=pre_ln,
-         )
-
-         if hparams['ds_name'] in ['ljspeech', 'libritts']:
-             model_name = 'bert-base-uncased'
-         elif hparams['ds_name'] in ['biaobei']:
-             model_name = 'bert-base-chinese'
-         else:
-             raise NotImplementedError()
-
-         config_kwargs = {'cache_dir': None, 'revision': 'main', 'use_auth_token': None}
-         self.tokenizer = transformers.AutoTokenizer.from_pretrained(model_name)
-         config = transformers.AutoConfig.from_pretrained(model_name, **config_kwargs)
-         self.bert = transformers.AutoModelForMaskedLM.from_pretrained(
-             model_name,
-             config=config,
-         )
-         self.cl_head = BertLMPredictionHead(768, 768)
-         trainable_start_block = hparams.get("trainable_start_block", 10)
-         for k, v in self.bert.named_parameters():
-             if 'embeddings' in k:
-                 v.requires_grad = False
-             elif 'encoder.layer' in k:
-                 block_idx = int(k.split(".")[3])
-                 if block_idx < trainable_start_block:
-                     v.requires_grad = False
-                 else:
-                     v.requires_grad = True
-             elif 'cls' in k:
-                 v.requires_grad = True
-             else:
-                 print("Unhandled key: {}, set to requires_grad...".format(k))
-                 v.requires_grad = True
-
-         self.bert_combine = nn.Sequential(*[
-             nn.Conv1d(768 + hidden_channels, hidden_channels, 3, 1, 1),
-             nn.ReLU(),
-         ])
-         self.pooler = Pooler("avg")
-         self.sim = Similarity(temp=0.05)
-
-     def forward(self, x, x_mask=None, bert_feats=None, ph2word=None, **kwargs):
-         if self.n_vocab > 0:
-             x_lengths = (x > 0).long().sum(-1)
-             x = self.emb(x) * math.sqrt(self.hidden_channels)  # [b, t, h]
-         else:
-             x_lengths = (x.abs().sum(-1) > 0).long().sum(-1)
-         x = torch.transpose(x, 1, -1)  # [b, h, t]
-         x_mask = torch.unsqueeze(sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
-
-         if self.prenet:
-             x = self.pre(x, x_mask)
-         x = self.encoder1(x, x_mask)
-         bert_outputs = self.bert.bert(bert_feats['bert_input_ids'],
-                                       attention_mask=bert_feats['bert_attention_mask'],
-                                       token_type_ids=bert_feats['bert_token_type_ids'])
-         bert_embedding = bert_outputs['last_hidden_state']
-         grad_bert = hparams.get("grad_bert", 0.1)
-         bert_embedding = bert_embedding.detach() * (1 - grad_bert) + bert_embedding * grad_bert
-         bert_word_embedding = group_hidden_by_segs(bert_embedding, bert_feats['bert_token2word'],
-                                                    bert_feats['bert_token2word'].max().item())
-         bert_ph_embedding = postprocess_word2ph(bert_word_embedding, ph2word)
-         bert_ph_embedding = bert_ph_embedding.transpose(1, 2)
-         x = torch.cat([x, bert_ph_embedding], dim=1)
-         x = self.bert_combine(x)
-         x = self.encoder2(x, x_mask)
-         return x.transpose(1, 2)
-
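A quick way to sanity-check the "skewing" trick in _relative_position_to_absolute_position above is the index identity out[b, h, i, j] == x[b, h, i, j - i + L - 1]. The following standalone sketch (an illustration added here, not part of the deleted file) re-implements the pad/reshape sequence and asserts that identity:

    import torch
    import torch.nn.functional as F

    def rel_to_abs(x):
        # x: [b, h, L, 2L-1] relative-position logits -> [b, h, L, L] absolute scores
        b, h, l, _ = x.size()
        x = F.pad(x, [0, 1])                # pad last dim by one -> [b, h, L, 2L]
        x_flat = x.view(b, h, l * 2 * l)
        x_flat = F.pad(x_flat, [0, l - 1])  # extra zeros skew the rows on reshape
        return x_flat.view(b, h, l + 1, 2 * l - 1)[:, :, :l, l - 1:]

    L = 5
    x = torch.randn(1, 1, L, 2 * L - 1)
    out = rel_to_abs(x)
    for i in range(L):
        for j in range(L):
            assert torch.equal(out[0, 0, i, j], x[0, 0, i, j - i + L - 1])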
 
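Similarly, group_hidden_by_segs and postprocess_word2ph act as a pool/unpool pair: the first mean-pools token-level hidden states into word-level vectors using 1-based segment ids (id 0 is reserved for padding), and the second gathers each word vector back onto its phonemes via ph2word. A minimal usage sketch, assuming the two functions above are in scope:

    import torch

    h = torch.arange(12, dtype=torch.float32).view(1, 6, 2)  # [B=1, T=6 phones, H=2]
    ph2word = torch.tensor([[1, 1, 2, 2, 2, 3]])             # 1-based word id per phone
    word_h = group_hidden_by_segs(h, ph2word, max_len=3)     # [1, 3, 2], mean per word
    ph_h = postprocess_word2ph(word_h, ph2word)              # [1, 6, 2], word vector per phone
    assert torch.allclose(ph_h[0, 0], h[0, :2].mean(0))      # phone 0 receives word 1's mean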
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb8-150e_deepfashion2_short_sleeved_outwear_256x192/td_hm_res50_4xb8-150e_deepfashion2_short_sleeved_outwear_256x192.py DELETED
@@ -1,2861 +0,0 @@
- default_scope = 'mmpose'
- default_hooks = dict(
-     timer=dict(type='IterTimerHook'),
-     logger=dict(type='LoggerHook', interval=50),
-     param_scheduler=dict(type='ParamSchedulerHook'),
-     checkpoint=dict(type='CheckpointHook', interval=10, save_best='PCK', rule='greater'),
-     sampler_seed=dict(type='DistSamplerSeedHook'),
-     visualization=dict(type='PoseVisualizationHook', enable=False))
- custom_hooks = [dict(type='SyncBuffersHook')]
- env_cfg = dict(
-     cudnn_benchmark=False,
-     mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
-     dist_cfg=dict(backend='nccl'))
- vis_backends = [dict(type='LocalVisBackend')]
- visualizer = dict(
-     type='PoseLocalVisualizer',
-     vis_backends=[dict(type='LocalVisBackend'), dict(type='WandbVisBackend')],
-     name='visualizer')
- log_processor = dict(type='LogProcessor', window_size=50, by_epoch=True, num_digits=6)
- log_level = 'INFO'
- load_from = None
- resume = False
- backend_args = dict(backend='local')
- train_cfg = dict(by_epoch=True, max_epochs=150, val_interval=10)
- val_cfg = dict()
- test_cfg = dict()
- colors = dict(
-     sss=[255, 128, 0], lss=[255, 0, 128], sso=[128, 0, 255], lso=[0, 128, 255],
-     vest=[0, 128, 128], sling=[0, 0, 128], shorts=[128, 128, 128],
-     trousers=[128, 0, 128], skirt=[64, 128, 128], ssd=[64, 64, 128],
-     lsd=[128, 64, 0], vd=[128, 64, 255], sd=[128, 64, 0])
- dataset_info = dict(
-     dataset_name='deepfashion2',
-     paper_info=dict(
-         author='Yuying Ge and Ruimao Zhang and Lingyun Wu and Xiaogang Wang and Xiaoou Tang and Ping Luo',
-         title='DeepFashion2: A Versatile Benchmark for Detection, Pose Estimation, Segmentation and Re-Identification of Clothing Images',
-         container='Proceedings of IEEE Conference on Computer Vision and Pattern Recognition (CVPR)',
-         year='2019',
-         homepage='https://github.com/switchablenorms/DeepFashion2'),
-     keypoint_info=dict({
-         0: dict(name='sss_kpt1', id=0, color=[255, 128, 0], type='', swap=''),
-         1: dict(name='sss_kpt2', id=1, color=[255, 128, 0], type='', swap='sss_kpt6'),
-         2: dict(name='sss_kpt3', id=2, color=[255, 128, 0], type='', swap='sss_kpt5'),
-         3: dict(name='sss_kpt4', id=3, color=[255, 128, 0], type='', swap=''),
-         4: dict(name='sss_kpt5', id=4, color=[255, 128, 0], type='', swap='sss_kpt3'),
-         5: dict(name='sss_kpt6', id=5, color=[255, 128, 0], type='', swap='sss_kpt2'),
-         6: dict(name='sss_kpt7', id=6, color=[255, 128, 0], type='', swap='sss_kpt25'),
-         7: dict(name='sss_kpt8', id=7, color=[255, 128, 0], type='', swap='sss_kpt24'),
-         8: dict(name='sss_kpt9', id=8, color=[255, 128, 0], type='', swap='sss_kpt23'),
-         9: dict(name='sss_kpt10', id=9, color=[255, 128, 0], type='', swap='sss_kpt22'),
-         10: dict(name='sss_kpt11', id=10, color=[255, 128, 0], type='', swap='sss_kpt21'),
-         11: dict(name='sss_kpt12', id=11, color=[255, 128, 0], type='', swap='sss_kpt20'),
-         12: dict(name='sss_kpt13', id=12, color=[255, 128, 0], type='', swap='sss_kpt19'),
-         13: dict(name='sss_kpt14', id=13, color=[255, 128, 0], type='', swap='sss_kpt18'),
-         14: dict(name='sss_kpt15', id=14, color=[255, 128, 0], type='', swap='sss_kpt17'),
-         15: dict(name='sss_kpt16', id=15, color=[255, 128, 0], type='', swap=''),
-         16: dict(name='sss_kpt17', id=16, color=[255, 128, 0], type='', swap='sss_kpt15'),
-         17: dict(name='sss_kpt18', id=17, color=[255, 128, 0], type='', swap='sss_kpt14'),
-         18: dict(name='sss_kpt19', id=18, color=[255, 128, 0], type='', swap='sss_kpt13'),
-         19: dict(name='sss_kpt20', id=19, color=[255, 128, 0], type='', swap='sss_kpt12'),
-         20: dict(name='sss_kpt21', id=20, color=[255, 128, 0], type='', swap='sss_kpt11'),
-         21: dict(name='sss_kpt22', id=21, color=[255, 128, 0], type='', swap='sss_kpt10'),
-         22: dict(name='sss_kpt23', id=22, color=[255, 128, 0], type='', swap='sss_kpt9'),
-         23: dict(name='sss_kpt24', id=23, color=[255, 128, 0], type='', swap='sss_kpt8'),
-         24: dict(name='sss_kpt25', id=24, color=[255, 128, 0], type='', swap='sss_kpt7'),
-         25: dict(name='lss_kpt1', id=25, color=[255, 0, 128], type='', swap=''),
-         26: dict(name='lss_kpt2', id=26, color=[255, 0, 128], type='', swap='lss_kpt6'),
-         27: dict(name='lss_kpt3', id=27, color=[255, 0, 128], type='', swap='lss_kpt5'),
-         28: dict(name='lss_kpt4', id=28, color=[255, 0, 128], type='', swap=''),
-         29: dict(name='lss_kpt5', id=29, color=[255, 0, 128], type='', swap='lss_kpt3'),
-         30: dict(name='lss_kpt6', id=30, color=[255, 0, 128], type='', swap='lss_kpt2'),
-         31: dict(name='lss_kpt7', id=31, color=[255, 0, 128], type='', swap='lss_kpt33'),
-         32: dict(name='lss_kpt8', id=32, color=[255, 0, 128], type='', swap='lss_kpt32'),
-         33: dict(name='lss_kpt9', id=33, color=[255, 0, 128], type='', swap='lss_kpt31'),
-         34: dict(name='lss_kpt10', id=34, color=[255, 0, 128], type='', swap='lss_kpt30'),
-         35: dict(name='lss_kpt11', id=35, color=[255, 0, 128], type='', swap='lss_kpt29'),
-         36: dict(name='lss_kpt12', id=36, color=[255, 0, 128], type='', swap='lss_kpt28'),
-         37: dict(name='lss_kpt13', id=37, color=[255, 0, 128], type='', swap='lss_kpt27'),
-         38: dict(name='lss_kpt14', id=38, color=[255, 0, 128], type='', swap='lss_kpt26'),
-         39: dict(name='lss_kpt15', id=39, color=[255, 0, 128], type='', swap='lss_kpt25'),
-         40: dict(name='lss_kpt16', id=40, color=[255, 0, 128], type='', swap='lss_kpt24'),
-         41: dict(name='lss_kpt17', id=41, color=[255, 0, 128], type='', swap='lss_kpt23'),
-         42: dict(name='lss_kpt18', id=42, color=[255, 0, 128], type='', swap='lss_kpt22'),
-         43: dict(name='lss_kpt19', id=43, color=[255, 0, 128], type='', swap='lss_kpt21'),
-         44: dict(name='lss_kpt20', id=44, color=[255, 0, 128], type='', swap=''),
-         45: dict(name='lss_kpt21', id=45, color=[255, 0, 128], type='', swap='lss_kpt19'),
-         46: dict(name='lss_kpt22', id=46, color=[255, 0, 128], type='', swap='lss_kpt18'),
-         47: dict(name='lss_kpt23', id=47, color=[255, 0, 128], type='', swap='lss_kpt17'),
-         48: dict(name='lss_kpt24', id=48, color=[255, 0, 128], type='', swap='lss_kpt16'),
-         49: dict(name='lss_kpt25', id=49, color=[255, 0, 128], type='', swap='lss_kpt15'),
-         50: dict(name='lss_kpt26', id=50, color=[255, 0, 128], type='', swap='lss_kpt14'),
-         51: dict(name='lss_kpt27', id=51, color=[255, 0, 128], type='', swap='lss_kpt13'),
-         52: dict(name='lss_kpt28', id=52, color=[255, 0, 128], type='', swap='lss_kpt12'),
-         53: dict(name='lss_kpt29', id=53, color=[255, 0, 128], type='', swap='lss_kpt11'),
-         54: dict(name='lss_kpt30', id=54, color=[255, 0, 128], type='', swap='lss_kpt10'),
-         55: dict(name='lss_kpt31', id=55, color=[255, 0, 128], type='', swap='lss_kpt9'),
-         56: dict(name='lss_kpt32', id=56, color=[255, 0, 128], type='', swap='lss_kpt8'),
-         57: dict(name='lss_kpt33', id=57, color=[255, 0, 128], type='', swap='lss_kpt7'),
-         58: dict(name='sso_kpt1', id=58, color=[128, 0, 255], type='', swap=''),
-         59: dict(name='sso_kpt2', id=59, color=[128, 0, 255], type='', swap='sso_kpt26'),
-         60: dict(name='sso_kpt3', id=60, color=[128, 0, 255], type='', swap='sso_kpt5'),
-         61: dict(name='sso_kpt4', id=61, color=[128, 0, 255], type='', swap='sso_kpt6'),
-         62: dict(name='sso_kpt5', id=62, color=[128, 0, 255], type='', swap='sso_kpt3'),
-         63: dict(name='sso_kpt6', id=63, color=[128, 0, 255], type='', swap='sso_kpt4'),
-         64: dict(name='sso_kpt7', id=64, color=[128, 0, 255], type='', swap='sso_kpt25'),
-         65: dict(name='sso_kpt8', id=65, color=[128, 0, 255], type='', swap='sso_kpt24'),
-         66: dict(name='sso_kpt9', id=66, color=[128, 0, 255], type='', swap='sso_kpt23'),
-         67: dict(name='sso_kpt10', id=67, color=[128, 0, 255], type='', swap='sso_kpt22'),
-         68: dict(name='sso_kpt11', id=68, color=[128, 0, 255], type='', swap='sso_kpt21'),
-         69: dict(name='sso_kpt12', id=69, color=[128, 0, 255], type='', swap='sso_kpt20'),
-         70: dict(name='sso_kpt13', id=70, color=[128, 0, 255], type='', swap='sso_kpt19'),
-         71: dict(name='sso_kpt14', id=71, color=[128, 0, 255], type='', swap='sso_kpt18'),
-         72: dict(name='sso_kpt15', id=72, color=[128, 0, 255], type='', swap='sso_kpt17'),
-         73: dict(name='sso_kpt16', id=73, color=[128, 0, 255], type='', swap='sso_kpt29'),
-         74: dict(name='sso_kpt17', id=74, color=[128, 0, 255], type='', swap='sso_kpt15'),
-         75: dict(name='sso_kpt18', id=75, color=[128, 0, 255], type='', swap='sso_kpt14'),
-         76: dict(name='sso_kpt19', id=76, color=[128, 0, 255], type='', swap='sso_kpt13'),
-         77: dict(name='sso_kpt20', id=77, color=[128, 0, 255], type='', swap='sso_kpt12'),
-         78: dict(name='sso_kpt21', id=78, color=[128, 0, 255], type='', swap='sso_kpt11'),
-         79: dict(name='sso_kpt22', id=79, color=[128, 0, 255], type='', swap='sso_kpt10'),
-         80: dict(name='sso_kpt23', id=80, color=[128, 0, 255], type='', swap='sso_kpt9'),
-         81: dict(name='sso_kpt24', id=81, color=[128, 0, 255], type='', swap='sso_kpt8'),
-         82: dict(name='sso_kpt25', id=82, color=[128, 0, 255], type='', swap='sso_kpt7'),
-         83: dict(name='sso_kpt26', id=83, color=[128, 0, 255], type='', swap='sso_kpt2'),
-         84: dict(name='sso_kpt27', id=84, color=[128, 0, 255], type='', swap='sso_kpt30'),
-         85: dict(name='sso_kpt28', id=85, color=[128, 0, 255], type='', swap='sso_kpt31'),
-         86: dict(name='sso_kpt29', id=86, color=[128, 0, 255], type='', swap='sso_kpt16'),
-         87: dict(name='sso_kpt30', id=87, color=[128, 0, 255], type='', swap='sso_kpt27'),
-         88: dict(name='sso_kpt31', id=88, color=[128, 0, 255], type='', swap='sso_kpt28'),
-         89: dict(name='lso_kpt1', id=89, color=[0, 128, 255], type='', swap=''),
-         90: dict(name='lso_kpt2', id=90, color=[0, 128, 255], type='', swap='lso_kpt6'),
-         91: dict(name='lso_kpt3', id=91, color=[0, 128, 255], type='', swap='lso_kpt5'),
-         92: dict(name='lso_kpt4', id=92, color=[0, 128, 255], type='', swap='lso_kpt34'),
-         93: dict(name='lso_kpt5', id=93, color=[0, 128, 255], type='', swap='lso_kpt3'),
-         94: dict(name='lso_kpt6', id=94, color=[0, 128, 255], type='', swap='lso_kpt2'),
-         95: dict(name='lso_kpt7', id=95, color=[0, 128, 255], type='', swap='lso_kpt33'),
-         96: dict(name='lso_kpt8', id=96, color=[0, 128, 255], type='', swap='lso_kpt32'),
-         97: dict(name='lso_kpt9', id=97, color=[0, 128, 255], type='', swap='lso_kpt31'),
-         98: dict(name='lso_kpt10', id=98, color=[0, 128, 255], type='', swap='lso_kpt30'),
-         99: dict(name='lso_kpt11', id=99, color=[0, 128, 255], type='', swap='lso_kpt29'),
-         100: dict(name='lso_kpt12', id=100, color=[0, 128, 255], type='', swap='lso_kpt28'),
-         101: dict(name='lso_kpt13', id=101, color=[0, 128, 255], type='', swap='lso_kpt27'),
-         102: dict(name='lso_kpt14', id=102, color=[0, 128, 255], type='', swap='lso_kpt26'),
-         103: dict(name='lso_kpt15', id=103, color=[0, 128, 255], type='', swap='lso_kpt25'),
-         104: dict(name='lso_kpt16', id=104, color=[0, 128, 255], type='', swap='lso_kpt24'),
-         105: dict(name='lso_kpt17', id=105, color=[0, 128, 255], type='', swap='lso_kpt23'),
-         106: dict(name='lso_kpt18', id=106, color=[0, 128, 255], type='', swap='lso_kpt22'),
-         107: dict(name='lso_kpt19', id=107, color=[0, 128, 255], type='', swap='lso_kpt21'),
-         108: dict(name='lso_kpt20', id=108, color=[0, 128, 255], type='', swap='lso_kpt37'),
-         109: dict(name='lso_kpt21', id=109, color=[0, 128, 255], type='', swap='lso_kpt19'),
-         110: dict(name='lso_kpt22', id=110, color=[0, 128, 255], type='', swap='lso_kpt18'),
-         111: dict(name='lso_kpt23', id=111, color=[0, 128, 255], type='', swap='lso_kpt17'),
-         112: dict(name='lso_kpt24', id=112, color=[0, 128, 255], type='', swap='lso_kpt16'),
-         113: dict(name='lso_kpt25', id=113, color=[0, 128, 255], type='', swap='lso_kpt15'),
-         114: dict(name='lso_kpt26', id=114, color=[0, 128, 255], type='', swap='lso_kpt14'),
-         115: dict(name='lso_kpt27', id=115, color=[0, 128, 255], type='', swap='lso_kpt13'),
-         116: dict(name='lso_kpt28', id=116, color=[0, 128, 255], type='', swap='lso_kpt12'),
-         117: dict(name='lso_kpt29', id=117, color=[0, 128, 255], type='', swap='lso_kpt11'),
-         118: dict(name='lso_kpt30', id=118, color=[0, 128, 255], type='', swap='lso_kpt10'),
-         119: dict(name='lso_kpt31', id=119, color=[0, 128, 255], type='', swap='lso_kpt9'),
-         120: dict(name='lso_kpt32', id=120, color=[0, 128, 255], type='', swap='lso_kpt8'),
-         121: dict(name='lso_kpt33', id=121, color=[0, 128, 255], type='', swap='lso_kpt7'),
-         122: dict(name='lso_kpt34', id=122, color=[0, 128, 255], type='', swap='lso_kpt4'),
-         123: dict(name='lso_kpt35', id=123, color=[0, 128, 255], type='', swap='lso_kpt38'),
-         124: dict(name='lso_kpt36', id=124, color=[0, 128, 255], type='', swap='lso_kpt39'),
-         125: dict(name='lso_kpt37', id=125, color=[0, 128, 255], type='', swap='lso_kpt20'),
-         126: dict(name='lso_kpt38', id=126, color=[0, 128, 255], type='', swap='lso_kpt35'),
-         127: dict(name='lso_kpt39', id=127, color=[0, 128, 255], type='', swap='lso_kpt36'),
-         128: dict(name='vest_kpt1', id=128, color=[0, 128, 128], type='', swap=''),
-         129: dict(name='vest_kpt2', id=129, color=[0, 128, 128], type='', swap='vest_kpt6'),
-         130: dict(name='vest_kpt3', id=130, color=[0, 128, 128], type='', swap='vest_kpt5'),
-         131: dict(name='vest_kpt4', id=131, color=[0, 128, 128], type='', swap=''),
-         132: dict(name='vest_kpt5', id=132, color=[0, 128, 128], type='', swap='vest_kpt3'),
-         133: dict(name='vest_kpt6', id=133, color=[0, 128, 128], type='', swap='vest_kpt2'),
-         134: dict(name='vest_kpt7', id=134, color=[0, 128, 128], type='', swap='vest_kpt15'),
-         135: dict(name='vest_kpt8', id=135, color=[0, 128, 128], type='', swap='vest_kpt14'),
-         136: dict(name='vest_kpt9', id=136, color=[0, 128, 128], type='', swap='vest_kpt13'),
-         137: dict(name='vest_kpt10', id=137, color=[0, 128, 128], type='', swap='vest_kpt12'),
-         138: dict(name='vest_kpt11', id=138, color=[0, 128, 128], type='', swap=''),
-         139: dict(name='vest_kpt12', id=139, color=[0, 128, 128], type='', swap='vest_kpt10'),
-         140: dict(name='vest_kpt13', id=140, color=[0, 128, 128], type='', swap=''),
-         141: dict(name='vest_kpt14', id=141, color=[0, 128, 128], type='', swap='vest_kpt8'),
-         142: dict(name='vest_kpt15', id=142, color=[0, 128, 128], type='', swap='vest_kpt7'),
-         143: dict(name='sling_kpt1', id=143, color=[0, 0, 128], type='', swap=''),
-         144: dict(name='sling_kpt2', id=144, color=[0, 0, 128], type='', swap='sling_kpt6'),
-         145: dict(name='sling_kpt3', id=145, color=[0, 0, 128], type='', swap='sling_kpt5'),
-         146: dict(name='sling_kpt4', id=146, color=[0, 0, 128], type='', swap=''),
-         147: dict(name='sling_kpt5', id=147, color=[0, 0, 128], type='', swap='sling_kpt3'),
-         148: dict(name='sling_kpt6', id=148, color=[0, 0, 128], type='', swap='sling_kpt2'),
-         149: dict(name='sling_kpt7', id=149, color=[0, 0, 128], type='', swap='sling_kpt15'),
-         150: dict(name='sling_kpt8', id=150, color=[0, 0, 128], type='', swap='sling_kpt14'),
-         151: dict(name='sling_kpt9', id=151, color=[0, 0, 128], type='', swap='sling_kpt13'),
-         152: dict(name='sling_kpt10', id=152, color=[0, 0, 128], type='', swap='sling_kpt12'),
-         153: dict(name='sling_kpt11', id=153, color=[0, 0, 128], type='', swap=''),
-         154: dict(name='sling_kpt12', id=154, color=[0, 0, 128], type='', swap='sling_kpt10'),
-         155: dict(name='sling_kpt13', id=155, color=[0, 0, 128], type='', swap='sling_kpt9'),
-         156: dict(name='sling_kpt14', id=156, color=[0, 0, 128], type='', swap='sling_kpt8'),
-         157: dict(name='sling_kpt15', id=157, color=[0, 0, 128], type='', swap='sling_kpt7'),
-         158: dict(name='shorts_kpt1', id=158, color=[128, 128, 128], type='', swap='shorts_kpt3'),
-         159: dict(name='shorts_kpt2', id=159, color=[128, 128, 128], type='', swap=''),
-         160: dict(name='shorts_kpt3', id=160, color=[128, 128, 128], type='', swap='shorts_kpt1'),
-         161: dict(name='shorts_kpt4', id=161, color=[128, 128, 128], type='', swap='shorts_kpt10'),
-         162: dict(name='shorts_kpt5', id=162, color=[128, 128, 128], type='', swap='shorts_kpt9'),
-         163: dict(name='shorts_kpt6', id=163, color=[128, 128, 128], type='', swap='shorts_kpt8'),
-         164: dict(name='shorts_kpt7', id=164, color=[128, 128, 128], type='', swap=''),
-         165: dict(name='shorts_kpt8', id=165, color=[128, 128, 128], type='', swap='shorts_kpt6'),
-         166: dict(name='shorts_kpt9', id=166, color=[128, 128, 128], type='', swap='shorts_kpt5'),
-         167: dict(name='shorts_kpt10', id=167, color=[128, 128, 128], type='', swap='shorts_kpt4'),
-         168: dict(name='trousers_kpt1', id=168, color=[128, 0, 128], type='', swap='trousers_kpt3'),
-         169: dict(name='trousers_kpt2', id=169, color=[128, 0, 128], type='', swap=''),
-         170: dict(name='trousers_kpt3', id=170, color=[128, 0, 128], type='', swap='trousers_kpt1'),
-         171: dict(name='trousers_kpt4', id=171, color=[128, 0, 128], type='', swap='trousers_kpt14'),
-         172: dict(name='trousers_kpt5', id=172, color=[128, 0, 128], type='', swap='trousers_kpt13'),
-         173: dict(name='trousers_kpt6', id=173, color=[128, 0, 128], type='', swap='trousers_kpt12'),
-         174: dict(name='trousers_kpt7', id=174, color=[128, 0, 128], type='', swap='trousers_kpt11'),
-         175: dict(name='trousers_kpt8', id=175, color=[128, 0, 128], type='', swap='trousers_kpt10'),
-         176: dict(name='trousers_kpt9', id=176, color=[128, 0, 128], type='', swap=''),
-         177: dict(name='trousers_kpt10', id=177, color=[128, 0, 128], type='', swap='trousers_kpt8'),
-         178: dict(name='trousers_kpt11', id=178, color=[128, 0, 128], type='', swap='trousers_kpt7'),
-         179: dict(name='trousers_kpt12', id=179, color=[128, 0, 128], type='', swap='trousers_kpt6'),
-         180: dict(name='trousers_kpt13', id=180, color=[128, 0, 128], type='', swap='trousers_kpt5'),
-         181: dict(name='trousers_kpt14', id=181, color=[128, 0, 128], type='', swap='trousers_kpt4'),
-         182: dict(name='skirt_kpt1', id=182, color=[64, 128, 128], type='', swap='skirt_kpt3'),
-         183: dict(name='skirt_kpt2', id=183, color=[64, 128, 128], type='', swap=''),
-         184: dict(name='skirt_kpt3', id=184, color=[64, 128, 128], type='', swap='skirt_kpt1'),
-         185: dict(name='skirt_kpt4', id=185, color=[64, 128, 128], type='', swap='skirt_kpt8'),
-         186: dict(name='skirt_kpt5', id=186, color=[64, 128, 128], type='', swap='skirt_kpt7'),
-         187: dict(name='skirt_kpt6', id=187, color=[64, 128, 128], type='', swap=''),
-         188: dict(name='skirt_kpt7', id=188, color=[64, 128, 128], type='', swap='skirt_kpt5'),
-         189: dict(name='skirt_kpt8', id=189, color=[64, 128, 128], type='', swap='skirt_kpt4'),
-         190: dict(name='ssd_kpt1', id=190, color=[64, 64, 128], type='', swap=''),
-         191: dict(name='ssd_kpt2', id=191, color=[64, 64, 128], type='', swap='ssd_kpt6'),
-         192: dict(name='ssd_kpt3', id=192, color=[64, 64, 128], type='', swap='ssd_kpt5'),
-         193: dict(name='ssd_kpt4', id=193, color=[64, 64, 128], type='', swap=''),
-         194: dict(name='ssd_kpt5', id=194, color=[64, 64, 128], type='', swap='ssd_kpt3'),
-         195: dict(name='ssd_kpt6', id=195, color=[64, 64, 128], type='', swap='ssd_kpt2'),
-         196: dict(name='ssd_kpt7', id=196, color=[64, 64, 128], type='', swap='ssd_kpt29'),
-         197: dict(name='ssd_kpt8', id=197, color=[64, 64, 128], type='', swap='ssd_kpt28'),
-         198: dict(name='ssd_kpt9', id=198, color=[64, 64, 128], type='', swap='ssd_kpt27'),
-         199: dict(name='ssd_kpt10', id=199, color=[64, 64, 128], type='', swap='ssd_kpt26'),
-         200: dict(name='ssd_kpt11', id=200, color=[64, 64, 128], type='', swap='ssd_kpt25'),
-         201: dict(name='ssd_kpt12', id=201, color=[64, 64, 128], type='', swap='ssd_kpt24'),
-         202: dict(name='ssd_kpt13', id=202, color=[64, 64, 128], type='', swap='ssd_kpt23'),
-         203: dict(name='ssd_kpt14', id=203, color=[64, 64, 128], type='', swap='ssd_kpt22'),
-         204: dict(name='ssd_kpt15', id=204, color=[64, 64, 128], type='', swap='ssd_kpt21'),
-         205: dict(name='ssd_kpt16', id=205, color=[64, 64, 128], type='', swap='ssd_kpt20'),
-         206: dict(name='ssd_kpt17', id=206, color=[64, 64, 128], type='', swap='ssd_kpt19'),
-         207: dict(name='ssd_kpt18', id=207, color=[64, 64, 128], type='', swap=''),
-         208: dict(name='ssd_kpt19', id=208, color=[64, 64, 128], type='', swap='ssd_kpt17'),
-         209: dict(name='ssd_kpt20', id=209, color=[64, 64, 128], type='', swap='ssd_kpt16'),
-         210: dict(name='ssd_kpt21', id=210, color=[64, 64, 128], type='', swap='ssd_kpt15'),
-         211: dict(name='ssd_kpt22', id=211, color=[64, 64, 128], type='', swap='ssd_kpt14'),
-         212: dict(name='ssd_kpt23', id=212, color=[64, 64, 128], type='', swap='ssd_kpt13'),
-         213: dict(name='ssd_kpt24', id=213, color=[64, 64, 128], type='', swap='ssd_kpt12'),
-         214: dict(name='ssd_kpt25', id=214, color=[64, 64, 128], type='', swap='ssd_kpt11'),
-         215: dict(name='ssd_kpt26', id=215, color=[64, 64, 128], type='', swap='ssd_kpt10'),
-         216: dict(name='ssd_kpt27', id=216, color=[64, 64, 128], type='', swap='ssd_kpt9'),
-         217: dict(name='ssd_kpt28', id=217, color=[64, 64, 128], type='', swap='ssd_kpt8'),
-         218: dict(name='ssd_kpt29', id=218, color=[64, 64, 128], type='', swap='ssd_kpt7'),
-         219: dict(name='lsd_kpt1', id=219, color=[128, 64, 0], type='', swap=''),
-         220: dict(name='lsd_kpt2', id=220, color=[128, 64, 0], type='', swap='lsd_kpt6'),
-         221: dict(name='lsd_kpt3', id=221, color=[128, 64, 0], type='', swap='lsd_kpt5'),
-         222: dict(name='lsd_kpt4', id=222, color=[128, 64, 0], type='', swap=''),
-         223: dict(name='lsd_kpt5', id=223, color=[128, 64, 0], type='', swap='lsd_kpt3'),
-         224: dict(name='lsd_kpt6', id=224, color=[128, 64, 0], type='', swap='lsd_kpt2'),
-         225: dict(name='lsd_kpt7', id=225, color=[128, 64, 0], type='', swap='lsd_kpt37'),
-         226: dict(name='lsd_kpt8', id=226, color=[128, 64, 0], type='', swap='lsd_kpt36'),
-         227: dict(name='lsd_kpt9', id=227, color=[128, 64, 0], type='', swap='lsd_kpt35'),
-         228: dict(name='lsd_kpt10', id=228, color=[128, 64, 0], type='', swap='lsd_kpt34'),
-         229: dict(name='lsd_kpt11', id=229, color=[128, 64, 0], type='', swap='lsd_kpt33'),
-         230: dict(name='lsd_kpt12', id=230, color=[128, 64, 0], type='', swap='lsd_kpt32'),
-         231: dict(name='lsd_kpt13', id=231, color=[128, 64, 0], type='', swap='lsd_kpt31'),
-         232: dict(name='lsd_kpt14', id=232, color=[128, 64, 0], type='', swap='lsd_kpt30'),
-         233: dict(name='lsd_kpt15', id=233, color=[128, 64, 0], type='', swap='lsd_kpt29'),
-         234: dict(name='lsd_kpt16', id=234, color=[128, 64, 0], type='', swap='lsd_kpt28'),
-         235: dict(name='lsd_kpt17', id=235, color=[128, 64, 0], type='', swap='lsd_kpt27'),
-         236: dict(name='lsd_kpt18', id=236, color=[128, 64, 0], type='', swap='lsd_kpt26'),
-         237: dict(name='lsd_kpt19', id=237, color=[128, 64, 0], type='', swap='lsd_kpt25'),
-         238: dict(name='lsd_kpt20', id=238, color=[128, 64, 0], type='', swap='lsd_kpt24'),
-         239: dict(name='lsd_kpt21', id=239, color=[128, 64, 0], type='', swap='lsd_kpt23'),
-         240: dict(name='lsd_kpt22', id=240, color=[128, 64, 0], type='', swap=''),
-         241: dict(name='lsd_kpt23', id=241, color=[128, 64, 0], type='', swap='lsd_kpt21'),
-         242: dict(name='lsd_kpt24', id=242, color=[128, 64, 0], type='', swap='lsd_kpt20'),
-         243: dict(name='lsd_kpt25', id=243, color=[128, 64, 0], type='', swap='lsd_kpt19'),
-         244: dict(name='lsd_kpt26', id=244, color=[128, 64, 0], type='', swap='lsd_kpt18'),
-         245: dict(name='lsd_kpt27', id=245, color=[128, 64, 0], type='', swap='lsd_kpt17'),
-         246: dict(name='lsd_kpt28', id=246, color=[128, 64, 0], type='', swap='lsd_kpt16'),
-         247: dict(name='lsd_kpt29', id=247, color=[128, 64, 0], type='', swap='lsd_kpt15'),
-         248: dict(name='lsd_kpt30', id=248, color=[128, 64, 0], type='', swap='lsd_kpt14'),
-         249: dict(name='lsd_kpt31', id=249, color=[128, 64, 0], type='', swap='lsd_kpt13'),
-         250: dict(name='lsd_kpt32', id=250, color=[128, 64, 0], type='', swap='lsd_kpt12'),
-         251: dict(name='lsd_kpt33', id=251, color=[128, 64, 0], type='', swap='lsd_kpt11'),
-         252: dict(name='lsd_kpt34', id=252, color=[128, 64, 0], type='', swap='lsd_kpt10'),
-         253: dict(name='lsd_kpt35', id=253, color=[128, 64, 0], type='', swap='lsd_kpt9'),
-         254: dict(name='lsd_kpt36', id=254, color=[128, 64, 0], type='', swap='lsd_kpt8'),
-         255: dict(name='lsd_kpt37', id=255, color=[128, 64, 0], type='', swap='lsd_kpt7'),
-         256: dict(name='vd_kpt1', id=256, color=[128, 64, 255], type='', swap=''),
-         257: dict(name='vd_kpt2', id=257, color=[128, 64, 255], type='', swap='vd_kpt6'),
-         258: dict(name='vd_kpt3', id=258, color=[128, 64, 255], type='', swap='vd_kpt5'),
-         259: dict(name='vd_kpt4', id=259, color=[128, 64, 255], type='', swap=''),
-         260: dict(name='vd_kpt5', id=260, color=[128, 64, 255], type='', swap='vd_kpt3'),
-         261: dict(name='vd_kpt6', id=261, color=[128, 64, 255], type='', swap='vd_kpt2'),
-         262: dict(name='vd_kpt7', id=262, color=[128, 64, 255], type='', swap='vd_kpt19'),
-         263: dict(name='vd_kpt8', id=263, color=[128, 64, 255], type='', swap='vd_kpt18'),
-         264: dict(name='vd_kpt9', id=264, color=[128, 64, 255], type='', swap='vd_kpt17'),
-         265: dict(name='vd_kpt10', id=265, color=[128, 64, 255], type='', swap='vd_kpt16'),
-         266: dict(name='vd_kpt11', id=266, color=[128, 64, 255], type='', swap='vd_kpt15'),
-         267: dict(name='vd_kpt12', id=267, color=[128, 64, 255], type='', swap='vd_kpt14'),
-         268: dict(name='vd_kpt13', id=268, color=[128, 64, 255], type='', swap=''),
-         269: dict(name='vd_kpt14', id=269, color=[128, 64, 255], type='', swap='vd_kpt12'),
-         270: dict(name='vd_kpt15', id=270, color=[128, 64, 255], type='', swap='vd_kpt11'),
-         271: dict(name='vd_kpt16', id=271, color=[128, 64, 255], type='', swap='vd_kpt10'),
-         272: dict(name='vd_kpt17', id=272, color=[128, 64, 255], type='', swap='vd_kpt9'),
-         273: dict(name='vd_kpt18', id=273, color=[128, 64, 255], type='', swap='vd_kpt8'),
-         274: dict(name='vd_kpt19', id=274, color=[128, 64, 255], type='', swap='vd_kpt7'),
-         275: dict(name='sd_kpt1', id=275, color=[128, 64, 0], type='', swap=''),
-         276: dict(name='sd_kpt2', id=276, color=[128, 64, 0], type='', swap='sd_kpt6'),
-         277: dict(name='sd_kpt3', id=277, color=[128, 64, 0], type='', swap='sd_kpt5'),
-         278: dict(name='sd_kpt4', id=278, color=[128, 64, 0], type='', swap=''),
-         279: dict(name='sd_kpt5', id=279, color=[128, 64, 0], type='', swap='sd_kpt3'),
-         280: dict(name='sd_kpt6', id=280, color=[128, 64, 0], type='', swap='sd_kpt2'),
-         281: dict(name='sd_kpt7', id=281, color=[128, 64, 0], type='', swap='sd_kpt19'),
-         282: dict(name='sd_kpt8', id=282, color=[128, 64, 0], type='', swap='sd_kpt18'),
-         283: dict(name='sd_kpt9', id=283, color=[128, 64, 0], type='', swap='sd_kpt17'),
-         284: dict(name='sd_kpt10', id=284, color=[128, 64, 0], type='', swap='sd_kpt16'),
-         285: dict(name='sd_kpt11', id=285, color=[128, 64, 0], type='', swap='sd_kpt15'),
-         286: dict(name='sd_kpt12', id=286, color=[128, 64, 0], type='', swap='sd_kpt14'),
-         287: dict(name='sd_kpt13', id=287, color=[128, 64, 0], type='', swap=''),
-         288: dict(name='sd_kpt14', id=288, color=[128, 64, 0], type='', swap='sd_kpt12'),
-         289: dict(name='sd_kpt15', id=289, color=[128, 64, 0], type='', swap='sd_kpt11'),
-         290: dict(name='sd_kpt16', id=290, color=[128, 64, 0], type='', swap='sd_kpt10'),
-         291: dict(name='sd_kpt17', id=291, color=[128, 64, 0], type='', swap='sd_kpt9'),
-         292: dict(name='sd_kpt18', id=292, color=[128, 64, 0], type='', swap='sd_kpt8'),
-         293: dict(name='sd_kpt19', id=293, color=[128, 64, 0], type='', swap='sd_kpt7')
-     }),
-     skeleton_info=dict({
-         0: dict(link=('sss_kpt1', 'sss_kpt2'), id=0, color=[255, 128, 0]),
-         1: dict(link=('sss_kpt2', 'sss_kpt7'), id=1, color=[255, 128, 0]),
-         2: dict(link=('sss_kpt7', 'sss_kpt8'), id=2, color=[255, 128, 0]),
-         3: dict(link=('sss_kpt8', 'sss_kpt9'), id=3, color=[255, 128, 0]),
-         4: dict(link=('sss_kpt9', 'sss_kpt10'), id=4, color=[255, 128, 0]),
-         5: dict(link=('sss_kpt10', 'sss_kpt11'), id=5, color=[255, 128, 0]),
-         6: dict(link=('sss_kpt11', 'sss_kpt12'), id=6, color=[255, 128, 0]),
-         7: dict(link=('sss_kpt12', 'sss_kpt13'), id=7, color=[255, 128, 0]),
-         8: dict(link=('sss_kpt13', 'sss_kpt14'), id=8, color=[255, 128, 0]),
-         9: dict(link=('sss_kpt14', 'sss_kpt15'), id=9, color=[255, 128, 0]),
-         10: dict(link=('sss_kpt15', 'sss_kpt16'), id=10, color=[255, 128, 0]),
-         11: dict(link=('sss_kpt16', 'sss_kpt17'), id=11, color=[255, 128, 0]),
-         12: dict(link=('sss_kpt17', 'sss_kpt18'), id=12, color=[255, 128, 0]),
-         13: dict(link=('sss_kpt18', 'sss_kpt19'), id=13, color=[255, 128, 0]),
-         14: dict(link=('sss_kpt19', 'sss_kpt20'), id=14, color=[255, 128, 0]),
-         15: dict(link=('sss_kpt20', 'sss_kpt21'), id=15, color=[255, 128, 0]),
-         16: dict(link=('sss_kpt21', 'sss_kpt22'), id=16, color=[255, 128, 0]),
-         17: dict(link=('sss_kpt22', 'sss_kpt23'), id=17, color=[255, 128, 0]),
-         18: dict(link=('sss_kpt23', 'sss_kpt24'), id=18, color=[255, 128, 0]),
-         19: dict(link=('sss_kpt24', 'sss_kpt25'), id=19, color=[255, 128, 0]),
-         20: dict(link=('sss_kpt25', 'sss_kpt6'), id=20, color=[255, 128, 0]),
-         21: dict(link=('sss_kpt6', 'sss_kpt1'), id=21, color=[255, 128, 0]),
-         22: dict(link=('sss_kpt2', 'sss_kpt3'), id=22, color=[255, 128, 0]),
-         23: dict(link=('sss_kpt3', 'sss_kpt4'), id=23, color=[255, 128, 0]),
-         24: dict(link=('sss_kpt4', 'sss_kpt5'), id=24, color=[255, 128, 0]),
-         25: dict(link=('sss_kpt5', 'sss_kpt6'), id=25, color=[255, 128, 0]),
-         26: dict(link=('lss_kpt1', 'lss_kpt2'), id=26, color=[255, 0, 128]),
-         27: dict(link=('lss_kpt2', 'lss_kpt7'), id=27, color=[255, 0, 128]),
-         28: dict(link=('lss_kpt7', 'lss_kpt8'), id=28, color=[255, 0, 128]),
-         29: dict(link=('lss_kpt8', 'lss_kpt9'), id=29, color=[255, 0, 128]),
-         30: dict(link=('lss_kpt9', 'lss_kpt10'), id=30, color=[255, 0, 128]),
-         31:
2036
- dict(link=('lss_kpt10', 'lss_kpt11'), id=31, color=[255, 0, 128]),
2037
- 32:
2038
- dict(link=('lss_kpt11', 'lss_kpt12'), id=32, color=[255, 0, 128]),
2039
- 33:
2040
- dict(link=('lss_kpt12', 'lss_kpt13'), id=33, color=[255, 0, 128]),
2041
- 34:
2042
- dict(link=('lss_kpt13', 'lss_kpt14'), id=34, color=[255, 0, 128]),
2043
- 35:
2044
- dict(link=('lss_kpt14', 'lss_kpt15'), id=35, color=[255, 0, 128]),
2045
- 36:
2046
- dict(link=('lss_kpt15', 'lss_kpt16'), id=36, color=[255, 0, 128]),
2047
- 37:
2048
- dict(link=('lss_kpt16', 'lss_kpt17'), id=37, color=[255, 0, 128]),
2049
- 38:
2050
- dict(link=('lss_kpt17', 'lss_kpt18'), id=38, color=[255, 0, 128]),
2051
- 39:
2052
- dict(link=('lss_kpt18', 'lss_kpt19'), id=39, color=[255, 0, 128]),
2053
- 40:
2054
- dict(link=('lss_kpt19', 'lss_kpt20'), id=40, color=[255, 0, 128]),
2055
- 41:
2056
- dict(link=('lss_kpt20', 'lss_kpt21'), id=41, color=[255, 0, 128]),
2057
- 42:
2058
- dict(link=('lss_kpt21', 'lss_kpt22'), id=42, color=[255, 0, 128]),
2059
- 43:
2060
- dict(link=('lss_kpt22', 'lss_kpt23'), id=43, color=[255, 0, 128]),
2061
- 44:
2062
- dict(link=('lss_kpt23', 'lss_kpt24'), id=44, color=[255, 0, 128]),
2063
- 45:
2064
- dict(link=('lss_kpt24', 'lss_kpt25'), id=45, color=[255, 0, 128]),
2065
- 46:
2066
- dict(link=('lss_kpt25', 'lss_kpt26'), id=46, color=[255, 0, 128]),
2067
- 47:
2068
- dict(link=('lss_kpt26', 'lss_kpt27'), id=47, color=[255, 0, 128]),
2069
- 48:
2070
- dict(link=('lss_kpt27', 'lss_kpt28'), id=48, color=[255, 0, 128]),
2071
- 49:
2072
- dict(link=('lss_kpt28', 'lss_kpt29'), id=49, color=[255, 0, 128]),
2073
- 50:
2074
- dict(link=('lss_kpt29', 'lss_kpt30'), id=50, color=[255, 0, 128]),
2075
- 51:
2076
- dict(link=('lss_kpt30', 'lss_kpt31'), id=51, color=[255, 0, 128]),
2077
- 52:
2078
- dict(link=('lss_kpt31', 'lss_kpt32'), id=52, color=[255, 0, 128]),
2079
- 53:
2080
- dict(link=('lss_kpt32', 'lss_kpt33'), id=53, color=[255, 0, 128]),
2081
- 54:
2082
- dict(link=('lss_kpt33', 'lss_kpt6'), id=54, color=[255, 0, 128]),
2083
- 55:
2084
- dict(link=('lss_kpt6', 'lss_kpt5'), id=55, color=[255, 0, 128]),
2085
- 56:
2086
- dict(link=('lss_kpt5', 'lss_kpt4'), id=56, color=[255, 0, 128]),
2087
- 57:
2088
- dict(link=('lss_kpt4', 'lss_kpt3'), id=57, color=[255, 0, 128]),
2089
- 58:
2090
- dict(link=('lss_kpt3', 'lss_kpt2'), id=58, color=[255, 0, 128]),
2091
- 59:
2092
- dict(link=('lss_kpt6', 'lss_kpt1'), id=59, color=[255, 0, 128]),
2093
- 60:
2094
- dict(link=('sso_kpt1', 'sso_kpt4'), id=60, color=[128, 0, 255]),
2095
- 61:
2096
- dict(link=('sso_kpt4', 'sso_kpt7'), id=61, color=[128, 0, 255]),
2097
- 62:
2098
- dict(link=('sso_kpt7', 'sso_kpt8'), id=62, color=[128, 0, 255]),
2099
- 63:
2100
- dict(link=('sso_kpt8', 'sso_kpt9'), id=63, color=[128, 0, 255]),
2101
- 64:
2102
- dict(link=('sso_kpt9', 'sso_kpt10'), id=64, color=[128, 0, 255]),
2103
- 65:
2104
- dict(link=('sso_kpt10', 'sso_kpt11'), id=65, color=[128, 0, 255]),
2105
- 66:
2106
- dict(link=('sso_kpt11', 'sso_kpt12'), id=66, color=[128, 0, 255]),
2107
- 67:
2108
- dict(link=('sso_kpt12', 'sso_kpt13'), id=67, color=[128, 0, 255]),
2109
- 68:
2110
- dict(link=('sso_kpt13', 'sso_kpt14'), id=68, color=[128, 0, 255]),
2111
- 69:
2112
- dict(link=('sso_kpt14', 'sso_kpt15'), id=69, color=[128, 0, 255]),
2113
- 70:
2114
- dict(link=('sso_kpt15', 'sso_kpt16'), id=70, color=[128, 0, 255]),
2115
- 71:
2116
- dict(link=('sso_kpt16', 'sso_kpt31'), id=71, color=[128, 0, 255]),
2117
- 72:
2118
- dict(link=('sso_kpt31', 'sso_kpt30'), id=72, color=[128, 0, 255]),
2119
- 73:
2120
- dict(link=('sso_kpt30', 'sso_kpt2'), id=73, color=[128, 0, 255]),
2121
- 74:
2122
- dict(link=('sso_kpt2', 'sso_kpt3'), id=74, color=[128, 0, 255]),
2123
- 75:
2124
- dict(link=('sso_kpt3', 'sso_kpt4'), id=75, color=[128, 0, 255]),
2125
- 76:
2126
- dict(link=('sso_kpt1', 'sso_kpt6'), id=76, color=[128, 0, 255]),
2127
- 77:
2128
- dict(link=('sso_kpt6', 'sso_kpt25'), id=77, color=[128, 0, 255]),
2129
- 78:
2130
- dict(link=('sso_kpt25', 'sso_kpt24'), id=78, color=[128, 0, 255]),
2131
- 79:
2132
- dict(link=('sso_kpt24', 'sso_kpt23'), id=79, color=[128, 0, 255]),
2133
- 80:
2134
- dict(link=('sso_kpt23', 'sso_kpt22'), id=80, color=[128, 0, 255]),
2135
- 81:
2136
- dict(link=('sso_kpt22', 'sso_kpt21'), id=81, color=[128, 0, 255]),
2137
- 82:
2138
- dict(link=('sso_kpt21', 'sso_kpt20'), id=82, color=[128, 0, 255]),
2139
- 83:
2140
- dict(link=('sso_kpt20', 'sso_kpt19'), id=83, color=[128, 0, 255]),
2141
- 84:
2142
- dict(link=('sso_kpt19', 'sso_kpt18'), id=84, color=[128, 0, 255]),
2143
- 85:
2144
- dict(link=('sso_kpt18', 'sso_kpt17'), id=85, color=[128, 0, 255]),
2145
- 86:
2146
- dict(link=('sso_kpt17', 'sso_kpt29'), id=86, color=[128, 0, 255]),
2147
- 87:
2148
- dict(link=('sso_kpt29', 'sso_kpt28'), id=87, color=[128, 0, 255]),
2149
- 88:
2150
- dict(link=('sso_kpt28', 'sso_kpt27'), id=88, color=[128, 0, 255]),
2151
- 89:
2152
- dict(link=('sso_kpt27', 'sso_kpt26'), id=89, color=[128, 0, 255]),
2153
- 90:
2154
- dict(link=('sso_kpt26', 'sso_kpt5'), id=90, color=[128, 0, 255]),
2155
- 91:
2156
- dict(link=('sso_kpt5', 'sso_kpt6'), id=91, color=[128, 0, 255]),
2157
- 92:
2158
- dict(link=('lso_kpt1', 'lso_kpt2'), id=92, color=[0, 128, 255]),
2159
- 93:
2160
- dict(link=('lso_kpt2', 'lso_kpt7'), id=93, color=[0, 128, 255]),
2161
- 94:
2162
- dict(link=('lso_kpt7', 'lso_kpt8'), id=94, color=[0, 128, 255]),
2163
- 95:
2164
- dict(link=('lso_kpt8', 'lso_kpt9'), id=95, color=[0, 128, 255]),
2165
- 96:
2166
- dict(link=('lso_kpt9', 'lso_kpt10'), id=96, color=[0, 128, 255]),
2167
- 97:
2168
- dict(link=('lso_kpt10', 'lso_kpt11'), id=97, color=[0, 128, 255]),
2169
- 98:
2170
- dict(link=('lso_kpt11', 'lso_kpt12'), id=98, color=[0, 128, 255]),
2171
- 99:
2172
- dict(link=('lso_kpt12', 'lso_kpt13'), id=99, color=[0, 128, 255]),
2173
- 100:
2174
- dict(link=('lso_kpt13', 'lso_kpt14'), id=100, color=[0, 128, 255]),
2175
- 101:
2176
- dict(link=('lso_kpt14', 'lso_kpt15'), id=101, color=[0, 128, 255]),
2177
- 102:
2178
- dict(link=('lso_kpt15', 'lso_kpt16'), id=102, color=[0, 128, 255]),
2179
- 103:
2180
- dict(link=('lso_kpt16', 'lso_kpt17'), id=103, color=[0, 128, 255]),
2181
- 104:
2182
- dict(link=('lso_kpt17', 'lso_kpt18'), id=104, color=[0, 128, 255]),
2183
- 105:
2184
- dict(link=('lso_kpt18', 'lso_kpt19'), id=105, color=[0, 128, 255]),
2185
- 106:
2186
- dict(link=('lso_kpt19', 'lso_kpt20'), id=106, color=[0, 128, 255]),
2187
- 107:
2188
- dict(link=('lso_kpt20', 'lso_kpt39'), id=107, color=[0, 128, 255]),
2189
- 108:
2190
- dict(link=('lso_kpt39', 'lso_kpt38'), id=108, color=[0, 128, 255]),
2191
- 109:
2192
- dict(link=('lso_kpt38', 'lso_kpt4'), id=109, color=[0, 128, 255]),
2193
- 110:
2194
- dict(link=('lso_kpt4', 'lso_kpt3'), id=110, color=[0, 128, 255]),
2195
- 111:
2196
- dict(link=('lso_kpt3', 'lso_kpt2'), id=111, color=[0, 128, 255]),
2197
- 112:
2198
- dict(link=('lso_kpt1', 'lso_kpt6'), id=112, color=[0, 128, 255]),
2199
- 113:
2200
- dict(link=('lso_kpt6', 'lso_kpt33'), id=113, color=[0, 128, 255]),
2201
- 114:
2202
- dict(link=('lso_kpt33', 'lso_kpt32'), id=114, color=[0, 128, 255]),
2203
- 115:
2204
- dict(link=('lso_kpt32', 'lso_kpt31'), id=115, color=[0, 128, 255]),
2205
- 116:
2206
- dict(link=('lso_kpt31', 'lso_kpt30'), id=116, color=[0, 128, 255]),
2207
- 117:
2208
- dict(link=('lso_kpt30', 'lso_kpt29'), id=117, color=[0, 128, 255]),
2209
- 118:
2210
- dict(link=('lso_kpt29', 'lso_kpt28'), id=118, color=[0, 128, 255]),
2211
- 119:
2212
- dict(link=('lso_kpt28', 'lso_kpt27'), id=119, color=[0, 128, 255]),
2213
- 120:
2214
- dict(link=('lso_kpt27', 'lso_kpt26'), id=120, color=[0, 128, 255]),
2215
- 121:
2216
- dict(link=('lso_kpt26', 'lso_kpt25'), id=121, color=[0, 128, 255]),
2217
- 122:
2218
- dict(link=('lso_kpt25', 'lso_kpt24'), id=122, color=[0, 128, 255]),
2219
- 123:
2220
- dict(link=('lso_kpt24', 'lso_kpt23'), id=123, color=[0, 128, 255]),
2221
- 124:
2222
- dict(link=('lso_kpt23', 'lso_kpt22'), id=124, color=[0, 128, 255]),
2223
- 125:
2224
- dict(link=('lso_kpt22', 'lso_kpt21'), id=125, color=[0, 128, 255]),
2225
- 126:
2226
- dict(link=('lso_kpt21', 'lso_kpt37'), id=126, color=[0, 128, 255]),
2227
- 127:
2228
- dict(link=('lso_kpt37', 'lso_kpt36'), id=127, color=[0, 128, 255]),
2229
- 128:
2230
- dict(link=('lso_kpt36', 'lso_kpt35'), id=128, color=[0, 128, 255]),
2231
- 129:
2232
- dict(link=('lso_kpt35', 'lso_kpt34'), id=129, color=[0, 128, 255]),
2233
- 130:
2234
- dict(link=('lso_kpt34', 'lso_kpt5'), id=130, color=[0, 128, 255]),
2235
- 131:
2236
- dict(link=('lso_kpt5', 'lso_kpt6'), id=131, color=[0, 128, 255]),
2237
- 132:
2238
- dict(link=('vest_kpt1', 'vest_kpt2'), id=132, color=[0, 128, 128]),
2239
- 133:
2240
- dict(link=('vest_kpt2', 'vest_kpt7'), id=133, color=[0, 128, 128]),
2241
- 134:
2242
- dict(link=('vest_kpt7', 'vest_kpt8'), id=134, color=[0, 128, 128]),
2243
- 135:
2244
- dict(link=('vest_kpt8', 'vest_kpt9'), id=135, color=[0, 128, 128]),
2245
- 136:
2246
- dict(link=('vest_kpt9', 'vest_kpt10'), id=136, color=[0, 128, 128]),
2247
- 137:
2248
- dict(link=('vest_kpt10', 'vest_kpt11'), id=137, color=[0, 128, 128]),
2249
- 138:
2250
- dict(link=('vest_kpt11', 'vest_kpt12'), id=138, color=[0, 128, 128]),
2251
- 139:
2252
- dict(link=('vest_kpt12', 'vest_kpt13'), id=139, color=[0, 128, 128]),
2253
- 140:
2254
- dict(link=('vest_kpt13', 'vest_kpt14'), id=140, color=[0, 128, 128]),
2255
- 141:
2256
- dict(link=('vest_kpt14', 'vest_kpt15'), id=141, color=[0, 128, 128]),
2257
- 142:
2258
- dict(link=('vest_kpt15', 'vest_kpt6'), id=142, color=[0, 128, 128]),
2259
- 143:
2260
- dict(link=('vest_kpt6', 'vest_kpt1'), id=143, color=[0, 128, 128]),
2261
- 144:
2262
- dict(link=('vest_kpt2', 'vest_kpt3'), id=144, color=[0, 128, 128]),
2263
- 145:
2264
- dict(link=('vest_kpt3', 'vest_kpt4'), id=145, color=[0, 128, 128]),
2265
- 146:
2266
- dict(link=('vest_kpt4', 'vest_kpt5'), id=146, color=[0, 128, 128]),
2267
- 147:
2268
- dict(link=('vest_kpt5', 'vest_kpt6'), id=147, color=[0, 128, 128]),
2269
- 148:
2270
- dict(link=('sling_kpt1', 'sling_kpt2'), id=148, color=[0, 0, 128]),
2271
- 149:
2272
- dict(link=('sling_kpt2', 'sling_kpt8'), id=149, color=[0, 0, 128]),
2273
- 150:
2274
- dict(link=('sling_kpt8', 'sling_kpt9'), id=150, color=[0, 0, 128]),
2275
- 151:
2276
- dict(link=('sling_kpt9', 'sling_kpt10'), id=151, color=[0, 0, 128]),
2277
- 152:
2278
- dict(link=('sling_kpt10', 'sling_kpt11'), id=152, color=[0, 0, 128]),
2279
- 153:
2280
- dict(link=('sling_kpt11', 'sling_kpt12'), id=153, color=[0, 0, 128]),
2281
- 154:
2282
- dict(link=('sling_kpt12', 'sling_kpt13'), id=154, color=[0, 0, 128]),
2283
- 155:
2284
- dict(link=('sling_kpt13', 'sling_kpt14'), id=155, color=[0, 0, 128]),
2285
- 156:
2286
- dict(link=('sling_kpt14', 'sling_kpt6'), id=156, color=[0, 0, 128]),
2287
- 157:
2288
- dict(link=('sling_kpt2', 'sling_kpt7'), id=157, color=[0, 0, 128]),
2289
- 158:
2290
- dict(link=('sling_kpt6', 'sling_kpt15'), id=158, color=[0, 0, 128]),
2291
- 159:
2292
- dict(link=('sling_kpt2', 'sling_kpt3'), id=159, color=[0, 0, 128]),
2293
- 160:
2294
- dict(link=('sling_kpt3', 'sling_kpt4'), id=160, color=[0, 0, 128]),
2295
- 161:
2296
- dict(link=('sling_kpt4', 'sling_kpt5'), id=161, color=[0, 0, 128]),
2297
- 162:
2298
- dict(link=('sling_kpt5', 'sling_kpt6'), id=162, color=[0, 0, 128]),
2299
- 163:
2300
- dict(link=('sling_kpt1', 'sling_kpt6'), id=163, color=[0, 0, 128]),
2301
- 164:
2302
- dict(
2303
- link=('shorts_kpt1', 'shorts_kpt4'), id=164, color=[128, 128,
2304
- 128]),
2305
- 165:
2306
- dict(
2307
- link=('shorts_kpt4', 'shorts_kpt5'), id=165, color=[128, 128,
2308
- 128]),
2309
- 166:
2310
- dict(
2311
- link=('shorts_kpt5', 'shorts_kpt6'), id=166, color=[128, 128,
2312
- 128]),
2313
- 167:
2314
- dict(
2315
- link=('shorts_kpt6', 'shorts_kpt7'), id=167, color=[128, 128,
2316
- 128]),
2317
- 168:
2318
- dict(
2319
- link=('shorts_kpt7', 'shorts_kpt8'), id=168, color=[128, 128,
2320
- 128]),
2321
- 169:
2322
- dict(
2323
- link=('shorts_kpt8', 'shorts_kpt9'), id=169, color=[128, 128,
2324
- 128]),
2325
- 170:
2326
- dict(
2327
- link=('shorts_kpt9', 'shorts_kpt10'),
2328
- id=170,
2329
- color=[128, 128, 128]),
2330
- 171:
2331
- dict(
2332
- link=('shorts_kpt10', 'shorts_kpt3'),
2333
- id=171,
2334
- color=[128, 128, 128]),
2335
- 172:
2336
- dict(
2337
- link=('shorts_kpt3', 'shorts_kpt2'), id=172, color=[128, 128,
2338
- 128]),
2339
- 173:
2340
- dict(
2341
- link=('shorts_kpt2', 'shorts_kpt1'), id=173, color=[128, 128,
2342
- 128]),
2343
- 174:
2344
- dict(
2345
- link=('trousers_kpt1', 'trousers_kpt4'),
2346
- id=174,
2347
- color=[128, 0, 128]),
2348
- 175:
2349
- dict(
2350
- link=('trousers_kpt4', 'trousers_kpt5'),
2351
- id=175,
2352
- color=[128, 0, 128]),
2353
- 176:
2354
- dict(
2355
- link=('trousers_kpt5', 'trousers_kpt6'),
2356
- id=176,
2357
- color=[128, 0, 128]),
2358
- 177:
2359
- dict(
2360
- link=('trousers_kpt6', 'trousers_kpt7'),
2361
- id=177,
2362
- color=[128, 0, 128]),
2363
- 178:
2364
- dict(
2365
- link=('trousers_kpt7', 'trousers_kpt8'),
2366
- id=178,
2367
- color=[128, 0, 128]),
2368
- 179:
2369
- dict(
2370
- link=('trousers_kpt8', 'trousers_kpt9'),
2371
- id=179,
2372
- color=[128, 0, 128]),
2373
- 180:
2374
- dict(
2375
- link=('trousers_kpt9', 'trousers_kpt10'),
2376
- id=180,
2377
- color=[128, 0, 128]),
2378
- 181:
2379
- dict(
2380
- link=('trousers_kpt10', 'trousers_kpt11'),
2381
- id=181,
2382
- color=[128, 0, 128]),
2383
- 182:
2384
- dict(
2385
- link=('trousers_kpt11', 'trousers_kpt12'),
2386
- id=182,
2387
- color=[128, 0, 128]),
2388
- 183:
2389
- dict(
2390
- link=('trousers_kpt12', 'trousers_kpt13'),
2391
- id=183,
2392
- color=[128, 0, 128]),
2393
- 184:
2394
- dict(
2395
- link=('trousers_kpt13', 'trousers_kpt14'),
2396
- id=184,
2397
- color=[128, 0, 128]),
2398
- 185:
2399
- dict(
2400
- link=('trousers_kpt14', 'trousers_kpt3'),
2401
- id=185,
2402
- color=[128, 0, 128]),
2403
- 186:
2404
- dict(
2405
- link=('trousers_kpt3', 'trousers_kpt2'),
2406
- id=186,
2407
- color=[128, 0, 128]),
2408
- 187:
2409
- dict(
2410
- link=('trousers_kpt2', 'trousers_kpt1'),
2411
- id=187,
2412
- color=[128, 0, 128]),
2413
- 188:
2414
- dict(link=('skirt_kpt1', 'skirt_kpt4'), id=188, color=[64, 128, 128]),
2415
- 189:
2416
- dict(link=('skirt_kpt4', 'skirt_kpt5'), id=189, color=[64, 128, 128]),
2417
- 190:
2418
- dict(link=('skirt_kpt5', 'skirt_kpt6'), id=190, color=[64, 128, 128]),
2419
- 191:
2420
- dict(link=('skirt_kpt6', 'skirt_kpt7'), id=191, color=[64, 128, 128]),
2421
- 192:
2422
- dict(link=('skirt_kpt7', 'skirt_kpt8'), id=192, color=[64, 128, 128]),
2423
- 193:
2424
- dict(link=('skirt_kpt8', 'skirt_kpt3'), id=193, color=[64, 128, 128]),
2425
- 194:
2426
- dict(link=('skirt_kpt3', 'skirt_kpt2'), id=194, color=[64, 128, 128]),
2427
- 195:
2428
- dict(link=('skirt_kpt2', 'skirt_kpt1'), id=195, color=[64, 128, 128]),
2429
- 196:
2430
- dict(link=('ssd_kpt1', 'ssd_kpt2'), id=196, color=[64, 64, 128]),
2431
- 197:
2432
- dict(link=('ssd_kpt2', 'ssd_kpt7'), id=197, color=[64, 64, 128]),
2433
- 198:
2434
- dict(link=('ssd_kpt7', 'ssd_kpt8'), id=198, color=[64, 64, 128]),
2435
- 199:
2436
- dict(link=('ssd_kpt8', 'ssd_kpt9'), id=199, color=[64, 64, 128]),
2437
- 200:
2438
- dict(link=('ssd_kpt9', 'ssd_kpt10'), id=200, color=[64, 64, 128]),
2439
- 201:
2440
- dict(link=('ssd_kpt10', 'ssd_kpt11'), id=201, color=[64, 64, 128]),
2441
- 202:
2442
- dict(link=('ssd_kpt11', 'ssd_kpt12'), id=202, color=[64, 64, 128]),
2443
- 203:
2444
- dict(link=('ssd_kpt12', 'ssd_kpt13'), id=203, color=[64, 64, 128]),
2445
- 204:
2446
- dict(link=('ssd_kpt13', 'ssd_kpt14'), id=204, color=[64, 64, 128]),
2447
- 205:
2448
- dict(link=('ssd_kpt14', 'ssd_kpt15'), id=205, color=[64, 64, 128]),
2449
- 206:
2450
- dict(link=('ssd_kpt15', 'ssd_kpt16'), id=206, color=[64, 64, 128]),
2451
- 207:
2452
- dict(link=('ssd_kpt16', 'ssd_kpt17'), id=207, color=[64, 64, 128]),
2453
- 208:
2454
- dict(link=('ssd_kpt17', 'ssd_kpt18'), id=208, color=[64, 64, 128]),
2455
- 209:
2456
- dict(link=('ssd_kpt18', 'ssd_kpt19'), id=209, color=[64, 64, 128]),
2457
- 210:
2458
- dict(link=('ssd_kpt19', 'ssd_kpt20'), id=210, color=[64, 64, 128]),
2459
- 211:
2460
- dict(link=('ssd_kpt20', 'ssd_kpt21'), id=211, color=[64, 64, 128]),
2461
- 212:
2462
- dict(link=('ssd_kpt21', 'ssd_kpt22'), id=212, color=[64, 64, 128]),
2463
- 213:
2464
- dict(link=('ssd_kpt22', 'ssd_kpt23'), id=213, color=[64, 64, 128]),
2465
- 214:
2466
- dict(link=('ssd_kpt23', 'ssd_kpt24'), id=214, color=[64, 64, 128]),
2467
- 215:
2468
- dict(link=('ssd_kpt24', 'ssd_kpt25'), id=215, color=[64, 64, 128]),
2469
- 216:
2470
- dict(link=('ssd_kpt25', 'ssd_kpt26'), id=216, color=[64, 64, 128]),
2471
- 217:
2472
- dict(link=('ssd_kpt26', 'ssd_kpt27'), id=217, color=[64, 64, 128]),
2473
- 218:
2474
- dict(link=('ssd_kpt27', 'ssd_kpt28'), id=218, color=[64, 64, 128]),
2475
- 219:
2476
- dict(link=('ssd_kpt28', 'ssd_kpt29'), id=219, color=[64, 64, 128]),
2477
- 220:
2478
- dict(link=('ssd_kpt29', 'ssd_kpt6'), id=220, color=[64, 64, 128]),
2479
- 221:
2480
- dict(link=('ssd_kpt6', 'ssd_kpt5'), id=221, color=[64, 64, 128]),
2481
- 222:
2482
- dict(link=('ssd_kpt5', 'ssd_kpt4'), id=222, color=[64, 64, 128]),
2483
- 223:
2484
- dict(link=('ssd_kpt4', 'ssd_kpt3'), id=223, color=[64, 64, 128]),
2485
- 224:
2486
- dict(link=('ssd_kpt3', 'ssd_kpt2'), id=224, color=[64, 64, 128]),
2487
- 225:
2488
- dict(link=('ssd_kpt6', 'ssd_kpt1'), id=225, color=[64, 64, 128]),
2489
- 226:
2490
- dict(link=('lsd_kpt1', 'lsd_kpt2'), id=226, color=[128, 64, 0]),
2491
- 227:
2492
- dict(link=('lsd_kpt2', 'lsd_kpt7'), id=227, color=[128, 64, 0]),
- 228:
- dict(link=('lsd_kpt7', 'lsd_kpt8'), id=228, color=[128, 64, 0]),
- 229:
- dict(link=('lsd_kpt8', 'lsd_kpt9'), id=229, color=[128, 64, 0]),
- 230:
- dict(link=('lsd_kpt9', 'lsd_kpt10'), id=230, color=[128, 64, 0]),
- 231:
- dict(link=('lsd_kpt10', 'lsd_kpt11'), id=231, color=[128, 64, 0]),
- 232:
- dict(link=('lsd_kpt11', 'lsd_kpt12'), id=232, color=[128, 64, 0]),
- 233:
- dict(link=('lsd_kpt12', 'lsd_kpt13'), id=233, color=[128, 64, 0]),
- 234:
- dict(link=('lsd_kpt13', 'lsd_kpt14'), id=234, color=[128, 64, 0]),
- 235:
- dict(link=('lsd_kpt14', 'lsd_kpt15'), id=235, color=[128, 64, 0]),
- 236:
- dict(link=('lsd_kpt15', 'lsd_kpt16'), id=236, color=[128, 64, 0]),
- 237:
- dict(link=('lsd_kpt16', 'lsd_kpt17'), id=237, color=[128, 64, 0]),
- 238:
- dict(link=('lsd_kpt17', 'lsd_kpt18'), id=238, color=[128, 64, 0]),
- 239:
- dict(link=('lsd_kpt18', 'lsd_kpt19'), id=239, color=[128, 64, 0]),
- 240:
- dict(link=('lsd_kpt19', 'lsd_kpt20'), id=240, color=[128, 64, 0]),
- 241:
- dict(link=('lsd_kpt20', 'lsd_kpt21'), id=241, color=[128, 64, 0]),
- 242:
- dict(link=('lsd_kpt21', 'lsd_kpt22'), id=242, color=[128, 64, 0]),
- 243:
- dict(link=('lsd_kpt22', 'lsd_kpt23'), id=243, color=[128, 64, 0]),
- 244:
- dict(link=('lsd_kpt23', 'lsd_kpt24'), id=244, color=[128, 64, 0]),
- 245:
- dict(link=('lsd_kpt24', 'lsd_kpt25'), id=245, color=[128, 64, 0]),
- 246:
- dict(link=('lsd_kpt25', 'lsd_kpt26'), id=246, color=[128, 64, 0]),
- 247:
- dict(link=('lsd_kpt26', 'lsd_kpt27'), id=247, color=[128, 64, 0]),
- 248:
- dict(link=('lsd_kpt27', 'lsd_kpt28'), id=248, color=[128, 64, 0]),
- 249:
- dict(link=('lsd_kpt28', 'lsd_kpt29'), id=249, color=[128, 64, 0]),
- 250:
- dict(link=('lsd_kpt29', 'lsd_kpt30'), id=250, color=[128, 64, 0]),
- 251:
- dict(link=('lsd_kpt30', 'lsd_kpt31'), id=251, color=[128, 64, 0]),
- 252:
- dict(link=('lsd_kpt31', 'lsd_kpt32'), id=252, color=[128, 64, 0]),
- 253:
- dict(link=('lsd_kpt32', 'lsd_kpt33'), id=253, color=[128, 64, 0]),
- 254:
- dict(link=('lsd_kpt33', 'lsd_kpt34'), id=254, color=[128, 64, 0]),
- 255:
- dict(link=('lsd_kpt34', 'lsd_kpt35'), id=255, color=[128, 64, 0]),
- 256:
- dict(link=('lsd_kpt35', 'lsd_kpt36'), id=256, color=[128, 64, 0]),
- 257:
- dict(link=('lsd_kpt36', 'lsd_kpt37'), id=257, color=[128, 64, 0]),
- 258:
- dict(link=('lsd_kpt37', 'lsd_kpt6'), id=258, color=[128, 64, 0]),
- 259:
- dict(link=('lsd_kpt6', 'lsd_kpt5'), id=259, color=[128, 64, 0]),
- 260:
- dict(link=('lsd_kpt5', 'lsd_kpt4'), id=260, color=[128, 64, 0]),
- 261:
- dict(link=('lsd_kpt4', 'lsd_kpt3'), id=261, color=[128, 64, 0]),
- 262:
- dict(link=('lsd_kpt3', 'lsd_kpt2'), id=262, color=[128, 64, 0]),
- 263:
- dict(link=('lsd_kpt6', 'lsd_kpt1'), id=263, color=[128, 64, 0]),
- 264:
- dict(link=('vd_kpt1', 'vd_kpt2'), id=264, color=[128, 64, 255]),
- 265:
- dict(link=('vd_kpt2', 'vd_kpt7'), id=265, color=[128, 64, 255]),
- 266:
- dict(link=('vd_kpt7', 'vd_kpt8'), id=266, color=[128, 64, 255]),
- 267:
- dict(link=('vd_kpt8', 'vd_kpt9'), id=267, color=[128, 64, 255]),
- 268:
- dict(link=('vd_kpt9', 'vd_kpt10'), id=268, color=[128, 64, 255]),
- 269:
- dict(link=('vd_kpt10', 'vd_kpt11'), id=269, color=[128, 64, 255]),
- 270:
- dict(link=('vd_kpt11', 'vd_kpt12'), id=270, color=[128, 64, 255]),
- 271:
- dict(link=('vd_kpt12', 'vd_kpt13'), id=271, color=[128, 64, 255]),
- 272:
- dict(link=('vd_kpt13', 'vd_kpt14'), id=272, color=[128, 64, 255]),
- 273:
- dict(link=('vd_kpt14', 'vd_kpt15'), id=273, color=[128, 64, 255]),
- 274:
- dict(link=('vd_kpt15', 'vd_kpt16'), id=274, color=[128, 64, 255]),
- 275:
- dict(link=('vd_kpt16', 'vd_kpt17'), id=275, color=[128, 64, 255]),
- 276:
- dict(link=('vd_kpt17', 'vd_kpt18'), id=276, color=[128, 64, 255]),
- 277:
- dict(link=('vd_kpt18', 'vd_kpt19'), id=277, color=[128, 64, 255]),
- 278:
- dict(link=('vd_kpt19', 'vd_kpt6'), id=278, color=[128, 64, 255]),
- 279:
- dict(link=('vd_kpt6', 'vd_kpt5'), id=279, color=[128, 64, 255]),
- 280:
- dict(link=('vd_kpt5', 'vd_kpt4'), id=280, color=[128, 64, 255]),
- 281:
- dict(link=('vd_kpt4', 'vd_kpt3'), id=281, color=[128, 64, 255]),
- 282:
- dict(link=('vd_kpt3', 'vd_kpt2'), id=282, color=[128, 64, 255]),
- 283:
- dict(link=('vd_kpt6', 'vd_kpt1'), id=283, color=[128, 64, 255]),
- 284:
- dict(link=('sd_kpt1', 'sd_kpt2'), id=284, color=[128, 64, 0]),
- 285:
- dict(link=('sd_kpt2', 'sd_kpt8'), id=285, color=[128, 64, 0]),
- 286:
- dict(link=('sd_kpt8', 'sd_kpt9'), id=286, color=[128, 64, 0]),
- 287:
- dict(link=('sd_kpt9', 'sd_kpt10'), id=287, color=[128, 64, 0]),
- 288:
- dict(link=('sd_kpt10', 'sd_kpt11'), id=288, color=[128, 64, 0]),
- 289:
- dict(link=('sd_kpt11', 'sd_kpt12'), id=289, color=[128, 64, 0]),
- 290:
- dict(link=('sd_kpt12', 'sd_kpt13'), id=290, color=[128, 64, 0]),
- 291:
- dict(link=('sd_kpt13', 'sd_kpt14'), id=291, color=[128, 64, 0]),
- 292:
- dict(link=('sd_kpt14', 'sd_kpt15'), id=292, color=[128, 64, 0]),
- 293:
- dict(link=('sd_kpt15', 'sd_kpt16'), id=293, color=[128, 64, 0]),
- 294:
- dict(link=('sd_kpt16', 'sd_kpt17'), id=294, color=[128, 64, 0]),
- 295:
- dict(link=('sd_kpt17', 'sd_kpt18'), id=295, color=[128, 64, 0]),
- 296:
- dict(link=('sd_kpt18', 'sd_kpt6'), id=296, color=[128, 64, 0]),
- 297:
- dict(link=('sd_kpt6', 'sd_kpt5'), id=297, color=[128, 64, 0]),
- 298:
- dict(link=('sd_kpt5', 'sd_kpt4'), id=298, color=[128, 64, 0]),
- 299:
- dict(link=('sd_kpt4', 'sd_kpt3'), id=299, color=[128, 64, 0]),
- 300:
- dict(link=('sd_kpt3', 'sd_kpt2'), id=300, color=[128, 64, 0]),
- 301:
- dict(link=('sd_kpt2', 'sd_kpt7'), id=301, color=[128, 64, 0]),
- 302:
- dict(link=('sd_kpt6', 'sd_kpt19'), id=302, color=[128, 64, 0]),
- 303:
- dict(link=('sd_kpt6', 'sd_kpt1'), id=303, color=[128, 64, 0])
- }),
- joint_weights=[
- 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
- 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
- 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
- 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
- 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
- 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
- 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
- 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
- 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
- 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
- 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
- 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
- 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
- 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
- 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
- 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
- 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
- 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
- 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
- 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
- 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
- ],
- sigmas=[])
- param_scheduler = [
- dict(
- type='LinearLR', begin=0, end=500, start_factor=0.001, by_epoch=False),
- dict(
- type='MultiStepLR',
- begin=0,
- end=150,
- milestones=[100, 130],
- gamma=0.1,
- by_epoch=True)
- ]
- optim_wrapper = dict(optimizer=dict(type='Adam', lr=0.0005))
- auto_scale_lr = dict(base_batch_size=512)
- dataset_type = 'DeepFashion2Dataset'
- data_mode = 'topdown'
- data_root = 'data/deepfashion2/'
- codec = dict(
- type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2)
- train_pipeline = [
- dict(type='LoadImage'),
- dict(type='GetBBoxCenterScale'),
- dict(type='RandomFlip', direction='horizontal'),
- dict(
- type='RandomBBoxTransform',
- shift_prob=0,
- rotate_factor=60,
- scale_factor=(0.75, 1.25)),
- dict(type='TopdownAffine', input_size=(192, 256)),
- dict(
- type='GenerateTarget',
- encoder=dict(
- type='MSRAHeatmap',
- input_size=(192, 256),
- heatmap_size=(48, 64),
- sigma=2)),
- dict(type='PackPoseInputs')
- ]
- val_pipeline = [
- dict(type='LoadImage', backend_args=dict(backend='local')),
- dict(type='GetBBoxCenterScale'),
- dict(type='TopdownAffine', input_size=(192, 256)),
- dict(type='PackPoseInputs')
- ]
- train_dataloader = dict(
- batch_size=8,
- num_workers=6,
- persistent_workers=True,
- sampler=dict(type='DefaultSampler', shuffle=True),
- dataset=dict(
- type='DeepFashion2Dataset',
- data_root='data/deepfashion2/',
- data_mode='topdown',
- ann_file='train/deepfashion2_short_sleeved_outwear.json',
- data_prefix=dict(img='train/image/'),
- pipeline=[
- dict(type='LoadImage'),
- dict(type='GetBBoxCenterScale'),
- dict(type='RandomFlip', direction='horizontal'),
- dict(
- type='RandomBBoxTransform',
- shift_prob=0,
- rotate_factor=60,
- scale_factor=(0.75, 1.25)),
- dict(type='TopdownAffine', input_size=(192, 256)),
- dict(
- type='GenerateTarget',
- encoder=dict(
- type='MSRAHeatmap',
- input_size=(192, 256),
- heatmap_size=(48, 64),
- sigma=2)),
- dict(type='PackPoseInputs')
- ]))
- val_dataloader = dict(
- batch_size=8,
- num_workers=6,
- persistent_workers=True,
- drop_last=False,
- sampler=dict(type='DefaultSampler', shuffle=False),
- dataset=dict(
- type='DeepFashion2Dataset',
- data_root='data/deepfashion2/',
- data_mode='topdown',
- ann_file='validation/deepfashion2_short_sleeved_outwear.json',
- data_prefix=dict(img='validation/image/'),
- test_mode=True,
- pipeline=[
- dict(type='LoadImage', backend_args=dict(backend='local')),
- dict(type='GetBBoxCenterScale'),
- dict(type='TopdownAffine', input_size=(192, 256)),
- dict(type='PackPoseInputs')
- ]))
- test_dataloader = dict(
- batch_size=8,
- num_workers=6,
- persistent_workers=True,
- drop_last=False,
- sampler=dict(type='DefaultSampler', shuffle=False),
- dataset=dict(
- type='DeepFashion2Dataset',
- data_root='data/deepfashion2/',
- data_mode='topdown',
- ann_file='validation/deepfashion2_short_sleeved_outwear.json',
- data_prefix=dict(img='validation/image/'),
- test_mode=True,
- pipeline=[
- dict(type='LoadImage', backend_args=dict(backend='local')),
- dict(type='GetBBoxCenterScale'),
- dict(type='TopdownAffine', input_size=(192, 256)),
- dict(type='PackPoseInputs')
- ]))
- channel_cfg = dict(
- num_output_channels=294,
- dataset_joints=294,
- dataset_channel=[[
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
- 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
- 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
- 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
- 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
- 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
- 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
- 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135,
- 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
- 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163,
- 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
- 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
- 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205,
- 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
- 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233,
- 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247,
- 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261,
- 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275,
- 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289,
- 290, 291, 292, 293
- ]],
- inference_channel=[
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
- 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
- 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
- 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
- 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
- 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
- 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
- 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135,
- 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
- 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163,
- 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
- 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
- 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205,
- 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
- 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233,
- 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247,
- 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261,
- 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275,
- 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289,
- 290, 291, 292, 293
- ])
- model = dict(
- type='TopdownPoseEstimator',
- data_preprocessor=dict(
- type='PoseDataPreprocessor',
- mean=[123.675, 116.28, 103.53],
- std=[58.395, 57.12, 57.375],
- bgr_to_rgb=True),
- backbone=dict(
- type='ResNet',
- depth=50,
- init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
- head=dict(
- type='HeatmapHead',
- in_channels=2048,
- out_channels=294,
- loss=dict(type='KeypointMSELoss', use_target_weight=True),
- decoder=dict(
- type='MSRAHeatmap',
- input_size=(192, 256),
- heatmap_size=(48, 64),
- sigma=2)),
- test_cfg=dict(flip_test=True, flip_mode='heatmap', shift_heatmap=True))
- val_evaluator = [
- dict(type='PCKAccuracy', thr=0.2),
- dict(type='AUC'),
- dict(type='EPE')
- ]
- test_evaluator = [
- dict(type='PCKAccuracy', thr=0.2),
- dict(type='AUC'),
- dict(type='EPE')
- ]
- launcher = 'pytorch'
- work_dir = './work_dirs/td_hm_res50_4xb8-150e_deepfashion2_short_sleeved_outwear_256x192'
 
spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/server/websearch/generateQuery.ts DELETED
@@ -1,13 +0,0 @@
- import type { Message } from "$lib/types/Message";
- import { generateFromDefaultEndpoint } from "../generateFromDefaultEndpoint";
- import { defaultModel } from "../models";
-
- export async function generateQuery(messages: Message[]) {
- const promptSearchQuery = defaultModel.webSearchQueryPromptRender({ messages });
- const searchQuery = await generateFromDefaultEndpoint(promptSearchQuery).then((query) => {
- const arr = query.split(/\r?\n/);
- return arr[0].length > 0 ? arr[0] : arr[1];
- });
-
- return searchQuery;
- }
 
spaces/Adapter/T2I-Adapter/ldm/inference_base.py DELETED
@@ -1,282 +0,0 @@
- import argparse
- import torch
- from omegaconf import OmegaConf
-
- from ldm.models.diffusion.ddim import DDIMSampler
- from ldm.models.diffusion.plms import PLMSSampler
- from ldm.modules.encoders.adapter import Adapter, StyleAdapter, Adapter_light
- from ldm.modules.extra_condition.api import ExtraCondition
- from ldm.util import fix_cond_shapes, load_model_from_config, read_state_dict
-
- DEFAULT_NEGATIVE_PROMPT = 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, ' \
- 'fewer digits, cropped, worst quality, low quality'
-
-
- def get_base_argument_parser() -> argparse.ArgumentParser:
- """get the base argument parser for inference scripts"""
- parser = argparse.ArgumentParser()
- parser.add_argument(
- '--outdir',
- type=str,
- help='dir to write results to',
- default=None,
- )
-
- parser.add_argument(
- '--prompt',
- type=str,
- nargs='?',
- default=None,
- help='positive prompt',
- )
-
- parser.add_argument(
- '--neg_prompt',
- type=str,
- default=DEFAULT_NEGATIVE_PROMPT,
- help='negative prompt',
- )
-
- parser.add_argument(
- '--cond_path',
- type=str,
- default=None,
- help='condition image path',
- )
-
- parser.add_argument(
- '--cond_inp_type',
- type=str,
- default='image',
- help='the type of the input condition image; take depth T2I as an example: the input can be a raw image, '
- 'from which depth will be calculated, or it can directly be a depth map image',
- )
-
- parser.add_argument(
- '--sampler',
- type=str,
- default='ddim',
- choices=['ddim', 'plms'],
- help='sampling algorithm, currently, only ddim and plms are supported, more are on the way',
- )
-
- parser.add_argument(
- '--steps',
- type=int,
- default=50,
- help='number of sampling steps',
- )
-
- parser.add_argument(
- '--sd_ckpt',
- type=str,
- default='models/sd-v1-4.ckpt',
- help='path to checkpoint of stable diffusion model, both .ckpt and .safetensor are supported',
- )
-
- parser.add_argument(
- '--vae_ckpt',
- type=str,
- default=None,
- help='vae checkpoint, anime SD models usually have a separate vae ckpt that needs to be loaded',
- )
-
- parser.add_argument(
- '--adapter_ckpt',
- type=str,
- default=None,
- help='path to checkpoint of adapter',
- )
-
- parser.add_argument(
- '--config',
- type=str,
- default='configs/stable-diffusion/sd-v1-inference.yaml',
- help='path to config which constructs SD model',
- )
-
- parser.add_argument(
- '--max_resolution',
- type=float,
- default=512 * 512,
- help='max image height * width, only for computers with limited vram',
- )
-
- parser.add_argument(
- '--resize_short_edge',
- type=int,
- default=None,
- help='resize short edge of the input image, if this arg is set, max_resolution will not be used',
- )
-
- parser.add_argument(
- '--C',
- type=int,
- default=4,
- help='latent channels',
- )
-
- parser.add_argument(
- '--f',
- type=int,
- default=8,
- help='downsampling factor',
- )
-
- parser.add_argument(
- '--scale',
- type=float,
- default=7.5,
- help='unconditional guidance scale: eps = eps(x, empty) + scale * (eps(x, cond) - eps(x, empty))',
- )
-
- parser.add_argument(
- '--cond_tau',
- type=float,
- default=1.0,
- help='timestep parameter that determines until which step the adapter is applied, '
- 'similar to Prompt-to-Prompt tau')
-
- parser.add_argument(
- '--cond_weight',
- type=float,
- default=1.0,
- help='the adapter features are multiplied by the cond_weight. The larger the cond_weight, the more aligned '
- 'the generated image and condition will be, but the generated quality may be reduced',
- )
-
- parser.add_argument(
- '--seed',
- type=int,
- default=42,
- )
-
- parser.add_argument(
- '--n_samples',
- type=int,
- default=4,
- help='# of samples to generate',
- )
-
- return parser
-
-
- def get_sd_models(opt):
- """
- build stable diffusion model, sampler
- """
- # SD
- config = OmegaConf.load(f"{opt.config}")
- model = load_model_from_config(config, opt.sd_ckpt, opt.vae_ckpt)
- sd_model = model.to(opt.device)
-
- # sampler
- if opt.sampler == 'plms':
- sampler = PLMSSampler(model)
- elif opt.sampler == 'ddim':
- sampler = DDIMSampler(model)
- else:
- raise NotImplementedError
-
- return sd_model, sampler
-
-
- def get_t2i_adapter_models(opt):
- config = OmegaConf.load(f"{opt.config}")
- model = load_model_from_config(config, opt.sd_ckpt, opt.vae_ckpt)
- adapter_ckpt_path = getattr(opt, f'{opt.which_cond}_adapter_ckpt', None)
- if adapter_ckpt_path is None:
- adapter_ckpt_path = getattr(opt, 'adapter_ckpt')
- adapter_ckpt = read_state_dict(adapter_ckpt_path)
- new_state_dict = {}
- for k, v in adapter_ckpt.items():
- if not k.startswith('adapter.'):
- new_state_dict[f'adapter.{k}'] = v
- else:
- new_state_dict[k] = v
- m, u = model.load_state_dict(new_state_dict, strict=False)
- if len(u) > 0:
- print(f"unexpected keys in loading adapter ckpt {adapter_ckpt_path}:")
- print(u)
-
- model = model.to(opt.device)
-
- # sampler
- if opt.sampler == 'plms':
- sampler = PLMSSampler(model)
- elif opt.sampler == 'ddim':
- sampler = DDIMSampler(model)
- else:
- raise NotImplementedError
-
- return model, sampler
-
-
- def get_cond_ch(cond_type: ExtraCondition):
- if cond_type == ExtraCondition.sketch or cond_type == ExtraCondition.canny:
- return 1
- return 3
-
-
- def get_adapters(opt, cond_type: ExtraCondition):
- adapter = {}
- cond_weight = getattr(opt, f'{cond_type.name}_weight', None)
- if cond_weight is None:
- cond_weight = getattr(opt, 'cond_weight')
- adapter['cond_weight'] = cond_weight
-
- if cond_type == ExtraCondition.style:
- adapter['model'] = StyleAdapter(width=1024, context_dim=768, num_head=8, n_layes=3, num_token=8).to(opt.device)
- elif cond_type == ExtraCondition.color:
- adapter['model'] = Adapter_light(
- cin=64 * get_cond_ch(cond_type),
- channels=[320, 640, 1280, 1280],
- nums_rb=4).to(opt.device)
- else:
- adapter['model'] = Adapter(
- cin=64 * get_cond_ch(cond_type),
- channels=[320, 640, 1280, 1280][:4],
- nums_rb=2,
- ksize=1,
- sk=True,
- use_conv=False).to(opt.device)
- ckpt_path = getattr(opt, f'{cond_type.name}_adapter_ckpt', None)
- if ckpt_path is None:
- ckpt_path = getattr(opt, 'adapter_ckpt')
- adapter['model'].load_state_dict(torch.load(ckpt_path))
-
- return adapter
-
-
- def diffusion_inference(opt, model, sampler, adapter_features, append_to_context=None):
- # get text embedding
- c = model.get_learned_conditioning([opt.prompt])
- if opt.scale != 1.0:
- uc = model.get_learned_conditioning([opt.neg_prompt])
- else:
- uc = None
- c, uc = fix_cond_shapes(model, c, uc)
-
- if not hasattr(opt, 'H'):
- opt.H = 512
- opt.W = 512
- shape = [opt.C, opt.H // opt.f, opt.W // opt.f]
-
- samples_latents, _ = sampler.sample(
- S=opt.steps,
- conditioning=c,
- batch_size=1,
- shape=shape,
- verbose=False,
- unconditional_guidance_scale=opt.scale,
- unconditional_conditioning=uc,
- x_T=None,
- features_adapter=adapter_features,
- append_to_context=append_to_context,
- cond_tau=opt.cond_tau,
- )
-
- x_samples = model.decode_first_stage(samples_latents)
- x_samples = torch.clamp((x_samples + 1.0) / 2.0, min=0.0, max=1.0)
-
- return x_samples
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/CreateSpace.js DELETED
@@ -1,10 +0,0 @@
- import Space from '../../space/Space.js';
-
- var CreateSpace = function (scene, data, view, styles, customBuilders) {
- var gameObject = new Space(scene);
- // Don't add Zone into scene
- // this.scene.add.existing(gameObject);
- return gameObject;
- }
-
- export default CreateSpace;
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/slider/PercentToPosition.js DELETED
@@ -1,13 +0,0 @@
- const Linear = Phaser.Math.Linear;
-
- var PercentToPosition = function (t, startPoint, endPoint, out) {
- if (out === undefined) {
- out = tmpOut;
- }
- out.x = Linear(startPoint.x, endPoint.x, t);
- out.y = Linear(startPoint.y, endPoint.y, t);
- return out;
- }
- var tmpOut = {};
-
- export default PercentToPosition;
 
spaces/Akmyradov/TurkmenTTSweSTT/asr.py DELETED
@@ -1,41 +0,0 @@
- import librosa
- from transformers import Wav2Vec2ForCTC, AutoProcessor
- import torch
-
- ASR_SAMPLING_RATE = 16_000
-
-
- MODEL_ID = "facebook/mms-1b-all"
-
- processor = AutoProcessor.from_pretrained(MODEL_ID)
- model = Wav2Vec2ForCTC.from_pretrained(MODEL_ID)
-
-
- def transcribe(microphone, file_upload, lang):
-
- warn_output = ""
- if (microphone is not None) and (file_upload is not None):
- warn_output = (
- "WARNING: You've uploaded an audio file and used the microphone. "
- "The recorded file from the microphone will be used and the uploaded audio will be discarded.\n"
- )
- elif (microphone is None) and (file_upload is None):
- return "ERROR: You have to either use the microphone or upload an audio file"
-
- audio_fp = microphone if microphone is not None else file_upload
- audio_samples = librosa.load(audio_fp, sr=ASR_SAMPLING_RATE, mono=True)[0]
-
- lang_code = lang.split(":")[0]
- processor.tokenizer.set_target_lang(lang_code)
- model.load_adapter(lang_code)
-
- inputs = processor(
- audio_samples, sampling_rate=ASR_SAMPLING_RATE, return_tensors="pt"
- )
-
- with torch.no_grad():
- outputs = model(**inputs).logits
-
- ids = torch.argmax(outputs, dim=-1)[0]
- transcription = processor.decode(ids)
- return warn_output + transcription
 
spaces/Alichuan/VITS-Umamusume-voice-synthesizer/text/ngu_dialect.py DELETED
@@ -1,30 +0,0 @@
- import re
- import opencc
-
-
- dialects = {'SZ': 'suzhou', 'WX': 'wuxi', 'CZ': 'changzhou', 'HZ': 'hangzhou',
- 'SX': 'shaoxing', 'NB': 'ningbo', 'JJ': 'jingjiang', 'YX': 'yixing',
- 'JD': 'jiading', 'ZR': 'zhenru', 'PH': 'pinghu', 'TX': 'tongxiang',
- 'JS': 'jiashan', 'HN': 'xiashi', 'LP': 'linping', 'XS': 'xiaoshan',
- 'FY': 'fuyang', 'RA': 'ruao', 'CX': 'cixi', 'SM': 'sanmen',
- 'TT': 'tiantai', 'WZ': 'wenzhou', 'SC': 'suichang', 'YB': 'youbu'}
-
- converters = {}
-
- for dialect in dialects.values():
- try:
- converters[dialect] = opencc.OpenCC(dialect)
- except:
- pass
-
-
- def ngu_dialect_to_ipa(text, dialect):
- dialect = dialects[dialect]
- text = converters[dialect].convert(text).replace('-','').replace('$',' ')
- text = re.sub(r'[、;:]', ',', text)
- text = re.sub(r'\s*,\s*', ', ', text)
- text = re.sub(r'\s*。\s*', '. ', text)
- text = re.sub(r'\s*?\s*', '? ', text)
- text = re.sub(r'\s*!\s*', '! ', text)
- text = re.sub(r'\s*$', '', text)
- return text
 
spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/mapper/training/__init__.py DELETED
File without changes
spaces/Amrrs/DragGan-Inversion/PTI/torch_utils/ops/upfirdn2d.h DELETED
@@ -1,59 +0,0 @@
- // Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
- //
- // NVIDIA CORPORATION and its licensors retain all intellectual property
- // and proprietary rights in and to this software, related documentation
- // and any modifications thereto. Any use, reproduction, disclosure or
- // distribution of this software and related documentation without an express
- // license agreement from NVIDIA CORPORATION is strictly prohibited.
-
- #include <cuda_runtime.h>
-
- //------------------------------------------------------------------------
- // CUDA kernel parameters.
-
- struct upfirdn2d_kernel_params
- {
- const void* x;
- const float* f;
- void* y;
-
- int2 up;
- int2 down;
- int2 pad0;
- int flip;
- float gain;
-
- int4 inSize; // [width, height, channel, batch]
- int4 inStride;
- int2 filterSize; // [width, height]
- int2 filterStride;
- int4 outSize; // [width, height, channel, batch]
- int4 outStride;
- int sizeMinor;
- int sizeMajor;
-
- int loopMinor;
- int loopMajor;
- int loopX;
- int launchMinor;
- int launchMajor;
- };
-
- //------------------------------------------------------------------------
- // CUDA kernel specialization.
-
- struct upfirdn2d_kernel_spec
- {
- void* kernel;
- int tileOutW;
- int tileOutH;
- int loopMinor;
- int loopX;
- };
-
- //------------------------------------------------------------------------
- // CUDA kernel selection.
-
- template <class T> upfirdn2d_kernel_spec choose_upfirdn2d_kernel(const upfirdn2d_kernel_params& p);
-
- //------------------------------------------------------------------------
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/altdiffusion/__init__.py DELETED
File without changes
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/controlnet/test_flax_controlnet.py DELETED
@@ -1,127 +0,0 @@
- # coding=utf-8
- # Copyright 2023 HuggingFace Inc.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- import gc
- import unittest
-
- from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
- from diffusers.utils import is_flax_available, load_image, slow
- from diffusers.utils.testing_utils import require_flax
-
-
- if is_flax_available():
-     import jax
-     import jax.numpy as jnp
-     from flax.jax_utils import replicate
-     from flax.training.common_utils import shard
-
-
- @slow
- @require_flax
- class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
-     def tearDown(self):
-         # clean up the VRAM after each test
-         super().tearDown()
-         gc.collect()
-
-     def test_canny(self):
-         controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
-             "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
-         )
-         pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
-             "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
-         )
-         params["controlnet"] = controlnet_params
-
-         prompts = "bird"
-         num_samples = jax.device_count()
-         prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)
-
-         canny_image = load_image(
-             "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
-         )
-         processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)
-
-         rng = jax.random.PRNGKey(0)
-         rng = jax.random.split(rng, jax.device_count())
-
-         p_params = replicate(params)
-         prompt_ids = shard(prompt_ids)
-         processed_image = shard(processed_image)
-
-         images = pipe(
-             prompt_ids=prompt_ids,
-             image=processed_image,
-             params=p_params,
-             prng_seed=rng,
-             num_inference_steps=50,
-             jit=True,
-         ).images
-         assert images.shape == (jax.device_count(), 1, 768, 512, 3)
-
-         images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
-         image_slice = images[0, 253:256, 253:256, -1]
-
-         output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
-         expected_slice = jnp.array(
-             [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
-         )
-         print(f"output_slice: {output_slice}")
-         assert jnp.abs(output_slice - expected_slice).max() < 1e-2
-
-     def test_pose(self):
-         controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
-             "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
-         )
-         pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
-             "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
-         )
-         params["controlnet"] = controlnet_params
-
-         prompts = "Chef in the kitchen"
-         num_samples = jax.device_count()
-         prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)
-
-         pose_image = load_image(
-             "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
-         )
-         processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)
-
-         rng = jax.random.PRNGKey(0)
-         rng = jax.random.split(rng, jax.device_count())
-
-         p_params = replicate(params)
-         prompt_ids = shard(prompt_ids)
-         processed_image = shard(processed_image)
-
-         images = pipe(
-             prompt_ids=prompt_ids,
-             image=processed_image,
-             params=p_params,
-             prng_seed=rng,
-             num_inference_steps=50,
-             jit=True,
-         ).images
-         assert images.shape == (jax.device_count(), 1, 768, 512, 3)
-
-         images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
-         image_slice = images[0, 253:256, 253:256, -1]
-
-         output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
-         expected_slice = jnp.array(
-             [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
-         )
-         print(f"output_slice: {output_slice}")
-         assert jnp.abs(output_slice - expected_slice).max() < 1e-2
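These integration tests are gated behind the usual diffusers slow-test switch, so a typical invocation (assuming a JAX runtime with one or more accelerators) would be `RUN_SLOW=1 python -m pytest tests/pipelines/controlnet/test_flax_controlnet.py`.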
spaces/Andy1621/uniformer_image_detection/configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py DELETED
@@ -1,8 +0,0 @@
- _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
-
- model = dict(
-     neck=dict(
-         type='PAFPN',
-         in_channels=[256, 512, 1024, 2048],
-         out_channels=256,
-         num_outs=5))
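For context, a sketch of how MMDetection-2.x-era code (which these uniformer configs target) materializes such a config into a model; the relative path is illustrative:

    from mmcv import Config
    from mmdet.models import build_detector

    # Inherits the Faster R-CNN base and swaps the FPN neck for PAFPN
    cfg = Config.fromfile('configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py')
    model = build_detector(cfg.model, train_cfg=cfg.get('train_cfg'), test_cfg=cfg.get('test_cfg'))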
spaces/Andy1621/uniformer_image_detection/configs/vfnet/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py DELETED
@@ -1,16 +0,0 @@
- _base_ = './vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py'
- model = dict(
-     pretrained='open-mmlab://resnext101_32x4d',
-     backbone=dict(
-         type='ResNeXt',
-         depth=101,
-         groups=32,
-         base_width=4,
-         num_stages=4,
-         out_indices=(0, 1, 2, 3),
-         frozen_stages=1,
-         norm_cfg=dict(type='BN', requires_grad=True),
-         norm_eval=True,
-         style='pytorch',
-         dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
-         stage_with_dcn=(False, True, True, True)))
spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r101-d8_480x480_40k_pascal_context_59.py DELETED
@@ -1,2 +0,0 @@
- _base_ = './pspnet_r50-d8_480x480_40k_pascal_context_59.py'
- model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/Training_PRO/train_utils.py DELETED
@@ -1,279 +0,0 @@
- import os
- from modules import shared, utils
- from pathlib import Path
- import json
-
- def list_subfoldersByTime(directory):
-
-     if not directory.endswith('/'):
-         directory += '/'
-     subfolders = []
-     path = directory
-     name_list = os.listdir(path)
-     full_list = [os.path.join(path, i) for i in name_list]
-     time_sorted_list = sorted(full_list, key=os.path.getmtime, reverse=True)
-
-     for entry in time_sorted_list:
-         if os.path.isdir(entry):
-             entry_str = f"{entry}"  # Convert entry to a string
-             full_path = entry_str
-             entry_str = entry_str.replace('\\', '/')
-             entry_str = entry_str.replace(f"{directory}", "")  # Remove directory part
-             subfolders.append(entry_str)
-
-     return subfolders
-
- def get_available_loras_local(_sortedByTime):
-
-     model_dir = shared.args.lora_dir  # Update with the appropriate directory path
-     subfolders = []
-     if _sortedByTime:
-         subfolders = list_subfoldersByTime(model_dir)
-     else:
-         subfolders = utils.get_available_loras()
-
-     return subfolders
-
-
- # FPHAM SPLIT BY SENTENCE BLOCK ===============
-
- def split_sentences(text: str, cutoff_len: int):
-     sentences = []
-     sentence = ''
-     delimiters = ['. ', '? ', '! ', '... ', '.\n', '?\n', '!\n', '...\n', '</s>', '<//>']
-     abbreviations = ['Mr. ', 'Mrs. ', 'Dr. ', 'Ms. ', 'St. ', 'Prof. ', 'Jr. ', 'Ltd. ', 'Capt. ', 'Col. ', 'Gen. ', 'Ave. ', 'Blvd. ', 'Co. ', 'Corp. ', 'Dept. ', 'Est. ', 'Gov. ', 'Inc. ', 'Ph.D. ', 'Univ. ']
-     errors = 0
-     max_cut = cutoff_len - 1
-     prev_char = ''
-
-     for char in text:
-         sentence += char
-
-         if (any(sentence.endswith(delimiter) for delimiter in delimiters) and
-                 not (prev_char.isupper() and len(sentence) >= 3 and sentence[-3] != ' ') and
-                 not any(sentence.endswith(abbreviation) for abbreviation in abbreviations)):
-             tokens = shared.tokenizer.encode(sentence)
-
-             if len(tokens) > max_cut:
-                 tokens = tokens[:max_cut]
-                 sentence = shared.tokenizer.decode(tokens, skip_special_tokens=True)
-                 errors = errors + 1
-
-             sentences.append({'text': sentence, 'size': len(tokens)})
-
-             sentence = ''
-
-         prev_char = char
-
-     if sentence:
-         tokens = shared.tokenizer.encode(sentence)
-         if len(tokens) > max_cut:
-             tokens = tokens[:max_cut]
-             sentence = shared.tokenizer.decode(tokens, skip_special_tokens=True)
-             errors = errors + 1
-
-         sentences.append({'text': sentence, 'size': len(tokens)})
-
-     if errors > 0:
-         print(f"Trimmed sentences beyond Cutoff Length: {errors}")
-
-     return sentences
-
- # The goal of the following code is to create blocks of text + overlapping blocks while:
- # - respecting sentence boundaries
- # - always using all the text
- # - a hard cut defined by hard_cut_string or </s> will always end at the end of a data block
- # - no overlapping blocks will be created across a hard cut or across a </s> token
-
- def precise_cut(text: str, overlap: bool, min_chars_cut: int, eos_to_hc: bool, cutoff_len: int, hard_cut_string: str, debug_slicer: bool):
-
-     EOSX_str = '<//>'  # hard cut placeholder
-     EOS_str = '</s>'
-     print("Precise raw text slicer: ON")
-
-     cut_string = hard_cut_string.replace('\\n', '\n')
-     text = text.replace(cut_string, EOSX_str)
-     sentences = split_sentences(text, cutoff_len)
-
-     print(f"Sentences: {len(sentences)}")
-     sentencelist = []
-     currentSentence = ''
-     totalLength = 0
-     max_cut = cutoff_len - 1
-     half_cut = cutoff_len // 2
-     halfcut_length = 0
-
-     edgeindex = []
-     half_index = 0
-
-     for index, item in enumerate(sentences):
-
-         if halfcut_length + item['size'] < half_cut:
-             halfcut_length += item['size']
-             half_index = index
-         else:
-             edgeindex.append(half_index)
-             halfcut_length = -2 * max_cut
-
-         if totalLength + item['size'] < max_cut and not currentSentence.endswith(EOSX_str):
-             currentSentence += item['text']
-             totalLength += item['size']
-         else:
-
-             if len(currentSentence.strip()) > min_chars_cut:
-                 sentencelist.append(currentSentence.strip())
-
-             currentSentence = item['text']
-             totalLength = item['size']
-             halfcut_length = item['size']
-
-     if len(currentSentence.strip()) > min_chars_cut:
-         sentencelist.append(currentSentence.strip())
-
-     unique_blocks = len(sentencelist)
-     print(f"Text Blocks: {unique_blocks}")
-
-     # overlap strategies:
-     # don't overlap across HARD CUT (EOSX)
-     if overlap:
-         for edge_idx in edgeindex:
-             currentSentence = ''
-             totalLength = 0
-
-             for item in sentences[edge_idx:]:
-                 if totalLength + item['size'] < max_cut:
-                     currentSentence += item['text']
-                     totalLength += item['size']
-                 else:
-                     # if by chance EOSX is at the end then it's acceptable
-                     if currentSentence.endswith(EOSX_str) and len(currentSentence.strip()) > min_chars_cut:
-                         sentencelist.append(currentSentence.strip())
-                     # otherwise don't cross the hard cut
-                     elif EOSX_str not in currentSentence and len(currentSentence.strip()) > min_chars_cut:
-                         sentencelist.append(currentSentence.strip())
-
-                     currentSentence = ''
-                     totalLength = 0
-                     break
-
-         print(f"+ Overlapping blocks: {len(sentencelist) - unique_blocks}")
-
-     num_EOS = 0
-     for i in range(len(sentencelist)):
-         if eos_to_hc:
-             sentencelist[i] = sentencelist[i].replace(EOSX_str, EOS_str)
-         else:
-             sentencelist[i] = sentencelist[i].replace(EOSX_str, '')
-
-         # someone may have had stop strings in the raw text...
-         sentencelist[i] = sentencelist[i].replace("</s></s>", EOS_str)
-         num_EOS += sentencelist[i].count(EOS_str)
-
-     if num_EOS > 0:
-         print(f"+ EOS count: {num_EOS}")
-
-     # final check for useless lines
-     sentencelist = [item for item in sentencelist if item.strip() != "</s>"]
-     sentencelist = [item for item in sentencelist if item.strip() != ""]
-
-
-     if debug_slicer:
-         # Write the log file
-         Path('logs').mkdir(exist_ok=True)
-         sentencelist_dict = {index: sentence for index, sentence in enumerate(sentencelist)}
-         output_file = "logs/sentencelist.json"
-         with open(output_file, 'w') as f:
-             json.dump(sentencelist_dict, f, indent=2)
-
-         print("Saved sentencelist.json in logs folder")
-
-     return sentencelist
-
-
- def sliding_block_cut(text: str, min_chars_cut: int, eos_to_hc: bool, cutoff_len: int, hard_cut_string: str, debug_slicer: bool):
-
-     EOSX_str = '<//>'  # hard cut placeholder
-     EOS_str = '</s>'
-     print("Mega Block Overlap: ON")
-
-     cut_string = hard_cut_string.replace('\\n', '\n')
-     text = text.replace(cut_string, EOSX_str)
-     sentences = split_sentences(text, cutoff_len)
-
-     print(f"Sentences: {len(sentences)}")
-     sentencelist = []
-
-     max_cut = cutoff_len - 1
-
-     # print(f"max_cut: {max_cut}")
-     advancing_to = 0
-
-     prev_block_lastsentence = ""
-
-     for i in range(len(sentences)):
-         totalLength = 0
-         currentSentence = ''
-         lastsentence = ""
-
-         if i >= advancing_to:
-             for k in range(i, len(sentences)):
-
-                 current_length = sentences[k]['size']
-
-                 if totalLength + current_length <= max_cut and not currentSentence.endswith(EOSX_str):
-                     currentSentence += sentences[k]['text']
-                     totalLength += current_length
-                     lastsentence = sentences[k]['text']
-                 else:
-                     if len(currentSentence.strip()) > min_chars_cut:
-                         if prev_block_lastsentence != lastsentence:
-                             sentencelist.append(currentSentence.strip())
-                             prev_block_lastsentence = lastsentence
-
-                     advancing_to = 0
-                     if currentSentence.endswith(EOSX_str):
-                         advancing_to = k
-
-                     currentSentence = ""
-                     totalLength = 0
-                     break
-
-             if currentSentence != "":
-                 if len(currentSentence.strip()) > min_chars_cut:
-                     sentencelist.append(currentSentence.strip())
-
-     unique_blocks = len(sentencelist)
-     print(f"Text Blocks: {unique_blocks}")
-     num_EOS = 0
-     for i in range(len(sentencelist)):
-         if eos_to_hc:
-             sentencelist[i] = sentencelist[i].replace(EOSX_str, EOS_str)
-         else:
-             sentencelist[i] = sentencelist[i].replace(EOSX_str, '')
-
-         # someone may have had stop strings in the raw text...
-         sentencelist[i] = sentencelist[i].replace("</s></s>", EOS_str)
-         num_EOS += sentencelist[i].count(EOS_str)
-
-     if num_EOS > 0:
-         print(f"+ EOS count: {num_EOS}")
-
-     # final check for useless lines
-     sentencelist = [item for item in sentencelist if item.strip() != "</s>"]
-     sentencelist = [item for item in sentencelist if item.strip() != ""]
-
-
-     if debug_slicer:
-         # Write the log file
-         Path('logs').mkdir(exist_ok=True)
-         sentencelist_dict = {index: sentence for index, sentence in enumerate(sentencelist)}
-         output_file = "logs/sentencelist.json"
-         with open(output_file, 'w') as f:
-             json.dump(sentencelist_dict, f, indent=2)
-
-         print("Saved sentencelist.json in logs folder")
-
-     return sentencelist
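A sketch of how the slicers above are typically driven from the Training PRO extension. This assumes `shared.tokenizer` was already populated by loading a model in the web UI first; the parameter values are illustrative:

    blocks = precise_cut(
        raw_text,
        overlap=True,          # also emit blocks starting at half-cutoff edges
        min_chars_cut=32,
        eos_to_hc=True,        # turn hard-cut markers into </s>
        cutoff_len=256,
        hard_cut_string='\\n\\n\\n',
        debug_slicer=False,
    )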
spaces/ArtGAN/Segment-Anything-Video/app.py DELETED
@@ -1,319 +0,0 @@
- import gradio as gr
- from demo import automask_image_app, automask_video_app, sahi_autoseg_app
-
-
-
- def image_app():
-     with gr.Blocks():
-         with gr.Row():
-             with gr.Column():
-                 seg_automask_image_file = gr.Image(type="filepath").style(height=260)
-                 with gr.Row():
-                     with gr.Column():
-                         seg_automask_image_model_type = gr.Dropdown(
-                             choices=[
-                                 "vit_h",
-                                 "vit_l",
-                                 "vit_b",
-                             ],
-                             value="vit_l",
-                             label="Model Type",
-                         )
-
-                         seg_automask_image_min_area = gr.Number(
-                             value=0,
-                             label="Min Area",
-                         )
-                 with gr.Row():
-                     with gr.Column():
-                         seg_automask_image_points_per_side = gr.Slider(
-                             minimum=0,
-                             maximum=32,
-                             step=2,
-                             value=16,
-                             label="Points per Side",
-                         )
-
-                         seg_automask_image_points_per_batch = gr.Slider(
-                             minimum=0,
-                             maximum=64,
-                             step=2,
-                             value=32,
-                             label="Points per Batch",
-                         )
-
-                 seg_automask_image_predict = gr.Button(value="Generator")
-
-             with gr.Column():
-                 output_image = gr.Image()
-
-         seg_automask_image_predict.click(
-             fn=automask_image_app,
-             inputs=[
-                 seg_automask_image_file,
-                 seg_automask_image_model_type,
-                 seg_automask_image_points_per_side,
-                 seg_automask_image_points_per_batch,
-                 seg_automask_image_min_area,
-             ],
-             outputs=[output_image],
-         )
-
-         gr.Examples(
-             examples=[
-                 [
-                     "testv3.jpeg",
-                     "vit_l",
-                     16,
-                     32,
-                     0,
-                 ],
-
-             ],
-             fn=automask_image_app,
-             inputs=[
-                 seg_automask_image_file,
-                 seg_automask_image_model_type,
-                 seg_automask_image_points_per_side,
-                 seg_automask_image_points_per_batch,
-                 seg_automask_image_min_area,
-             ],
-             outputs=[output_image],
-             cache_examples=True,
-         )
-
-
- def video_app():
-     with gr.Blocks():
-         with gr.Row():
-             with gr.Column():
-                 seg_automask_video_file = gr.Video().style(height=260)
-                 with gr.Row():
-                     with gr.Column():
-                         seg_automask_video_model_type = gr.Dropdown(
-                             choices=[
-                                 "vit_h",
-                                 "vit_l",
-                                 "vit_b",
-                             ],
-                             value="vit_l",
-                             label="Model Type",
-                         )
-                         seg_automask_video_min_area = gr.Number(
-                             value=1000,
-                             label="Min Area",
-                         )
-
-                 with gr.Row():
-                     with gr.Column():
-                         seg_automask_video_points_per_side = gr.Slider(
-                             minimum=0,
-                             maximum=32,
-                             step=2,
-                             value=16,
-                             label="Points per Side",
-                         )
-
-                         seg_automask_video_points_per_batch = gr.Slider(
-                             minimum=0,
-                             maximum=64,
-                             step=2,
-                             value=32,
-                             label="Points per Batch",
-                         )
-
-                 seg_automask_video_predict = gr.Button(value="Generator")
-             with gr.Column():
-                 output_video = gr.Video()
-
-         seg_automask_video_predict.click(
-             fn=automask_video_app,
-             inputs=[
-                 seg_automask_video_file,
-                 seg_automask_video_model_type,
-                 seg_automask_video_points_per_side,
-                 seg_automask_video_points_per_batch,
-                 seg_automask_video_min_area,
-             ],
-             outputs=[output_video],
-         )
-
-         gr.Examples(
-             examples=[
-                 [
-                     "testv2.mp4",
-                     "vit_l",
-                     16,
-                     32,
-                     0,
-                 ],
-             ],
-             fn=automask_video_app,
-             inputs=[
-                 seg_automask_video_file,
-                 seg_automask_video_model_type,
-                 seg_automask_video_points_per_side,
-                 seg_automask_video_points_per_batch,
-                 seg_automask_video_min_area,
-             ],
-             outputs=[output_video],
-             cache_examples=True,
-         )
-
-
- def sahi_app():
-     with gr.Blocks():
-         with gr.Row():
-             with gr.Column():
-                 sahi_image_file = gr.Image(type="filepath").style(height=260)
-                 sahi_autoseg_model_type = gr.Dropdown(
-                     choices=[
-                         "vit_h",
-                         "vit_l",
-                         "vit_b",
-                     ],
-                     value="vit_l",
-                     label="Sam Model Type",
-                 )
-
-                 with gr.Row():
-                     with gr.Column():
-                         sahi_model_type = gr.Dropdown(
-                             choices=[
-                                 "yolov5",
-                                 "yolov8",
-                             ],
-                             value="yolov5",
-                             label="Detector Model Type",
-                         )
-                         sahi_image_size = gr.Slider(
-                             minimum=0,
-                             maximum=1280,
-                             step=32,
-                             value=640,
-                             label="Image Size",
-                         )
-
-                         sahi_overlap_width = gr.Slider(
-                             minimum=0,
-                             maximum=1,
-                             step=0.1,
-                             value=0.2,
-                             label="Overlap Width",
-                         )
-
-                         sahi_slice_width = gr.Slider(
-                             minimum=0,
-                             maximum=640,
-                             step=32,
-                             value=256,
-                             label="Slice Width",
-                         )
-
-                 with gr.Row():
-                     with gr.Column():
-                         sahi_model_path = gr.Dropdown(
-                             choices=[
-                                 "yolov5l.pt",
-                                 "yolov5l6.pt",
-                                 "yolov8l.pt",
-                                 "yolov8x.pt",
-                             ],
-                             value="yolov5l6.pt",
-                             label="Detector Model Path",
-                         )
-
-                         sahi_conf_th = gr.Slider(
-                             minimum=0,
-                             maximum=1,
-                             step=0.1,
-                             value=0.2,
-                             label="Confidence Threshold",
-                         )
-                         sahi_overlap_height = gr.Slider(
-                             minimum=0,
-                             maximum=1,
-                             step=0.1,
-                             value=0.2,
-                             label="Overlap Height",
-                         )
-                         sahi_slice_height = gr.Slider(
-                             minimum=0,
-                             maximum=640,
-                             step=32,
-                             value=256,
-                             label="Slice Height",
-                         )
-                 sahi_image_predict = gr.Button(value="Generator")
-
-             with gr.Column():
-                 output_image = gr.Image()
-
-         sahi_image_predict.click(
-             fn=sahi_autoseg_app,
-             inputs=[
-                 sahi_image_file,
-                 sahi_autoseg_model_type,
-                 sahi_model_type,
-                 sahi_model_path,
-                 sahi_conf_th,
-                 sahi_image_size,
-                 sahi_slice_height,
-                 sahi_slice_width,
-                 sahi_overlap_height,
-                 sahi_overlap_width,
-             ],
-             outputs=[output_image],
-         )
-
-         gr.Examples(
-             examples=[
-                 [
-                     "testv1.jpg",
-                     "vit_l",
-                     "yolov5",
-                     "yolov5l6.pt",
-                     0.2,
-                     1280,
-                     256,
-                     256,
-                     0.2,
-                     0.2,
-                 ],
-             ],
-             fn=sahi_autoseg_app,
-             inputs=[
-                 sahi_image_file,
-                 sahi_autoseg_model_type,
-                 sahi_model_type,
-                 sahi_model_path,
-                 sahi_conf_th,
-                 sahi_image_size,
-                 sahi_slice_height,
-                 sahi_slice_width,
-                 sahi_overlap_height,
-                 sahi_overlap_width,
-             ],
-             outputs=[output_image],
-             cache_examples=True,
-         )
-
-
- def metaseg_app():
-     app = gr.Blocks()
-     with app:
-         with gr.Row():
-             with gr.Column():
-                 with gr.Tab("Image"):
-                     image_app()
-                 with gr.Tab("Video"):
-                     video_app()
-                 with gr.Tab("SAHI"):
-                     sahi_app()
-
-     app.queue(concurrency_count=1)
-     app.launch(debug=True, enable_queue=True)
-
-
- if __name__ == "__main__":
-     metaseg_app()
spaces/Arulkumar03/GroundingDINO_SOTA_Zero_Shot_Model/groundingdino/config/GroundingDINO_SwinT_OGC.py DELETED
@@ -1,43 +0,0 @@
- batch_size = 1
- modelname = "groundingdino"
- backbone = "swin_T_224_1k"
- position_embedding = "sine"
- pe_temperatureH = 20
- pe_temperatureW = 20
- return_interm_indices = [1, 2, 3]
- backbone_freeze_keywords = None
- enc_layers = 6
- dec_layers = 6
- pre_norm = False
- dim_feedforward = 2048
- hidden_dim = 256
- dropout = 0.0
- nheads = 8
- num_queries = 900
- query_dim = 4
- num_patterns = 0
- num_feature_levels = 4
- enc_n_points = 4
- dec_n_points = 4
- two_stage_type = "standard"
- two_stage_bbox_embed_share = False
- two_stage_class_embed_share = False
- transformer_activation = "relu"
- dec_pred_bbox_embed_share = True
- dn_box_noise_scale = 1.0
- dn_label_noise_ratio = 0.5
- dn_label_coef = 1.0
- dn_bbox_coef = 1.0
- embed_init_tgt = True
- dn_labelbook_size = 2000
- max_text_len = 256
- text_encoder_type = "bert-base-uncased"
- use_text_enhancer = True
- use_fusion_layer = True
- use_checkpoint = True
- use_transformer_ckpt = True
- use_text_cross_attention = True
- text_dropout = 0.0
- fusion_dropout = 0.0
- fusion_droppath = 0.1
- sub_sentence_present = True
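GroundingDINO's demo scripts consume this file through the repository's SLConfig helper; a minimal sketch (the config path is illustrative and the SLConfig import reflects the upstream repo layout):

    from groundingdino.util.slconfig import SLConfig

    args = SLConfig.fromfile('groundingdino/config/GroundingDINO_SwinT_OGC.py')
    # the flat fields above become attributes, e.g. args.hidden_dim == 256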
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/models/direct_url.py DELETED
@@ -1,237 +0,0 @@
- """ PEP 610 """
- import json
- import re
- import urllib.parse
- from typing import Any, Dict, Iterable, Optional, Type, TypeVar, Union
-
- __all__ = [
-     "DirectUrl",
-     "DirectUrlValidationError",
-     "DirInfo",
-     "ArchiveInfo",
-     "VcsInfo",
- ]
-
- T = TypeVar("T")
-
- DIRECT_URL_METADATA_NAME = "direct_url.json"
- ENV_VAR_RE = re.compile(r"^\$\{[A-Za-z0-9-_]+\}(:\$\{[A-Za-z0-9-_]+\})?$")
-
-
- class DirectUrlValidationError(Exception):
-     pass
-
-
- def _get(
-     d: Dict[str, Any], expected_type: Type[T], key: str, default: Optional[T] = None
- ) -> Optional[T]:
-     """Get value from dictionary and verify expected type."""
-     if key not in d:
-         return default
-     value = d[key]
-     if not isinstance(value, expected_type):
-         raise DirectUrlValidationError(
-             "{!r} has unexpected type for {} (expected {})".format(
-                 value, key, expected_type
-             )
-         )
-     return value
-
-
- def _get_required(
-     d: Dict[str, Any], expected_type: Type[T], key: str, default: Optional[T] = None
- ) -> T:
-     value = _get(d, expected_type, key, default)
-     if value is None:
-         raise DirectUrlValidationError(f"{key} must have a value")
-     return value
-
-
- def _exactly_one_of(infos: Iterable[Optional["InfoType"]]) -> "InfoType":
-     infos = [info for info in infos if info is not None]
-     if not infos:
-         raise DirectUrlValidationError(
-             "missing one of archive_info, dir_info, vcs_info"
-         )
-     if len(infos) > 1:
-         raise DirectUrlValidationError(
-             "more than one of archive_info, dir_info, vcs_info"
-         )
-     assert infos[0] is not None
-     return infos[0]
-
-
- def _filter_none(**kwargs: Any) -> Dict[str, Any]:
-     """Make dict excluding None values."""
-     return {k: v for k, v in kwargs.items() if v is not None}
-
-
- class VcsInfo:
-     name = "vcs_info"
-
-     def __init__(
-         self,
-         vcs: str,
-         commit_id: str,
-         requested_revision: Optional[str] = None,
-     ) -> None:
-         self.vcs = vcs
-         self.requested_revision = requested_revision
-         self.commit_id = commit_id
-
-     @classmethod
-     def _from_dict(cls, d: Optional[Dict[str, Any]]) -> Optional["VcsInfo"]:
-         if d is None:
-             return None
-         return cls(
-             vcs=_get_required(d, str, "vcs"),
-             commit_id=_get_required(d, str, "commit_id"),
-             requested_revision=_get(d, str, "requested_revision"),
-         )
-
-     def _to_dict(self) -> Dict[str, Any]:
-         return _filter_none(
-             vcs=self.vcs,
-             requested_revision=self.requested_revision,
-             commit_id=self.commit_id,
-         )
-
-
- class ArchiveInfo:
-     name = "archive_info"
-
-     def __init__(
-         self,
-         hash: Optional[str] = None,
-         hashes: Optional[Dict[str, str]] = None,
-     ) -> None:
-         # set hashes before hash, since the hash setter will further populate hashes
-         self.hashes = hashes
-         self.hash = hash
-
-     @property
-     def hash(self) -> Optional[str]:
-         return self._hash
-
-     @hash.setter
-     def hash(self, value: Optional[str]) -> None:
-         if value is not None:
-             # Auto-populate the hashes key to upgrade to the new format automatically.
-             # We don't back-populate the legacy hash key from hashes.
-             try:
-                 hash_name, hash_value = value.split("=", 1)
-             except ValueError:
-                 raise DirectUrlValidationError(
-                     f"invalid archive_info.hash format: {value!r}"
-                 )
-             if self.hashes is None:
-                 self.hashes = {hash_name: hash_value}
-             elif hash_name not in self.hashes:
-                 self.hashes = self.hashes.copy()
-                 self.hashes[hash_name] = hash_value
-         self._hash = value
-
-     @classmethod
-     def _from_dict(cls, d: Optional[Dict[str, Any]]) -> Optional["ArchiveInfo"]:
-         if d is None:
-             return None
-         return cls(hash=_get(d, str, "hash"), hashes=_get(d, dict, "hashes"))
-
-     def _to_dict(self) -> Dict[str, Any]:
-         return _filter_none(hash=self.hash, hashes=self.hashes)
-
-
- class DirInfo:
-     name = "dir_info"
-
-     def __init__(
-         self,
-         editable: bool = False,
-     ) -> None:
-         self.editable = editable
-
-     @classmethod
-     def _from_dict(cls, d: Optional[Dict[str, Any]]) -> Optional["DirInfo"]:
-         if d is None:
-             return None
-         return cls(editable=_get_required(d, bool, "editable", default=False))
-
-     def _to_dict(self) -> Dict[str, Any]:
-         return _filter_none(editable=self.editable or None)
-
-
- InfoType = Union[ArchiveInfo, DirInfo, VcsInfo]
-
-
- class DirectUrl:
-     def __init__(
-         self,
-         url: str,
-         info: InfoType,
-         subdirectory: Optional[str] = None,
-     ) -> None:
-         self.url = url
-         self.info = info
-         self.subdirectory = subdirectory
-
-     def _remove_auth_from_netloc(self, netloc: str) -> str:
-         if "@" not in netloc:
-             return netloc
-         user_pass, netloc_no_user_pass = netloc.split("@", 1)
-         if (
-             isinstance(self.info, VcsInfo)
-             and self.info.vcs == "git"
-             and user_pass == "git"
-         ):
-             return netloc
-         if ENV_VAR_RE.match(user_pass):
-             return netloc
-         return netloc_no_user_pass
-
-     @property
-     def redacted_url(self) -> str:
-         """url with user:password part removed unless it is formed with
-         environment variables as specified in PEP 610, or it is ``git``
-         in the case of a git URL.
-         """
-         purl = urllib.parse.urlsplit(self.url)
-         netloc = self._remove_auth_from_netloc(purl.netloc)
-         surl = urllib.parse.urlunsplit(
-             (purl.scheme, netloc, purl.path, purl.query, purl.fragment)
-         )
-         return surl
-
-     def validate(self) -> None:
-         self.from_dict(self.to_dict())
-
-     @classmethod
-     def from_dict(cls, d: Dict[str, Any]) -> "DirectUrl":
-         return DirectUrl(
-             url=_get_required(d, str, "url"),
-             subdirectory=_get(d, str, "subdirectory"),
-             info=_exactly_one_of(
-                 [
-                     ArchiveInfo._from_dict(_get(d, dict, "archive_info")),
-                     DirInfo._from_dict(_get(d, dict, "dir_info")),
-                     VcsInfo._from_dict(_get(d, dict, "vcs_info")),
-                 ]
-             ),
-         )
-
-     def to_dict(self) -> Dict[str, Any]:
-         res = _filter_none(
-             url=self.redacted_url,
-             subdirectory=self.subdirectory,
-         )
-         res[self.info.name] = self.info._to_dict()
-         return res
-
-     @classmethod
-     def from_json(cls, s: str) -> "DirectUrl":
-         return cls.from_dict(json.loads(s))
-
-     def to_json(self) -> str:
-         return json.dumps(self.to_dict(), sort_keys=True)
-
-     def is_local_editable(self) -> bool:
-         return isinstance(self.info, DirInfo) and self.info.editable
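A small round-trip sketch using only the methods defined above (the URL and commit id are made up):

    direct_url = DirectUrl.from_json(
        '{"url": "https://github.com/pypa/pip.git",'
        ' "vcs_info": {"vcs": "git", "commit_id": "abc123"}}'
    )
    direct_url.validate()        # from_dict(to_dict()) must succeed
    print(direct_url.to_json())  # credentials are stripped via redacted_url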
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/resolvelib/structs.py DELETED
@@ -1,170 +0,0 @@
- import itertools
-
- from .compat import collections_abc
-
-
- class DirectedGraph(object):
-     """A graph structure with directed edges."""
-
-     def __init__(self):
-         self._vertices = set()
-         self._forwards = {}  # <key> -> Set[<key>]
-         self._backwards = {}  # <key> -> Set[<key>]
-
-     def __iter__(self):
-         return iter(self._vertices)
-
-     def __len__(self):
-         return len(self._vertices)
-
-     def __contains__(self, key):
-         return key in self._vertices
-
-     def copy(self):
-         """Return a shallow copy of this graph."""
-         other = DirectedGraph()
-         other._vertices = set(self._vertices)
-         other._forwards = {k: set(v) for k, v in self._forwards.items()}
-         other._backwards = {k: set(v) for k, v in self._backwards.items()}
-         return other
-
-     def add(self, key):
-         """Add a new vertex to the graph."""
-         if key in self._vertices:
-             raise ValueError("vertex exists")
-         self._vertices.add(key)
-         self._forwards[key] = set()
-         self._backwards[key] = set()
-
-     def remove(self, key):
-         """Remove a vertex from the graph, disconnecting all edges from/to it."""
-         self._vertices.remove(key)
-         for f in self._forwards.pop(key):
-             self._backwards[f].remove(key)
-         for t in self._backwards.pop(key):
-             self._forwards[t].remove(key)
-
-     def connected(self, f, t):
-         return f in self._backwards[t] and t in self._forwards[f]
-
-     def connect(self, f, t):
-         """Connect two existing vertices.
-
-         Nothing happens if the vertices are already connected.
-         """
-         if t not in self._vertices:
-             raise KeyError(t)
-         self._forwards[f].add(t)
-         self._backwards[t].add(f)
-
-     def iter_edges(self):
-         for f, children in self._forwards.items():
-             for t in children:
-                 yield f, t
-
-     def iter_children(self, key):
-         return iter(self._forwards[key])
-
-     def iter_parents(self, key):
-         return iter(self._backwards[key])
-
-
- class IteratorMapping(collections_abc.Mapping):
-     def __init__(self, mapping, accessor, appends=None):
-         self._mapping = mapping
-         self._accessor = accessor
-         self._appends = appends or {}
-
-     def __repr__(self):
-         return "IteratorMapping({!r}, {!r}, {!r})".format(
-             self._mapping,
-             self._accessor,
-             self._appends,
-         )
-
-     def __bool__(self):
-         return bool(self._mapping or self._appends)
-
-     __nonzero__ = __bool__  # XXX: Python 2.
-
-     def __contains__(self, key):
-         return key in self._mapping or key in self._appends
-
-     def __getitem__(self, k):
-         try:
-             v = self._mapping[k]
-         except KeyError:
-             return iter(self._appends[k])
-         return itertools.chain(self._accessor(v), self._appends.get(k, ()))
-
-     def __iter__(self):
-         more = (k for k in self._appends if k not in self._mapping)
-         return itertools.chain(self._mapping, more)
-
-     def __len__(self):
-         more = sum(1 for k in self._appends if k not in self._mapping)
-         return len(self._mapping) + more
-
-
- class _FactoryIterableView(object):
-     """Wrap an iterator factory returned by `find_matches()`.
-
-     Calling `iter()` on this class would invoke the underlying iterator
-     factory, making it a "collection with ordering" that can be iterated
-     through multiple times, but lacks random access methods presented in
-     built-in Python sequence types.
-     """
-
-     def __init__(self, factory):
-         self._factory = factory
-         self._iterable = None
-
-     def __repr__(self):
-         return "{}({})".format(type(self).__name__, list(self))
-
-     def __bool__(self):
-         try:
-             next(iter(self))
-         except StopIteration:
-             return False
-         return True
-
-     __nonzero__ = __bool__  # XXX: Python 2.
-
-     def __iter__(self):
-         iterable = (
-             self._factory() if self._iterable is None else self._iterable
-         )
-         self._iterable, current = itertools.tee(iterable)
-         return current
-
-
- class _SequenceIterableView(object):
-     """Wrap an iterable returned by find_matches().
-
-     This is essentially just a proxy to the underlying sequence that provides
-     the same interface as `_FactoryIterableView`.
-     """
-
-     def __init__(self, sequence):
-         self._sequence = sequence
-
-     def __repr__(self):
-         return "{}({})".format(type(self).__name__, self._sequence)
-
-     def __bool__(self):
-         return bool(self._sequence)
-
-     __nonzero__ = __bool__  # XXX: Python 2.
-
-     def __iter__(self):
-         return iter(self._sequence)
-
-
- def build_iter_view(matches):
-     """Build an iterable view from the value returned by `find_matches()`."""
-     if callable(matches):
-         return _FactoryIterableView(matches)
-     if not isinstance(matches, collections_abc.Sequence):
-         matches = list(matches)
-     return _SequenceIterableView(matches)
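A toy run of the `DirectedGraph` above, grounded in the methods shown (`connect` raises KeyError if the target vertex was never added):

    graph = DirectedGraph()
    graph.add("a")
    graph.add("b")
    graph.connect("a", "b")
    assert graph.connected("a", "b")
    assert list(graph.iter_children("a")) == ["b"]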
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/webencodings/mklabels.py DELETED
@@ -1,59 +0,0 @@
- """
-
-     webencodings.mklabels
-     ~~~~~~~~~~~~~~~~~~~~~
-
-     Regenarate the webencodings.labels module.
-
-     :copyright: Copyright 2012 by Simon Sapin
-     :license: BSD, see LICENSE for details.
-
- """
-
- import json
- try:
-     from urllib import urlopen
- except ImportError:
-     from urllib.request import urlopen
-
-
- def assert_lower(string):
-     assert string == string.lower()
-     return string
-
-
- def generate(url):
-     parts = ['''\
- """
-
-     webencodings.labels
-     ~~~~~~~~~~~~~~~~~~~
-
-     Map encoding labels to their name.
-
-     :copyright: Copyright 2012 by Simon Sapin
-     :license: BSD, see LICENSE for details.
-
- """
-
- # XXX Do not edit!
- # This file is automatically generated by mklabels.py
-
- LABELS = {
- ''']
-     labels = [
-         (repr(assert_lower(label)).lstrip('u'),
-          repr(encoding['name']).lstrip('u'))
-         for category in json.loads(urlopen(url).read().decode('ascii'))
-         for encoding in category['encodings']
-         for label in encoding['labels']]
-     max_len = max(len(label) for label, name in labels)
-     parts.extend(
-         '    %s:%s %s,\n' % (label, ' ' * (max_len - len(label)), name)
-         for label, name in labels)
-     parts.append('}')
-     return ''.join(parts)
-
-
- if __name__ == '__main__':
-     print(generate('http://encoding.spec.whatwg.org/encodings.json'))
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/config/__init__.py DELETED
@@ -1,35 +0,0 @@
- """For backward compatibility, expose main functions from
- ``setuptools.config.setupcfg``
- """
- import warnings
- from functools import wraps
- from textwrap import dedent
- from typing import Callable, TypeVar, cast
-
- from .._deprecation_warning import SetuptoolsDeprecationWarning
- from . import setupcfg
-
- Fn = TypeVar("Fn", bound=Callable)
-
- __all__ = ('parse_configuration', 'read_configuration')
-
-
- def _deprecation_notice(fn: Fn) -> Fn:
-     @wraps(fn)
-     def _wrapper(*args, **kwargs):
-         msg = f"""\
-         As setuptools moves its configuration towards `pyproject.toml`,
-         `{__name__}.{fn.__name__}` became deprecated.
-
-         For the time being, you can use the `{setupcfg.__name__}` module
-         to access a backward compatible API, but this module is provisional
-         and might be removed in the future.
-         """
-         warnings.warn(dedent(msg), SetuptoolsDeprecationWarning, stacklevel=2)
-         return fn(*args, **kwargs)
-
-     return cast(Fn, _wrapper)
-
-
- read_configuration = _deprecation_notice(setupcfg.read_configuration)
- parse_configuration = _deprecation_notice(setupcfg.parse_configuration)
spaces/Avinash-12035/MyGenAIChatBot/app.py DELETED
@@ -1,34 +0,0 @@
- import os
- import gradio as gr
- from langchain.chat_models import ChatOpenAI
- from langchain import LLMChain, PromptTemplate
- from langchain.memory import ConversationBufferMemory
-
- OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
-
- template = """Meet Avinash, your youthful and witty personal assistant! At 18 years old, he's full of energy and always eager to help. Avinash's goal is to assist you with any questions or problems you might have. His enthusiasm shines through in every response, making interactions with him enjoyable and engaging.
- {chat_history}
- User: {user_message}
- Chatbot:"""
-
- prompt = PromptTemplate(
-     input_variables=["chat_history", "user_message"], template=template
- )
-
- memory = ConversationBufferMemory(memory_key="chat_history")
-
- llm_chain = LLMChain(
-     llm=ChatOpenAI(temperature=0.5, model_name="gpt-3.5-turbo"),
-     prompt=prompt,
-     verbose=True,
-     memory=memory,
- )
-
- def get_text_response(user_message, history):
-     response = llm_chain.predict(user_message=user_message)
-     return response
-
- demo = gr.ChatInterface(get_text_response)
-
- if __name__ == "__main__":
-     demo.launch()  # To create a public link, set `share=True` in `launch()`. To enable errors and logs, set `debug=True` in `launch()`.
spaces/Benson/text-generation/Examples/Anime Avatar.md DELETED
@@ -1,101 +0,0 @@
- <br />
- <h1>What an Anime Avatar Is and Why You Should Have One</h1>
- <p>Anime avatar is a term that refers to a digital image or character that represents you online, drawn in the style of anime, a form of animation from Japan. Anime avatars are becoming more and more popular, as they offer many benefits to online users who want to express themselves, connect with others, and have fun. In this article, we will explain what anime avatars are, how they can benefit you, what some examples and trends of anime avatars are, and how you can create your own anime avatar in four simple steps.</p>
- <h2>anime avatar</h2><br /><p><b><b>Download Zip</b> &#10022;&#10022;&#10022; <a href="https://bltlly.com/2v6J87">https://bltlly.com/2v6J87</a></b></p><br /><br />
- <h2>The definition and origin of the anime avatar</h2>
- <p>To understand what anime avatars are, we first need to understand what anime and avatars are separately.</p>
- <h3>Anime as a style of animation from Japan</h3>
- <p>Anime is a word derived from animation, and it refers to a style of animation that originated in Japan. Anime is characterized by its distinctive art style, which often features large expressive eyes, colorful hair, exaggerated expressions, and dynamic movement. Anime also covers a wide range of genres, themes, and stories, appealing to diverse audiences and tastes. Anime has a long history and a large fan base, both in Japan and around the world. Some examples of famous anime series are Naruto, One Piece, Dragon Ball, Sailor Moon, Pokemon, Attack on Titan, My Hero Academia, Demon Slayer, etc.</p>
- <h3>Avatar as a digital representation of yourself</h3>
- <p>Avatar is a word that refers to a digital image or character that represents a person online, on platforms and services such as forums, games, social networks, and chat apps.</p>
- <h3>Anime avatar as a combination of anime and avatar</h3>
- <p>Anime avatar is a term that combines anime and avatar, meaning a digital image or character that represents you online using the anime style. An anime avatar can be a 2D or 3D image or model that mimics the appearance and movements of an anime character. An anime avatar can also have various features and options that let you customize its appearance, expressions, voice, clothing, accessories, backgrounds, etc. An anime avatar can be used for various purposes and occasions online, such as chatting, gaming, streaming, socializing, etc.</p>
- <h2>The benefits of having an anime avatar</h2>
- <p>Having an anime avatar can offer you many benefits online. Here are some of them:</p>
- <p></p>
- <h3>Expressing your personality and creativity</h3>
- <p>One of the main benefits of having an anime avatar is that it lets you express your personality and creativity online. You can choose an anime avatar that reflects your traits, preferences, interests, hobbies, moods, etc., or create one that is completely original and unique. You can also change your anime avatar to suit different situations and contexts online. For example, you can have different anime avatars for different platforms, games, genres, moods, etc. You can also unleash your creativity and imagination by designing your anime avatar with various options and features. You can make your anime avatar as realistic or as fantastical as you want, and experiment with different styles and combinations.</p>
- <h3>Joining the growing community of anime fans</h3>
- <p>A second benefit of having an anime avatar is that it lets you join the growing community of anime fans online, where you can share your passion, meet people with similar tastes, and take part in fan activities and events.</p>
- <h3>Enhancing your online presence and engagement</h3>
- <p>A third benefit of having an anime avatar is that it can enhance your online presence and engagement. An anime avatar can help you stand out from the crowd and attract more attention and followers online. An anime avatar can also make you more memorable and recognizable online, as it can create a strong visual impression and identity for you. An anime avatar can also make you more appealing and interactive online, as it can convey your emotions and expressions more vividly and dynamically. An anime avatar can also make you more entertaining and fun online, as it can add humor, charm, and personality to your online content.</p>
- <h2>Examples and trends of anime avatars</h2>
- <p>Anime avatars are not a new phenomenon, but they have become more and more popular in recent years. Here are some examples and trends of anime avatars:</p>
- <h3>Popular anime avatar generators and platforms</h3>
- <p>There are many anime avatar generators and platforms available online that let you create your own anime avatar quickly and easily. Some of the most popular ones are:</p>
- <table>
- <tr>
- <th>Name</th>
- <th>Description</th>
- <th>URL</th>
- </tr>
- <tr>
- <td>Picrew</td>
- <td>A Japanese website that hosts thousands of user-generated anime avatar makers with various styles and options.</td>
- <td>(https://picrew.me/)</td>
- </tr>
- <tr>
- <td>Vroid Studio</td>
- <td>Free 3D software that lets you create your own 3D anime avatar models with high quality and detail.</td>
- <td>(https://vroid.com/en/studio)</td>
- </tr>
- <tr>
- <td>VRChat</td>
- <td>A social virtual reality platform that lets you create, upload, and use your own 3D anime avatars in various virtual worlds and scenarios.</td>
- <td>(https://www.vrchat.com/)</td>
- </tr>
- <tr>
- <td>Zepeto</td>
- <td>A mobile app that lets you create your own 3D anime avatars with realistic facial expressions and movements.</td>
- <td>(https://zepeto.me/)</td>
- </tr>
- <tr>
- <td>FaceRig</td>
- <td>Software that lets you use your webcam to animate your own 2D or 3D anime avatars in real time.</td>
- <td>(https://facerig.com/)</td>
- </tr>
- </table> <h3>The adoption of anime avatars by online personalities</h3>
- <p>Another trend is that anime avatars have been adopted by many online personalities, such as streamers, influencers, celebrities, etc. Some of them use anime avatars as their main or alternative online persona, while others use them as a way to experiment with different styles and genres. Some examples of online personalities who use anime avatars are:</p>
- <ul>
- <li>Kizuna AI: A virtual YouTuber who is considered the first and most popular anime avatar streamer. She has over 4 million subscribers on YouTube and is known for her cute, energetic personality.</li>
- <li>CodeMiko: A virtual streamer who uses a 3D anime avatar controlled by a motion-capture suit. She has over 1 million followers on Twitch and is known for her interactive, immersive streams.</li>
- <li>Lil Nas X: A rapper and singer who used a 3D anime avatar to perform his hit song "Montero (Call Me By Your Name)" at a virtual concert in Roblox. It drew over 30 million viewers and received positive feedback from fans.</li>
- <li>Belle Delphine: A model and influencer who used a 2D anime avatar to prank her fans on April Fools' Day. She pretended to be a virtual streamer and uploaded a video of her anime avatar dancing and singing.</li>
- <li>Pokimane: A streamer and gamer who used a 2D anime avatar to stream on Twitch as a joke. She surprised her fans with her anime avatar, which was based on her real appearance and voice.</li>
- </ul>
- <h3>The future possibilities of anime avatars with AI and VR</h3>
- <p>A third trend is that anime avatars have the potential to evolve and improve with advances in AI and VR technologies. Some possible future scenarios for anime avatars are:</p>
- <ul>
- <li>Anime avatars that can speak and move naturally in real time, driven by generative AI.</li>
- <li>Anime avatars that can learn from your behavior, preferences, and feedback, and adapt to your needs and expectations using AI models and systems.</li>
- <li>Anime avatars that can interact with you and with other users in real time using AI chatbots and agents.</li>
- <li>Anime avatars that can be experienced in full immersion and presence using VR headsets and devices.</li>
- <li>Anime avatars that can be customized and personalized using VR tools and interfaces.</li>
- </ul>
- <h2>How to create your own anime avatar in four simple steps</h2>
- <p>If you are interested in creating your own anime avatar, here are four simple steps you can follow:</p>
- <h3>Choose an anime avatar generator that suits your needs</h3>
- <p>The first step is to choose an anime avatar generator that suits your needs. There are many anime avatar generators available online, each with its own advantages and disadvantages. You should consider factors such as the style, quality, features, options, ease of use, cost, etc., of the anime avatar generator. You can also compare different anime avatar generators by reading reviews, watching tutorials, or trying demos. You can also check the table above for some popular anime avatar generators.</p>
- <h3>Customize your anime avatar with various options and features</h3>
- <p>The second step is to customize your anime avatar with various options and features. Depending on the anime avatar generator you choose, you can customize aspects such as the face, hair, eyes, nose, mouth, skin, body, clothing, accessories, etc., of your anime avatar. You can also adjust the colors, sizes, shapes, positions, angles, etc., of these aspects. You can also add effects such as shadows, lighting, filters, etc., to enhance your anime avatar. You can also preview your anime avatar in different poses and expressions.</p> <h3>Save and download your anime avatar in high quality</h3>
- <p>The third step is to save and download your anime avatar in high quality, in a format and resolution suitable for where you intend to use it.</p>
- <h3>Share and use your anime avatar on different platforms and occasions</h3>
- <p>The fourth and final step is to share and use your anime avatar on different platforms and occasions. You can use your anime avatar for various purposes and occasions online, such as chatting, gaming, streaming, socializing, etc. You can also share your anime avatar with your friends, family, fans, followers, etc., online. You can also upload your anime avatar to different platforms and websites that support anime avatars, such as VRChat, FaceRig, Zepeto, etc. You can also print your anime avatar on different products and materials, such as stickers, posters, shirts, mugs, etc.</p>
- <h2>Conclusion and FAQs</h2>
- <p>Anime avatars are digital images or characters that represent you online using the anime style. Anime avatars are becoming more and more popular, as they offer many benefits to online users who want to express themselves, connect with others, and have fun. Anime avatars are also evolving and improving with advances in AI and VR technologies. You can create your own anime avatar in four simple steps: choose an anime avatar generator that suits your needs, customize your anime avatar with various options and features, save and download your anime avatar in high quality, and share and use your anime avatar on different platforms and occasions.</p>
- <p>Here are some frequently asked questions about anime avatars:</p>
- <ul>
- <li>Q: How much does it cost to create an anime avatar?</li>
- <li>A: It depends on the anime avatar generator you choose. Some anime avatar generators are free to use, while others may charge a fee or require a subscription. You should check the pricing and terms of service of the anime avatar generator before using it.</li>
- <li>Q: How long does it take to create an anime avatar?</li>
- <li>A: It depends on the generator you choose and how much you customize; it can range from a few minutes to a few hours.</li>
- <li>Q: How can I make my anime avatar look more like me?</li>
- <li>A: There are some tips and tricks that can help you make your anime avatar look more like you. For example, you can use a photo of yourself as a reference or a template for your anime avatar. You can also adjust the proportions, colors, shapes, etc., of your anime avatar to match your real appearance. You can also add details such as glasses, piercings, tattoos, etc.</li>
- <li>Q: How can I make my anime avatar more unique and original?</li>
- <li>A: There are some tips and tricks that can help you make your anime avatar more unique and original. For example, you can mix and match different anime styles and genres for your anime avatar. You can also add elements such as fantasy, science fiction, horror, etc., to your anime avatar. You can also experiment with different effects such as filters, shadows, lighting, etc. You can also use your creativity and imagination to create your anime avatar.</li>
- <li>Q: How can I protect my anime avatar from being stolen or copied?</li>
- <li>A: There are some tips and tricks that can help you protect your anime avatar from being stolen or copied. For example, you can add a watermark or a signature to your anime avatar. You can also use a reverse image search tool to check whether your anime avatar has been used by someone else without your permission. You can also report or take legal action against anyone who steals or copies your anime avatar.</li>
- </ul>
- <p>I hope this article has helped you learn more about anime avatars and how to create your own. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading, and have fun with your anime avatar!</p> 64aa2da5cf<br />
- <br />
- <br />
spaces/Big-Web/MMSD/env/Lib/site-packages/boto3/docs/service.py DELETED
@@ -1,199 +0,0 @@
- # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License"). You
- # may not use this file except in compliance with the License. A copy of
- # the License is located at
- #
- # https://aws.amazon.com/apache2.0/
- #
- # or in the "license" file accompanying this file. This file is
- # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
- # ANY KIND, either express or implied. See the License for the specific
- # language governing permissions and limitations under the License.
- import os
-
- from botocore.docs.bcdoc.restdoc import DocumentStructure
- from botocore.docs.service import ServiceDocumenter as BaseServiceDocumenter
- from botocore.exceptions import DataNotFoundError
-
- import boto3
- from boto3.docs.client import Boto3ClientDocumenter
- from boto3.docs.resource import ResourceDocumenter, ServiceResourceDocumenter
- from boto3.utils import ServiceContext
-
-
- class ServiceDocumenter(BaseServiceDocumenter):
-     # The path used to find examples
-     EXAMPLE_PATH = os.path.join(os.path.dirname(boto3.__file__), 'examples')
-
-     def __init__(self, service_name, session, root_docs_path):
-         super().__init__(
-             service_name=service_name,
-             # I know that this is an internal attribute, but the botocore session
-             # is needed to load the paginator and waiter models.
-             session=session._session,
-             root_docs_path=root_docs_path,
-         )
-         self._boto3_session = session
-         self._client = self._boto3_session.client(service_name)
-         self._service_resource = None
-         if self._service_name in self._boto3_session.get_available_resources():
-             self._service_resource = self._boto3_session.resource(service_name)
-         self.sections = [
-             'title',
-             'client',
-             'paginators',
-             'waiters',
-             'resources',
-             'examples',
-         ]
-         self._root_docs_path = root_docs_path
-         self._USER_GUIDE_LINK = (
-             'https://boto3.amazonaws.com/'
-             'v1/documentation/api/latest/guide/resources.html'
-         )
-
-     def document_service(self):
-         """Documents an entire service.
-
-         :returns: The reStructured text of the documented service.
-         """
-         doc_structure = DocumentStructure(
-             self._service_name, section_names=self.sections, target='html'
-         )
-         self.title(doc_structure.get_section('title'))
-
-         self.client_api(doc_structure.get_section('client'))
-         self.paginator_api(doc_structure.get_section('paginators'))
-         self.waiter_api(doc_structure.get_section('waiters'))
-         if self._service_resource:
-             self.resource_section(doc_structure.get_section('resources'))
-         self._document_examples(doc_structure.get_section('examples'))
-         return doc_structure.flush_structure()
-
-     def client_api(self, section):
-         examples = None
-         try:
-             examples = self.get_examples(self._service_name)
-         except DataNotFoundError:
-             pass
-
-         Boto3ClientDocumenter(
-             self._client, self._root_docs_path, examples
-         ).document_client(section)
-
-     def resource_section(self, section):
-         section.style.h2('Resources')
-         section.style.new_line()
-         section.write(
-             'Resources are available in boto3 via the '
-             '``resource`` method. For more detailed instructions '
-             'and examples on the usage of resources, see the '
-             'resources '
-         )
-         section.style.external_link(
-             title='user guide',
-             link=self._USER_GUIDE_LINK,
-         )
-         section.write('.')
-         section.style.new_line()
-         section.style.new_line()
-         section.write('The available resources are:')
-         section.style.new_line()
-         section.style.toctree()
-         self._document_service_resource(section)
-         self._document_resources(section)
-
-     def _document_service_resource(self, section):
-         # Create a new DocumentStructure for each Service Resource and add contents.
-         service_resource_doc = DocumentStructure(
-             'service-resource', target='html'
-         )
-         breadcrumb_section = service_resource_doc.add_new_section('breadcrumb')
-         breadcrumb_section.style.ref(
-             self._client.__class__.__name__, f'../../{self._service_name}'
-         )
-         breadcrumb_section.write(' / Resource / ServiceResource')
-         ServiceResourceDocumenter(
-             self._service_resource, self._session, self._root_docs_path
-         ).document_resource(service_resource_doc)
-         # Write collections in individual/nested files.
-         # Path: <root>/reference/services/<service>/<resource_name>/<collection_name>.rst
-         resource_name = self._service_resource.meta.resource_model.name
-         if resource_name == self._service_name:
-             resource_name = 'service-resource'
-         service_resource_dir_path = os.path.join(
-             self._root_docs_path,
-             f'{self._service_name}',
-             f'{resource_name.lower()}',
-         )
-         service_resource_doc.write_to_file(service_resource_dir_path, 'index')
-         section.style.tocitem(f'{self._service_name}/{resource_name}/index')
-
-     def _document_resources(self, section):
-         temp_identifier_value = 'foo'
-         loader = self._session.get_component('data_loader')
-         json_resource_model = loader.load_service_model(
-             self._service_name, 'resources-1'
-         )
-         service_model = self._service_resource.meta.client.meta.service_model
-         for resource_name in json_resource_model['resources']:
-             resource_model = json_resource_model['resources'][resource_name]
-             resource_cls = (
-                 self._boto3_session.resource_factory.load_from_definition(
-                     resource_name=resource_name,
-                     single_resource_json_definition=resource_model,
-                     service_context=ServiceContext(
-                         service_name=self._service_name,
-                         resource_json_definitions=json_resource_model[
-                             'resources'
-                         ],
-                         service_model=service_model,
-                         service_waiter_model=None,
-                     ),
-                 )
-             )
-             identifiers = resource_cls.meta.resource_model.identifiers
-             args = []
-             for _ in identifiers:
-                 args.append(temp_identifier_value)
-             resource = resource_cls(*args, client=self._client)
-             # Create a new DocumentStructure for each Resource and add contents.
-             resource_name = resource.meta.resource_model.name.lower()
-             resource_doc = DocumentStructure(resource_name, target='html')
-             breadcrumb_section = resource_doc.add_new_section('breadcrumb')
-             breadcrumb_section.style.ref(
-                 self._client.__class__.__name__, f'../../{self._service_name}'
-             )
-             breadcrumb_section.write(
-                 f' / Resource / {resource.meta.resource_model.name}'
-             )
-             ResourceDocumenter(
-                 resource, self._session, self._root_docs_path
-             ).document_resource(
-                 resource_doc.add_new_section(resource.meta.resource_model.name)
-             )
-             # Write collections in individual/nested files.
-             # Path: <root>/reference/services/<service>/<resource_name>/<index>.rst
-             service_resource_dir_path = os.path.join(
-                 self._root_docs_path,
-                 f'{self._service_name}',
-                 f'{resource_name}',
-             )
-             resource_doc.write_to_file(service_resource_dir_path, 'index')
-             section.style.tocitem(
-                 f'{self._service_name}/{resource_name}/index'
-             )
-
-     def _get_example_file(self):
-         return os.path.realpath(
-             os.path.join(self.EXAMPLE_PATH, self._service_name + '.rst')
-         )
-
-     def _document_examples(self, section):
-         examples_file = self._get_example_file()
-         if os.path.isfile(examples_file):
-             section.style.h2('Examples')
-             section.style.new_line()
-             with open(examples_file) as f:
-                 section.write(f.read())
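
As an aside for readers of the deleted `ServiceDocumenter` above: a minimal driver sketch follows. It is not part of the repository; it assumes boto3 is installed with its bundled service models, and `generated_docs` is a made-up output directory.

```python
# Hypothetical usage sketch for the ServiceDocumenter shown above
# (an assumption-laden sketch, not the project's own tooling).
import os

import boto3.session
from boto3.docs.service import ServiceDocumenter

session = boto3.session.Session(region_name='us-east-1')
os.makedirs('generated_docs', exist_ok=True)

# document_service() renders the full service page and returns it as bytes.
documenter = ServiceDocumenter('s3', session, 'generated_docs')
with open(os.path.join('generated_docs', 's3.rst'), 'wb') as f:
    f.write(documenter.document_service())
```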
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/exceptions.py DELETED
@@ -1,323 +0,0 @@
- from __future__ import absolute_import
-
- from .packages.six.moves.http_client import IncompleteRead as httplib_IncompleteRead
-
- # Base Exceptions
-
-
- class HTTPError(Exception):
-     """Base exception used by this module."""
-
-     pass
-
-
- class HTTPWarning(Warning):
-     """Base warning used by this module."""
-
-     pass
-
-
- class PoolError(HTTPError):
-     """Base exception for errors caused within a pool."""
-
-     def __init__(self, pool, message):
-         self.pool = pool
-         HTTPError.__init__(self, "%s: %s" % (pool, message))
-
-     def __reduce__(self):
-         # For pickling purposes.
-         return self.__class__, (None, None)
-
-
- class RequestError(PoolError):
-     """Base exception for PoolErrors that have associated URLs."""
-
-     def __init__(self, pool, url, message):
-         self.url = url
-         PoolError.__init__(self, pool, message)
-
-     def __reduce__(self):
-         # For pickling purposes.
-         return self.__class__, (None, self.url, None)
-
-
- class SSLError(HTTPError):
-     """Raised when SSL certificate fails in an HTTPS connection."""
-
-     pass
-
-
- class ProxyError(HTTPError):
-     """Raised when the connection to a proxy fails."""
-
-     def __init__(self, message, error, *args):
-         super(ProxyError, self).__init__(message, error, *args)
-         self.original_error = error
-
-
- class DecodeError(HTTPError):
-     """Raised when automatic decoding based on Content-Type fails."""
-
-     pass
-
-
- class ProtocolError(HTTPError):
-     """Raised when something unexpected happens mid-request/response."""
-
-     pass
-
-
- #: Renamed to ProtocolError but aliased for backwards compatibility.
- ConnectionError = ProtocolError
-
-
- # Leaf Exceptions
-
-
- class MaxRetryError(RequestError):
-     """Raised when the maximum number of retries is exceeded.
-
-     :param pool: The connection pool
-     :type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`
-     :param string url: The requested Url
-     :param exceptions.Exception reason: The underlying error
-
-     """
-
-     def __init__(self, pool, url, reason=None):
-         self.reason = reason
-
-         message = "Max retries exceeded with url: %s (Caused by %r)" % (url, reason)
-
-         RequestError.__init__(self, pool, url, message)
-
-
- class HostChangedError(RequestError):
-     """Raised when an existing pool gets a request for a foreign host."""
-
-     def __init__(self, pool, url, retries=3):
-         message = "Tried to open a foreign host with url: %s" % url
-         RequestError.__init__(self, pool, url, message)
-         self.retries = retries
-
-
- class TimeoutStateError(HTTPError):
-     """Raised when passing an invalid state to a timeout"""
-
-     pass
-
-
- class TimeoutError(HTTPError):
-     """Raised when a socket timeout error occurs.
-
-     Catching this error will catch both :exc:`ReadTimeoutErrors
-     <ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.
-     """
-
-     pass
-
-
- class ReadTimeoutError(TimeoutError, RequestError):
-     """Raised when a socket timeout occurs while receiving data from a server"""
-
-     pass
-
-
- # This timeout error does not have a URL attached and needs to inherit from the
- # base HTTPError
- class ConnectTimeoutError(TimeoutError):
-     """Raised when a socket timeout occurs while connecting to a server"""
-
-     pass
-
-
- class NewConnectionError(ConnectTimeoutError, PoolError):
-     """Raised when we fail to establish a new connection. Usually ECONNREFUSED."""
-
-     pass
-
-
- class EmptyPoolError(PoolError):
-     """Raised when a pool runs out of connections and no more are allowed."""
-
-     pass
-
-
- class ClosedPoolError(PoolError):
-     """Raised when a request enters a pool after the pool has been closed."""
-
-     pass
-
-
- class LocationValueError(ValueError, HTTPError):
-     """Raised when there is something wrong with a given URL input."""
-
-     pass
-
-
- class LocationParseError(LocationValueError):
-     """Raised when get_host or similar fails to parse the URL input."""
-
-     def __init__(self, location):
-         message = "Failed to parse: %s" % location
-         HTTPError.__init__(self, message)
-
-         self.location = location
-
-
- class URLSchemeUnknown(LocationValueError):
-     """Raised when a URL input has an unsupported scheme."""
-
-     def __init__(self, scheme):
-         message = "Not supported URL scheme %s" % scheme
-         super(URLSchemeUnknown, self).__init__(message)
-
-         self.scheme = scheme
-
-
- class ResponseError(HTTPError):
-     """Used as a container for an error reason supplied in a MaxRetryError."""
-
-     GENERIC_ERROR = "too many error responses"
-     SPECIFIC_ERROR = "too many {status_code} error responses"
-
-
- class SecurityWarning(HTTPWarning):
-     """Warned when performing security reducing actions"""
-
-     pass
-
-
- class SubjectAltNameWarning(SecurityWarning):
-     """Warned when connecting to a host with a certificate missing a SAN."""
-
-     pass
-
-
- class InsecureRequestWarning(SecurityWarning):
-     """Warned when making an unverified HTTPS request."""
-
-     pass
-
-
- class SystemTimeWarning(SecurityWarning):
-     """Warned when system time is suspected to be wrong"""
-
-     pass
-
-
- class InsecurePlatformWarning(SecurityWarning):
-     """Warned when certain TLS/SSL configuration is not available on a platform."""
-
-     pass
-
-
- class SNIMissingWarning(HTTPWarning):
-     """Warned when making a HTTPS request without SNI available."""
-
-     pass
-
-
- class DependencyWarning(HTTPWarning):
-     """
-     Warned when an attempt is made to import a module with missing optional
-     dependencies.
-     """
-
-     pass
-
-
- class ResponseNotChunked(ProtocolError, ValueError):
-     """Response needs to be chunked in order to read it as chunks."""
-
-     pass
-
-
- class BodyNotHttplibCompatible(HTTPError):
-     """
-     Body should be :class:`http.client.HTTPResponse` like
-     (have an fp attribute which returns raw chunks) for read_chunked().
-     """
-
-     pass
-
-
- class IncompleteRead(HTTPError, httplib_IncompleteRead):
-     """
-     Response length doesn't match expected Content-Length
-
-     Subclass of :class:`http.client.IncompleteRead` to allow int value
-     for ``partial`` to avoid creating large objects on streamed reads.
-     """
-
-     def __init__(self, partial, expected):
-         super(IncompleteRead, self).__init__(partial, expected)
-
-     def __repr__(self):
-         return "IncompleteRead(%i bytes read, %i more expected)" % (
-             self.partial,
-             self.expected,
-         )
-
-
- class InvalidChunkLength(HTTPError, httplib_IncompleteRead):
-     """Invalid chunk length in a chunked response."""
-
-     def __init__(self, response, length):
-         super(InvalidChunkLength, self).__init__(
-             response.tell(), response.length_remaining
-         )
-         self.response = response
-         self.length = length
-
-     def __repr__(self):
-         return "InvalidChunkLength(got length %r, %i bytes read)" % (
-             self.length,
-             self.partial,
-         )
-
-
- class InvalidHeader(HTTPError):
-     """The header provided was somehow invalid."""
-
-     pass
-
-
- class ProxySchemeUnknown(AssertionError, URLSchemeUnknown):
-     """ProxyManager does not support the supplied scheme"""
-
-     # TODO(t-8ch): Stop inheriting from AssertionError in v2.0.
-
-     def __init__(self, scheme):
-         # 'localhost' is here because our URL parser parses
-         # localhost:8080 -> scheme=localhost, remove if we fix this.
-         if scheme == "localhost":
-             scheme = None
-         if scheme is None:
-             message = "Proxy URL had no scheme, should start with http:// or https://"
-         else:
-             message = (
-                 "Proxy URL had unsupported scheme %s, should use http:// or https://"
-                 % scheme
-             )
-         super(ProxySchemeUnknown, self).__init__(message)
-
-
- class ProxySchemeUnsupported(ValueError):
-     """Fetching HTTPS resources through HTTPS proxies is unsupported"""
-
-     pass
-
-
- class HeaderParsingError(HTTPError):
-     """Raised by assert_header_parsing, but we convert it to a log.warning statement."""
-
-     def __init__(self, defects, unparsed_data):
-         message = "%s, unparsed data: %r" % (defects or "Unknown", unparsed_data)
-         super(HeaderParsingError, self).__init__(message)
-
-
- class UnrewindableBodyError(HTTPError):
-     """urllib3 encountered an error when trying to rewind a body"""
-
-     pass
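
A quick illustration (not from the deleted file) of how this hierarchy is consumed in practice: `MaxRetryError` is the usual top-level failure, with the underlying cause attached as `reason`. The URL below is a deliberately unreachable placeholder.

```python
# Sketch: catching urllib3's retry-exhaustion error from the hierarchy above.
import urllib3
from urllib3.exceptions import MaxRetryError

http = urllib3.PoolManager()
try:
    # retries=0 exhausts the retry budget on the first connection failure.
    http.request('GET', 'http://localhost:9', retries=0)
except MaxRetryError as exc:
    # exc.reason is typically a leaf exception such as NewConnectionError.
    print('request to %s failed: %r' % (exc.url, exc.reason))
```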
spaces/BramVanroy/mateo-demo/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: MATEO
- emoji: 🎈
- colorFrom: green
- colorTo: green
- sdk: docker
- app_port: 7860
- pinned: false
- license: gpl-3.0
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Burcin/ExtractiveSummarizer/app.py DELETED
@@ -1,118 +0,0 @@
- import gradio as gr
- from gradio.mix import Parallel, Series
- import wikipedia
- import spacy
- from spacy.lang.en.stop_words import STOP_WORDS
- from string import punctuation
- import nltk
- nltk.download('wordnet', quiet=True)
- nltk.download('punkt', quiet=True)
- from nltk.stem import WordNetLemmatizer
- from heapq import nlargest
- import warnings
- from sklearn.feature_extraction.text import TfidfVectorizer
- import numpy as np
-
- warnings.filterwarnings("ignore")
-
- def get_wiki_original_text(inp):
-     text = wikipedia.summary(inp)
-     return text
-
-
-
- def get_wiki_summary_by_lem(inp):
-     text = wikipedia.summary(inp)
-
-     print(text)
-
-     stopwords = list(STOP_WORDS)
-
-     lemmatizer = WordNetLemmatizer()
-     tokens = [lemmatizer.lemmatize(str(token).lower()) for token in nltk.word_tokenize(text) if str(token) not in punctuation and str(token).lower() not in stopwords and len(token) >1]
-     word_counts = {}
-
-     for token in tokens:
-         if token in word_counts.keys():
-             word_counts[token] += 1
-         else:
-             word_counts[token] = 1
-
-
-
-     sentence_scores = {}
-
-     for sentence in nltk.sent_tokenize(text):
-         sentence_scores[sentence] = 0
-         for wrd in nltk.word_tokenize(sentence):
-             if lemmatizer.lemmatize(str(wrd).lower()) in word_counts.keys():
-                 sentence_scores[sentence] += word_counts[lemmatizer.lemmatize(str(wrd).lower())]
-
-     summary_length = 0
-
-     if len(sentence_scores) > 5 :
-         summary_length = int(len(sentence_scores)*0.20)
-     else:
-         summary_length = int(len(sentence_scores)*0.50)
-
-     summary = str()
-
-     for sentence in nltk.sent_tokenize(text):
-         for i in range(0,summary_length):
-             if str(sentence).find(str(nlargest(summary_length, sentence_scores, key = sentence_scores.get)[i])) == 0:
-                 summary += str(sentence).replace('\n','')
-                 summary += ' '
-
-
-     print('\033[1m' + "Summarized Text" + '\033[0m')
-
-     return summary
-
-
- def get_wiki_summary_by_tfidf(inp):
-     text = wikipedia.summary(inp)
-
-     tfidf_vectorizer = TfidfVectorizer(ngram_range=(1,3))
-
-     all_sentences = [str(sent) for sent in nltk.sent_tokenize(text)]
-     sentence_vectors = tfidf_vectorizer.fit_transform(all_sentences)
-
-     sentence_scores_vector = np.hstack(np.array(sentence_vectors.sum(axis=1)))
-
-     sentence_scores = dict(zip(all_sentences, sentence_scores_vector))
-
-     summary_length = 0
-
-     if len(sentence_scores) > 5 :
-         summary_length = int(len(sentence_scores)*0.20)
-     else:
-         summary_length = int(len(sentence_scores)*0.50)
-
-     summary = str()
-
-     for sentence in nltk.sent_tokenize(text):
-         for i in range(0,summary_length):
-             if str(sentence).find(str(nlargest(summary_length, sentence_scores, key = sentence_scores.get)[i])) == 0:
-                 summary += str(sentence).replace('\n','')
-                 summary += ' '
-
-
-     return summary
-
-
-
- desc = """This interface allows you to summarize Wikipedia contents. Only requirement is to write the topic and it collects content by fetching from Wikipedia. For summarization this model uses 2 different extractive summarization methods and the number of sentences in the output depends on the length of the original text."""
-
-
- sample = [['Europe'],['Great Depression'],['Crocodile Dundee']]
-
-
- iface = Parallel(gr.Interface(fn=get_wiki_original_text, inputs=gr.inputs.Textbox(label="Text"), outputs="text", description='Original Text'),
-                  gr.Interface(fn=get_wiki_summary_by_lem, inputs=gr.inputs.Textbox(label="Text"), outputs="text", description='Summary 1'),
-                  gr.Interface(fn=get_wiki_summary_by_tfidf, inputs=gr.inputs.Textbox(label="Text"), outputs="text", description='Summary 2'),
-                  title= 'Text Summarizer',
-                  description = desc,
-                  examples=sample,
-                  inputs = gr.inputs.Textbox(label="Text"))
-
- iface.launch(inline = False)
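
The TF-IDF scorer in `get_wiki_summary_by_tfidf` boils down to summing each sentence's TF-IDF weights. A self-contained sketch of that core idea (with made-up sentences and no Wikipedia fetch) follows; only scikit-learn and numpy are assumed.

```python
# Standalone sketch of the sentence-scoring step used in the app above.
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer

sentences = [
    "The cat sat on the mat.",
    "Cats and dogs are common household pets.",
    "The stock market fell sharply today.",
]
vectorizer = TfidfVectorizer(ngram_range=(1, 3))
matrix = vectorizer.fit_transform(sentences)

# Score each sentence by the sum of its TF-IDF weights, then keep the top one.
scores = np.asarray(matrix.sum(axis=1)).ravel()
print(sentences[int(scores.argmax())])
```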
spaces/CVPR/LIVE/edge_query.h DELETED
@@ -1,7 +0,0 @@
- #pragma once
-
- struct EdgeQuery {
-     int shape_group_id;
-     int shape_id;
-     bool hit; // Do we hit the specified shape_group_id & shape_id?
- };
spaces/CVPR/LIVE/thrust/thrust/system/detail/sequential/general_copy.h DELETED
@@ -1,147 +0,0 @@
- /*
-  *  Copyright 2008-2013 NVIDIA Corporation
-  *
-  *  Licensed under the Apache License, Version 2.0 (the "License");
-  *  you may not use this file except in compliance with the License.
-  *  You may obtain a copy of the License at
-  *
-  *      http://www.apache.org/licenses/LICENSE-2.0
-  *
-  *  Unless required by applicable law or agreed to in writing, software
-  *  distributed under the License is distributed on an "AS IS" BASIS,
-  *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  *  See the License for the specific language governing permissions and
-  *  limitations under the License.
-  */
-
- /*! \file general_copy.h
-  *  \brief Sequential copy algorithms for general iterators.
-  */
-
- #pragma once
-
- #include <thrust/detail/config.h>
- #include <thrust/detail/raw_reference_cast.h>
- #include <thrust/detail/type_traits.h>
-
- namespace thrust
- {
- namespace system
- {
- namespace detail
- {
- namespace sequential
- {
- namespace general_copy_detail
- {
-
-
- template<typename T1, typename T2>
- struct lazy_is_assignable
-   : thrust::detail::is_assignable<
-       typename T1::type,
-       typename T2::type
-     >
- {};
-
-
- // sometimes OutputIterator's reference type is reported as void
- // in that case, just assume that we're able to assign to it OK
- template<typename InputIterator, typename OutputIterator>
- struct reference_is_assignable
-   : thrust::detail::eval_if<
-       thrust::detail::is_same<
-         typename thrust::iterator_reference<OutputIterator>::type, void
-       >::value,
-       thrust::detail::true_type,
-       lazy_is_assignable<
-         thrust::iterator_reference<OutputIterator>,
-         thrust::iterator_reference<InputIterator>
-       >
-     >::type
- {};
-
-
- // introduce an iterator assign helper to deal with assignments from
- // a wrapped reference
-
- __thrust_exec_check_disable__
- template<typename OutputIterator, typename InputIterator>
- inline __host__ __device__
- typename thrust::detail::enable_if<
-   reference_is_assignable<InputIterator,OutputIterator>::value
- >::type
- iter_assign(OutputIterator dst, InputIterator src)
- {
-   *dst = *src;
- }
-
-
- __thrust_exec_check_disable__
- template<typename OutputIterator, typename InputIterator>
- inline __host__ __device__
- typename thrust::detail::disable_if<
-   reference_is_assignable<InputIterator,OutputIterator>::value
- >::type
- iter_assign(OutputIterator dst, InputIterator src)
- {
-   typedef typename thrust::iterator_value<InputIterator>::type value_type;
-
-   // insert a temporary and hope for the best
-   *dst = static_cast<value_type>(*src);
- }
-
-
- } // end general_copy_detail
-
-
- __thrust_exec_check_disable__
- template<typename InputIterator,
-          typename OutputIterator>
- __host__ __device__
-   OutputIterator general_copy(InputIterator first,
-                               InputIterator last,
-                               OutputIterator result)
- {
-   for(; first != last; ++first, ++result)
-   {
-     // gcc 4.2 crashes while instantiating iter_assign
- #if (THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_GCC) && (THRUST_GCC_VERSION < 40300)
-     *result = *first;
- #else
-     general_copy_detail::iter_assign(result, first);
- #endif
-   }
-
-   return result;
- } // end general_copy()
-
-
- __thrust_exec_check_disable__
- template<typename InputIterator,
-          typename Size,
-          typename OutputIterator>
- __host__ __device__
-   OutputIterator general_copy_n(InputIterator first,
-                                 Size n,
-                                 OutputIterator result)
- {
-   for(; n > Size(0); ++first, ++result, --n)
-   {
-     // gcc 4.2 crashes while instantiating iter_assign
- #if (THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_GCC) && (THRUST_GCC_VERSION < 40300)
-     *result = *first;
- #else
-     general_copy_detail::iter_assign(result, first);
- #endif
-   }
-
-   return result;
- } // end general_copy_n()
-
-
- } // end namespace sequential
- } // end namespace detail
- } // end namespace system
- } // end namespace thrust
-
spaces/CVPR/regionclip-demo/setup.py DELETED
@@ -1,247 +0,0 @@
- #!/usr/bin/env python
- # Copyright (c) Facebook, Inc. and its affiliates.
-
- import glob
- import os
- import shutil
- from os import path
- from setuptools import find_packages, setup
- from typing import List
- import torch
- from torch.utils.cpp_extension import CUDA_HOME, CppExtension, CUDAExtension
- from torch.utils.hipify import hipify_python
-
- torch_ver = [int(x) for x in torch.__version__.split(".")[:2]]
- assert torch_ver >= [1, 6], "Requires PyTorch >= 1.6"
-
-
- def get_version():
-     init_py_path = path.join(path.abspath(path.dirname(__file__)), "detectron2", "__init__.py")
-     init_py = open(init_py_path, "r").readlines()
-     version_line = [l.strip() for l in init_py if l.startswith("__version__")][0]
-     version = version_line.split("=")[-1].strip().strip("'\"")
-
-     # The following is used to build release packages.
-     # Users should never use it.
-     suffix = os.getenv("D2_VERSION_SUFFIX", "")
-     version = version + suffix
-     if os.getenv("BUILD_NIGHTLY", "0") == "1":
-         from datetime import datetime
-
-         date_str = datetime.today().strftime("%y%m%d")
-         version = version + ".dev" + date_str
-
-         new_init_py = [l for l in init_py if not l.startswith("__version__")]
-         new_init_py.append('__version__ = "{}"\n'.format(version))
-         with open(init_py_path, "w") as f:
-             f.write("".join(new_init_py))
-     return version
-
-
- def get_extensions():
-     this_dir = path.dirname(path.abspath(__file__))
-     extensions_dir = path.join(this_dir, "detectron2", "layers", "csrc")
-
-     main_source = path.join(extensions_dir, "vision.cpp")
-     sources = glob.glob(path.join(extensions_dir, "**", "*.cpp"))
-
-     from torch.utils.cpp_extension import ROCM_HOME
-
-     is_rocm_pytorch = (
-         True if ((torch.version.hip is not None) and (ROCM_HOME is not None)) else False
-     )
-
-     hipify_ver = (
-         [int(x) for x in torch.utils.hipify.__version__.split(".")]
-         if hasattr(torch.utils.hipify, "__version__")
-         else [0, 0, 0]
-     )
-
-     if is_rocm_pytorch and hipify_ver < [1, 0, 0]:  # TODO not needed since pt1.8
-
-         # Earlier versions of hipification and extension modules were not
-         # transparent, i.e. would require an explicit call to hipify, and the
-         # hipification would introduce "hip" subdirectories, possibly changing
-         # the relationship between source and header files.
-         # This path is maintained for backwards compatibility.
-
-         hipify_python.hipify(
-             project_directory=this_dir,
-             output_directory=this_dir,
-             includes="/detectron2/layers/csrc/*",
-             show_detailed=True,
-             is_pytorch_extension=True,
-         )
-
-         source_cuda = glob.glob(path.join(extensions_dir, "**", "hip", "*.hip")) + glob.glob(
-             path.join(extensions_dir, "hip", "*.hip")
-         )
-
-         shutil.copy(
-             "detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_utils.h",
-             "detectron2/layers/csrc/box_iou_rotated/hip/box_iou_rotated_utils.h",
-         )
-         shutil.copy(
-             "detectron2/layers/csrc/deformable/deform_conv.h",
-             "detectron2/layers/csrc/deformable/hip/deform_conv.h",
-         )
-
-         sources = [main_source] + sources
-         sources = [
-             s
-             for s in sources
-             if not is_rocm_pytorch or torch_ver < [1, 7] or not s.endswith("hip/vision.cpp")
-         ]
-
-     else:
-
-         # common code between cuda and rocm platforms,
-         # for hipify version [1,0,0] and later.
-
-         source_cuda = glob.glob(path.join(extensions_dir, "**", "*.cu")) + glob.glob(
-             path.join(extensions_dir, "*.cu")
-         )
-
-         sources = [main_source] + sources
-
-     extension = CppExtension
-
-     extra_compile_args = {"cxx": []}
-     define_macros = []
-
-     if (torch.cuda.is_available() and ((CUDA_HOME is not None) or is_rocm_pytorch)) or os.getenv(
-         "FORCE_CUDA", "0"
-     ) == "1":
-         extension = CUDAExtension
-         sources += source_cuda
-
-         if not is_rocm_pytorch:
-             define_macros += [("WITH_CUDA", None)]
-             extra_compile_args["nvcc"] = [
-                 "-O3",
-                 "-DCUDA_HAS_FP16=1",
-                 "-D__CUDA_NO_HALF_OPERATORS__",
-                 "-D__CUDA_NO_HALF_CONVERSIONS__",
-                 "-D__CUDA_NO_HALF2_OPERATORS__",
-             ]
-         else:
-             define_macros += [("WITH_HIP", None)]
-             extra_compile_args["nvcc"] = []
-
-         if torch_ver < [1, 7]:
-             # supported by https://github.com/pytorch/pytorch/pull/43931
-             CC = os.environ.get("CC", None)
-             if CC is not None:
-                 extra_compile_args["nvcc"].append("-ccbin={}".format(CC))
-
-     include_dirs = [extensions_dir]
-
-     ext_modules = [
-         extension(
-             "detectron2._C",
-             sources,
-             include_dirs=include_dirs,
-             define_macros=define_macros,
-             extra_compile_args=extra_compile_args,
-         )
-     ]
-
-     return ext_modules
-
-
- def get_model_zoo_configs() -> List[str]:
-     """
-     Return a list of configs to include in package for model zoo. Copy over these configs inside
-     detectron2/model_zoo.
-     """
-
-     # Use absolute paths while symlinking.
-     source_configs_dir = path.join(path.dirname(path.realpath(__file__)), "configs")
-     destination = path.join(
-         path.dirname(path.realpath(__file__)), "detectron2", "model_zoo", "configs"
-     )
-     # Symlink the config directory inside package to have a cleaner pip install.
-
-     # Remove stale symlink/directory from a previous build.
-     if path.exists(source_configs_dir):
-         if path.islink(destination):
-             os.unlink(destination)
-         elif path.isdir(destination):
-             shutil.rmtree(destination)
-
-     if not path.exists(destination):
-         try:
-             os.symlink(source_configs_dir, destination)
-         except OSError:
-             # Fall back to copying if symlink fails: ex. on Windows.
-             shutil.copytree(source_configs_dir, destination)
-
-     config_paths = glob.glob("configs/**/*.yaml", recursive=True) + glob.glob(
-         "configs/**/*.py", recursive=True
-     )
-     return config_paths
-
-
- # For projects that are relative small and provide features that are very close
- # to detectron2's core functionalities, we install them under detectron2.projects
- PROJECTS = {
-     # "detectron2.projects.point_rend": "projects/PointRend/point_rend",
-     # "detectron2.projects.deeplab": "projects/DeepLab/deeplab",
-     # "detectron2.projects.panoptic_deeplab": "projects/Panoptic-DeepLab/panoptic_deeplab",
- }
-
- setup(
-     name="detectron2",
-     version=get_version(),
-     author="FAIR",
-     url="https://github.com/facebookresearch/detectron2",
-     description="Detectron2 is FAIR's next-generation research "
-     "platform for object detection and segmentation.",
-     packages=find_packages(exclude=("configs", "tests*")) + list(PROJECTS.keys()),
-     package_dir=PROJECTS,
-     package_data={"detectron2.model_zoo": get_model_zoo_configs()},
-     python_requires=">=3.6",
-     install_requires=[
-         # Do not add opencv here. Just like pytorch, user should install
-         # opencv themselves, preferrably by OS's package manager, or by
-         # choosing the proper pypi package name at https://github.com/skvark/opencv-python
-         "termcolor>=1.1",
-         "Pillow>=7.1",  # or use pillow-simd for better performance
-         "yacs>=0.1.6",
-         "tabulate",
-         "cloudpickle",
-         "matplotlib",
-         "tqdm>4.29.0",
-         "tensorboard",
-         # Lock version of fvcore/iopath because they may have breaking changes
-         # NOTE: when updating fvcore/iopath version, make sure fvcore depends
-         # on compatible version of iopath.
-         "fvcore>=0.1.5,<0.1.6",  # required like this to make it pip installable
-         "iopath>=0.1.7,<0.1.9",
-         "pycocotools>=2.0.2",  # corresponds to https://github.com/ppwwyyxx/cocoapi
-         "future",  # used by caffe2
-         "pydot",  # used to save caffe2 SVGs
-         "dataclasses; python_version<'3.7'",
-         "omegaconf>=2.1.0rc1",
-         "hydra-core>=1.1.0rc1",
-         "black==21.4b2",
-         # When adding to the list, may need to update docs/requirements.txt
-         # or add mock in docs/conf.py
-     ],
-     extras_require={
-         "all": [
-             "shapely",
-             "pygments>=2.2",
-             "psutil",
-             "panopticapi @ https://github.com/cocodataset/panopticapi/archive/master.zip",
-         ],
-         "dev": [
-             "flake8==3.8.1",
-             "isort==4.3.21",
-             "flake8-bugbear",
-             "flake8-comprehensions",
-         ],
-     },
-     ext_modules=get_extensions(),
-     cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension},
- )
spaces/Chintan-Donda/KKMS-KSSW-HF/src/kkms_kssw.py DELETED
@@ -1,77 +0,0 @@
- import os
-
- import src.constants as constants_utils
- import src.langchain_utils as langchain_utils
- import src.weather as weather_utils
- import src.mandi_price as mandi_utils
- import src.translator as translator_utils
- import src.web_crawler as web_crawler_utils
-
- import logging
- logger = logging.getLogger(__name__)
- logging.basicConfig(
-     format="%(asctime)s %(levelname)s [%(name)s] %(message)s", level=logging.INFO, datefmt="%Y-%m-%d %H:%M:%S"
- )
-
- import warnings
- warnings.filterwarnings('ignore')
-
-
-
- class KKMS_KSSW:
-     def __init__(self):
-         self.index_type = constants_utils.INDEX_TYPE
-         self.load_from_existing_index_store = constants_utils.LOAD_FROM_EXISTING_INDEX_STORE
-
-         # Instantiate langchain_utils class object
-         self.langchain_utils_obj = langchain_utils.LANGCHAIN_UTILS(
-             index_type=self.index_type,
-             load_from_existing_index_store=self.load_from_existing_index_store
-         )
-         # Instantiate Mandi Price utils class object
-         self.mandi_utils_obj = mandi_utils.MANDI_PRICE()
-         # Instantiate Weather class object
-         self.weather_utils_obj = weather_utils.WEATHER()
-         # Instantiate translator_utils class object
-         self.translator_utils_obj = translator_utils.TRANSLATOR()
-
-
-
-     # Initialize index (vector store)
-     def load_create_index(self):
-         logger.info(f"Load/Create index")
-         self.langchain_utils_obj.load_create_index()
-
-
-     # Upload data and update the index
-     def upload_data(
-         self,
-         doc_type,
-         files_or_urls,
-         index_category
-     ):
-         logger.info(f"Uploading data")
-         self.langchain_utils_obj.upload_data(
-             doc_type=doc_type,
-             files_or_urls=files_or_urls,
-             index_category=index_category
-         )
-
-
-     # Define query on index to retrieve the most relevant top K documents from the vector store
-     def query(
-         self,
-         question,
-         question_category
-     ):
-         '''
-         Args:
-             mode: can be any of [default, embedding]
-             response_mode: can be any of [default, compact, tree_summarize]
-         '''
-         logger.info(f"Querying from index/vector store")
-
-         return self.langchain_utils_obj.query(
-             question=question,
-             question_category=question_category
-         )
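
For context, the facade above is meant to be driven roughly as follows. This is a sketch only: it assumes the repository's `src.*` helper modules are importable and a valid index store is configured, and the question and category strings are placeholders.

```python
# Hypothetical driver for the KKMS_KSSW facade above (names are placeholders).
from src.kkms_kssw import KKMS_KSSW

app = KKMS_KSSW()
app.load_create_index()  # build or load the vector store

answer = app.query(
    question="What is the current mandi price of wheat?",
    question_category="general",
)
print(answer)
```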
spaces/ChrisPreston/diff-svc_minato_aqua/modules/diff/diffusion.py DELETED
@@ -1,312 +0,0 @@
- from collections import deque
- from functools import partial
- from inspect import isfunction
-
- import numpy as np
- import torch
- import torch.nn.functional as F
- from torch import nn
- from tqdm import tqdm
-
- from modules.encoder import SvcEncoder
- from training.train_pipeline import Batch2Loss
- from utils.hparams import hparams
-
-
- def exists(x):
-     return x is not None
-
-
- def default(val, d):
-     if exists(val):
-         return val
-     return d() if isfunction(d) else d
-
-
- # gaussian diffusion trainer class
-
- def extract(a, t, x_shape):
-     b, *_ = t.shape
-     out = a.gather(-1, t)
-     return out.reshape(b, *((1,) * (len(x_shape) - 1)))
-
-
- def noise_like(shape, device, repeat=False):
-     repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))
-     noise = lambda: torch.randn(shape, device=device)
-     return repeat_noise() if repeat else noise()
-
-
- def linear_beta_schedule(timesteps, max_beta=hparams.get('max_beta', 0.01)):
-     """
-     linear schedule
-     """
-     betas = np.linspace(1e-4, max_beta, timesteps)
-     return betas
-
-
- def cosine_beta_schedule(timesteps, s=0.008):
-     """
-     cosine schedule
-     as proposed in https://openreview.net/forum?id=-NEXDKk8gZ
-     """
-     steps = timesteps + 1
-     x = np.linspace(0, steps, steps)
-     alphas_cumprod = np.cos(((x / steps) + s) / (1 + s) * np.pi * 0.5) ** 2
-     alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
-     betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
-     return np.clip(betas, a_min=0, a_max=0.999)
-
-
- beta_schedule = {
-     "cosine": cosine_beta_schedule,
-     "linear": linear_beta_schedule,
- }
-
-
- class GaussianDiffusion(nn.Module):
-     def __init__(self, phone_encoder, out_dims, denoise_fn,
-                  timesteps=1000, K_step=1000, loss_type=hparams.get('diff_loss_type', 'l1'), betas=None, spec_min=None,
-                  spec_max=None):
-         super().__init__()
-         self.denoise_fn = denoise_fn
-         self.fs2 = SvcEncoder(phone_encoder, out_dims)
-         self.mel_bins = out_dims
-
-         if exists(betas):
-             betas = betas.detach().cpu().numpy() if isinstance(betas, torch.Tensor) else betas
-         else:
-             if 'schedule_type' in hparams.keys():
-                 betas = beta_schedule[hparams['schedule_type']](timesteps)
-             else:
-                 betas = cosine_beta_schedule(timesteps)
-
-         alphas = 1. - betas
-         alphas_cumprod = np.cumprod(alphas, axis=0)
-         alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])
-
-         timesteps, = betas.shape
-         self.num_timesteps = int(timesteps)
-         self.K_step = K_step
-         self.loss_type = loss_type
-
-         self.noise_list = deque(maxlen=4)
-
-         to_torch = partial(torch.tensor, dtype=torch.float32)
-
-         self.register_buffer('betas', to_torch(betas))
-         self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
-         self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev))
-
-         # calculations for diffusion q(x_t | x_{t-1}) and others
-         self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
-         self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
-         self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod)))
-         self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod)))
-         self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1)))
-
-         # calculations for posterior q(x_{t-1} | x_t, x_0)
-         posterior_variance = betas * (1. - alphas_cumprod_prev) / (1. - alphas_cumprod)
-         # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
-         self.register_buffer('posterior_variance', to_torch(posterior_variance))
-         # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
-         self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20))))
-         self.register_buffer('posterior_mean_coef1', to_torch(
-             betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod)))
-         self.register_buffer('posterior_mean_coef2', to_torch(
-             (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod)))
-
-         self.register_buffer('spec_min', torch.FloatTensor(spec_min)[None, None, :hparams['keep_bins']])
-         self.register_buffer('spec_max', torch.FloatTensor(spec_max)[None, None, :hparams['keep_bins']])
-
-     def q_mean_variance(self, x_start, t):
-         mean = extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
-         variance = extract(1. - self.alphas_cumprod, t, x_start.shape)
-         log_variance = extract(self.log_one_minus_alphas_cumprod, t, x_start.shape)
-         return mean, variance, log_variance
-
-     def predict_start_from_noise(self, x_t, t, noise):
-         return (
-                 extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
-                 extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
-         )
-
-     def q_posterior(self, x_start, x_t, t):
-         posterior_mean = (
-                 extract(self.posterior_mean_coef1, t, x_t.shape) * x_start +
-                 extract(self.posterior_mean_coef2, t, x_t.shape) * x_t
-         )
-         posterior_variance = extract(self.posterior_variance, t, x_t.shape)
-         posterior_log_variance_clipped = extract(self.posterior_log_variance_clipped, t, x_t.shape)
-         return posterior_mean, posterior_variance, posterior_log_variance_clipped
-
-     def p_mean_variance(self, x, t, cond, clip_denoised: bool):
-         noise_pred = self.denoise_fn(x, t, cond=cond)
-         x_recon = self.predict_start_from_noise(x, t=t, noise=noise_pred)
-
-         if clip_denoised:
-             x_recon.clamp_(-1., 1.)
-
-         model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
-         return model_mean, posterior_variance, posterior_log_variance
-
-     @torch.no_grad()
-     def p_sample(self, x, t, cond, clip_denoised=True, repeat_noise=False):
-         b, *_, device = *x.shape, x.device
-         model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, cond=cond, clip_denoised=clip_denoised)
-         noise = noise_like(x.shape, device, repeat_noise)
-         # no noise when t == 0
-         nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
-         return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
-
-     @torch.no_grad()
-     def p_sample_plms(self, x, t, interval, cond, clip_denoised=True, repeat_noise=False):
-         """
-         Use the PLMS method from [Pseudo Numerical Methods for Diffusion Models on Manifolds](https://arxiv.org/abs/2202.09778).
-         """
-
-         def get_x_pred(x, noise_t, t):
-             a_t = extract(self.alphas_cumprod, t, x.shape)
-             a_prev = extract(self.alphas_cumprod, torch.max(t - interval, torch.zeros_like(t)), x.shape)
-             a_t_sq, a_prev_sq = a_t.sqrt(), a_prev.sqrt()
-
-             x_delta = (a_prev - a_t) * ((1 / (a_t_sq * (a_t_sq + a_prev_sq))) * x - 1 / (
-                     a_t_sq * (((1 - a_prev) * a_t).sqrt() + ((1 - a_t) * a_prev).sqrt())) * noise_t)
-             x_pred = x + x_delta
-
-             return x_pred
-
-         noise_list = self.noise_list
-         noise_pred = self.denoise_fn(x, t, cond=cond)
-
-         if len(noise_list) == 0:
-             x_pred = get_x_pred(x, noise_pred, t)
-             noise_pred_prev = self.denoise_fn(x_pred, max(t - interval, 0), cond=cond)
-             noise_pred_prime = (noise_pred + noise_pred_prev) / 2
-         elif len(noise_list) == 1:
-             noise_pred_prime = (3 * noise_pred - noise_list[-1]) / 2
-         elif len(noise_list) == 2:
-             noise_pred_prime = (23 * noise_pred - 16 * noise_list[-1] + 5 * noise_list[-2]) / 12
-         elif len(noise_list) >= 3:
-             noise_pred_prime = (55 * noise_pred - 59 * noise_list[-1] + 37 * noise_list[-2] - 9 * noise_list[-3]) / 24
-
-         x_prev = get_x_pred(x, noise_pred_prime, t)
-         noise_list.append(noise_pred)
-
-         return x_prev
-
-     def q_sample(self, x_start, t, noise=None):
-         noise = default(noise, lambda: torch.randn_like(x_start))
-         return (
-                 extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
-                 extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise
-         )
-
-     def p_losses(self, x_start, t, cond, noise=None, nonpadding=None):
-         noise = default(noise, lambda: torch.randn_like(x_start))
-
-         x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
-         x_recon = self.denoise_fn(x_noisy, t, cond)
-
-         if self.loss_type == 'l1':
-             if nonpadding is not None:
-                 loss = ((noise - x_recon).abs() * nonpadding.unsqueeze(1)).mean()
-             else:
-                 # print('are you sure w/o nonpadding?')
-                 loss = (noise - x_recon).abs().mean()
-
-         elif self.loss_type == 'l2':
-             loss = F.mse_loss(noise, x_recon)
-         else:
-             raise NotImplementedError()
-
-         return loss
-
-     def forward(self, hubert, mel2ph=None, spk_embed=None,
-                 ref_mels=None, f0=None, uv=None, energy=None, infer=False, **kwargs):
-         '''
-         conditioning diffusion, use fastspeech2 encoder output as the condition
-         '''
-         ret = self.fs2(hubert, mel2ph, spk_embed, None, f0, uv, energy,
-                        skip_decoder=True, infer=infer, **kwargs)
-         cond = ret['decoder_inp'].transpose(1, 2)
-         b, *_, device = *hubert.shape, hubert.device
-
-         if not infer:
-             Batch2Loss.module4(
-                 self.p_losses,
-                 self.norm_spec(ref_mels), cond, ret, self.K_step, b, device
-             )
-         else:
-             if 'use_gt_mel' in kwargs.keys() and kwargs['use_gt_mel']:
-                 t = kwargs['add_noise_step']
-                 print('===>using ground truth mel as start, please make sure parameter "key==0" !')
-                 fs2_mels = ref_mels
-                 fs2_mels = self.norm_spec(fs2_mels)
-                 fs2_mels = fs2_mels.transpose(1, 2)[:, None, :, :]
-                 x = self.q_sample(x_start=fs2_mels, t=torch.tensor([t - 1], device=device).long())
-             else:
-                 t = self.K_step
-                 shape = (cond.shape[0], 1, self.mel_bins, cond.shape[2])
-                 x = torch.randn(shape, device=device)
-             if hparams.get('pndm_speedup') and hparams['pndm_speedup'] > 1:
-                 self.noise_list = deque(maxlen=4)
-                 iteration_interval = hparams['pndm_speedup']
-                 for i in tqdm(reversed(range(0, t, iteration_interval)), desc='sample time step',
-                               total=t // iteration_interval):
-                     x = self.p_sample_plms(x, torch.full((b,), i, device=device, dtype=torch.long), iteration_interval,
-                                            cond)
-             else:
-                 for i in tqdm(reversed(range(0, t)), desc='sample time step', total=t):
-                     x = self.p_sample(x, torch.full((b,), i, device=device, dtype=torch.long), cond)
-             x = x[:, 0].transpose(1, 2)
-             if mel2ph is not None:  # for singing
-                 ret['mel_out'] = self.denorm_spec(x) * ((mel2ph > 0).float()[:, :, None])
-             else:
-                 ret['mel_out'] = self.denorm_spec(x)
-         return ret
-
-     def norm_spec(self, x):
-         return (x - self.spec_min) / (self.spec_max - self.spec_min) * 2 - 1
-
-     def denorm_spec(self, x):
-         return (x + 1) / 2 * (self.spec_max - self.spec_min) + self.spec_min
-
-     def out2mel(self, x):
-         return x
-
-
- class OfflineGaussianDiffusion(GaussianDiffusion):
-     def forward(self, txt_tokens, mel2ph=None, spk_embed=None,
-                 ref_mels=None, f0=None, uv=None, energy=None, infer=False, **kwargs):
-         b, *_, device = *txt_tokens.shape, txt_tokens.device
-
-         ret = self.fs2(txt_tokens, mel2ph, spk_embed, ref_mels, f0, uv, energy,
-                        skip_decoder=True, infer=True, **kwargs)
-         cond = ret['decoder_inp'].transpose(1, 2)
-         fs2_mels = ref_mels[1]
-         ref_mels = ref_mels[0]
-
-         if not infer:
-             t = torch.randint(0, self.K_step, (b,), device=device).long()
-             x = ref_mels
-             x = self.norm_spec(x)
-             x = x.transpose(1, 2)[:, None, :, :]  # [B, 1, M, T]
-             ret['diff_loss'] = self.p_losses(x, t, cond)
-         else:
-             t = self.K_step
-             fs2_mels = self.norm_spec(fs2_mels)
-             fs2_mels = fs2_mels.transpose(1, 2)[:, None, :, :]
-
-             x = self.q_sample(x_start=fs2_mels, t=torch.tensor([t - 1], device=device).long())
-
-             if hparams.get('gaussian_start') is not None and hparams['gaussian_start']:
-                 print('===> gaussion start.')
-                 shape = (cond.shape[0], 1, self.mel_bins, cond.shape[2])
-                 x = torch.randn(shape, device=device)
-             for i in tqdm(reversed(range(0, t)), desc='sample time step', total=t):
-                 x = self.p_sample(x, torch.full((b,), i, device=device, dtype=torch.long), cond)
-             x = x[:, 0].transpose(1, 2)
-             ret['mel_out'] = self.denorm_spec(x)
-
-         return ret
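
As a reading aid for the deleted diffusion code above: `q_sample` implements the standard closed-form DDPM forward process, and `predict_start_from_noise` is its algebraic inversion (the buffers registered in `__init__` precompute the square-root terms):

```latex
% Forward process implemented by q_sample:
x_t = \sqrt{\bar{\alpha}_t}\, x_0 + \sqrt{1 - \bar{\alpha}_t}\, \epsilon,
\qquad \epsilon \sim \mathcal{N}(0, I)

% Inversion implemented by predict_start_from_noise, recovering x_0 from a
% noise estimate \hat{\epsilon}:
\hat{x}_0 = \sqrt{\tfrac{1}{\bar{\alpha}_t}}\, x_t
          - \sqrt{\tfrac{1}{\bar{\alpha}_t} - 1}\, \hat{\epsilon}
```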