Commit
·
1d7c168
1
Parent(s):
e6f326b
Update parquet files (step 80 of 397)
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- spaces/101-5/gpt4free/g4f/.v1/testing/deepai_test.py +0 -18
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Apowersoft Video Converter Studio 4.9.1 Crack ((EXCLUSIVE)).md +0 -158
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cubase 8 Free Download Full Version Crack Windows 10 Is It Worth It?.md +0 -21
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cyberghost Vpn Serial Giveaway.md +0 -33
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Fizikos Uzdavinynas 10 Kl Pdf 37 _VERIFIED_.md +0 -22
- spaces/1gistliPinn/ChatGPT4/Examples/Antenna Web Design Studio 6.57 REPACK Crack.md +0 -96
- spaces/1gistliPinn/ChatGPT4/Examples/Folder Marker Pro 4.0 UPDATED Crack.md +0 -6
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Apex Racing How to Race and Drift with Unlimited Money MOD.md +0 -117
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Build and Battle with Your Favorite Cookies in Cookie Run Kingdom Online.md +0 -93
- spaces/1phancelerku/anime-remove-background/ARK Survival Evolved APK - The Best Mobile Game of 2023 - Download Here.md +0 -117
- spaces/1phancelerku/anime-remove-background/Download 3D Interior Design Software and Tools for Professional Results.md +0 -108
- spaces/2023Liu2023/bingo/src/lib/bots/bing/types.ts +0 -259
- spaces/2kaara/oreo/Dockerfile +0 -21
- spaces/AIConsultant/MusicGen/audiocraft/data/__init__.py +0 -10
- spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-210e_deepfashion2_sling_dress_256x192/td_hm_res50_4xb64-210e_deepfashion2_sling_dress_256x192.py +0 -2861
- spaces/AchyuthGamer/OpenGPT-Chat-UI/.svelte-kit/types/src/routes/search/[id]/$types.d.ts +0 -9
- spaces/AkshayKumarP/AI-ChatBot/app.py +0 -47
- spaces/AnandSoni2001/StockMarketPrediction/README.md +0 -12
- spaces/Andy1621/uniformer_image_detection/tools/deployment/mmdet2torchserve.py +0 -107
- spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_d6_r101-d16_769x769_40k_cityscapes.py +0 -2
- spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/hooks/ema.py +0 -89
- spaces/Anustup/NS_AI_LABS/src/download.py +0 -72
- spaces/ArkanDash/rvc-models-new/lib/infer_pack/transforms.py +0 -209
- spaces/AutoGeneralAI/ChatGPT/README_cn.md +0 -12
- spaces/Bart92/RVC_HF/infer/modules/train/extract_feature_print.py +0 -137
- spaces/Benson/text-generation/Examples/Como Hacer Una Hoja De Papel.md +0 -83
- spaces/Benson/text-generation/Examples/Descargar Clash Royale En El Ordenador.md +0 -116
- spaces/BernardoOlisan/vqganclip/taming-transformers/taming/data/imagenet.py +0 -558
- spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/model.py +0 -946
- spaces/BilalSardar/Gpt4All/app.py +0 -18
- spaces/Branon/TurboKeys/README.md +0 -11
- spaces/Brasd99/JustClothify/app.py +0 -45
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/layers/csrc/ROIAlign/ROIAlign_cpu.cpp +0 -503
- spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/for_each.h +0 -109
- spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/generate.h +0 -23
- spaces/CVPR/regionclip-demo/datasets/prepare_cocofied_lvis.py +0 -176
- spaces/Chirag4579/prakalpa-image-comparator/app.py +0 -87
- spaces/CognitiveAIForHealth/README/README.md +0 -143
- spaces/Datasculptor/MusicGen/app.py +0 -407
- spaces/DragGan/DragGan-Inversion/stylegan_human/pti/pti_models/e4e/stylegan2/op/fused_bias_act.cpp +0 -21
- spaces/DragGan/DragGan-Inversion/stylegan_human/utils/__init__.py +0 -0
- spaces/Duskfallcrew/lambdalabs-sd-pokemon-diffusers/app.py +0 -3
- spaces/ECCV2022/PSG/utils.py +0 -300
- spaces/ECCV2022/bytetrack/tutorials/centertrack/mot_online/kalman_filter.py +0 -269
- spaces/ECCV2022/bytetrack/yolox/deepsort_tracker/iou_matching.py +0 -76
- spaces/ECCV2022/bytetrack/yolox/models/__init__.py +0 -10
- spaces/ECCV2022/bytetrack/yolox/models/losses.py +0 -81
- spaces/Egrt/LicenseGAN/plate.py +0 -47
- spaces/EuroPython2022/mmocr-demo/configs/_base_/recog_datasets/ST_SA_MJ_real_train.py +0 -81
- spaces/EveryPizza/stabilityai-stable-diffusion-2/README.md +0 -12
spaces/101-5/gpt4free/g4f/.v1/testing/deepai_test.py
DELETED
@@ -1,18 +0,0 @@
|
|
1 |
-
from gpt4free import deepai
|
2 |
-
|
3 |
-
#single completion
|
4 |
-
for chunk in deepai.Completion.create("Write a list of possible vacation destinations:"):
|
5 |
-
print(chunk, end="", flush=True)
|
6 |
-
print()
|
7 |
-
|
8 |
-
#chat completion
|
9 |
-
print("==============")
|
10 |
-
messages = [ #taken from the openai docs
|
11 |
-
{"role": "system", "content": "You are a helpful assistant."},
|
12 |
-
{"role": "user", "content": "Who won the world series in 2020?"},
|
13 |
-
{"role": "assistant", "content": "The Los Angeles Dodgers won the World Series in 2020."},
|
14 |
-
{"role": "user", "content": "Where was it played?"}
|
15 |
-
]
|
16 |
-
for chunk in deepai.ChatCompletion.create(messages):
|
17 |
-
print(chunk, end="", flush=True)
|
18 |
-
print()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Apowersoft Video Converter Studio 4.9.1 Crack ((EXCLUSIVE)).md
DELETED
@@ -1,158 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Apowersoft Video Converter Studio 4.9.1 Crack: A Powerful and Easy-to-Use Video Converter</h1>
|
3 |
-
<p>If you are looking for a simple yet effective way to convert your video and audio files to various formats, you may want to try Apowersoft Video Converter Studio 4.9.1 Crack. This is a cracked version of a popular video converter software that can help you convert, edit, and enhance your media files with ease.</p>
|
4 |
-
<p>In this article, we will show you what Apowersoft Video Converter Studio is, how to download and install it, how to use it, and what are its advantages and disadvantages. By the end of this article, you will have a clear idea of whether this software is suitable for your needs or not.</p>
|
5 |
-
<h2>Apowersoft Video Converter Studio 4.9.1 Crack</h2><br /><p><b><b>Download File</b> › <a href="https://byltly.com/2uKxST">https://byltly.com/2uKxST</a></b></p><br /><br />
|
6 |
-
<h2>What is Apowersoft Video Converter Studio?</h2>
|
7 |
-
<h3>A brief introduction to the software and its features</h3>
|
8 |
-
<p>Apowersoft Video Converter Studio is a powerful video converter program that can convert all types of video and audio formats, such as AVI, MP4, FLV, MKV, MP3, WAV, etc. It can also prepare your media files for various devices and platforms, such as YouTube, DVD, iPhone, Android, etc.</p>
|
9 |
-
<p>But that's not all. Apowersoft Video Converter Studio also has a built-in video editor that allows you to trim, crop, rotate, add effects, subtitles, watermarks, etc. You can also adjust the video parameters, such as resolution, bitrate, frame rate, etc.</p>
|
10 |
-
<p>Moreover, Apowersoft Video Converter Studio has a useful subtitle editor that lets you load external subtitles and edit them freely. You can change the font size, color, position, etc. You can also merge multiple videos into one file with this software.</p>
|
11 |
-
<h3>The benefits of using Apowersoft Video Converter Studio</h3>
|
12 |
-
<p>There are many benefits of using Apowersoft Video Converter Studio for your video conversion needs. Here are some of them:</p>
|
13 |
-
<p>Apowersoft Video Converter Studio 4.9.1 full version download<br />
|
14 |
-
How to crack Apowersoft Video Converter Studio 4.9.1<br />
|
15 |
-
Apowersoft Video Converter Studio 4.9.1 license key generator<br />
|
16 |
-
Apowersoft Video Converter Studio 4.9.1 serial number free<br />
|
17 |
-
Apowersoft Video Converter Studio 4.9.1 activation code<br />
|
18 |
-
Apowersoft Video Converter Studio 4.9.1 patch download<br />
|
19 |
-
Apowersoft Video Converter Studio 4.9.1 keygen torrent<br />
|
20 |
-
Apowersoft Video Converter Studio 4.9.1 registration code<br />
|
21 |
-
Apowersoft Video Converter Studio 4.9.1 cracked for windows<br />
|
22 |
-
Apowersoft Video Converter Studio 4.9.1 cracked for mac<br />
|
23 |
-
Apowersoft Video Converter Studio 4.9.1 portable version<br />
|
24 |
-
Apowersoft Video Converter Studio 4.9.1 review and features<br />
|
25 |
-
Apowersoft Video Converter Studio 4.9.1 alternative software<br />
|
26 |
-
Apowersoft Video Converter Studio 4.9.1 vs Wondershare UniConverter<br />
|
27 |
-
Apowersoft Video Converter Studio 4.9.1 vs Movavi Video Converter<br />
|
28 |
-
Apowersoft Video Converter Studio 4.9.1 vs Freemake Video Converter<br />
|
29 |
-
Apowersoft Video Converter Studio 4.9.1 vs Any Video Converter<br />
|
30 |
-
Apowersoft Video Converter Studio 4.9.1 vs HandBrake<br />
|
31 |
-
Apowersoft Video Converter Studio 4.9.1 vs Format Factory<br />
|
32 |
-
Apowersoft Video Converter Studio 4.9.1 vs VLC Media Player<br />
|
33 |
-
Apowersoft Video Converter Studio 4.9.1 supported formats and devices<br />
|
34 |
-
Apowersoft Video Converter Studio 4.9.1 user guide and tutorial<br />
|
35 |
-
Apowersoft Video Converter Studio 4.9.1 system requirements and compatibility<br />
|
36 |
-
Apowersoft Video Converter Studio 4.9.1 pros and cons<br />
|
37 |
-
Apowersoft Video Converter Studio 4.9.1 price and discount coupon<br />
|
38 |
-
How to install and uninstall Apowersoft Video Converter Studio 4.9.1<br />
|
39 |
-
How to update and upgrade Apowersoft Video Converter Studio 4.9.1<br />
|
40 |
-
How to convert videos with Apowersoft Video Converter Studio 4.9.1<br />
|
41 |
-
How to edit videos with Apowersoft Video Converter Studio 4.9.1<br />
|
42 |
-
How to download online videos with Apowersoft Video Converter Studio 4.9.1<br />
|
43 |
-
How to record screen with Apowersoft Video Converter Studio 4.9.1<br />
|
44 |
-
How to burn DVD with Apowersoft Video Converter Studio 4.9.1<br />
|
45 |
-
How to merge videos with Apowersoft Video Converter Studio 4.9.1<br />
|
46 |
-
How to crop videos with Apowersoft Video Converter Studio 4.9.1<br />
|
47 |
-
How to rotate videos with Apowersoft Video Converter Studio 4.9.1<br />
|
48 |
-
How to add watermark to videos with Apowersoft Video Converter Studio 4.9.1<br />
|
49 |
-
How to add subtitles to videos with Apowersoft Video Converter Studio 4.9.1<br />
|
50 |
-
How to extract audio from videos with Apowersoft Video Converter Studio 4.9.</p>
|
51 |
-
<ul>
|
52 |
-
<li>It can preserve 100% video quality as the original video file.</li>
|
53 |
-
<li>It can convert video and audio files at a fast speed.</li>
|
54 |
-
<li>It can support multiple languages for subtitles.</li>
|
55 |
-
<li>It has a simple and intuitive user interface that is easy to use.</li>
|
56 |
-
<li>It has a wide range of output formats and presets for different devices.</li>
|
57 |
-
</ul>
|
58 |
-
<h2>How to Download and Install Apowersoft Video Converter Studio 4.9.1 Crack?</h2>
|
59 |
-
<h3>The steps to download the software from a reliable source</h3>
|
60 |
-
<p>If you want to download Apowersoft Video Converter Studio 4.9.1 Crack for free, you need to find a reliable source that offers the cracked version of the software. There are many websites that claim to provide this software for free, but some of them may contain viruses or malware that can harm your computer.</p>
|
61 |
-
<p>One of the websites that we recommend is SadeemPC.com, which provides cracked software, games, nulled scripts, WordPress themes and plugins for free. You can download Apowersoft Video Converter Studio 4.9.1 Crack from this website by following these steps:</p>
|
62 |
-
<ol>
|
63 |
-
<li>Go to <a href="https://www.sadeempc.com/apowersoft-video-converter-studio-crack/">https://www.sadeempc.com/apowersoft-video-converter-studio-crack/</a></li>
|
64 |
-
<li>Scroll down until you see the download links section.</li>
|
65 |
-
<li>Click on one of the download links (preferably Download Now) and wait for a few seconds.</li>
|
66 |
-
<li>You will be redirected to another page where you need to click on another download link (preferably Download Via Torrent).</li>
|
67 |
-
<li>You will be redirected again to another page where you need to click on another download link (preferably Download From UploadRAR).</li>
|
68 |
-
<li>You will be redirected again to another page where you need to click on another download link (preferably Free Download).</li>
|
69 |
-
<li>You will be redirected again to another page where you need to click on another download link (preferably Create Download Link).</li>
|
70 |
-
<li>You will be redirected again to another page where you need to click on another download link (preferably Click Here To Download).</li>
|
71 |
-
<li>A pop-up window will appear where you need to click on Save File.</li>
|
72 |
-
<li>The file will be downloaded to your computer.</li>
|
73 |
-
</ol>
|
74 |
-
<h3>The steps to install the software and activate it with a serial key</h3>
|
75 |
-
<p>After downloading the file from SadeemPC.com, you need to install it on your computer by following these steps:</p>
|
76 |
-
<ol>
|
77 |
-
<li>Extract the file using WinRAR or any other file extractor.</li>
|
78 |
-
<li>Open the extracted folder and run Setup.exe as administrator.</li>
|
79 |
-
<li>Follow the installation wizard until it finishes.</li>
|
80 |
-
<li>Do not launch the software yet.</li>
|
81 |
-
<li>Open the Crack folder and copy all the files inside it.</li>
|
82 |
-
<li>Paste them into the installation directory (usually C:\Program Files\Apowersoft\Video Converter Studio).</li>
|
83 |
-
<li>Launch the software.</li>
|
84 |
-
<li>A pop-up window will appear asking you to register.</li>
|
85 |
-
<li>Enter any name and email address.</li>
|
86 |
-
<li>Enter one of the serial keys provided in Serial Keys.txt file.</li>
|
87 |
-
<li>Click on Register Now.</li>
|
88 |
-
<li>The software will be activated successfully.</li>
|
89 |
-
</ol>
|
90 |
-
<h2>How to Use Apowersoft Video Converter Studio 4.9.1 Crack?</h2>
|
91 |
-
<h3>The main interface and functions of the software</h3>
|
92 |
-
<p>When you launch Apowersoft Video Converter Studio 4.9.1 Crack for the first time, you will see its main interface which consists of four sections:</p>
|
93 |
-
<ul>
|
94 |
-
<h3>How to convert video and audio files to various formats</h3>
|
95 |
-
<p>One of the main functions of Apowersoft Video Converter Studio 4.9.1 Crack is to convert video and audio files to various formats that you need. You can do this by following these simple steps:</p>
|
96 |
-
<ol>
|
97 |
-
<li>Select the files that you want to convert from the file list.</li>
|
98 |
-
<li>Click on the "Profile" button at the bottom of the interface and choose an output format from the drop-down menu.</li>
|
99 |
-
<li>You can also click on the "Edit" button next to the "Profile" button to customize the video parameters, such as resolution, bitrate, frame rate, etc.</li>
|
100 |
-
<li>Click on the "Convert" button at the lower right corner of the interface and wait for the conversion process to finish.</li>
|
101 |
-
<li>You can check the converted files by clicking on the "Open Folder" button at the bottom of the interface.</li>
|
102 |
-
</ol>
|
103 |
-
<p>You can also use the "Add to batch" button at the bottom of the interface to add multiple files for batch conversion. This will save you time and effort.</p>
|
104 |
-
<h3>How to edit your videos with built-in tools</h3>
|
105 |
-
<p>Another function of Apowersoft Video Converter Studio 4.9.1 Crack is to edit your videos with built-in tools. You can do this by following these simple steps:</p>
|
106 |
-
<ol>
|
107 |
-
<li>Select a file that you want to edit from the file list.</li>
|
108 |
-
<li>Click on the "Edit" button on the toolbar and a new window will pop up.</li>
|
109 |
-
<li>In this window, you can trim, crop, rotate, add effects, subtitles, watermarks, etc. to your video.</li>
|
110 |
-
<li>You can preview your changes in real time by using the play button at the bottom of the window.</li>
|
111 |
-
<li>When you are satisfied with your editing, click on the "OK" button at the lower right corner of the window.</li>
|
112 |
-
<li>You can also click on the "Reset" button at the lower left corner of the window to undo your changes.</li>
|
113 |
-
</ol>
|
114 |
-
<p>You can also use the "Edit" button next to the "Profile" button at the bottom of the interface to adjust some basic video parameters, such as brightness, contrast, saturation, etc.</p>
|
115 |
-
<h3>How to add external subtitles and audio files</h3>
|
116 |
-
<p>A third function of Apowersoft Video Converter Studio 4.9.1 Crack is to add external subtitles and audio files to your videos. You can do this by following these simple steps:</p>
|
117 |
-
<ol>
|
118 |
-
<li>Select a file that you want to add subtitles or audio files to from the file list.</li>
|
119 |
-
<h3>What are the Advantages of Apowersoft Video Converter Studio 4.9.1 Crack?</h3>
|
120 |
-
<p>There are many advantages of using Apowersoft Video Converter Studio 4.9.1 Crack for your video conversion and editing needs. Here are some of them:</p>
|
121 |
-
<ul>
|
122 |
-
<li>It can preserve 100% video quality as the original video file.</li>
|
123 |
-
<li>It can convert video and audio files at a fast speed.</li>
|
124 |
-
<li>It can support multiple video and audio formats and devices.</li>
|
125 |
-
<li>It has a simple and intuitive user interface that is easy to use.</li>
|
126 |
-
<li>It has a built-in video editor that allows you to trim, crop, rotate, add effects, subtitles, watermarks, etc.</li>
|
127 |
-
<li>It has a useful subtitle editor that lets you load external subtitles and edit them freely.</li>
|
128 |
-
<li>It has the ability to merge pieces of videos into one.</li>
|
129 |
-
<li>It is free to download and use with a serial key.</li>
|
130 |
-
</ul>
|
131 |
-
<h3>What are the Disadvantages of Apowersoft Video Converter Studio 4.9.1 Crack?</h3>
|
132 |
-
<p>However, there are also some disadvantages of using Apowersoft Video Converter Studio 4.9.1 Crack that you should be aware of. Here are some of them:</p>
|
133 |
-
<ul>
|
134 |
-
<li>It may contain viruses or malware that can harm your computer if you download it from untrusted sources.</li>
|
135 |
-
<li>It may violate the copyright and license agreement of the official software if you use it without permission.</li>
|
136 |
-
<li>It may not receive updates and technical support from the official site if you use it illegally.</li>
|
137 |
-
</ul>
|
138 |
-
<h2>Conclusion</h2>
|
139 |
-
<p>In conclusion, Apowersoft Video Converter Studio 4.9.1 Crack is a powerful and easy-to-use video converter software that can help you convert, edit, and enhance your video and audio files with ease. It has many advantages, such as high-quality and fast conversion, multiple formats and devices support, built-in video editor and subtitle editor, etc. However, it also has some disadvantages, such as possible risks of downloading cracked software from untrusted sources, legal and ethical issues of using cracked software without permission, lack of updates and technical support from the official site, etc.</p>
|
140 |
-
<p>If you want to try this software for free, you can download it from SadeemPC.com and use one of the serial keys provided in Serial Keys.txt file to activate it. However, we recommend you to buy the official version from Apowersoft.com if you want to enjoy its full features and benefits legally and safely.</p>
|
141 |
-
<p>We hope this article has helped you understand what Apowersoft Video Converter Studio 4.9.1 Crack is, how to download and install it, how to use it, and what are its advantages and disadvantages. If you have any questions or feedback, please feel free to leave a comment below.</p>
|
142 |
-
<h2>FAQs</h2>
|
143 |
-
<p>Here are some frequently asked questions and answers about Apowersoft Video Converter Studio 4.9.1 Crack:</p>
|
144 |
-
<ol>
|
145 |
-
<li><b>What is the difference between Apowersoft Video Converter Studio 4.9.1 Crack and Apowersoft Video Converter Studio 4.9.1?</b></li>
|
146 |
-
<p>The main difference is that Apowersoft Video Converter Studio 4.9.1 Crack is a cracked version of the official software that can be downloaded for free with a serial key from some websites, while Apowersoft Video Converter Studio 4.9.1 is the official version that can be bought from Apowersoft.com with a license code.</p>
|
147 |
-
<li><b>Is Apowersoft Video Converter Studio 4.9.1 Crack safe to use?</b></li>
|
148 |
-
<p>It depends on where you download it from. If you download it from a reliable source like SadeemPC.com, it may be safe to use. However, if you download it from an untrusted source, it may contain viruses or malware that can harm your computer.</p>
|
149 |
-
<li><b>Is Apowersoft Video Converter Studio 4.9.1 Crack legal to use?</b></li>
|
150 |
-
<p>No, it is not legal to use Apowersoft Video Converter Studio 4.9.1 Crack without permission from the official site. It may violate the copyright and license agreement of the official software if you use it illegally.</p>
|
151 |
-
<li><b>How can I update Apowersoft Video Converter Studio 4.9.1 Crack?</b></li>
|
152 |
-
<p>You cannot update Apowersoft Video Converter Studio 4.9.1 Crack manually or automatically because it is not connected to the official site. If you want to update the software, you need to buy the official version from Apowersoft.com or download a newer cracked version from another source.</p>
|
153 |
-
<li><b>How can I get technical support for Apowersoft Video Converter Studio 4.9.1 Crack?</b></li>
|
154 |
-
<p>You cannot get technical support for Apowersoft Video Converter Studio 4.9.1 Crack from the official site because it is not recognized by them. If you need technical support, you need to buy the official version from Apowersoft.com or contact the source where you downloaded the cracked version.</p>
|
155 |
-
</ol>
|
156 |
-
</p> 0a6ba089eb<br />
|
157 |
-
<br />
|
158 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cubase 8 Free Download Full Version Crack Windows 10 Is It Worth It?.md
DELETED
@@ -1,21 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>How to Get Cubase 8 Free Download Full Version Crack Windows 10</h1>
|
3 |
-
<p>If you are looking for a powerful and versatile digital audio workstation, you might be interested in Cubase 8. Cubase 8 is a software that allows you to record, edit, mix and produce music of any genre and style. It has a user-friendly interface, a large collection of instruments and effects, and advanced features such as VST Connect SE, Chord Track and Chord Pads.</p>
|
4 |
-
<h2>cubase 8 free download full version crack windows 10</h2><br /><p><b><b>Download Zip</b> ✅ <a href="https://byltly.com/2uKz5L">https://byltly.com/2uKz5L</a></b></p><br /><br />
|
5 |
-
<p>However, Cubase 8 is not a cheap software. The full version costs $549.99, which might be too expensive for some users. That's why some people look for ways to get Cubase 8 free download full version crack Windows 10. A crack is a modified version of the software that bypasses the activation process and lets you use it without paying.</p>
|
6 |
-
<p>But is it really worth it to get Cubase 8 free download full version crack Windows 10? In this article, we will discuss the pros and cons of using a cracked version of Cubase 8, and how to get it safely and legally.</p>
|
7 |
-
<h2>The Pros of Using Cubase 8 Free Download Full Version Crack Windows 10</h2>
|
8 |
-
<p>The main advantage of using a cracked version of Cubase 8 is that you can save money. You don't have to spend hundreds of dollars to get the full features and functionality of the software. You can enjoy all the benefits of Cubase 8 without breaking the bank.</p>
|
9 |
-
<p>Another advantage is that you can access the software anytime and anywhere. You don't have to worry about online activation or registration. You can use Cubase 8 offline and on any computer you want. You can also share it with your friends and colleagues without any restrictions.</p>
|
10 |
-
<p></p>
|
11 |
-
<h2>The Cons of Using Cubase 8 Free Download Full Version Crack Windows 10</h2>
|
12 |
-
<p>However, using a cracked version of Cubase 8 also has some drawbacks. The first one is that you are violating the intellectual property rights of the software developer. By using a crack, you are stealing their work and depriving them of their rightful income. This is not only unethical but also illegal. You could face legal consequences if you are caught using or distributing a cracked version of Cubase 8.</p>
|
13 |
-
<p>The second drawback is that you are risking your computer's security and performance. A crack is usually created by hackers or malicious users who might insert viruses, malware or spyware into the software. These could harm your computer, steal your personal information, or damage your files. You could also experience crashes, errors, or glitches while using a cracked version of Cubase 8. You might lose your work or compromise your projects.</p>
|
14 |
-
<p>The third drawback is that you are missing out on updates and support. A cracked version of Cubase 8 is not compatible with the official updates and patches released by the software developer. These updates are important to fix bugs, improve performance, and add new features and enhancements. You could also miss out on technical support and customer service if you encounter any problems or issues with the software.</p>
|
15 |
-
<h2>How to Get Cubase 8 Free Download Full Version Crack Windows 10 Safely and Legally</h2>
|
16 |
-
<p>So, how can you get Cubase 8 free download full version crack Windows 10 without risking your computer's security and performance, violating the law, or missing out on updates and support? The answer is simple: you can't.</p>
|
17 |
-
<p>There is no safe and legal way to get a cracked version of Cubase 8. The only way to get the full version of Cubase 8 is to buy it from the official website or an authorized dealer. This way, you can ensure that you are getting a genuine and reliable product that will meet your needs and expectations.</p>
|
18 |
-
<p>However, if you are still hesitant to spend money on Cubase 8, there are some alternatives that you can try. For example, you can download the trial version of Cubase 8 from the official website. The trial version lets you use the software for 30 days for free. You can test all the features and functions of Cubase 8 and see if it suits your preferences and requirements.</p>
|
19 |
-
<p>Another option is to use a free or cheaper digital audio workstation that has similar capabilities as Cubase 8. Some examples are Audacity, Reaper, LMMS, Ardour, or GarageBand. These software are either free or low-cost, but they still offer a range of tools and functions for music</p> ddb901b051<br />
|
20 |
-
<br />
|
21 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cyberghost Vpn Serial Giveaway.md
DELETED
@@ -1,33 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>How to Get a Free Cyberghost VPN Serial Key for 12 Months</h1>
|
3 |
-
<p>If you are looking for a reliable and secure VPN service that can protect your online privacy and unblock geo-restricted content, you might be interested in Cyberghost VPN. Cyberghost VPN is one of the most popular VPNs in the world, with over 30 million users and 3000+ servers in 60+ countries. Cyberghost VPN offers a strict no-logs policy, high-level encryption, Wi-Fi protection, malware blocking, and access to streaming services like Netflix, Hulu, BBC iPlayer, and more.</p>
|
4 |
-
<h2>Cyberghost Vpn Serial Giveaway</h2><br /><p><b><b>DOWNLOAD</b> ->>> <a href="https://byltly.com/2uKx8W">https://byltly.com/2uKx8W</a></b></p><br /><br />
|
5 |
-
<p>But what if you don't want to pay for a VPN subscription? Well, you are in luck because we have an exclusive giveaway for our readers. You can get a free Cyberghost VPN serial key that will activate your premium account for 12 months and allow you to use the service on up to 7 devices simultaneously. That's a value of $69.99 that you can get for free!</p>
|
6 |
-
<p>How to participate in the giveaway? It's very simple. Just follow these steps:</p>
|
7 |
-
<ol>
|
8 |
-
<li>Complete this survey: https://cyberghostvpn.typeform.com/to/cI1IEO</li>
|
9 |
-
<li>Enter your name and email address in the widget below and click on "Enter".</li>
|
10 |
-
<li>Wait for the giveaway to end on Monday, August 11 at at 15:00 UTC (8:00 a.m. PDT/PST).</li>
|
11 |
-
<li>Check your email inbox for the Cyberghost VPN serial key and instructions on how to activate it.</li>
|
12 |
-
</ol>
|
13 |
-
<p>That's it! You have just entered the giveaway and have a chance to win a free Cyberghost VPN serial key. Don't miss this opportunity to enjoy one of the best VPNs in the market for free. Good luck!</p>
|
14 |
-
|
15 |
-
<p>Why should you use Cyberghost VPN? There are many benefits of using a VPN service, especially in today's digital world where online threats and censorship are rampant. Here are some of the reasons why you should use Cyberghost VPN:</p>
|
16 |
-
<ul>
|
17 |
-
<li>You can hide your real IP address and location from prying eyes and hackers. This way, you can surf the web anonymously and securely, without leaving any traces or exposing your personal data.</li>
|
18 |
-
<li>You can bypass geo-restrictions and access any website or service that is blocked in your country or region. For example, you can watch Netflix US from anywhere in the world, or access social media platforms like Facebook and Twitter in countries where they are banned.</li>
|
19 |
-
<li>You can protect your online transactions and conversations from eavesdroppers and cybercriminals. Cyberghost VPN encrypts your traffic with AES-256 bit encryption, the same standard used by the military and banks. This means that no one can intercept or tamper with your data, even on public Wi-Fi networks.</li>
|
20 |
-
<li>You can enjoy faster and smoother online streaming and gaming. Cyberghost VPN has optimized servers for various streaming platforms and games, ensuring that you get the best possible performance and quality. You can also avoid bandwidth throttling and ISP snooping that can slow down your connection.</li>
|
21 |
-
</ul>
|
22 |
-
<p>How to use Cyberghost VPN? Using Cyberghost VPN is very easy and intuitive. You don't need any technical skills or knowledge to use it. Here's how to use Cyberghost VPN:</p>
|
23 |
-
<p></p>
|
24 |
-
<ol>
|
25 |
-
<li>Download and install the Cyberghost VPN app on your device. You can find it on the official website or on the app store of your device.</li>
|
26 |
-
<li>Launch the app and log in with your Cyberghost VPN serial key or create a free account if you don't have one.</li>
|
27 |
-
<li>Select the VPN profile that suits your needs. You can choose from Surf Anonymously, Unblock Streaming, Network / Wi-Fi Protection, Torrent Anonymously, or Custom.</li>
|
28 |
-
<li>Click on the Connect button and wait for the app to establish a secure connection to a VPN server.</li>
|
29 |
-
<li>Enjoy your online freedom and privacy with Cyberghost VPN!</li>
|
30 |
-
</ol>
|
31 |
-
<p>Conclusion Cyberghost VPN is one of the best VPN services that you can use to protect your online privacy and unblock geo-restricted content. It offers a strict no-logs policy, high-level encryption, Wi-Fi protection, malware blocking, and access to streaming services like Netflix, Hulu, BBC iPlayer, and more. And now, you have a chance to get a free Cyberghost VPN serial key that will activate your premium account for 12 months and allow you to use the service on up to 7 devices simultaneously. Don't miss this opportunity to enjoy one of the best VPNs in the market for free. Enter the giveaway now and good luck!</p> cec2833e83<br />
|
32 |
-
<br />
|
33 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Fizikos Uzdavinynas 10 Kl Pdf 37 _VERIFIED_.md
DELETED
@@ -1,22 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Fizikos Uzdavinynas 10 Kl PDF 37: A Useful Resource for Physics Students</h1>
|
3 |
-
<p>If you are a student of physics in the 10th grade, you might be looking for a good problem book to practice your skills and prepare for exams. One of the best options available online is the fizikos uzdavinynas 10 kl pdf 37, which is a collection of physics problems and solutions for the 10th grade curriculum in Lithuania.</p>
|
4 |
-
<h2>fizikos uzdavinynas 10 kl pdf 37</h2><br /><p><b><b>DOWNLOAD</b> 🔗 <a href="https://byltly.com/2uKvEI">https://byltly.com/2uKvEI</a></b></p><br /><br />
|
5 |
-
<p>The fizikos uzdavinynas 10 kl pdf 37 covers all the main topics of physics, such as mechanics, thermodynamics, electricity, magnetism, optics, and modern physics. It contains over 300 problems of varying difficulty levels, from simple exercises to challenging puzzles. The problems are arranged by topic and subtopic, and each problem has a detailed solution with explanations and diagrams.</p>
|
6 |
-
<p>The fizikos uzdavinynas 10 kl pdf 37 is a great resource for physics students who want to improve their understanding of the subject and test their knowledge. It can also help teachers who need some extra material for their classes or homework assignments. The fizikos uzdavinynas 10 kl pdf 37 is available for free download from the website of the Lithuanian Ministry of Education and Science.</p>
|
7 |
-
<p>To download the fizikos uzdavinynas 10 kl pdf 37, you just need to follow these simple steps:</p>
|
8 |
-
<ol>
|
9 |
-
<li>Go to the website of the Lithuanian Ministry of Education and Science at https://www.smm.lt/</li>
|
10 |
-
<li>Click on the tab "Mokymo priemonÄs" (Teaching tools) on the top menu.</li>
|
11 |
-
<li>Scroll down to the section "Fizika" (Physics) and click on the link "Fizikos uždavinių rinkinys X klasei" (Physics problem set for the 10th grade).</li>
|
12 |
-
<li>You will see a list of files with different formats and languages. Choose the one that says "PDF (Lietuvių)" (PDF in Lithuanian).</li>
|
13 |
-
<li>Click on the download icon next to the file name and save it to your device.</li>
|
14 |
-
</ol>
|
15 |
-
<p>That's it! You now have access to the fizikos uzdavinynas 10 kl pdf 37, a useful resource for physics students. Enjoy solving the problems and learning more about physics!</p>
|
16 |
-
|
17 |
-
<p>The fizikos uzdavinynas 10 kl pdf 37 is not only a problem book, but also a learning tool. It can help you review the concepts and formulas of physics, as well as develop your problem-solving skills and logical thinking. The problems are designed to stimulate your curiosity and creativity, and to challenge you to apply your knowledge in different situations.</p>
|
18 |
-
<p></p>
|
19 |
-
<p>The fizikos uzdavinynas 10 kl pdf 37 is also a reliable source of information. It is based on the official physics curriculum for the 10th grade in Lithuania, which follows the international standards and recommendations. The problems and solutions are written by experienced physics teachers and experts, who have checked the accuracy and clarity of the content.</p>
|
20 |
-
<p>The fizikos uzdavinynas 10 kl pdf 37 is suitable for students of any level of physics proficiency. Whether you are a beginner or an advanced learner, you will find something useful and interesting in this book. You can use it as a self-study guide, a supplement to your textbook, or a preparation for exams. You can also use it as a fun and educational activity to do with your friends or family.</p> cec2833e83<br />
|
21 |
-
<br />
|
22 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Antenna Web Design Studio 6.57 REPACK Crack.md
DELETED
@@ -1,96 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Antenna Web Design Studio 6.57 Crack: A Review</h1>
|
3 |
-
<p>If you are looking for a software that can help you create your own website without coding, you might want to check out Antenna Web Design Studio 6.57 Crack. This is a powerful and easy-to-use web design tool that lets you design your web pages with drag and drop, transparent layers, master pages, and more. In this article, we will review the features, benefits, and drawbacks of Antenna Web Design Studio 6.57 Crack.</p>
|
4 |
-
|
5 |
-
<h2>Features of Antenna Web Design Studio 6.57 Crack</h2>
|
6 |
-
<p>Antenna Web Design Studio 6.57 Crack has many features that make it a great choice for web designers of all levels. Some of the main features are:</p>
|
7 |
-
<h2>Antenna Web Design Studio 6.57 Crack</h2><br /><p><b><b>Download File</b> ✶✶✶ <a href="https://imgfil.com/2uxZlp">https://imgfil.com/2uxZlp</a></b></p><br /><br />
|
8 |
-
<ul>
|
9 |
-
<li>No need for technical knowledge of HTML and CSS: You can design your web pages visually with Antenna Web Design Studio 6.57 Crack, without writing any code. You can also edit the HTML and CSS code if you want to customize your pages further.</li>
|
10 |
-
<li>Create dual layer layouts for desktop and mobile versions: You can design your website for both desktop and mobile devices with Antenna Web Design Studio 6.57 Crack. You can create two different layouts for each page, and switch between them easily. This way, you can ensure that your website looks good and works well on any device.</li>
|
11 |
-
<li>Very high speed in creating pages: You can create your web pages in minutes with Antenna Web Design Studio 6.57 Crack, thanks to its intuitive interface and powerful editor. You can drag and drop your images, text, videos, buttons, and animations from anywhere on your webpage, and adjust them as you like.</li>
|
12 |
-
<li>Possibility to create gallery of images: You can also create beautiful photo galleries for your web pages with Antenna Web Design Studio 6.57 Crack. You can choose from different styles and effects for your galleries, and add captions and links to your images.</li>
|
13 |
-
<li>Benefit from the very powerful and beautiful graphics capabilities: Antenna Web Design Studio 6.57 Crack also has a built-in graphics editor that lets you create stunning graphics for your website. You can draw shapes, gradients, shadows, textures, and more with Antenna Web Design Studio 6.57 Crack.</li>
|
14 |
-
<li>Take advantage of the complete layering system of your web pages: Antenna Web Design Studio 6.57 Crack also allows you to use transparent layers to create complex layouts for your web pages. You can stack multiple layers on top of each other, and change their opacity, position, size, and rotation.</li>
|
15 |
-
<li>Ability to design CSS with CSS Styles Editor: Antenna Web Design Studio 6.57 Crack also has a CSS Styles Editor that lets you create and edit CSS styles for your web pages. You can apply different styles to different elements on your page, and preview the results instantly.</li>
|
16 |
-
</ul>
|
17 |
-
|
18 |
-
<h2>Benefits of Antenna Web Design Studio 6.57 Crack</h2>
|
19 |
-
<p>Antenna Web Design Studio 6.57 Crack has many benefits that make it a worthwhile software to use for web design. Some of the main benefits are:</p>
|
20 |
-
<ul>
|
21 |
-
<li>It is easy to use: Antenna Web Design Studio 6.57 Crack has a user-friendly interface that makes it easy to navigate and use. You can access all the tools and options from the main menu or the toolbar, and get help from the online tutorials and support.</li>
|
22 |
-
<li>It is flexible: Antenna Web Design Studio 6.57 Crack gives you full control over your web design, allowing you to customize every aspect of your web pages. You can also import and export your files in various formats, such as HTML, JPG, PNG, GIF, SVG, PDF, etc.</li>
|
23 |
-
<li>It is affordable: Antenna Web Design Studio 6.57 Crack is a low-cost software that offers a lot of value for its price. You can download it for free from various websites, or buy it for a reasonable price from the official website.</li>
|
24 |
-
<li>It is compatible: Antenna Web Design Studio 6.57 Crack is compatible with Windows 7/8/8.1/10 operating systems, and works well with most browsers and devices.</li>
|
25 |
-
</ul>
|
26 |
-
|
27 |
-
<h2>Drawbacks of Antenna Web Design Studio 6.57 Crack</h2>
|
28 |
-
<p>Antenna Web Design Studio 6.57 Crack is not a perfect software, and it has some drawbacks that you should be aware of before using it. Some of the main drawbacks are:</p>
|
29 |
-
<ul>
|
30 |
-
<li>It is illegal: Antenna Web Design Studio 6.57 Crack is a cracked version of the original software, which means that it is not authorized by the developer or the distributor. Using cracked software is illegal and unethical, and it may expose you to legal issues or malware infections.</li>
|
31 |
-
<li>It is unstable: Antenna Web Design Studio 6.57 Crack may not work properly or crash frequently due to its cracked nature. It may also have bugs or errors that affect its performance or functionality.</li>
|
32 |
-
<li>It is outdated: Antenna Web Design Studio 6.57 Crack is an old version of the software that may not have the latest features or updates that the original software has.</li>
|
33 |
-
<li>It is unsupported: Antenna Web Design Studio 6.57 Crack does not have any official support or customer service from the developer or the distributor. If you encounter any problems or issues with the software, you will not be able to get any help or assistance.</li>
|
34 |
-
</ul>
|
35 |
-
|
36 |
-
<h2>Conclusion</h2>
|
37 |
-
<p>Antenna Web Design Studio 6.57 Crack is a powerful and easy-to-use web design software that lets you create your own website without coding. It has many features, benefits, and drawbacks that you should consider before using it.</p>
|
38 |
-
|
39 |
-
<p>If you are looking for a legal, stable, updated, and supported version of the software, you should buy Antenna Web Design Studio from the official website or a trusted vendor.</p>
|
40 |
-
|
41 |
-
<p>If you are looking for an alternative web design software that is free and open source, you should try WordPress.org or Wix.com.</p>
|
42 |
-
|
43 |
-
<p>If you are looking for a professional web design service that can create a custom website for you at an affordable price, you should contact us today.</p>
|
44 |
-
<h2>How to Download and Install Antenna Web Design Studio 6.57 Crack</h2>
|
45 |
-
<p>If you want to try Antenna Web Design Studio 6.57 Crack for yourself, you can download it from various websites that offer cracked software. However, we do not recommend this option, as it is illegal and risky. You may end up downloading a virus or malware that can harm your computer or steal your personal information.</p>
|
46 |
-
|
47 |
-
<p>The best way to download and install Antenna Web Design Studio 6.57 Crack is to buy it from the official website or a trusted vendor. This way, you can get a legal, stable, updated, and supported version of the software that will work as intended.</p>
|
48 |
-
<p></p>
|
49 |
-
|
50 |
-
<p>To buy Antenna Web Design Studio 6.57 Crack, you need to follow these steps:</p>
|
51 |
-
<ol>
|
52 |
-
<li>Go to the official website of Antenna Web Design Studio at https://www.stormdance.net/</li>
|
53 |
-
<li>Click on the "Buy Now" button and choose your preferred payment method.</li>
|
54 |
-
<li>After completing the payment, you will receive an email with your license key and a download link.</li>
|
55 |
-
<li>Click on the download link and save the setup file on your computer.</li>
|
56 |
-
<li>Run the setup file and follow the instructions to install Antenna Web Design Studio 6.57 Crack on your computer.</li>
|
57 |
-
<li>Enter your license key when prompted to activate the software.</li>
|
58 |
-
<li>Enjoy creating your own website with Antenna Web Design Studio 6.57 Crack.</li>
|
59 |
-
</ol>
|
60 |
-
|
61 |
-
<h2>Tips and Tricks for Using Antenna Web Design Studio 6.57 Crack</h2>
|
62 |
-
<p>Antenna Web Design Studio 6.57 Crack is a versatile and powerful web design software that can help you create amazing websites with ease. However, there are some tips and tricks that can help you make the most out of it and improve your web design skills. Here are some of them:</p>
|
63 |
-
<ul>
|
64 |
-
<li>Use master pages: Master pages are templates that you can use to create consistent layouts for your web pages. You can define common elements such as headers, footers, menus, logos, etc. on your master pages, and then apply them to your web pages. This way, you can save time and ensure uniformity across your website.</li>
|
65 |
-
<li>Use layers: Layers are transparent containers that you can use to arrange your web page elements in different positions and depths. You can stack multiple layers on top of each other, and change their opacity, position, size, and rotation. This way, you can create complex and dynamic layouts for your web pages.</li>
|
66 |
-
<li>Use styles: Styles are sets of formatting rules that you can apply to different elements on your web page, such as text, images, links, etc. You can create and edit styles with the CSS Styles Editor in Antenna Web Design Studio 6.57 Crack. This way, you can change the appearance of your web page elements easily and consistently.</li>
|
67 |
-
<li>Use graphics: Graphics are visual elements that you can create or import in Antenna Web Design Studio 6.57 Crack. You can use graphics to enhance the look and feel of your website, such as backgrounds, buttons, icons, logos, etc. You can also use the built-in graphics editor in Antenna Web Design Studio 6.57 Crack to create stunning graphics for your website.</li>
|
68 |
-
<li>Use galleries: Galleries are collections of images that you can display on your web pages in different ways. You can choose from different styles and effects for your galleries, such as slideshows, lightboxes, carousels, etc. You can also add captions and links to your images in your galleries.</li>
|
69 |
-
</ul>
|
70 |
-
|
71 |
-
<h2>Conclusion</h2>
|
72 |
-
<p>Antenna Web Design Studio 6.57 Crack is a powerful and easy-to-use web design software that lets you create your own website without coding. It has many features, benefits, and drawbacks that you should consider before using it.</p>
|
73 |
-
|
74 |
-
<p>If you are looking for a legal, stable, updated, and supported version of the software, you should buy Antenna Web Design Studio from the official website or a trusted vendor.</p>
|
75 |
-
|
76 |
-
<p>If you are looking for an alternative web design software that is free and open source, you should try WordPress.org or Wix.com.</p>
|
77 |
-
|
78 |
-
<p>If you are looking for a professional web design service that can create a custom website for you at an affordable price, you should contact us today.</p>
|
79 |
-
|
80 |
-
<p>We hope this article has helped you learn more about Antenna Web Design Studio 6.57 Crack and how to use it effectively.</p>
|
81 |
-
|
82 |
-
<p>Thank you for reading!</p>
|
83 |
-
<h2>Conclusion</h2>
|
84 |
-
<p>Antenna Web Design Studio 6.57 Crack is a powerful and easy-to-use web design software that lets you create your own website without coding. It has many features, benefits, and drawbacks that you should consider before using it.</p>
|
85 |
-
|
86 |
-
<p>If you are looking for a legal, stable, updated, and supported version of the software, you should buy Antenna Web Design Studio from the official website or a trusted vendor.</p>
|
87 |
-
|
88 |
-
<p>If you are looking for an alternative web design software that is free and open source, you should try WordPress.org or Wix.com.</p>
|
89 |
-
|
90 |
-
<p>If you are looking for a professional web design service that can create a custom website for you at an affordable price, you should contact us today.</p>
|
91 |
-
|
92 |
-
<p>We hope this article has helped you learn more about Antenna Web Design Studio 6.57 Crack and how to use it effectively.</p>
|
93 |
-
|
94 |
-
<p>Thank you for reading!</p> 3cee63e6c2<br />
|
95 |
-
<br />
|
96 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Folder Marker Pro 4.0 UPDATED Crack.md
DELETED
@@ -1,6 +0,0 @@
|
|
1 |
-
<h2>Folder Marker Pro 4.0 Crack</h2><br /><p><b><b>Download Zip</b> ✔ <a href="https://imgfil.com/2uxXNN">https://imgfil.com/2uxXNN</a></b></p><br /><br />
|
2 |
-
|
3 |
-
d5da3c52bf<br />
|
4 |
-
<br />
|
5 |
-
<br />
|
6 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Apex Racing How to Race and Drift with Unlimited Money MOD.md
DELETED
@@ -1,117 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Apex Racing Mod: A Realistic and Fun Racing Game for Android</h1>
|
3 |
-
<p>If you are a fan of racing and drifting games, you might want to check out Apex Racing Mod, a game that offers highly realistic vehicle simulation, multiplayer and single player modes, customizable cars and tracks, leaderboard and achievements, and more. In this article, we will tell you what Apex Racing Mod is, how to download and install it, and how to play it.</p>
|
4 |
-
<h2>apex racing mod</h2><br /><p><b><b>Download File</b> ○ <a href="https://urlin.us/2uSUk9">https://urlin.us/2uSUk9</a></b></p><br /><br />
|
5 |
-
<h2>What is Apex Racing Mod?</h2>
|
6 |
-
<p>Apex Racing Mod is a modified version of Apex Racing, a racing and drifting game developed by Mobiplay Games. The modded version gives you unlimited money, which you can use to buy and upgrade your cars, unlock new tracks, and access premium features. The modded version also removes ads and other restrictions from the original game.</p>
|
7 |
-
<h3>Features of Apex Racing Mod</h3>
|
8 |
-
<p>Apex Racing Mod has many features that make it one of the best racing games for Android. Here are some of them:</p>
|
9 |
-
<h4>Realistic vehicle simulation</h4>
|
10 |
-
<p>Apex Racing Mod uses advanced physics and graphics to create realistic car movements, collisions, damages, sounds, and effects. You can feel the difference between different car models, engines, tires, suspensions, brakes, and more. You can also customize your car's appearance, performance, and handling to suit your preferences.</p>
|
11 |
-
<p>apex racing mod apk download<br />
|
12 |
-
apex racing mod unlimited money<br />
|
13 |
-
apex racing mod realistic simulation<br />
|
14 |
-
apex racing mod multiplayer mode<br />
|
15 |
-
apex racing mod single player mode<br />
|
16 |
-
apex racing mod latest version<br />
|
17 |
-
apex racing mod android game<br />
|
18 |
-
apex racing mod free game<br />
|
19 |
-
apex racing mod offline game<br />
|
20 |
-
apex racing mod online game<br />
|
21 |
-
apex racing mod drift game<br />
|
22 |
-
apex racing mod car game<br />
|
23 |
-
apex racing mod best game<br />
|
24 |
-
apex racing mod fun game<br />
|
25 |
-
apex racing mod challenging game<br />
|
26 |
-
apex racing mod global leaderboard<br />
|
27 |
-
apex racing mod review<br />
|
28 |
-
apex racing mod guide<br />
|
29 |
-
apex racing mod tips<br />
|
30 |
-
apex racing mod tricks<br />
|
31 |
-
apex racing mod cheats<br />
|
32 |
-
apex racing mod hack<br />
|
33 |
-
apex racing mod gameplay<br />
|
34 |
-
apex racing mod features<br />
|
35 |
-
apex racing mod graphics<br />
|
36 |
-
apex racing mod sound<br />
|
37 |
-
apex racing mod controls<br />
|
38 |
-
apex racing mod customization<br />
|
39 |
-
apex racing mod vehicles<br />
|
40 |
-
apex racing mod tracks<br />
|
41 |
-
apex racing mod maps<br />
|
42 |
-
apex racing mod modes<br />
|
43 |
-
apex racing mod levels<br />
|
44 |
-
apex racing mod missions<br />
|
45 |
-
apex racing mod challenges<br />
|
46 |
-
apex racing mod achievements<br />
|
47 |
-
apex racing mod rewards<br />
|
48 |
-
apex racing mod coins<br />
|
49 |
-
apex racing mod gems<br />
|
50 |
-
apex racing mod upgrades<br />
|
51 |
-
apex racing mod skins<br />
|
52 |
-
apex racing mod decals<br />
|
53 |
-
apex racing mod spoilers<br />
|
54 |
-
apex racing mod tires<br />
|
55 |
-
apex racing mod engines<br />
|
56 |
-
apex racing mod nitro<br />
|
57 |
-
apex racing mod speedometer <br />
|
58 |
-
apex racing mod camera angle <br />
|
59 |
-
apex racing mod steering wheel</p>
|
60 |
-
<h4>Multiplayer and single player modes</h4>
|
61 |
-
<p>Apex Racing Mod supports both online and offline gameplay. You can race against other players from around the world in real-time multiplayer mode, or challenge yourself in single player mode. You can choose from different game modes, such as race, drift, time trial, elimination, and more. You can also join or create your own racing club and compete with other clubs.</p>
|
62 |
-
<h4>Customizable cars and tracks</h4>
|
63 |
-
<p>Apex Racing Mod has a variety of cars and tracks to choose from. You can unlock over 50 cars from different categories, such as sports, muscle, classic, supercars, and more. You can also unlock over 20 tracks from different locations, such as city streets, highways, deserts, mountains, snowfields, and more. You can also create your own tracks using the track editor.</p>
|
64 |
-
<h4>Leaderboard and achievements</h4>
|
65 |
-
<p>Apex Racing Mod has a global leaderboard that shows your ranking among other players based on your performance in multiplayer mode. You can also earn achievements by completing various tasks and challenges in the game. You can share your progress and achievements with your friends on social media.</p>
|
66 |
-
<h2>How to download and install Apex Racing Mod?</h2>
|
67 |
-
<p>If you want to download and install Apex Racing Mod on your Android device, you need to follow these steps:</p>
|
68 |
-
<h3>Requirements for Apex Racing Mod</h3>
|
69 |
-
<p>Before you download and install Apex Racing Mod, you need to make sure that your device meets these requirements:</p>
|
70 |
-
<ul>
|
71 |
-
<li>Your device must have Android 5.0 or higher.</li>
|
72 |
-
<li>Your device must have at least 200 MB of free storage space.</li>
|
73 |
-
<li>Your device must have a stable internet connection.</li>
|
74 |
-
<li>You must enable unknown sources in your device's settings.</li>
|
75 |
-
</ul>
|
76 |
-
<h3>Steps to download and install Apex Racing Mod</h3>
|
77 |
-
<p>After you have checked the requirements, you can follow these steps to download and install Apex Racing Mod:</p>
|
78 |
-
<ol>
|
79 |
-
<li>Go to this link to download the APK file of Apex Racing Mod.</li>
|
80 |
-
<li>Once the download is complete, locate the APK file in your device's file manager and tap on it to install it.</li>
|
81 |
-
<li>Wait <p>Wait for the installation to finish and grant the necessary permissions to the app.</li>
|
82 |
-
<li>Launch the app and enjoy playing Apex Racing Mod with unlimited money and no ads.</li>
|
83 |
-
</ol>
|
84 |
-
<h2>How to play Apex Racing Mod?</h2>
|
85 |
-
<p>Now that you have downloaded and installed Apex Racing Mod, you might be wondering how to play it. Here are some tips and tricks to help you get started:</p>
|
86 |
-
<h3>Controls and gameplay of Apex Racing Mod</h3>
|
87 |
-
<p>Apex Racing Mod has simple and intuitive controls that you can customize according to your preference. You can use the on-screen buttons or tilt your device to steer your car. You can also use the brake, accelerator, handbrake, and nitro buttons to control your speed and drift. You can switch between different camera angles to get a better view of the road.</p>
|
88 |
-
<p>The gameplay of Apex Racing Mod is fast-paced and exciting. You can choose from different game modes, such as race, drift, time trial, elimination, and more. You can also select your car and track from the available options or create your own. You can earn money by winning races, drifting, performing stunts, and completing challenges. You can use the money to buy and upgrade your cars, unlock new tracks, and access premium features.</p>
|
89 |
-
<h3>Tips and tricks for Apex Racing Mod</h3>
|
90 |
-
<p>If you want to improve your skills and performance in Apex Racing Mod, you can follow these tips and tricks:</p>
|
91 |
-
<ul>
|
92 |
-
<li>Practice on different tracks and cars to get familiar with their characteristics and handling.</li>
|
93 |
-
<li>Use the nitro wisely. Don't waste it on straight roads or when you are already ahead of your opponents. Save it for tight corners or when you need a boost.</li>
|
94 |
-
<li>Drift as much as possible. Drifting not only gives you more money, but also fills up your nitro meter faster.</li>
|
95 |
-
<li>Avoid crashing into other cars or obstacles. Crashing will damage your car and slow you down. It will also reduce your money and score.</li>
|
96 |
-
<li>Upgrade your car regularly. Upgrading your car will improve its performance, speed, handling, durability, and appearance.</li>
|
97 |
-
</ul>
|
98 |
-
<h2>Conclusion</h2>
|
99 |
-
<p>Apex Racing Mod is a fun and realistic racing game for Android that you can download and play for free. It has many features that make it one of the best racing games for Android, such as realistic vehicle simulation, multiplayer and single player modes, customizable cars and tracks, leaderboard and achievements, and more. It also gives you unlimited money, which you can use to buy and upgrade your cars, unlock new tracks, and access premium features. It also removes ads and other restrictions from the original game.</p>
|
100 |
-
<p>If you are looking for a racing game that will challenge your skills and entertain you for hours, you should try Apex Racing Mod. It is easy to download and install, and easy to play. You can race against other players from around the world or challenge yourself in single player mode. You can also create your own tracks using the track editor. You can share your progress and achievements with your friends on social media.</p>
|
101 |
-
<p>Apex Racing Mod is a game that will make you feel like a real racer. Download it now and enjoy the thrill of racing!</p>
|
102 |
-
<h2>FAQs</h2>
|
103 |
-
<p>Here are some frequently asked questions about Apex Racing Mod:</p>
|
104 |
-
<ol>
|
105 |
-
<li><b>Is Apex Racing Mod safe to download and install?</b></li>
|
106 |
-
<p>Yes, Apex Racing Mod is safe to download and install. It does not contain any viruses or malware that could harm your device or data. However, you should always download it from a trusted source and enable unknown sources in your device's settings before installing it.</p>
|
107 |
-
<li><b>Do I need to root my device to play Apex Racing Mod?</b></li>
|
108 |
-
<p>No, you do not need to root your device to play Apex Racing Mod. The modded version works on both rooted and non-rooted devices.</p>
|
109 |
-
<li><b>Can I play Apex Racing Mod offline?</b></li>
|
110 |
-
<p>Yes, you can play Apex Racing Mod offline. However, some features may not be available or updated when you play offline, such as multiplayer mode, leaderboard, achievements, etc.</p>
|
111 |
-
<li><b>How can I update Apex Racing Mod?</b></li>
|
112 |
-
<p>To update Apex Racing Mod, you need to download the latest version of the APK file and install it over the existing one. You do not need to uninstall the previous version or lose your progress.</p>
|
113 |
-
<li><b>How can I contact the developer of Apex Racing Mod?</b></li>
|
114 |
-
<p>If you have any questions or feedback about Apex Racing Mod, you can contact the developer of the original game (Mobiplay Games) through their email <p>If you have any questions or feedback about Apex Racing Mod, you can contact the developer of the original game (Mobiplay Games) through their email address: [email protected]. You can also visit their website or follow them on Facebook and Twitter for more updates and news.</p>
|
115 |
-
: https://apkrace.com/apex-racing-mod-apk/ : https://mobiplaygames.com/ : https://www.facebook.com/mobiplaygames : https://twitter.com/mobiplaygames</p> 197e85843d<br />
|
116 |
-
<br />
|
117 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Build and Battle with Your Favorite Cookies in Cookie Run Kingdom Online.md
DELETED
@@ -1,93 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Cookie Run Kingdom: How to Play Online Without Downloading</h1>
|
3 |
-
<p>Do you love cookies and RPG games? If so, you might want to try Cookie Run Kingdom, a popular game that lets you build your own kingdom of cookies and fight against evil forces. But what if you don't want to download the game on your device? Don't worry, there are ways to play Cookie Run Kingdom online without downloading. In this article, we will show you how to do that and why you might want to try it.</p>
|
4 |
-
<h2>cookie run kingdom no download</h2><br /><p><b><b>Download</b> 🌟 <a href="https://urlin.us/2uSUU0">https://urlin.us/2uSUU0</a></b></p><br /><br />
|
5 |
-
<h2>What is Cookie Run Kingdom?</h2>
|
6 |
-
<p>Cookie Run Kingdom is a builder and battle RPG game developed by Devsisters Corporation. It was released in January 2021 and has been downloaded over 10 million times on Google Play and App Store. The game is also available on PC, Mac, and browser platforms.</p>
|
7 |
-
<h3>A builder and battle RPG game</h3>
|
8 |
-
<p>In Cookie Run Kingdom, you can create your own kingdom of cookies by building various structures, such as houses, farms, shops, and factories. You can also recruit different types of cookies, each with their own skills and abilities, to join your team. You can then use your team of cookies to fight against enemies in various modes, such as story mode, tower mode, arena mode, and guild mode. You can also upgrade your cookies and equip them with items to make them stronger.</p>
|
9 |
-
<h3>A cookie-themed adventure with a rich story</h3>
|
10 |
-
<p>Cookie Run Kingdom is not just a game of building and fighting. It also has a rich story that unfolds as you progress through the game. You will discover the secrets of the cookie world, meet various characters, and face different challenges. The game has over 200 story levels, each with its own cutscenes and dialogues. The game also features voice acting from famous actors, such as Tom Kenny, Tara Strong, Cristina Vee, and more.</p>
|
11 |
-
<h3>A multiplayer game with guilds and events</h3>
|
12 |
-
<p>Cookie Run Kingdom is also a multiplayer game that lets you interact with other players from around the world. You can join or create a guild with other players and cooperate with them in guild missions, raids, wars, and chat. You can also compete with other players in the arena mode and rank up in the leaderboard. The game also has regular events that offer special rewards and challenges.</p>
|
13 |
-
<h2>Why play Cookie Run Kingdom online without downloading?</h2>
|
14 |
-
<p>Cookie Run Kingdom is a fun and addictive game that you can enjoy on your device. However, there are some reasons why you might want to play it online without downloading. Here are some of them:</p>
|
15 |
-
<h3>Save storage space on your device</h3>
|
16 |
-
<p>Cookie Run Kingdom is a large game that requires about 1.5 GB of storage space on your device. If you have limited storage space or want to save it for other apps or files, you might not want to download the game. By playing it online without downloading, you can save storage space on your device and still enjoy the game.</p>
|
17 |
-
<h3>Play on any device with a browser</h3>
|
18 |
-
<p>Cookie Run Kingdom is compatible with Android, iOS, PC, Mac, and browser platforms. However, if you don't have access to your preferred device or want to switch between devices easily, you might want to play it online without downloading. By playing it online, you can use any device that has a browser and an internet connection. You can also switch between devices without losing your progress or data.</p>
|
19 |
-
<h3>Enjoy faster loading and smoother gameplay</h3>
|
20 |
-
<p>Cookie Run Kingdom is a high-quality game that requires a lot of resources to run smoothly. If you have a slow or unstable internet connection, or a low-end device, you might experience lagging, crashing, or freezing issues when playing the game. By playing it online without downloading, you can avoid these problems and enjoy faster loading and smoother gameplay. The online platform will handle the processing and rendering of the game for you, so you don't have to worry about your device's performance or connection.</p>
|
21 |
-
<p>cookie run kingdom online free<br />
|
22 |
-
cookie run kingdom browser game<br />
|
23 |
-
cookie run kingdom play without install<br />
|
24 |
-
cookie run kingdom unblocked games<br />
|
25 |
-
cookie run kingdom pc version<br />
|
26 |
-
cookie run kingdom mac compatible<br />
|
27 |
-
cookie run kingdom web version<br />
|
28 |
-
cookie run kingdom no emulator<br />
|
29 |
-
cookie run kingdom instant play<br />
|
30 |
-
cookie run kingdom now.gg<br />
|
31 |
-
cookie run kingdom rpg online<br />
|
32 |
-
cookie run kingdom builder game<br />
|
33 |
-
cookie run kingdom sonic crossover<br />
|
34 |
-
cookie run kingdom guild tips<br />
|
35 |
-
cookie run kingdom coupon codes<br />
|
36 |
-
cookie run kingdom combat sneak peek<br />
|
37 |
-
cookie run kingdom characters list<br />
|
38 |
-
cookie run kingdom story mode<br />
|
39 |
-
cookie run kingdom dark flour war<br />
|
40 |
-
cookie run kingdom age of darkness<br />
|
41 |
-
cookie run kingdom rise of heroes<br />
|
42 |
-
cookie run kingdom final battle<br />
|
43 |
-
cookie run kingdom oyster cookie review<br />
|
44 |
-
cookie run kingdom best team composition<br />
|
45 |
-
cookie run kingdom tier list 2023<br />
|
46 |
-
cookie run kingdom update news<br />
|
47 |
-
cookie run kingdom devplay corporation<br />
|
48 |
-
cookie run kingdom discord server<br />
|
49 |
-
cookie run kingdom instagram page<br />
|
50 |
-
cookie run kingdom facebook group<br />
|
51 |
-
cookie run kingdom youtube videos<br />
|
52 |
-
cookie run kingdom reddit community<br />
|
53 |
-
cookie run kingdom wiki guide<br />
|
54 |
-
cookie run kingdom fandom page<br />
|
55 |
-
cookie run kingdom fan art gallery<br />
|
56 |
-
cookie run kingdom merchandise store<br />
|
57 |
-
cookie run kingdom plushies collection<br />
|
58 |
-
cookie run kingdom rainbow cubes hack<br />
|
59 |
-
cookie run kingdom magic cutters cheat<br />
|
60 |
-
cookie run kingdom time jumpers glitch<br />
|
61 |
-
cookie run kingdom aurora items freebie<br />
|
62 |
-
cookie run kingdom how to get gingerbrave <br />
|
63 |
-
cookie run kingdom how to unlock costumes <br />
|
64 |
-
cookie run kingdom how to level up fast <br />
|
65 |
-
cookie run kingdom how to earn crystals <br />
|
66 |
-
cookie run kingdom how to join a guild <br />
|
67 |
-
cookie run kingdom how to make friends <br />
|
68 |
-
cookie run kingdom how to chat with others <br />
|
69 |
-
cookie run kingdom how to change language <br />
|
70 |
-
cookie run kingdom how to contact support</p>
|
71 |
-
<h2>How to play Cookie Run Kingdom online without downloading?</h2>
|
72 |
-
<p>Now that you know why you might want to play Cookie Run Kingdom online without downloading, you might be wondering how to do it. There are two main ways to play the game online: using now.gg or using YouTube. Here are the steps for each method:</p>
|
73 |
-
<h3>Use now.gg to play in your browser</h3>
|
74 |
-
<p>Now.gg is a cloud gaming platform that lets you play mobile games in your browser without downloading or installing anything. It is free, fast, and secure. You can use now.gg to play Cookie Run Kingdom online in a few simple steps:</p>
|
75 |
-
<h4>Step 1: Go to the now.gg website</h4>
|
76 |
-
<p>Open your browser and go to the now.gg website. You will see a list of games that you can play on the platform. You can also use the search bar to find the game you want.</p>
|
77 |
-
<h4>Step 2: Search for Cookie Run Kingdom</h4>
|
78 |
-
<p>Type "Cookie Run Kingdom" in the search bar and hit enter. You will see the game's icon and name on the screen. Click on it to open the game's page.</p>
|
79 |
-
<h4>Step 3: Click on the play button and enjoy</h4>
|
80 |
-
<p>On the game's page, you will see a big play button on the top right corner. Click on it to start playing the game in your browser. You will need to sign in with your Google account or create a new one if you don't have one. You will also need to agree to the terms and conditions of the game and the platform. After that, you can enjoy playing Cookie Run Kingdom online without downloading.</p>
|
81 |
-
<h3>Use YouTube to watch gameplay videos and guides</h3>
|
82 |
-
<p>If you don't want to play Cookie Run Kingdom online but still want to enjoy it without downloading, you can use YouTube to watch gameplay videos and guides. YouTube is a video-sharing platform that has millions of videos on various topics, including games. You can use YouTube to watch Cookie Run Kingdom videos in a few simple steps:</p>
|
83 |
-
<h4>Step 1: Go to YouTube and search for Cookie Run Kingdom</h4>
|
84 |
-
<p>Open your browser and go to YouTube.com. You will see a search bar on the top of the page. Type "Cookie Run Kingdom" in the search bar and hit enter. You will see a list of videos related to the game.</p>
|
85 |
-
<h4>Step 2: Find a video that suits your interest and level</h4>
|
86 |
-
<p>You can browse through the videos and find one that suits your interest and level. For example, you can watch videos that show how to build your kingdom, how to fight against enemies, how to recruit cookies, how to upgrade cookies, how to join guilds, how to participate in events, and more. You can also filter the videos by relevance, date, view count, rating, etc.</p>
|
87 |
-
<h4>Step 3: Watch and learn from other players</h4>
|
88 |
-
<p>Once you find a video that you like, click on it to watch it. You can learn from other players' strategies, tips, tricks, and experiences. You can also leave comments, like, share, or subscribe to the video or channel if you want.</p>
|
89 |
-
<h2>Conclusion</h2>
|
90 |
-
<p>Cookie Run Kingdom is a fun and addictive game that you can play on your device or online without downloading. Playing it online has some benefits, such as saving storage space, playing on any device with a browser, and enjoying faster loading and smoother gameplay. You can play it online using now.gg or watch it online using YouTube. Either way, you can have a great time with this cookie-themed adventure.</p>
|
91 |
-
FAQs - Q: Is Cookie Run Kingdom free to play? - A: Yes, Cookie Run Kingdom is free to play with optional in-app purchases. - Q: How do I save my progress when playing Cookie Run Kingdom online? - A: You can save your progress by linking your game account with your Google account or Facebook account. - Q: Can I play Cookie Run Kingdom offline? - A: No, you need an internet connection to play Cookie Run Kingdom. - Q: How do I get more cookies in Cookie Run Kingdom? - A: You can get more cookies by completing story levels, participating in events, summoning them with crystals or tickets, or buying them with real money. - Q: How do I join a guild in Cookie Run Kingdom? - A: You can join a guild by tapping on the guild icon on the bottom right corner of the screen. You can then search for a guild that suits your preferences, or create your own guild if you want. - Q: How do I contact the developers of Cookie Run Kingdom? - A: You can contact the developers of Cookie Run Kingdom by sending an email to [email protected] or visiting their official website, Facebook page, Twitter account, or Discord server.</p> 197e85843d<br />
|
92 |
-
<br />
|
93 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/ARK Survival Evolved APK - The Best Mobile Game of 2023 - Download Here.md
DELETED
@@ -1,117 +0,0 @@
|
|
1 |
-
|
2 |
-
<br>
|
3 |
-
<table>
|
4 |
-
<tr>
|
5 |
-
<td>
|
6 |
-
<h1>Download ARK: Survival Evolved APK Latest Version</h1>
|
7 |
-
<p>Do you love dinosaurs? Do you love survival games? Do you love open-world games? If you answered yes to any of these questions, then you should definitely check out <strong>ARK: Survival Evolved</strong>, one of the most popular and immersive games on Android. In this article, we will tell you everything you need to know about this game and how to download its latest version as an APK file.</p>
|
8 |
-
<h2>What is ARK: Survival Evolved?</h2>
|
9 |
-
<p><strong>ARK: Survival Evolved</strong> is a 3D action-adventure game that lets you experience what it would be like to live in a world full of dinosaurs and other prehistoric creatures. You can explore, hunt, gather, craft, build, tame, breed, and fight your way through a massive open world that is constantly evolving and changing. You can play solo or join a tribe of other players online and cooperate or compete with them in various modes. You can also customize your character, your base, and your dinosaurs to suit your playstyle and preferences.</p>
|
10 |
-
<h2>download ark survival evolved apk latest version</h2><br /><p><b><b>Download Zip</b> ⇒⇒⇒ <a href="https://jinyurl.com/2uNOqk">https://jinyurl.com/2uNOqk</a></b></p><br /><br />
|
11 |
-
<h2>Why download ARK: Survival Evolved APK?</h2>
|
12 |
-
<p>If you are a fan of ARK: Survival Evolved, you might be wondering why you should download the APK file instead of getting the game from the Google Play Store. Well, there are several reasons why downloading the APK file can be beneficial for you. Here are some of them:</p>
|
13 |
-
<ul>
|
14 |
-
<li>You can get the latest version of the game before it is officially released on the Play Store. This way, you can enjoy the new features, bug fixes, and improvements as soon as possible.</li>
|
15 |
-
<li>You can access the game even if it is not available in your region or country. Some games are geo-restricted or banned in certain areas due to legal or political reasons. By downloading the APK file, you can bypass these restrictions and play the game wherever you are.</li>
|
16 |
-
<li>You can save storage space on your device. The APK file is usually smaller than the Play Store version because it does not include additional data or files that are not necessary for running the game. You can also delete the APK file after installing the game to free up more space.</li>
|
17 |
-
<li>You can avoid annoying ads and in-app purchases. Some games on the Play Store are filled with ads that interrupt your gameplay or tempt you to spend real money on items or upgrades. By downloading the APK file, you can avoid these annoyances and enjoy the game without any distractions or costs.</li>
|
18 |
-
</ul>
|
19 |
-
<h2>How to download ARK: Survival Evolved APK?</h2>
|
20 |
-
<p>Now that you know why downloading ARK: Survival Evolved APK is a good idea, you might be wondering how to do it. Don't worry, it's very easy and simple. Just follow these steps and you will be playing the game in no time:</p>
|
21 |
-
<h3>Step 1: Enable unknown sources</h3>
|
22 |
-
<p>Before you can install any APK file on your device, you need to enable unknown sources in your settings. This will allow you to install apps from sources other than the Play Store. To do this, go to your device's settings and look for security or privacy options. Then, find the option that says unknown sources or allow installation from unknown sources and toggle it on. You might see a warning message that says installing apps from unknown sources can harm your device or compromise your data. Don't worry, this is just a precautionary measure and as long as you download the APK file from a reliable and safe source, you have nothing to fear.</p>
|
23 |
-
<h3>Step 2: Download the APK file</h3>
|
24 |
-
<p>The next step is to download the APK file of ARK: Survival Evolved from a trustworthy and reputable source. There are many websites that offer APK files for various games and apps, but not all of them are safe and secure. Some of them might contain malware, viruses, or spyware that can damage your device or steal your personal information. To avoid this risk, we recommend downloading the APK file from <a href="">APKPure.com</a>, one of the most popular and trusted sources for APK files on the internet. To download the APK file from APKPure.com, simply click on this link: <a href="">https://apkpure.com/ark-survival-evolved/com.studiowildcard.wardrumstudios.ark/download?from=details</a>. This will take you to the download page where you can see the size, version, and update date of the APK file. Then, click on the download button and wait for the file to be downloaded to your device.</p>
|
25 |
-
<h3>Step 3: Install the APK file</h3>
|
26 |
-
<p>Once you have downloaded the APK file, you need to install it on your device. To do this, locate the file in your downloads folder or wherever you saved it and tap on it. You might see a pop-up window that asks you if you want to install this application. Tap on install and wait for the installation process to finish. You might also see a pop-up window that asks you if you want to open this application or done. Tap on done and exit the installer.</p>
|
27 |
-
<h3>Step 4: Launch the game and enjoy</h3>
|
28 |
-
<p>Congratulations! You have successfully installed ARK: Survival Evolved APK on your device. Now, all that's left to do is to launch the game and enjoy it. To do this, go to your app drawer or home screen and look for the ARK: Survival Evolved icon. Tap on it and wait for the game to load. You might see a splash screen that shows the game's logo and some information. Then, you will see the main menu where you can choose to play single player, multiplayer, or settings. Choose the mode you want to play and start your adventure in the world of ARK: Survival Evolved.</p>
|
29 |
-
<h2>What are the features of ARK: Survival Evolved APK?</h2>
|
30 |
-
<p>ARK: Survival Evolved APK is not just a simple game. It is a rich and immersive experience that offers you countless hours of fun and entertainment. Here are some of the features that make this game so amazing:</p>
|
31 |
-
<h3>Feature 1: Explore a massive open world</h3>
|
32 |
-
<p>One of the most impressive aspects of ARK: Survival Evolved is its huge and diverse game world. You can explore over 50 square kilometers of land and sea, each with its own biome, climate, terrain, flora, and fauna. You can discover lush jungles, snowy mountains, volcanic islands, swamps, caves, underwater reefs, and more. You can also encounter over 200 different species of animals, from dinosaurs and mammals to insects and fish. Some of them are friendly and can be tamed, while others are hostile and will attack you on sight. You can also find hidden secrets, ancient ruins, artifacts, and loot scattered around the world.</p>
|
33 |
-
<p>How to download ark survival evolved apk for free<br />
|
34 |
-
Ark survival evolved apk mod unlimited money and resources<br />
|
35 |
-
Ark survival evolved apk obb data offline download<br />
|
36 |
-
Download ark survival evolved apk on pc with bluestacks<br />
|
37 |
-
Ark survival evolved apk latest version 2.0.28 update<br />
|
38 |
-
Ark survival evolved apk full game unlocked download<br />
|
39 |
-
Ark survival evolved apk android requirements and compatibility<br />
|
40 |
-
Ark survival evolved apk filehippo download link and review<br />
|
41 |
-
Ark survival evolved apk combo download and install guide<br />
|
42 |
-
Ark survival evolved apk wizcase download for free 2023<br />
|
43 |
-
Best settings for ark survival evolved apk on android<br />
|
44 |
-
Ark survival evolved apk gameplay and features overview<br />
|
45 |
-
Ark survival evolved apk cheats and hacks for android<br />
|
46 |
-
Ark survival evolved apk download size and installation time<br />
|
47 |
-
Ark survival evolved apk tips and tricks for beginners<br />
|
48 |
-
Ark survival evolved apk multiplayer mode and online servers<br />
|
49 |
-
Ark survival evolved apk graphics and performance comparison<br />
|
50 |
-
Ark survival evolved apk bugs and issues fix and solutions<br />
|
51 |
-
Ark survival evolved apk new dinosaurs and creatures update<br />
|
52 |
-
Ark survival evolved apk maps and locations guide<br />
|
53 |
-
Ark survival evolved apk crafting and building system tutorial<br />
|
54 |
-
Ark survival evolved apk weapons and armor list and stats<br />
|
55 |
-
Ark survival evolved apk taming and breeding dinosaurs guide<br />
|
56 |
-
Ark survival evolved apk missions and challenges walkthrough<br />
|
57 |
-
Ark survival evolved apk skins and customizations options<br />
|
58 |
-
Ark survival evolved apk events and rewards calendar 2023<br />
|
59 |
-
Ark survival evolved apk reviews and ratings from users<br />
|
60 |
-
Ark survival evolved apk alternatives and similar games for android<br />
|
61 |
-
Ark survival evolved apk faq and support contact information<br />
|
62 |
-
Ark survival evolved apk developer studio wildcard website and social media</p>
|
63 |
-
<h3>Feature 2: Tame and breed over 80 dinosaurs</h3>
|
64 |
-
<p>If you love dinosaurs, you will love ARK: Survival Evolved. This game lets you tame and breed over 80 different types of dinosaurs, from the mighty Tyrannosaurus Rex to the adorable Dodo. You can use various methods to capture and tame them, such as knocking them out with tranquilizer darts, feeding them berries or meat, or using special items like kibble or pheromones. Once you have tamed a dinosaur, you can ride it, use it as a mount, a pet, a companion, or a weapon. You can also breed them to create new generations of dinosaurs with improved stats and traits. You can even mutate them to create unique and rare variations.</p>
|
65 |
-
<h3>Feature 3: Craft and build your base</h3>
|
66 |
-
<p>In order to survive in ARK: Survival Evolved, you need to craft and build your base. You can gather resources from the environment, such as wood, stone, metal, fiber, hide, and more. You can use these resources to craft tools, weapons, armor, clothing, and other items that will help you in your journey. You can also use these resources to build structures, such as walls, floors, roofs, doors, windows, fences, ladders, ramps, and more. You can design your base however you want, from a simple hut to a fortified castle. You can also decorate your base with furniture, paintings, trophies, flags, and more.</p>
|
67 |
-
<h3>Feature 4: Join a tribe and cooperate with other players</h3>
|
68 |
-
<p>ARK: Survival Evolved is not only a solo game. You can also play online with other players from around the world. You can join a tribe of up to 10 players and cooperate with them in various ways. You can share resources, items, structures, dinosaurs, and more with your tribe members. You can also chat with them, trade with them, fight with them, or raid with them. You can also join or create a server that suits your preferences, such as PvE, PvP, hardcore, casual, modded, or vanilla. You can also customize the server settings, such as the difficulty level, the day and night cycle, the weather, the spawn rates, and more.</p>
|
69 |
-
<h3>Feature 5: Survive in a harsh environment</h3>
|
70 |
-
<p>ARK: Survival Evolved is not a game for the faint of heart. It is a game that challenges you to survive in a harsh and unforgiving environment. You have to deal with hunger, thirst, temperature, weather, diseases, and predators. You have to eat and drink regularly to maintain your health and stamina. You have to wear appropriate clothing and shelter to protect yourself from the heat or cold. You have to avoid or cure diseases that can affect your performance or even kill you. You have to fight or flee from predators that can attack you at any time. You have to be prepared for anything and everything in ARK: Survival Evolved.</p>
|
71 |
-
<h2>What are the requirements for ARK: Survival Evolved APK?</h2>
|
72 |
-
<p>ARK: Survival Evolved APK is a game that requires a lot of resources and power to run smoothly on your device. It is not a game that you can play on any device. It is a game that you need to have a decent device to enjoy. Here are the minimum and recommended specifications for running ARK: Survival Evolved APK on your device:</p>
|
73 |
-
<table>
|
74 |
-
<tr>
|
75 |
-
<th>Minimum</th>
|
76 |
-
<th>Recommended</th>
|
77 |
-
</tr>
|
78 |
-
<tr>
|
79 |
-
<td>Android 7.0 or higher</td>
|
80 |
-
<td>Android 9.0 or higher</td>
|
81 |
-
</tr>
|
82 |
-
<tr>
|
83 |
-
<td>3 GB of RAM</td>
|
84 |
-
<td>4 GB of RAM or more</td>
|
85 |
-
</tr>
|
86 |
-
<tr>
|
87 |
-
<td>2.4 GHz quad-core processor</td>
|
88 |
-
<td>3.0 GHz octa-core processor or better</td>
|
89 |
-
</tr>
|
90 |
-
<tr>
|
91 |
-
<td>Mali-T760MP4 GPU or equivalent</td>
|
92 |
-
<td>Adreno 530 GPU or higher</td>
|
93 |
-
</tr>
|
94 |
-
<tr>
|
95 |
-
<td>2 GB of free storage space</td>
|
96 |
-
<td>4 GB of free storage space or more</td>
|
97 |
-
</tr>
|
98 |
-
</table>
|
99 |
-
<p>If your device meets these requirements, you should be able to play ARK: Survival Evolved APK without any major issues. However, if your device does not meet these requirements, you might experience lag, crashes, glitches, or errors while playing the game. In that case, you might want to lower the graphics settings, close other apps running in the background, or upgrade your device.</p>
|
100 |
-
<h2>Conclusion</h2>
|
101 |
-
<p>In conclusion, ARK: Survival Evolved APK is an amazing game that offers you a unique and thrilling experience of living in a world full of dinosaurs and other prehistoric creatures. You can explore, hunt, gather, craft, build, tame, breed, and fight your way through a massive open world that is constantly evolving and changing. You can play solo or join a tribe of other players online and cooperate or compete with them in various modes. You can also customize your character, your base, and your dinosaurs to suit your playstyle and preferences.</p>
|
102 |
-
<p>If you are interested in playing this game, you can download its latest version as an APK file from APKPure.com by following the steps we have provided above. This way, you can enjoy the game before it is officially released on the Play Store, access the game even if it is not available in your region or country, save storage space on your device, avoid annoying ads and in-app purchases, and enjoy the game without any distractions or costs.</p>
|
103 |
-
<p>So, what are you waiting for? Download ARK: Survival Evolved APK now and start your adventure in the world of dinosaurs. You won't regret it!</p>
|
104 |
-
<h2>FAQs</h2>
|
105 |
-
<p>Here are some of the frequently asked questions and answers about ARK: Survival Evolved APK:</p>
|
106 |
-
<h3>Q: Is ARK: Survival Evolved APK safe to download and install?</h3>
|
107 |
-
<p>A: Yes, as long as you download the APK file from a reliable and safe source like APKPure.com, you should not have any problems with security or privacy. However, you should always be careful when downloading and installing any app from unknown sources and scan them with an antivirus app before opening them.</p>
|
108 |
-
<h3>Q: Is ARK: Survival Evolved APK free to play?</h3>
|
109 |
-
<p>A: Yes, ARK: Survival Evolved APK is free to play and does not require any subscription or payment to download or play. However, the game does offer some optional in-app purchases that can enhance your gameplay or unlock some premium features. You can choose to buy these items or not depending on your preference.</p>
|
110 |
-
<h3>Q: How can I update ARK: Survival Evolved APK?</h3>
|
111 |
-
<p>A: To update ARK: Survival Evolved APK, you need to download the latest version of the APK file from APKPure.com and install it over the existing one. You do not need to uninstall the previous version or lose your progress. However, you should always backup your data before updating any app to avoid any potential issues.</p>
|
112 |
-
<h3>Q: How can I play ARK: Survival Evolved APK offline?</h3>
|
113 |
-
<p>A: You can play ARK: Survival Evolved APK offline by choosing the single player mode from the main menu. This way, you can enjoy the game without an internet connection or other players. However, you will not be able to access some features or modes that require online connectivity, such as multiplayer, leaderboards, or events.</p>
|
114 |
-
<h3>Q: How can I contact the developers of ARK: Survival Evolved APK?</h3>
|
115 |
-
<p>A: You can contact the developers of ARK: Survival Evolved APK by visiting their official website at <a href="">https://www.playark.com/</a> or their social media pages on Facebook, Twitter, Instagram, YouTube, or Discord. You can also send them an email at <a href="">[email protected]</a> or use the in-game feedback option.</p> 197e85843d<br />
|
116 |
-
<br />
|
117 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Download 3D Interior Design Software and Tools for Professional Results.md
DELETED
@@ -1,108 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>How to Download 3D Interior Models for Your Projects</h1>
|
3 |
-
<p>If you are working on a project that involves interior design, architecture, or visualization, you might want to use 3D interior models to enhance your work. 3D interior models are digital representations of indoor spaces and objects that you can download and use in your software. In this article, we will explain what 3D interior models are, why you need them, where to find them, and how to use them.</p>
|
4 |
-
<h2>download 3d interior</h2><br /><p><b><b>Download File</b> ✶ <a href="https://jinyurl.com/2uNPeY">https://jinyurl.com/2uNPeY</a></b></p><br /><br />
|
5 |
-
<h2>What are 3D Interior Models and Why You Need Them</h2>
|
6 |
-
<h3>3D Interior Models are digital representations of indoor spaces and objects</h3>
|
7 |
-
<p>3D interior models are files that contain the geometry, texture, material, lighting, and other properties of indoor spaces and objects. They can be created using 3D modeling software or scanned from real-life environments. They can range from simple furniture pieces to complex scenes with multiple elements.</p>
|
8 |
-
<h3>3D Interior Models can help you visualize, design, and present your ideas</h3>
|
9 |
-
<p>Using 3D interior models can have many benefits for your projects. For example, you can:</p>
|
10 |
-
<ul>
|
11 |
-
<li>Visualize your ideas in a realistic and immersive way</li>
|
12 |
-
<li>Design and test different layouts, colors, styles, and lighting effects</li>
|
13 |
-
<li>Present your work to clients, collaborators, or audiences in a professional and engaging way</li>
|
14 |
-
<li>Save time and money by avoiding mistakes and rework</li>
|
15 |
-
<li>Learn from other designers and improve your skills</li>
|
16 |
-
</ul>
|
17 |
-
<h2>Where to Find and Download Free 3D Interior Models Online</h2>
|
18 |
-
<p>There are many websites that offer free 3D interior models for download. You can browse through thousands of models in various categories, formats, and quality levels. Here are some of the most popular ones:</p>
|
19 |
-
<h3>Sketchfab</h3>
|
20 |
-
<p>[Sketchfab](^1^) is a platform that lets you upload, view, and download 3D models in your browser. You can find over 18,000 free interior 3D models on Sketchfab, ranging from commercial buildings to cozy bedrooms. You can also interact with the models using VR or AR devices.</p>
|
21 |
-
<h3>CGTrader</h3>
|
22 |
-
<p>[CGTrader](^2^) is a marketplace that connects 3D designers with buyers. You can find over 18,000 free interior 3D models on CGTrader, covering various styles and themes. You can also buy premium models or request custom ones from the community.</p>
|
23 |
-
<h3>3DZIP.ORG</h3>
|
24 |
-
<p>[3DZIP.ORG](^3^) is a website that provides free resources for 3D visualization. You can find over 13,000 free interior 3D models on 3DZIP.ORG, mostly in SketchUp format. You can also download textures, materials, scenes, and tutorials.</p>
|
25 |
-
<p>download 3d interior design software<br />
|
26 |
-
download 3d interior models free<br />
|
27 |
-
download 3d interior rendering software<br />
|
28 |
-
download 3d interior design app<br />
|
29 |
-
download 3d interior scenes for 3ds max<br />
|
30 |
-
download 3d interior design online<br />
|
31 |
-
download 3d interior design programs<br />
|
32 |
-
download 3d interior models for sketchup<br />
|
33 |
-
download 3d interior design software for pc<br />
|
34 |
-
download 3d interior models for blender<br />
|
35 |
-
download 3d interior design software for mac<br />
|
36 |
-
download 3d interior models for maya<br />
|
37 |
-
download 3d interior design software free trial<br />
|
38 |
-
download 3d interior models for revit<br />
|
39 |
-
download 3d interior design software full version<br />
|
40 |
-
download 3d interior models for autocad<br />
|
41 |
-
download 3d interior design software for android<br />
|
42 |
-
download 3d interior models for unity<br />
|
43 |
-
download 3d interior design software for windows 10<br />
|
44 |
-
download 3d interior models for unreal engine<br />
|
45 |
-
download 3d interior design software for beginners<br />
|
46 |
-
download 3d interior models for lumion<br />
|
47 |
-
download 3d interior design software for professionals<br />
|
48 |
-
download 3d interior models for vray<br />
|
49 |
-
download 3d interior design software with crack<br />
|
50 |
-
download 3d interior models for cinema 4d<br />
|
51 |
-
download 3d interior design software reviews<br />
|
52 |
-
download 3d interior models for rhino<br />
|
53 |
-
download 3d interior design software comparison<br />
|
54 |
-
download 3d interior models for blender cycles<br />
|
55 |
-
download 3d interior design software tutorial<br />
|
56 |
-
download 3d interior models for sketchup vray<br />
|
57 |
-
download 3d interior design software best<br />
|
58 |
-
download 3d interior models for blender eevee<br />
|
59 |
-
download 3d interior design software foyr neo<br />
|
60 |
-
download 3d interior models for sketchup pro<br />
|
61 |
-
download 3d interior design software homestyler<br />
|
62 |
-
download 3d interior models for blender free<br />
|
63 |
-
download 3d interior design software homebyme<br />
|
64 |
-
download 3d interior models for sketchup free<br />
|
65 |
-
download 3d interior design software planner 5D <br />
|
66 |
-
download 3d interior models for blender realistic <br />
|
67 |
-
download 3d interior design software roomstyler <br />
|
68 |
-
download 3d interior models for sketchup realistic <br />
|
69 |
-
download 3d interior design software smartdraw <br />
|
70 |
-
download 3d interior models for blender low poly <br />
|
71 |
-
download 3d interior design software roomsketcher <br />
|
72 |
-
download 3d interior models for sketchup low poly <br />
|
73 |
-
download 3d interior design software ikea home planner <br />
|
74 |
-
download 3d interior models for blender game engine</p>
|
75 |
-
<h2>How to Use 3D Interior Models in Your Software</h2>
|
76 |
-
<h3>Importing 3D Interior Models into Your Software</h3>
|
77 |
-
<p>To use 3D interior models in your software, you need to import them first. Depending on the software you use, you might need to convert the model files into compatible formats. Some of the most common formats for 3D interior models are FBX, OBJ, MAX, C4D, SKP, and BLEND. You can use online converters or plugins to convert the files if needed.</p>
|
78 |
-
<h3>Editing and Customizing 3D Interior Models</h3>
|
79 |
-
<p>Once you have imported the 3D interior models into your software, you can edit and customize them according to your needs. You can modify the geometry, texture, material, lighting, and other properties of the models. You can also add or remove elements, combine or split models, and adjust the scale and orientation of the models. You can use the tools and features of your software to make the changes you want.</p>
|
80 |
-
<h3>Rendering and Exporting 3D Interior Models</h3>
|
81 |
-
<p>After you have finished editing and customizing the 3D interior models, you can render and export them for your final output. Rendering is the process of generating realistic images or videos from the 3D models. Exporting is the process of saving the 3D models or the rendered images or videos in a file format that you can use for your project. You can use the settings and options of your software to render and export the 3D interior models.</p>
|
82 |
-
<h2>Conclusion</h2>
|
83 |
-
<p>3D interior models are a great way to enhance your projects that involve interior design, architecture, or visualization. They can help you visualize, design, and present your ideas in a realistic and immersive way. You can find and download free 3D interior models online from various websites, such as Sketchfab, CGTrader, and 3DZIP.ORG. You can also use your software to import, edit, customize, render, and export the 3D interior models for your final output.</p>
|
84 |
-
<h2>FAQs</h2>
|
85 |
-
<h4>What are some of the best software for 3D interior modeling?</h4>
|
86 |
-
<p>Some of the best software for 3D interior modeling are SketchUp, Blender, 3ds Max, Cinema 4D, and Revit. They have powerful tools and features for creating and editing 3D interior models.</p>
|
87 |
-
<h4>How can I learn 3D interior modeling?</h4>
|
88 |
-
<p>You can learn 3D interior modeling by taking online courses, watching tutorials, reading books, or joining communities. You can also practice by following projects or challenges that involve 3D interior modeling.</p>
|
89 |
-
<h4>How much does it cost to download 3D interior models?</h4>
|
90 |
-
<p>It depends on the website and the model you want to download. Some websites offer free 3D interior models that you can download without any charge. Some websites offer premium 3D interior models that you have to pay for. Some websites offer both free and premium 3D interior models that you can choose from.</p>
|
91 |
-
<h4>What are some of the advantages of using 3D interior models?</h4>
|
92 |
-
<p>Some of the advantages of using 3D interior models are:</p>
|
93 |
-
<ul>
|
94 |
-
<li>You can save time and money by avoiding mistakes and rework</li>
|
95 |
-
<li>You can design and test different layouts, colors, styles, and lighting effects</li>
|
96 |
-
<li>You can present your work to clients, collaborators, or audiences in a professional and engaging way</li>
|
97 |
-
<li>You can learn from other designers and improve your skills</li>
|
98 |
-
</ul>
|
99 |
-
<h4>What are some of the challenges of using 3D interior models?</h4>
|
100 |
-
<p>Some of the challenges of using 3D interior models are:</p>
|
101 |
-
<ul>
|
102 |
-
<li>You need to have a compatible software and hardware to use them</li>
|
103 |
-
<li>You need to have some skills and knowledge to create and edit them</li>
|
104 |
-
<li>You need to have a good internet connection to download them</li>
|
105 |
-
<li>You need to respect the license and attribution of the models</li>
|
106 |
-
</ul></p> 197e85843d<br />
|
107 |
-
<br />
|
108 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/2023Liu2023/bingo/src/lib/bots/bing/types.ts
DELETED
@@ -1,259 +0,0 @@
|
|
1 |
-
export type Author = 'user' | 'system' | 'bot'
|
2 |
-
|
3 |
-
export type BotId = 'bing'
|
4 |
-
|
5 |
-
export enum BingConversationStyle {
|
6 |
-
Creative = 'Creative',
|
7 |
-
Balanced = 'Balanced',
|
8 |
-
Precise = 'Precise'
|
9 |
-
}
|
10 |
-
|
11 |
-
export enum ErrorCode {
|
12 |
-
CONVERSATION_LIMIT = 'CONVERSATION_LIMIT',
|
13 |
-
BING_UNAUTHORIZED = 'BING_UNAUTHORIZED',
|
14 |
-
BING_FORBIDDEN = 'BING_FORBIDDEN',
|
15 |
-
BING_CAPTCHA = 'BING_CAPTCHA',
|
16 |
-
THROTTLE_LIMIT = 'THROTTLE_LIMIT',
|
17 |
-
NOTFOUND_ERROR = 'NOT_FOUND_ERROR',
|
18 |
-
UNKOWN_ERROR = 'UNKOWN_ERROR',
|
19 |
-
NETWORK_ERROR = 'NETWORK_ERROR',
|
20 |
-
}
|
21 |
-
|
22 |
-
export class ChatError extends Error {
|
23 |
-
code: ErrorCode
|
24 |
-
constructor(message: string, code: ErrorCode) {
|
25 |
-
super(message)
|
26 |
-
this.code = code
|
27 |
-
}
|
28 |
-
}
|
29 |
-
|
30 |
-
export type ChatMessageModel = {
|
31 |
-
id: string
|
32 |
-
author: Author
|
33 |
-
text: string
|
34 |
-
error?: ChatError
|
35 |
-
throttling?: Throttling
|
36 |
-
sourceAttributions?: SourceAttribution[]
|
37 |
-
suggestedResponses?: SuggestedResponse[]
|
38 |
-
}
|
39 |
-
|
40 |
-
export interface ConversationModel {
|
41 |
-
messages: ChatMessageModel[]
|
42 |
-
}
|
43 |
-
|
44 |
-
export type Event =
|
45 |
-
| {
|
46 |
-
type: 'UPDATE_ANSWER'
|
47 |
-
data: {
|
48 |
-
text: string
|
49 |
-
spokenText?: string
|
50 |
-
sourceAttributions?: SourceAttribution[]
|
51 |
-
suggestedResponses?: SuggestedResponse[]
|
52 |
-
throttling?: Throttling
|
53 |
-
}
|
54 |
-
}
|
55 |
-
| {
|
56 |
-
type: 'DONE'
|
57 |
-
}
|
58 |
-
| {
|
59 |
-
type: 'ERROR'
|
60 |
-
error: ChatError
|
61 |
-
}
|
62 |
-
|
63 |
-
export interface SendMessageParams<T> {
|
64 |
-
prompt: string
|
65 |
-
imageUrl?: string
|
66 |
-
options: T
|
67 |
-
onEvent: (event: Event) => void
|
68 |
-
signal?: AbortSignal
|
69 |
-
}
|
70 |
-
|
71 |
-
export interface ConversationResponse {
|
72 |
-
conversationId: string
|
73 |
-
clientId: string
|
74 |
-
conversationSignature: string
|
75 |
-
result: {
|
76 |
-
value: string
|
77 |
-
message?: string
|
78 |
-
}
|
79 |
-
}
|
80 |
-
|
81 |
-
export interface Telemetry {
|
82 |
-
metrics?: null
|
83 |
-
startTime: string
|
84 |
-
}
|
85 |
-
|
86 |
-
export interface ChatUpdateArgument {
|
87 |
-
messages?: ChatResponseMessage[]
|
88 |
-
throttling?: Throttling
|
89 |
-
requestId: string
|
90 |
-
result: null
|
91 |
-
}
|
92 |
-
|
93 |
-
export type ChatUpdateCompleteResponse = {
|
94 |
-
type: 2
|
95 |
-
invocationId: string
|
96 |
-
item: ChatResponseItem
|
97 |
-
} | {
|
98 |
-
type: 1
|
99 |
-
target: string
|
100 |
-
arguments: ChatUpdateArgument[]
|
101 |
-
} | {
|
102 |
-
type: 3
|
103 |
-
invocationId: string
|
104 |
-
} | {
|
105 |
-
type: 6 | 7
|
106 |
-
}
|
107 |
-
|
108 |
-
export interface ChatRequestResult {
|
109 |
-
value: string
|
110 |
-
serviceVersion: string
|
111 |
-
error?: string
|
112 |
-
}
|
113 |
-
|
114 |
-
export interface ChatResponseItem {
|
115 |
-
messages: ChatResponseMessage[]
|
116 |
-
firstNewMessageIndex: number
|
117 |
-
suggestedResponses: null
|
118 |
-
conversationId: string
|
119 |
-
requestId: string
|
120 |
-
conversationExpiryTime: string
|
121 |
-
telemetry: Telemetry
|
122 |
-
result: ChatRequestResult
|
123 |
-
throttling: Throttling
|
124 |
-
}
|
125 |
-
export enum InvocationEventType {
|
126 |
-
Invocation = 1,
|
127 |
-
StreamItem = 2,
|
128 |
-
Completion = 3,
|
129 |
-
StreamInvocation = 4,
|
130 |
-
CancelInvocation = 5,
|
131 |
-
Ping = 6,
|
132 |
-
Close = 7,
|
133 |
-
}
|
134 |
-
|
135 |
-
// https://github.com/bytemate/bingchat-api/blob/main/src/lib.ts
|
136 |
-
|
137 |
-
export interface ConversationInfo {
|
138 |
-
conversationId: string
|
139 |
-
clientId: string
|
140 |
-
conversationSignature: string
|
141 |
-
invocationId: number
|
142 |
-
conversationStyle: BingConversationStyle
|
143 |
-
prompt: string
|
144 |
-
imageUrl?: string
|
145 |
-
}
|
146 |
-
|
147 |
-
export interface BingChatResponse {
|
148 |
-
conversationSignature: string
|
149 |
-
conversationId: string
|
150 |
-
clientId: string
|
151 |
-
invocationId: number
|
152 |
-
conversationExpiryTime: Date
|
153 |
-
response: string
|
154 |
-
details: ChatResponseMessage
|
155 |
-
}
|
156 |
-
|
157 |
-
export interface Throttling {
|
158 |
-
maxNumLongDocSummaryUserMessagesInConversation: number
|
159 |
-
maxNumUserMessagesInConversation: number
|
160 |
-
numLongDocSummaryUserMessagesInConversation: number
|
161 |
-
numUserMessagesInConversation: number
|
162 |
-
}
|
163 |
-
|
164 |
-
export interface ChatResponseMessage {
|
165 |
-
text: string
|
166 |
-
spokenText?: string
|
167 |
-
author: string
|
168 |
-
createdAt: Date
|
169 |
-
timestamp: Date
|
170 |
-
messageId: string
|
171 |
-
requestId: string
|
172 |
-
offense: string
|
173 |
-
adaptiveCards: AdaptiveCard[]
|
174 |
-
sourceAttributions: SourceAttribution[]
|
175 |
-
feedback: Feedback
|
176 |
-
contentOrigin: string
|
177 |
-
messageType?: string
|
178 |
-
contentType?: string
|
179 |
-
privacy: null
|
180 |
-
suggestedResponses: SuggestedResponse[]
|
181 |
-
}
|
182 |
-
|
183 |
-
export interface AdaptiveCard {
|
184 |
-
type: string
|
185 |
-
version: string
|
186 |
-
body: Body[]
|
187 |
-
}
|
188 |
-
|
189 |
-
export interface Body {
|
190 |
-
type: string
|
191 |
-
text: string
|
192 |
-
wrap: boolean
|
193 |
-
size?: string
|
194 |
-
}
|
195 |
-
|
196 |
-
export interface Feedback {
|
197 |
-
tag: null
|
198 |
-
updatedOn: null
|
199 |
-
type: string
|
200 |
-
}
|
201 |
-
|
202 |
-
export interface SourceAttribution {
|
203 |
-
providerDisplayName: string
|
204 |
-
seeMoreUrl: string
|
205 |
-
searchQuery: string
|
206 |
-
}
|
207 |
-
|
208 |
-
export interface SuggestedResponse {
|
209 |
-
text: string
|
210 |
-
author?: Author
|
211 |
-
createdAt?: Date
|
212 |
-
timestamp?: Date
|
213 |
-
messageId?: string
|
214 |
-
messageType?: string
|
215 |
-
offense?: string
|
216 |
-
feedback?: Feedback
|
217 |
-
contentOrigin?: string
|
218 |
-
privacy?: null
|
219 |
-
}
|
220 |
-
|
221 |
-
export interface KBlobRequest {
|
222 |
-
knowledgeRequest: KnowledgeRequestContext
|
223 |
-
imageBase64?: string
|
224 |
-
}
|
225 |
-
|
226 |
-
export interface KBlobResponse {
|
227 |
-
blobId: string
|
228 |
-
processedBlobId?: string
|
229 |
-
}
|
230 |
-
|
231 |
-
export interface KnowledgeRequestContext {
|
232 |
-
imageInfo: ImageInfo;
|
233 |
-
knowledgeRequest: KnowledgeRequest;
|
234 |
-
}
|
235 |
-
|
236 |
-
export interface ImageInfo {
|
237 |
-
url?: string;
|
238 |
-
}
|
239 |
-
|
240 |
-
export interface KnowledgeRequest {
|
241 |
-
invokedSkills: string[];
|
242 |
-
subscriptionId: string;
|
243 |
-
invokedSkillsRequestData: InvokedSkillsRequestData;
|
244 |
-
convoData: ConvoData;
|
245 |
-
}
|
246 |
-
|
247 |
-
export interface ConvoData {
|
248 |
-
convoid: string;
|
249 |
-
convotone: BingConversationStyle;
|
250 |
-
}
|
251 |
-
|
252 |
-
export interface InvokedSkillsRequestData {
|
253 |
-
enableFaceBlur: boolean;
|
254 |
-
}
|
255 |
-
|
256 |
-
export interface FileItem {
|
257 |
-
url: string;
|
258 |
-
status?: 'loading' | 'error' | 'loaded'
|
259 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/2kaara/oreo/Dockerfile
DELETED
@@ -1,21 +0,0 @@
|
|
1 |
-
FROM node:18-bullseye-slim
|
2 |
-
|
3 |
-
RUN apt-get update && \
|
4 |
-
|
5 |
-
apt-get install -y git
|
6 |
-
|
7 |
-
RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app
|
8 |
-
|
9 |
-
WORKDIR /app
|
10 |
-
|
11 |
-
RUN npm install
|
12 |
-
|
13 |
-
COPY Dockerfile greeting.md* .env* ./
|
14 |
-
|
15 |
-
RUN npm run build
|
16 |
-
|
17 |
-
EXPOSE 7860
|
18 |
-
|
19 |
-
ENV NODE_ENV=production
|
20 |
-
|
21 |
-
CMD [ "npm", "start" ]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIConsultant/MusicGen/audiocraft/data/__init__.py
DELETED
@@ -1,10 +0,0 @@
|
|
1 |
-
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
2 |
-
# All rights reserved.
|
3 |
-
#
|
4 |
-
# This source code is licensed under the license found in the
|
5 |
-
# LICENSE file in the root directory of this source tree.
|
6 |
-
"""Audio loading and writing support. Datasets for raw audio
|
7 |
-
or also including some metadata."""
|
8 |
-
|
9 |
-
# flake8: noqa
|
10 |
-
from . import audio, audio_dataset, info_audio_dataset, music_dataset, sound_dataset
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-210e_deepfashion2_sling_dress_256x192/td_hm_res50_4xb64-210e_deepfashion2_sling_dress_256x192.py
DELETED
@@ -1,2861 +0,0 @@
|
|
1 |
-
default_scope = 'mmpose'
|
2 |
-
default_hooks = dict(
|
3 |
-
timer=dict(type='IterTimerHook'),
|
4 |
-
logger=dict(type='LoggerHook', interval=50),
|
5 |
-
param_scheduler=dict(type='ParamSchedulerHook'),
|
6 |
-
checkpoint=dict(
|
7 |
-
type='CheckpointHook', interval=10, save_best='PCK', rule='greater'),
|
8 |
-
sampler_seed=dict(type='DistSamplerSeedHook'),
|
9 |
-
visualization=dict(type='PoseVisualizationHook', enable=False))
|
10 |
-
custom_hooks = [dict(type='SyncBuffersHook')]
|
11 |
-
env_cfg = dict(
|
12 |
-
cudnn_benchmark=False,
|
13 |
-
mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
|
14 |
-
dist_cfg=dict(backend='nccl'))
|
15 |
-
vis_backends = [dict(type='LocalVisBackend')]
|
16 |
-
visualizer = dict(
|
17 |
-
type='PoseLocalVisualizer',
|
18 |
-
vis_backends=[dict(type='LocalVisBackend'),
|
19 |
-
dict(type='WandbVisBackend')],
|
20 |
-
name='visualizer')
|
21 |
-
log_processor = dict(
|
22 |
-
type='LogProcessor', window_size=50, by_epoch=True, num_digits=6)
|
23 |
-
log_level = 'INFO'
|
24 |
-
load_from = None
|
25 |
-
resume = False
|
26 |
-
backend_args = dict(backend='local')
|
27 |
-
train_cfg = dict(by_epoch=True, max_epochs=210, val_interval=10)
|
28 |
-
val_cfg = dict()
|
29 |
-
test_cfg = dict()
|
30 |
-
colors = dict(
|
31 |
-
sss=[255, 128, 0],
|
32 |
-
lss=[255, 0, 128],
|
33 |
-
sso=[128, 0, 255],
|
34 |
-
lso=[0, 128, 255],
|
35 |
-
vest=[0, 128, 128],
|
36 |
-
sling=[0, 0, 128],
|
37 |
-
shorts=[128, 128, 128],
|
38 |
-
trousers=[128, 0, 128],
|
39 |
-
skirt=[64, 128, 128],
|
40 |
-
ssd=[64, 64, 128],
|
41 |
-
lsd=[128, 64, 0],
|
42 |
-
vd=[128, 64, 255],
|
43 |
-
sd=[128, 64, 0])
|
44 |
-
dataset_info = dict(
|
45 |
-
dataset_name='deepfashion2',
|
46 |
-
paper_info=dict(
|
47 |
-
author=
|
48 |
-
'Yuying Ge and Ruimao Zhang and Lingyun Wu and Xiaogang Wang and Xiaoou Tang and Ping Luo',
|
49 |
-
title=
|
50 |
-
'DeepFashion2: A Versatile Benchmark for Detection, Pose Estimation, Segmentation and Re-Identification of Clothing Images',
|
51 |
-
container=
|
52 |
-
'Proceedings of IEEE Conference on Computer Vision and Pattern Recognition (CVPR)',
|
53 |
-
year='2019',
|
54 |
-
homepage='https://github.com/switchablenorms/DeepFashion2'),
|
55 |
-
keypoint_info=dict({
|
56 |
-
0:
|
57 |
-
dict(name='sss_kpt1', id=0, color=[255, 128, 0], type='', swap=''),
|
58 |
-
1:
|
59 |
-
dict(
|
60 |
-
name='sss_kpt2',
|
61 |
-
id=1,
|
62 |
-
color=[255, 128, 0],
|
63 |
-
type='',
|
64 |
-
swap='sss_kpt6'),
|
65 |
-
2:
|
66 |
-
dict(
|
67 |
-
name='sss_kpt3',
|
68 |
-
id=2,
|
69 |
-
color=[255, 128, 0],
|
70 |
-
type='',
|
71 |
-
swap='sss_kpt5'),
|
72 |
-
3:
|
73 |
-
dict(name='sss_kpt4', id=3, color=[255, 128, 0], type='', swap=''),
|
74 |
-
4:
|
75 |
-
dict(
|
76 |
-
name='sss_kpt5',
|
77 |
-
id=4,
|
78 |
-
color=[255, 128, 0],
|
79 |
-
type='',
|
80 |
-
swap='sss_kpt3'),
|
81 |
-
5:
|
82 |
-
dict(
|
83 |
-
name='sss_kpt6',
|
84 |
-
id=5,
|
85 |
-
color=[255, 128, 0],
|
86 |
-
type='',
|
87 |
-
swap='sss_kpt2'),
|
88 |
-
6:
|
89 |
-
dict(
|
90 |
-
name='sss_kpt7',
|
91 |
-
id=6,
|
92 |
-
color=[255, 128, 0],
|
93 |
-
type='',
|
94 |
-
swap='sss_kpt25'),
|
95 |
-
7:
|
96 |
-
dict(
|
97 |
-
name='sss_kpt8',
|
98 |
-
id=7,
|
99 |
-
color=[255, 128, 0],
|
100 |
-
type='',
|
101 |
-
swap='sss_kpt24'),
|
102 |
-
8:
|
103 |
-
dict(
|
104 |
-
name='sss_kpt9',
|
105 |
-
id=8,
|
106 |
-
color=[255, 128, 0],
|
107 |
-
type='',
|
108 |
-
swap='sss_kpt23'),
|
109 |
-
9:
|
110 |
-
dict(
|
111 |
-
name='sss_kpt10',
|
112 |
-
id=9,
|
113 |
-
color=[255, 128, 0],
|
114 |
-
type='',
|
115 |
-
swap='sss_kpt22'),
|
116 |
-
10:
|
117 |
-
dict(
|
118 |
-
name='sss_kpt11',
|
119 |
-
id=10,
|
120 |
-
color=[255, 128, 0],
|
121 |
-
type='',
|
122 |
-
swap='sss_kpt21'),
|
123 |
-
11:
|
124 |
-
dict(
|
125 |
-
name='sss_kpt12',
|
126 |
-
id=11,
|
127 |
-
color=[255, 128, 0],
|
128 |
-
type='',
|
129 |
-
swap='sss_kpt20'),
|
130 |
-
12:
|
131 |
-
dict(
|
132 |
-
name='sss_kpt13',
|
133 |
-
id=12,
|
134 |
-
color=[255, 128, 0],
|
135 |
-
type='',
|
136 |
-
swap='sss_kpt19'),
|
137 |
-
13:
|
138 |
-
dict(
|
139 |
-
name='sss_kpt14',
|
140 |
-
id=13,
|
141 |
-
color=[255, 128, 0],
|
142 |
-
type='',
|
143 |
-
swap='sss_kpt18'),
|
144 |
-
14:
|
145 |
-
dict(
|
146 |
-
name='sss_kpt15',
|
147 |
-
id=14,
|
148 |
-
color=[255, 128, 0],
|
149 |
-
type='',
|
150 |
-
swap='sss_kpt17'),
|
151 |
-
15:
|
152 |
-
dict(name='sss_kpt16', id=15, color=[255, 128, 0], type='', swap=''),
|
153 |
-
16:
|
154 |
-
dict(
|
155 |
-
name='sss_kpt17',
|
156 |
-
id=16,
|
157 |
-
color=[255, 128, 0],
|
158 |
-
type='',
|
159 |
-
swap='sss_kpt15'),
|
160 |
-
17:
|
161 |
-
dict(
|
162 |
-
name='sss_kpt18',
|
163 |
-
id=17,
|
164 |
-
color=[255, 128, 0],
|
165 |
-
type='',
|
166 |
-
swap='sss_kpt14'),
|
167 |
-
18:
|
168 |
-
dict(
|
169 |
-
name='sss_kpt19',
|
170 |
-
id=18,
|
171 |
-
color=[255, 128, 0],
|
172 |
-
type='',
|
173 |
-
swap='sss_kpt13'),
|
174 |
-
19:
|
175 |
-
dict(
|
176 |
-
name='sss_kpt20',
|
177 |
-
id=19,
|
178 |
-
color=[255, 128, 0],
|
179 |
-
type='',
|
180 |
-
swap='sss_kpt12'),
|
181 |
-
20:
|
182 |
-
dict(
|
183 |
-
name='sss_kpt21',
|
184 |
-
id=20,
|
185 |
-
color=[255, 128, 0],
|
186 |
-
type='',
|
187 |
-
swap='sss_kpt11'),
|
188 |
-
21:
|
189 |
-
dict(
|
190 |
-
name='sss_kpt22',
|
191 |
-
id=21,
|
192 |
-
color=[255, 128, 0],
|
193 |
-
type='',
|
194 |
-
swap='sss_kpt10'),
|
195 |
-
22:
|
196 |
-
dict(
|
197 |
-
name='sss_kpt23',
|
198 |
-
id=22,
|
199 |
-
color=[255, 128, 0],
|
200 |
-
type='',
|
201 |
-
swap='sss_kpt9'),
|
202 |
-
23:
|
203 |
-
dict(
|
204 |
-
name='sss_kpt24',
|
205 |
-
id=23,
|
206 |
-
color=[255, 128, 0],
|
207 |
-
type='',
|
208 |
-
swap='sss_kpt8'),
|
209 |
-
24:
|
210 |
-
dict(
|
211 |
-
name='sss_kpt25',
|
212 |
-
id=24,
|
213 |
-
color=[255, 128, 0],
|
214 |
-
type='',
|
215 |
-
swap='sss_kpt7'),
|
216 |
-
25:
|
217 |
-
dict(name='lss_kpt1', id=25, color=[255, 0, 128], type='', swap=''),
|
218 |
-
26:
|
219 |
-
dict(
|
220 |
-
name='lss_kpt2',
|
221 |
-
id=26,
|
222 |
-
color=[255, 0, 128],
|
223 |
-
type='',
|
224 |
-
swap='lss_kpt6'),
|
225 |
-
27:
|
226 |
-
dict(
|
227 |
-
name='lss_kpt3',
|
228 |
-
id=27,
|
229 |
-
color=[255, 0, 128],
|
230 |
-
type='',
|
231 |
-
swap='lss_kpt5'),
|
232 |
-
28:
|
233 |
-
dict(name='lss_kpt4', id=28, color=[255, 0, 128], type='', swap=''),
|
234 |
-
29:
|
235 |
-
dict(
|
236 |
-
name='lss_kpt5',
|
237 |
-
id=29,
|
238 |
-
color=[255, 0, 128],
|
239 |
-
type='',
|
240 |
-
swap='lss_kpt3'),
|
241 |
-
30:
|
242 |
-
dict(
|
243 |
-
name='lss_kpt6',
|
244 |
-
id=30,
|
245 |
-
color=[255, 0, 128],
|
246 |
-
type='',
|
247 |
-
swap='lss_kpt2'),
|
248 |
-
31:
|
249 |
-
dict(
|
250 |
-
name='lss_kpt7',
|
251 |
-
id=31,
|
252 |
-
color=[255, 0, 128],
|
253 |
-
type='',
|
254 |
-
swap='lss_kpt33'),
|
255 |
-
32:
|
256 |
-
dict(
|
257 |
-
name='lss_kpt8',
|
258 |
-
id=32,
|
259 |
-
color=[255, 0, 128],
|
260 |
-
type='',
|
261 |
-
swap='lss_kpt32'),
|
262 |
-
33:
|
263 |
-
dict(
|
264 |
-
name='lss_kpt9',
|
265 |
-
id=33,
|
266 |
-
color=[255, 0, 128],
|
267 |
-
type='',
|
268 |
-
swap='lss_kpt31'),
|
269 |
-
34:
|
270 |
-
dict(
|
271 |
-
name='lss_kpt10',
|
272 |
-
id=34,
|
273 |
-
color=[255, 0, 128],
|
274 |
-
type='',
|
275 |
-
swap='lss_kpt30'),
|
276 |
-
35:
|
277 |
-
dict(
|
278 |
-
name='lss_kpt11',
|
279 |
-
id=35,
|
280 |
-
color=[255, 0, 128],
|
281 |
-
type='',
|
282 |
-
swap='lss_kpt29'),
|
283 |
-
36:
|
284 |
-
dict(
|
285 |
-
name='lss_kpt12',
|
286 |
-
id=36,
|
287 |
-
color=[255, 0, 128],
|
288 |
-
type='',
|
289 |
-
swap='lss_kpt28'),
|
290 |
-
37:
|
291 |
-
dict(
|
292 |
-
name='lss_kpt13',
|
293 |
-
id=37,
|
294 |
-
color=[255, 0, 128],
|
295 |
-
type='',
|
296 |
-
swap='lss_kpt27'),
|
297 |
-
38:
|
298 |
-
dict(
|
299 |
-
name='lss_kpt14',
|
300 |
-
id=38,
|
301 |
-
color=[255, 0, 128],
|
302 |
-
type='',
|
303 |
-
swap='lss_kpt26'),
|
304 |
-
39:
|
305 |
-
dict(
|
306 |
-
name='lss_kpt15',
|
307 |
-
id=39,
|
308 |
-
color=[255, 0, 128],
|
309 |
-
type='',
|
310 |
-
swap='lss_kpt25'),
|
311 |
-
40:
|
312 |
-
dict(
|
313 |
-
name='lss_kpt16',
|
314 |
-
id=40,
|
315 |
-
color=[255, 0, 128],
|
316 |
-
type='',
|
317 |
-
swap='lss_kpt24'),
|
318 |
-
41:
|
319 |
-
dict(
|
320 |
-
name='lss_kpt17',
|
321 |
-
id=41,
|
322 |
-
color=[255, 0, 128],
|
323 |
-
type='',
|
324 |
-
swap='lss_kpt23'),
|
325 |
-
42:
|
326 |
-
dict(
|
327 |
-
name='lss_kpt18',
|
328 |
-
id=42,
|
329 |
-
color=[255, 0, 128],
|
330 |
-
type='',
|
331 |
-
swap='lss_kpt22'),
|
332 |
-
43:
|
333 |
-
dict(
|
334 |
-
name='lss_kpt19',
|
335 |
-
id=43,
|
336 |
-
color=[255, 0, 128],
|
337 |
-
type='',
|
338 |
-
swap='lss_kpt21'),
|
339 |
-
44:
|
340 |
-
dict(name='lss_kpt20', id=44, color=[255, 0, 128], type='', swap=''),
|
341 |
-
45:
|
342 |
-
dict(
|
343 |
-
name='lss_kpt21',
|
344 |
-
id=45,
|
345 |
-
color=[255, 0, 128],
|
346 |
-
type='',
|
347 |
-
swap='lss_kpt19'),
|
348 |
-
46:
|
349 |
-
dict(
|
350 |
-
name='lss_kpt22',
|
351 |
-
id=46,
|
352 |
-
color=[255, 0, 128],
|
353 |
-
type='',
|
354 |
-
swap='lss_kpt18'),
|
355 |
-
47:
|
356 |
-
dict(
|
357 |
-
name='lss_kpt23',
|
358 |
-
id=47,
|
359 |
-
color=[255, 0, 128],
|
360 |
-
type='',
|
361 |
-
swap='lss_kpt17'),
|
362 |
-
48:
|
363 |
-
dict(
|
364 |
-
name='lss_kpt24',
|
365 |
-
id=48,
|
366 |
-
color=[255, 0, 128],
|
367 |
-
type='',
|
368 |
-
swap='lss_kpt16'),
|
369 |
-
49:
|
370 |
-
dict(
|
371 |
-
name='lss_kpt25',
|
372 |
-
id=49,
|
373 |
-
color=[255, 0, 128],
|
374 |
-
type='',
|
375 |
-
swap='lss_kpt15'),
|
376 |
-
50:
|
377 |
-
dict(
|
378 |
-
name='lss_kpt26',
|
379 |
-
id=50,
|
380 |
-
color=[255, 0, 128],
|
381 |
-
type='',
|
382 |
-
swap='lss_kpt14'),
|
383 |
-
51:
|
384 |
-
dict(
|
385 |
-
name='lss_kpt27',
|
386 |
-
id=51,
|
387 |
-
color=[255, 0, 128],
|
388 |
-
type='',
|
389 |
-
swap='lss_kpt13'),
|
390 |
-
52:
|
391 |
-
dict(
|
392 |
-
name='lss_kpt28',
|
393 |
-
id=52,
|
394 |
-
color=[255, 0, 128],
|
395 |
-
type='',
|
396 |
-
swap='lss_kpt12'),
|
397 |
-
53:
|
398 |
-
dict(
|
399 |
-
name='lss_kpt29',
|
400 |
-
id=53,
|
401 |
-
color=[255, 0, 128],
|
402 |
-
type='',
|
403 |
-
swap='lss_kpt11'),
|
404 |
-
54:
|
405 |
-
dict(
|
406 |
-
name='lss_kpt30',
|
407 |
-
id=54,
|
408 |
-
color=[255, 0, 128],
|
409 |
-
type='',
|
410 |
-
swap='lss_kpt10'),
|
411 |
-
55:
|
412 |
-
dict(
|
413 |
-
name='lss_kpt31',
|
414 |
-
id=55,
|
415 |
-
color=[255, 0, 128],
|
416 |
-
type='',
|
417 |
-
swap='lss_kpt9'),
|
418 |
-
56:
|
419 |
-
dict(
|
420 |
-
name='lss_kpt32',
|
421 |
-
id=56,
|
422 |
-
color=[255, 0, 128],
|
423 |
-
type='',
|
424 |
-
swap='lss_kpt8'),
|
425 |
-
57:
|
426 |
-
dict(
|
427 |
-
name='lss_kpt33',
|
428 |
-
id=57,
|
429 |
-
color=[255, 0, 128],
|
430 |
-
type='',
|
431 |
-
swap='lss_kpt7'),
|
432 |
-
58:
|
433 |
-
dict(name='sso_kpt1', id=58, color=[128, 0, 255], type='', swap=''),
|
434 |
-
59:
|
435 |
-
dict(
|
436 |
-
name='sso_kpt2',
|
437 |
-
id=59,
|
438 |
-
color=[128, 0, 255],
|
439 |
-
type='',
|
440 |
-
swap='sso_kpt26'),
|
441 |
-
60:
|
442 |
-
dict(
|
443 |
-
name='sso_kpt3',
|
444 |
-
id=60,
|
445 |
-
color=[128, 0, 255],
|
446 |
-
type='',
|
447 |
-
swap='sso_kpt5'),
|
448 |
-
61:
|
449 |
-
dict(
|
450 |
-
name='sso_kpt4',
|
451 |
-
id=61,
|
452 |
-
color=[128, 0, 255],
|
453 |
-
type='',
|
454 |
-
swap='sso_kpt6'),
|
455 |
-
62:
|
456 |
-
dict(
|
457 |
-
name='sso_kpt5',
|
458 |
-
id=62,
|
459 |
-
color=[128, 0, 255],
|
460 |
-
type='',
|
461 |
-
swap='sso_kpt3'),
|
462 |
-
63:
|
463 |
-
dict(
|
464 |
-
name='sso_kpt6',
|
465 |
-
id=63,
|
466 |
-
color=[128, 0, 255],
|
467 |
-
type='',
|
468 |
-
swap='sso_kpt4'),
|
469 |
-
64:
|
470 |
-
dict(
|
471 |
-
name='sso_kpt7',
|
472 |
-
id=64,
|
473 |
-
color=[128, 0, 255],
|
474 |
-
type='',
|
475 |
-
swap='sso_kpt25'),
|
476 |
-
65:
|
477 |
-
dict(
|
478 |
-
name='sso_kpt8',
|
479 |
-
id=65,
|
480 |
-
color=[128, 0, 255],
|
481 |
-
type='',
|
482 |
-
swap='sso_kpt24'),
|
483 |
-
66:
|
484 |
-
dict(
|
485 |
-
name='sso_kpt9',
|
486 |
-
id=66,
|
487 |
-
color=[128, 0, 255],
|
488 |
-
type='',
|
489 |
-
swap='sso_kpt23'),
|
490 |
-
67:
|
491 |
-
dict(
|
492 |
-
name='sso_kpt10',
|
493 |
-
id=67,
|
494 |
-
color=[128, 0, 255],
|
495 |
-
type='',
|
496 |
-
swap='sso_kpt22'),
|
497 |
-
68:
|
498 |
-
dict(
|
499 |
-
name='sso_kpt11',
|
500 |
-
id=68,
|
501 |
-
color=[128, 0, 255],
|
502 |
-
type='',
|
503 |
-
swap='sso_kpt21'),
|
504 |
-
69:
|
505 |
-
dict(
|
506 |
-
name='sso_kpt12',
|
507 |
-
id=69,
|
508 |
-
color=[128, 0, 255],
|
509 |
-
type='',
|
510 |
-
swap='sso_kpt20'),
|
511 |
-
70:
|
512 |
-
dict(
|
513 |
-
name='sso_kpt13',
|
514 |
-
id=70,
|
515 |
-
color=[128, 0, 255],
|
516 |
-
type='',
|
517 |
-
swap='sso_kpt19'),
|
518 |
-
71:
|
519 |
-
dict(
|
520 |
-
name='sso_kpt14',
|
521 |
-
id=71,
|
522 |
-
color=[128, 0, 255],
|
523 |
-
type='',
|
524 |
-
swap='sso_kpt18'),
|
525 |
-
72:
|
526 |
-
dict(
|
527 |
-
name='sso_kpt15',
|
528 |
-
id=72,
|
529 |
-
color=[128, 0, 255],
|
530 |
-
type='',
|
531 |
-
swap='sso_kpt17'),
|
532 |
-
73:
|
533 |
-
dict(
|
534 |
-
name='sso_kpt16',
|
535 |
-
id=73,
|
536 |
-
color=[128, 0, 255],
|
537 |
-
type='',
|
538 |
-
swap='sso_kpt29'),
|
539 |
-
74:
|
540 |
-
dict(
|
541 |
-
name='sso_kpt17',
|
542 |
-
id=74,
|
543 |
-
color=[128, 0, 255],
|
544 |
-
type='',
|
545 |
-
swap='sso_kpt15'),
|
546 |
-
75:
|
547 |
-
dict(
|
548 |
-
name='sso_kpt18',
|
549 |
-
id=75,
|
550 |
-
color=[128, 0, 255],
|
551 |
-
type='',
|
552 |
-
swap='sso_kpt14'),
|
553 |
-
76:
|
554 |
-
dict(
|
555 |
-
name='sso_kpt19',
|
556 |
-
id=76,
|
557 |
-
color=[128, 0, 255],
|
558 |
-
type='',
|
559 |
-
swap='sso_kpt13'),
|
560 |
-
77:
|
561 |
-
dict(
|
562 |
-
name='sso_kpt20',
|
563 |
-
id=77,
|
564 |
-
color=[128, 0, 255],
|
565 |
-
type='',
|
566 |
-
swap='sso_kpt12'),
|
567 |
-
78:
|
568 |
-
dict(
|
569 |
-
name='sso_kpt21',
|
570 |
-
id=78,
|
571 |
-
color=[128, 0, 255],
|
572 |
-
type='',
|
573 |
-
swap='sso_kpt11'),
|
574 |
-
79:
|
575 |
-
dict(
|
576 |
-
name='sso_kpt22',
|
577 |
-
id=79,
|
578 |
-
color=[128, 0, 255],
|
579 |
-
type='',
|
580 |
-
swap='sso_kpt10'),
|
581 |
-
80:
|
582 |
-
dict(
|
583 |
-
name='sso_kpt23',
|
584 |
-
id=80,
|
585 |
-
color=[128, 0, 255],
|
586 |
-
type='',
|
587 |
-
swap='sso_kpt9'),
|
588 |
-
81:
|
589 |
-
dict(
|
590 |
-
name='sso_kpt24',
|
591 |
-
id=81,
|
592 |
-
color=[128, 0, 255],
|
593 |
-
type='',
|
594 |
-
swap='sso_kpt8'),
|
595 |
-
82:
|
596 |
-
dict(
|
597 |
-
name='sso_kpt25',
|
598 |
-
id=82,
|
599 |
-
color=[128, 0, 255],
|
600 |
-
type='',
|
601 |
-
swap='sso_kpt7'),
|
602 |
-
83:
|
603 |
-
dict(
|
604 |
-
name='sso_kpt26',
|
605 |
-
id=83,
|
606 |
-
color=[128, 0, 255],
|
607 |
-
type='',
|
608 |
-
swap='sso_kpt2'),
|
609 |
-
84:
|
610 |
-
dict(
|
611 |
-
name='sso_kpt27',
|
612 |
-
id=84,
|
613 |
-
color=[128, 0, 255],
|
614 |
-
type='',
|
615 |
-
swap='sso_kpt30'),
|
616 |
-
85:
|
617 |
-
dict(
|
618 |
-
name='sso_kpt28',
|
619 |
-
id=85,
|
620 |
-
color=[128, 0, 255],
|
621 |
-
type='',
|
622 |
-
swap='sso_kpt31'),
|
623 |
-
86:
|
624 |
-
dict(
|
625 |
-
name='sso_kpt29',
|
626 |
-
id=86,
|
627 |
-
color=[128, 0, 255],
|
628 |
-
type='',
|
629 |
-
swap='sso_kpt16'),
|
630 |
-
87:
|
631 |
-
dict(
|
632 |
-
name='sso_kpt30',
|
633 |
-
id=87,
|
634 |
-
color=[128, 0, 255],
|
635 |
-
type='',
|
636 |
-
swap='sso_kpt27'),
|
637 |
-
88:
|
638 |
-
dict(
|
639 |
-
name='sso_kpt31',
|
640 |
-
id=88,
|
641 |
-
color=[128, 0, 255],
|
642 |
-
type='',
|
643 |
-
swap='sso_kpt28'),
|
644 |
-
89:
|
645 |
-
dict(name='lso_kpt1', id=89, color=[0, 128, 255], type='', swap=''),
|
646 |
-
90:
|
647 |
-
dict(
|
648 |
-
name='lso_kpt2',
|
649 |
-
id=90,
|
650 |
-
color=[0, 128, 255],
|
651 |
-
type='',
|
652 |
-
swap='lso_kpt6'),
|
653 |
-
91:
|
654 |
-
dict(
|
655 |
-
name='lso_kpt3',
|
656 |
-
id=91,
|
657 |
-
color=[0, 128, 255],
|
658 |
-
type='',
|
659 |
-
swap='lso_kpt5'),
|
660 |
-
92:
|
661 |
-
dict(
|
662 |
-
name='lso_kpt4',
|
663 |
-
id=92,
|
664 |
-
color=[0, 128, 255],
|
665 |
-
type='',
|
666 |
-
swap='lso_kpt34'),
|
667 |
-
93:
|
668 |
-
dict(
|
669 |
-
name='lso_kpt5',
|
670 |
-
id=93,
|
671 |
-
color=[0, 128, 255],
|
672 |
-
type='',
|
673 |
-
swap='lso_kpt3'),
|
674 |
-
94:
|
675 |
-
dict(
|
676 |
-
name='lso_kpt6',
|
677 |
-
id=94,
|
678 |
-
color=[0, 128, 255],
|
679 |
-
type='',
|
680 |
-
swap='lso_kpt2'),
|
681 |
-
95:
|
682 |
-
dict(
|
683 |
-
name='lso_kpt7',
|
684 |
-
id=95,
|
685 |
-
color=[0, 128, 255],
|
686 |
-
type='',
|
687 |
-
swap='lso_kpt33'),
|
688 |
-
96:
|
689 |
-
dict(
|
690 |
-
name='lso_kpt8',
|
691 |
-
id=96,
|
692 |
-
color=[0, 128, 255],
|
693 |
-
type='',
|
694 |
-
swap='lso_kpt32'),
|
695 |
-
97:
|
696 |
-
dict(
|
697 |
-
name='lso_kpt9',
|
698 |
-
id=97,
|
699 |
-
color=[0, 128, 255],
|
700 |
-
type='',
|
701 |
-
swap='lso_kpt31'),
|
702 |
-
98:
|
703 |
-
dict(
|
704 |
-
name='lso_kpt10',
|
705 |
-
id=98,
|
706 |
-
color=[0, 128, 255],
|
707 |
-
type='',
|
708 |
-
swap='lso_kpt30'),
|
709 |
-
99:
|
710 |
-
dict(
|
711 |
-
name='lso_kpt11',
|
712 |
-
id=99,
|
713 |
-
color=[0, 128, 255],
|
714 |
-
type='',
|
715 |
-
swap='lso_kpt29'),
|
716 |
-
100:
|
717 |
-
dict(
|
718 |
-
name='lso_kpt12',
|
719 |
-
id=100,
|
720 |
-
color=[0, 128, 255],
|
721 |
-
type='',
|
722 |
-
swap='lso_kpt28'),
|
723 |
-
101:
|
724 |
-
dict(
|
725 |
-
name='lso_kpt13',
|
726 |
-
id=101,
|
727 |
-
color=[0, 128, 255],
|
728 |
-
type='',
|
729 |
-
swap='lso_kpt27'),
|
730 |
-
102:
|
731 |
-
dict(
|
732 |
-
name='lso_kpt14',
|
733 |
-
id=102,
|
734 |
-
color=[0, 128, 255],
|
735 |
-
type='',
|
736 |
-
swap='lso_kpt26'),
|
737 |
-
103:
|
738 |
-
dict(
|
739 |
-
name='lso_kpt15',
|
740 |
-
id=103,
|
741 |
-
color=[0, 128, 255],
|
742 |
-
type='',
|
743 |
-
swap='lso_kpt25'),
|
744 |
-
104:
|
745 |
-
dict(
|
746 |
-
name='lso_kpt16',
|
747 |
-
id=104,
|
748 |
-
color=[0, 128, 255],
|
749 |
-
type='',
|
750 |
-
swap='lso_kpt24'),
|
751 |
-
105:
|
752 |
-
dict(
|
753 |
-
name='lso_kpt17',
|
754 |
-
id=105,
|
755 |
-
color=[0, 128, 255],
|
756 |
-
type='',
|
757 |
-
swap='lso_kpt23'),
|
758 |
-
106:
|
759 |
-
dict(
|
760 |
-
name='lso_kpt18',
|
761 |
-
id=106,
|
762 |
-
color=[0, 128, 255],
|
763 |
-
type='',
|
764 |
-
swap='lso_kpt22'),
|
765 |
-
107:
|
766 |
-
dict(
|
767 |
-
name='lso_kpt19',
|
768 |
-
id=107,
|
769 |
-
color=[0, 128, 255],
|
770 |
-
type='',
|
771 |
-
swap='lso_kpt21'),
|
772 |
-
108:
|
773 |
-
dict(
|
774 |
-
name='lso_kpt20',
|
775 |
-
id=108,
|
776 |
-
color=[0, 128, 255],
|
777 |
-
type='',
|
778 |
-
swap='lso_kpt37'),
|
779 |
-
109:
|
780 |
-
dict(
|
781 |
-
name='lso_kpt21',
|
782 |
-
id=109,
|
783 |
-
color=[0, 128, 255],
|
784 |
-
type='',
|
785 |
-
swap='lso_kpt19'),
|
786 |
-
110:
|
787 |
-
dict(
|
788 |
-
name='lso_kpt22',
|
789 |
-
id=110,
|
790 |
-
color=[0, 128, 255],
|
791 |
-
type='',
|
792 |
-
swap='lso_kpt18'),
|
793 |
-
111:
|
794 |
-
dict(
|
795 |
-
name='lso_kpt23',
|
796 |
-
id=111,
|
797 |
-
color=[0, 128, 255],
|
798 |
-
type='',
|
799 |
-
swap='lso_kpt17'),
|
800 |
-
112:
|
801 |
-
dict(
|
802 |
-
name='lso_kpt24',
|
803 |
-
id=112,
|
804 |
-
color=[0, 128, 255],
|
805 |
-
type='',
|
806 |
-
swap='lso_kpt16'),
|
807 |
-
113:
|
808 |
-
dict(
|
809 |
-
name='lso_kpt25',
|
810 |
-
id=113,
|
811 |
-
color=[0, 128, 255],
|
812 |
-
type='',
|
813 |
-
swap='lso_kpt15'),
|
814 |
-
114:
|
815 |
-
dict(
|
816 |
-
name='lso_kpt26',
|
817 |
-
id=114,
|
818 |
-
color=[0, 128, 255],
|
819 |
-
type='',
|
820 |
-
swap='lso_kpt14'),
|
821 |
-
115:
|
822 |
-
dict(
|
823 |
-
name='lso_kpt27',
|
824 |
-
id=115,
|
825 |
-
color=[0, 128, 255],
|
826 |
-
type='',
|
827 |
-
swap='lso_kpt13'),
|
828 |
-
116:
|
829 |
-
dict(
|
830 |
-
name='lso_kpt28',
|
831 |
-
id=116,
|
832 |
-
color=[0, 128, 255],
|
833 |
-
type='',
|
834 |
-
swap='lso_kpt12'),
|
835 |
-
117:
|
836 |
-
dict(
|
837 |
-
name='lso_kpt29',
|
838 |
-
id=117,
|
839 |
-
color=[0, 128, 255],
|
840 |
-
type='',
|
841 |
-
swap='lso_kpt11'),
|
842 |
-
118:
|
843 |
-
dict(
|
844 |
-
name='lso_kpt30',
|
845 |
-
id=118,
|
846 |
-
color=[0, 128, 255],
|
847 |
-
type='',
|
848 |
-
swap='lso_kpt10'),
|
849 |
-
119:
|
850 |
-
dict(
|
851 |
-
name='lso_kpt31',
|
852 |
-
id=119,
|
853 |
-
color=[0, 128, 255],
|
854 |
-
type='',
|
855 |
-
swap='lso_kpt9'),
|
856 |
-
120:
|
857 |
-
dict(
|
858 |
-
name='lso_kpt32',
|
859 |
-
id=120,
|
860 |
-
color=[0, 128, 255],
|
861 |
-
type='',
|
862 |
-
swap='lso_kpt8'),
|
863 |
-
121:
|
864 |
-
dict(
|
865 |
-
name='lso_kpt33',
|
866 |
-
id=121,
|
867 |
-
color=[0, 128, 255],
|
868 |
-
type='',
|
869 |
-
swap='lso_kpt7'),
|
870 |
-
122:
|
871 |
-
dict(
|
872 |
-
name='lso_kpt34',
|
873 |
-
id=122,
|
874 |
-
color=[0, 128, 255],
|
875 |
-
type='',
|
876 |
-
swap='lso_kpt4'),
|
877 |
-
123:
|
878 |
-
dict(
|
879 |
-
name='lso_kpt35',
|
880 |
-
id=123,
|
881 |
-
color=[0, 128, 255],
|
882 |
-
type='',
|
883 |
-
swap='lso_kpt38'),
|
884 |
-
124:
|
885 |
-
dict(
|
886 |
-
name='lso_kpt36',
|
887 |
-
id=124,
|
888 |
-
color=[0, 128, 255],
|
889 |
-
type='',
|
890 |
-
swap='lso_kpt39'),
|
891 |
-
125:
|
892 |
-
dict(
|
893 |
-
name='lso_kpt37',
|
894 |
-
id=125,
|
895 |
-
color=[0, 128, 255],
|
896 |
-
type='',
|
897 |
-
swap='lso_kpt20'),
|
898 |
-
126:
|
899 |
-
dict(
|
900 |
-
name='lso_kpt38',
|
901 |
-
id=126,
|
902 |
-
color=[0, 128, 255],
|
903 |
-
type='',
|
904 |
-
swap='lso_kpt35'),
|
905 |
-
127:
|
906 |
-
dict(
|
907 |
-
name='lso_kpt39',
|
908 |
-
id=127,
|
909 |
-
color=[0, 128, 255],
|
910 |
-
type='',
|
911 |
-
swap='lso_kpt36'),
|
912 |
-
128:
|
913 |
-
dict(name='vest_kpt1', id=128, color=[0, 128, 128], type='', swap=''),
|
914 |
-
129:
|
915 |
-
dict(
|
916 |
-
name='vest_kpt2',
|
917 |
-
id=129,
|
918 |
-
color=[0, 128, 128],
|
919 |
-
type='',
|
920 |
-
swap='vest_kpt6'),
|
921 |
-
130:
|
922 |
-
dict(
|
923 |
-
name='vest_kpt3',
|
924 |
-
id=130,
|
925 |
-
color=[0, 128, 128],
|
926 |
-
type='',
|
927 |
-
swap='vest_kpt5'),
|
928 |
-
131:
|
929 |
-
dict(name='vest_kpt4', id=131, color=[0, 128, 128], type='', swap=''),
|
930 |
-
132:
|
931 |
-
dict(
|
932 |
-
name='vest_kpt5',
|
933 |
-
id=132,
|
934 |
-
color=[0, 128, 128],
|
935 |
-
type='',
|
936 |
-
swap='vest_kpt3'),
|
937 |
-
133:
|
938 |
-
dict(
|
939 |
-
name='vest_kpt6',
|
940 |
-
id=133,
|
941 |
-
color=[0, 128, 128],
|
942 |
-
type='',
|
943 |
-
swap='vest_kpt2'),
|
944 |
-
134:
|
945 |
-
dict(
|
946 |
-
name='vest_kpt7',
|
947 |
-
id=134,
|
948 |
-
color=[0, 128, 128],
|
949 |
-
type='',
|
950 |
-
swap='vest_kpt15'),
|
951 |
-
135:
|
952 |
-
dict(
|
953 |
-
name='vest_kpt8',
|
954 |
-
id=135,
|
955 |
-
color=[0, 128, 128],
|
956 |
-
type='',
|
957 |
-
swap='vest_kpt14'),
|
958 |
-
136:
|
959 |
-
dict(
|
960 |
-
name='vest_kpt9',
|
961 |
-
id=136,
|
962 |
-
color=[0, 128, 128],
|
963 |
-
type='',
|
964 |
-
swap='vest_kpt13'),
|
965 |
-
137:
|
966 |
-
dict(
|
967 |
-
name='vest_kpt10',
|
968 |
-
id=137,
|
969 |
-
color=[0, 128, 128],
|
970 |
-
type='',
|
971 |
-
swap='vest_kpt12'),
|
972 |
-
138:
|
973 |
-
dict(name='vest_kpt11', id=138, color=[0, 128, 128], type='', swap=''),
|
974 |
-
139:
|
975 |
-
dict(
|
976 |
-
name='vest_kpt12',
|
977 |
-
id=139,
|
978 |
-
color=[0, 128, 128],
|
979 |
-
type='',
|
980 |
-
swap='vest_kpt10'),
|
981 |
-
140:
|
982 |
-
dict(name='vest_kpt13', id=140, color=[0, 128, 128], type='', swap=''),
|
983 |
-
141:
|
984 |
-
dict(
|
985 |
-
name='vest_kpt14',
|
986 |
-
id=141,
|
987 |
-
color=[0, 128, 128],
|
988 |
-
type='',
|
989 |
-
swap='vest_kpt8'),
|
990 |
-
142:
|
991 |
-
dict(
|
992 |
-
name='vest_kpt15',
|
993 |
-
id=142,
|
994 |
-
color=[0, 128, 128],
|
995 |
-
type='',
|
996 |
-
swap='vest_kpt7'),
|
997 |
-
143:
|
998 |
-
dict(name='sling_kpt1', id=143, color=[0, 0, 128], type='', swap=''),
|
999 |
-
144:
|
1000 |
-
dict(
|
1001 |
-
name='sling_kpt2',
|
1002 |
-
id=144,
|
1003 |
-
color=[0, 0, 128],
|
1004 |
-
type='',
|
1005 |
-
swap='sling_kpt6'),
|
1006 |
-
145:
|
1007 |
-
dict(
|
1008 |
-
name='sling_kpt3',
|
1009 |
-
id=145,
|
1010 |
-
color=[0, 0, 128],
|
1011 |
-
type='',
|
1012 |
-
swap='sling_kpt5'),
|
1013 |
-
146:
|
1014 |
-
dict(name='sling_kpt4', id=146, color=[0, 0, 128], type='', swap=''),
|
1015 |
-
147:
|
1016 |
-
dict(
|
1017 |
-
name='sling_kpt5',
|
1018 |
-
id=147,
|
1019 |
-
color=[0, 0, 128],
|
1020 |
-
type='',
|
1021 |
-
swap='sling_kpt3'),
|
1022 |
-
148:
|
1023 |
-
dict(
|
1024 |
-
name='sling_kpt6',
|
1025 |
-
id=148,
|
1026 |
-
color=[0, 0, 128],
|
1027 |
-
type='',
|
1028 |
-
swap='sling_kpt2'),
|
1029 |
-
149:
|
1030 |
-
dict(
|
1031 |
-
name='sling_kpt7',
|
1032 |
-
id=149,
|
1033 |
-
color=[0, 0, 128],
|
1034 |
-
type='',
|
1035 |
-
swap='sling_kpt15'),
|
1036 |
-
150:
|
1037 |
-
dict(
|
1038 |
-
name='sling_kpt8',
|
1039 |
-
id=150,
|
1040 |
-
color=[0, 0, 128],
|
1041 |
-
type='',
|
1042 |
-
swap='sling_kpt14'),
|
1043 |
-
151:
|
1044 |
-
dict(
|
1045 |
-
name='sling_kpt9',
|
1046 |
-
id=151,
|
1047 |
-
color=[0, 0, 128],
|
1048 |
-
type='',
|
1049 |
-
swap='sling_kpt13'),
|
1050 |
-
152:
|
1051 |
-
dict(
|
1052 |
-
name='sling_kpt10',
|
1053 |
-
id=152,
|
1054 |
-
color=[0, 0, 128],
|
1055 |
-
type='',
|
1056 |
-
swap='sling_kpt12'),
|
1057 |
-
153:
|
1058 |
-
dict(name='sling_kpt11', id=153, color=[0, 0, 128], type='', swap=''),
|
1059 |
-
154:
|
1060 |
-
dict(
|
1061 |
-
name='sling_kpt12',
|
1062 |
-
id=154,
|
1063 |
-
color=[0, 0, 128],
|
1064 |
-
type='',
|
1065 |
-
swap='sling_kpt10'),
|
1066 |
-
155:
|
1067 |
-
dict(
|
1068 |
-
name='sling_kpt13',
|
1069 |
-
id=155,
|
1070 |
-
color=[0, 0, 128],
|
1071 |
-
type='',
|
1072 |
-
swap='sling_kpt9'),
|
1073 |
-
156:
|
1074 |
-
dict(
|
1075 |
-
name='sling_kpt14',
|
1076 |
-
id=156,
|
1077 |
-
color=[0, 0, 128],
|
1078 |
-
type='',
|
1079 |
-
swap='sling_kpt8'),
|
1080 |
-
157:
|
1081 |
-
dict(
|
1082 |
-
name='sling_kpt15',
|
1083 |
-
id=157,
|
1084 |
-
color=[0, 0, 128],
|
1085 |
-
type='',
|
1086 |
-
swap='sling_kpt7'),
|
1087 |
-
158:
|
1088 |
-
dict(
|
1089 |
-
name='shorts_kpt1',
|
1090 |
-
id=158,
|
1091 |
-
color=[128, 128, 128],
|
1092 |
-
type='',
|
1093 |
-
swap='shorts_kpt3'),
|
1094 |
-
159:
|
1095 |
-
dict(
|
1096 |
-
name='shorts_kpt2',
|
1097 |
-
id=159,
|
1098 |
-
color=[128, 128, 128],
|
1099 |
-
type='',
|
1100 |
-
swap=''),
|
1101 |
-
160:
|
1102 |
-
dict(
|
1103 |
-
name='shorts_kpt3',
|
1104 |
-
id=160,
|
1105 |
-
color=[128, 128, 128],
|
1106 |
-
type='',
|
1107 |
-
swap='shorts_kpt1'),
|
1108 |
-
161:
|
1109 |
-
dict(
|
1110 |
-
name='shorts_kpt4',
|
1111 |
-
id=161,
|
1112 |
-
color=[128, 128, 128],
|
1113 |
-
type='',
|
1114 |
-
swap='shorts_kpt10'),
|
1115 |
-
162:
|
1116 |
-
dict(
|
1117 |
-
name='shorts_kpt5',
|
1118 |
-
id=162,
|
1119 |
-
color=[128, 128, 128],
|
1120 |
-
type='',
|
1121 |
-
swap='shorts_kpt9'),
|
1122 |
-
163:
|
1123 |
-
dict(
|
1124 |
-
name='shorts_kpt6',
|
1125 |
-
id=163,
|
1126 |
-
color=[128, 128, 128],
|
1127 |
-
type='',
|
1128 |
-
swap='shorts_kpt8'),
|
1129 |
-
164:
|
1130 |
-
dict(
|
1131 |
-
name='shorts_kpt7',
|
1132 |
-
id=164,
|
1133 |
-
color=[128, 128, 128],
|
1134 |
-
type='',
|
1135 |
-
swap=''),
|
1136 |
-
165:
|
1137 |
-
dict(
|
1138 |
-
name='shorts_kpt8',
|
1139 |
-
id=165,
|
1140 |
-
color=[128, 128, 128],
|
1141 |
-
type='',
|
1142 |
-
swap='shorts_kpt6'),
|
1143 |
-
166:
|
1144 |
-
dict(
|
1145 |
-
name='shorts_kpt9',
|
1146 |
-
id=166,
|
1147 |
-
color=[128, 128, 128],
|
1148 |
-
type='',
|
1149 |
-
swap='shorts_kpt5'),
|
1150 |
-
167:
|
1151 |
-
dict(
|
1152 |
-
name='shorts_kpt10',
|
1153 |
-
id=167,
|
1154 |
-
color=[128, 128, 128],
|
1155 |
-
type='',
|
1156 |
-
swap='shorts_kpt4'),
|
1157 |
-
168:
|
1158 |
-
dict(
|
1159 |
-
name='trousers_kpt1',
|
1160 |
-
id=168,
|
1161 |
-
color=[128, 0, 128],
|
1162 |
-
type='',
|
1163 |
-
swap='trousers_kpt3'),
|
1164 |
-
169:
|
1165 |
-
dict(
|
1166 |
-
name='trousers_kpt2',
|
1167 |
-
id=169,
|
1168 |
-
color=[128, 0, 128],
|
1169 |
-
type='',
|
1170 |
-
swap=''),
|
1171 |
-
170:
|
1172 |
-
dict(
|
1173 |
-
name='trousers_kpt3',
|
1174 |
-
id=170,
|
1175 |
-
color=[128, 0, 128],
|
1176 |
-
type='',
|
1177 |
-
swap='trousers_kpt1'),
|
1178 |
-
171:
|
1179 |
-
dict(
|
1180 |
-
name='trousers_kpt4',
|
1181 |
-
id=171,
|
1182 |
-
color=[128, 0, 128],
|
1183 |
-
type='',
|
1184 |
-
swap='trousers_kpt14'),
|
1185 |
-
172:
|
1186 |
-
dict(
|
1187 |
-
name='trousers_kpt5',
|
1188 |
-
id=172,
|
1189 |
-
color=[128, 0, 128],
|
1190 |
-
type='',
|
1191 |
-
swap='trousers_kpt13'),
|
1192 |
-
173:
|
1193 |
-
dict(
|
1194 |
-
name='trousers_kpt6',
|
1195 |
-
id=173,
|
1196 |
-
color=[128, 0, 128],
|
1197 |
-
type='',
|
1198 |
-
swap='trousers_kpt12'),
|
1199 |
-
174:
|
1200 |
-
dict(
|
1201 |
-
name='trousers_kpt7',
|
1202 |
-
id=174,
|
1203 |
-
color=[128, 0, 128],
|
1204 |
-
type='',
|
1205 |
-
swap='trousers_kpt11'),
|
1206 |
-
175:
|
1207 |
-
dict(
|
1208 |
-
name='trousers_kpt8',
|
1209 |
-
id=175,
|
1210 |
-
color=[128, 0, 128],
|
1211 |
-
type='',
|
1212 |
-
swap='trousers_kpt10'),
|
1213 |
-
176:
|
1214 |
-
dict(
|
1215 |
-
name='trousers_kpt9',
|
1216 |
-
id=176,
|
1217 |
-
color=[128, 0, 128],
|
1218 |
-
type='',
|
1219 |
-
swap=''),
|
1220 |
-
177:
|
1221 |
-
dict(
|
1222 |
-
name='trousers_kpt10',
|
1223 |
-
id=177,
|
1224 |
-
color=[128, 0, 128],
|
1225 |
-
type='',
|
1226 |
-
swap='trousers_kpt8'),
|
1227 |
-
178:
|
1228 |
-
dict(
|
1229 |
-
name='trousers_kpt11',
|
1230 |
-
id=178,
|
1231 |
-
color=[128, 0, 128],
|
1232 |
-
type='',
|
1233 |
-
swap='trousers_kpt7'),
|
1234 |
-
179:
|
1235 |
-
dict(
|
1236 |
-
name='trousers_kpt12',
|
1237 |
-
id=179,
|
1238 |
-
color=[128, 0, 128],
|
1239 |
-
type='',
|
1240 |
-
swap='trousers_kpt6'),
|
1241 |
-
180:
|
1242 |
-
dict(
|
1243 |
-
name='trousers_kpt13',
|
1244 |
-
id=180,
|
1245 |
-
color=[128, 0, 128],
|
1246 |
-
type='',
|
1247 |
-
swap='trousers_kpt5'),
|
1248 |
-
181:
|
1249 |
-
dict(
|
1250 |
-
name='trousers_kpt14',
|
1251 |
-
id=181,
|
1252 |
-
color=[128, 0, 128],
|
1253 |
-
type='',
|
1254 |
-
swap='trousers_kpt4'),
|
1255 |
-
182:
|
1256 |
-
dict(
|
1257 |
-
name='skirt_kpt1',
|
1258 |
-
id=182,
|
1259 |
-
color=[64, 128, 128],
|
1260 |
-
type='',
|
1261 |
-
swap='skirt_kpt3'),
|
1262 |
-
183:
|
1263 |
-
dict(
|
1264 |
-
name='skirt_kpt2', id=183, color=[64, 128, 128], type='', swap=''),
|
1265 |
-
184:
|
1266 |
-
dict(
|
1267 |
-
name='skirt_kpt3',
|
1268 |
-
id=184,
|
1269 |
-
color=[64, 128, 128],
|
1270 |
-
type='',
|
1271 |
-
swap='skirt_kpt1'),
|
1272 |
-
185:
|
1273 |
-
dict(
|
1274 |
-
name='skirt_kpt4',
|
1275 |
-
id=185,
|
1276 |
-
color=[64, 128, 128],
|
1277 |
-
type='',
|
1278 |
-
swap='skirt_kpt8'),
|
1279 |
-
186:
|
1280 |
-
dict(
|
1281 |
-
name='skirt_kpt5',
|
1282 |
-
id=186,
|
1283 |
-
color=[64, 128, 128],
|
1284 |
-
type='',
|
1285 |
-
swap='skirt_kpt7'),
|
1286 |
-
187:
|
1287 |
-
dict(
|
1288 |
-
name='skirt_kpt6', id=187, color=[64, 128, 128], type='', swap=''),
|
1289 |
-
188:
|
1290 |
-
dict(
|
1291 |
-
name='skirt_kpt7',
|
1292 |
-
id=188,
|
1293 |
-
color=[64, 128, 128],
|
1294 |
-
type='',
|
1295 |
-
swap='skirt_kpt5'),
|
1296 |
-
189:
|
1297 |
-
dict(
|
1298 |
-
name='skirt_kpt8',
|
1299 |
-
id=189,
|
1300 |
-
color=[64, 128, 128],
|
1301 |
-
type='',
|
1302 |
-
swap='skirt_kpt4'),
|
1303 |
-
190:
|
1304 |
-
dict(name='ssd_kpt1', id=190, color=[64, 64, 128], type='', swap=''),
|
1305 |
-
191:
|
1306 |
-
dict(
|
1307 |
-
name='ssd_kpt2',
|
1308 |
-
id=191,
|
1309 |
-
color=[64, 64, 128],
|
1310 |
-
type='',
|
1311 |
-
swap='ssd_kpt6'),
|
1312 |
-
192:
|
1313 |
-
dict(
|
1314 |
-
name='ssd_kpt3',
|
1315 |
-
id=192,
|
1316 |
-
color=[64, 64, 128],
|
1317 |
-
type='',
|
1318 |
-
swap='ssd_kpt5'),
|
1319 |
-
193:
|
1320 |
-
dict(name='ssd_kpt4', id=193, color=[64, 64, 128], type='', swap=''),
|
1321 |
-
194:
|
1322 |
-
dict(
|
1323 |
-
name='ssd_kpt5',
|
1324 |
-
id=194,
|
1325 |
-
color=[64, 64, 128],
|
1326 |
-
type='',
|
1327 |
-
swap='ssd_kpt3'),
|
1328 |
-
195:
|
1329 |
-
dict(
|
1330 |
-
name='ssd_kpt6',
|
1331 |
-
id=195,
|
1332 |
-
color=[64, 64, 128],
|
1333 |
-
type='',
|
1334 |
-
swap='ssd_kpt2'),
|
1335 |
-
196:
|
1336 |
-
dict(
|
1337 |
-
name='ssd_kpt7',
|
1338 |
-
id=196,
|
1339 |
-
color=[64, 64, 128],
|
1340 |
-
type='',
|
1341 |
-
swap='ssd_kpt29'),
|
1342 |
-
197:
|
1343 |
-
dict(
|
1344 |
-
name='ssd_kpt8',
|
1345 |
-
id=197,
|
1346 |
-
color=[64, 64, 128],
|
1347 |
-
type='',
|
1348 |
-
swap='ssd_kpt28'),
|
1349 |
-
198:
|
1350 |
-
dict(
|
1351 |
-
name='ssd_kpt9',
|
1352 |
-
id=198,
|
1353 |
-
color=[64, 64, 128],
|
1354 |
-
type='',
|
1355 |
-
swap='ssd_kpt27'),
|
1356 |
-
199:
|
1357 |
-
dict(
|
1358 |
-
name='ssd_kpt10',
|
1359 |
-
id=199,
|
1360 |
-
color=[64, 64, 128],
|
1361 |
-
type='',
|
1362 |
-
swap='ssd_kpt26'),
|
1363 |
-
200:
|
1364 |
-
dict(
|
1365 |
-
name='ssd_kpt11',
|
1366 |
-
id=200,
|
1367 |
-
color=[64, 64, 128],
|
1368 |
-
type='',
|
1369 |
-
swap='ssd_kpt25'),
|
1370 |
-
201:
|
1371 |
-
dict(
|
1372 |
-
name='ssd_kpt12',
|
1373 |
-
id=201,
|
1374 |
-
color=[64, 64, 128],
|
1375 |
-
type='',
|
1376 |
-
swap='ssd_kpt24'),
|
1377 |
-
202:
|
1378 |
-
dict(
|
1379 |
-
name='ssd_kpt13',
|
1380 |
-
id=202,
|
1381 |
-
color=[64, 64, 128],
|
1382 |
-
type='',
|
1383 |
-
swap='ssd_kpt23'),
|
1384 |
-
203:
|
1385 |
-
dict(
|
1386 |
-
name='ssd_kpt14',
|
1387 |
-
id=203,
|
1388 |
-
color=[64, 64, 128],
|
1389 |
-
type='',
|
1390 |
-
swap='ssd_kpt22'),
|
1391 |
-
204:
|
1392 |
-
dict(
|
1393 |
-
name='ssd_kpt15',
|
1394 |
-
id=204,
|
1395 |
-
color=[64, 64, 128],
|
1396 |
-
type='',
|
1397 |
-
swap='ssd_kpt21'),
|
1398 |
-
205:
|
1399 |
-
dict(
|
1400 |
-
name='ssd_kpt16',
|
1401 |
-
id=205,
|
1402 |
-
color=[64, 64, 128],
|
1403 |
-
type='',
|
1404 |
-
swap='ssd_kpt20'),
|
1405 |
-
206:
|
1406 |
-
dict(
|
1407 |
-
name='ssd_kpt17',
|
1408 |
-
id=206,
|
1409 |
-
color=[64, 64, 128],
|
1410 |
-
type='',
|
1411 |
-
swap='ssd_kpt19'),
|
1412 |
-
207:
|
1413 |
-
dict(name='ssd_kpt18', id=207, color=[64, 64, 128], type='', swap=''),
|
1414 |
-
208:
|
1415 |
-
dict(
|
1416 |
-
name='ssd_kpt19',
|
1417 |
-
id=208,
|
1418 |
-
color=[64, 64, 128],
|
1419 |
-
type='',
|
1420 |
-
swap='ssd_kpt17'),
|
1421 |
-
209:
|
1422 |
-
dict(
|
1423 |
-
name='ssd_kpt20',
|
1424 |
-
id=209,
|
1425 |
-
color=[64, 64, 128],
|
1426 |
-
type='',
|
1427 |
-
swap='ssd_kpt16'),
|
1428 |
-
210:
|
1429 |
-
dict(
|
1430 |
-
name='ssd_kpt21',
|
1431 |
-
id=210,
|
1432 |
-
color=[64, 64, 128],
|
1433 |
-
type='',
|
1434 |
-
swap='ssd_kpt15'),
|
1435 |
-
211:
|
1436 |
-
dict(
|
1437 |
-
name='ssd_kpt22',
|
1438 |
-
id=211,
|
1439 |
-
color=[64, 64, 128],
|
1440 |
-
type='',
|
1441 |
-
swap='ssd_kpt14'),
|
1442 |
-
212:
|
1443 |
-
dict(
|
1444 |
-
name='ssd_kpt23',
|
1445 |
-
id=212,
|
1446 |
-
color=[64, 64, 128],
|
1447 |
-
type='',
|
1448 |
-
swap='ssd_kpt13'),
|
1449 |
-
213:
|
1450 |
-
dict(
|
1451 |
-
name='ssd_kpt24',
|
1452 |
-
id=213,
|
1453 |
-
color=[64, 64, 128],
|
1454 |
-
type='',
|
1455 |
-
swap='ssd_kpt12'),
|
1456 |
-
214:
|
1457 |
-
dict(
|
1458 |
-
name='ssd_kpt25',
|
1459 |
-
id=214,
|
1460 |
-
color=[64, 64, 128],
|
1461 |
-
type='',
|
1462 |
-
swap='ssd_kpt11'),
|
1463 |
-
215:
|
1464 |
-
dict(
|
1465 |
-
name='ssd_kpt26',
|
1466 |
-
id=215,
|
1467 |
-
color=[64, 64, 128],
|
1468 |
-
type='',
|
1469 |
-
swap='ssd_kpt10'),
|
1470 |
-
216:
|
1471 |
-
dict(
|
1472 |
-
name='ssd_kpt27',
|
1473 |
-
id=216,
|
1474 |
-
color=[64, 64, 128],
|
1475 |
-
type='',
|
1476 |
-
swap='ssd_kpt9'),
|
1477 |
-
217:
|
1478 |
-
dict(
|
1479 |
-
name='ssd_kpt28',
|
1480 |
-
id=217,
|
1481 |
-
color=[64, 64, 128],
|
1482 |
-
type='',
|
1483 |
-
swap='ssd_kpt8'),
|
1484 |
-
218:
|
1485 |
-
dict(
|
1486 |
-
name='ssd_kpt29',
|
1487 |
-
id=218,
|
1488 |
-
color=[64, 64, 128],
|
1489 |
-
type='',
|
1490 |
-
swap='ssd_kpt7'),
|
1491 |
-
219:
|
1492 |
-
dict(name='lsd_kpt1', id=219, color=[128, 64, 0], type='', swap=''),
|
1493 |
-
220:
|
1494 |
-
dict(
|
1495 |
-
name='lsd_kpt2',
|
1496 |
-
id=220,
|
1497 |
-
color=[128, 64, 0],
|
1498 |
-
type='',
|
1499 |
-
swap='lsd_kpt6'),
|
1500 |
-
221:
|
1501 |
-
dict(
|
1502 |
-
name='lsd_kpt3',
|
1503 |
-
id=221,
|
1504 |
-
color=[128, 64, 0],
|
1505 |
-
type='',
|
1506 |
-
swap='lsd_kpt5'),
|
1507 |
-
222:
|
1508 |
-
dict(name='lsd_kpt4', id=222, color=[128, 64, 0], type='', swap=''),
|
1509 |
-
223:
|
1510 |
-
dict(
|
1511 |
-
name='lsd_kpt5',
|
1512 |
-
id=223,
|
1513 |
-
color=[128, 64, 0],
|
1514 |
-
type='',
|
1515 |
-
swap='lsd_kpt3'),
|
1516 |
-
224:
|
1517 |
-
dict(
|
1518 |
-
name='lsd_kpt6',
|
1519 |
-
id=224,
|
1520 |
-
color=[128, 64, 0],
|
1521 |
-
type='',
|
1522 |
-
swap='lsd_kpt2'),
|
1523 |
-
225:
|
1524 |
-
dict(
|
1525 |
-
name='lsd_kpt7',
|
1526 |
-
id=225,
|
1527 |
-
color=[128, 64, 0],
|
1528 |
-
type='',
|
1529 |
-
swap='lsd_kpt37'),
|
1530 |
-
226:
|
1531 |
-
dict(
|
1532 |
-
name='lsd_kpt8',
|
1533 |
-
id=226,
|
1534 |
-
color=[128, 64, 0],
|
1535 |
-
type='',
|
1536 |
-
swap='lsd_kpt36'),
|
1537 |
-
227:
|
1538 |
-
dict(
|
1539 |
-
name='lsd_kpt9',
|
1540 |
-
id=227,
|
1541 |
-
color=[128, 64, 0],
|
1542 |
-
type='',
|
1543 |
-
swap='lsd_kpt35'),
|
1544 |
-
228:
|
1545 |
-
dict(
|
1546 |
-
name='lsd_kpt10',
|
1547 |
-
id=228,
|
1548 |
-
color=[128, 64, 0],
|
1549 |
-
type='',
|
1550 |
-
swap='lsd_kpt34'),
|
1551 |
-
229:
|
1552 |
-
dict(
|
1553 |
-
name='lsd_kpt11',
|
1554 |
-
id=229,
|
1555 |
-
color=[128, 64, 0],
|
1556 |
-
type='',
|
1557 |
-
swap='lsd_kpt33'),
|
1558 |
-
230:
|
1559 |
-
dict(
|
1560 |
-
name='lsd_kpt12',
|
1561 |
-
id=230,
|
1562 |
-
color=[128, 64, 0],
|
1563 |
-
type='',
|
1564 |
-
swap='lsd_kpt32'),
|
1565 |
-
231:
|
1566 |
-
dict(
|
1567 |
-
name='lsd_kpt13',
|
1568 |
-
id=231,
|
1569 |
-
color=[128, 64, 0],
|
1570 |
-
type='',
|
1571 |
-
swap='lsd_kpt31'),
|
1572 |
-
232:
|
1573 |
-
dict(
|
1574 |
-
name='lsd_kpt14',
|
1575 |
-
id=232,
|
1576 |
-
color=[128, 64, 0],
|
1577 |
-
type='',
|
1578 |
-
swap='lsd_kpt30'),
|
1579 |
-
233:
|
1580 |
-
dict(
|
1581 |
-
name='lsd_kpt15',
|
1582 |
-
id=233,
|
1583 |
-
color=[128, 64, 0],
|
1584 |
-
type='',
|
1585 |
-
swap='lsd_kpt29'),
|
1586 |
-
234:
|
1587 |
-
dict(
|
1588 |
-
name='lsd_kpt16',
|
1589 |
-
id=234,
|
1590 |
-
color=[128, 64, 0],
|
1591 |
-
type='',
|
1592 |
-
swap='lsd_kpt28'),
|
1593 |
-
235:
|
1594 |
-
dict(
|
1595 |
-
name='lsd_kpt17',
|
1596 |
-
id=235,
|
1597 |
-
color=[128, 64, 0],
|
1598 |
-
type='',
|
1599 |
-
swap='lsd_kpt27'),
|
1600 |
-
236:
|
1601 |
-
dict(
|
1602 |
-
name='lsd_kpt18',
|
1603 |
-
id=236,
|
1604 |
-
color=[128, 64, 0],
|
1605 |
-
type='',
|
1606 |
-
swap='lsd_kpt26'),
|
1607 |
-
237:
|
1608 |
-
dict(
|
1609 |
-
name='lsd_kpt19',
|
1610 |
-
id=237,
|
1611 |
-
color=[128, 64, 0],
|
1612 |
-
type='',
|
1613 |
-
swap='lsd_kpt25'),
|
1614 |
-
238:
|
1615 |
-
dict(
|
1616 |
-
name='lsd_kpt20',
|
1617 |
-
id=238,
|
1618 |
-
color=[128, 64, 0],
|
1619 |
-
type='',
|
1620 |
-
swap='lsd_kpt24'),
|
1621 |
-
239:
|
1622 |
-
dict(
|
1623 |
-
name='lsd_kpt21',
|
1624 |
-
id=239,
|
1625 |
-
color=[128, 64, 0],
|
1626 |
-
type='',
|
1627 |
-
swap='lsd_kpt23'),
|
1628 |
-
240:
|
1629 |
-
dict(name='lsd_kpt22', id=240, color=[128, 64, 0], type='', swap=''),
|
1630 |
-
241:
|
1631 |
-
dict(
|
1632 |
-
name='lsd_kpt23',
|
1633 |
-
id=241,
|
1634 |
-
color=[128, 64, 0],
|
1635 |
-
type='',
|
1636 |
-
swap='lsd_kpt21'),
|
1637 |
-
242:
|
1638 |
-
dict(
|
1639 |
-
name='lsd_kpt24',
|
1640 |
-
id=242,
|
1641 |
-
color=[128, 64, 0],
|
1642 |
-
type='',
|
1643 |
-
swap='lsd_kpt20'),
|
1644 |
-
243:
|
1645 |
-
dict(
|
1646 |
-
name='lsd_kpt25',
|
1647 |
-
id=243,
|
1648 |
-
color=[128, 64, 0],
|
1649 |
-
type='',
|
1650 |
-
swap='lsd_kpt19'),
|
1651 |
-
244:
|
1652 |
-
dict(
|
1653 |
-
name='lsd_kpt26',
|
1654 |
-
id=244,
|
1655 |
-
color=[128, 64, 0],
|
1656 |
-
type='',
|
1657 |
-
swap='lsd_kpt18'),
|
1658 |
-
245:
|
1659 |
-
dict(
|
1660 |
-
name='lsd_kpt27',
|
1661 |
-
id=245,
|
1662 |
-
color=[128, 64, 0],
|
1663 |
-
type='',
|
1664 |
-
swap='lsd_kpt17'),
|
1665 |
-
246:
|
1666 |
-
dict(
|
1667 |
-
name='lsd_kpt28',
|
1668 |
-
id=246,
|
1669 |
-
color=[128, 64, 0],
|
1670 |
-
type='',
|
1671 |
-
swap='lsd_kpt16'),
|
1672 |
-
247:
|
1673 |
-
dict(
|
1674 |
-
name='lsd_kpt29',
|
1675 |
-
id=247,
|
1676 |
-
color=[128, 64, 0],
|
1677 |
-
type='',
|
1678 |
-
swap='lsd_kpt15'),
|
1679 |
-
248:
|
1680 |
-
dict(
|
1681 |
-
name='lsd_kpt30',
|
1682 |
-
id=248,
|
1683 |
-
color=[128, 64, 0],
|
1684 |
-
type='',
|
1685 |
-
swap='lsd_kpt14'),
|
1686 |
-
249:
|
1687 |
-
dict(
|
1688 |
-
name='lsd_kpt31',
|
1689 |
-
id=249,
|
1690 |
-
color=[128, 64, 0],
|
1691 |
-
type='',
|
1692 |
-
swap='lsd_kpt13'),
|
1693 |
-
250:
|
1694 |
-
dict(
|
1695 |
-
name='lsd_kpt32',
|
1696 |
-
id=250,
|
1697 |
-
color=[128, 64, 0],
|
1698 |
-
type='',
|
1699 |
-
swap='lsd_kpt12'),
|
1700 |
-
251:
|
1701 |
-
dict(
|
1702 |
-
name='lsd_kpt33',
|
1703 |
-
id=251,
|
1704 |
-
color=[128, 64, 0],
|
1705 |
-
type='',
|
1706 |
-
swap='lsd_kpt11'),
|
1707 |
-
252:
|
1708 |
-
dict(
|
1709 |
-
name='lsd_kpt34',
|
1710 |
-
id=252,
|
1711 |
-
color=[128, 64, 0],
|
1712 |
-
type='',
|
1713 |
-
swap='lsd_kpt10'),
|
1714 |
-
253:
|
1715 |
-
dict(
|
1716 |
-
name='lsd_kpt35',
|
1717 |
-
id=253,
|
1718 |
-
color=[128, 64, 0],
|
1719 |
-
type='',
|
1720 |
-
swap='lsd_kpt9'),
|
1721 |
-
254:
|
1722 |
-
dict(
|
1723 |
-
name='lsd_kpt36',
|
1724 |
-
id=254,
|
1725 |
-
color=[128, 64, 0],
|
1726 |
-
type='',
|
1727 |
-
swap='lsd_kpt8'),
|
1728 |
-
255:
|
1729 |
-
dict(
|
1730 |
-
name='lsd_kpt37',
|
1731 |
-
id=255,
|
1732 |
-
color=[128, 64, 0],
|
1733 |
-
type='',
|
1734 |
-
swap='lsd_kpt7'),
|
1735 |
-
256:
|
1736 |
-
dict(name='vd_kpt1', id=256, color=[128, 64, 255], type='', swap=''),
|
1737 |
-
257:
|
1738 |
-
dict(
|
1739 |
-
name='vd_kpt2',
|
1740 |
-
id=257,
|
1741 |
-
color=[128, 64, 255],
|
1742 |
-
type='',
|
1743 |
-
swap='vd_kpt6'),
|
1744 |
-
258:
|
1745 |
-
dict(
|
1746 |
-
name='vd_kpt3',
|
1747 |
-
id=258,
|
1748 |
-
color=[128, 64, 255],
|
1749 |
-
type='',
|
1750 |
-
swap='vd_kpt5'),
|
1751 |
-
259:
|
1752 |
-
dict(name='vd_kpt4', id=259, color=[128, 64, 255], type='', swap=''),
|
1753 |
-
260:
|
1754 |
-
dict(
|
1755 |
-
name='vd_kpt5',
|
1756 |
-
id=260,
|
1757 |
-
color=[128, 64, 255],
|
1758 |
-
type='',
|
1759 |
-
swap='vd_kpt3'),
|
1760 |
-
261:
|
1761 |
-
dict(
|
1762 |
-
name='vd_kpt6',
|
1763 |
-
id=261,
|
1764 |
-
color=[128, 64, 255],
|
1765 |
-
type='',
|
1766 |
-
swap='vd_kpt2'),
|
1767 |
-
262:
|
1768 |
-
dict(
|
1769 |
-
name='vd_kpt7',
|
1770 |
-
id=262,
|
1771 |
-
color=[128, 64, 255],
|
1772 |
-
type='',
|
1773 |
-
swap='vd_kpt19'),
|
1774 |
-
263:
|
1775 |
-
dict(
|
1776 |
-
name='vd_kpt8',
|
1777 |
-
id=263,
|
1778 |
-
color=[128, 64, 255],
|
1779 |
-
type='',
|
1780 |
-
swap='vd_kpt18'),
|
1781 |
-
264:
|
1782 |
-
dict(
|
1783 |
-
name='vd_kpt9',
|
1784 |
-
id=264,
|
1785 |
-
color=[128, 64, 255],
|
1786 |
-
type='',
|
1787 |
-
swap='vd_kpt17'),
|
1788 |
-
265:
|
1789 |
-
dict(
|
1790 |
-
name='vd_kpt10',
|
1791 |
-
id=265,
|
1792 |
-
color=[128, 64, 255],
|
1793 |
-
type='',
|
1794 |
-
swap='vd_kpt16'),
|
1795 |
-
266:
|
1796 |
-
dict(
|
1797 |
-
name='vd_kpt11',
|
1798 |
-
id=266,
|
1799 |
-
color=[128, 64, 255],
|
1800 |
-
type='',
|
1801 |
-
swap='vd_kpt15'),
|
1802 |
-
267:
|
1803 |
-
dict(
|
1804 |
-
name='vd_kpt12',
|
1805 |
-
id=267,
|
1806 |
-
color=[128, 64, 255],
|
1807 |
-
type='',
|
1808 |
-
swap='vd_kpt14'),
|
1809 |
-
268:
|
1810 |
-
dict(name='vd_kpt13', id=268, color=[128, 64, 255], type='', swap=''),
|
1811 |
-
269:
|
1812 |
-
dict(
|
1813 |
-
name='vd_kpt14',
|
1814 |
-
id=269,
|
1815 |
-
color=[128, 64, 255],
|
1816 |
-
type='',
|
1817 |
-
swap='vd_kpt12'),
|
1818 |
-
270:
|
1819 |
-
dict(
|
1820 |
-
name='vd_kpt15',
|
1821 |
-
id=270,
|
1822 |
-
color=[128, 64, 255],
|
1823 |
-
type='',
|
1824 |
-
swap='vd_kpt11'),
|
1825 |
-
271:
|
1826 |
-
dict(
|
1827 |
-
name='vd_kpt16',
|
1828 |
-
id=271,
|
1829 |
-
color=[128, 64, 255],
|
1830 |
-
type='',
|
1831 |
-
swap='vd_kpt10'),
|
1832 |
-
272:
|
1833 |
-
dict(
|
1834 |
-
name='vd_kpt17',
|
1835 |
-
id=272,
|
1836 |
-
color=[128, 64, 255],
|
1837 |
-
type='',
|
1838 |
-
swap='vd_kpt9'),
|
1839 |
-
273:
|
1840 |
-
dict(
|
1841 |
-
name='vd_kpt18',
|
1842 |
-
id=273,
|
1843 |
-
color=[128, 64, 255],
|
1844 |
-
type='',
|
1845 |
-
swap='vd_kpt8'),
|
1846 |
-
274:
|
1847 |
-
dict(
|
1848 |
-
name='vd_kpt19',
|
1849 |
-
id=274,
|
1850 |
-
color=[128, 64, 255],
|
1851 |
-
type='',
|
1852 |
-
swap='vd_kpt7'),
|
1853 |
-
275:
|
1854 |
-
dict(name='sd_kpt1', id=275, color=[128, 64, 0], type='', swap=''),
|
1855 |
-
276:
|
1856 |
-
dict(
|
1857 |
-
name='sd_kpt2',
|
1858 |
-
id=276,
|
1859 |
-
color=[128, 64, 0],
|
1860 |
-
type='',
|
1861 |
-
swap='sd_kpt6'),
|
1862 |
-
277:
|
1863 |
-
dict(
|
1864 |
-
name='sd_kpt3',
|
1865 |
-
id=277,
|
1866 |
-
color=[128, 64, 0],
|
1867 |
-
type='',
|
1868 |
-
swap='sd_kpt5'),
|
1869 |
-
278:
|
1870 |
-
dict(name='sd_kpt4', id=278, color=[128, 64, 0], type='', swap=''),
|
1871 |
-
279:
|
1872 |
-
dict(
|
1873 |
-
name='sd_kpt5',
|
1874 |
-
id=279,
|
1875 |
-
color=[128, 64, 0],
|
1876 |
-
type='',
|
1877 |
-
swap='sd_kpt3'),
|
1878 |
-
280:
|
1879 |
-
dict(
|
1880 |
-
name='sd_kpt6',
|
1881 |
-
id=280,
|
1882 |
-
color=[128, 64, 0],
|
1883 |
-
type='',
|
1884 |
-
swap='sd_kpt2'),
|
1885 |
-
281:
|
1886 |
-
dict(
|
1887 |
-
name='sd_kpt7',
|
1888 |
-
id=281,
|
1889 |
-
color=[128, 64, 0],
|
1890 |
-
type='',
|
1891 |
-
swap='sd_kpt19'),
|
1892 |
-
282:
|
1893 |
-
dict(
|
1894 |
-
name='sd_kpt8',
|
1895 |
-
id=282,
|
1896 |
-
color=[128, 64, 0],
|
1897 |
-
type='',
|
1898 |
-
swap='sd_kpt18'),
|
1899 |
-
283:
|
1900 |
-
dict(
|
1901 |
-
name='sd_kpt9',
|
1902 |
-
id=283,
|
1903 |
-
color=[128, 64, 0],
|
1904 |
-
type='',
|
1905 |
-
swap='sd_kpt17'),
|
1906 |
-
284:
|
1907 |
-
dict(
|
1908 |
-
name='sd_kpt10',
|
1909 |
-
id=284,
|
1910 |
-
color=[128, 64, 0],
|
1911 |
-
type='',
|
1912 |
-
swap='sd_kpt16'),
|
1913 |
-
285:
|
1914 |
-
dict(
|
1915 |
-
name='sd_kpt11',
|
1916 |
-
id=285,
|
1917 |
-
color=[128, 64, 0],
|
1918 |
-
type='',
|
1919 |
-
swap='sd_kpt15'),
|
1920 |
-
286:
|
1921 |
-
dict(
|
1922 |
-
name='sd_kpt12',
|
1923 |
-
id=286,
|
1924 |
-
color=[128, 64, 0],
|
1925 |
-
type='',
|
1926 |
-
swap='sd_kpt14'),
|
1927 |
-
287:
|
1928 |
-
dict(name='sd_kpt13', id=287, color=[128, 64, 0], type='', swap=''),
|
1929 |
-
288:
|
1930 |
-
dict(
|
1931 |
-
name='sd_kpt14',
|
1932 |
-
id=288,
|
1933 |
-
color=[128, 64, 0],
|
1934 |
-
type='',
|
1935 |
-
swap='sd_kpt12'),
|
1936 |
-
289:
|
1937 |
-
dict(
|
1938 |
-
name='sd_kpt15',
|
1939 |
-
id=289,
|
1940 |
-
color=[128, 64, 0],
|
1941 |
-
type='',
|
1942 |
-
swap='sd_kpt11'),
|
1943 |
-
290:
|
1944 |
-
dict(
|
1945 |
-
name='sd_kpt16',
|
1946 |
-
id=290,
|
1947 |
-
color=[128, 64, 0],
|
1948 |
-
type='',
|
1949 |
-
swap='sd_kpt10'),
|
1950 |
-
291:
|
1951 |
-
dict(
|
1952 |
-
name='sd_kpt17',
|
1953 |
-
id=291,
|
1954 |
-
color=[128, 64, 0],
|
1955 |
-
type='',
|
1956 |
-
swap='sd_kpt9'),
|
1957 |
-
292:
|
1958 |
-
dict(
|
1959 |
-
name='sd_kpt18',
|
1960 |
-
id=292,
|
1961 |
-
color=[128, 64, 0],
|
1962 |
-
type='',
|
1963 |
-
swap='sd_kpt8'),
|
1964 |
-
293:
|
1965 |
-
dict(
|
1966 |
-
name='sd_kpt19',
|
1967 |
-
id=293,
|
1968 |
-
color=[128, 64, 0],
|
1969 |
-
type='',
|
1970 |
-
swap='sd_kpt7')
|
1971 |
-
}),
|
1972 |
-
skeleton_info=dict({
|
1973 |
-
0:
|
1974 |
-
dict(link=('sss_kpt1', 'sss_kpt2'), id=0, color=[255, 128, 0]),
|
1975 |
-
1:
|
1976 |
-
dict(link=('sss_kpt2', 'sss_kpt7'), id=1, color=[255, 128, 0]),
|
1977 |
-
2:
|
1978 |
-
dict(link=('sss_kpt7', 'sss_kpt8'), id=2, color=[255, 128, 0]),
|
1979 |
-
3:
|
1980 |
-
dict(link=('sss_kpt8', 'sss_kpt9'), id=3, color=[255, 128, 0]),
|
1981 |
-
4:
|
1982 |
-
dict(link=('sss_kpt9', 'sss_kpt10'), id=4, color=[255, 128, 0]),
|
1983 |
-
5:
|
1984 |
-
dict(link=('sss_kpt10', 'sss_kpt11'), id=5, color=[255, 128, 0]),
|
1985 |
-
6:
|
1986 |
-
dict(link=('sss_kpt11', 'sss_kpt12'), id=6, color=[255, 128, 0]),
|
1987 |
-
7:
|
1988 |
-
dict(link=('sss_kpt12', 'sss_kpt13'), id=7, color=[255, 128, 0]),
|
1989 |
-
8:
|
1990 |
-
dict(link=('sss_kpt13', 'sss_kpt14'), id=8, color=[255, 128, 0]),
|
1991 |
-
9:
|
1992 |
-
dict(link=('sss_kpt14', 'sss_kpt15'), id=9, color=[255, 128, 0]),
|
1993 |
-
10:
|
1994 |
-
dict(link=('sss_kpt15', 'sss_kpt16'), id=10, color=[255, 128, 0]),
|
1995 |
-
11:
|
1996 |
-
dict(link=('sss_kpt16', 'sss_kpt17'), id=11, color=[255, 128, 0]),
|
1997 |
-
12:
|
1998 |
-
dict(link=('sss_kpt17', 'sss_kpt18'), id=12, color=[255, 128, 0]),
|
1999 |
-
13:
|
2000 |
-
dict(link=('sss_kpt18', 'sss_kpt19'), id=13, color=[255, 128, 0]),
|
2001 |
-
14:
|
2002 |
-
dict(link=('sss_kpt19', 'sss_kpt20'), id=14, color=[255, 128, 0]),
|
2003 |
-
15:
|
2004 |
-
dict(link=('sss_kpt20', 'sss_kpt21'), id=15, color=[255, 128, 0]),
|
2005 |
-
16:
|
2006 |
-
dict(link=('sss_kpt21', 'sss_kpt22'), id=16, color=[255, 128, 0]),
|
2007 |
-
17:
|
2008 |
-
dict(link=('sss_kpt22', 'sss_kpt23'), id=17, color=[255, 128, 0]),
|
2009 |
-
18:
|
2010 |
-
dict(link=('sss_kpt23', 'sss_kpt24'), id=18, color=[255, 128, 0]),
|
2011 |
-
19:
|
2012 |
-
dict(link=('sss_kpt24', 'sss_kpt25'), id=19, color=[255, 128, 0]),
|
2013 |
-
20:
|
2014 |
-
dict(link=('sss_kpt25', 'sss_kpt6'), id=20, color=[255, 128, 0]),
|
2015 |
-
21:
|
2016 |
-
dict(link=('sss_kpt6', 'sss_kpt1'), id=21, color=[255, 128, 0]),
|
2017 |
-
22:
|
2018 |
-
dict(link=('sss_kpt2', 'sss_kpt3'), id=22, color=[255, 128, 0]),
|
2019 |
-
23:
|
2020 |
-
dict(link=('sss_kpt3', 'sss_kpt4'), id=23, color=[255, 128, 0]),
|
2021 |
-
24:
|
2022 |
-
dict(link=('sss_kpt4', 'sss_kpt5'), id=24, color=[255, 128, 0]),
|
2023 |
-
25:
|
2024 |
-
dict(link=('sss_kpt5', 'sss_kpt6'), id=25, color=[255, 128, 0]),
|
2025 |
-
26:
|
2026 |
-
dict(link=('lss_kpt1', 'lss_kpt2'), id=26, color=[255, 0, 128]),
|
2027 |
-
27:
|
2028 |
-
dict(link=('lss_kpt2', 'lss_kpt7'), id=27, color=[255, 0, 128]),
|
2029 |
-
28:
|
2030 |
-
dict(link=('lss_kpt7', 'lss_kpt8'), id=28, color=[255, 0, 128]),
|
2031 |
-
29:
|
2032 |
-
dict(link=('lss_kpt8', 'lss_kpt9'), id=29, color=[255, 0, 128]),
|
2033 |
-
30:
|
2034 |
-
dict(link=('lss_kpt9', 'lss_kpt10'), id=30, color=[255, 0, 128]),
|
2035 |
-
31:
|
2036 |
-
dict(link=('lss_kpt10', 'lss_kpt11'), id=31, color=[255, 0, 128]),
|
2037 |
-
32:
|
2038 |
-
dict(link=('lss_kpt11', 'lss_kpt12'), id=32, color=[255, 0, 128]),
|
2039 |
-
33:
|
2040 |
-
dict(link=('lss_kpt12', 'lss_kpt13'), id=33, color=[255, 0, 128]),
|
2041 |
-
34:
|
2042 |
-
dict(link=('lss_kpt13', 'lss_kpt14'), id=34, color=[255, 0, 128]),
|
2043 |
-
35:
|
2044 |
-
dict(link=('lss_kpt14', 'lss_kpt15'), id=35, color=[255, 0, 128]),
|
2045 |
-
36:
|
2046 |
-
dict(link=('lss_kpt15', 'lss_kpt16'), id=36, color=[255, 0, 128]),
|
2047 |
-
37:
|
2048 |
-
dict(link=('lss_kpt16', 'lss_kpt17'), id=37, color=[255, 0, 128]),
|
2049 |
-
38:
|
2050 |
-
dict(link=('lss_kpt17', 'lss_kpt18'), id=38, color=[255, 0, 128]),
|
2051 |
-
39:
|
2052 |
-
dict(link=('lss_kpt18', 'lss_kpt19'), id=39, color=[255, 0, 128]),
|
2053 |
-
40:
|
2054 |
-
dict(link=('lss_kpt19', 'lss_kpt20'), id=40, color=[255, 0, 128]),
|
2055 |
-
41:
|
2056 |
-
dict(link=('lss_kpt20', 'lss_kpt21'), id=41, color=[255, 0, 128]),
|
2057 |
-
42:
|
2058 |
-
dict(link=('lss_kpt21', 'lss_kpt22'), id=42, color=[255, 0, 128]),
|
2059 |
-
43:
|
2060 |
-
dict(link=('lss_kpt22', 'lss_kpt23'), id=43, color=[255, 0, 128]),
|
2061 |
-
44:
|
2062 |
-
dict(link=('lss_kpt23', 'lss_kpt24'), id=44, color=[255, 0, 128]),
|
2063 |
-
45:
|
2064 |
-
dict(link=('lss_kpt24', 'lss_kpt25'), id=45, color=[255, 0, 128]),
|
2065 |
-
46:
|
2066 |
-
dict(link=('lss_kpt25', 'lss_kpt26'), id=46, color=[255, 0, 128]),
|
2067 |
-
47:
|
2068 |
-
dict(link=('lss_kpt26', 'lss_kpt27'), id=47, color=[255, 0, 128]),
|
2069 |
-
48:
|
2070 |
-
dict(link=('lss_kpt27', 'lss_kpt28'), id=48, color=[255, 0, 128]),
|
2071 |
-
49:
|
2072 |
-
dict(link=('lss_kpt28', 'lss_kpt29'), id=49, color=[255, 0, 128]),
|
2073 |
-
50:
|
2074 |
-
dict(link=('lss_kpt29', 'lss_kpt30'), id=50, color=[255, 0, 128]),
|
2075 |
-
51:
|
2076 |
-
dict(link=('lss_kpt30', 'lss_kpt31'), id=51, color=[255, 0, 128]),
|
2077 |
-
52:
|
2078 |
-
dict(link=('lss_kpt31', 'lss_kpt32'), id=52, color=[255, 0, 128]),
|
2079 |
-
53:
|
2080 |
-
dict(link=('lss_kpt32', 'lss_kpt33'), id=53, color=[255, 0, 128]),
|
2081 |
-
54:
|
2082 |
-
dict(link=('lss_kpt33', 'lss_kpt6'), id=54, color=[255, 0, 128]),
|
2083 |
-
55:
|
2084 |
-
dict(link=('lss_kpt6', 'lss_kpt5'), id=55, color=[255, 0, 128]),
|
2085 |
-
56:
|
2086 |
-
dict(link=('lss_kpt5', 'lss_kpt4'), id=56, color=[255, 0, 128]),
|
2087 |
-
57:
|
2088 |
-
dict(link=('lss_kpt4', 'lss_kpt3'), id=57, color=[255, 0, 128]),
|
2089 |
-
58:
|
2090 |
-
dict(link=('lss_kpt3', 'lss_kpt2'), id=58, color=[255, 0, 128]),
|
2091 |
-
59:
|
2092 |
-
dict(link=('lss_kpt6', 'lss_kpt1'), id=59, color=[255, 0, 128]),
|
2093 |
-
60:
|
2094 |
-
dict(link=('sso_kpt1', 'sso_kpt4'), id=60, color=[128, 0, 255]),
|
2095 |
-
61:
|
2096 |
-
dict(link=('sso_kpt4', 'sso_kpt7'), id=61, color=[128, 0, 255]),
|
2097 |
-
62:
|
2098 |
-
dict(link=('sso_kpt7', 'sso_kpt8'), id=62, color=[128, 0, 255]),
|
2099 |
-
63:
|
2100 |
-
dict(link=('sso_kpt8', 'sso_kpt9'), id=63, color=[128, 0, 255]),
|
2101 |
-
64:
|
2102 |
-
dict(link=('sso_kpt9', 'sso_kpt10'), id=64, color=[128, 0, 255]),
|
2103 |
-
65:
|
2104 |
-
dict(link=('sso_kpt10', 'sso_kpt11'), id=65, color=[128, 0, 255]),
|
2105 |
-
66:
|
2106 |
-
dict(link=('sso_kpt11', 'sso_kpt12'), id=66, color=[128, 0, 255]),
|
2107 |
-
67:
|
2108 |
-
dict(link=('sso_kpt12', 'sso_kpt13'), id=67, color=[128, 0, 255]),
|
2109 |
-
68:
|
2110 |
-
dict(link=('sso_kpt13', 'sso_kpt14'), id=68, color=[128, 0, 255]),
|
2111 |
-
69:
|
2112 |
-
dict(link=('sso_kpt14', 'sso_kpt15'), id=69, color=[128, 0, 255]),
|
2113 |
-
70:
|
2114 |
-
dict(link=('sso_kpt15', 'sso_kpt16'), id=70, color=[128, 0, 255]),
|
2115 |
-
71:
|
2116 |
-
dict(link=('sso_kpt16', 'sso_kpt31'), id=71, color=[128, 0, 255]),
|
2117 |
-
72:
|
2118 |
-
dict(link=('sso_kpt31', 'sso_kpt30'), id=72, color=[128, 0, 255]),
|
2119 |
-
73:
|
2120 |
-
dict(link=('sso_kpt30', 'sso_kpt2'), id=73, color=[128, 0, 255]),
|
2121 |
-
74:
|
2122 |
-
dict(link=('sso_kpt2', 'sso_kpt3'), id=74, color=[128, 0, 255]),
|
2123 |
-
75:
|
2124 |
-
dict(link=('sso_kpt3', 'sso_kpt4'), id=75, color=[128, 0, 255]),
|
2125 |
-
76:
|
2126 |
-
dict(link=('sso_kpt1', 'sso_kpt6'), id=76, color=[128, 0, 255]),
|
2127 |
-
77:
|
2128 |
-
dict(link=('sso_kpt6', 'sso_kpt25'), id=77, color=[128, 0, 255]),
|
2129 |
-
78:
|
2130 |
-
dict(link=('sso_kpt25', 'sso_kpt24'), id=78, color=[128, 0, 255]),
|
2131 |
-
79:
|
2132 |
-
dict(link=('sso_kpt24', 'sso_kpt23'), id=79, color=[128, 0, 255]),
|
2133 |
-
80:
|
2134 |
-
dict(link=('sso_kpt23', 'sso_kpt22'), id=80, color=[128, 0, 255]),
|
2135 |
-
81:
|
2136 |
-
dict(link=('sso_kpt22', 'sso_kpt21'), id=81, color=[128, 0, 255]),
|
2137 |
-
82:
|
2138 |
-
dict(link=('sso_kpt21', 'sso_kpt20'), id=82, color=[128, 0, 255]),
|
2139 |
-
83:
|
2140 |
-
dict(link=('sso_kpt20', 'sso_kpt19'), id=83, color=[128, 0, 255]),
|
2141 |
-
84:
|
2142 |
-
dict(link=('sso_kpt19', 'sso_kpt18'), id=84, color=[128, 0, 255]),
|
2143 |
-
85:
|
2144 |
-
dict(link=('sso_kpt18', 'sso_kpt17'), id=85, color=[128, 0, 255]),
|
2145 |
-
86:
|
2146 |
-
dict(link=('sso_kpt17', 'sso_kpt29'), id=86, color=[128, 0, 255]),
|
2147 |
-
87:
|
2148 |
-
dict(link=('sso_kpt29', 'sso_kpt28'), id=87, color=[128, 0, 255]),
|
2149 |
-
88:
|
2150 |
-
dict(link=('sso_kpt28', 'sso_kpt27'), id=88, color=[128, 0, 255]),
|
2151 |
-
89:
|
2152 |
-
dict(link=('sso_kpt27', 'sso_kpt26'), id=89, color=[128, 0, 255]),
|
2153 |
-
90:
|
2154 |
-
dict(link=('sso_kpt26', 'sso_kpt5'), id=90, color=[128, 0, 255]),
|
2155 |
-
91:
|
2156 |
-
dict(link=('sso_kpt5', 'sso_kpt6'), id=91, color=[128, 0, 255]),
|
2157 |
-
92:
|
2158 |
-
dict(link=('lso_kpt1', 'lso_kpt2'), id=92, color=[0, 128, 255]),
|
2159 |
-
93:
|
2160 |
-
dict(link=('lso_kpt2', 'lso_kpt7'), id=93, color=[0, 128, 255]),
|
2161 |
-
94:
|
2162 |
-
dict(link=('lso_kpt7', 'lso_kpt8'), id=94, color=[0, 128, 255]),
|
2163 |
-
95:
|
2164 |
-
dict(link=('lso_kpt8', 'lso_kpt9'), id=95, color=[0, 128, 255]),
|
2165 |
-
96:
|
2166 |
-
dict(link=('lso_kpt9', 'lso_kpt10'), id=96, color=[0, 128, 255]),
|
2167 |
-
97:
|
2168 |
-
dict(link=('lso_kpt10', 'lso_kpt11'), id=97, color=[0, 128, 255]),
|
2169 |
-
98:
|
2170 |
-
dict(link=('lso_kpt11', 'lso_kpt12'), id=98, color=[0, 128, 255]),
|
2171 |
-
99:
|
2172 |
-
dict(link=('lso_kpt12', 'lso_kpt13'), id=99, color=[0, 128, 255]),
|
2173 |
-
100:
|
2174 |
-
dict(link=('lso_kpt13', 'lso_kpt14'), id=100, color=[0, 128, 255]),
|
2175 |
-
101:
|
2176 |
-
dict(link=('lso_kpt14', 'lso_kpt15'), id=101, color=[0, 128, 255]),
|
2177 |
-
102:
|
2178 |
-
dict(link=('lso_kpt15', 'lso_kpt16'), id=102, color=[0, 128, 255]),
|
2179 |
-
103:
|
2180 |
-
dict(link=('lso_kpt16', 'lso_kpt17'), id=103, color=[0, 128, 255]),
|
2181 |
-
104:
|
2182 |
-
dict(link=('lso_kpt17', 'lso_kpt18'), id=104, color=[0, 128, 255]),
|
2183 |
-
105:
|
2184 |
-
dict(link=('lso_kpt18', 'lso_kpt19'), id=105, color=[0, 128, 255]),
|
2185 |
-
106:
|
2186 |
-
dict(link=('lso_kpt19', 'lso_kpt20'), id=106, color=[0, 128, 255]),
|
2187 |
-
107:
|
2188 |
-
dict(link=('lso_kpt20', 'lso_kpt39'), id=107, color=[0, 128, 255]),
|
2189 |
-
108:
|
2190 |
-
dict(link=('lso_kpt39', 'lso_kpt38'), id=108, color=[0, 128, 255]),
|
2191 |
-
109:
|
2192 |
-
dict(link=('lso_kpt38', 'lso_kpt4'), id=109, color=[0, 128, 255]),
|
2193 |
-
110:
|
2194 |
-
dict(link=('lso_kpt4', 'lso_kpt3'), id=110, color=[0, 128, 255]),
|
2195 |
-
111:
|
2196 |
-
dict(link=('lso_kpt3', 'lso_kpt2'), id=111, color=[0, 128, 255]),
|
2197 |
-
112:
|
2198 |
-
dict(link=('lso_kpt1', 'lso_kpt6'), id=112, color=[0, 128, 255]),
|
2199 |
-
113:
|
2200 |
-
dict(link=('lso_kpt6', 'lso_kpt33'), id=113, color=[0, 128, 255]),
|
2201 |
-
114:
|
2202 |
-
dict(link=('lso_kpt33', 'lso_kpt32'), id=114, color=[0, 128, 255]),
|
2203 |
-
115:
|
2204 |
-
dict(link=('lso_kpt32', 'lso_kpt31'), id=115, color=[0, 128, 255]),
|
2205 |
-
116:
|
2206 |
-
dict(link=('lso_kpt31', 'lso_kpt30'), id=116, color=[0, 128, 255]),
|
2207 |
-
117:
|
2208 |
-
dict(link=('lso_kpt30', 'lso_kpt29'), id=117, color=[0, 128, 255]),
|
2209 |
-
118:
|
2210 |
-
dict(link=('lso_kpt29', 'lso_kpt28'), id=118, color=[0, 128, 255]),
|
2211 |
-
119:
|
2212 |
-
dict(link=('lso_kpt28', 'lso_kpt27'), id=119, color=[0, 128, 255]),
|
2213 |
-
120:
|
2214 |
-
dict(link=('lso_kpt27', 'lso_kpt26'), id=120, color=[0, 128, 255]),
|
2215 |
-
121:
|
2216 |
-
dict(link=('lso_kpt26', 'lso_kpt25'), id=121, color=[0, 128, 255]),
|
2217 |
-
122:
|
2218 |
-
dict(link=('lso_kpt25', 'lso_kpt24'), id=122, color=[0, 128, 255]),
|
2219 |
-
123:
|
2220 |
-
dict(link=('lso_kpt24', 'lso_kpt23'), id=123, color=[0, 128, 255]),
|
2221 |
-
124:
|
2222 |
-
dict(link=('lso_kpt23', 'lso_kpt22'), id=124, color=[0, 128, 255]),
|
2223 |
-
125:
|
2224 |
-
dict(link=('lso_kpt22', 'lso_kpt21'), id=125, color=[0, 128, 255]),
|
2225 |
-
126:
|
2226 |
-
dict(link=('lso_kpt21', 'lso_kpt37'), id=126, color=[0, 128, 255]),
|
2227 |
-
127:
|
2228 |
-
dict(link=('lso_kpt37', 'lso_kpt36'), id=127, color=[0, 128, 255]),
|
2229 |
-
128:
|
2230 |
-
dict(link=('lso_kpt36', 'lso_kpt35'), id=128, color=[0, 128, 255]),
|
2231 |
-
129:
|
2232 |
-
dict(link=('lso_kpt35', 'lso_kpt34'), id=129, color=[0, 128, 255]),
|
2233 |
-
130:
|
2234 |
-
dict(link=('lso_kpt34', 'lso_kpt5'), id=130, color=[0, 128, 255]),
|
2235 |
-
131:
|
2236 |
-
dict(link=('lso_kpt5', 'lso_kpt6'), id=131, color=[0, 128, 255]),
|
2237 |
-
132:
|
2238 |
-
dict(link=('vest_kpt1', 'vest_kpt2'), id=132, color=[0, 128, 128]),
|
2239 |
-
133:
|
2240 |
-
dict(link=('vest_kpt2', 'vest_kpt7'), id=133, color=[0, 128, 128]),
|
2241 |
-
134:
|
2242 |
-
dict(link=('vest_kpt7', 'vest_kpt8'), id=134, color=[0, 128, 128]),
|
2243 |
-
135:
|
2244 |
-
dict(link=('vest_kpt8', 'vest_kpt9'), id=135, color=[0, 128, 128]),
|
2245 |
-
136:
|
2246 |
-
dict(link=('vest_kpt9', 'vest_kpt10'), id=136, color=[0, 128, 128]),
|
2247 |
-
137:
|
2248 |
-
dict(link=('vest_kpt10', 'vest_kpt11'), id=137, color=[0, 128, 128]),
|
2249 |
-
138:
|
2250 |
-
dict(link=('vest_kpt11', 'vest_kpt12'), id=138, color=[0, 128, 128]),
|
2251 |
-
139:
|
2252 |
-
dict(link=('vest_kpt12', 'vest_kpt13'), id=139, color=[0, 128, 128]),
|
2253 |
-
140:
|
2254 |
-
dict(link=('vest_kpt13', 'vest_kpt14'), id=140, color=[0, 128, 128]),
|
2255 |
-
141:
|
2256 |
-
dict(link=('vest_kpt14', 'vest_kpt15'), id=141, color=[0, 128, 128]),
|
2257 |
-
142:
|
2258 |
-
dict(link=('vest_kpt15', 'vest_kpt6'), id=142, color=[0, 128, 128]),
|
2259 |
-
143:
|
2260 |
-
dict(link=('vest_kpt6', 'vest_kpt1'), id=143, color=[0, 128, 128]),
|
2261 |
-
144:
|
2262 |
-
dict(link=('vest_kpt2', 'vest_kpt3'), id=144, color=[0, 128, 128]),
|
2263 |
-
145:
|
2264 |
-
dict(link=('vest_kpt3', 'vest_kpt4'), id=145, color=[0, 128, 128]),
|
2265 |
-
146:
|
2266 |
-
dict(link=('vest_kpt4', 'vest_kpt5'), id=146, color=[0, 128, 128]),
|
2267 |
-
147:
|
2268 |
-
dict(link=('vest_kpt5', 'vest_kpt6'), id=147, color=[0, 128, 128]),
|
2269 |
-
148:
|
2270 |
-
dict(link=('sling_kpt1', 'sling_kpt2'), id=148, color=[0, 0, 128]),
|
2271 |
-
149:
|
2272 |
-
dict(link=('sling_kpt2', 'sling_kpt8'), id=149, color=[0, 0, 128]),
|
2273 |
-
150:
|
2274 |
-
dict(link=('sling_kpt8', 'sling_kpt9'), id=150, color=[0, 0, 128]),
|
2275 |
-
151:
|
2276 |
-
dict(link=('sling_kpt9', 'sling_kpt10'), id=151, color=[0, 0, 128]),
|
2277 |
-
152:
|
2278 |
-
dict(link=('sling_kpt10', 'sling_kpt11'), id=152, color=[0, 0, 128]),
|
2279 |
-
153:
|
2280 |
-
dict(link=('sling_kpt11', 'sling_kpt12'), id=153, color=[0, 0, 128]),
|
2281 |
-
154:
|
2282 |
-
dict(link=('sling_kpt12', 'sling_kpt13'), id=154, color=[0, 0, 128]),
|
2283 |
-
155:
|
2284 |
-
dict(link=('sling_kpt13', 'sling_kpt14'), id=155, color=[0, 0, 128]),
|
2285 |
-
156:
|
2286 |
-
dict(link=('sling_kpt14', 'sling_kpt6'), id=156, color=[0, 0, 128]),
|
2287 |
-
157:
|
2288 |
-
dict(link=('sling_kpt2', 'sling_kpt7'), id=157, color=[0, 0, 128]),
|
2289 |
-
158:
|
2290 |
-
dict(link=('sling_kpt6', 'sling_kpt15'), id=158, color=[0, 0, 128]),
|
2291 |
-
159:
|
2292 |
-
dict(link=('sling_kpt2', 'sling_kpt3'), id=159, color=[0, 0, 128]),
|
2293 |
-
160:
|
2294 |
-
dict(link=('sling_kpt3', 'sling_kpt4'), id=160, color=[0, 0, 128]),
|
2295 |
-
161:
|
2296 |
-
dict(link=('sling_kpt4', 'sling_kpt5'), id=161, color=[0, 0, 128]),
|
2297 |
-
162:
|
2298 |
-
dict(link=('sling_kpt5', 'sling_kpt6'), id=162, color=[0, 0, 128]),
|
2299 |
-
163:
|
2300 |
-
dict(link=('sling_kpt1', 'sling_kpt6'), id=163, color=[0, 0, 128]),
|
2301 |
-
164:
|
2302 |
-
dict(
|
2303 |
-
link=('shorts_kpt1', 'shorts_kpt4'), id=164, color=[128, 128,
|
2304 |
-
128]),
|
2305 |
-
165:
|
2306 |
-
dict(
|
2307 |
-
link=('shorts_kpt4', 'shorts_kpt5'), id=165, color=[128, 128,
|
2308 |
-
128]),
|
2309 |
-
166:
|
2310 |
-
dict(
|
2311 |
-
link=('shorts_kpt5', 'shorts_kpt6'), id=166, color=[128, 128,
|
2312 |
-
128]),
|
2313 |
-
167:
|
2314 |
-
dict(
|
2315 |
-
link=('shorts_kpt6', 'shorts_kpt7'), id=167, color=[128, 128,
|
2316 |
-
128]),
|
2317 |
-
168:
|
2318 |
-
dict(
|
2319 |
-
link=('shorts_kpt7', 'shorts_kpt8'), id=168, color=[128, 128,
|
2320 |
-
128]),
|
2321 |
-
169:
|
2322 |
-
dict(
|
2323 |
-
link=('shorts_kpt8', 'shorts_kpt9'), id=169, color=[128, 128,
|
2324 |
-
128]),
|
2325 |
-
170:
|
2326 |
-
dict(
|
2327 |
-
link=('shorts_kpt9', 'shorts_kpt10'),
|
2328 |
-
id=170,
|
2329 |
-
color=[128, 128, 128]),
|
2330 |
-
171:
|
2331 |
-
dict(
|
2332 |
-
link=('shorts_kpt10', 'shorts_kpt3'),
|
2333 |
-
id=171,
|
2334 |
-
color=[128, 128, 128]),
|
2335 |
-
172:
|
2336 |
-
dict(
|
2337 |
-
link=('shorts_kpt3', 'shorts_kpt2'), id=172, color=[128, 128,
|
2338 |
-
128]),
|
2339 |
-
173:
|
2340 |
-
dict(
|
2341 |
-
link=('shorts_kpt2', 'shorts_kpt1'), id=173, color=[128, 128,
|
2342 |
-
128]),
|
2343 |
-
174:
|
2344 |
-
dict(
|
2345 |
-
link=('trousers_kpt1', 'trousers_kpt4'),
|
2346 |
-
id=174,
|
2347 |
-
color=[128, 0, 128]),
|
2348 |
-
175:
|
2349 |
-
dict(
|
2350 |
-
link=('trousers_kpt4', 'trousers_kpt5'),
|
2351 |
-
id=175,
|
2352 |
-
color=[128, 0, 128]),
|
2353 |
-
176:
|
2354 |
-
dict(
|
2355 |
-
link=('trousers_kpt5', 'trousers_kpt6'),
|
2356 |
-
id=176,
|
2357 |
-
color=[128, 0, 128]),
|
2358 |
-
177:
|
2359 |
-
dict(
|
2360 |
-
link=('trousers_kpt6', 'trousers_kpt7'),
|
2361 |
-
id=177,
|
2362 |
-
color=[128, 0, 128]),
|
2363 |
-
178:
|
2364 |
-
dict(
|
2365 |
-
link=('trousers_kpt7', 'trousers_kpt8'),
|
2366 |
-
id=178,
|
2367 |
-
color=[128, 0, 128]),
|
2368 |
-
179:
|
2369 |
-
dict(
|
2370 |
-
link=('trousers_kpt8', 'trousers_kpt9'),
|
2371 |
-
id=179,
|
2372 |
-
color=[128, 0, 128]),
|
2373 |
-
180:
|
2374 |
-
dict(
|
2375 |
-
link=('trousers_kpt9', 'trousers_kpt10'),
|
2376 |
-
id=180,
|
2377 |
-
color=[128, 0, 128]),
|
2378 |
-
181:
|
2379 |
-
dict(
|
2380 |
-
link=('trousers_kpt10', 'trousers_kpt11'),
|
2381 |
-
id=181,
|
2382 |
-
color=[128, 0, 128]),
|
2383 |
-
182:
|
2384 |
-
dict(
|
2385 |
-
link=('trousers_kpt11', 'trousers_kpt12'),
|
2386 |
-
id=182,
|
2387 |
-
color=[128, 0, 128]),
|
2388 |
-
183:
|
2389 |
-
dict(
|
2390 |
-
link=('trousers_kpt12', 'trousers_kpt13'),
|
2391 |
-
id=183,
|
2392 |
-
color=[128, 0, 128]),
|
2393 |
-
184:
|
2394 |
-
dict(
|
2395 |
-
link=('trousers_kpt13', 'trousers_kpt14'),
|
2396 |
-
id=184,
|
2397 |
-
color=[128, 0, 128]),
|
2398 |
-
185:
|
2399 |
-
dict(
|
2400 |
-
link=('trousers_kpt14', 'trousers_kpt3'),
|
2401 |
-
id=185,
|
2402 |
-
color=[128, 0, 128]),
|
2403 |
-
186:
|
2404 |
-
dict(
|
2405 |
-
link=('trousers_kpt3', 'trousers_kpt2'),
|
2406 |
-
id=186,
|
2407 |
-
color=[128, 0, 128]),
|
2408 |
-
187:
|
2409 |
-
dict(
|
2410 |
-
link=('trousers_kpt2', 'trousers_kpt1'),
|
2411 |
-
id=187,
|
2412 |
-
color=[128, 0, 128]),
|
2413 |
-
188:
|
2414 |
-
dict(link=('skirt_kpt1', 'skirt_kpt4'), id=188, color=[64, 128, 128]),
|
2415 |
-
189:
|
2416 |
-
dict(link=('skirt_kpt4', 'skirt_kpt5'), id=189, color=[64, 128, 128]),
|
2417 |
-
190:
|
2418 |
-
dict(link=('skirt_kpt5', 'skirt_kpt6'), id=190, color=[64, 128, 128]),
|
2419 |
-
191:
|
2420 |
-
dict(link=('skirt_kpt6', 'skirt_kpt7'), id=191, color=[64, 128, 128]),
|
2421 |
-
192:
|
2422 |
-
dict(link=('skirt_kpt7', 'skirt_kpt8'), id=192, color=[64, 128, 128]),
|
2423 |
-
193:
|
2424 |
-
dict(link=('skirt_kpt8', 'skirt_kpt3'), id=193, color=[64, 128, 128]),
|
2425 |
-
194:
|
2426 |
-
dict(link=('skirt_kpt3', 'skirt_kpt2'), id=194, color=[64, 128, 128]),
|
2427 |
-
195:
|
2428 |
-
dict(link=('skirt_kpt2', 'skirt_kpt1'), id=195, color=[64, 128, 128]),
|
2429 |
-
196:
|
2430 |
-
dict(link=('ssd_kpt1', 'ssd_kpt2'), id=196, color=[64, 64, 128]),
|
2431 |
-
197:
|
2432 |
-
dict(link=('ssd_kpt2', 'ssd_kpt7'), id=197, color=[64, 64, 128]),
|
2433 |
-
198:
|
2434 |
-
dict(link=('ssd_kpt7', 'ssd_kpt8'), id=198, color=[64, 64, 128]),
|
2435 |
-
199:
|
2436 |
-
dict(link=('ssd_kpt8', 'ssd_kpt9'), id=199, color=[64, 64, 128]),
|
2437 |
-
200:
|
2438 |
-
dict(link=('ssd_kpt9', 'ssd_kpt10'), id=200, color=[64, 64, 128]),
|
2439 |
-
201:
|
2440 |
-
dict(link=('ssd_kpt10', 'ssd_kpt11'), id=201, color=[64, 64, 128]),
|
2441 |
-
202:
|
2442 |
-
dict(link=('ssd_kpt11', 'ssd_kpt12'), id=202, color=[64, 64, 128]),
|
2443 |
-
203:
|
2444 |
-
dict(link=('ssd_kpt12', 'ssd_kpt13'), id=203, color=[64, 64, 128]),
|
2445 |
-
204:
|
2446 |
-
dict(link=('ssd_kpt13', 'ssd_kpt14'), id=204, color=[64, 64, 128]),
|
2447 |
-
205:
|
2448 |
-
dict(link=('ssd_kpt14', 'ssd_kpt15'), id=205, color=[64, 64, 128]),
|
2449 |
-
206:
|
2450 |
-
dict(link=('ssd_kpt15', 'ssd_kpt16'), id=206, color=[64, 64, 128]),
|
2451 |
-
207:
|
2452 |
-
dict(link=('ssd_kpt16', 'ssd_kpt17'), id=207, color=[64, 64, 128]),
|
2453 |
-
208:
|
2454 |
-
dict(link=('ssd_kpt17', 'ssd_kpt18'), id=208, color=[64, 64, 128]),
|
2455 |
-
209:
|
2456 |
-
dict(link=('ssd_kpt18', 'ssd_kpt19'), id=209, color=[64, 64, 128]),
|
2457 |
-
210:
|
2458 |
-
dict(link=('ssd_kpt19', 'ssd_kpt20'), id=210, color=[64, 64, 128]),
|
2459 |
-
211:
|
2460 |
-
dict(link=('ssd_kpt20', 'ssd_kpt21'), id=211, color=[64, 64, 128]),
|
2461 |
-
212:
|
2462 |
-
dict(link=('ssd_kpt21', 'ssd_kpt22'), id=212, color=[64, 64, 128]),
|
2463 |
-
213:
|
2464 |
-
dict(link=('ssd_kpt22', 'ssd_kpt23'), id=213, color=[64, 64, 128]),
|
2465 |
-
214:
|
2466 |
-
dict(link=('ssd_kpt23', 'ssd_kpt24'), id=214, color=[64, 64, 128]),
|
2467 |
-
215:
|
2468 |
-
dict(link=('ssd_kpt24', 'ssd_kpt25'), id=215, color=[64, 64, 128]),
|
2469 |
-
216:
|
2470 |
-
dict(link=('ssd_kpt25', 'ssd_kpt26'), id=216, color=[64, 64, 128]),
|
2471 |
-
217:
|
2472 |
-
dict(link=('ssd_kpt26', 'ssd_kpt27'), id=217, color=[64, 64, 128]),
|
2473 |
-
218:
|
2474 |
-
dict(link=('ssd_kpt27', 'ssd_kpt28'), id=218, color=[64, 64, 128]),
|
2475 |
-
219:
|
2476 |
-
dict(link=('ssd_kpt28', 'ssd_kpt29'), id=219, color=[64, 64, 128]),
|
2477 |
-
220:
|
2478 |
-
dict(link=('ssd_kpt29', 'ssd_kpt6'), id=220, color=[64, 64, 128]),
|
2479 |
-
221:
|
2480 |
-
dict(link=('ssd_kpt6', 'ssd_kpt5'), id=221, color=[64, 64, 128]),
|
2481 |
-
222:
|
2482 |
-
dict(link=('ssd_kpt5', 'ssd_kpt4'), id=222, color=[64, 64, 128]),
|
2483 |
-
223:
|
2484 |
-
dict(link=('ssd_kpt4', 'ssd_kpt3'), id=223, color=[64, 64, 128]),
|
2485 |
-
224:
|
2486 |
-
dict(link=('ssd_kpt3', 'ssd_kpt2'), id=224, color=[64, 64, 128]),
|
2487 |
-
225:
|
2488 |
-
dict(link=('ssd_kpt6', 'ssd_kpt1'), id=225, color=[64, 64, 128]),
|
2489 |
-
226:
|
2490 |
-
dict(link=('lsd_kpt1', 'lsd_kpt2'), id=226, color=[128, 64, 0]),
|
2491 |
-
227:
|
2492 |
-
dict(link=('lsd_kpt2', 'lsd_kpt7'), id=228, color=[128, 64, 0]),
|
2493 |
-
228:
|
2494 |
-
dict(link=('lsd_kpt7', 'lsd_kpt8'), id=228, color=[128, 64, 0]),
|
2495 |
-
229:
|
2496 |
-
dict(link=('lsd_kpt8', 'lsd_kpt9'), id=229, color=[128, 64, 0]),
|
2497 |
-
230:
|
2498 |
-
dict(link=('lsd_kpt9', 'lsd_kpt10'), id=230, color=[128, 64, 0]),
|
2499 |
-
231:
|
2500 |
-
dict(link=('lsd_kpt10', 'lsd_kpt11'), id=231, color=[128, 64, 0]),
|
2501 |
-
232:
|
2502 |
-
dict(link=('lsd_kpt11', 'lsd_kpt12'), id=232, color=[128, 64, 0]),
|
2503 |
-
233:
|
2504 |
-
dict(link=('lsd_kpt12', 'lsd_kpt13'), id=233, color=[128, 64, 0]),
|
2505 |
-
234:
|
2506 |
-
dict(link=('lsd_kpt13', 'lsd_kpt14'), id=234, color=[128, 64, 0]),
|
2507 |
-
235:
|
2508 |
-
dict(link=('lsd_kpt14', 'lsd_kpt15'), id=235, color=[128, 64, 0]),
|
2509 |
-
236:
|
2510 |
-
dict(link=('lsd_kpt15', 'lsd_kpt16'), id=236, color=[128, 64, 0]),
|
2511 |
-
237:
|
2512 |
-
dict(link=('lsd_kpt16', 'lsd_kpt17'), id=237, color=[128, 64, 0]),
|
2513 |
-
238:
|
2514 |
-
dict(link=('lsd_kpt17', 'lsd_kpt18'), id=238, color=[128, 64, 0]),
|
2515 |
-
239:
|
2516 |
-
dict(link=('lsd_kpt18', 'lsd_kpt19'), id=239, color=[128, 64, 0]),
|
2517 |
-
240:
|
2518 |
-
dict(link=('lsd_kpt19', 'lsd_kpt20'), id=240, color=[128, 64, 0]),
|
2519 |
-
241:
|
2520 |
-
dict(link=('lsd_kpt20', 'lsd_kpt21'), id=241, color=[128, 64, 0]),
|
2521 |
-
242:
|
2522 |
-
dict(link=('lsd_kpt21', 'lsd_kpt22'), id=242, color=[128, 64, 0]),
|
2523 |
-
243:
|
2524 |
-
dict(link=('lsd_kpt22', 'lsd_kpt23'), id=243, color=[128, 64, 0]),
|
2525 |
-
244:
|
2526 |
-
dict(link=('lsd_kpt23', 'lsd_kpt24'), id=244, color=[128, 64, 0]),
|
2527 |
-
245:
|
2528 |
-
dict(link=('lsd_kpt24', 'lsd_kpt25'), id=245, color=[128, 64, 0]),
|
2529 |
-
246:
|
2530 |
-
dict(link=('lsd_kpt25', 'lsd_kpt26'), id=246, color=[128, 64, 0]),
|
2531 |
-
247:
|
2532 |
-
dict(link=('lsd_kpt26', 'lsd_kpt27'), id=247, color=[128, 64, 0]),
|
2533 |
-
248:
|
2534 |
-
dict(link=('lsd_kpt27', 'lsd_kpt28'), id=248, color=[128, 64, 0]),
|
2535 |
-
249:
|
2536 |
-
dict(link=('lsd_kpt28', 'lsd_kpt29'), id=249, color=[128, 64, 0]),
|
2537 |
-
250:
|
2538 |
-
dict(link=('lsd_kpt29', 'lsd_kpt30'), id=250, color=[128, 64, 0]),
|
2539 |
-
251:
|
2540 |
-
dict(link=('lsd_kpt30', 'lsd_kpt31'), id=251, color=[128, 64, 0]),
|
2541 |
-
252:
|
2542 |
-
dict(link=('lsd_kpt31', 'lsd_kpt32'), id=252, color=[128, 64, 0]),
|
2543 |
-
253:
|
2544 |
-
dict(link=('lsd_kpt32', 'lsd_kpt33'), id=253, color=[128, 64, 0]),
|
2545 |
-
254:
|
2546 |
-
dict(link=('lsd_kpt33', 'lsd_kpt34'), id=254, color=[128, 64, 0]),
|
2547 |
-
255:
|
2548 |
-
dict(link=('lsd_kpt34', 'lsd_kpt35'), id=255, color=[128, 64, 0]),
|
2549 |
-
256:
|
2550 |
-
dict(link=('lsd_kpt35', 'lsd_kpt36'), id=256, color=[128, 64, 0]),
|
2551 |
-
257:
|
2552 |
-
dict(link=('lsd_kpt36', 'lsd_kpt37'), id=257, color=[128, 64, 0]),
|
2553 |
-
258:
|
2554 |
-
dict(link=('lsd_kpt37', 'lsd_kpt6'), id=258, color=[128, 64, 0]),
|
2555 |
-
259:
|
2556 |
-
dict(link=('lsd_kpt6', 'lsd_kpt5'), id=259, color=[128, 64, 0]),
|
2557 |
-
260:
|
2558 |
-
dict(link=('lsd_kpt5', 'lsd_kpt4'), id=260, color=[128, 64, 0]),
|
2559 |
-
261:
|
2560 |
-
dict(link=('lsd_kpt4', 'lsd_kpt3'), id=261, color=[128, 64, 0]),
|
2561 |
-
262:
|
2562 |
-
dict(link=('lsd_kpt3', 'lsd_kpt2'), id=262, color=[128, 64, 0]),
|
2563 |
-
263:
|
2564 |
-
dict(link=('lsd_kpt6', 'lsd_kpt1'), id=263, color=[128, 64, 0]),
|
2565 |
-
264:
|
2566 |
-
dict(link=('vd_kpt1', 'vd_kpt2'), id=264, color=[128, 64, 255]),
|
2567 |
-
265:
|
2568 |
-
dict(link=('vd_kpt2', 'vd_kpt7'), id=265, color=[128, 64, 255]),
|
2569 |
-
266:
|
2570 |
-
dict(link=('vd_kpt7', 'vd_kpt8'), id=266, color=[128, 64, 255]),
|
2571 |
-
267:
|
2572 |
-
dict(link=('vd_kpt8', 'vd_kpt9'), id=267, color=[128, 64, 255]),
|
2573 |
-
268:
|
2574 |
-
dict(link=('vd_kpt9', 'vd_kpt10'), id=268, color=[128, 64, 255]),
|
2575 |
-
269:
|
2576 |
-
dict(link=('vd_kpt10', 'vd_kpt11'), id=269, color=[128, 64, 255]),
|
2577 |
-
270:
|
2578 |
-
dict(link=('vd_kpt11', 'vd_kpt12'), id=270, color=[128, 64, 255]),
|
2579 |
-
271:
|
2580 |
-
dict(link=('vd_kpt12', 'vd_kpt13'), id=271, color=[128, 64, 255]),
|
2581 |
-
272:
|
2582 |
-
dict(link=('vd_kpt13', 'vd_kpt14'), id=272, color=[128, 64, 255]),
|
2583 |
-
273:
|
2584 |
-
dict(link=('vd_kpt14', 'vd_kpt15'), id=273, color=[128, 64, 255]),
|
2585 |
-
274:
|
2586 |
-
dict(link=('vd_kpt15', 'vd_kpt16'), id=274, color=[128, 64, 255]),
|
2587 |
-
275:
|
2588 |
-
dict(link=('vd_kpt16', 'vd_kpt17'), id=275, color=[128, 64, 255]),
|
2589 |
-
276:
|
2590 |
-
dict(link=('vd_kpt17', 'vd_kpt18'), id=276, color=[128, 64, 255]),
|
2591 |
-
277:
|
2592 |
-
dict(link=('vd_kpt18', 'vd_kpt19'), id=277, color=[128, 64, 255]),
|
2593 |
-
278:
|
2594 |
-
dict(link=('vd_kpt19', 'vd_kpt6'), id=278, color=[128, 64, 255]),
|
2595 |
-
279:
|
2596 |
-
dict(link=('vd_kpt6', 'vd_kpt5'), id=279, color=[128, 64, 255]),
|
2597 |
-
280:
|
2598 |
-
dict(link=('vd_kpt5', 'vd_kpt4'), id=280, color=[128, 64, 255]),
|
2599 |
-
281:
|
2600 |
-
dict(link=('vd_kpt4', 'vd_kpt3'), id=281, color=[128, 64, 255]),
|
2601 |
-
282:
|
2602 |
-
dict(link=('vd_kpt3', 'vd_kpt2'), id=282, color=[128, 64, 255]),
|
2603 |
-
283:
|
2604 |
-
dict(link=('vd_kpt6', 'vd_kpt1'), id=283, color=[128, 64, 255]),
|
2605 |
-
284:
|
2606 |
-
dict(link=('sd_kpt1', 'sd_kpt2'), id=284, color=[128, 64, 0]),
|
2607 |
-
285:
|
2608 |
-
dict(link=('sd_kpt2', 'sd_kpt8'), id=285, color=[128, 64, 0]),
|
2609 |
-
286:
|
2610 |
-
dict(link=('sd_kpt8', 'sd_kpt9'), id=286, color=[128, 64, 0]),
|
2611 |
-
287:
|
2612 |
-
dict(link=('sd_kpt9', 'sd_kpt10'), id=287, color=[128, 64, 0]),
|
2613 |
-
288:
|
2614 |
-
dict(link=('sd_kpt10', 'sd_kpt11'), id=288, color=[128, 64, 0]),
|
2615 |
-
289:
|
2616 |
-
dict(link=('sd_kpt11', 'sd_kpt12'), id=289, color=[128, 64, 0]),
|
2617 |
-
290:
|
2618 |
-
dict(link=('sd_kpt12', 'sd_kpt13'), id=290, color=[128, 64, 0]),
|
2619 |
-
291:
|
2620 |
-
dict(link=('sd_kpt13', 'sd_kpt14'), id=291, color=[128, 64, 0]),
|
2621 |
-
292:
|
2622 |
-
dict(link=('sd_kpt14', 'sd_kpt15'), id=292, color=[128, 64, 0]),
|
2623 |
-
293:
|
2624 |
-
dict(link=('sd_kpt15', 'sd_kpt16'), id=293, color=[128, 64, 0]),
|
2625 |
-
294:
|
2626 |
-
dict(link=('sd_kpt16', 'sd_kpt17'), id=294, color=[128, 64, 0]),
|
2627 |
-
295:
|
2628 |
-
dict(link=('sd_kpt17', 'sd_kpt18'), id=295, color=[128, 64, 0]),
|
2629 |
-
296:
|
2630 |
-
dict(link=('sd_kpt18', 'sd_kpt6'), id=296, color=[128, 64, 0]),
|
2631 |
-
297:
|
2632 |
-
dict(link=('sd_kpt6', 'sd_kpt5'), id=297, color=[128, 64, 0]),
|
2633 |
-
298:
|
2634 |
-
dict(link=('sd_kpt5', 'sd_kpt4'), id=298, color=[128, 64, 0]),
|
2635 |
-
299:
|
2636 |
-
dict(link=('sd_kpt4', 'sd_kpt3'), id=299, color=[128, 64, 0]),
|
2637 |
-
300:
|
2638 |
-
dict(link=('sd_kpt3', 'sd_kpt2'), id=300, color=[128, 64, 0]),
|
2639 |
-
301:
|
2640 |
-
dict(link=('sd_kpt2', 'sd_kpt7'), id=301, color=[128, 64, 0]),
|
2641 |
-
302:
|
2642 |
-
dict(link=('sd_kpt6', 'sd_kpt19'), id=302, color=[128, 64, 0]),
|
2643 |
-
303:
|
2644 |
-
dict(link=('sd_kpt6', 'sd_kpt1'), id=303, color=[128, 64, 0])
|
2645 |
-
}),
|
2646 |
-
joint_weights=[
|
2647 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2648 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2649 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2650 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2651 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2652 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2653 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2654 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2655 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2656 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2657 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2658 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2659 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2660 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2661 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2662 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2663 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2664 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2665 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2666 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2667 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
|
2668 |
-
],
|
2669 |
-
sigmas=[])
|
2670 |
-
param_scheduler = [
|
2671 |
-
dict(
|
2672 |
-
type='LinearLR', begin=0, end=500, start_factor=0.001, by_epoch=False),
|
2673 |
-
dict(
|
2674 |
-
type='MultiStepLR',
|
2675 |
-
begin=0,
|
2676 |
-
end=210,
|
2677 |
-
milestones=[100, 160],
|
2678 |
-
gamma=0.1,
|
2679 |
-
by_epoch=True)
|
2680 |
-
]
|
2681 |
-
optim_wrapper = dict(optimizer=dict(type='Adam', lr=0.0005))
|
2682 |
-
auto_scale_lr = dict(base_batch_size=512)
|
2683 |
-
dataset_type = 'DeepFashion2Dataset'
|
2684 |
-
data_mode = 'topdown'
|
2685 |
-
data_root = 'data/deepfashion2/'
|
2686 |
-
codec = dict(
|
2687 |
-
type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2)
|
2688 |
-
train_pipeline = [
|
2689 |
-
dict(type='LoadImage'),
|
2690 |
-
dict(type='GetBBoxCenterScale'),
|
2691 |
-
dict(type='RandomFlip', direction='horizontal'),
|
2692 |
-
dict(
|
2693 |
-
type='RandomBBoxTransform',
|
2694 |
-
shift_prob=0,
|
2695 |
-
rotate_factor=60,
|
2696 |
-
scale_factor=(0.75, 1.25)),
|
2697 |
-
dict(type='TopdownAffine', input_size=(192, 256)),
|
2698 |
-
dict(
|
2699 |
-
type='GenerateTarget',
|
2700 |
-
encoder=dict(
|
2701 |
-
type='MSRAHeatmap',
|
2702 |
-
input_size=(192, 256),
|
2703 |
-
heatmap_size=(48, 64),
|
2704 |
-
sigma=2)),
|
2705 |
-
dict(type='PackPoseInputs')
|
2706 |
-
]
|
2707 |
-
val_pipeline = [
|
2708 |
-
dict(type='LoadImage', backend_args=dict(backend='local')),
|
2709 |
-
dict(type='GetBBoxCenterScale'),
|
2710 |
-
dict(type='TopdownAffine', input_size=(192, 256)),
|
2711 |
-
dict(type='PackPoseInputs')
|
2712 |
-
]
|
2713 |
-
train_dataloader = dict(
|
2714 |
-
batch_size=64,
|
2715 |
-
num_workers=6,
|
2716 |
-
persistent_workers=True,
|
2717 |
-
sampler=dict(type='DefaultSampler', shuffle=True),
|
2718 |
-
dataset=dict(
|
2719 |
-
type='DeepFashion2Dataset',
|
2720 |
-
data_root='data/deepfashion2/',
|
2721 |
-
data_mode='topdown',
|
2722 |
-
ann_file='train/deepfashion2_sling_dress.json',
|
2723 |
-
data_prefix=dict(img='train/image/'),
|
2724 |
-
pipeline=[
|
2725 |
-
dict(type='LoadImage'),
|
2726 |
-
dict(type='GetBBoxCenterScale'),
|
2727 |
-
dict(type='RandomFlip', direction='horizontal'),
|
2728 |
-
dict(
|
2729 |
-
type='RandomBBoxTransform',
|
2730 |
-
shift_prob=0,
|
2731 |
-
rotate_factor=60,
|
2732 |
-
scale_factor=(0.75, 1.25)),
|
2733 |
-
dict(type='TopdownAffine', input_size=(192, 256)),
|
2734 |
-
dict(
|
2735 |
-
type='GenerateTarget',
|
2736 |
-
encoder=dict(
|
2737 |
-
type='MSRAHeatmap',
|
2738 |
-
input_size=(192, 256),
|
2739 |
-
heatmap_size=(48, 64),
|
2740 |
-
sigma=2)),
|
2741 |
-
dict(type='PackPoseInputs')
|
2742 |
-
]))
|
2743 |
-
val_dataloader = dict(
|
2744 |
-
batch_size=32,
|
2745 |
-
num_workers=6,
|
2746 |
-
persistent_workers=True,
|
2747 |
-
drop_last=False,
|
2748 |
-
sampler=dict(type='DefaultSampler', shuffle=False),
|
2749 |
-
dataset=dict(
|
2750 |
-
type='DeepFashion2Dataset',
|
2751 |
-
data_root='data/deepfashion2/',
|
2752 |
-
data_mode='topdown',
|
2753 |
-
ann_file='validation/deepfashion2_sling_dress.json',
|
2754 |
-
data_prefix=dict(img='validation/image/'),
|
2755 |
-
test_mode=True,
|
2756 |
-
pipeline=[
|
2757 |
-
dict(type='LoadImage', backend_args=dict(backend='local')),
|
2758 |
-
dict(type='GetBBoxCenterScale'),
|
2759 |
-
dict(type='TopdownAffine', input_size=(192, 256)),
|
2760 |
-
dict(type='PackPoseInputs')
|
2761 |
-
]))
|
2762 |
-
test_dataloader = dict(
|
2763 |
-
batch_size=32,
|
2764 |
-
num_workers=6,
|
2765 |
-
persistent_workers=True,
|
2766 |
-
drop_last=False,
|
2767 |
-
sampler=dict(type='DefaultSampler', shuffle=False),
|
2768 |
-
dataset=dict(
|
2769 |
-
type='DeepFashion2Dataset',
|
2770 |
-
data_root='data/deepfashion2/',
|
2771 |
-
data_mode='topdown',
|
2772 |
-
ann_file='validation/deepfashion2_sling_dress.json',
|
2773 |
-
data_prefix=dict(img='validation/image/'),
|
2774 |
-
test_mode=True,
|
2775 |
-
pipeline=[
|
2776 |
-
dict(type='LoadImage', backend_args=dict(backend='local')),
|
2777 |
-
dict(type='GetBBoxCenterScale'),
|
2778 |
-
dict(type='TopdownAffine', input_size=(192, 256)),
|
2779 |
-
dict(type='PackPoseInputs')
|
2780 |
-
]))
|
2781 |
-
channel_cfg = dict(
|
2782 |
-
num_output_channels=294,
|
2783 |
-
dataset_joints=294,
|
2784 |
-
dataset_channel=[[
|
2785 |
-
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
|
2786 |
-
20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
|
2787 |
-
38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
|
2788 |
-
56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
|
2789 |
-
74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
|
2790 |
-
92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
|
2791 |
-
108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
|
2792 |
-
122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135,
|
2793 |
-
136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
|
2794 |
-
150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163,
|
2795 |
-
164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
|
2796 |
-
178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
|
2797 |
-
192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205,
|
2798 |
-
206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
|
2799 |
-
220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233,
|
2800 |
-
234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247,
|
2801 |
-
248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261,
|
2802 |
-
262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275,
|
2803 |
-
276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289,
|
2804 |
-
290, 291, 292, 293
|
2805 |
-
]],
|
2806 |
-
inference_channel=[
|
2807 |
-
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
|
2808 |
-
20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
|
2809 |
-
38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
|
2810 |
-
56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
|
2811 |
-
74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
|
2812 |
-
92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
|
2813 |
-
108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
|
2814 |
-
122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135,
|
2815 |
-
136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
|
2816 |
-
150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163,
|
2817 |
-
164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
|
2818 |
-
178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
|
2819 |
-
192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205,
|
2820 |
-
206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
|
2821 |
-
220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233,
|
2822 |
-
234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247,
|
2823 |
-
248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261,
|
2824 |
-
262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275,
|
2825 |
-
276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289,
|
2826 |
-
290, 291, 292, 293
|
2827 |
-
])
|
2828 |
-
model = dict(
|
2829 |
-
type='TopdownPoseEstimator',
|
2830 |
-
data_preprocessor=dict(
|
2831 |
-
type='PoseDataPreprocessor',
|
2832 |
-
mean=[123.675, 116.28, 103.53],
|
2833 |
-
std=[58.395, 57.12, 57.375],
|
2834 |
-
bgr_to_rgb=True),
|
2835 |
-
backbone=dict(
|
2836 |
-
type='ResNet',
|
2837 |
-
depth=50,
|
2838 |
-
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
|
2839 |
-
head=dict(
|
2840 |
-
type='HeatmapHead',
|
2841 |
-
in_channels=2048,
|
2842 |
-
out_channels=294,
|
2843 |
-
loss=dict(type='KeypointMSELoss', use_target_weight=True),
|
2844 |
-
decoder=dict(
|
2845 |
-
type='MSRAHeatmap',
|
2846 |
-
input_size=(192, 256),
|
2847 |
-
heatmap_size=(48, 64),
|
2848 |
-
sigma=2)),
|
2849 |
-
test_cfg=dict(flip_test=True, flip_mode='heatmap', shift_heatmap=True))
|
2850 |
-
val_evaluator = [
|
2851 |
-
dict(type='PCKAccuracy', thr=0.2),
|
2852 |
-
dict(type='AUC'),
|
2853 |
-
dict(type='EPE')
|
2854 |
-
]
|
2855 |
-
test_evaluator = [
|
2856 |
-
dict(type='PCKAccuracy', thr=0.2),
|
2857 |
-
dict(type='AUC'),
|
2858 |
-
dict(type='EPE')
|
2859 |
-
]
|
2860 |
-
launcher = 'pytorch'
|
2861 |
-
work_dir = './work_dirs/td_hm_res50_4xb64-210e_deepfashion2_sling_dress_256x192'
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AchyuthGamer/OpenGPT-Chat-UI/.svelte-kit/types/src/routes/search/[id]/$types.d.ts
DELETED
@@ -1,9 +0,0 @@
|
|
1 |
-
import type * as Kit from '@sveltejs/kit';
|
2 |
-
|
3 |
-
type Expand<T> = T extends infer O ? { [K in keyof O]: O[K] } : never;
|
4 |
-
type RouteParams = { id: string }
|
5 |
-
type RouteId = '/search/[id]';
|
6 |
-
|
7 |
-
export type EntryGenerator = () => Promise<Array<RouteParams>> | Array<RouteParams>;
|
8 |
-
export type RequestHandler = Kit.RequestHandler<RouteParams, RouteId>;
|
9 |
-
export type RequestEvent = Kit.RequestEvent<RouteParams, RouteId>;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AkshayKumarP/AI-ChatBot/app.py
DELETED
@@ -1,47 +0,0 @@
|
|
1 |
-
from transformers import AutoModelForCausalLM, AutoTokenizer
|
2 |
-
import gradio as gr
|
3 |
-
import torch
|
4 |
-
|
5 |
-
|
6 |
-
title = "????AI ChatBot"
|
7 |
-
description = "A State-of-the-Art Large-scale Pretrained Response generation model (DialoGPT)"
|
8 |
-
examples = [["How are you?"]]
|
9 |
-
|
10 |
-
|
11 |
-
tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-large")
|
12 |
-
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-large")
|
13 |
-
|
14 |
-
|
15 |
-
def predict(input, history=[]):
|
16 |
-
# tokenize the new input sentence
|
17 |
-
new_user_input_ids = tokenizer.encode(
|
18 |
-
input + tokenizer.eos_token, return_tensors="pt"
|
19 |
-
)
|
20 |
-
|
21 |
-
# append the new user input tokens to the chat history
|
22 |
-
bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)
|
23 |
-
|
24 |
-
# generate a response
|
25 |
-
history = model.generate(
|
26 |
-
bot_input_ids, max_length=4000, pad_token_id=tokenizer.eos_token_id
|
27 |
-
).tolist()
|
28 |
-
|
29 |
-
# convert the tokens to text, and then split the responses into lines
|
30 |
-
response = tokenizer.decode(history[0]).split("<|endoftext|>")
|
31 |
-
# print('decoded_response-->>'+str(response))
|
32 |
-
response = [
|
33 |
-
(response[i], response[i + 1]) for i in range(0, len(response) - 1, 2)
|
34 |
-
] # convert to tuples of list
|
35 |
-
# print('response-->>'+str(response))
|
36 |
-
return response, history
|
37 |
-
|
38 |
-
|
39 |
-
gr.Interface(
|
40 |
-
fn=predict,
|
41 |
-
title=title,
|
42 |
-
description=description,
|
43 |
-
examples=examples,
|
44 |
-
inputs=["text", "state"],
|
45 |
-
outputs=["chatbot", "state"],
|
46 |
-
theme="finlaymacklon/boxy_violet",
|
47 |
-
).launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AnandSoni2001/StockMarketPrediction/README.md
DELETED
@@ -1,12 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: StockMarketPrediction
|
3 |
-
emoji: 👀
|
4 |
-
colorFrom: green
|
5 |
-
colorTo: gray
|
6 |
-
sdk: streamlit
|
7 |
-
sdk_version: 1.21.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
---
|
11 |
-
|
12 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/tools/deployment/mmdet2torchserve.py
DELETED
@@ -1,107 +0,0 @@
|
|
1 |
-
from argparse import ArgumentParser, Namespace
|
2 |
-
from pathlib import Path
|
3 |
-
from tempfile import TemporaryDirectory
|
4 |
-
|
5 |
-
import mmcv
|
6 |
-
|
7 |
-
try:
|
8 |
-
from model_archiver.model_packaging import package_model
|
9 |
-
from model_archiver.model_packaging_utils import ModelExportUtils
|
10 |
-
except ImportError:
|
11 |
-
package_model = None
|
12 |
-
|
13 |
-
|
14 |
-
def mmdet2torchserve(
|
15 |
-
config_file: str,
|
16 |
-
checkpoint_file: str,
|
17 |
-
output_folder: str,
|
18 |
-
model_name: str,
|
19 |
-
model_version: str = '1.0',
|
20 |
-
force: bool = False,
|
21 |
-
):
|
22 |
-
"""Converts MMDetection model (config + checkpoint) to TorchServe `.mar`.
|
23 |
-
|
24 |
-
Args:
|
25 |
-
config_file:
|
26 |
-
In MMDetection config format.
|
27 |
-
The contents vary for each task repository.
|
28 |
-
checkpoint_file:
|
29 |
-
In MMDetection checkpoint format.
|
30 |
-
The contents vary for each task repository.
|
31 |
-
output_folder:
|
32 |
-
Folder where `{model_name}.mar` will be created.
|
33 |
-
The file created will be in TorchServe archive format.
|
34 |
-
model_name:
|
35 |
-
If not None, used for naming the `{model_name}.mar` file
|
36 |
-
that will be created under `output_folder`.
|
37 |
-
If None, `{Path(checkpoint_file).stem}` will be used.
|
38 |
-
model_version:
|
39 |
-
Model's version.
|
40 |
-
force:
|
41 |
-
If True, if there is an existing `{model_name}.mar`
|
42 |
-
file under `output_folder` it will be overwritten.
|
43 |
-
"""
|
44 |
-
config = mmcv.Config.fromfile(config_file)
|
45 |
-
|
46 |
-
with TemporaryDirectory() as tmpdir:
|
47 |
-
config.dump(f'{tmpdir}/config.py')
|
48 |
-
|
49 |
-
args = Namespace(
|
50 |
-
**{
|
51 |
-
'model_file': f'{tmpdir}/config.py',
|
52 |
-
'serialized_file': checkpoint_file,
|
53 |
-
'handler': f'{Path(__file__).parent}/mmdet_handler.py',
|
54 |
-
'model_name': model_name or Path(checkpoint_file).stem,
|
55 |
-
'version': model_version,
|
56 |
-
'export_path': output_folder,
|
57 |
-
'force': force,
|
58 |
-
'requirements_file': None,
|
59 |
-
'extra_files': None,
|
60 |
-
'runtime': 'python',
|
61 |
-
'archive_format': 'default'
|
62 |
-
})
|
63 |
-
manifest = ModelExportUtils.generate_manifest_json(args)
|
64 |
-
package_model(args, manifest)
|
65 |
-
|
66 |
-
|
67 |
-
def parse_args():
|
68 |
-
parser = ArgumentParser(
|
69 |
-
description='Convert MMDetection models to TorchServe `.mar` format.')
|
70 |
-
parser.add_argument('config', type=str, help='config file path')
|
71 |
-
parser.add_argument('checkpoint', type=str, help='checkpoint file path')
|
72 |
-
parser.add_argument(
|
73 |
-
'--output-folder',
|
74 |
-
type=str,
|
75 |
-
required=True,
|
76 |
-
help='Folder where `{model_name}.mar` will be created.')
|
77 |
-
parser.add_argument(
|
78 |
-
'--model-name',
|
79 |
-
type=str,
|
80 |
-
default=None,
|
81 |
-
help='If not None, used for naming the `{model_name}.mar`'
|
82 |
-
'file that will be created under `output_folder`.'
|
83 |
-
'If None, `{Path(checkpoint_file).stem}` will be used.')
|
84 |
-
parser.add_argument(
|
85 |
-
'--model-version',
|
86 |
-
type=str,
|
87 |
-
default='1.0',
|
88 |
-
help='Number used for versioning.')
|
89 |
-
parser.add_argument(
|
90 |
-
'-f',
|
91 |
-
'--force',
|
92 |
-
action='store_true',
|
93 |
-
help='overwrite the existing `{model_name}.mar`')
|
94 |
-
args = parser.parse_args()
|
95 |
-
|
96 |
-
return args
|
97 |
-
|
98 |
-
|
99 |
-
if __name__ == '__main__':
|
100 |
-
args = parse_args()
|
101 |
-
|
102 |
-
if package_model is None:
|
103 |
-
raise ImportError('`torch-model-archiver` is required.'
|
104 |
-
'Try: pip install torch-model-archiver')
|
105 |
-
|
106 |
-
mmdet2torchserve(args.config, args.checkpoint, args.output_folder,
|
107 |
-
args.model_name, args.model_version, args.force)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_d6_r101-d16_769x769_40k_cityscapes.py
DELETED
@@ -1,2 +0,0 @@
|
|
1 |
-
_base_ = './fcn_d6_r50-d16_769x769_40k_cityscapes.py'
|
2 |
-
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
|
|
|
|
|
|
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/hooks/ema.py
DELETED
@@ -1,89 +0,0 @@
|
|
1 |
-
# Copyright (c) OpenMMLab. All rights reserved.
|
2 |
-
from ...parallel import is_module_wrapper
|
3 |
-
from ..hooks.hook import HOOKS, Hook
|
4 |
-
|
5 |
-
|
6 |
-
@HOOKS.register_module()
|
7 |
-
class EMAHook(Hook):
|
8 |
-
r"""Exponential Moving Average Hook.
|
9 |
-
|
10 |
-
Use Exponential Moving Average on all parameters of model in training
|
11 |
-
process. All parameters have a ema backup, which update by the formula
|
12 |
-
as below. EMAHook takes priority over EvalHook and CheckpointSaverHook.
|
13 |
-
|
14 |
-
.. math::
|
15 |
-
|
16 |
-
\text{Xema\_{t+1}} = (1 - \text{momentum}) \times
|
17 |
-
\text{Xema\_{t}} + \text{momentum} \times X_t
|
18 |
-
|
19 |
-
Args:
|
20 |
-
momentum (float): The momentum used for updating ema parameter.
|
21 |
-
Defaults to 0.0002.
|
22 |
-
interval (int): Update ema parameter every interval iteration.
|
23 |
-
Defaults to 1.
|
24 |
-
warm_up (int): During first warm_up steps, we may use smaller momentum
|
25 |
-
to update ema parameters more slowly. Defaults to 100.
|
26 |
-
resume_from (str): The checkpoint path. Defaults to None.
|
27 |
-
"""
|
28 |
-
|
29 |
-
def __init__(self,
|
30 |
-
momentum=0.0002,
|
31 |
-
interval=1,
|
32 |
-
warm_up=100,
|
33 |
-
resume_from=None):
|
34 |
-
assert isinstance(interval, int) and interval > 0
|
35 |
-
self.warm_up = warm_up
|
36 |
-
self.interval = interval
|
37 |
-
assert momentum > 0 and momentum < 1
|
38 |
-
self.momentum = momentum**interval
|
39 |
-
self.checkpoint = resume_from
|
40 |
-
|
41 |
-
def before_run(self, runner):
|
42 |
-
"""To resume model with it's ema parameters more friendly.
|
43 |
-
|
44 |
-
Register ema parameter as ``named_buffer`` to model
|
45 |
-
"""
|
46 |
-
model = runner.model
|
47 |
-
if is_module_wrapper(model):
|
48 |
-
model = model.module
|
49 |
-
self.param_ema_buffer = {}
|
50 |
-
self.model_parameters = dict(model.named_parameters(recurse=True))
|
51 |
-
for name, value in self.model_parameters.items():
|
52 |
-
# "." is not allowed in module's buffer name
|
53 |
-
buffer_name = f"ema_{name.replace('.', '_')}"
|
54 |
-
self.param_ema_buffer[name] = buffer_name
|
55 |
-
model.register_buffer(buffer_name, value.data.clone())
|
56 |
-
self.model_buffers = dict(model.named_buffers(recurse=True))
|
57 |
-
if self.checkpoint is not None:
|
58 |
-
runner.resume(self.checkpoint)
|
59 |
-
|
60 |
-
def after_train_iter(self, runner):
|
61 |
-
"""Update ema parameter every self.interval iterations."""
|
62 |
-
curr_step = runner.iter
|
63 |
-
# We warm up the momentum considering the instability at beginning
|
64 |
-
momentum = min(self.momentum,
|
65 |
-
(1 + curr_step) / (self.warm_up + curr_step))
|
66 |
-
if curr_step % self.interval != 0:
|
67 |
-
return
|
68 |
-
for name, parameter in self.model_parameters.items():
|
69 |
-
buffer_name = self.param_ema_buffer[name]
|
70 |
-
buffer_parameter = self.model_buffers[buffer_name]
|
71 |
-
buffer_parameter.mul_(1 - momentum).add_(momentum, parameter.data)
|
72 |
-
|
73 |
-
def after_train_epoch(self, runner):
|
74 |
-
"""We load parameter values from ema backup to model before the
|
75 |
-
EvalHook."""
|
76 |
-
self._swap_ema_parameters()
|
77 |
-
|
78 |
-
def before_train_epoch(self, runner):
|
79 |
-
"""We recover model's parameter from ema backup after last epoch's
|
80 |
-
EvalHook."""
|
81 |
-
self._swap_ema_parameters()
|
82 |
-
|
83 |
-
def _swap_ema_parameters(self):
|
84 |
-
"""Swap the parameter of model with parameter in ema_buffer."""
|
85 |
-
for name, value in self.model_parameters.items():
|
86 |
-
temp = value.data.clone()
|
87 |
-
ema_buffer = self.model_buffers[self.param_ema_buffer[name]]
|
88 |
-
value.data.copy_(ema_buffer.data)
|
89 |
-
ema_buffer.data.copy_(temp)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Anustup/NS_AI_LABS/src/download.py
DELETED
@@ -1,72 +0,0 @@
|
|
1 |
-
from tempfile import mkdtemp
|
2 |
-
from typing import List
|
3 |
-
from yt_dlp import YoutubeDL
|
4 |
-
|
5 |
-
import yt_dlp
|
6 |
-
from yt_dlp.postprocessor import PostProcessor
|
7 |
-
|
8 |
-
class FilenameCollectorPP(PostProcessor):
|
9 |
-
def __init__(self):
|
10 |
-
super(FilenameCollectorPP, self).__init__(None)
|
11 |
-
self.filenames = []
|
12 |
-
|
13 |
-
def run(self, information):
|
14 |
-
self.filenames.append(information["filepath"])
|
15 |
-
return [], information
|
16 |
-
|
17 |
-
def download_url(url: str, maxDuration: int = None, destinationDirectory: str = None, playlistItems: str = "1") -> List[str]:
|
18 |
-
try:
|
19 |
-
return _perform_download(url, maxDuration=maxDuration, outputTemplate=None, destinationDirectory=destinationDirectory, playlistItems=playlistItems)
|
20 |
-
except yt_dlp.utils.DownloadError as e:
|
21 |
-
# In case of an OS error, try again with a different output template
|
22 |
-
if e.msg and e.msg.find("[Errno 36] File name too long") >= 0:
|
23 |
-
return _perform_download(url, maxDuration=maxDuration, outputTemplate="%(title).10s %(id)s.%(ext)s")
|
24 |
-
pass
|
25 |
-
|
26 |
-
def _perform_download(url: str, maxDuration: int = None, outputTemplate: str = None, destinationDirectory: str = None, playlistItems: str = "1"):
|
27 |
-
# Create a temporary directory to store the downloaded files
|
28 |
-
if destinationDirectory is None:
|
29 |
-
destinationDirectory = mkdtemp()
|
30 |
-
|
31 |
-
ydl_opts = {
|
32 |
-
"format": "bestaudio/best",
|
33 |
-
'paths': {
|
34 |
-
'home': destinationDirectory
|
35 |
-
}
|
36 |
-
}
|
37 |
-
if (playlistItems):
|
38 |
-
ydl_opts['playlist_items'] = playlistItems
|
39 |
-
|
40 |
-
# Add output template if specified
|
41 |
-
if outputTemplate:
|
42 |
-
ydl_opts['outtmpl'] = outputTemplate
|
43 |
-
|
44 |
-
filename_collector = FilenameCollectorPP()
|
45 |
-
|
46 |
-
with YoutubeDL(ydl_opts) as ydl:
|
47 |
-
if maxDuration and maxDuration > 0:
|
48 |
-
info = ydl.extract_info(url, download=False)
|
49 |
-
duration = info['duration']
|
50 |
-
|
51 |
-
if duration >= maxDuration:
|
52 |
-
raise ExceededMaximumDuration(videoDuration=duration, maxDuration=maxDuration, message="Video is too long")
|
53 |
-
|
54 |
-
ydl.add_post_processor(filename_collector)
|
55 |
-
ydl.download([url])
|
56 |
-
|
57 |
-
if len(filename_collector.filenames) <= 0:
|
58 |
-
raise Exception("Cannot download " + url)
|
59 |
-
|
60 |
-
result = []
|
61 |
-
|
62 |
-
for filename in filename_collector.filenames:
|
63 |
-
result.append(filename)
|
64 |
-
print("Downloaded " + filename)
|
65 |
-
|
66 |
-
return result
|
67 |
-
|
68 |
-
class ExceededMaximumDuration(Exception):
|
69 |
-
def __init__(self, videoDuration, maxDuration, message):
|
70 |
-
self.videoDuration = videoDuration
|
71 |
-
self.maxDuration = maxDuration
|
72 |
-
super().__init__(message)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/ArkanDash/rvc-models-new/lib/infer_pack/transforms.py
DELETED
@@ -1,209 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
from torch.nn import functional as F
|
3 |
-
|
4 |
-
import numpy as np
|
5 |
-
|
6 |
-
|
7 |
-
DEFAULT_MIN_BIN_WIDTH = 1e-3
|
8 |
-
DEFAULT_MIN_BIN_HEIGHT = 1e-3
|
9 |
-
DEFAULT_MIN_DERIVATIVE = 1e-3
|
10 |
-
|
11 |
-
|
12 |
-
def piecewise_rational_quadratic_transform(
|
13 |
-
inputs,
|
14 |
-
unnormalized_widths,
|
15 |
-
unnormalized_heights,
|
16 |
-
unnormalized_derivatives,
|
17 |
-
inverse=False,
|
18 |
-
tails=None,
|
19 |
-
tail_bound=1.0,
|
20 |
-
min_bin_width=DEFAULT_MIN_BIN_WIDTH,
|
21 |
-
min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
|
22 |
-
min_derivative=DEFAULT_MIN_DERIVATIVE,
|
23 |
-
):
|
24 |
-
if tails is None:
|
25 |
-
spline_fn = rational_quadratic_spline
|
26 |
-
spline_kwargs = {}
|
27 |
-
else:
|
28 |
-
spline_fn = unconstrained_rational_quadratic_spline
|
29 |
-
spline_kwargs = {"tails": tails, "tail_bound": tail_bound}
|
30 |
-
|
31 |
-
outputs, logabsdet = spline_fn(
|
32 |
-
inputs=inputs,
|
33 |
-
unnormalized_widths=unnormalized_widths,
|
34 |
-
unnormalized_heights=unnormalized_heights,
|
35 |
-
unnormalized_derivatives=unnormalized_derivatives,
|
36 |
-
inverse=inverse,
|
37 |
-
min_bin_width=min_bin_width,
|
38 |
-
min_bin_height=min_bin_height,
|
39 |
-
min_derivative=min_derivative,
|
40 |
-
**spline_kwargs
|
41 |
-
)
|
42 |
-
return outputs, logabsdet
|
43 |
-
|
44 |
-
|
45 |
-
def searchsorted(bin_locations, inputs, eps=1e-6):
|
46 |
-
bin_locations[..., -1] += eps
|
47 |
-
return torch.sum(inputs[..., None] >= bin_locations, dim=-1) - 1
|
48 |
-
|
49 |
-
|
50 |
-
def unconstrained_rational_quadratic_spline(
|
51 |
-
inputs,
|
52 |
-
unnormalized_widths,
|
53 |
-
unnormalized_heights,
|
54 |
-
unnormalized_derivatives,
|
55 |
-
inverse=False,
|
56 |
-
tails="linear",
|
57 |
-
tail_bound=1.0,
|
58 |
-
min_bin_width=DEFAULT_MIN_BIN_WIDTH,
|
59 |
-
min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
|
60 |
-
min_derivative=DEFAULT_MIN_DERIVATIVE,
|
61 |
-
):
|
62 |
-
inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
|
63 |
-
outside_interval_mask = ~inside_interval_mask
|
64 |
-
|
65 |
-
outputs = torch.zeros_like(inputs)
|
66 |
-
logabsdet = torch.zeros_like(inputs)
|
67 |
-
|
68 |
-
if tails == "linear":
|
69 |
-
unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
|
70 |
-
constant = np.log(np.exp(1 - min_derivative) - 1)
|
71 |
-
unnormalized_derivatives[..., 0] = constant
|
72 |
-
unnormalized_derivatives[..., -1] = constant
|
73 |
-
|
74 |
-
outputs[outside_interval_mask] = inputs[outside_interval_mask]
|
75 |
-
logabsdet[outside_interval_mask] = 0
|
76 |
-
else:
|
77 |
-
raise RuntimeError("{} tails are not implemented.".format(tails))
|
78 |
-
|
79 |
-
(
|
80 |
-
outputs[inside_interval_mask],
|
81 |
-
logabsdet[inside_interval_mask],
|
82 |
-
) = rational_quadratic_spline(
|
83 |
-
inputs=inputs[inside_interval_mask],
|
84 |
-
unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
|
85 |
-
unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
|
86 |
-
unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
|
87 |
-
inverse=inverse,
|
88 |
-
left=-tail_bound,
|
89 |
-
right=tail_bound,
|
90 |
-
bottom=-tail_bound,
|
91 |
-
top=tail_bound,
|
92 |
-
min_bin_width=min_bin_width,
|
93 |
-
min_bin_height=min_bin_height,
|
94 |
-
min_derivative=min_derivative,
|
95 |
-
)
|
96 |
-
|
97 |
-
return outputs, logabsdet
|
98 |
-
|
99 |
-
|
100 |
-
def rational_quadratic_spline(
|
101 |
-
inputs,
|
102 |
-
unnormalized_widths,
|
103 |
-
unnormalized_heights,
|
104 |
-
unnormalized_derivatives,
|
105 |
-
inverse=False,
|
106 |
-
left=0.0,
|
107 |
-
right=1.0,
|
108 |
-
bottom=0.0,
|
109 |
-
top=1.0,
|
110 |
-
min_bin_width=DEFAULT_MIN_BIN_WIDTH,
|
111 |
-
min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
|
112 |
-
min_derivative=DEFAULT_MIN_DERIVATIVE,
|
113 |
-
):
|
114 |
-
if torch.min(inputs) < left or torch.max(inputs) > right:
|
115 |
-
raise ValueError("Input to a transform is not within its domain")
|
116 |
-
|
117 |
-
num_bins = unnormalized_widths.shape[-1]
|
118 |
-
|
119 |
-
if min_bin_width * num_bins > 1.0:
|
120 |
-
raise ValueError("Minimal bin width too large for the number of bins")
|
121 |
-
if min_bin_height * num_bins > 1.0:
|
122 |
-
raise ValueError("Minimal bin height too large for the number of bins")
|
123 |
-
|
124 |
-
widths = F.softmax(unnormalized_widths, dim=-1)
|
125 |
-
widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
|
126 |
-
cumwidths = torch.cumsum(widths, dim=-1)
|
127 |
-
cumwidths = F.pad(cumwidths, pad=(1, 0), mode="constant", value=0.0)
|
128 |
-
cumwidths = (right - left) * cumwidths + left
|
129 |
-
cumwidths[..., 0] = left
|
130 |
-
cumwidths[..., -1] = right
|
131 |
-
widths = cumwidths[..., 1:] - cumwidths[..., :-1]
|
132 |
-
|
133 |
-
derivatives = min_derivative + F.softplus(unnormalized_derivatives)
|
134 |
-
|
135 |
-
heights = F.softmax(unnormalized_heights, dim=-1)
|
136 |
-
heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
|
137 |
-
cumheights = torch.cumsum(heights, dim=-1)
|
138 |
-
cumheights = F.pad(cumheights, pad=(1, 0), mode="constant", value=0.0)
|
139 |
-
cumheights = (top - bottom) * cumheights + bottom
|
140 |
-
cumheights[..., 0] = bottom
|
141 |
-
cumheights[..., -1] = top
|
142 |
-
heights = cumheights[..., 1:] - cumheights[..., :-1]
|
143 |
-
|
144 |
-
if inverse:
|
145 |
-
bin_idx = searchsorted(cumheights, inputs)[..., None]
|
146 |
-
else:
|
147 |
-
bin_idx = searchsorted(cumwidths, inputs)[..., None]
|
148 |
-
|
149 |
-
input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
|
150 |
-
input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
|
151 |
-
|
152 |
-
input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
|
153 |
-
delta = heights / widths
|
154 |
-
input_delta = delta.gather(-1, bin_idx)[..., 0]
|
155 |
-
|
156 |
-
input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
|
157 |
-
input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]
|
158 |
-
|
159 |
-
input_heights = heights.gather(-1, bin_idx)[..., 0]
|
160 |
-
|
161 |
-
if inverse:
|
162 |
-
a = (inputs - input_cumheights) * (
|
163 |
-
input_derivatives + input_derivatives_plus_one - 2 * input_delta
|
164 |
-
) + input_heights * (input_delta - input_derivatives)
|
165 |
-
b = input_heights * input_derivatives - (inputs - input_cumheights) * (
|
166 |
-
input_derivatives + input_derivatives_plus_one - 2 * input_delta
|
167 |
-
)
|
168 |
-
c = -input_delta * (inputs - input_cumheights)
|
169 |
-
|
170 |
-
discriminant = b.pow(2) - 4 * a * c
|
171 |
-
assert (discriminant >= 0).all()
|
172 |
-
|
173 |
-
root = (2 * c) / (-b - torch.sqrt(discriminant))
|
174 |
-
outputs = root * input_bin_widths + input_cumwidths
|
175 |
-
|
176 |
-
theta_one_minus_theta = root * (1 - root)
|
177 |
-
denominator = input_delta + (
|
178 |
-
(input_derivatives + input_derivatives_plus_one - 2 * input_delta)
|
179 |
-
* theta_one_minus_theta
|
180 |
-
)
|
181 |
-
derivative_numerator = input_delta.pow(2) * (
|
182 |
-
input_derivatives_plus_one * root.pow(2)
|
183 |
-
+ 2 * input_delta * theta_one_minus_theta
|
184 |
-
+ input_derivatives * (1 - root).pow(2)
|
185 |
-
)
|
186 |
-
logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
|
187 |
-
|
188 |
-
return outputs, -logabsdet
|
189 |
-
else:
|
190 |
-
theta = (inputs - input_cumwidths) / input_bin_widths
|
191 |
-
theta_one_minus_theta = theta * (1 - theta)
|
192 |
-
|
193 |
-
numerator = input_heights * (
|
194 |
-
input_delta * theta.pow(2) + input_derivatives * theta_one_minus_theta
|
195 |
-
)
|
196 |
-
denominator = input_delta + (
|
197 |
-
(input_derivatives + input_derivatives_plus_one - 2 * input_delta)
|
198 |
-
* theta_one_minus_theta
|
199 |
-
)
|
200 |
-
outputs = input_cumheights + numerator / denominator
|
201 |
-
|
202 |
-
derivative_numerator = input_delta.pow(2) * (
|
203 |
-
input_derivatives_plus_one * theta.pow(2)
|
204 |
-
+ 2 * input_delta * theta_one_minus_theta
|
205 |
-
+ input_derivatives * (1 - theta).pow(2)
|
206 |
-
)
|
207 |
-
logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
|
208 |
-
|
209 |
-
return outputs, logabsdet
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AutoGeneralAI/ChatGPT/README_cn.md
DELETED
@@ -1,12 +0,0 @@
|
|
1 |
-
# ChatGPT
|
2 |
-
|
3 |
-
GUI for ChatGPT API在线体验网址 https://huggingface.co/spaces/AutoGeneralAI/ChatGPT
|
4 |
-
|
5 |
-
## 使用方法
|
6 |
-
将自己的OpenAI API KEY https://platform.openai.com/
|
7 |
-
放入key输入框,然后就可以愉快的使用ChatGPT,愉快的对话了。
|
8 |
-
|
9 |
-
本项目不是web版的ChatGPT,而是调用其官方API的GUI for ChatGPT API。
|
10 |
-
|
11 |
-

|
12 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Bart92/RVC_HF/infer/modules/train/extract_feature_print.py
DELETED
@@ -1,137 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
import sys
|
3 |
-
import traceback
|
4 |
-
|
5 |
-
os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
|
6 |
-
os.environ["PYTORCH_MPS_HIGH_WATERMARK_RATIO"] = "0.0"
|
7 |
-
|
8 |
-
device = sys.argv[1]
|
9 |
-
n_part = int(sys.argv[2])
|
10 |
-
i_part = int(sys.argv[3])
|
11 |
-
if len(sys.argv) == 6:
|
12 |
-
exp_dir = sys.argv[4]
|
13 |
-
version = sys.argv[5]
|
14 |
-
else:
|
15 |
-
i_gpu = sys.argv[4]
|
16 |
-
exp_dir = sys.argv[5]
|
17 |
-
os.environ["CUDA_VISIBLE_DEVICES"] = str(i_gpu)
|
18 |
-
version = sys.argv[6]
|
19 |
-
import fairseq
|
20 |
-
import numpy as np
|
21 |
-
import soundfile as sf
|
22 |
-
import torch
|
23 |
-
import torch.nn.functional as F
|
24 |
-
|
25 |
-
if "privateuseone" not in device:
|
26 |
-
device = "cpu"
|
27 |
-
if torch.cuda.is_available():
|
28 |
-
device = "cuda"
|
29 |
-
elif torch.backends.mps.is_available():
|
30 |
-
device = "mps"
|
31 |
-
else:
|
32 |
-
import torch_directml
|
33 |
-
|
34 |
-
device = torch_directml.device(torch_directml.default_device())
|
35 |
-
|
36 |
-
def forward_dml(ctx, x, scale):
|
37 |
-
ctx.scale = scale
|
38 |
-
res = x.clone().detach()
|
39 |
-
return res
|
40 |
-
|
41 |
-
fairseq.modules.grad_multiply.GradMultiply.forward = forward_dml
|
42 |
-
|
43 |
-
f = open("%s/extract_f0_feature.log" % exp_dir, "a+")
|
44 |
-
|
45 |
-
|
46 |
-
def printt(strr):
|
47 |
-
print(strr)
|
48 |
-
f.write("%s\n" % strr)
|
49 |
-
f.flush()
|
50 |
-
|
51 |
-
|
52 |
-
printt(sys.argv)
|
53 |
-
model_path = "assets/hubert/hubert_base.pt"
|
54 |
-
|
55 |
-
printt(exp_dir)
|
56 |
-
wavPath = "%s/1_16k_wavs" % exp_dir
|
57 |
-
outPath = (
|
58 |
-
"%s/3_feature256" % exp_dir if version == "v1" else "%s/3_feature768" % exp_dir
|
59 |
-
)
|
60 |
-
os.makedirs(outPath, exist_ok=True)
|
61 |
-
|
62 |
-
|
63 |
-
# wave must be 16k, hop_size=320
|
64 |
-
def readwave(wav_path, normalize=False):
|
65 |
-
wav, sr = sf.read(wav_path)
|
66 |
-
assert sr == 16000
|
67 |
-
feats = torch.from_numpy(wav).float()
|
68 |
-
if feats.dim() == 2: # double channels
|
69 |
-
feats = feats.mean(-1)
|
70 |
-
assert feats.dim() == 1, feats.dim()
|
71 |
-
if normalize:
|
72 |
-
with torch.no_grad():
|
73 |
-
feats = F.layer_norm(feats, feats.shape)
|
74 |
-
feats = feats.view(1, -1)
|
75 |
-
return feats
|
76 |
-
|
77 |
-
|
78 |
-
# HuBERT model
|
79 |
-
printt("load model(s) from {}".format(model_path))
|
80 |
-
# if hubert model is exist
|
81 |
-
if os.access(model_path, os.F_OK) == False:
|
82 |
-
printt(
|
83 |
-
"Error: Extracting is shut down because %s does not exist, you may download it from https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main"
|
84 |
-
% model_path
|
85 |
-
)
|
86 |
-
exit(0)
|
87 |
-
models, saved_cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task(
|
88 |
-
[model_path],
|
89 |
-
suffix="",
|
90 |
-
)
|
91 |
-
model = models[0]
|
92 |
-
model = model.to(device)
|
93 |
-
printt("move model to %s" % device)
|
94 |
-
if device not in ["mps", "cpu"]:
|
95 |
-
model = model.half()
|
96 |
-
model.eval()
|
97 |
-
|
98 |
-
todo = sorted(list(os.listdir(wavPath)))[i_part::n_part]
|
99 |
-
n = max(1, len(todo) // 10) # 最多打印十条
|
100 |
-
if len(todo) == 0:
|
101 |
-
printt("no-feature-todo")
|
102 |
-
else:
|
103 |
-
printt("all-feature-%s" % len(todo))
|
104 |
-
for idx, file in enumerate(todo):
|
105 |
-
try:
|
106 |
-
if file.endswith(".wav"):
|
107 |
-
wav_path = "%s/%s" % (wavPath, file)
|
108 |
-
out_path = "%s/%s" % (outPath, file.replace("wav", "npy"))
|
109 |
-
|
110 |
-
if os.path.exists(out_path):
|
111 |
-
continue
|
112 |
-
|
113 |
-
feats = readwave(wav_path, normalize=saved_cfg.task.normalize)
|
114 |
-
padding_mask = torch.BoolTensor(feats.shape).fill_(False)
|
115 |
-
inputs = {
|
116 |
-
"source": feats.half().to(device)
|
117 |
-
if device not in ["mps", "cpu"]
|
118 |
-
else feats.to(device),
|
119 |
-
"padding_mask": padding_mask.to(device),
|
120 |
-
"output_layer": 9 if version == "v1" else 12, # layer 9
|
121 |
-
}
|
122 |
-
with torch.no_grad():
|
123 |
-
logits = model.extract_features(**inputs)
|
124 |
-
feats = (
|
125 |
-
model.final_proj(logits[0]) if version == "v1" else logits[0]
|
126 |
-
)
|
127 |
-
|
128 |
-
feats = feats.squeeze(0).float().cpu().numpy()
|
129 |
-
if np.isnan(feats).sum() == 0:
|
130 |
-
np.save(out_path, feats, allow_pickle=False)
|
131 |
-
else:
|
132 |
-
printt("%s-contains nan" % file)
|
133 |
-
if idx % n == 0:
|
134 |
-
printt("now-%s,all-%s,%s,%s" % (len(todo), idx, file, feats.shape))
|
135 |
-
except:
|
136 |
-
printt(traceback.format_exc())
|
137 |
-
printt("all-feature-done")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Como Hacer Una Hoja De Papel.md
DELETED
@@ -1,83 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Cómo descargar en HBO Go: Una guía completa</h1>
|
3 |
-
<p>Si eres un fan de los programas y películas de HBO, es posible que quieras descargarlos en tu teléfono o tableta y verlos sin conexión. De esta manera, podrás disfrutar de tu contenido favorito sin preocuparte por la conexión a Internet o el uso de datos. Pero ¿cómo descargarlo en HBO Go? ¿Y cuáles son las diferencias entre HBO Go, HBO Max y HBO Now? En este artículo, responderemos estas preguntas y más. También te daremos algunos consejos y trucos para descargar en HBO Go.</p>
|
4 |
-
<h2>¿Qué es HBO Go y cómo funciona? </h2>
|
5 |
-
<p>HBO Go es un servicio de streaming que te permite ver toda la programación de HBO en tus dispositivos. Incluye series originales, películas, documentales, especiales y más. También puede acceder a algunos contenidos de otras propiedades de WarnerMedia, como DC, Cartoon Network y Turner Classic Movies.</p>
|
6 |
-
<h2>como hacer una hoja de papel</h2><br /><p><b><b>Download</b> --->>> <a href="https://bltlly.com/2v6Mdv">https://bltlly.com/2v6Mdv</a></b></p><br /><br />
|
7 |
-
<p>Para usar HBO Go, necesita tener una suscripción a HBO a través de su proveedor de TV. A continuación, puede iniciar sesión en la aplicación o sitio web con los detalles de su cuenta de proveedor de TV. También puede usar su cuenta para acceder a HBO Max, que es un servicio mejorado que ofrece más contenido y características. </p>
|
8 |
-
<h3>HBO Go vs HBO Max vs HBO Ahora</h3>
|
9 |
-
<p>HBO Go no es el único servicio de transmisión de HBO. También hay HBO Max y HBO Now. Aquí están las principales diferencias entre ellos:</p>
|
10 |
-
<ul>
|
11 |
-
<li>HBO Max es un servicio independiente que cuesta $15 al mes. Incluye todo el contenido de HBO, además de originales exclusivos y una selección más amplia de películas y programas de otras marcas WarnerMedia. También ofrece streaming 4K, visualización offline, perfiles, controles parentales y más. </li>
|
12 |
-
<li>HBO Now es un servicio heredado que fue renombrado como "HBO". Cuesta $15 al mes y ofrece el mismo contenido que HBO Go, pero sin requerir una suscripción a un proveedor de TV. Sin embargo, no incluye ninguno de los contenidos adicionales o características de HBO Max.</li>
|
13 |
-
|
14 |
-
</ul>
|
15 |
-
<p>En la mayoría de los casos, si tiene una suscripción a HBO a través de su proveedor de TV, puede acceder a HBO Max sin costo adicional. Puede consultar el sitio web <a href="( 1 )">HBO Max</a> para ver si su suscripción es elegible. </p>
|
16 |
-
<h3>Dispositivos compatibles con HBO Go</h3>
|
17 |
-
<p>HBO Go es compatible con múltiples dispositivos. Los dispositivos compatibles incluyen:</p>
|
18 |
-
<ul>
|
19 |
-
<li>Teléfonos móviles y tabletas (Android e iOS)</li>
|
20 |
-
<li>Apple TV</li>
|
21 |
-
<li>Google Chromecast</li>
|
22 |
-
<li>Amazon Fire TV</li>
|
23 |
-
<li>Samsung Smart TV</li>
|
24 |
-
<li>PlayStation</li>
|
25 |
-
<li>Xbox</li>
|
26 |
-
<li>Android TV</li>
|
27 |
-
<li>Roku</li>
|
28 |
-
<li>TiVo</li>
|
29 |
-
</ul>
|
30 |
-
<p>Para usar HBO Vaya a su dispositivo, necesita descargar la aplicación desde la tienda de aplicaciones o visitar <a href="( 2 )">HBOGO.com</a>. También necesitas activar tu dispositivo ingresando un código en <a href="( 12 )">HBOGo.com/Activate</a>. </p>
|
31 |
-
<h3>Planes de suscripción de HBO Go</h3>
|
32 |
-
<p>HBO Go no tiene planes de suscripción separados. Se incluye con su suscripción de HBO existente a través de su proveedor de TV. El costo de su suscripción puede variar dependiendo de su proveedor y paquete. </p>
|
33 |
-
<p></p>
|
34 |
-
<p>Si no tienes una suscripción a HBO, puedes suscribirte a HBO Max, que ofrece el mismo contenido que HBO Go, además de más. También puede obtener una prueba gratuita de HBO Max durante 7 días. </p>
|
35 |
-
<h2>Cómo descargar programas y películas en HBO Go</h2>
|
36 |
-
<p>Uno de los beneficios de HBO Go es que puede descargar programas y películas en sus dispositivos móviles y verlos sin conexión. Esto es útil cuando viaja, viaja o está en un lugar sin acceso a Internet. Estos son los pasos para descargar en HBO Go:</p>
|
37 |
-
<h3>Paso 1: Abra la aplicación HBO Go e inicie sesión</h3>
|
38 |
-
<p>El primer paso es abrir la aplicación HBO Go en su teléfono o tableta. Si no tiene la aplicación, puede descargarla desde la tienda de aplicaciones. Luego, inicie sesión con los detalles de su cuenta de proveedor de TV. Verá la pantalla de inicio con diferentes categorías y recomendaciones. </p>
|
39 |
-
<h3>Paso 2: Busca el título que quieres descargar</h3>
|
40 |
-
|
41 |
-
<p>No todos los títulos están disponibles para su descarga en HBO Go. Puede saber si un título se puede descargar buscando un icono de descarga junto a él. El icono parece una flecha hacia abajo con una línea debajo. </p>
|
42 |
-
<h3>Paso 3: Toca el icono de descarga</h3>
|
43 |
-
<p>Una vez que encuentre el título que desea descargar, toque en él para abrir su página de detalles. A continuación, toque en el icono de descarga junto al episodio o película que desea descargar. Verá una barra de progreso que muestra el estado de descarga. </p>
|
44 |
-
<p>Puedes descargar hasta 15 títulos a la vez en HBO Go. También puedes pausar o cancelar descargas pulsando en los iconos junto a ellos. </p>
|
45 |
-
<h3>Paso 4: Ir a Mis descargas para ver sus descargas</h3>
|
46 |
-
<p>Después de que tus descargas estén completas, puedes ir a Mis descargas para verlas sin conexión. Para acceder a Mis descargas, toque en el icono del menú en la esquina superior izquierda de la pantalla y, a continuación, toque en Mis descargas. Verá una lista de sus títulos descargados, ordenados por fecha de vencimiento. </p>
|
47 |
-
<p>Puedes ver tus descargas en cualquier momento, en cualquier lugar, sin conexión a Internet. Sin embargo, debe iniciar sesión en HBO Go y tener una suscripción activa. También necesitas renovar tus descargas cada 30 días conectándote a internet. </p>
|
48 |
-
<h2>Consejos y trucos para descargar en HBO Go</h2>
|
49 |
-
<p>Para aprovechar al máximo su experiencia de descarga en HBO Go, aquí hay algunos consejos y trucos que debe saber:</p>
|
50 |
-
<h3>Cómo cambiar la calidad de descarga</h3>
|
51 |
-
<p>Por defecto, HBO Go descarga títulos en calidad estándar (SD), que utiliza menos espacio de almacenamiento y datos. Sin embargo, si prefiere mayor calidad (HD), puede cambiarlo en la configuración. Para hacerlo, toca el icono del menú en la esquina superior izquierda de la pantalla, luego toca Configuración, luego Opciones de vídeo y luego Calidad de descarga. Puede elegir entre SD y HD.</p>
|
52 |
-
<p>Tenga en cuenta que las descargas HD tomarán más tiempo y usarán más espacio de almacenamiento y datos que las descargas SD. </p>
|
53 |
-
<h3>Cómo renovar o eliminar descargas</h3>
|
54 |
-
|
55 |
-
<p>Si desea eliminar sus descargas, puede hacerlo tocando en Editar en la esquina superior derecha de Mis descargas, luego seleccionando los títulos que desea eliminar, luego tocando en Eliminar en la parte inferior de la pantalla. </p>
|
56 |
-
<h3>Cómo descargar a través de una red móvil</h3>
|
57 |
-
<p>Por defecto, HBO Go solo le permite descargar a través de una red Wi-Fi, que guarda el uso de sus datos. Sin embargo, si desea descargar a través de una red móvil, puede habilitarlo en la configuración. Para hacerlo, toca el icono del menú en la esquina superior izquierda de la pantalla, luego toca Configuración, luego Opciones de vídeo y luego Descarga de redes móviles. Puedes activarlo o desactivarlo. </p>
|
58 |
-
<p>Tenga en cuenta que la descarga a través de una red móvil utilizará su plan de datos y puede incurrir en cargos adicionales de su proveedor. </p>
|
59 |
-
<h2>Conclusión</h2>
|
60 |
-
<p>HBO Go es una gran manera de ver todo el contenido de HBO en sus dispositivos. También le permite descargar programas y películas y verlas sin conexión. En este artículo, te mostramos cómo descargar en HBO Go y te dimos algunos consejos y trucos para hacerlo. </p>
|
61 |
-
<p>Esperamos que este artículo sea útil e informativo. Ahora, veamos algunas preguntas frecuentes que puede tener sobre la descarga en HBO Go.</p>
|
62 |
-
<h2>Preguntas frecuentes</h2>
|
63 |
-
<p>Aquí están algunas de las preguntas más frecuentes sobre la descarga en HBO Go:</p>
|
64 |
-
<h3>¿Cuántos títulos puedo descargar en HBO Go? </h3>
|
65 |
-
<p>Puedes descargar hasta 15 títulos a la vez en HBO Go. También puedes descargar el mismo título en hasta tres dispositivos. </p>
|
66 |
-
<h3> ¿Cuánto espacio de almacenamiento necesito para las descargas en HBO Go? </h3>
|
67 |
-
<p>El espacio de almacenamiento que necesitas para las descargas en HBO Go depende de la calidad y duración de los títulos que descargues. Generalmente, las descargas SD usan aproximadamente 0.5 GB por hora, mientras que las descargas HD usan aproximadamente 1 GB por hora. Puede comprobar el tamaño de sus descargas pulsando en el icono de información junto a ellos en Mis descargas.</p>
|
68 |
-
<h3>¿Puedo ver descargas en otros dispositivos o compartirlas con otros? </h3>
|
69 |
-
|
70 |
-
<h3>¿Puedo descargar los títulos que están saliendo de HBO Go pronto? </h3>
|
71 |
-
<p>Sí, puedes descargar títulos que están saliendo de HBO Go pronto, siempre y cuando todavía estén disponibles en el servicio. Sin embargo, no podrá verlos después de que expiren, incluso si se descargan. Verás una notificación si alguna de tus descargas expira pronto. </p>
|
72 |
-
<h3>¿Qué pasa si tengo problemas para descargar o ver descargas en HBO Go? </h3>
|
73 |
-
<p>Si tiene problemas para descargar o ver descargas en HBO Go, puede probar algunos de estos pasos de solución de problemas:</p>
|
74 |
-
<ul>
|
75 |
-
<li> Asegúrese de que tiene suficiente espacio de almacenamiento y duración de la batería en su dispositivo. </li>
|
76 |
-
<li>Asegúrese de tener una conexión a Internet estable al descargar o renovar descargas. </li>
|
77 |
-
<li>Asegúrese de tener la última versión de la aplicación HBO Go y el software de su dispositivo. </li>
|
78 |
-
<li>Reiniciar el dispositivo y la aplicación HBO Go. </li>
|
79 |
-
<li>Eliminar y reinstalar la aplicación HBO Go. </li>
|
80 |
-
<li>Póngase en contacto con el servicio de atención al cliente de HBO Go. </li>
|
81 |
-
</ul></p> 64aa2da5cf<br />
|
82 |
-
<br />
|
83 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Descargar Clash Royale En El Ordenador.md
DELETED
@@ -1,116 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Cómo descargar Clash Royale en el ordenador</h1>
|
3 |
-
<p>¿Te encanta jugar juegos de estrategia en tu dispositivo móvil? ¿Quieres experimentar un juego de ritmo rápido y adictivo que combina la recolección de cartas, defensa de torres y batallas en tiempo real? Si respondiste que sí, entonces definitivamente deberías probar <strong>Clash Royale</strong>, uno de los juegos más populares y exitosos de Supercell, los creadores de <strong>Clash of Clans</strong>. </p>
|
4 |
-
<h2>descargar clash royale en el ordenador</h2><br /><p><b><b>DOWNLOAD</b> ✔ <a href="https://bltlly.com/2v6LXo">https://bltlly.com/2v6LXo</a></b></p><br /><br />
|
5 |
-
<p>Pero ¿qué pasa si quieres jugar Clash Royale en una pantalla más grande, con mejores gráficos, un rendimiento más suave y controles más cómodos? ¡Bueno, puedes hacer eso también! En este artículo, te mostraremos cómo descargar Clash Royale en tu computadora usando un emulador de Android y cómo jugarlo como un profesional. ¡Vamos a empezar! </p>
|
6 |
-
<h2>¿Qué es Clash Royale? </h2>
|
7 |
-
<h3>Una breve introducción al juego y sus características</h3>
|
8 |
-
<p>Clash Royale es un juego multijugador en tiempo real que cuenta con tus personajes favoritos del universo Clash. Puedes recoger y actualizar docenas de cartas con las tropas, hechizos y defensas de Clash of Clans, así como los Royales: Princes, Knights, Baby Dragons y más. También puede desbloquear nuevas tarjetas a medida que avanza a través de diferentes arenas. </p>
|
9 |
-
<p>El objetivo del juego es derribar el enemigo Rey y Princesas de sus torres usando sus cartas. También puedes defender tus propias torres de los ataques enemigos. Cada partida dura tres minutos o hasta que un jugador destruye la torre del rey enemigo. Puedes ganar trofeos, coronas, cofres, oro, gemas y cartas al ganar partidas. </p>
|
10 |
-
<p>El juego también ofrece varios modos y eventos para que usted disfrute. Puedes unirte o crear un clan con otros jugadores para compartir cartas y luchar en guerras de clanes. Puedes participar en desafíos y torneos de temporada para ganar recompensas y gloria. También puedes personalizar tus torres con skins, emotes y objetos mágicos. </p>
|
11 |
-
<h3>¿Por qué es popular y divertido jugar? </h3>
|
12 |
-
|
13 |
-
<p>El juego también requiere que pienses rápido y actúes inteligente en batallas en tiempo real contra jugadores de todo el mundo. Tienes que manejar tu elixir sabiamente, desplegar tus cartas estratégicamente, contrarrestar los movimientos de tu enemigo y adaptarte a situaciones cambiantes. ¡Nunca se sabe lo que pasará en un partido de Clash Royale! </p>
|
14 |
-
<p>Además, el juego se actualiza constantemente con nuevas características, cartas, modos, eventos y cambios de equilibrio que lo mantienen fresco y emocionante. Siempre hay algo nuevo que descubrir y dominar en Clash Royale. Si usted es un jugador casual o un jugador competitivo, encontrará algo que se adapte a su gusto y nivel de habilidad en este juego. </p>
|
15 |
-
<p></p>
|
16 |
-
<h2>Cómo descargar Clash Royale en PC</h2>
|
17 |
-
<h3>Los beneficios de jugar Clash Royale en PC</h3>
|
18 |
-
<p>Aunque Clash Royale está diseñado principalmente para dispositivos móviles, también puedes reproducirlo en tu ordenador usando un emulador de Android. Un emulador de Android es un software que le permite ejecutar aplicaciones y juegos de Android en su PC. Hay muchos beneficios de jugar Clash Royale en PC usando un emulador, como:</p>
|
19 |
-
<ul>
|
20 |
-
<li> Puedes disfrutar del juego en una pantalla más grande con mayor resolución y mejores gráficos. </li>
|
21 |
-
<li>Puedes usar el teclado y el ratón para controlar el juego con mayor facilidad y precisión. </li>
|
22 |
-
<li> Puede evitar el drenaje de la batería, el sobrecalentamiento y los problemas de retraso que pueden ocurrir en su dispositivo móvil. </li>
|
23 |
-
<li> Puede acceder al juego desde su PC en cualquier momento sin transferir su cuenta o datos. </li>
|
24 |
-
<li> Puede grabar, transmitir o capturar imágenes de su juego con facilidad. </li>
|
25 |
-
</ul>
|
26 |
-
<p>Por supuesto, todavía necesitará una conexión a Internet estable y un PC compatible para ejecutar el emulador y el juego sin problemas. Pero si los tienes, ¡entonces estás listo para descargar Clash Royale en tu computadora! </p>
|
27 |
-
<h3>El mejor emulador para usar: Bluestacks</h3>
|
28 |
-
|
29 |
-
<ul>
|
30 |
-
<li> Tiene un proceso de instalación rápido y fácil que no requiere ninguna habilidad técnica o conocimiento. </li>
|
31 |
-
<li> Tiene una interfaz fácil de usar que le permite acceder a la Google Play Store y descargar Clash Royale con solo unos pocos clics. </li>
|
32 |
-
<li> Tiene una alta compatibilidad y rendimiento que garantiza una experiencia de juego suave y sin fisuras. </li>
|
33 |
-
<li> Tiene una herramienta de asignación de teclas incorporada que le permite personalizar la configuración del teclado y el ratón para un control y comodidad óptimos. </li>
|
34 |
-
<li> Tiene una función de varias instancias que le permite ejecutar varias instancias de Clash Royale u otras aplicaciones simultáneamente. </li>
|
35 |
-
</ul>
|
36 |
-
<p>Con Bluestacks, puedes disfrutar jugando Clash Royale en PC como nunca antes. También puede consultar el sitio web oficial de Bluestacks para obtener más información y soporte. </p>
|
37 |
-
<h3>Los pasos para descargar e instalar Bluestacks y Clash Royale</h3>
|
38 |
-
<p>Ahora que sabes por qué y cómo usar Bluestacks, vamos a ver los pasos para descargarlo e instalarlo y Clash Royale en tu PC. Siga estos sencillos pasos:</p>
|
39 |
-
<ol>
|
40 |
-
<li>Ve al sitio web oficial de Bluestacks y haz clic en el botón <strong>Descargar Bluestacks</strong>. Esto comenzará a descargar el archivo de instalación en su PC.</li>
|
41 |
-
<li>Una vez completada la descarga, abra el archivo de instalación y siga las instrucciones en la pantalla para instalar Bluestacks en su PC. Esto puede tardar unos minutos dependiendo de las especificaciones de su PC. </li>
|
42 |
-
<li>Después de la instalación, inicie Bluestacks desde su escritorio o menú de inicio. Verá la pantalla de inicio de Bluestacks con varios iconos y opciones. </li>
|
43 |
-
<li>En la pantalla de inicio, busque el icono <strong>Google Play Store</strong> y haga clic en él. Esto abrirá la aplicación Google Play Store dentro de Bluestacks.</li>
|
44 |
-
<li>En la aplicación Google Play Store, busca <strong>Clash Royale</strong> usando la barra de búsqueda. Verás el icono y el nombre del juego en los resultados de búsqueda. </li>
|
45 |
-
|
46 |
-
<li>Una vez completada la instalación, verás el icono del juego en la pantalla de inicio de Bluestacks. También puedes encontrarlo en la pestaña <strong>Mis juegos</strong>. </li>
|
47 |
-
<li>Haga clic en el icono del juego para iniciar Clash Royale en su PC. Verá la pantalla de carga del juego con el logotipo y la música de Supercell. </li>
|
48 |
-
<li>Si esta es la primera vez que juegas a Clash Royale, tendrás que pasar por un tutorial que te enseña los fundamentos del juego. También puedes iniciar sesión con tu cuenta de Google o Supercell ID para sincronizar tu progreso desde tu dispositivo móvil. </li>
|
49 |
-
<li>Si ya has jugado Clash Royale antes, puedes saltarte el tutorial e iniciar sesión con tu cuenta de Google o Supercell ID para continuar donde lo dejaste. </li>
|
50 |
-
</ol>
|
51 |
-
<p>¡Felicidades! Has descargado e instalado Clash Royale en tu PC usando Bluestacks. Ahora puedes disfrutar de este increíble juego en una pantalla más grande con mejores gráficos, un rendimiento más suave y controles más cómodos. </p>
|
52 |
-
<h2>Cómo jugar Clash Royale en PC</h2>
|
53 |
-
<h3>El juego básico y los controles</h3>
|
54 |
-
<p>La jugabilidad de Clash Royale en PC es similar a la de los dispositivos móviles. Tienes que usar tus cartas para atacar las torres enemigas y defender tus propias torres. Tienes que administrar tu elixir, que se utiliza para jugar a las cartas, y elegir las cartas adecuadas para cada situación. Tienes que ganar batallas para ganar trofeos, coronas, cofres y otras recompensas. </p>
|
55 |
-
<p>Los controles de Clash Royale en PC son diferentes de los de los dispositivos móviles. Puede utilizar el teclado y el ratón para jugar el juego en lugar de tocar y deslizar en la pantalla. Aquí están los controles básicos de Clash Royale en PC usando Bluestacks:</p>
|
56 |
-
<ul>
|
57 |
-
<li>Puedes usar el <strong>mouse</strong> para seleccionar cartas, arrastrarlas y soltarlas en el campo de batalla, e interactuar con el menú del juego y la interfaz. </li>
|
58 |
-
<li> Puede utilizar las teclas de flecha <strong></strong> para mover la cámara y ver diferentes partes de la arena. </li>
|
59 |
-
|
60 |
-
<li> Puedes usar la tecla <strong>ESC</strong> para pausar el juego o salir del partido. </li>
|
61 |
-
<li> Puede usar la tecla <strong>F1</strong> para acceder al menú de ayuda y ver los atajos de teclado. </li>
|
62 |
-
</ul>
|
63 |
-
<p>También puede personalizar la configuración del teclado y el ratón utilizando la herramienta de asignación de teclas en Bluestacks. Puede asignar diferentes teclas o botones a diferentes acciones, como jugar a las cartas, usar emotes, abrir cofres, etc. También puede ajustar la sensibilidad y la velocidad de su ratón. Para acceder a la herramienta de asignación de teclas, haga clic en el icono del teclado <strong></strong> en el lado derecho de la ventana Bluestacks. </p>
|
64 |
-
<h3>Los consejos y trucos para ganar batallas y progresar más rápido</h3>
|
65 |
-
<p>Jugar Clash Royale en PC no es solo acerca de conocer el juego básico y los controles. También necesitas aprender algunos consejos y trucos para ganar batallas y progresar más rápido en el juego. Estos son algunos de ellos:</p>
|
66 |
-
<ul>
|
67 |
-
<li>Conozca las fortalezas y debilidades de cada tarjeta y cómo interactúan entre sí. Por ejemplo, debes saber qué cartas pueden contrarrestar o ser contrarrestadas por otras cartas, qué cartas pueden infligir daño por salpicadura o apuntar unidades aéreas, qué cartas pueden empujar o tirar a los enemigos, etc.</li>
|
68 |
-
<li>Construir una cubierta equilibrada y versátil que puede manejar diferentes situaciones y oponentes. Por ejemplo, tienen una mezcla de cartas de bajo costo y alto costo, tarjetas ofensivas y defensivas, tarjetas de un solo objetivo y de área de efecto, unidades terrestres y aéreas, etc.</li>
|
69 |
-
<li>Usa tu elixir sabiamente y eficientemente. No lo desperdicies en movimientos innecesarios o ineficaces. No lo dejes reposar a plena capacidad por mucho tiempo. Trata de obtener una ventaja de elixir sobre tu oponente haciendo operaciones de elixir positivas. </li>
|
70 |
-
<li>Presta atención a los movimientos y cartas de tu oponente. Trata de predecir lo que harán a continuación y cómo puedes contrarrestarlos. Trata de atraerlos a cometer errores o desperdiciar su elixir. Explota sus debilidades y castiga sus errores. </li>
|
71 |
-
|
72 |
-
<li>Sé agresivo pero no temerario. No tengas miedo de atacar cuando tengas una oportunidad o una ventaja. No sea demasiado pasivo o defensivo cuando esté detrás o bajo presión. Pero tampoco sea demasiado codicioso o descuidado. Sepa cuándo retirarse o defenderse cuando sea necesario. </li>
|
73 |
-
<li>Sé flexible y adaptable. No te apegues a una estrategia o plan todo el tiempo. No tengas miedo de cambiar tus tácticas o cartas según la situación o el oponente. Experimenta con diferentes combinaciones y sinergias. Aprende de tus ganancias y pérdidas. </li>
|
74 |
-
</ul>
|
75 |
-
<h3>Las mejores barajas y estrategias para diferentes arenas y modos</h3>
|
76 |
-
<p>El juego también ofrece varias arenas y modos para que usted juegue en. Cada arena tiene un tema diferente, fondo, música, y tarjetas desbloqueables. Cada modo tiene una regla diferente, objetivo, recompensa y desafío. Necesitas usar diferentes barajas y estrategias para diferentes arenas y modos para tener éxito en ellas. </p>
|
77 |
-
<p>Aquí hay algunos ejemplos de las mejores barajas y estrategias para algunas de las arenas y modos en Clash Royale:</p>
|
78 |
-
<tabla>
|
79 |
-
<tr><th>Arena/Mode</th><th>Deck</th><th>Estrategia</th></tr>
|
80 |
-
<tr><td>Estadio Duende (Arena 1)</td><td>Gigante, Mosquetero, Mini P.E.K.K.A., Bola de fuego, Flechas, Caballero, Arqueros, Duendes</td><td>Esta es una baraja simple pero efectiva que puede ayudarte a ganar tus primeras batallas en Clash Royale. La idea principal es utilizar el Gigante como un tanque para proteger a sus otras unidades detrás de él, como el Mosquetero o el Mini P.E.K.K.A., que puede hacer mucho daño a las torres o unidades enemigas. Usa tus hechizos, como las filas de Bola de Fuego o Ar, para despejar el camino para tu Gigante o para acabar con torres o unidades enemigas de baja salud. Usa tus unidades baratas, como Knight, Archers o Goblins, para pedalear tus cartas más rápido o para defenderte de los ataques enemigos. </td></tr>
|
81 |
-
|
82 |
-
<tr><td>Legendary Arena (Arena 13)</td><td>Golem, Night Witch, Lumberjack, Baby Dragon, Tornado, Lightning, Mega Minion, Zap</td><td>Esta es una baraja poderosa y popular que puede ayudarte a llegar a la Arena Legendaria y más allá. La idea principal es usar el Golem como un tanque masivo que puede absorber mucho daño y explotar en los Golemitas cuando se destruye. Usa a la Bruja Nocturna y al Leñador como tus principales unidades de apoyo detrás del Golem, que pueden engendrar Murciélagos y soltar Rabia cuando te maten. Usa el Dragón Bebé y el Minion Mega como tus unidades de apoyo secundarias que pueden infligir daño aéreo y terrestre. Usa el hechizo Tornado para reunir a las unidades enemigas y activar tu Torre Rey o para crear sinergias con el daño por salpicadura de tu Dragón Bebé. Usa el hechizo Rayo para aturdir y dañar las unidades o torres enemigas. Usa el hechizo Zap para restablecer las unidades enemigas o para acabar con los enemigos de baja salud. </td></tr>
|
83 |
-
</tabla>
|
84 |
-
<h2>Conclusión</h2>
|
85 |
-
<p>Clash Royale es un increíble juego que puedes disfrutar en tu dispositivo móvil o en tu PC usando un emulador de Android. Siguiendo esta guía, puedes aprender cómo descargar Clash Royale en tu computadora usando Bluestacks y cómo jugarlo como un profesional. También puedes aprender algunas de las mejores barajas y estrategias para diferentes escenarios y modos en Clash Royale.</p>
|
86 |
-
<p>Entonces, ¿qué estás esperando? Descarga Clash Royale en tu PC hoy y únete a millones de jugadores en este épico juego de estrategia, habilidad, suerte y diversión. ¡No te arrepentirás! </p>
|
87 |
-
<h2>Preguntas frecuentes</h2>
|
88 |
-
<h3>Q1: ¿Clash Royale es libre de jugar? </h3>
|
89 |
-
<p>A1: Sí, Clash Royale es gratis para jugar y descargar en dispositivos móviles y PC. Sin embargo, también ofrece compras en la aplicación que pueden mejorar su experiencia de juego. Puedes comprar gemas con dinero real, que se pueden usar para comprar cofres, oro, tarjetas, pieles, emotes, objetos mágicos, etc. También puedes comprar una suscripción Pass Royale que te da acceso a recompensas y beneficios exclusivos. </p>
|
90 |
-
<h3>Q2: ¿Puedo jugar Clash Royale con mis amigos? </h3>
|
91 |
-
|
92 |
-
<h3>Q3: ¿Cómo puedo obtener más cartas y gemas en Clash Royale? </h3>
|
93 |
-
<p>A3: Hay muchas maneras de obtener más cartas y gemas en Clash Royale sin gastar dinero real. Puedes obtener cartas abriendo los cofres que ganes ganando batallas o completando misiones. También puedes conseguir cartas solicitándoselas a tus compañeros de clan o comprándolas en la tienda con oro. Puedes obtener gemas completando logros o eventos que recompensen gemas. También puedes obtener gemas abriendo cofres libres o cofres que contengan gemas. </p>
|
94 |
-
<h3>Q4: ¿Cuáles son los requisitos del sistema para jugar Clash Royale en PC? </h3>
|
95 |
-
<p>A4: Los requisitos del sistema para jugar Clash Royale en PC usando Bluestacks son los siguientes:</p>
|
96 |
-
<ul>
|
97 |
-
<li>Sistema operativo: Windows 7 o superior</li>
|
98 |
-
<li>Procesador: Procesador Intel o AMD</li>
|
99 |
-
<li>RAM: Al menos 2GB de RAM</li>
|
100 |
-
<li>HDD: 5GB de espacio libre en disco</li>
|
101 |
-
<li>Internet: Conexión a Internet de banda ancha</li>
|
102 |
-
</ul>
|
103 |
-
<p>Si su PC cumple con estos requisitos, usted debe ser capaz de jugar Clash Royale en el PC sin ningún problema. Sin embargo, si encuentra algún problema o error, puede consultar el sitio web oficial de Bluestacks para la solución de problemas y soporte. </p>
|
104 |
-
<h3>Q5: ¿Dónde puedo encontrar más información y soporte para Clash Royale? </h3>
|
105 |
-
<p>A5: Si desea encontrar más información y soporte para Clash Royale, puede visitar las siguientes fuentes:</p>
|
106 |
-
<ul>
|
107 |
-
<li>El sitio web oficial de Clash Royale, donde puedes encontrar las últimas noticias, actualizaciones, eventos y medios sobre el juego. </li>
|
108 |
-
<li>El blog oficial de Clash Royale, donde puedes encontrar publicaciones detalladas sobre las características del juego, cambios de equilibrio, consejos y trucos, y más. </li>
|
109 |
-
<li>El canal oficial de YouTube de Clash Royale, donde puedes ver videos de gameplay, tutoriales, destacados y más. </li>
|
110 |
-
<li>La cuenta oficial de Twitter de Clash Royale, donde puedes seguir los últimos tweets y anuncios sobre el juego. </li>
|
111 |
-
|
112 |
-
<li>La comunidad oficial de Reddit de Clash Royale, donde puedes unir discusiones e hilos con otros jugadores y fans del juego. </li>
|
113 |
-
<li>El servidor oficial de Discord de Clash Royale, donde puedes chatear y chatear por voz con otros jugadores y fans del juego. </li>
|
114 |
-
</ul></p> 64aa2da5cf<br />
|
115 |
-
<br />
|
116 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/BernardoOlisan/vqganclip/taming-transformers/taming/data/imagenet.py
DELETED
@@ -1,558 +0,0 @@
|
|
1 |
-
import os, tarfile, glob, shutil
|
2 |
-
import yaml
|
3 |
-
import numpy as np
|
4 |
-
from tqdm import tqdm
|
5 |
-
from PIL import Image
|
6 |
-
import albumentations
|
7 |
-
from omegaconf import OmegaConf
|
8 |
-
from torch.utils.data import Dataset
|
9 |
-
|
10 |
-
from taming.data.base import ImagePaths
|
11 |
-
from taming.util import download, retrieve
|
12 |
-
import taming.data.utils as bdu
|
13 |
-
|
14 |
-
|
15 |
-
def give_synsets_from_indices(indices, path_to_yaml="data/imagenet_idx_to_synset.yaml"):
|
16 |
-
synsets = []
|
17 |
-
with open(path_to_yaml) as f:
|
18 |
-
di2s = yaml.load(f)
|
19 |
-
for idx in indices:
|
20 |
-
synsets.append(str(di2s[idx]))
|
21 |
-
print("Using {} different synsets for construction of Restriced Imagenet.".format(len(synsets)))
|
22 |
-
return synsets
|
23 |
-
|
24 |
-
|
25 |
-
def str_to_indices(string):
|
26 |
-
"""Expects a string in the format '32-123, 256, 280-321'"""
|
27 |
-
assert not string.endswith(","), "provided string '{}' ends with a comma, pls remove it".format(string)
|
28 |
-
subs = string.split(",")
|
29 |
-
indices = []
|
30 |
-
for sub in subs:
|
31 |
-
subsubs = sub.split("-")
|
32 |
-
assert len(subsubs) > 0
|
33 |
-
if len(subsubs) == 1:
|
34 |
-
indices.append(int(subsubs[0]))
|
35 |
-
else:
|
36 |
-
rang = [j for j in range(int(subsubs[0]), int(subsubs[1]))]
|
37 |
-
indices.extend(rang)
|
38 |
-
return sorted(indices)
|
39 |
-
|
40 |
-
|
41 |
-
class ImageNetBase(Dataset):
|
42 |
-
def __init__(self, config=None):
|
43 |
-
self.config = config or OmegaConf.create()
|
44 |
-
if not type(self.config)==dict:
|
45 |
-
self.config = OmegaConf.to_container(self.config)
|
46 |
-
self._prepare()
|
47 |
-
self._prepare_synset_to_human()
|
48 |
-
self._prepare_idx_to_synset()
|
49 |
-
self._load()
|
50 |
-
|
51 |
-
def __len__(self):
|
52 |
-
return len(self.data)
|
53 |
-
|
54 |
-
def __getitem__(self, i):
|
55 |
-
return self.data[i]
|
56 |
-
|
57 |
-
def _prepare(self):
|
58 |
-
raise NotImplementedError()
|
59 |
-
|
60 |
-
def _filter_relpaths(self, relpaths):
|
61 |
-
ignore = set([
|
62 |
-
"n06596364_9591.JPEG",
|
63 |
-
])
|
64 |
-
relpaths = [rpath for rpath in relpaths if not rpath.split("/")[-1] in ignore]
|
65 |
-
if "sub_indices" in self.config:
|
66 |
-
indices = str_to_indices(self.config["sub_indices"])
|
67 |
-
synsets = give_synsets_from_indices(indices, path_to_yaml=self.idx2syn) # returns a list of strings
|
68 |
-
files = []
|
69 |
-
for rpath in relpaths:
|
70 |
-
syn = rpath.split("/")[0]
|
71 |
-
if syn in synsets:
|
72 |
-
files.append(rpath)
|
73 |
-
return files
|
74 |
-
else:
|
75 |
-
return relpaths
|
76 |
-
|
77 |
-
def _prepare_synset_to_human(self):
|
78 |
-
SIZE = 2655750
|
79 |
-
URL = "https://heibox.uni-heidelberg.de/f/9f28e956cd304264bb82/?dl=1"
|
80 |
-
self.human_dict = os.path.join(self.root, "synset_human.txt")
|
81 |
-
if (not os.path.exists(self.human_dict) or
|
82 |
-
not os.path.getsize(self.human_dict)==SIZE):
|
83 |
-
download(URL, self.human_dict)
|
84 |
-
|
85 |
-
def _prepare_idx_to_synset(self):
|
86 |
-
URL = "https://heibox.uni-heidelberg.de/f/d835d5b6ceda4d3aa910/?dl=1"
|
87 |
-
self.idx2syn = os.path.join(self.root, "index_synset.yaml")
|
88 |
-
if (not os.path.exists(self.idx2syn)):
|
89 |
-
download(URL, self.idx2syn)
|
90 |
-
|
91 |
-
def _load(self):
|
92 |
-
with open(self.txt_filelist, "r") as f:
|
93 |
-
self.relpaths = f.read().splitlines()
|
94 |
-
l1 = len(self.relpaths)
|
95 |
-
self.relpaths = self._filter_relpaths(self.relpaths)
|
96 |
-
print("Removed {} files from filelist during filtering.".format(l1 - len(self.relpaths)))
|
97 |
-
|
98 |
-
self.synsets = [p.split("/")[0] for p in self.relpaths]
|
99 |
-
self.abspaths = [os.path.join(self.datadir, p) for p in self.relpaths]
|
100 |
-
|
101 |
-
unique_synsets = np.unique(self.synsets)
|
102 |
-
class_dict = dict((synset, i) for i, synset in enumerate(unique_synsets))
|
103 |
-
self.class_labels = [class_dict[s] for s in self.synsets]
|
104 |
-
|
105 |
-
with open(self.human_dict, "r") as f:
|
106 |
-
human_dict = f.read().splitlines()
|
107 |
-
human_dict = dict(line.split(maxsplit=1) for line in human_dict)
|
108 |
-
|
109 |
-
self.human_labels = [human_dict[s] for s in self.synsets]
|
110 |
-
|
111 |
-
labels = {
|
112 |
-
"relpath": np.array(self.relpaths),
|
113 |
-
"synsets": np.array(self.synsets),
|
114 |
-
"class_label": np.array(self.class_labels),
|
115 |
-
"human_label": np.array(self.human_labels),
|
116 |
-
}
|
117 |
-
self.data = ImagePaths(self.abspaths,
|
118 |
-
labels=labels,
|
119 |
-
size=retrieve(self.config, "size", default=0),
|
120 |
-
random_crop=self.random_crop)
|
121 |
-
|
122 |
-
|
123 |
-
class ImageNetTrain(ImageNetBase):
|
124 |
-
NAME = "ILSVRC2012_train"
|
125 |
-
URL = "http://www.image-net.org/challenges/LSVRC/2012/"
|
126 |
-
AT_HASH = "a306397ccf9c2ead27155983c254227c0fd938e2"
|
127 |
-
FILES = [
|
128 |
-
"ILSVRC2012_img_train.tar",
|
129 |
-
]
|
130 |
-
SIZES = [
|
131 |
-
147897477120,
|
132 |
-
]
|
133 |
-
|
134 |
-
def _prepare(self):
|
135 |
-
self.random_crop = retrieve(self.config, "ImageNetTrain/random_crop",
|
136 |
-
default=True)
|
137 |
-
cachedir = os.environ.get("XDG_CACHE_HOME", os.path.expanduser("~/.cache"))
|
138 |
-
self.root = os.path.join(cachedir, "autoencoders/data", self.NAME)
|
139 |
-
self.datadir = os.path.join(self.root, "data")
|
140 |
-
self.txt_filelist = os.path.join(self.root, "filelist.txt")
|
141 |
-
self.expected_length = 1281167
|
142 |
-
if not bdu.is_prepared(self.root):
|
143 |
-
# prep
|
144 |
-
print("Preparing dataset {} in {}".format(self.NAME, self.root))
|
145 |
-
|
146 |
-
datadir = self.datadir
|
147 |
-
if not os.path.exists(datadir):
|
148 |
-
path = os.path.join(self.root, self.FILES[0])
|
149 |
-
if not os.path.exists(path) or not os.path.getsize(path)==self.SIZES[0]:
|
150 |
-
import academictorrents as at
|
151 |
-
atpath = at.get(self.AT_HASH, datastore=self.root)
|
152 |
-
assert atpath == path
|
153 |
-
|
154 |
-
print("Extracting {} to {}".format(path, datadir))
|
155 |
-
os.makedirs(datadir, exist_ok=True)
|
156 |
-
with tarfile.open(path, "r:") as tar:
|
157 |
-
tar.extractall(path=datadir)
|
158 |
-
|
159 |
-
print("Extracting sub-tars.")
|
160 |
-
subpaths = sorted(glob.glob(os.path.join(datadir, "*.tar")))
|
161 |
-
for subpath in tqdm(subpaths):
|
162 |
-
subdir = subpath[:-len(".tar")]
|
163 |
-
os.makedirs(subdir, exist_ok=True)
|
164 |
-
with tarfile.open(subpath, "r:") as tar:
|
165 |
-
tar.extractall(path=subdir)
|
166 |
-
|
167 |
-
|
168 |
-
filelist = glob.glob(os.path.join(datadir, "**", "*.JPEG"))
|
169 |
-
filelist = [os.path.relpath(p, start=datadir) for p in filelist]
|
170 |
-
filelist = sorted(filelist)
|
171 |
-
filelist = "\n".join(filelist)+"\n"
|
172 |
-
with open(self.txt_filelist, "w") as f:
|
173 |
-
f.write(filelist)
|
174 |
-
|
175 |
-
bdu.mark_prepared(self.root)
|
176 |
-
|
177 |
-
|
178 |
-
class ImageNetValidation(ImageNetBase):
|
179 |
-
NAME = "ILSVRC2012_validation"
|
180 |
-
URL = "http://www.image-net.org/challenges/LSVRC/2012/"
|
181 |
-
AT_HASH = "5d6d0df7ed81efd49ca99ea4737e0ae5e3a5f2e5"
|
182 |
-
VS_URL = "https://heibox.uni-heidelberg.de/f/3e0f6e9c624e45f2bd73/?dl=1"
|
183 |
-
FILES = [
|
184 |
-
"ILSVRC2012_img_val.tar",
|
185 |
-
"validation_synset.txt",
|
186 |
-
]
|
187 |
-
SIZES = [
|
188 |
-
6744924160,
|
189 |
-
1950000,
|
190 |
-
]
|
191 |
-
|
192 |
-
def _prepare(self):
|
193 |
-
self.random_crop = retrieve(self.config, "ImageNetValidation/random_crop",
|
194 |
-
default=False)
|
195 |
-
cachedir = os.environ.get("XDG_CACHE_HOME", os.path.expanduser("~/.cache"))
|
196 |
-
self.root = os.path.join(cachedir, "autoencoders/data", self.NAME)
|
197 |
-
self.datadir = os.path.join(self.root, "data")
|
198 |
-
self.txt_filelist = os.path.join(self.root, "filelist.txt")
|
199 |
-
self.expected_length = 50000
|
200 |
-
if not bdu.is_prepared(self.root):
|
201 |
-
# prep
|
202 |
-
print("Preparing dataset {} in {}".format(self.NAME, self.root))
|
203 |
-
|
204 |
-
datadir = self.datadir
|
205 |
-
if not os.path.exists(datadir):
|
206 |
-
path = os.path.join(self.root, self.FILES[0])
|
207 |
-
if not os.path.exists(path) or not os.path.getsize(path)==self.SIZES[0]:
|
208 |
-
import academictorrents as at
|
209 |
-
atpath = at.get(self.AT_HASH, datastore=self.root)
|
210 |
-
assert atpath == path
|
211 |
-
|
212 |
-
print("Extracting {} to {}".format(path, datadir))
|
213 |
-
os.makedirs(datadir, exist_ok=True)
|
214 |
-
with tarfile.open(path, "r:") as tar:
|
215 |
-
tar.extractall(path=datadir)
|
216 |
-
|
217 |
-
vspath = os.path.join(self.root, self.FILES[1])
|
218 |
-
if not os.path.exists(vspath) or not os.path.getsize(vspath)==self.SIZES[1]:
|
219 |
-
download(self.VS_URL, vspath)
|
220 |
-
|
221 |
-
with open(vspath, "r") as f:
|
222 |
-
synset_dict = f.read().splitlines()
|
223 |
-
synset_dict = dict(line.split() for line in synset_dict)
|
224 |
-
|
225 |
-
print("Reorganizing into synset folders")
|
226 |
-
synsets = np.unique(list(synset_dict.values()))
|
227 |
-
for s in synsets:
|
228 |
-
os.makedirs(os.path.join(datadir, s), exist_ok=True)
|
229 |
-
for k, v in synset_dict.items():
|
230 |
-
src = os.path.join(datadir, k)
|
231 |
-
dst = os.path.join(datadir, v)
|
232 |
-
shutil.move(src, dst)
|
233 |
-
|
234 |
-
filelist = glob.glob(os.path.join(datadir, "**", "*.JPEG"))
|
235 |
-
filelist = [os.path.relpath(p, start=datadir) for p in filelist]
|
236 |
-
filelist = sorted(filelist)
|
237 |
-
filelist = "\n".join(filelist)+"\n"
|
238 |
-
with open(self.txt_filelist, "w") as f:
|
239 |
-
f.write(filelist)
|
240 |
-
|
241 |
-
bdu.mark_prepared(self.root)
|
242 |
-
|
243 |
-
|
244 |
-
def get_preprocessor(size=None, random_crop=False, additional_targets=None,
|
245 |
-
crop_size=None):
|
246 |
-
if size is not None and size > 0:
|
247 |
-
transforms = list()
|
248 |
-
rescaler = albumentations.SmallestMaxSize(max_size = size)
|
249 |
-
transforms.append(rescaler)
|
250 |
-
if not random_crop:
|
251 |
-
cropper = albumentations.CenterCrop(height=size,width=size)
|
252 |
-
transforms.append(cropper)
|
253 |
-
else:
|
254 |
-
cropper = albumentations.RandomCrop(height=size,width=size)
|
255 |
-
transforms.append(cropper)
|
256 |
-
flipper = albumentations.HorizontalFlip()
|
257 |
-
transforms.append(flipper)
|
258 |
-
preprocessor = albumentations.Compose(transforms,
|
259 |
-
additional_targets=additional_targets)
|
260 |
-
elif crop_size is not None and crop_size > 0:
|
261 |
-
if not random_crop:
|
262 |
-
cropper = albumentations.CenterCrop(height=crop_size,width=crop_size)
|
263 |
-
else:
|
264 |
-
cropper = albumentations.RandomCrop(height=crop_size,width=crop_size)
|
265 |
-
transforms = [cropper]
|
266 |
-
preprocessor = albumentations.Compose(transforms,
|
267 |
-
additional_targets=additional_targets)
|
268 |
-
else:
|
269 |
-
preprocessor = lambda **kwargs: kwargs
|
270 |
-
return preprocessor
|
271 |
-
|
272 |
-
|
273 |
-
def rgba_to_depth(x):
|
274 |
-
assert x.dtype == np.uint8
|
275 |
-
assert len(x.shape) == 3 and x.shape[2] == 4
|
276 |
-
y = x.copy()
|
277 |
-
y.dtype = np.float32
|
278 |
-
y = y.reshape(x.shape[:2])
|
279 |
-
return np.ascontiguousarray(y)
|
280 |
-
|
281 |
-
|
282 |
-
class BaseWithDepth(Dataset):
|
283 |
-
DEFAULT_DEPTH_ROOT="data/imagenet_depth"
|
284 |
-
|
285 |
-
def __init__(self, config=None, size=None, random_crop=False,
|
286 |
-
crop_size=None, root=None):
|
287 |
-
self.config = config
|
288 |
-
self.base_dset = self.get_base_dset()
|
289 |
-
self.preprocessor = get_preprocessor(
|
290 |
-
size=size,
|
291 |
-
crop_size=crop_size,
|
292 |
-
random_crop=random_crop,
|
293 |
-
additional_targets={"depth": "image"})
|
294 |
-
self.crop_size = crop_size
|
295 |
-
if self.crop_size is not None:
|
296 |
-
self.rescaler = albumentations.Compose(
|
297 |
-
[albumentations.SmallestMaxSize(max_size = self.crop_size)],
|
298 |
-
additional_targets={"depth": "image"})
|
299 |
-
if root is not None:
|
300 |
-
self.DEFAULT_DEPTH_ROOT = root
|
301 |
-
|
302 |
-
def __len__(self):
|
303 |
-
return len(self.base_dset)
|
304 |
-
|
305 |
-
def preprocess_depth(self, path):
|
306 |
-
rgba = np.array(Image.open(path))
|
307 |
-
depth = rgba_to_depth(rgba)
|
308 |
-
depth = (depth - depth.min())/max(1e-8, depth.max()-depth.min())
|
309 |
-
depth = 2.0*depth-1.0
|
310 |
-
return depth
|
311 |
-
|
312 |
-
def __getitem__(self, i):
|
313 |
-
e = self.base_dset[i]
|
314 |
-
e["depth"] = self.preprocess_depth(self.get_depth_path(e))
|
315 |
-
# up if necessary
|
316 |
-
h,w,c = e["image"].shape
|
317 |
-
if self.crop_size and min(h,w) < self.crop_size:
|
318 |
-
# have to upscale to be able to crop - this just uses bilinear
|
319 |
-
out = self.rescaler(image=e["image"], depth=e["depth"])
|
320 |
-
e["image"] = out["image"]
|
321 |
-
e["depth"] = out["depth"]
|
322 |
-
transformed = self.preprocessor(image=e["image"], depth=e["depth"])
|
323 |
-
e["image"] = transformed["image"]
|
324 |
-
e["depth"] = transformed["depth"]
|
325 |
-
return e
|
326 |
-
|
327 |
-
|
328 |
-
class ImageNetTrainWithDepth(BaseWithDepth):
|
329 |
-
# default to random_crop=True
|
330 |
-
def __init__(self, random_crop=True, sub_indices=None, **kwargs):
|
331 |
-
self.sub_indices = sub_indices
|
332 |
-
super().__init__(random_crop=random_crop, **kwargs)
|
333 |
-
|
334 |
-
def get_base_dset(self):
|
335 |
-
if self.sub_indices is None:
|
336 |
-
return ImageNetTrain()
|
337 |
-
else:
|
338 |
-
return ImageNetTrain({"sub_indices": self.sub_indices})
|
339 |
-
|
340 |
-
def get_depth_path(self, e):
|
341 |
-
fid = os.path.splitext(e["relpath"])[0]+".png"
|
342 |
-
fid = os.path.join(self.DEFAULT_DEPTH_ROOT, "train", fid)
|
343 |
-
return fid
|
344 |
-
|
345 |
-
|
346 |
-
class ImageNetValidationWithDepth(BaseWithDepth):
|
347 |
-
def __init__(self, sub_indices=None, **kwargs):
|
348 |
-
self.sub_indices = sub_indices
|
349 |
-
super().__init__(**kwargs)
|
350 |
-
|
351 |
-
def get_base_dset(self):
|
352 |
-
if self.sub_indices is None:
|
353 |
-
return ImageNetValidation()
|
354 |
-
else:
|
355 |
-
return ImageNetValidation({"sub_indices": self.sub_indices})
|
356 |
-
|
357 |
-
def get_depth_path(self, e):
|
358 |
-
fid = os.path.splitext(e["relpath"])[0]+".png"
|
359 |
-
fid = os.path.join(self.DEFAULT_DEPTH_ROOT, "val", fid)
|
360 |
-
return fid
|
361 |
-
|
362 |
-
|
363 |
-
class RINTrainWithDepth(ImageNetTrainWithDepth):
|
364 |
-
def __init__(self, config=None, size=None, random_crop=True, crop_size=None):
|
365 |
-
sub_indices = "30-32, 33-37, 151-268, 281-285, 80-100, 365-382, 389-397, 118-121, 300-319"
|
366 |
-
super().__init__(config=config, size=size, random_crop=random_crop,
|
367 |
-
sub_indices=sub_indices, crop_size=crop_size)
|
368 |
-
|
369 |
-
|
370 |
-
class RINValidationWithDepth(ImageNetValidationWithDepth):
|
371 |
-
def __init__(self, config=None, size=None, random_crop=False, crop_size=None):
|
372 |
-
sub_indices = "30-32, 33-37, 151-268, 281-285, 80-100, 365-382, 389-397, 118-121, 300-319"
|
373 |
-
super().__init__(config=config, size=size, random_crop=random_crop,
|
374 |
-
sub_indices=sub_indices, crop_size=crop_size)
|
375 |
-
|
376 |
-
|
377 |
-
class DRINExamples(Dataset):
|
378 |
-
def __init__(self):
|
379 |
-
self.preprocessor = get_preprocessor(size=256, additional_targets={"depth": "image"})
|
380 |
-
with open("data/drin_examples.txt", "r") as f:
|
381 |
-
relpaths = f.read().splitlines()
|
382 |
-
self.image_paths = [os.path.join("data/drin_images",
|
383 |
-
relpath) for relpath in relpaths]
|
384 |
-
self.depth_paths = [os.path.join("data/drin_depth",
|
385 |
-
relpath.replace(".JPEG", ".png")) for relpath in relpaths]
|
386 |
-
|
387 |
-
def __len__(self):
|
388 |
-
return len(self.image_paths)
|
389 |
-
|
390 |
-
def preprocess_image(self, image_path):
|
391 |
-
image = Image.open(image_path)
|
392 |
-
if not image.mode == "RGB":
|
393 |
-
image = image.convert("RGB")
|
394 |
-
image = np.array(image).astype(np.uint8)
|
395 |
-
image = self.preprocessor(image=image)["image"]
|
396 |
-
image = (image/127.5 - 1.0).astype(np.float32)
|
397 |
-
return image
|
398 |
-
|
399 |
-
def preprocess_depth(self, path):
|
400 |
-
rgba = np.array(Image.open(path))
|
401 |
-
depth = rgba_to_depth(rgba)
|
402 |
-
depth = (depth - depth.min())/max(1e-8, depth.max()-depth.min())
|
403 |
-
depth = 2.0*depth-1.0
|
404 |
-
return depth
|
405 |
-
|
406 |
-
def __getitem__(self, i):
|
407 |
-
e = dict()
|
408 |
-
e["image"] = self.preprocess_image(self.image_paths[i])
|
409 |
-
e["depth"] = self.preprocess_depth(self.depth_paths[i])
|
410 |
-
transformed = self.preprocessor(image=e["image"], depth=e["depth"])
|
411 |
-
e["image"] = transformed["image"]
|
412 |
-
e["depth"] = transformed["depth"]
|
413 |
-
return e
|
414 |
-
|
415 |
-
|
416 |
-
def imscale(x, factor, keepshapes=False, keepmode="bicubic"):
|
417 |
-
if factor is None or factor==1:
|
418 |
-
return x
|
419 |
-
|
420 |
-
dtype = x.dtype
|
421 |
-
assert dtype in [np.float32, np.float64]
|
422 |
-
assert x.min() >= -1
|
423 |
-
assert x.max() <= 1
|
424 |
-
|
425 |
-
keepmode = {"nearest": Image.NEAREST, "bilinear": Image.BILINEAR,
|
426 |
-
"bicubic": Image.BICUBIC}[keepmode]
|
427 |
-
|
428 |
-
lr = (x+1.0)*127.5
|
429 |
-
lr = lr.clip(0,255).astype(np.uint8)
|
430 |
-
lr = Image.fromarray(lr)
|
431 |
-
|
432 |
-
h, w, _ = x.shape
|
433 |
-
nh = h//factor
|
434 |
-
nw = w//factor
|
435 |
-
assert nh > 0 and nw > 0, (nh, nw)
|
436 |
-
|
437 |
-
lr = lr.resize((nw,nh), Image.BICUBIC)
|
438 |
-
if keepshapes:
|
439 |
-
lr = lr.resize((w,h), keepmode)
|
440 |
-
lr = np.array(lr)/127.5-1.0
|
441 |
-
lr = lr.astype(dtype)
|
442 |
-
|
443 |
-
return lr
|
444 |
-
|
445 |
-
|
446 |
-
class ImageNetScale(Dataset):
|
447 |
-
def __init__(self, size=None, crop_size=None, random_crop=False,
|
448 |
-
up_factor=None, hr_factor=None, keep_mode="bicubic"):
|
449 |
-
self.base = self.get_base()
|
450 |
-
|
451 |
-
self.size = size
|
452 |
-
self.crop_size = crop_size if crop_size is not None else self.size
|
453 |
-
self.random_crop = random_crop
|
454 |
-
self.up_factor = up_factor
|
455 |
-
self.hr_factor = hr_factor
|
456 |
-
self.keep_mode = keep_mode
|
457 |
-
|
458 |
-
transforms = list()
|
459 |
-
|
460 |
-
if self.size is not None and self.size > 0:
|
461 |
-
rescaler = albumentations.SmallestMaxSize(max_size = self.size)
|
462 |
-
self.rescaler = rescaler
|
463 |
-
transforms.append(rescaler)
|
464 |
-
|
465 |
-
if self.crop_size is not None and self.crop_size > 0:
|
466 |
-
if len(transforms) == 0:
|
467 |
-
self.rescaler = albumentations.SmallestMaxSize(max_size = self.crop_size)
|
468 |
-
|
469 |
-
if not self.random_crop:
|
470 |
-
cropper = albumentations.CenterCrop(height=self.crop_size,width=self.crop_size)
|
471 |
-
else:
|
472 |
-
cropper = albumentations.RandomCrop(height=self.crop_size,width=self.crop_size)
|
473 |
-
transforms.append(cropper)
|
474 |
-
|
475 |
-
if len(transforms) > 0:
|
476 |
-
if self.up_factor is not None:
|
477 |
-
additional_targets = {"lr": "image"}
|
478 |
-
else:
|
479 |
-
additional_targets = None
|
480 |
-
self.preprocessor = albumentations.Compose(transforms,
|
481 |
-
additional_targets=additional_targets)
|
482 |
-
else:
|
483 |
-
self.preprocessor = lambda **kwargs: kwargs
|
484 |
-
|
485 |
-
def __len__(self):
|
486 |
-
return len(self.base)
|
487 |
-
|
488 |
-
def __getitem__(self, i):
|
489 |
-
example = self.base[i]
|
490 |
-
image = example["image"]
|
491 |
-
# adjust resolution
|
492 |
-
image = imscale(image, self.hr_factor, keepshapes=False)
|
493 |
-
h,w,c = image.shape
|
494 |
-
if self.crop_size and min(h,w) < self.crop_size:
|
495 |
-
# have to upscale to be able to crop - this just uses bilinear
|
496 |
-
image = self.rescaler(image=image)["image"]
|
497 |
-
if self.up_factor is None:
|
498 |
-
image = self.preprocessor(image=image)["image"]
|
499 |
-
example["image"] = image
|
500 |
-
else:
|
501 |
-
lr = imscale(image, self.up_factor, keepshapes=True,
|
502 |
-
keepmode=self.keep_mode)
|
503 |
-
|
504 |
-
out = self.preprocessor(image=image, lr=lr)
|
505 |
-
example["image"] = out["image"]
|
506 |
-
example["lr"] = out["lr"]
|
507 |
-
|
508 |
-
return example
|
509 |
-
|
510 |
-
class ImageNetScaleTrain(ImageNetScale):
|
511 |
-
def __init__(self, random_crop=True, **kwargs):
|
512 |
-
super().__init__(random_crop=random_crop, **kwargs)
|
513 |
-
|
514 |
-
def get_base(self):
|
515 |
-
return ImageNetTrain()
|
516 |
-
|
517 |
-
class ImageNetScaleValidation(ImageNetScale):
|
518 |
-
def get_base(self):
|
519 |
-
return ImageNetValidation()
|
520 |
-
|
521 |
-
|
522 |
-
from skimage.feature import canny
|
523 |
-
from skimage.color import rgb2gray
|
524 |
-
|
525 |
-
|
526 |
-
class ImageNetEdges(ImageNetScale):
|
527 |
-
def __init__(self, up_factor=1, **kwargs):
|
528 |
-
super().__init__(up_factor=1, **kwargs)
|
529 |
-
|
530 |
-
def __getitem__(self, i):
|
531 |
-
example = self.base[i]
|
532 |
-
image = example["image"]
|
533 |
-
h,w,c = image.shape
|
534 |
-
if self.crop_size and min(h,w) < self.crop_size:
|
535 |
-
# have to upscale to be able to crop - this just uses bilinear
|
536 |
-
image = self.rescaler(image=image)["image"]
|
537 |
-
|
538 |
-
lr = canny(rgb2gray(image), sigma=2)
|
539 |
-
lr = lr.astype(np.float32)
|
540 |
-
lr = lr[:,:,None][:,:,[0,0,0]]
|
541 |
-
|
542 |
-
out = self.preprocessor(image=image, lr=lr)
|
543 |
-
example["image"] = out["image"]
|
544 |
-
example["lr"] = out["lr"]
|
545 |
-
|
546 |
-
return example
|
547 |
-
|
548 |
-
|
549 |
-
class ImageNetEdgesTrain(ImageNetEdges):
|
550 |
-
def __init__(self, random_crop=True, **kwargs):
|
551 |
-
super().__init__(random_crop=random_crop, **kwargs)
|
552 |
-
|
553 |
-
def get_base(self):
|
554 |
-
return ImageNetTrain()
|
555 |
-
|
556 |
-
class ImageNetEdgesValidation(ImageNetEdges):
|
557 |
-
def get_base(self):
|
558 |
-
return ImageNetValidation()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/model.py
DELETED
@@ -1,946 +0,0 @@
|
|
1 |
-
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
2 |
-
#
|
3 |
-
# Licensed under the Apache License, Version 2.0 (the "License"). You
|
4 |
-
# may not use this file except in compliance with the License. A copy of
|
5 |
-
# the License is located at
|
6 |
-
#
|
7 |
-
# http://aws.amazon.com/apache2.0/
|
8 |
-
#
|
9 |
-
# or in the "license" file accompanying this file. This file is
|
10 |
-
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
|
11 |
-
# ANY KIND, either express or implied. See the License for the specific
|
12 |
-
# language governing permissions and limitations under the License.
|
13 |
-
"""Abstractions to interact with service models."""
|
14 |
-
from collections import defaultdict
|
15 |
-
from typing import NamedTuple, Union
|
16 |
-
|
17 |
-
from botocore.compat import OrderedDict
|
18 |
-
from botocore.exceptions import (
|
19 |
-
MissingServiceIdError,
|
20 |
-
UndefinedModelAttributeError,
|
21 |
-
)
|
22 |
-
from botocore.utils import CachedProperty, hyphenize_service_id, instance_cache
|
23 |
-
|
24 |
-
NOT_SET = object()
|
25 |
-
|
26 |
-
|
27 |
-
class NoShapeFoundError(Exception):
|
28 |
-
pass
|
29 |
-
|
30 |
-
|
31 |
-
class InvalidShapeError(Exception):
|
32 |
-
pass
|
33 |
-
|
34 |
-
|
35 |
-
class OperationNotFoundError(Exception):
|
36 |
-
pass
|
37 |
-
|
38 |
-
|
39 |
-
class InvalidShapeReferenceError(Exception):
|
40 |
-
pass
|
41 |
-
|
42 |
-
|
43 |
-
class ServiceId(str):
|
44 |
-
def hyphenize(self):
|
45 |
-
return hyphenize_service_id(self)
|
46 |
-
|
47 |
-
|
48 |
-
class Shape:
|
49 |
-
"""Object representing a shape from the service model."""
|
50 |
-
|
51 |
-
# To simplify serialization logic, all shape params that are
|
52 |
-
# related to serialization are moved from the top level hash into
|
53 |
-
# a 'serialization' hash. This list below contains the names of all
|
54 |
-
# the attributes that should be moved.
|
55 |
-
SERIALIZED_ATTRS = [
|
56 |
-
'locationName',
|
57 |
-
'queryName',
|
58 |
-
'flattened',
|
59 |
-
'location',
|
60 |
-
'payload',
|
61 |
-
'streaming',
|
62 |
-
'timestampFormat',
|
63 |
-
'xmlNamespace',
|
64 |
-
'resultWrapper',
|
65 |
-
'xmlAttribute',
|
66 |
-
'eventstream',
|
67 |
-
'event',
|
68 |
-
'eventheader',
|
69 |
-
'eventpayload',
|
70 |
-
'jsonvalue',
|
71 |
-
'timestampFormat',
|
72 |
-
'hostLabel',
|
73 |
-
]
|
74 |
-
METADATA_ATTRS = [
|
75 |
-
'required',
|
76 |
-
'min',
|
77 |
-
'max',
|
78 |
-
'pattern',
|
79 |
-
'sensitive',
|
80 |
-
'enum',
|
81 |
-
'idempotencyToken',
|
82 |
-
'error',
|
83 |
-
'exception',
|
84 |
-
'endpointdiscoveryid',
|
85 |
-
'retryable',
|
86 |
-
'document',
|
87 |
-
'union',
|
88 |
-
'contextParam',
|
89 |
-
'clientContextParams',
|
90 |
-
]
|
91 |
-
MAP_TYPE = OrderedDict
|
92 |
-
|
93 |
-
def __init__(self, shape_name, shape_model, shape_resolver=None):
|
94 |
-
"""
|
95 |
-
|
96 |
-
:type shape_name: string
|
97 |
-
:param shape_name: The name of the shape.
|
98 |
-
|
99 |
-
:type shape_model: dict
|
100 |
-
:param shape_model: The shape model. This would be the value
|
101 |
-
associated with the key in the "shapes" dict of the
|
102 |
-
service model (i.e ``model['shapes'][shape_name]``)
|
103 |
-
|
104 |
-
:type shape_resolver: botocore.model.ShapeResolver
|
105 |
-
:param shape_resolver: A shape resolver object. This is used to
|
106 |
-
resolve references to other shapes. For scalar shape types
|
107 |
-
(string, integer, boolean, etc.), this argument is not
|
108 |
-
required. If a shape_resolver is not provided for a complex
|
109 |
-
type, then a ``ValueError`` will be raised when an attempt
|
110 |
-
to resolve a shape is made.
|
111 |
-
|
112 |
-
"""
|
113 |
-
self.name = shape_name
|
114 |
-
self.type_name = shape_model['type']
|
115 |
-
self.documentation = shape_model.get('documentation', '')
|
116 |
-
self._shape_model = shape_model
|
117 |
-
if shape_resolver is None:
|
118 |
-
# If a shape_resolver is not provided, we create an object
|
119 |
-
# that will throw errors if you attempt to resolve
|
120 |
-
# a shape. This is actually ok for scalar shapes
|
121 |
-
# because they don't need to resolve shapes and shouldn't
|
122 |
-
# be required to provide an object they won't use.
|
123 |
-
shape_resolver = UnresolvableShapeMap()
|
124 |
-
self._shape_resolver = shape_resolver
|
125 |
-
self._cache = {}
|
126 |
-
|
127 |
-
@CachedProperty
|
128 |
-
def serialization(self):
|
129 |
-
"""Serialization information about the shape.
|
130 |
-
|
131 |
-
This contains information that may be needed for input serialization
|
132 |
-
or response parsing. This can include:
|
133 |
-
|
134 |
-
* name
|
135 |
-
* queryName
|
136 |
-
* flattened
|
137 |
-
* location
|
138 |
-
* payload
|
139 |
-
* streaming
|
140 |
-
* xmlNamespace
|
141 |
-
* resultWrapper
|
142 |
-
* xmlAttribute
|
143 |
-
* jsonvalue
|
144 |
-
* timestampFormat
|
145 |
-
|
146 |
-
:rtype: dict
|
147 |
-
:return: Serialization information about the shape.
|
148 |
-
|
149 |
-
"""
|
150 |
-
model = self._shape_model
|
151 |
-
serialization = {}
|
152 |
-
for attr in self.SERIALIZED_ATTRS:
|
153 |
-
if attr in self._shape_model:
|
154 |
-
serialization[attr] = model[attr]
|
155 |
-
# For consistency, locationName is renamed to just 'name'.
|
156 |
-
if 'locationName' in serialization:
|
157 |
-
serialization['name'] = serialization.pop('locationName')
|
158 |
-
return serialization
|
159 |
-
|
160 |
-
@CachedProperty
|
161 |
-
def metadata(self):
|
162 |
-
"""Metadata about the shape.
|
163 |
-
|
164 |
-
This requires optional information about the shape, including:
|
165 |
-
|
166 |
-
* min
|
167 |
-
* max
|
168 |
-
* pattern
|
169 |
-
* enum
|
170 |
-
* sensitive
|
171 |
-
* required
|
172 |
-
* idempotencyToken
|
173 |
-
* document
|
174 |
-
* union
|
175 |
-
|
176 |
-
:rtype: dict
|
177 |
-
:return: Metadata about the shape.
|
178 |
-
|
179 |
-
"""
|
180 |
-
model = self._shape_model
|
181 |
-
metadata = {}
|
182 |
-
for attr in self.METADATA_ATTRS:
|
183 |
-
if attr in self._shape_model:
|
184 |
-
metadata[attr] = model[attr]
|
185 |
-
return metadata
|
186 |
-
|
187 |
-
@CachedProperty
|
188 |
-
def required_members(self):
|
189 |
-
"""A list of members that are required.
|
190 |
-
|
191 |
-
A structure shape can define members that are required.
|
192 |
-
This value will return a list of required members. If there
|
193 |
-
are no required members an empty list is returned.
|
194 |
-
|
195 |
-
"""
|
196 |
-
return self.metadata.get('required', [])
|
197 |
-
|
198 |
-
def _resolve_shape_ref(self, shape_ref):
|
199 |
-
return self._shape_resolver.resolve_shape_ref(shape_ref)
|
200 |
-
|
201 |
-
def __repr__(self):
|
202 |
-
return f"<{self.__class__.__name__}({self.name})>"
|
203 |
-
|
204 |
-
@property
|
205 |
-
def event_stream_name(self):
|
206 |
-
return None
|
207 |
-
|
208 |
-
|
209 |
-
class StructureShape(Shape):
|
210 |
-
@CachedProperty
|
211 |
-
def members(self):
|
212 |
-
members = self._shape_model.get('members', self.MAP_TYPE())
|
213 |
-
# The members dict looks like:
|
214 |
-
# 'members': {
|
215 |
-
# 'MemberName': {'shape': 'shapeName'},
|
216 |
-
# 'MemberName2': {'shape': 'shapeName'},
|
217 |
-
# }
|
218 |
-
# We return a dict of member name to Shape object.
|
219 |
-
shape_members = self.MAP_TYPE()
|
220 |
-
for name, shape_ref in members.items():
|
221 |
-
shape_members[name] = self._resolve_shape_ref(shape_ref)
|
222 |
-
return shape_members
|
223 |
-
|
224 |
-
@CachedProperty
|
225 |
-
def event_stream_name(self):
|
226 |
-
for member_name, member in self.members.items():
|
227 |
-
if member.serialization.get('eventstream'):
|
228 |
-
return member_name
|
229 |
-
return None
|
230 |
-
|
231 |
-
@CachedProperty
|
232 |
-
def error_code(self):
|
233 |
-
if not self.metadata.get('exception', False):
|
234 |
-
return None
|
235 |
-
error_metadata = self.metadata.get("error", {})
|
236 |
-
code = error_metadata.get("code")
|
237 |
-
if code:
|
238 |
-
return code
|
239 |
-
# Use the exception name if there is no explicit code modeled
|
240 |
-
return self.name
|
241 |
-
|
242 |
-
@CachedProperty
|
243 |
-
def is_document_type(self):
|
244 |
-
return self.metadata.get('document', False)
|
245 |
-
|
246 |
-
@CachedProperty
|
247 |
-
def is_tagged_union(self):
|
248 |
-
return self.metadata.get('union', False)
|
249 |
-
|
250 |
-
|
251 |
-
class ListShape(Shape):
|
252 |
-
@CachedProperty
|
253 |
-
def member(self):
|
254 |
-
return self._resolve_shape_ref(self._shape_model['member'])
|
255 |
-
|
256 |
-
|
257 |
-
class MapShape(Shape):
|
258 |
-
@CachedProperty
|
259 |
-
def key(self):
|
260 |
-
return self._resolve_shape_ref(self._shape_model['key'])
|
261 |
-
|
262 |
-
@CachedProperty
|
263 |
-
def value(self):
|
264 |
-
return self._resolve_shape_ref(self._shape_model['value'])
|
265 |
-
|
266 |
-
|
267 |
-
class StringShape(Shape):
|
268 |
-
@CachedProperty
|
269 |
-
def enum(self):
|
270 |
-
return self.metadata.get('enum', [])
|
271 |
-
|
272 |
-
|
273 |
-
class StaticContextParameter(NamedTuple):
|
274 |
-
name: str
|
275 |
-
value: Union[bool, str]
|
276 |
-
|
277 |
-
|
278 |
-
class ContextParameter(NamedTuple):
|
279 |
-
name: str
|
280 |
-
member_name: str
|
281 |
-
|
282 |
-
|
283 |
-
class ClientContextParameter(NamedTuple):
|
284 |
-
name: str
|
285 |
-
type: str
|
286 |
-
documentation: str
|
287 |
-
|
288 |
-
|
289 |
-
class ServiceModel:
|
290 |
-
"""
|
291 |
-
|
292 |
-
:ivar service_description: The parsed service description dictionary.
|
293 |
-
|
294 |
-
"""
|
295 |
-
|
296 |
-
def __init__(self, service_description, service_name=None):
|
297 |
-
"""
|
298 |
-
|
299 |
-
:type service_description: dict
|
300 |
-
:param service_description: The service description model. This value
|
301 |
-
is obtained from a botocore.loader.Loader, or from directly loading
|
302 |
-
the file yourself::
|
303 |
-
|
304 |
-
service_description = json.load(
|
305 |
-
open('/path/to/service-description-model.json'))
|
306 |
-
model = ServiceModel(service_description)
|
307 |
-
|
308 |
-
:type service_name: str
|
309 |
-
:param service_name: The name of the service. Normally this is
|
310 |
-
the endpoint prefix defined in the service_description. However,
|
311 |
-
you can override this value to provide a more convenient name.
|
312 |
-
This is done in a few places in botocore (ses instead of email,
|
313 |
-
emr instead of elasticmapreduce). If this value is not provided,
|
314 |
-
it will default to the endpointPrefix defined in the model.
|
315 |
-
|
316 |
-
"""
|
317 |
-
self._service_description = service_description
|
318 |
-
# We want clients to be able to access metadata directly.
|
319 |
-
self.metadata = service_description.get('metadata', {})
|
320 |
-
self._shape_resolver = ShapeResolver(
|
321 |
-
service_description.get('shapes', {})
|
322 |
-
)
|
323 |
-
self._signature_version = NOT_SET
|
324 |
-
self._service_name = service_name
|
325 |
-
self._instance_cache = {}
|
326 |
-
|
327 |
-
def shape_for(self, shape_name, member_traits=None):
|
328 |
-
return self._shape_resolver.get_shape_by_name(
|
329 |
-
shape_name, member_traits
|
330 |
-
)
|
331 |
-
|
332 |
-
def shape_for_error_code(self, error_code):
|
333 |
-
return self._error_code_cache.get(error_code, None)
|
334 |
-
|
335 |
-
@CachedProperty
|
336 |
-
def _error_code_cache(self):
|
337 |
-
error_code_cache = {}
|
338 |
-
for error_shape in self.error_shapes:
|
339 |
-
code = error_shape.error_code
|
340 |
-
error_code_cache[code] = error_shape
|
341 |
-
return error_code_cache
|
342 |
-
|
343 |
-
def resolve_shape_ref(self, shape_ref):
|
344 |
-
return self._shape_resolver.resolve_shape_ref(shape_ref)
|
345 |
-
|
346 |
-
@CachedProperty
|
347 |
-
def shape_names(self):
|
348 |
-
return list(self._service_description.get('shapes', {}))
|
349 |
-
|
350 |
-
@CachedProperty
|
351 |
-
def error_shapes(self):
|
352 |
-
error_shapes = []
|
353 |
-
for shape_name in self.shape_names:
|
354 |
-
error_shape = self.shape_for(shape_name)
|
355 |
-
if error_shape.metadata.get('exception', False):
|
356 |
-
error_shapes.append(error_shape)
|
357 |
-
return error_shapes
|
358 |
-
|
359 |
-
@instance_cache
|
360 |
-
def operation_model(self, operation_name):
|
361 |
-
try:
|
362 |
-
model = self._service_description['operations'][operation_name]
|
363 |
-
except KeyError:
|
364 |
-
raise OperationNotFoundError(operation_name)
|
365 |
-
return OperationModel(model, self, operation_name)
|
366 |
-
|
367 |
-
@CachedProperty
|
368 |
-
def documentation(self):
|
369 |
-
return self._service_description.get('documentation', '')
|
370 |
-
|
371 |
-
@CachedProperty
|
372 |
-
def operation_names(self):
|
373 |
-
return list(self._service_description.get('operations', []))
|
374 |
-
|
375 |
-
@CachedProperty
|
376 |
-
def service_name(self):
|
377 |
-
"""The name of the service.
|
378 |
-
|
379 |
-
This defaults to the endpointPrefix defined in the service model.
|
380 |
-
However, this value can be overriden when a ``ServiceModel`` is
|
381 |
-
created. If a service_name was not provided when the ``ServiceModel``
|
382 |
-
was created and if there is no endpointPrefix defined in the
|
383 |
-
service model, then an ``UndefinedModelAttributeError`` exception
|
384 |
-
will be raised.
|
385 |
-
|
386 |
-
"""
|
387 |
-
if self._service_name is not None:
|
388 |
-
return self._service_name
|
389 |
-
else:
|
390 |
-
return self.endpoint_prefix
|
391 |
-
|
392 |
-
@CachedProperty
|
393 |
-
def service_id(self):
|
394 |
-
try:
|
395 |
-
return ServiceId(self._get_metadata_property('serviceId'))
|
396 |
-
except UndefinedModelAttributeError:
|
397 |
-
raise MissingServiceIdError(service_name=self._service_name)
|
398 |
-
|
399 |
-
@CachedProperty
|
400 |
-
def signing_name(self):
|
401 |
-
"""The name to use when computing signatures.
|
402 |
-
|
403 |
-
If the model does not define a signing name, this
|
404 |
-
value will be the endpoint prefix defined in the model.
|
405 |
-
"""
|
406 |
-
signing_name = self.metadata.get('signingName')
|
407 |
-
if signing_name is None:
|
408 |
-
signing_name = self.endpoint_prefix
|
409 |
-
return signing_name
|
410 |
-
|
411 |
-
@CachedProperty
|
412 |
-
def api_version(self):
|
413 |
-
return self._get_metadata_property('apiVersion')
|
414 |
-
|
415 |
-
@CachedProperty
|
416 |
-
def protocol(self):
|
417 |
-
return self._get_metadata_property('protocol')
|
418 |
-
|
419 |
-
@CachedProperty
|
420 |
-
def endpoint_prefix(self):
|
421 |
-
return self._get_metadata_property('endpointPrefix')
|
422 |
-
|
423 |
-
@CachedProperty
|
424 |
-
def endpoint_discovery_operation(self):
|
425 |
-
for operation in self.operation_names:
|
426 |
-
model = self.operation_model(operation)
|
427 |
-
if model.is_endpoint_discovery_operation:
|
428 |
-
return model
|
429 |
-
|
430 |
-
@CachedProperty
|
431 |
-
def endpoint_discovery_required(self):
|
432 |
-
for operation in self.operation_names:
|
433 |
-
model = self.operation_model(operation)
|
434 |
-
if (
|
435 |
-
model.endpoint_discovery is not None
|
436 |
-
and model.endpoint_discovery.get('required')
|
437 |
-
):
|
438 |
-
return True
|
439 |
-
return False
|
440 |
-
|
441 |
-
@CachedProperty
|
442 |
-
def client_context_parameters(self):
|
443 |
-
params = self._service_description.get('clientContextParams', {})
|
444 |
-
return [
|
445 |
-
ClientContextParameter(
|
446 |
-
name=param_name,
|
447 |
-
type=param_val['type'],
|
448 |
-
documentation=param_val['documentation'],
|
449 |
-
)
|
450 |
-
for param_name, param_val in params.items()
|
451 |
-
]
|
452 |
-
|
453 |
-
def _get_metadata_property(self, name):
|
454 |
-
try:
|
455 |
-
return self.metadata[name]
|
456 |
-
except KeyError:
|
457 |
-
raise UndefinedModelAttributeError(
|
458 |
-
f'"{name}" not defined in the metadata of the model: {self}'
|
459 |
-
)
|
460 |
-
|
461 |
-
# Signature version is one of the rare properties
|
462 |
-
# that can be modified so a CachedProperty is not used here.
|
463 |
-
|
464 |
-
@property
|
465 |
-
def signature_version(self):
|
466 |
-
if self._signature_version is NOT_SET:
|
467 |
-
signature_version = self.metadata.get('signatureVersion')
|
468 |
-
self._signature_version = signature_version
|
469 |
-
return self._signature_version
|
470 |
-
|
471 |
-
@signature_version.setter
|
472 |
-
def signature_version(self, value):
|
473 |
-
self._signature_version = value
|
474 |
-
|
475 |
-
def __repr__(self):
|
476 |
-
return f'{self.__class__.__name__}({self.service_name})'
|
477 |
-
|
478 |
-
|
479 |
-
class OperationModel:
|
480 |
-
def __init__(self, operation_model, service_model, name=None):
|
481 |
-
"""
|
482 |
-
|
483 |
-
:type operation_model: dict
|
484 |
-
:param operation_model: The operation model. This comes from the
|
485 |
-
service model, and is the value associated with the operation
|
486 |
-
name in the service model (i.e ``model['operations'][op_name]``).
|
487 |
-
|
488 |
-
:type service_model: botocore.model.ServiceModel
|
489 |
-
:param service_model: The service model associated with the operation.
|
490 |
-
|
491 |
-
:type name: string
|
492 |
-
:param name: The operation name. This is the operation name exposed to
|
493 |
-
the users of this model. This can potentially be different from
|
494 |
-
the "wire_name", which is the operation name that *must* by
|
495 |
-
provided over the wire. For example, given::
|
496 |
-
|
497 |
-
"CreateCloudFrontOriginAccessIdentity":{
|
498 |
-
"name":"CreateCloudFrontOriginAccessIdentity2014_11_06",
|
499 |
-
...
|
500 |
-
}
|
501 |
-
|
502 |
-
The ``name`` would be ``CreateCloudFrontOriginAccessIdentity``,
|
503 |
-
but the ``self.wire_name`` would be
|
504 |
-
``CreateCloudFrontOriginAccessIdentity2014_11_06``, which is the
|
505 |
-
value we must send in the corresponding HTTP request.
|
506 |
-
|
507 |
-
"""
|
508 |
-
self._operation_model = operation_model
|
509 |
-
self._service_model = service_model
|
510 |
-
self._api_name = name
|
511 |
-
# Clients can access '.name' to get the operation name
|
512 |
-
# and '.metadata' to get the top level metdata of the service.
|
513 |
-
self._wire_name = operation_model.get('name')
|
514 |
-
self.metadata = service_model.metadata
|
515 |
-
self.http = operation_model.get('http', {})
|
516 |
-
|
517 |
-
@CachedProperty
|
518 |
-
def name(self):
|
519 |
-
if self._api_name is not None:
|
520 |
-
return self._api_name
|
521 |
-
else:
|
522 |
-
return self.wire_name
|
523 |
-
|
524 |
-
@property
|
525 |
-
def wire_name(self):
|
526 |
-
"""The wire name of the operation.
|
527 |
-
|
528 |
-
In many situations this is the same value as the
|
529 |
-
``name``, value, but in some services, the operation name
|
530 |
-
exposed to the user is different from the operaiton name
|
531 |
-
we send across the wire (e.g cloudfront).
|
532 |
-
|
533 |
-
Any serialization code should use ``wire_name``.
|
534 |
-
|
535 |
-
"""
|
536 |
-
return self._operation_model.get('name')
|
537 |
-
|
538 |
-
@property
|
539 |
-
def service_model(self):
|
540 |
-
return self._service_model
|
541 |
-
|
542 |
-
@CachedProperty
|
543 |
-
def documentation(self):
|
544 |
-
return self._operation_model.get('documentation', '')
|
545 |
-
|
546 |
-
@CachedProperty
|
547 |
-
def deprecated(self):
|
548 |
-
return self._operation_model.get('deprecated', False)
|
549 |
-
|
550 |
-
@CachedProperty
|
551 |
-
def endpoint_discovery(self):
|
552 |
-
# Explicit None default. An empty dictionary for this trait means it is
|
553 |
-
# enabled but not required to be used.
|
554 |
-
return self._operation_model.get('endpointdiscovery', None)
|
555 |
-
|
556 |
-
@CachedProperty
|
557 |
-
def is_endpoint_discovery_operation(self):
|
558 |
-
return self._operation_model.get('endpointoperation', False)
|
559 |
-
|
560 |
-
@CachedProperty
|
561 |
-
def input_shape(self):
|
562 |
-
if 'input' not in self._operation_model:
|
563 |
-
# Some operations do not accept any input and do not define an
|
564 |
-
# input shape.
|
565 |
-
return None
|
566 |
-
return self._service_model.resolve_shape_ref(
|
567 |
-
self._operation_model['input']
|
568 |
-
)
|
569 |
-
|
570 |
-
@CachedProperty
|
571 |
-
def output_shape(self):
|
572 |
-
if 'output' not in self._operation_model:
|
573 |
-
# Some operations do not define an output shape,
|
574 |
-
# in which case we return None to indicate the
|
575 |
-
# operation has no expected output.
|
576 |
-
return None
|
577 |
-
return self._service_model.resolve_shape_ref(
|
578 |
-
self._operation_model['output']
|
579 |
-
)
|
580 |
-
|
581 |
-
@CachedProperty
|
582 |
-
def idempotent_members(self):
|
583 |
-
input_shape = self.input_shape
|
584 |
-
if not input_shape:
|
585 |
-
return []
|
586 |
-
|
587 |
-
return [
|
588 |
-
name
|
589 |
-
for (name, shape) in input_shape.members.items()
|
590 |
-
if 'idempotencyToken' in shape.metadata
|
591 |
-
and shape.metadata['idempotencyToken']
|
592 |
-
]
|
593 |
-
|
594 |
-
@CachedProperty
|
595 |
-
def static_context_parameters(self):
|
596 |
-
params = self._operation_model.get('staticContextParams', {})
|
597 |
-
return [
|
598 |
-
StaticContextParameter(name=name, value=props.get('value'))
|
599 |
-
for name, props in params.items()
|
600 |
-
]
|
601 |
-
|
602 |
-
@CachedProperty
|
603 |
-
def context_parameters(self):
|
604 |
-
if not self.input_shape:
|
605 |
-
return []
|
606 |
-
|
607 |
-
return [
|
608 |
-
ContextParameter(
|
609 |
-
name=shape.metadata['contextParam']['name'],
|
610 |
-
member_name=name,
|
611 |
-
)
|
612 |
-
for name, shape in self.input_shape.members.items()
|
613 |
-
if 'contextParam' in shape.metadata
|
614 |
-
and 'name' in shape.metadata['contextParam']
|
615 |
-
]
|
616 |
-
|
617 |
-
@CachedProperty
|
618 |
-
def auth_type(self):
|
619 |
-
return self._operation_model.get('authtype')
|
620 |
-
|
621 |
-
@CachedProperty
|
622 |
-
def error_shapes(self):
|
623 |
-
shapes = self._operation_model.get("errors", [])
|
624 |
-
return list(self._service_model.resolve_shape_ref(s) for s in shapes)
|
625 |
-
|
626 |
-
@CachedProperty
|
627 |
-
def endpoint(self):
|
628 |
-
return self._operation_model.get('endpoint')
|
629 |
-
|
630 |
-
@CachedProperty
|
631 |
-
def http_checksum_required(self):
|
632 |
-
return self._operation_model.get('httpChecksumRequired', False)
|
633 |
-
|
634 |
-
@CachedProperty
|
635 |
-
def http_checksum(self):
|
636 |
-
return self._operation_model.get('httpChecksum', {})
|
637 |
-
|
638 |
-
@CachedProperty
|
639 |
-
def has_event_stream_input(self):
|
640 |
-
return self.get_event_stream_input() is not None
|
641 |
-
|
642 |
-
@CachedProperty
|
643 |
-
def has_event_stream_output(self):
|
644 |
-
return self.get_event_stream_output() is not None
|
645 |
-
|
646 |
-
def get_event_stream_input(self):
|
647 |
-
return self._get_event_stream(self.input_shape)
|
648 |
-
|
649 |
-
def get_event_stream_output(self):
|
650 |
-
return self._get_event_stream(self.output_shape)
|
651 |
-
|
652 |
-
def _get_event_stream(self, shape):
|
653 |
-
"""Returns the event stream member's shape if any or None otherwise."""
|
654 |
-
if shape is None:
|
655 |
-
return None
|
656 |
-
event_name = shape.event_stream_name
|
657 |
-
if event_name:
|
658 |
-
return shape.members[event_name]
|
659 |
-
return None
|
660 |
-
|
661 |
-
@CachedProperty
|
662 |
-
def has_streaming_input(self):
|
663 |
-
return self.get_streaming_input() is not None
|
664 |
-
|
665 |
-
@CachedProperty
|
666 |
-
def has_streaming_output(self):
|
667 |
-
return self.get_streaming_output() is not None
|
668 |
-
|
669 |
-
def get_streaming_input(self):
|
670 |
-
return self._get_streaming_body(self.input_shape)
|
671 |
-
|
672 |
-
def get_streaming_output(self):
|
673 |
-
return self._get_streaming_body(self.output_shape)
|
674 |
-
|
675 |
-
def _get_streaming_body(self, shape):
|
676 |
-
"""Returns the streaming member's shape if any; or None otherwise."""
|
677 |
-
if shape is None:
|
678 |
-
return None
|
679 |
-
payload = shape.serialization.get('payload')
|
680 |
-
if payload is not None:
|
681 |
-
payload_shape = shape.members[payload]
|
682 |
-
if payload_shape.type_name == 'blob':
|
683 |
-
return payload_shape
|
684 |
-
return None
|
685 |
-
|
686 |
-
def __repr__(self):
|
687 |
-
return f'{self.__class__.__name__}(name={self.name})'
|
688 |
-
|
689 |
-
|
690 |
-
class ShapeResolver:
|
691 |
-
"""Resolves shape references."""
|
692 |
-
|
693 |
-
# Any type not in this mapping will default to the Shape class.
|
694 |
-
SHAPE_CLASSES = {
|
695 |
-
'structure': StructureShape,
|
696 |
-
'list': ListShape,
|
697 |
-
'map': MapShape,
|
698 |
-
'string': StringShape,
|
699 |
-
}
|
700 |
-
|
701 |
-
def __init__(self, shape_map):
|
702 |
-
self._shape_map = shape_map
|
703 |
-
self._shape_cache = {}
|
704 |
-
|
705 |
-
def get_shape_by_name(self, shape_name, member_traits=None):
|
706 |
-
try:
|
707 |
-
shape_model = self._shape_map[shape_name]
|
708 |
-
except KeyError:
|
709 |
-
raise NoShapeFoundError(shape_name)
|
710 |
-
try:
|
711 |
-
shape_cls = self.SHAPE_CLASSES.get(shape_model['type'], Shape)
|
712 |
-
except KeyError:
|
713 |
-
raise InvalidShapeError(
|
714 |
-
f"Shape is missing required key 'type': {shape_model}"
|
715 |
-
)
|
716 |
-
if member_traits:
|
717 |
-
shape_model = shape_model.copy()
|
718 |
-
shape_model.update(member_traits)
|
719 |
-
result = shape_cls(shape_name, shape_model, self)
|
720 |
-
return result
|
721 |
-
|
722 |
-
def resolve_shape_ref(self, shape_ref):
|
723 |
-
# A shape_ref is a dict that has a 'shape' key that
|
724 |
-
# refers to a shape name as well as any additional
|
725 |
-
# member traits that are then merged over the shape
|
726 |
-
# definition. For example:
|
727 |
-
# {"shape": "StringType", "locationName": "Foobar"}
|
728 |
-
if len(shape_ref) == 1 and 'shape' in shape_ref:
|
729 |
-
# It's just a shape ref with no member traits, we can avoid
|
730 |
-
# a .copy(). This is the common case so it's specifically
|
731 |
-
# called out here.
|
732 |
-
return self.get_shape_by_name(shape_ref['shape'])
|
733 |
-
else:
|
734 |
-
member_traits = shape_ref.copy()
|
735 |
-
try:
|
736 |
-
shape_name = member_traits.pop('shape')
|
737 |
-
except KeyError:
|
738 |
-
raise InvalidShapeReferenceError(
|
739 |
-
f"Invalid model, missing shape reference: {shape_ref}"
|
740 |
-
)
|
741 |
-
return self.get_shape_by_name(shape_name, member_traits)
|
742 |
-
|
743 |
-
|
744 |
-
class UnresolvableShapeMap:
|
745 |
-
"""A ShapeResolver that will throw ValueErrors when shapes are resolved."""
|
746 |
-
|
747 |
-
def get_shape_by_name(self, shape_name, member_traits=None):
|
748 |
-
raise ValueError(
|
749 |
-
f"Attempted to lookup shape '{shape_name}', but no shape map was provided."
|
750 |
-
)
|
751 |
-
|
752 |
-
def resolve_shape_ref(self, shape_ref):
|
753 |
-
raise ValueError(
|
754 |
-
f"Attempted to resolve shape '{shape_ref}', but no shape "
|
755 |
-
f"map was provided."
|
756 |
-
)
|
757 |
-
|
758 |
-
|
759 |
-
class DenormalizedStructureBuilder:
|
760 |
-
"""Build a StructureShape from a denormalized model.
|
761 |
-
|
762 |
-
This is a convenience builder class that makes it easy to construct
|
763 |
-
``StructureShape``s based on a denormalized model.
|
764 |
-
|
765 |
-
It will handle the details of creating unique shape names and creating
|
766 |
-
the appropriate shape map needed by the ``StructureShape`` class.
|
767 |
-
|
768 |
-
Example usage::
|
769 |
-
|
770 |
-
builder = DenormalizedStructureBuilder()
|
771 |
-
shape = builder.with_members({
|
772 |
-
'A': {
|
773 |
-
'type': 'structure',
|
774 |
-
'members': {
|
775 |
-
'B': {
|
776 |
-
'type': 'structure',
|
777 |
-
'members': {
|
778 |
-
'C': {
|
779 |
-
'type': 'string',
|
780 |
-
}
|
781 |
-
}
|
782 |
-
}
|
783 |
-
}
|
784 |
-
}
|
785 |
-
}).build_model()
|
786 |
-
# ``shape`` is now an instance of botocore.model.StructureShape
|
787 |
-
|
788 |
-
:type dict_type: class
|
789 |
-
:param dict_type: The dictionary type to use, allowing you to opt-in
|
790 |
-
to using OrderedDict or another dict type. This can
|
791 |
-
be particularly useful for testing when order
|
792 |
-
matters, such as for documentation.
|
793 |
-
|
794 |
-
"""
|
795 |
-
|
796 |
-
SCALAR_TYPES = (
|
797 |
-
'string',
|
798 |
-
'integer',
|
799 |
-
'boolean',
|
800 |
-
'blob',
|
801 |
-
'float',
|
802 |
-
'timestamp',
|
803 |
-
'long',
|
804 |
-
'double',
|
805 |
-
'char',
|
806 |
-
)
|
807 |
-
|
808 |
-
def __init__(self, name=None):
|
809 |
-
self.members = OrderedDict()
|
810 |
-
self._name_generator = ShapeNameGenerator()
|
811 |
-
if name is None:
|
812 |
-
self.name = self._name_generator.new_shape_name('structure')
|
813 |
-
|
814 |
-
def with_members(self, members):
|
815 |
-
"""
|
816 |
-
|
817 |
-
:type members: dict
|
818 |
-
:param members: The denormalized members.
|
819 |
-
|
820 |
-
:return: self
|
821 |
-
|
822 |
-
"""
|
823 |
-
self._members = members
|
824 |
-
return self
|
825 |
-
|
826 |
-
def build_model(self):
|
827 |
-
"""Build the model based on the provided members.
|
828 |
-
|
829 |
-
:rtype: botocore.model.StructureShape
|
830 |
-
:return: The built StructureShape object.
|
831 |
-
|
832 |
-
"""
|
833 |
-
shapes = OrderedDict()
|
834 |
-
denormalized = {
|
835 |
-
'type': 'structure',
|
836 |
-
'members': self._members,
|
837 |
-
}
|
838 |
-
self._build_model(denormalized, shapes, self.name)
|
839 |
-
resolver = ShapeResolver(shape_map=shapes)
|
840 |
-
return StructureShape(
|
841 |
-
shape_name=self.name,
|
842 |
-
shape_model=shapes[self.name],
|
843 |
-
shape_resolver=resolver,
|
844 |
-
)
|
845 |
-
|
846 |
-
def _build_model(self, model, shapes, shape_name):
|
847 |
-
if model['type'] == 'structure':
|
848 |
-
shapes[shape_name] = self._build_structure(model, shapes)
|
849 |
-
elif model['type'] == 'list':
|
850 |
-
shapes[shape_name] = self._build_list(model, shapes)
|
851 |
-
elif model['type'] == 'map':
|
852 |
-
shapes[shape_name] = self._build_map(model, shapes)
|
853 |
-
elif model['type'] in self.SCALAR_TYPES:
|
854 |
-
shapes[shape_name] = self._build_scalar(model)
|
855 |
-
else:
|
856 |
-
raise InvalidShapeError(f"Unknown shape type: {model['type']}")
|
857 |
-
|
858 |
-
def _build_structure(self, model, shapes):
|
859 |
-
members = OrderedDict()
|
860 |
-
shape = self._build_initial_shape(model)
|
861 |
-
shape['members'] = members
|
862 |
-
|
863 |
-
for name, member_model in model.get('members', OrderedDict()).items():
|
864 |
-
member_shape_name = self._get_shape_name(member_model)
|
865 |
-
members[name] = {'shape': member_shape_name}
|
866 |
-
self._build_model(member_model, shapes, member_shape_name)
|
867 |
-
return shape
|
868 |
-
|
869 |
-
def _build_list(self, model, shapes):
|
870 |
-
member_shape_name = self._get_shape_name(model)
|
871 |
-
shape = self._build_initial_shape(model)
|
872 |
-
shape['member'] = {'shape': member_shape_name}
|
873 |
-
self._build_model(model['member'], shapes, member_shape_name)
|
874 |
-
return shape
|
875 |
-
|
876 |
-
def _build_map(self, model, shapes):
|
877 |
-
key_shape_name = self._get_shape_name(model['key'])
|
878 |
-
value_shape_name = self._get_shape_name(model['value'])
|
879 |
-
shape = self._build_initial_shape(model)
|
880 |
-
shape['key'] = {'shape': key_shape_name}
|
881 |
-
shape['value'] = {'shape': value_shape_name}
|
882 |
-
self._build_model(model['key'], shapes, key_shape_name)
|
883 |
-
self._build_model(model['value'], shapes, value_shape_name)
|
884 |
-
return shape
|
885 |
-
|
886 |
-
def _build_initial_shape(self, model):
|
887 |
-
shape = {
|
888 |
-
'type': model['type'],
|
889 |
-
}
|
890 |
-
if 'documentation' in model:
|
891 |
-
shape['documentation'] = model['documentation']
|
892 |
-
for attr in Shape.METADATA_ATTRS:
|
893 |
-
if attr in model:
|
894 |
-
shape[attr] = model[attr]
|
895 |
-
return shape
|
896 |
-
|
897 |
-
def _build_scalar(self, model):
|
898 |
-
return self._build_initial_shape(model)
|
899 |
-
|
900 |
-
def _get_shape_name(self, model):
|
901 |
-
if 'shape_name' in model:
|
902 |
-
return model['shape_name']
|
903 |
-
else:
|
904 |
-
return self._name_generator.new_shape_name(model['type'])
|
905 |
-
|
906 |
-
|
907 |
-
class ShapeNameGenerator:
|
908 |
-
"""Generate unique shape names for a type.
|
909 |
-
|
910 |
-
This class can be used in conjunction with the DenormalizedStructureBuilder
|
911 |
-
to generate unique shape names for a given type.
|
912 |
-
|
913 |
-
"""
|
914 |
-
|
915 |
-
def __init__(self):
|
916 |
-
self._name_cache = defaultdict(int)
|
917 |
-
|
918 |
-
def new_shape_name(self, type_name):
|
919 |
-
"""Generate a unique shape name.
|
920 |
-
|
921 |
-
This method will guarantee a unique shape name each time it is
|
922 |
-
called with the same type.
|
923 |
-
|
924 |
-
::
|
925 |
-
|
926 |
-
>>> s = ShapeNameGenerator()
|
927 |
-
>>> s.new_shape_name('structure')
|
928 |
-
'StructureType1'
|
929 |
-
>>> s.new_shape_name('structure')
|
930 |
-
'StructureType2'
|
931 |
-
>>> s.new_shape_name('list')
|
932 |
-
'ListType1'
|
933 |
-
>>> s.new_shape_name('list')
|
934 |
-
'ListType2'
|
935 |
-
|
936 |
-
|
937 |
-
:type type_name: string
|
938 |
-
:param type_name: The type name (structure, list, map, string, etc.)
|
939 |
-
|
940 |
-
:rtype: string
|
941 |
-
:return: A unique shape name for the given type
|
942 |
-
|
943 |
-
"""
|
944 |
-
self._name_cache[type_name] += 1
|
945 |
-
current_index = self._name_cache[type_name]
|
946 |
-
return f'{type_name.capitalize()}Type{current_index}'
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/BilalSardar/Gpt4All/app.py
DELETED
@@ -1,18 +0,0 @@
|
|
1 |
-
import gradio as gr
|
2 |
-
from nomic.gpt4all.gpt4all import GPT4AllGPU
|
3 |
-
|
4 |
-
m = GPT4AllGPU()
|
5 |
-
m.open()
|
6 |
-
|
7 |
-
def chat(input):
|
8 |
-
return m.prompt(input)
|
9 |
-
|
10 |
-
demo=gr.Interface(fn=chat,
|
11 |
-
inputs="text",
|
12 |
-
outputs="text",
|
13 |
-
examples=[['write me a story about a lonely computer']],
|
14 |
-
title="GPT4ALL",
|
15 |
-
description="Check https://github.com/nomic-ai/gpt4all"
|
16 |
-
)
|
17 |
-
|
18 |
-
demo.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Branon/TurboKeys/README.md
DELETED
@@ -1,11 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: TurboKeys
|
3 |
-
emoji: 😔
|
4 |
-
colorFrom: red
|
5 |
-
colorTo: blue
|
6 |
-
sdk: docker
|
7 |
-
pinned: false
|
8 |
-
duplicated_from: Branon/TempBRICS
|
9 |
-
---
|
10 |
-
|
11 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Brasd99/JustClothify/app.py
DELETED
@@ -1,45 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
import subprocess
|
3 |
-
from typing import Dict
|
4 |
-
import json
|
5 |
-
import numpy as np
|
6 |
-
import wget
|
7 |
-
import gradio as gr
|
8 |
-
subprocess.call(['pip', 'install', 'git+https://github.com/facebookresearch/detectron2@main#subdirectory=projects/DensePose'])
|
9 |
-
from helpers.processor import TextureProcessor
|
10 |
-
|
11 |
-
def image_processing(person_img: np.ndarray, model_img: np.ndarray) -> np.ndarray:
|
12 |
-
print('Attempt to get textured image.')
|
13 |
-
return texture_processor.extract(person_img, model_img)
|
14 |
-
|
15 |
-
def load_model(current_path: str, config: Dict) -> None:
|
16 |
-
data_path = os.path.join(current_path, 'data')
|
17 |
-
if not os.path.isdir(data_path):
|
18 |
-
os.mkdir(data_path)
|
19 |
-
for filename, url in config.items():
|
20 |
-
wget.download(url, os.path.join(data_path, filename))
|
21 |
-
|
22 |
-
with open("config.json", "r") as f:
|
23 |
-
config = json.load(f)
|
24 |
-
|
25 |
-
current_path = os.getcwd()
|
26 |
-
load_model(current_path, config)
|
27 |
-
densepose_config = os.path.join(current_path, 'data', 'config.yaml')
|
28 |
-
densepose_weights = os.path.join(current_path, 'data', 'weights.pkl')
|
29 |
-
|
30 |
-
texture_processor = TextureProcessor(densepose_config, densepose_weights)
|
31 |
-
|
32 |
-
title = '<h1 style="text-align:center">JustClothify</h1>'
|
33 |
-
|
34 |
-
with gr.Blocks(theme='soft', title='JustClothify') as blocks:
|
35 |
-
gr.HTML(title)
|
36 |
-
gr.Markdown('Upload an image of a person and an image of a model with clothes, the system will generate an image of a person wearing these clothes.')
|
37 |
-
with gr.Row():
|
38 |
-
person_image = gr.inputs.Image(label='Person Image', type='numpy')
|
39 |
-
model_image = gr.inputs.Image(label='Model Image (with clothes)', type='numpy')
|
40 |
-
process_button = gr.Button('Process')
|
41 |
-
outputs = gr.outputs.Image(label='Result Image', type='numpy')
|
42 |
-
|
43 |
-
process_button.click(fn=image_processing, inputs=[person_image, model_image], outputs=outputs)
|
44 |
-
|
45 |
-
blocks.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/layers/csrc/ROIAlign/ROIAlign_cpu.cpp
DELETED
@@ -1,503 +0,0 @@
|
|
1 |
-
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
2 |
-
#include <ATen/TensorUtils.h>
|
3 |
-
#include "ROIAlign.h"
|
4 |
-
|
5 |
-
namespace {
|
6 |
-
|
7 |
-
// implementation taken from Caffe2
|
8 |
-
template <typename T>
|
9 |
-
struct PreCalc {
|
10 |
-
int pos1;
|
11 |
-
int pos2;
|
12 |
-
int pos3;
|
13 |
-
int pos4;
|
14 |
-
T w1;
|
15 |
-
T w2;
|
16 |
-
T w3;
|
17 |
-
T w4;
|
18 |
-
};
|
19 |
-
|
20 |
-
template <typename T>
|
21 |
-
void pre_calc_for_bilinear_interpolate(
|
22 |
-
const int height,
|
23 |
-
const int width,
|
24 |
-
const int pooled_height,
|
25 |
-
const int pooled_width,
|
26 |
-
const int iy_upper,
|
27 |
-
const int ix_upper,
|
28 |
-
T roi_start_h,
|
29 |
-
T roi_start_w,
|
30 |
-
T bin_size_h,
|
31 |
-
T bin_size_w,
|
32 |
-
int roi_bin_grid_h,
|
33 |
-
int roi_bin_grid_w,
|
34 |
-
std::vector<PreCalc<T>>& pre_calc) {
|
35 |
-
int pre_calc_index = 0;
|
36 |
-
for (int ph = 0; ph < pooled_height; ph++) {
|
37 |
-
for (int pw = 0; pw < pooled_width; pw++) {
|
38 |
-
for (int iy = 0; iy < iy_upper; iy++) {
|
39 |
-
const T yy = roi_start_h + ph * bin_size_h +
|
40 |
-
static_cast<T>(iy + .5f) * bin_size_h /
|
41 |
-
static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
|
42 |
-
for (int ix = 0; ix < ix_upper; ix++) {
|
43 |
-
const T xx = roi_start_w + pw * bin_size_w +
|
44 |
-
static_cast<T>(ix + .5f) * bin_size_w /
|
45 |
-
static_cast<T>(roi_bin_grid_w);
|
46 |
-
|
47 |
-
T x = xx;
|
48 |
-
T y = yy;
|
49 |
-
// deal with: inverse elements are out of feature map boundary
|
50 |
-
if (y < -1.0 || y > height || x < -1.0 || x > width) {
|
51 |
-
// empty
|
52 |
-
PreCalc<T> pc;
|
53 |
-
pc.pos1 = 0;
|
54 |
-
pc.pos2 = 0;
|
55 |
-
pc.pos3 = 0;
|
56 |
-
pc.pos4 = 0;
|
57 |
-
pc.w1 = 0;
|
58 |
-
pc.w2 = 0;
|
59 |
-
pc.w3 = 0;
|
60 |
-
pc.w4 = 0;
|
61 |
-
pre_calc[pre_calc_index] = pc;
|
62 |
-
pre_calc_index += 1;
|
63 |
-
continue;
|
64 |
-
}
|
65 |
-
|
66 |
-
if (y <= 0) {
|
67 |
-
y = 0;
|
68 |
-
}
|
69 |
-
if (x <= 0) {
|
70 |
-
x = 0;
|
71 |
-
}
|
72 |
-
|
73 |
-
int y_low = (int)y;
|
74 |
-
int x_low = (int)x;
|
75 |
-
int y_high;
|
76 |
-
int x_high;
|
77 |
-
|
78 |
-
if (y_low >= height - 1) {
|
79 |
-
y_high = y_low = height - 1;
|
80 |
-
y = (T)y_low;
|
81 |
-
} else {
|
82 |
-
y_high = y_low + 1;
|
83 |
-
}
|
84 |
-
|
85 |
-
if (x_low >= width - 1) {
|
86 |
-
x_high = x_low = width - 1;
|
87 |
-
x = (T)x_low;
|
88 |
-
} else {
|
89 |
-
x_high = x_low + 1;
|
90 |
-
}
|
91 |
-
|
92 |
-
T ly = y - y_low;
|
93 |
-
T lx = x - x_low;
|
94 |
-
T hy = 1. - ly, hx = 1. - lx;
|
95 |
-
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
|
96 |
-
|
97 |
-
// save weights and indices
|
98 |
-
PreCalc<T> pc;
|
99 |
-
pc.pos1 = y_low * width + x_low;
|
100 |
-
pc.pos2 = y_low * width + x_high;
|
101 |
-
pc.pos3 = y_high * width + x_low;
|
102 |
-
pc.pos4 = y_high * width + x_high;
|
103 |
-
pc.w1 = w1;
|
104 |
-
pc.w2 = w2;
|
105 |
-
pc.w3 = w3;
|
106 |
-
pc.w4 = w4;
|
107 |
-
pre_calc[pre_calc_index] = pc;
|
108 |
-
|
109 |
-
pre_calc_index += 1;
|
110 |
-
}
|
111 |
-
}
|
112 |
-
}
|
113 |
-
}
|
114 |
-
}
|
115 |
-
|
116 |
-
template <typename T>
|
117 |
-
void ROIAlignForward(
|
118 |
-
const int nthreads,
|
119 |
-
const T* input,
|
120 |
-
const T& spatial_scale,
|
121 |
-
const int channels,
|
122 |
-
const int height,
|
123 |
-
const int width,
|
124 |
-
const int pooled_height,
|
125 |
-
const int pooled_width,
|
126 |
-
const int sampling_ratio,
|
127 |
-
const T* rois,
|
128 |
-
T* output,
|
129 |
-
bool aligned) {
|
130 |
-
int n_rois = nthreads / channels / pooled_width / pooled_height;
|
131 |
-
// (n, c, ph, pw) is an element in the pooled output
|
132 |
-
// can be parallelized using omp
|
133 |
-
// #pragma omp parallel for num_threads(32)
|
134 |
-
for (int n = 0; n < n_rois; n++) {
|
135 |
-
int index_n = n * channels * pooled_width * pooled_height;
|
136 |
-
|
137 |
-
const T* offset_rois = rois + n * 5;
|
138 |
-
int roi_batch_ind = offset_rois[0];
|
139 |
-
|
140 |
-
// Do not use rounding; this implementation detail is critical
|
141 |
-
T offset = aligned ? (T)0.5 : (T)0.0;
|
142 |
-
T roi_start_w = offset_rois[1] * spatial_scale - offset;
|
143 |
-
T roi_start_h = offset_rois[2] * spatial_scale - offset;
|
144 |
-
T roi_end_w = offset_rois[3] * spatial_scale - offset;
|
145 |
-
T roi_end_h = offset_rois[4] * spatial_scale - offset;
|
146 |
-
|
147 |
-
T roi_width = roi_end_w - roi_start_w;
|
148 |
-
T roi_height = roi_end_h - roi_start_h;
|
149 |
-
if (aligned) {
|
150 |
-
AT_ASSERTM(
|
151 |
-
roi_width >= 0 && roi_height >= 0,
|
152 |
-
"ROIs in ROIAlign cannot have non-negative size!");
|
153 |
-
} else { // for backward-compatibility only
|
154 |
-
roi_width = std::max(roi_width, (T)1.);
|
155 |
-
roi_height = std::max(roi_height, (T)1.);
|
156 |
-
}
|
157 |
-
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
|
158 |
-
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
|
159 |
-
|
160 |
-
// We use roi_bin_grid to sample the grid and mimic integral
|
161 |
-
int roi_bin_grid_h = (sampling_ratio > 0)
|
162 |
-
? sampling_ratio
|
163 |
-
: ceil(roi_height / pooled_height); // e.g., = 2
|
164 |
-
int roi_bin_grid_w =
|
165 |
-
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
|
166 |
-
|
167 |
-
// We do average (integral) pooling inside a bin
|
168 |
-
// When the grid is empty, output zeros == 0/1, instead of NaN.
|
169 |
-
const T count = std::max(roi_bin_grid_h * roi_bin_grid_w, 1); // e.g. = 4
|
170 |
-
|
171 |
-
// we want to precalculate indices and weights shared by all channels,
|
172 |
-
// this is the key point of optimization
|
173 |
-
std::vector<PreCalc<T>> pre_calc(
|
174 |
-
roi_bin_grid_h * roi_bin_grid_w * pooled_width * pooled_height);
|
175 |
-
pre_calc_for_bilinear_interpolate(
|
176 |
-
height,
|
177 |
-
width,
|
178 |
-
pooled_height,
|
179 |
-
pooled_width,
|
180 |
-
roi_bin_grid_h,
|
181 |
-
roi_bin_grid_w,
|
182 |
-
roi_start_h,
|
183 |
-
roi_start_w,
|
184 |
-
bin_size_h,
|
185 |
-
bin_size_w,
|
186 |
-
roi_bin_grid_h,
|
187 |
-
roi_bin_grid_w,
|
188 |
-
pre_calc);
|
189 |
-
|
190 |
-
for (int c = 0; c < channels; c++) {
|
191 |
-
int index_n_c = index_n + c * pooled_width * pooled_height;
|
192 |
-
const T* offset_input =
|
193 |
-
input + (roi_batch_ind * channels + c) * height * width;
|
194 |
-
int pre_calc_index = 0;
|
195 |
-
|
196 |
-
for (int ph = 0; ph < pooled_height; ph++) {
|
197 |
-
for (int pw = 0; pw < pooled_width; pw++) {
|
198 |
-
int index = index_n_c + ph * pooled_width + pw;
|
199 |
-
|
200 |
-
T output_val = 0.;
|
201 |
-
for (int iy = 0; iy < roi_bin_grid_h; iy++) {
|
202 |
-
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
|
203 |
-
PreCalc<T> pc = pre_calc[pre_calc_index];
|
204 |
-
output_val += pc.w1 * offset_input[pc.pos1] +
|
205 |
-
pc.w2 * offset_input[pc.pos2] +
|
206 |
-
pc.w3 * offset_input[pc.pos3] + pc.w4 * offset_input[pc.pos4];
|
207 |
-
|
208 |
-
pre_calc_index += 1;
|
209 |
-
}
|
210 |
-
}
|
211 |
-
output_val /= count;
|
212 |
-
|
213 |
-
output[index] = output_val;
|
214 |
-
} // for pw
|
215 |
-
} // for ph
|
216 |
-
} // for c
|
217 |
-
} // for n
|
218 |
-
}
|
219 |
-
|
220 |
-
template <typename T>
|
221 |
-
void bilinear_interpolate_gradient(
|
222 |
-
const int height,
|
223 |
-
const int width,
|
224 |
-
T y,
|
225 |
-
T x,
|
226 |
-
T& w1,
|
227 |
-
T& w2,
|
228 |
-
T& w3,
|
229 |
-
T& w4,
|
230 |
-
int& x_low,
|
231 |
-
int& x_high,
|
232 |
-
int& y_low,
|
233 |
-
int& y_high,
|
234 |
-
const int index /* index for debug only*/) {
|
235 |
-
// deal with cases that inverse elements are out of feature map boundary
|
236 |
-
if (y < -1.0 || y > height || x < -1.0 || x > width) {
|
237 |
-
// empty
|
238 |
-
w1 = w2 = w3 = w4 = 0.;
|
239 |
-
x_low = x_high = y_low = y_high = -1;
|
240 |
-
return;
|
241 |
-
}
|
242 |
-
|
243 |
-
if (y <= 0)
|
244 |
-
y = 0;
|
245 |
-
if (x <= 0)
|
246 |
-
x = 0;
|
247 |
-
|
248 |
-
y_low = (int)y;
|
249 |
-
x_low = (int)x;
|
250 |
-
|
251 |
-
if (y_low >= height - 1) {
|
252 |
-
y_high = y_low = height - 1;
|
253 |
-
y = (T)y_low;
|
254 |
-
} else {
|
255 |
-
y_high = y_low + 1;
|
256 |
-
}
|
257 |
-
|
258 |
-
if (x_low >= width - 1) {
|
259 |
-
x_high = x_low = width - 1;
|
260 |
-
x = (T)x_low;
|
261 |
-
} else {
|
262 |
-
x_high = x_low + 1;
|
263 |
-
}
|
264 |
-
|
265 |
-
T ly = y - y_low;
|
266 |
-
T lx = x - x_low;
|
267 |
-
T hy = 1. - ly, hx = 1. - lx;
|
268 |
-
|
269 |
-
// reference in forward
|
270 |
-
// T v1 = input[y_low * width + x_low];
|
271 |
-
// T v2 = input[y_low * width + x_high];
|
272 |
-
// T v3 = input[y_high * width + x_low];
|
273 |
-
// T v4 = input[y_high * width + x_high];
|
274 |
-
// T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
|
275 |
-
|
276 |
-
w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
|
277 |
-
|
278 |
-
return;
|
279 |
-
}
|
280 |
-
|
281 |
-
template <class T>
|
282 |
-
inline void add(T* address, const T& val) {
|
283 |
-
*address += val;
|
284 |
-
}
|
285 |
-
|
286 |
-
template <typename T>
|
287 |
-
void ROIAlignBackward(
|
288 |
-
const int nthreads,
|
289 |
-
const T* grad_output,
|
290 |
-
const T& spatial_scale,
|
291 |
-
const int channels,
|
292 |
-
const int height,
|
293 |
-
const int width,
|
294 |
-
const int pooled_height,
|
295 |
-
const int pooled_width,
|
296 |
-
const int sampling_ratio,
|
297 |
-
T* grad_input,
|
298 |
-
const T* rois,
|
299 |
-
const int n_stride,
|
300 |
-
const int c_stride,
|
301 |
-
const int h_stride,
|
302 |
-
const int w_stride,
|
303 |
-
bool aligned) {
|
304 |
-
for (int index = 0; index < nthreads; index++) {
|
305 |
-
// (n, c, ph, pw) is an element in the pooled output
|
306 |
-
int pw = index % pooled_width;
|
307 |
-
int ph = (index / pooled_width) % pooled_height;
|
308 |
-
int c = (index / pooled_width / pooled_height) % channels;
|
309 |
-
int n = index / pooled_width / pooled_height / channels;
|
310 |
-
|
311 |
-
const T* offset_rois = rois + n * 5;
|
312 |
-
int roi_batch_ind = offset_rois[0];
|
313 |
-
|
314 |
-
// Do not use rounding; this implementation detail is critical
|
315 |
-
T offset = aligned ? (T)0.5 : (T)0.0;
|
316 |
-
T roi_start_w = offset_rois[1] * spatial_scale - offset;
|
317 |
-
T roi_start_h = offset_rois[2] * spatial_scale - offset;
|
318 |
-
T roi_end_w = offset_rois[3] * spatial_scale - offset;
|
319 |
-
T roi_end_h = offset_rois[4] * spatial_scale - offset;
|
320 |
-
|
321 |
-
T roi_width = roi_end_w - roi_start_w;
|
322 |
-
T roi_height = roi_end_h - roi_start_h;
|
323 |
-
if (aligned) {
|
324 |
-
AT_ASSERTM(
|
325 |
-
roi_width >= 0 && roi_height >= 0,
|
326 |
-
"ROIs in ROIAlign do not have non-negative size!");
|
327 |
-
} else { // for backward-compatibility only
|
328 |
-
roi_width = std::max(roi_width, (T)1.);
|
329 |
-
roi_height = std::max(roi_height, (T)1.);
|
330 |
-
}
|
331 |
-
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
|
332 |
-
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
|
333 |
-
|
334 |
-
T* offset_grad_input =
|
335 |
-
grad_input + ((roi_batch_ind * channels + c) * height * width);
|
336 |
-
|
337 |
-
int output_offset = n * n_stride + c * c_stride;
|
338 |
-
const T* offset_grad_output = grad_output + output_offset;
|
339 |
-
const T grad_output_this_bin =
|
340 |
-
offset_grad_output[ph * h_stride + pw * w_stride];
|
341 |
-
|
342 |
-
// We use roi_bin_grid to sample the grid and mimic integral
|
343 |
-
int roi_bin_grid_h = (sampling_ratio > 0)
|
344 |
-
? sampling_ratio
|
345 |
-
: ceil(roi_height / pooled_height); // e.g., = 2
|
346 |
-
int roi_bin_grid_w =
|
347 |
-
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
|
348 |
-
|
349 |
-
// We do average (integral) pooling inside a bin
|
350 |
-
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
|
351 |
-
|
352 |
-
for (int iy = 0; iy < roi_bin_grid_h; iy++) {
|
353 |
-
const T y = roi_start_h + ph * bin_size_h +
|
354 |
-
static_cast<T>(iy + .5f) * bin_size_h /
|
355 |
-
static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
|
356 |
-
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
|
357 |
-
const T x = roi_start_w + pw * bin_size_w +
|
358 |
-
static_cast<T>(ix + .5f) * bin_size_w /
|
359 |
-
static_cast<T>(roi_bin_grid_w);
|
360 |
-
|
361 |
-
T w1, w2, w3, w4;
|
362 |
-
int x_low, x_high, y_low, y_high;
|
363 |
-
|
364 |
-
bilinear_interpolate_gradient(
|
365 |
-
height,
|
366 |
-
width,
|
367 |
-
y,
|
368 |
-
x,
|
369 |
-
w1,
|
370 |
-
w2,
|
371 |
-
w3,
|
372 |
-
w4,
|
373 |
-
x_low,
|
374 |
-
x_high,
|
375 |
-
y_low,
|
376 |
-
y_high,
|
377 |
-
index);
|
378 |
-
|
379 |
-
T g1 = grad_output_this_bin * w1 / count;
|
380 |
-
T g2 = grad_output_this_bin * w2 / count;
|
381 |
-
T g3 = grad_output_this_bin * w3 / count;
|
382 |
-
T g4 = grad_output_this_bin * w4 / count;
|
383 |
-
|
384 |
-
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
|
385 |
-
// atomic add is not needed for now since it is single threaded
|
386 |
-
add(offset_grad_input + y_low * width + x_low, static_cast<T>(g1));
|
387 |
-
add(offset_grad_input + y_low * width + x_high, static_cast<T>(g2));
|
388 |
-
add(offset_grad_input + y_high * width + x_low, static_cast<T>(g3));
|
389 |
-
add(offset_grad_input + y_high * width + x_high, static_cast<T>(g4));
|
390 |
-
} // if
|
391 |
-
} // ix
|
392 |
-
} // iy
|
393 |
-
} // for
|
394 |
-
} // ROIAlignBackward
|
395 |
-
|
396 |
-
} // namespace
|
397 |
-
|
398 |
-
namespace detectron2 {
|
399 |
-
|
400 |
-
at::Tensor ROIAlign_forward_cpu(
|
401 |
-
const at::Tensor& input,
|
402 |
-
const at::Tensor& rois,
|
403 |
-
const float spatial_scale,
|
404 |
-
const int pooled_height,
|
405 |
-
const int pooled_width,
|
406 |
-
const int sampling_ratio,
|
407 |
-
bool aligned) {
|
408 |
-
AT_ASSERTM(input.device().is_cpu(), "input must be a CPU tensor");
|
409 |
-
AT_ASSERTM(rois.device().is_cpu(), "rois must be a CPU tensor");
|
410 |
-
|
411 |
-
at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
|
412 |
-
|
413 |
-
at::CheckedFrom c = "ROIAlign_forward_cpu";
|
414 |
-
at::checkAllSameType(c, {input_t, rois_t});
|
415 |
-
|
416 |
-
auto num_rois = rois.size(0);
|
417 |
-
auto channels = input.size(1);
|
418 |
-
auto height = input.size(2);
|
419 |
-
auto width = input.size(3);
|
420 |
-
|
421 |
-
at::Tensor output = at::zeros(
|
422 |
-
{num_rois, channels, pooled_height, pooled_width}, input.options());
|
423 |
-
|
424 |
-
auto output_size = num_rois * pooled_height * pooled_width * channels;
|
425 |
-
|
426 |
-
if (output.numel() == 0)
|
427 |
-
return output;
|
428 |
-
|
429 |
-
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "ROIAlign_forward", [&] {
|
430 |
-
ROIAlignForward<scalar_t>(
|
431 |
-
output_size,
|
432 |
-
input.contiguous().data_ptr<scalar_t>(),
|
433 |
-
spatial_scale,
|
434 |
-
channels,
|
435 |
-
height,
|
436 |
-
width,
|
437 |
-
pooled_height,
|
438 |
-
pooled_width,
|
439 |
-
sampling_ratio,
|
440 |
-
rois.contiguous().data_ptr<scalar_t>(),
|
441 |
-
output.data_ptr<scalar_t>(),
|
442 |
-
aligned);
|
443 |
-
});
|
444 |
-
return output;
|
445 |
-
}
|
446 |
-
|
447 |
-
at::Tensor ROIAlign_backward_cpu(
|
448 |
-
const at::Tensor& grad,
|
449 |
-
const at::Tensor& rois,
|
450 |
-
const float spatial_scale,
|
451 |
-
const int pooled_height,
|
452 |
-
const int pooled_width,
|
453 |
-
const int batch_size,
|
454 |
-
const int channels,
|
455 |
-
const int height,
|
456 |
-
const int width,
|
457 |
-
const int sampling_ratio,
|
458 |
-
bool aligned) {
|
459 |
-
AT_ASSERTM(grad.device().is_cpu(), "grad must be a CPU tensor");
|
460 |
-
AT_ASSERTM(rois.device().is_cpu(), "rois must be a CPU tensor");
|
461 |
-
|
462 |
-
at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2};
|
463 |
-
|
464 |
-
at::CheckedFrom c = "ROIAlign_backward_cpu";
|
465 |
-
at::checkAllSameType(c, {grad_t, rois_t});
|
466 |
-
|
467 |
-
at::Tensor grad_input =
|
468 |
-
at::zeros({batch_size, channels, height, width}, grad.options());
|
469 |
-
|
470 |
-
// handle possibly empty gradients
|
471 |
-
if (grad.numel() == 0) {
|
472 |
-
return grad_input;
|
473 |
-
}
|
474 |
-
|
475 |
-
// get stride values to ensure indexing into gradients is correct.
|
476 |
-
int n_stride = grad.stride(0);
|
477 |
-
int c_stride = grad.stride(1);
|
478 |
-
int h_stride = grad.stride(2);
|
479 |
-
int w_stride = grad.stride(3);
|
480 |
-
|
481 |
-
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.type(), "ROIAlign_forward", [&] {
|
482 |
-
ROIAlignBackward<scalar_t>(
|
483 |
-
grad.numel(),
|
484 |
-
grad.contiguous().data_ptr<scalar_t>(),
|
485 |
-
spatial_scale,
|
486 |
-
channels,
|
487 |
-
height,
|
488 |
-
width,
|
489 |
-
pooled_height,
|
490 |
-
pooled_width,
|
491 |
-
sampling_ratio,
|
492 |
-
grad_input.data_ptr<scalar_t>(),
|
493 |
-
rois.contiguous().data_ptr<scalar_t>(),
|
494 |
-
n_stride,
|
495 |
-
c_stride,
|
496 |
-
h_stride,
|
497 |
-
w_stride,
|
498 |
-
aligned);
|
499 |
-
});
|
500 |
-
return grad_input;
|
501 |
-
}
|
502 |
-
|
503 |
-
} // namespace detectron2
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/for_each.h
DELETED
@@ -1,109 +0,0 @@
|
|
1 |
-
/******************************************************************************
|
2 |
-
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
|
3 |
-
*
|
4 |
-
* Redistribution and use in source and binary forms, with or without
|
5 |
-
* modification, are permitted provided that the following conditions are met:
|
6 |
-
* * Redistributions of source code must retain the above copyright
|
7 |
-
* notice, this list of conditions and the following disclaimer.
|
8 |
-
* * Redistributions in binary form must reproduce the above copyright
|
9 |
-
* notice, this list of conditions and the following disclaimer in the
|
10 |
-
* documentation and/or other materials provided with the distribution.
|
11 |
-
* * Neither the name of the NVIDIA CORPORATION nor the
|
12 |
-
* names of its contributors may be used to endorse or promote products
|
13 |
-
* derived from this software without specific prior written permission.
|
14 |
-
*
|
15 |
-
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
16 |
-
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
17 |
-
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
18 |
-
* ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
|
19 |
-
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
20 |
-
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
21 |
-
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
22 |
-
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
23 |
-
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
24 |
-
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
25 |
-
*
|
26 |
-
******************************************************************************/
|
27 |
-
#pragma once
|
28 |
-
|
29 |
-
|
30 |
-
#if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
|
31 |
-
#include <iterator>
|
32 |
-
#include <thrust/system/cuda/config.h>
|
33 |
-
|
34 |
-
#include <thrust/system/cuda/detail/util.h>
|
35 |
-
#include <thrust/system/cuda/detail/parallel_for.h>
|
36 |
-
#include <thrust/detail/function.h>
|
37 |
-
#include <thrust/distance.h>
|
38 |
-
|
39 |
-
namespace thrust
|
40 |
-
{
|
41 |
-
|
42 |
-
namespace cuda_cub {
|
43 |
-
|
44 |
-
// for_each functor
|
45 |
-
template <class Input, class UnaryOp>
|
46 |
-
struct for_each_f
|
47 |
-
{
|
48 |
-
Input input;
|
49 |
-
UnaryOp op;
|
50 |
-
|
51 |
-
THRUST_FUNCTION
|
52 |
-
for_each_f(Input input, UnaryOp op)
|
53 |
-
: input(input), op(op) {}
|
54 |
-
|
55 |
-
template <class Size>
|
56 |
-
THRUST_DEVICE_FUNCTION void operator()(Size idx)
|
57 |
-
{
|
58 |
-
op(raw_reference_cast(input[idx]));
|
59 |
-
}
|
60 |
-
};
|
61 |
-
|
62 |
-
//-------------------------
|
63 |
-
// Thrust API entry points
|
64 |
-
//-------------------------
|
65 |
-
|
66 |
-
// for_each_n
|
67 |
-
template <class Derived,
|
68 |
-
class Input,
|
69 |
-
class Size,
|
70 |
-
class UnaryOp>
|
71 |
-
Input THRUST_FUNCTION
|
72 |
-
for_each_n(execution_policy<Derived> &policy,
|
73 |
-
Input first,
|
74 |
-
Size count,
|
75 |
-
UnaryOp op)
|
76 |
-
{
|
77 |
-
typedef thrust::detail::wrapped_function<UnaryOp, void> wrapped_t;
|
78 |
-
wrapped_t wrapped_op(op);
|
79 |
-
|
80 |
-
cuda_cub::parallel_for(policy,
|
81 |
-
for_each_f<Input, wrapped_t>(first, wrapped_op),
|
82 |
-
count);
|
83 |
-
|
84 |
-
cuda_cub::throw_on_error(
|
85 |
-
cuda_cub::synchronize(policy)
|
86 |
-
, "for_each: failed to synchronize"
|
87 |
-
);
|
88 |
-
|
89 |
-
return first + count;
|
90 |
-
}
|
91 |
-
|
92 |
-
// for_each
|
93 |
-
template <class Derived,
|
94 |
-
class Input,
|
95 |
-
class UnaryOp>
|
96 |
-
Input THRUST_FUNCTION
|
97 |
-
for_each(execution_policy<Derived> &policy,
|
98 |
-
Input first,
|
99 |
-
Input last,
|
100 |
-
UnaryOp op)
|
101 |
-
{
|
102 |
-
typedef typename iterator_traits<Input>::difference_type size_type;
|
103 |
-
size_type count = static_cast<size_type>(thrust::distance(first,last));
|
104 |
-
return cuda_cub::for_each_n(policy, first, count, op);
|
105 |
-
}
|
106 |
-
} // namespace cuda_cub
|
107 |
-
|
108 |
-
} // end namespace thrust
|
109 |
-
#endif
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/generate.h
DELETED
@@ -1,23 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
* Copyright 2008-2013 NVIDIA Corporation
|
3 |
-
*
|
4 |
-
* Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
* you may not use this file except in compliance with the License.
|
6 |
-
* You may obtain a copy of the License at
|
7 |
-
*
|
8 |
-
* http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
*
|
10 |
-
* Unless required by applicable law or agreed to in writing, software
|
11 |
-
* distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
* See the License for the specific language governing permissions and
|
14 |
-
* limitations under the License.
|
15 |
-
*/
|
16 |
-
|
17 |
-
#pragma once
|
18 |
-
|
19 |
-
#include <thrust/detail/config.h>
|
20 |
-
|
21 |
-
// this system inherits generate
|
22 |
-
#include <thrust/system/cpp/detail/generate.h>
|
23 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/regionclip-demo/datasets/prepare_cocofied_lvis.py
DELETED
@@ -1,176 +0,0 @@
|
|
1 |
-
#!/usr/bin/env python3
|
2 |
-
# -*- coding: utf-8 -*-
|
3 |
-
# Copyright (c) Facebook, Inc. and its affiliates.
|
4 |
-
|
5 |
-
import copy
|
6 |
-
import json
|
7 |
-
import os
|
8 |
-
from collections import defaultdict
|
9 |
-
|
10 |
-
# This mapping is extracted from the official LVIS mapping:
|
11 |
-
# https://github.com/lvis-dataset/lvis-api/blob/master/data/coco_to_synset.json
|
12 |
-
COCO_SYNSET_CATEGORIES = [
|
13 |
-
{"synset": "person.n.01", "coco_cat_id": 1},
|
14 |
-
{"synset": "bicycle.n.01", "coco_cat_id": 2},
|
15 |
-
{"synset": "car.n.01", "coco_cat_id": 3},
|
16 |
-
{"synset": "motorcycle.n.01", "coco_cat_id": 4},
|
17 |
-
{"synset": "airplane.n.01", "coco_cat_id": 5},
|
18 |
-
{"synset": "bus.n.01", "coco_cat_id": 6},
|
19 |
-
{"synset": "train.n.01", "coco_cat_id": 7},
|
20 |
-
{"synset": "truck.n.01", "coco_cat_id": 8},
|
21 |
-
{"synset": "boat.n.01", "coco_cat_id": 9},
|
22 |
-
{"synset": "traffic_light.n.01", "coco_cat_id": 10},
|
23 |
-
{"synset": "fireplug.n.01", "coco_cat_id": 11},
|
24 |
-
{"synset": "stop_sign.n.01", "coco_cat_id": 13},
|
25 |
-
{"synset": "parking_meter.n.01", "coco_cat_id": 14},
|
26 |
-
{"synset": "bench.n.01", "coco_cat_id": 15},
|
27 |
-
{"synset": "bird.n.01", "coco_cat_id": 16},
|
28 |
-
{"synset": "cat.n.01", "coco_cat_id": 17},
|
29 |
-
{"synset": "dog.n.01", "coco_cat_id": 18},
|
30 |
-
{"synset": "horse.n.01", "coco_cat_id": 19},
|
31 |
-
{"synset": "sheep.n.01", "coco_cat_id": 20},
|
32 |
-
{"synset": "beef.n.01", "coco_cat_id": 21},
|
33 |
-
{"synset": "elephant.n.01", "coco_cat_id": 22},
|
34 |
-
{"synset": "bear.n.01", "coco_cat_id": 23},
|
35 |
-
{"synset": "zebra.n.01", "coco_cat_id": 24},
|
36 |
-
{"synset": "giraffe.n.01", "coco_cat_id": 25},
|
37 |
-
{"synset": "backpack.n.01", "coco_cat_id": 27},
|
38 |
-
{"synset": "umbrella.n.01", "coco_cat_id": 28},
|
39 |
-
{"synset": "bag.n.04", "coco_cat_id": 31},
|
40 |
-
{"synset": "necktie.n.01", "coco_cat_id": 32},
|
41 |
-
{"synset": "bag.n.06", "coco_cat_id": 33},
|
42 |
-
{"synset": "frisbee.n.01", "coco_cat_id": 34},
|
43 |
-
{"synset": "ski.n.01", "coco_cat_id": 35},
|
44 |
-
{"synset": "snowboard.n.01", "coco_cat_id": 36},
|
45 |
-
{"synset": "ball.n.06", "coco_cat_id": 37},
|
46 |
-
{"synset": "kite.n.03", "coco_cat_id": 38},
|
47 |
-
{"synset": "baseball_bat.n.01", "coco_cat_id": 39},
|
48 |
-
{"synset": "baseball_glove.n.01", "coco_cat_id": 40},
|
49 |
-
{"synset": "skateboard.n.01", "coco_cat_id": 41},
|
50 |
-
{"synset": "surfboard.n.01", "coco_cat_id": 42},
|
51 |
-
{"synset": "tennis_racket.n.01", "coco_cat_id": 43},
|
52 |
-
{"synset": "bottle.n.01", "coco_cat_id": 44},
|
53 |
-
{"synset": "wineglass.n.01", "coco_cat_id": 46},
|
54 |
-
{"synset": "cup.n.01", "coco_cat_id": 47},
|
55 |
-
{"synset": "fork.n.01", "coco_cat_id": 48},
|
56 |
-
{"synset": "knife.n.01", "coco_cat_id": 49},
|
57 |
-
{"synset": "spoon.n.01", "coco_cat_id": 50},
|
58 |
-
{"synset": "bowl.n.03", "coco_cat_id": 51},
|
59 |
-
{"synset": "banana.n.02", "coco_cat_id": 52},
|
60 |
-
{"synset": "apple.n.01", "coco_cat_id": 53},
|
61 |
-
{"synset": "sandwich.n.01", "coco_cat_id": 54},
|
62 |
-
{"synset": "orange.n.01", "coco_cat_id": 55},
|
63 |
-
{"synset": "broccoli.n.01", "coco_cat_id": 56},
|
64 |
-
{"synset": "carrot.n.01", "coco_cat_id": 57},
|
65 |
-
{"synset": "frank.n.02", "coco_cat_id": 58},
|
66 |
-
{"synset": "pizza.n.01", "coco_cat_id": 59},
|
67 |
-
{"synset": "doughnut.n.02", "coco_cat_id": 60},
|
68 |
-
{"synset": "cake.n.03", "coco_cat_id": 61},
|
69 |
-
{"synset": "chair.n.01", "coco_cat_id": 62},
|
70 |
-
{"synset": "sofa.n.01", "coco_cat_id": 63},
|
71 |
-
{"synset": "pot.n.04", "coco_cat_id": 64},
|
72 |
-
{"synset": "bed.n.01", "coco_cat_id": 65},
|
73 |
-
{"synset": "dining_table.n.01", "coco_cat_id": 67},
|
74 |
-
{"synset": "toilet.n.02", "coco_cat_id": 70},
|
75 |
-
{"synset": "television_receiver.n.01", "coco_cat_id": 72},
|
76 |
-
{"synset": "laptop.n.01", "coco_cat_id": 73},
|
77 |
-
{"synset": "mouse.n.04", "coco_cat_id": 74},
|
78 |
-
{"synset": "remote_control.n.01", "coco_cat_id": 75},
|
79 |
-
{"synset": "computer_keyboard.n.01", "coco_cat_id": 76},
|
80 |
-
{"synset": "cellular_telephone.n.01", "coco_cat_id": 77},
|
81 |
-
{"synset": "microwave.n.02", "coco_cat_id": 78},
|
82 |
-
{"synset": "oven.n.01", "coco_cat_id": 79},
|
83 |
-
{"synset": "toaster.n.02", "coco_cat_id": 80},
|
84 |
-
{"synset": "sink.n.01", "coco_cat_id": 81},
|
85 |
-
{"synset": "electric_refrigerator.n.01", "coco_cat_id": 82},
|
86 |
-
{"synset": "book.n.01", "coco_cat_id": 84},
|
87 |
-
{"synset": "clock.n.01", "coco_cat_id": 85},
|
88 |
-
{"synset": "vase.n.01", "coco_cat_id": 86},
|
89 |
-
{"synset": "scissors.n.01", "coco_cat_id": 87},
|
90 |
-
{"synset": "teddy.n.01", "coco_cat_id": 88},
|
91 |
-
{"synset": "hand_blower.n.01", "coco_cat_id": 89},
|
92 |
-
{"synset": "toothbrush.n.01", "coco_cat_id": 90},
|
93 |
-
]
|
94 |
-
|
95 |
-
|
96 |
-
def cocofy_lvis(input_filename, output_filename):
|
97 |
-
"""
|
98 |
-
Filter LVIS instance segmentation annotations to remove all categories that are not included in
|
99 |
-
COCO. The new json files can be used to evaluate COCO AP using `lvis-api`. The category ids in
|
100 |
-
the output json are the incontiguous COCO dataset ids.
|
101 |
-
|
102 |
-
Args:
|
103 |
-
input_filename (str): path to the LVIS json file.
|
104 |
-
output_filename (str): path to the COCOfied json file.
|
105 |
-
"""
|
106 |
-
|
107 |
-
with open(input_filename, "r") as f:
|
108 |
-
lvis_json = json.load(f)
|
109 |
-
|
110 |
-
lvis_annos = lvis_json.pop("annotations")
|
111 |
-
cocofied_lvis = copy.deepcopy(lvis_json)
|
112 |
-
lvis_json["annotations"] = lvis_annos
|
113 |
-
|
114 |
-
# Mapping from lvis cat id to coco cat id via synset
|
115 |
-
lvis_cat_id_to_synset = {cat["id"]: cat["synset"] for cat in lvis_json["categories"]}
|
116 |
-
synset_to_coco_cat_id = {x["synset"]: x["coco_cat_id"] for x in COCO_SYNSET_CATEGORIES}
|
117 |
-
# Synsets that we will keep in the dataset
|
118 |
-
synsets_to_keep = set(synset_to_coco_cat_id.keys())
|
119 |
-
coco_cat_id_with_instances = defaultdict(int)
|
120 |
-
|
121 |
-
new_annos = []
|
122 |
-
ann_id = 1
|
123 |
-
for ann in lvis_annos:
|
124 |
-
lvis_cat_id = ann["category_id"]
|
125 |
-
synset = lvis_cat_id_to_synset[lvis_cat_id]
|
126 |
-
if synset not in synsets_to_keep:
|
127 |
-
continue
|
128 |
-
coco_cat_id = synset_to_coco_cat_id[synset]
|
129 |
-
new_ann = copy.deepcopy(ann)
|
130 |
-
new_ann["category_id"] = coco_cat_id
|
131 |
-
new_ann["id"] = ann_id
|
132 |
-
ann_id += 1
|
133 |
-
new_annos.append(new_ann)
|
134 |
-
coco_cat_id_with_instances[coco_cat_id] += 1
|
135 |
-
cocofied_lvis["annotations"] = new_annos
|
136 |
-
|
137 |
-
for image in cocofied_lvis["images"]:
|
138 |
-
for key in ["not_exhaustive_category_ids", "neg_category_ids"]:
|
139 |
-
new_category_list = []
|
140 |
-
for lvis_cat_id in image[key]:
|
141 |
-
synset = lvis_cat_id_to_synset[lvis_cat_id]
|
142 |
-
if synset not in synsets_to_keep:
|
143 |
-
continue
|
144 |
-
coco_cat_id = synset_to_coco_cat_id[synset]
|
145 |
-
new_category_list.append(coco_cat_id)
|
146 |
-
coco_cat_id_with_instances[coco_cat_id] += 1
|
147 |
-
image[key] = new_category_list
|
148 |
-
|
149 |
-
coco_cat_id_with_instances = set(coco_cat_id_with_instances.keys())
|
150 |
-
|
151 |
-
new_categories = []
|
152 |
-
for cat in lvis_json["categories"]:
|
153 |
-
synset = cat["synset"]
|
154 |
-
if synset not in synsets_to_keep:
|
155 |
-
continue
|
156 |
-
coco_cat_id = synset_to_coco_cat_id[synset]
|
157 |
-
if coco_cat_id not in coco_cat_id_with_instances:
|
158 |
-
continue
|
159 |
-
new_cat = copy.deepcopy(cat)
|
160 |
-
new_cat["id"] = coco_cat_id
|
161 |
-
new_categories.append(new_cat)
|
162 |
-
cocofied_lvis["categories"] = new_categories
|
163 |
-
|
164 |
-
with open(output_filename, "w") as f:
|
165 |
-
json.dump(cocofied_lvis, f)
|
166 |
-
print("{} is COCOfied and stored in {}.".format(input_filename, output_filename))
|
167 |
-
|
168 |
-
|
169 |
-
if __name__ == "__main__":
|
170 |
-
dataset_dir = os.path.join(os.getenv("DETECTRON2_DATASETS", "datasets"), "lvis")
|
171 |
-
for s in ["lvis_v0.5_train", "lvis_v0.5_val"]:
|
172 |
-
print("Start COCOfing {}.".format(s))
|
173 |
-
cocofy_lvis(
|
174 |
-
os.path.join(dataset_dir, "{}.json".format(s)),
|
175 |
-
os.path.join(dataset_dir, "{}_cocofied.json".format(s)),
|
176 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Chirag4579/prakalpa-image-comparator/app.py
DELETED
@@ -1,87 +0,0 @@
|
|
1 |
-
import streamlit as st
|
2 |
-
from streamlit_image_comparison import image_comparison
|
3 |
-
|
4 |
-
st.set_page_config(page_title="Prakalpa", page_icon="🔭", layout="centered")
|
5 |
-
# title_container = st.container()
|
6 |
-
# col1, col2 = st.columns([1, 20])
|
7 |
-
# image = Image.open('C:/Users/Chirag Chauhan/Desktop/100409820-removebg-preview.png')
|
8 |
-
# with title_container:
|
9 |
-
# with col1:
|
10 |
-
# st.sidebar.image(image, width=300)
|
11 |
-
# with col2:
|
12 |
-
# st.sidebar.markdown('<h1 style="color: blue;">Prakalpa</h1>',
|
13 |
-
# unsafe_allow_html=True)
|
14 |
-
st.sidebar.markdown("# *Hubble vs James Webb*")
|
15 |
-
|
16 |
-
|
17 |
-
def main():
|
18 |
-
html_temp = """
|
19 |
-
<style>
|
20 |
-
#MainMenu {visibility:hidden;}
|
21 |
-
tbody th {display:none}
|
22 |
-
.blank {display:none}
|
23 |
-
h1 {
|
24 |
-
text-align: center;
|
25 |
-
}
|
26 |
-
[data-testid="stSidebar"][aria-expanded="true"] > div:first-child {
|
27 |
-
width: 450px;
|
28 |
-
}
|
29 |
-
[data-testid="stSidebar"][aria-expanded="false"] > div:first-child {
|
30 |
-
width: 450px;
|
31 |
-
}
|
32 |
-
div.block-container{top:-20px;}
|
33 |
-
</style>
|
34 |
-
"""
|
35 |
-
|
36 |
-
st.markdown(html_temp, unsafe_allow_html=True)
|
37 |
-
|
38 |
-
selection = st.sidebar.multiselect('Select comparison image:', ['Southern Nebula', "Stephan's Quintet",
|
39 |
-
'Galaxy Cluster SMACS 0723'])
|
40 |
-
|
41 |
-
if 'Southern Nebula' in selection:
|
42 |
-
with st.expander('Southern Nebula'):
|
43 |
-
image_comparison(
|
44 |
-
img1="https://www.webbcompare.com/img/hubble/southern_nebula_700.jpg",
|
45 |
-
img2="https://www.webbcompare.com/img/webb/southern_nebula_700.jpg",
|
46 |
-
label1="Hubble",
|
47 |
-
label2="Webb",
|
48 |
-
width=660,
|
49 |
-
make_responsive=True
|
50 |
-
)
|
51 |
-
|
52 |
-
if "Stephan's Quintet" in selection:
|
53 |
-
with st.expander("Stephan's Quintet"):
|
54 |
-
image_comparison(
|
55 |
-
img1="https://www.webbcompare.com/img/hubble/stephans_quintet_1400.jpg",
|
56 |
-
img2="https://www.webbcompare.com/img/webb/stephans_quintet_1400.jpg",
|
57 |
-
label1="Hubble",
|
58 |
-
label2="Webb",
|
59 |
-
width=660,
|
60 |
-
make_responsive=True
|
61 |
-
)
|
62 |
-
|
63 |
-
if "Galaxy Cluster SMACS 0723" in selection:
|
64 |
-
with st.expander('Galaxy Cluster SMACS 0723'):
|
65 |
-
image_comparison(
|
66 |
-
img1="https://www.webbcompare.com/img/hubble/deep_field_700.jpg",
|
67 |
-
img2="https://www.webbcompare.com/img/webb/deep_field_700.jpg",
|
68 |
-
label1="Hubble",
|
69 |
-
label2="Webb",
|
70 |
-
width=660,
|
71 |
-
make_responsive=True
|
72 |
-
)
|
73 |
-
|
74 |
-
# if "Carina Nebula" in selection:
|
75 |
-
# with st.expander('Carina Nebula'):
|
76 |
-
# image_comparison(
|
77 |
-
# img1="https://www.webbcompare.com/img/hubble/carina_1400.png",
|
78 |
-
# img2="https://www.webbcompare.com/img/webb/carina_1400.jpg",
|
79 |
-
# label1="Hubble",
|
80 |
-
# label2="Webb",
|
81 |
-
# width=660,
|
82 |
-
# make_responsive=True,
|
83 |
-
# starting_position=50
|
84 |
-
# )
|
85 |
-
|
86 |
-
|
87 |
-
main()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CognitiveAIForHealth/README/README.md
DELETED
@@ -1,143 +0,0 @@
|
|
1 |
-
|
2 |
-
# Classroom Examples for Today: 🚀[Examples](https://huggingface.co/spaces/awacke1/AIZTH-03-09-2023)
|
3 |
-
|
4 |
-
# 👋 Two easy ways to turbo boost your AI learning journey! 💻
|
5 |
-
# 🌐 AI Pair Programming
|
6 |
-
## Open 2 Browsers to:
|
7 |
-
1. __🌐 ChatGPT__ [URL](https://chat.openai.com/chat) or [URL2](https://platform.openai.com/playground) and
|
8 |
-
2. __🌐 Huggingface__ [URL](https://huggingface.co/awacke1) in separate browser windows.
|
9 |
-
1. 🤖 Use prompts to generate a streamlit program on Huggingface or locally to test it.
|
10 |
-
2. 🔧 For advanced work, add Python 3.10 and VSCode locally, and debug as gradio or streamlit apps.
|
11 |
-
3. 🚀 Use these two superpower processes to reduce the time it takes you to make a new AI program! ⏱️
|
12 |
-
|
13 |
-
Example Starter Prompt:
|
14 |
-
|
15 |
-
Write a streamlit program that demonstrates Data synthesis.
|
16 |
-
Synthesize data from multiple sources to create new datasets.
|
17 |
-
Use two datasets and demonstrate pandas dataframe query merge and join
|
18 |
-
with two datasets in python list dictionaries:
|
19 |
-
List of Hospitals that are over 1000 bed count by city and state, and
|
20 |
-
State population size and square miles.
|
21 |
-
Perform a calculated function on the merged dataset.
|
22 |
-
|
23 |
-
|
24 |
-
# 🎥 YouTube University Method:
|
25 |
-
1. 🏋️♀️ Plan two hours each weekday to exercise your body and brain.
|
26 |
-
2. 🎬 Make a playlist of videos you want to learn from on YouTube. Save the links to edit later.
|
27 |
-
3. 🚀 Try watching the videos at a faster speed while exercising, and sample the first five minutes of each video.
|
28 |
-
4. 📜 Reorder the playlist so the most useful videos are at the front, and take breaks to exercise.
|
29 |
-
5. 📝 Practice note-taking in markdown to instantly save what you want to remember. Share your notes with others!
|
30 |
-
6. 👥 AI Pair Programming Using Long Answer Language Models with Human Feedback:
|
31 |
-
## 🎥 2023 AI/ML Advanced Learning Playlists:
|
32 |
-
1. [2023 QA Models and Long Form Question Answering NLP](https://www.youtube.com/playlist?list=PLHgX2IExbFovrkkx8HMTLNgYdjCMNYmX_)
|
33 |
-
2. [FHIR Bioinformatics Development Using AI/ML and Python, Streamlit, and Gradio - 2022](https://www.youtube.com/playlist?list=PLHgX2IExbFovoMUC3hYXeFegpk_Y0Lz0Q)
|
34 |
-
3. [2023 ChatGPT for Coding Assistant Streamlit, Gradio and Python Apps](https://www.youtube.com/playlist?list=PLHgX2IExbFouOEnppexiKZVdz_k5b0pvI)
|
35 |
-
4. [2023 BigScience Bloom - Large Language Model for AI Systems and NLP](https://www.youtube.com/playlist?list=PLHgX2IExbFouqnsIqziThlPCX_miiDq14)
|
36 |
-
5. [2023 Streamlit Pro Tips for AI UI UX for Data Science, Engineering, and Mathematics](https://www.youtube.com/playlist?list=PLHgX2IExbFou3cP19hHO9Xb-cN8uwr5RM)
|
37 |
-
6. [2023 Fun, New and Interesting AI, Videos, and AI/ML Techniques](https://www.youtube.com/playlist?list=PLHgX2IExbFotoMt32SrT3Xynt5BXTGnEP)
|
38 |
-
7. [2023 Best Minds in AGI AI Gamification and Large Language Models](https://www.youtube.com/playlist?list=PLHgX2IExbFotmFeBTpyje1uI22n0GAkXT)
|
39 |
-
8. [2023 State of the Art for Vision Image Classification, Text Classification and Regression, Extractive Question Answering and Tabular Classification](https://www.youtube.com/playlist?list=PLHgX2IExbFotPcPu6pauNHOoZTTbnAQ2F)
|
40 |
-
9. [2023 AutoML DataRobot and AI Platforms for Building Models, Features, Test, and Transparency](https://www.youtube.com/playlist?list=PLHgX2IExbFovsY2oGbDwdEhPrakkC8i3g)
|
41 |
-
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
## Language Models 🗣️
|
46 |
-
🏆 Bloom sets new record for most performant and efficient AI model in science! 🌸
|
47 |
-
|
48 |
-
### Comparison of Large Language Models
|
49 |
-
| Model Name | Model Size (in Parameters) |
|
50 |
-
| ----------------- | -------------------------- |
|
51 |
-
| BigScience-tr11-176B | 176 billion |
|
52 |
-
| GPT-3 | 175 billion |
|
53 |
-
| OpenAI's DALL-E 2.0 | 500 million |
|
54 |
-
| NVIDIA's Megatron | 8.3 billion |
|
55 |
-
| Transformer-XL | 250 million |
|
56 |
-
| XLNet | 210 million |
|
57 |
-
|
58 |
-
## ChatGPT Datasets 📚
|
59 |
-
- WebText
|
60 |
-
- Common Crawl
|
61 |
-
- BooksCorpus
|
62 |
-
- English Wikipedia
|
63 |
-
- Toronto Books Corpus
|
64 |
-
- OpenWebText
|
65 |
-
-
|
66 |
-
## ChatGPT Datasets - Details 📚
|
67 |
-
- **WebText:** A dataset of web pages crawled from domains on the Alexa top 5,000 list. This dataset was used to pretrain GPT-2.
|
68 |
-
- [WebText: A Large-Scale Unsupervised Text Corpus by Radford et al.](https://paperswithcode.com/dataset/webtext)
|
69 |
-
- **Common Crawl:** A dataset of web pages from a variety of domains, which is updated regularly. This dataset was used to pretrain GPT-3.
|
70 |
-
- [Language Models are Few-Shot Learners](https://paperswithcode.com/dataset/common-crawl) by Brown et al.
|
71 |
-
- **BooksCorpus:** A dataset of over 11,000 books from a variety of genres.
|
72 |
-
- [Scalable Methods for 8 Billion Token Language Modeling](https://paperswithcode.com/dataset/bookcorpus) by Zhu et al.
|
73 |
-
- **English Wikipedia:** A dump of the English-language Wikipedia as of 2018, with articles from 2001-2017.
|
74 |
-
- [Improving Language Understanding by Generative Pre-Training](https://huggingface.co/spaces/awacke1/WikipediaUltimateAISearch?logs=build) Space for Wikipedia Search
|
75 |
-
- **Toronto Books Corpus:** A dataset of over 7,000 books from a variety of genres, collected by the University of Toronto.
|
76 |
-
- [Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond](https://paperswithcode.com/dataset/bookcorpus) by Schwenk and Douze.
|
77 |
-
- **OpenWebText:** A dataset of web pages that were filtered to remove content that was likely to be low-quality or spammy. This dataset was used to pretrain GPT-3.
|
78 |
-
- [Language Models are Few-Shot Learners](https://paperswithcode.com/dataset/openwebtext) by Brown et al.
|
79 |
-
|
80 |
-
## Big Science Model 🚀
|
81 |
-
- 📜 Papers:
|
82 |
-
1. BLOOM: A 176B-Parameter Open-Access Multilingual Language Model [Paper](https://arxiv.org/abs/2211.05100)
|
83 |
-
2. Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism [Paper](https://arxiv.org/abs/1909.08053)
|
84 |
-
3. 8-bit Optimizers via Block-wise Quantization [Paper](https://arxiv.org/abs/2110.02861)
|
85 |
-
4. Train Short, Test Long: Attention with Linear Biases Enables Input Length Extrapolation [Paper](https://arxiv.org/abs/2108.12409)
|
86 |
-
5. [Other papers related to Big Science](https://huggingface.co/models?other=doi:10.57967/hf/0003)
|
87 |
-
6. [217 other models optimized for use with Bloom](https://huggingface.co/models?other=bloom)
|
88 |
-
|
89 |
-
- 📚 Datasets:
|
90 |
-
|
91 |
-
**Datasets:**
|
92 |
-
1. - **Universal Dependencies:** A collection of annotated corpora for natural language processing in a range of languages, with a focus on dependency parsing.
|
93 |
-
- [Universal Dependencies official website.](https://universaldependencies.org/)
|
94 |
-
2. - **WMT 2014:** The fourth edition of the Workshop on Statistical Machine Translation, featuring shared tasks on translating between English and various other languages.
|
95 |
-
- [WMT14 website.](http://www.statmt.org/wmt14/)
|
96 |
-
3. - **The Pile:** An English language corpus of diverse text, sourced from various places on the internet.
|
97 |
-
- [The Pile official website.](https://pile.eleuther.ai/)
|
98 |
-
4. - **HumanEval:** A dataset of English sentences, annotated with human judgments on a range of linguistic qualities.
|
99 |
-
- [HumanEval: An Evaluation Benchmark for Language Understanding](https://github.com/google-research-datasets/humaneval) by Gabriel Ilharco, Daniel Loureiro, Pedro Rodriguez, and Afonso Mendes.
|
100 |
-
5. - **FLORES-101:** A dataset of parallel sentences in 101 languages, designed for multilingual machine translation.
|
101 |
-
- [FLORES-101: A Massively Multilingual Parallel Corpus for Language Understanding](https://flores101.opennmt.net/) by Aman Madaan, Shruti Rijhwani, Raghav Gupta, and Mitesh M. Khapra.
|
102 |
-
6. - **CrowS-Pairs:** A dataset of sentence pairs, designed for evaluating the plausibility of generated text.
|
103 |
-
- [CrowS-Pairs: A Challenge Dataset for Plausible Plausibility Judgments](https://github.com/stanford-cogsci/crows-pairs) by Andrea Madotto, Zhaojiang Lin, Chien-Sheng Wu, Pascale Fung, and Caiming Xiong.
|
104 |
-
7. - **WikiLingua:** A dataset of parallel sentences in 75 languages, sourced from Wikipedia.
|
105 |
-
- [WikiLingua: A New Benchmark Dataset for Cross-Lingual Wikification](https://arxiv.org/abs/2105.08031) by Jiarui Yao, Yanqiao Zhu, Ruihan Bao, Guosheng Lin, Lidong Bing, and Bei Shi.
|
106 |
-
8. - **MTEB:** A dataset of English sentences, annotated with their entailment relationships with respect to other sentences.
|
107 |
-
- [Multi-Task Evaluation Benchmark for Natural Language Inference](https://github.com/google-research-datasets/mteb) by Michał Lukasik, Marcin Junczys-Dowmunt, and Houda Bouamor.
|
108 |
-
9. - **xP3:** A dataset of English sentences, annotated with their paraphrase relationships with respect to other sentences.
|
109 |
-
- [xP3: A Large-Scale Evaluation Benchmark for Paraphrase Identification in Context](https://github.com/nyu-dl/xp3) by Aniket Didolkar, James Mayfield, Markus Saers, and Jason Baldridge.
|
110 |
-
10. - **DiaBLa:** A dataset of English dialogue, annotated with dialogue acts.
|
111 |
-
- [A Large-Scale Corpus for Conversation Disentanglement](https://github.com/HLTCHKUST/DiaBLA) by Samuel Broscheit, António Branco, and André F. T. Martins.
|
112 |
-
|
113 |
-
- 📚 Dataset Papers with Code
|
114 |
-
1. [Universal Dependencies](https://paperswithcode.com/dataset/universal-dependencies)
|
115 |
-
2. [WMT 2014](https://paperswithcode.com/dataset/wmt-2014)
|
116 |
-
3. [The Pile](https://paperswithcode.com/dataset/the-pile)
|
117 |
-
4. [HumanEval](https://paperswithcode.com/dataset/humaneval)
|
118 |
-
5. [FLORES-101](https://paperswithcode.com/dataset/flores-101)
|
119 |
-
6. [CrowS-Pairs](https://paperswithcode.com/dataset/crows-pairs)
|
120 |
-
7. [WikiLingua](https://paperswithcode.com/dataset/wikilingua)
|
121 |
-
8. [MTEB](https://paperswithcode.com/dataset/mteb)
|
122 |
-
9. [xP3](https://paperswithcode.com/dataset/xp3)
|
123 |
-
10. [DiaBLa](https://paperswithcode.com/dataset/diabla)
|
124 |
-
|
125 |
-
# Deep RL ML Strategy 🧠
|
126 |
-
The AI strategies are:
|
127 |
-
- Language Model Preparation using Human Augmented with Supervised Fine Tuning 🤖
|
128 |
-
- Reward Model Training with Prompts Dataset Multi-Model Generate Data to Rank 🎁
|
129 |
-
- Fine Tuning with Reinforcement Reward and Distance Distribution Regret Score 🎯
|
130 |
-
- Proximal Policy Optimization Fine Tuning 🤝
|
131 |
-
- Variations - Preference Model Pretraining 🤔
|
132 |
-
- Use Ranking Datasets Sentiment - Thumbs Up/Down, Distribution 📊
|
133 |
-
- Online Version Getting Feedback 💬
|
134 |
-
- OpenAI - InstructGPT - Humans generate LM Training Text 🔍
|
135 |
-
- DeepMind - Advantage Actor Critic Sparrow, GopherCite 🦜
|
136 |
-
- Reward Model Human Prefence Feedback 🏆
|
137 |
-
|
138 |
-
|
139 |
-
For more information on specific techniques and implementations, check out the following resources:
|
140 |
-
- OpenAI's paper on [GPT-3](https://arxiv.org/abs/2005.14165) which details their Language Model Preparation approach
|
141 |
-
- DeepMind's paper on [SAC](https://arxiv.org/abs/1801.01290) which describes the Advantage Actor Critic algorithm
|
142 |
-
- OpenAI's paper on [Reward Learning](https://arxiv.org/abs/1810.06580) which explains their approach to training Reward Models
|
143 |
-
- OpenAI's blog post on [GPT-3's fine-tuning process](https://openai.com/blog/fine-tuning-gpt-3/)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Datasculptor/MusicGen/app.py
DELETED
@@ -1,407 +0,0 @@
|
|
1 |
-
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
2 |
-
# All rights reserved.
|
3 |
-
|
4 |
-
# This source code is licensed under the license found in the
|
5 |
-
# LICENSE file in the root directory of this source tree.
|
6 |
-
|
7 |
-
# Updated to account for UI changes from https://github.com/rkfg/audiocraft/blob/long/app.py
|
8 |
-
# also released under the MIT license.
|
9 |
-
|
10 |
-
import argparse
|
11 |
-
from concurrent.futures import ProcessPoolExecutor
|
12 |
-
import os
|
13 |
-
from pathlib import Path
|
14 |
-
import subprocess as sp
|
15 |
-
from tempfile import NamedTemporaryFile
|
16 |
-
import time
|
17 |
-
import typing as tp
|
18 |
-
import warnings
|
19 |
-
|
20 |
-
import torch
|
21 |
-
import gradio as gr
|
22 |
-
|
23 |
-
from audiocraft.data.audio_utils import convert_audio
|
24 |
-
from audiocraft.data.audio import audio_write
|
25 |
-
from audiocraft.models import MusicGen
|
26 |
-
|
27 |
-
|
28 |
-
MODEL = None # Last used model
|
29 |
-
IS_BATCHED = "facebook/MusicGen" in os.environ.get('SPACE_ID', '')
|
30 |
-
MAX_BATCH_SIZE = 6
|
31 |
-
BATCHED_DURATION = 15
|
32 |
-
INTERRUPTING = False
|
33 |
-
# We have to wrap subprocess call to clean a bit the log when using gr.make_waveform
|
34 |
-
_old_call = sp.call
|
35 |
-
|
36 |
-
|
37 |
-
def _call_nostderr(*args, **kwargs):
|
38 |
-
# Avoid ffmpeg vomitting on the logs.
|
39 |
-
kwargs['stderr'] = sp.DEVNULL
|
40 |
-
kwargs['stdout'] = sp.DEVNULL
|
41 |
-
_old_call(*args, **kwargs)
|
42 |
-
|
43 |
-
|
44 |
-
sp.call = _call_nostderr
|
45 |
-
# Preallocating the pool of processes.
|
46 |
-
pool = ProcessPoolExecutor(3)
|
47 |
-
pool.__enter__()
|
48 |
-
|
49 |
-
|
50 |
-
def interrupt():
|
51 |
-
global INTERRUPTING
|
52 |
-
INTERRUPTING = True
|
53 |
-
|
54 |
-
|
55 |
-
class FileCleaner:
|
56 |
-
def __init__(self, file_lifetime: float = 3600):
|
57 |
-
self.file_lifetime = file_lifetime
|
58 |
-
self.files = []
|
59 |
-
|
60 |
-
def add(self, path: tp.Union[str, Path]):
|
61 |
-
self._cleanup()
|
62 |
-
self.files.append((time.time(), Path(path)))
|
63 |
-
|
64 |
-
def _cleanup(self):
|
65 |
-
now = time.time()
|
66 |
-
for time_added, path in list(self.files):
|
67 |
-
if now - time_added > self.file_lifetime:
|
68 |
-
if path.exists():
|
69 |
-
path.unlink()
|
70 |
-
self.files.pop(0)
|
71 |
-
else:
|
72 |
-
break
|
73 |
-
|
74 |
-
|
75 |
-
file_cleaner = FileCleaner()
|
76 |
-
|
77 |
-
|
78 |
-
def make_waveform(*args, **kwargs):
|
79 |
-
# Further remove some warnings.
|
80 |
-
be = time.time()
|
81 |
-
with warnings.catch_warnings():
|
82 |
-
warnings.simplefilter('ignore')
|
83 |
-
out = gr.make_waveform(*args, **kwargs)
|
84 |
-
print("Make a video took", time.time() - be)
|
85 |
-
return out
|
86 |
-
|
87 |
-
|
88 |
-
def load_model(version='melody'):
|
89 |
-
global MODEL
|
90 |
-
print("Loading model", version)
|
91 |
-
if MODEL is None or MODEL.name != version:
|
92 |
-
MODEL = MusicGen.get_pretrained(version)
|
93 |
-
|
94 |
-
|
95 |
-
def _do_predictions(texts, melodies, duration, progress=False, **gen_kwargs):
|
96 |
-
MODEL.set_generation_params(duration=duration, **gen_kwargs)
|
97 |
-
print("new batch", len(texts), texts, [None if m is None else (m[0], m[1].shape) for m in melodies])
|
98 |
-
be = time.time()
|
99 |
-
processed_melodies = []
|
100 |
-
target_sr = 32000
|
101 |
-
target_ac = 1
|
102 |
-
for melody in melodies:
|
103 |
-
if melody is None:
|
104 |
-
processed_melodies.append(None)
|
105 |
-
else:
|
106 |
-
sr, melody = melody[0], torch.from_numpy(melody[1]).to(MODEL.device).float().t()
|
107 |
-
if melody.dim() == 1:
|
108 |
-
melody = melody[None]
|
109 |
-
melody = melody[..., :int(sr * duration)]
|
110 |
-
melody = convert_audio(melody, sr, target_sr, target_ac)
|
111 |
-
processed_melodies.append(melody)
|
112 |
-
|
113 |
-
if any(m is not None for m in processed_melodies):
|
114 |
-
outputs = MODEL.generate_with_chroma(
|
115 |
-
descriptions=texts,
|
116 |
-
melody_wavs=processed_melodies,
|
117 |
-
melody_sample_rate=target_sr,
|
118 |
-
progress=progress,
|
119 |
-
)
|
120 |
-
else:
|
121 |
-
outputs = MODEL.generate(texts, progress=progress)
|
122 |
-
|
123 |
-
outputs = outputs.detach().cpu().float()
|
124 |
-
out_files = []
|
125 |
-
for output in outputs:
|
126 |
-
with NamedTemporaryFile("wb", suffix=".wav", delete=False) as file:
|
127 |
-
audio_write(
|
128 |
-
file.name, output, MODEL.sample_rate, strategy="loudness",
|
129 |
-
loudness_headroom_db=16, loudness_compressor=True, add_suffix=False)
|
130 |
-
out_files.append(pool.submit(make_waveform, file.name))
|
131 |
-
file_cleaner.add(file.name)
|
132 |
-
res = [out_file.result() for out_file in out_files]
|
133 |
-
for file in res:
|
134 |
-
file_cleaner.add(file)
|
135 |
-
print("batch finished", len(texts), time.time() - be)
|
136 |
-
print("Tempfiles currently stored: ", len(file_cleaner.files))
|
137 |
-
return res
|
138 |
-
|
139 |
-
|
140 |
-
def predict_batched(texts, melodies):
|
141 |
-
max_text_length = 512
|
142 |
-
texts = [text[:max_text_length] for text in texts]
|
143 |
-
load_model('melody')
|
144 |
-
res = _do_predictions(texts, melodies, BATCHED_DURATION)
|
145 |
-
return [res]
|
146 |
-
|
147 |
-
|
148 |
-
def predict_full(model, text, melody, duration, topk, topp, temperature, cfg_coef, progress=gr.Progress()):
|
149 |
-
global INTERRUPTING
|
150 |
-
INTERRUPTING = False
|
151 |
-
if temperature < 0:
|
152 |
-
raise gr.Error("Temperature must be >= 0.")
|
153 |
-
if topk < 0:
|
154 |
-
raise gr.Error("Topk must be non-negative.")
|
155 |
-
if topp < 0:
|
156 |
-
raise gr.Error("Topp must be non-negative.")
|
157 |
-
|
158 |
-
topk = int(topk)
|
159 |
-
load_model(model)
|
160 |
-
|
161 |
-
def _progress(generated, to_generate):
|
162 |
-
progress((generated, to_generate))
|
163 |
-
if INTERRUPTING:
|
164 |
-
raise gr.Error("Interrupted.")
|
165 |
-
MODEL.set_custom_progress_callback(_progress)
|
166 |
-
|
167 |
-
outs = _do_predictions(
|
168 |
-
[text], [melody], duration, progress=True,
|
169 |
-
top_k=topk, top_p=topp, temperature=temperature, cfg_coef=cfg_coef)
|
170 |
-
return outs[0]
|
171 |
-
|
172 |
-
|
173 |
-
def toggle_audio_src(choice):
|
174 |
-
if choice == "mic":
|
175 |
-
return gr.update(source="microphone", value=None, label="Microphone")
|
176 |
-
else:
|
177 |
-
return gr.update(source="upload", value=None, label="File")
|
178 |
-
|
179 |
-
|
180 |
-
def ui_full(launch_kwargs):
|
181 |
-
with gr.Blocks() as interface:
|
182 |
-
gr.Markdown(
|
183 |
-
"""
|
184 |
-
# MusicGen
|
185 |
-
This is your private demo for [MusicGen](https://github.com/facebookresearch/audiocraft),
|
186 |
-
a simple and controllable model for music generation
|
187 |
-
presented at: ["Simple and Controllable Music Generation"](https://huggingface.co/papers/2306.05284)
|
188 |
-
"""
|
189 |
-
)
|
190 |
-
with gr.Row():
|
191 |
-
with gr.Column():
|
192 |
-
with gr.Row():
|
193 |
-
text = gr.Text(label="Input Text", interactive=True)
|
194 |
-
with gr.Column():
|
195 |
-
radio = gr.Radio(["file", "mic"], value="file",
|
196 |
-
label="Condition on a melody (optional) File or Mic")
|
197 |
-
melody = gr.Audio(source="upload", type="numpy", label="File",
|
198 |
-
interactive=True, elem_id="melody-input")
|
199 |
-
with gr.Row():
|
200 |
-
submit = gr.Button("Submit")
|
201 |
-
# Adapted from https://github.com/rkfg/audiocraft/blob/long/app.py, MIT license.
|
202 |
-
_ = gr.Button("Interrupt").click(fn=interrupt, queue=False)
|
203 |
-
with gr.Row():
|
204 |
-
model = gr.Radio(["melody", "medium", "small", "large"],
|
205 |
-
label="Model", value="melody", interactive=True)
|
206 |
-
with gr.Row():
|
207 |
-
duration = gr.Slider(minimum=1, maximum=120, value=10, label="Duration", interactive=True)
|
208 |
-
with gr.Row():
|
209 |
-
topk = gr.Number(label="Top-k", value=250, interactive=True)
|
210 |
-
topp = gr.Number(label="Top-p", value=0, interactive=True)
|
211 |
-
temperature = gr.Number(label="Temperature", value=1.0, interactive=True)
|
212 |
-
cfg_coef = gr.Number(label="Classifier Free Guidance", value=3.0, interactive=True)
|
213 |
-
with gr.Column():
|
214 |
-
output = gr.Video(label="Generated Music")
|
215 |
-
submit.click(predict_full,
|
216 |
-
inputs=[model, text, melody, duration, topk, topp, temperature, cfg_coef],
|
217 |
-
outputs=[output])
|
218 |
-
radio.change(toggle_audio_src, radio, [melody], queue=False, show_progress=False)
|
219 |
-
gr.Examples(
|
220 |
-
fn=predict_full,
|
221 |
-
examples=[
|
222 |
-
[
|
223 |
-
"An 80s driving pop song with heavy drums and synth pads in the background",
|
224 |
-
"./assets/bach.mp3",
|
225 |
-
"melody"
|
226 |
-
],
|
227 |
-
[
|
228 |
-
"A cheerful country song with acoustic guitars",
|
229 |
-
"./assets/bolero_ravel.mp3",
|
230 |
-
"melody"
|
231 |
-
],
|
232 |
-
[
|
233 |
-
"90s rock song with electric guitar and heavy drums",
|
234 |
-
None,
|
235 |
-
"medium"
|
236 |
-
],
|
237 |
-
[
|
238 |
-
"a light and cheerly EDM track, with syncopated drums, aery pads, and strong emotions",
|
239 |
-
"./assets/bach.mp3",
|
240 |
-
"melody"
|
241 |
-
],
|
242 |
-
[
|
243 |
-
"lofi slow bpm electro chill with organic samples",
|
244 |
-
None,
|
245 |
-
"medium",
|
246 |
-
],
|
247 |
-
],
|
248 |
-
inputs=[text, melody, model],
|
249 |
-
outputs=[output]
|
250 |
-
)
|
251 |
-
gr.Markdown(
|
252 |
-
"""
|
253 |
-
### More details
|
254 |
-
|
255 |
-
The model will generate a short music extract based on the description you provided.
|
256 |
-
The model can generate up to 30 seconds of audio in one pass. It is now possible
|
257 |
-
to extend the generation by feeding back the end of the previous chunk of audio.
|
258 |
-
This can take a long time, and the model might lose consistency. The model might also
|
259 |
-
decide at arbitrary positions that the song ends.
|
260 |
-
|
261 |
-
**WARNING:** Choosing long durations will take a long time to generate (2min might take ~10min).
|
262 |
-
An overlap of 12 seconds is kept with the previously generated chunk, and 18 "new" seconds
|
263 |
-
are generated each time.
|
264 |
-
|
265 |
-
We present 4 model variations:
|
266 |
-
1. Melody -- a music generation model capable of generating music condition
|
267 |
-
on text and melody inputs. **Note**, you can also use text only.
|
268 |
-
2. Small -- a 300M transformer decoder conditioned on text only.
|
269 |
-
3. Medium -- a 1.5B transformer decoder conditioned on text only.
|
270 |
-
4. Large -- a 3.3B transformer decoder conditioned on text only (might OOM for the longest sequences.)
|
271 |
-
|
272 |
-
When using `melody`, ou can optionaly provide a reference audio from
|
273 |
-
which a broad melody will be extracted. The model will then try to follow both
|
274 |
-
the description and melody provided.
|
275 |
-
|
276 |
-
You can also use your own GPU or a Google Colab by following the instructions on our repo.
|
277 |
-
See [github.com/facebookresearch/audiocraft](https://github.com/facebookresearch/audiocraft)
|
278 |
-
for more details.
|
279 |
-
"""
|
280 |
-
)
|
281 |
-
|
282 |
-
interface.queue().launch(**launch_kwargs)
|
283 |
-
|
284 |
-
|
285 |
-
def ui_batched(launch_kwargs):
|
286 |
-
with gr.Blocks() as demo:
|
287 |
-
gr.Markdown(
|
288 |
-
"""
|
289 |
-
# MusicGen
|
290 |
-
|
291 |
-
This is the demo for [MusicGen](https://github.com/facebookresearch/audiocraft),
|
292 |
-
a simple and controllable model for music generation
|
293 |
-
presented at: ["Simple and Controllable Music Generation"](https://huggingface.co/papers/2306.05284).
|
294 |
-
<br/>
|
295 |
-
<a href="https://huggingface.co/spaces/facebook/MusicGen?duplicate=true"
|
296 |
-
style="display: inline-block;margin-top: .5em;margin-right: .25em;" target="_blank">
|
297 |
-
<img style="margin-bottom: 0em;display: inline;margin-top: -.25em;"
|
298 |
-
src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>
|
299 |
-
for longer sequences, more control and no queue.</p>
|
300 |
-
"""
|
301 |
-
)
|
302 |
-
with gr.Row():
|
303 |
-
with gr.Column():
|
304 |
-
with gr.Row():
|
305 |
-
text = gr.Text(label="Describe your music", lines=2, interactive=True)
|
306 |
-
with gr.Column():
|
307 |
-
radio = gr.Radio(["file", "mic"], value="file",
|
308 |
-
label="Condition on a melody (optional) File or Mic")
|
309 |
-
melody = gr.Audio(source="upload", type="numpy", label="File",
|
310 |
-
interactive=True, elem_id="melody-input")
|
311 |
-
with gr.Row():
|
312 |
-
submit = gr.Button("Generate")
|
313 |
-
with gr.Column():
|
314 |
-
output = gr.Video(label="Generated Music")
|
315 |
-
submit.click(predict_batched, inputs=[text, melody],
|
316 |
-
outputs=[output], batch=True, max_batch_size=MAX_BATCH_SIZE)
|
317 |
-
radio.change(toggle_audio_src, radio, [melody], queue=False, show_progress=False)
|
318 |
-
gr.Examples(
|
319 |
-
fn=predict_batched,
|
320 |
-
examples=[
|
321 |
-
[
|
322 |
-
"An 80s driving pop song with heavy drums and synth pads in the background",
|
323 |
-
"./assets/bach.mp3",
|
324 |
-
],
|
325 |
-
[
|
326 |
-
"A cheerful country song with acoustic guitars",
|
327 |
-
"./assets/bolero_ravel.mp3",
|
328 |
-
],
|
329 |
-
[
|
330 |
-
"90s rock song with electric guitar and heavy drums",
|
331 |
-
None,
|
332 |
-
],
|
333 |
-
[
|
334 |
-
"a light and cheerly EDM track, with syncopated drums, aery pads, and strong emotions bpm: 130",
|
335 |
-
"./assets/bach.mp3",
|
336 |
-
],
|
337 |
-
[
|
338 |
-
"lofi slow bpm electro chill with organic samples",
|
339 |
-
None,
|
340 |
-
],
|
341 |
-
],
|
342 |
-
inputs=[text, melody],
|
343 |
-
outputs=[output]
|
344 |
-
)
|
345 |
-
gr.Markdown("""
|
346 |
-
### More details
|
347 |
-
|
348 |
-
The model will generate 12 seconds of audio based on the description you provided.
|
349 |
-
You can optionaly provide a reference audio from which a broad melody will be extracted.
|
350 |
-
The model will then try to follow both the description and melody provided.
|
351 |
-
All samples are generated with the `melody` model.
|
352 |
-
|
353 |
-
You can also use your own GPU or a Google Colab by following the instructions on our repo.
|
354 |
-
|
355 |
-
See [github.com/facebookresearch/audiocraft](https://github.com/facebookresearch/audiocraft)
|
356 |
-
for more details.
|
357 |
-
""")
|
358 |
-
|
359 |
-
demo.queue(max_size=8 * 4).launch(**launch_kwargs)
|
360 |
-
|
361 |
-
|
362 |
-
if __name__ == "__main__":
|
363 |
-
parser = argparse.ArgumentParser()
|
364 |
-
parser.add_argument(
|
365 |
-
'--listen',
|
366 |
-
type=str,
|
367 |
-
default='0.0.0.0' if 'SPACE_ID' in os.environ else '127.0.0.1',
|
368 |
-
help='IP to listen on for connections to Gradio',
|
369 |
-
)
|
370 |
-
parser.add_argument(
|
371 |
-
'--username', type=str, default='', help='Username for authentication'
|
372 |
-
)
|
373 |
-
parser.add_argument(
|
374 |
-
'--password', type=str, default='', help='Password for authentication'
|
375 |
-
)
|
376 |
-
parser.add_argument(
|
377 |
-
'--server_port',
|
378 |
-
type=int,
|
379 |
-
default=0,
|
380 |
-
help='Port to run the server listener on',
|
381 |
-
)
|
382 |
-
parser.add_argument(
|
383 |
-
'--inbrowser', action='store_true', help='Open in browser'
|
384 |
-
)
|
385 |
-
parser.add_argument(
|
386 |
-
'--share', action='store_true', help='Share the gradio UI'
|
387 |
-
)
|
388 |
-
|
389 |
-
args = parser.parse_args()
|
390 |
-
|
391 |
-
launch_kwargs = {}
|
392 |
-
launch_kwargs['server_name'] = args.listen
|
393 |
-
|
394 |
-
if args.username and args.password:
|
395 |
-
launch_kwargs['auth'] = (args.username, args.password)
|
396 |
-
if args.server_port:
|
397 |
-
launch_kwargs['server_port'] = args.server_port
|
398 |
-
if args.inbrowser:
|
399 |
-
launch_kwargs['inbrowser'] = args.inbrowser
|
400 |
-
if args.share:
|
401 |
-
launch_kwargs['share'] = args.share
|
402 |
-
|
403 |
-
# Show the interface
|
404 |
-
if IS_BATCHED:
|
405 |
-
ui_batched(launch_kwargs)
|
406 |
-
else:
|
407 |
-
ui_full(launch_kwargs)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DragGan/DragGan-Inversion/stylegan_human/pti/pti_models/e4e/stylegan2/op/fused_bias_act.cpp
DELETED
@@ -1,21 +0,0 @@
|
|
1 |
-
#include <torch/extension.h>
|
2 |
-
|
3 |
-
|
4 |
-
torch::Tensor fused_bias_act_op(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer,
|
5 |
-
int act, int grad, float alpha, float scale);
|
6 |
-
|
7 |
-
#define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor")
|
8 |
-
#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
|
9 |
-
#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)
|
10 |
-
|
11 |
-
torch::Tensor fused_bias_act(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer,
|
12 |
-
int act, int grad, float alpha, float scale) {
|
13 |
-
CHECK_CUDA(input);
|
14 |
-
CHECK_CUDA(bias);
|
15 |
-
|
16 |
-
return fused_bias_act_op(input, bias, refer, act, grad, alpha, scale);
|
17 |
-
}
|
18 |
-
|
19 |
-
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
|
20 |
-
m.def("fused_bias_act", &fused_bias_act, "fused bias act (CUDA)");
|
21 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DragGan/DragGan-Inversion/stylegan_human/utils/__init__.py
DELETED
File without changes
|
spaces/Duskfallcrew/lambdalabs-sd-pokemon-diffusers/app.py
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
import gradio as gr
|
2 |
-
|
3 |
-
gr.Interface.load("models/lambdalabs/sd-pokemon-diffusers").launch()
|
|
|
|
|
|
|
|
spaces/ECCV2022/PSG/utils.py
DELETED
@@ -1,300 +0,0 @@
|
|
1 |
-
from typing import Tuple
|
2 |
-
import PIL
|
3 |
-
import mmcv
|
4 |
-
import numpy as np
|
5 |
-
from detectron2.utils.colormap import colormap
|
6 |
-
from detectron2.utils.visualizer import VisImage, Visualizer
|
7 |
-
from mmdet.datasets.coco_panoptic import INSTANCE_OFFSET
|
8 |
-
from PIL import Image
|
9 |
-
|
10 |
-
|
11 |
-
CLASSES = [
|
12 |
-
'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
|
13 |
-
'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
|
14 |
-
'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
|
15 |
-
'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
|
16 |
-
'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
|
17 |
-
'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
|
18 |
-
'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
|
19 |
-
'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
|
20 |
-
'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
|
21 |
-
'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
|
22 |
-
'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
|
23 |
-
'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
|
24 |
-
'hair drier', 'toothbrush', 'banner', 'blanket', 'bridge', 'cardboard',
|
25 |
-
'counter', 'curtain', 'door-stuff', 'floor-wood', 'flower', 'fruit',
|
26 |
-
'gravel', 'house', 'light', 'mirror-stuff', 'net', 'pillow', 'platform',
|
27 |
-
'playingfield', 'railroad', 'river', 'road', 'roof', 'sand', 'sea',
|
28 |
-
'shelf', 'snow', 'stairs', 'tent', 'towel', 'wall-brick', 'wall-stone',
|
29 |
-
'wall-tile', 'wall-wood', 'water-other', 'window-blind', 'window-other',
|
30 |
-
'tree-merged', 'fence-merged', 'ceiling-merged', 'sky-other-merged',
|
31 |
-
'cabinet-merged', 'table-merged', 'floor-other-merged', 'pavement-merged',
|
32 |
-
'mountain-merged', 'grass-merged', 'dirt-merged', 'paper-merged',
|
33 |
-
'food-other-merged', 'building-other-merged', 'rock-merged',
|
34 |
-
'wall-other-merged', 'rug-merged', 'background'
|
35 |
-
]
|
36 |
-
|
37 |
-
PREDICATES = [
|
38 |
-
'over',
|
39 |
-
'in front of',
|
40 |
-
'beside',
|
41 |
-
'on',
|
42 |
-
'in',
|
43 |
-
'attached to',
|
44 |
-
'hanging from',
|
45 |
-
'on back of',
|
46 |
-
'falling off',
|
47 |
-
'going down',
|
48 |
-
'painted on',
|
49 |
-
'walking on',
|
50 |
-
'running on',
|
51 |
-
'crossing',
|
52 |
-
'standing on',
|
53 |
-
'lying on',
|
54 |
-
'sitting on',
|
55 |
-
'flying over',
|
56 |
-
'jumping over',
|
57 |
-
'jumping from',
|
58 |
-
'wearing',
|
59 |
-
'holding',
|
60 |
-
'carrying',
|
61 |
-
'looking at',
|
62 |
-
'guiding',
|
63 |
-
'kissing',
|
64 |
-
'eating',
|
65 |
-
'drinking',
|
66 |
-
'feeding',
|
67 |
-
'biting',
|
68 |
-
'catching',
|
69 |
-
'picking',
|
70 |
-
'playing with',
|
71 |
-
'chasing',
|
72 |
-
'climbing',
|
73 |
-
'cleaning',
|
74 |
-
'playing',
|
75 |
-
'touching',
|
76 |
-
'pushing',
|
77 |
-
'pulling',
|
78 |
-
'opening',
|
79 |
-
'cooking',
|
80 |
-
'talking to',
|
81 |
-
'throwing',
|
82 |
-
'slicing',
|
83 |
-
'driving',
|
84 |
-
'riding',
|
85 |
-
'parked on',
|
86 |
-
'driving on',
|
87 |
-
'about to hit',
|
88 |
-
'kicking',
|
89 |
-
'swinging',
|
90 |
-
'entering',
|
91 |
-
'exiting',
|
92 |
-
'enclosing',
|
93 |
-
'leaning on',
|
94 |
-
]
|
95 |
-
|
96 |
-
|
97 |
-
def get_colormap(num_colors: int):
|
98 |
-
return (np.resize(colormap(), (num_colors, 3))).tolist()
|
99 |
-
|
100 |
-
|
101 |
-
def draw_text(
|
102 |
-
viz_img: VisImage = None,
|
103 |
-
text: str = None,
|
104 |
-
x: float = None,
|
105 |
-
y: float = None,
|
106 |
-
color: Tuple[float, float, float] = [0, 0, 0],
|
107 |
-
size: float = 10,
|
108 |
-
padding: float = 5,
|
109 |
-
box_color: str = 'black',
|
110 |
-
font: str = None,
|
111 |
-
) -> float:
|
112 |
-
text_obj = viz_img.ax.text(
|
113 |
-
x,
|
114 |
-
y,
|
115 |
-
text,
|
116 |
-
size=size,
|
117 |
-
# family="sans-serif",
|
118 |
-
bbox={
|
119 |
-
'facecolor': box_color,
|
120 |
-
'alpha': 0.8,
|
121 |
-
'pad': padding,
|
122 |
-
'edgecolor': 'none',
|
123 |
-
},
|
124 |
-
verticalalignment='top',
|
125 |
-
horizontalalignment='left',
|
126 |
-
color=color,
|
127 |
-
zorder=10,
|
128 |
-
rotation=0,
|
129 |
-
)
|
130 |
-
viz_img.get_image()
|
131 |
-
text_dims = text_obj.get_bbox_patch().get_extents()
|
132 |
-
|
133 |
-
return text_dims.width
|
134 |
-
|
135 |
-
|
136 |
-
def show_result(img,
|
137 |
-
result,
|
138 |
-
is_one_stage,
|
139 |
-
num_rel=20,
|
140 |
-
show=False,
|
141 |
-
out_dir=None,
|
142 |
-
out_file=None):
|
143 |
-
# Load image
|
144 |
-
img = mmcv.imread(img)
|
145 |
-
img = img.copy() # (H, W, 3)
|
146 |
-
img_h, img_w = img.shape[:-1]
|
147 |
-
|
148 |
-
# Decrease contrast
|
149 |
-
img = PIL.Image.fromarray(img)
|
150 |
-
converter = PIL.ImageEnhance.Color(img)
|
151 |
-
img = converter.enhance(0.01)
|
152 |
-
if out_file is not None:
|
153 |
-
mmcv.imwrite(np.asarray(img), 'bw'+out_file)
|
154 |
-
|
155 |
-
# Draw masks
|
156 |
-
pan_results = result.pan_results
|
157 |
-
|
158 |
-
ids = np.unique(pan_results)[::-1]
|
159 |
-
num_classes = 133
|
160 |
-
legal_indices = (ids != num_classes) # for VOID label
|
161 |
-
ids = ids[legal_indices]
|
162 |
-
|
163 |
-
# Get predicted labels
|
164 |
-
labels = np.array([id % INSTANCE_OFFSET for id in ids], dtype=np.int64)
|
165 |
-
labels = [CLASSES[l] for l in labels]
|
166 |
-
|
167 |
-
#For psgtr
|
168 |
-
rel_obj_labels = result.labels
|
169 |
-
rel_obj_labels = [CLASSES[l - 1] for l in rel_obj_labels]
|
170 |
-
|
171 |
-
# (N_m, H, W)
|
172 |
-
segms = pan_results[None] == ids[:, None, None]
|
173 |
-
# Resize predicted masks
|
174 |
-
segms = [
|
175 |
-
mmcv.image.imresize(m.astype(float), (img_w, img_h)) for m in segms
|
176 |
-
]
|
177 |
-
# One stage segmentation
|
178 |
-
masks = result.masks
|
179 |
-
|
180 |
-
# Choose colors for each instance in coco
|
181 |
-
colormap_coco = get_colormap(len(masks)) if is_one_stage else get_colormap(len(segms))
|
182 |
-
colormap_coco = (np.array(colormap_coco) / 255).tolist()
|
183 |
-
|
184 |
-
# Viualize masks
|
185 |
-
viz = Visualizer(img)
|
186 |
-
viz.overlay_instances(
|
187 |
-
labels=rel_obj_labels if is_one_stage else labels,
|
188 |
-
masks=masks if is_one_stage else segms,
|
189 |
-
assigned_colors=colormap_coco,
|
190 |
-
)
|
191 |
-
viz_img = viz.get_output().get_image()
|
192 |
-
if out_file is not None:
|
193 |
-
mmcv.imwrite(viz_img, out_file)
|
194 |
-
|
195 |
-
# Draw relations
|
196 |
-
|
197 |
-
# Filter out relations
|
198 |
-
### Debug: output all relations if not enough
|
199 |
-
n_rel_topk = min(num_rel, len(result.labels)//2)
|
200 |
-
# Exclude background class
|
201 |
-
rel_dists = result.rel_dists[:, 1:]
|
202 |
-
# rel_dists = result.rel_dists
|
203 |
-
rel_scores = rel_dists.max(1)
|
204 |
-
# rel_scores = result.triplet_scores
|
205 |
-
# Extract relations with top scores
|
206 |
-
rel_topk_idx = np.argpartition(rel_scores, -n_rel_topk)[-n_rel_topk:]
|
207 |
-
rel_labels_topk = rel_dists[rel_topk_idx].argmax(1)
|
208 |
-
rel_pair_idxes_topk = result.rel_pair_idxes[rel_topk_idx]
|
209 |
-
relations = np.concatenate(
|
210 |
-
[rel_pair_idxes_topk, rel_labels_topk[..., None]], axis=1)
|
211 |
-
n_rels = len(relations)
|
212 |
-
|
213 |
-
top_padding = 20
|
214 |
-
bottom_padding = 20
|
215 |
-
left_padding = 20
|
216 |
-
text_size = 10
|
217 |
-
text_padding = 5
|
218 |
-
text_height = text_size + 2 * text_padding
|
219 |
-
row_padding = 10
|
220 |
-
height = (top_padding + bottom_padding + n_rels *
|
221 |
-
(text_height + row_padding) - row_padding)
|
222 |
-
width = img_w
|
223 |
-
curr_x = left_padding
|
224 |
-
curr_y = top_padding
|
225 |
-
|
226 |
-
# # Adjust colormaps
|
227 |
-
# colormap_coco = [adjust_text_color(c, viz) for c in colormap_coco]
|
228 |
-
viz_graph = VisImage(np.full((height, width, 3), 255))
|
229 |
-
|
230 |
-
all_rel_vis = []
|
231 |
-
|
232 |
-
for i, r in enumerate(relations):
|
233 |
-
s_idx, o_idx, rel_id = r
|
234 |
-
s_label = rel_obj_labels[s_idx]
|
235 |
-
o_label = rel_obj_labels[o_idx]
|
236 |
-
rel_label = PREDICATES[rel_id]
|
237 |
-
viz = Visualizer(img)
|
238 |
-
viz.overlay_instances(
|
239 |
-
labels=[s_label, o_label],
|
240 |
-
masks=[masks[s_idx], masks[o_idx]],
|
241 |
-
assigned_colors=[colormap_coco[s_idx], colormap_coco[o_idx]],
|
242 |
-
)
|
243 |
-
viz_masked_img = viz.get_output().get_image()
|
244 |
-
|
245 |
-
viz_graph = VisImage(np.full((40, width, 3), 255))
|
246 |
-
curr_x = 2
|
247 |
-
curr_y = 2
|
248 |
-
text_size = 25
|
249 |
-
text_padding = 20
|
250 |
-
font = 36
|
251 |
-
text_width = draw_text(
|
252 |
-
viz_img=viz_graph,
|
253 |
-
text=s_label,
|
254 |
-
x=curr_x,
|
255 |
-
y=curr_y,
|
256 |
-
color=colormap_coco[s_idx],
|
257 |
-
size=text_size,
|
258 |
-
padding=text_padding,
|
259 |
-
font=font,
|
260 |
-
)
|
261 |
-
curr_x += text_width
|
262 |
-
# Draw relation text
|
263 |
-
text_width = draw_text(
|
264 |
-
viz_img=viz_graph,
|
265 |
-
text=rel_label,
|
266 |
-
x=curr_x,
|
267 |
-
y=curr_y,
|
268 |
-
size=text_size,
|
269 |
-
padding=text_padding,
|
270 |
-
box_color='gainsboro',
|
271 |
-
font=font,
|
272 |
-
)
|
273 |
-
curr_x += text_width
|
274 |
-
|
275 |
-
# Draw object text
|
276 |
-
text_width = draw_text(
|
277 |
-
viz_img=viz_graph,
|
278 |
-
text=o_label,
|
279 |
-
x=curr_x,
|
280 |
-
y=curr_y,
|
281 |
-
color=colormap_coco[o_idx],
|
282 |
-
size=text_size,
|
283 |
-
padding=text_padding,
|
284 |
-
font=font,
|
285 |
-
)
|
286 |
-
output_viz_graph = np.vstack([viz_masked_img, viz_graph.get_image()])
|
287 |
-
if show:
|
288 |
-
all_rel_vis.append(output_viz_graph)
|
289 |
-
|
290 |
-
return all_rel_vis
|
291 |
-
|
292 |
-
|
293 |
-
def make_gif(np_images):
|
294 |
-
frames = [Image.fromarray(numpy_image.astype('uint8'), 'RGB') for numpy_image in np_images]
|
295 |
-
# frames = [Image.open(image) for image in images]
|
296 |
-
frame_one = frames[0]
|
297 |
-
file_name = "top_rel.gif"
|
298 |
-
frame_one.save(file_name, format="GIF", append_images=frames,
|
299 |
-
save_all=True, duration=1000, loop=0)
|
300 |
-
return file_name
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/ECCV2022/bytetrack/tutorials/centertrack/mot_online/kalman_filter.py
DELETED
@@ -1,269 +0,0 @@
|
|
1 |
-
# vim: expandtab:ts=4:sw=4
|
2 |
-
import numpy as np
|
3 |
-
import scipy.linalg
|
4 |
-
|
5 |
-
"""
|
6 |
-
Table for the 0.95 quantile of the chi-square distribution with N degrees of
|
7 |
-
freedom (contains values for N=1, ..., 9). Taken from MATLAB/Octave's chi2inv
|
8 |
-
function and used as Mahalanobis gating threshold.
|
9 |
-
"""
|
10 |
-
chi2inv95 = {
|
11 |
-
1: 3.8415,
|
12 |
-
2: 5.9915,
|
13 |
-
3: 7.8147,
|
14 |
-
4: 9.4877,
|
15 |
-
5: 11.070,
|
16 |
-
6: 12.592,
|
17 |
-
7: 14.067,
|
18 |
-
8: 15.507,
|
19 |
-
9: 16.919}
|
20 |
-
|
21 |
-
|
22 |
-
class KalmanFilter(object):
|
23 |
-
"""
|
24 |
-
A simple Kalman filter for tracking bounding boxes in image space.
|
25 |
-
|
26 |
-
The 8-dimensional state space
|
27 |
-
|
28 |
-
x, y, a, h, vx, vy, va, vh
|
29 |
-
|
30 |
-
contains the bounding box center position (x, y), aspect ratio a, height h,
|
31 |
-
and their respective velocities.
|
32 |
-
|
33 |
-
Object motion follows a constant velocity model. The bounding box location
|
34 |
-
(x, y, a, h) is taken as direct observation of the state space (linear
|
35 |
-
observation model).
|
36 |
-
|
37 |
-
"""
|
38 |
-
|
39 |
-
def __init__(self):
|
40 |
-
ndim, dt = 4, 1.
|
41 |
-
|
42 |
-
# Create Kalman filter model matrices.
|
43 |
-
self._motion_mat = np.eye(2 * ndim, 2 * ndim)
|
44 |
-
for i in range(ndim):
|
45 |
-
self._motion_mat[i, ndim + i] = dt
|
46 |
-
self._update_mat = np.eye(ndim, 2 * ndim)
|
47 |
-
|
48 |
-
# Motion and observation uncertainty are chosen relative to the current
|
49 |
-
# state estimate. These weights control the amount of uncertainty in
|
50 |
-
# the model. This is a bit hacky.
|
51 |
-
self._std_weight_position = 1. / 20
|
52 |
-
self._std_weight_velocity = 1. / 160
|
53 |
-
|
54 |
-
def initiate(self, measurement):
|
55 |
-
"""Create track from unassociated measurement.
|
56 |
-
|
57 |
-
Parameters
|
58 |
-
----------
|
59 |
-
measurement : ndarray
|
60 |
-
Bounding box coordinates (x, y, a, h) with center position (x, y),
|
61 |
-
aspect ratio a, and height h.
|
62 |
-
|
63 |
-
Returns
|
64 |
-
-------
|
65 |
-
(ndarray, ndarray)
|
66 |
-
Returns the mean vector (8 dimensional) and covariance matrix (8x8
|
67 |
-
dimensional) of the new track. Unobserved velocities are initialized
|
68 |
-
to 0 mean.
|
69 |
-
|
70 |
-
"""
|
71 |
-
mean_pos = measurement
|
72 |
-
mean_vel = np.zeros_like(mean_pos)
|
73 |
-
mean = np.r_[mean_pos, mean_vel]
|
74 |
-
|
75 |
-
std = [
|
76 |
-
2 * self._std_weight_position * measurement[3],
|
77 |
-
2 * self._std_weight_position * measurement[3],
|
78 |
-
1e-2,
|
79 |
-
2 * self._std_weight_position * measurement[3],
|
80 |
-
10 * self._std_weight_velocity * measurement[3],
|
81 |
-
10 * self._std_weight_velocity * measurement[3],
|
82 |
-
1e-5,
|
83 |
-
10 * self._std_weight_velocity * measurement[3]]
|
84 |
-
covariance = np.diag(np.square(std))
|
85 |
-
return mean, covariance
|
86 |
-
|
87 |
-
def predict(self, mean, covariance):
|
88 |
-
"""Run Kalman filter prediction step.
|
89 |
-
|
90 |
-
Parameters
|
91 |
-
----------
|
92 |
-
mean : ndarray
|
93 |
-
The 8 dimensional mean vector of the object state at the previous
|
94 |
-
time step.
|
95 |
-
covariance : ndarray
|
96 |
-
The 8x8 dimensional covariance matrix of the object state at the
|
97 |
-
previous time step.
|
98 |
-
|
99 |
-
Returns
|
100 |
-
-------
|
101 |
-
(ndarray, ndarray)
|
102 |
-
Returns the mean vector and covariance matrix of the predicted
|
103 |
-
state. Unobserved velocities are initialized to 0 mean.
|
104 |
-
|
105 |
-
"""
|
106 |
-
std_pos = [
|
107 |
-
self._std_weight_position * mean[3],
|
108 |
-
self._std_weight_position * mean[3],
|
109 |
-
1e-2,
|
110 |
-
self._std_weight_position * mean[3]]
|
111 |
-
std_vel = [
|
112 |
-
self._std_weight_velocity * mean[3],
|
113 |
-
self._std_weight_velocity * mean[3],
|
114 |
-
1e-5,
|
115 |
-
self._std_weight_velocity * mean[3]]
|
116 |
-
motion_cov = np.diag(np.square(np.r_[std_pos, std_vel]))
|
117 |
-
|
118 |
-
#mean = np.dot(self._motion_mat, mean)
|
119 |
-
mean = np.dot(mean, self._motion_mat.T)
|
120 |
-
covariance = np.linalg.multi_dot((
|
121 |
-
self._motion_mat, covariance, self._motion_mat.T)) + motion_cov
|
122 |
-
|
123 |
-
return mean, covariance
|
124 |
-
|
125 |
-
def project(self, mean, covariance):
|
126 |
-
"""Project state distribution to measurement space.
|
127 |
-
|
128 |
-
Parameters
|
129 |
-
----------
|
130 |
-
mean : ndarray
|
131 |
-
The state's mean vector (8 dimensional array).
|
132 |
-
covariance : ndarray
|
133 |
-
The state's covariance matrix (8x8 dimensional).
|
134 |
-
|
135 |
-
Returns
|
136 |
-
-------
|
137 |
-
(ndarray, ndarray)
|
138 |
-
Returns the projected mean and covariance matrix of the given state
|
139 |
-
estimate.
|
140 |
-
|
141 |
-
"""
|
142 |
-
std = [
|
143 |
-
self._std_weight_position * mean[3],
|
144 |
-
self._std_weight_position * mean[3],
|
145 |
-
1e-1,
|
146 |
-
self._std_weight_position * mean[3]]
|
147 |
-
innovation_cov = np.diag(np.square(std))
|
148 |
-
|
149 |
-
mean = np.dot(self._update_mat, mean)
|
150 |
-
covariance = np.linalg.multi_dot((
|
151 |
-
self._update_mat, covariance, self._update_mat.T))
|
152 |
-
return mean, covariance + innovation_cov
|
153 |
-
|
154 |
-
def multi_predict(self, mean, covariance):
|
155 |
-
"""Run Kalman filter prediction step (Vectorized version).
|
156 |
-
Parameters
|
157 |
-
----------
|
158 |
-
mean : ndarray
|
159 |
-
The Nx8 dimensional mean matrix of the object states at the previous
|
160 |
-
time step.
|
161 |
-
covariance : ndarray
|
162 |
-
The Nx8x8 dimensional covariance matrics of the object states at the
|
163 |
-
previous time step.
|
164 |
-
Returns
|
165 |
-
-------
|
166 |
-
(ndarray, ndarray)
|
167 |
-
Returns the mean vector and covariance matrix of the predicted
|
168 |
-
state. Unobserved velocities are initialized to 0 mean.
|
169 |
-
"""
|
170 |
-
std_pos = [
|
171 |
-
self._std_weight_position * mean[:, 3],
|
172 |
-
self._std_weight_position * mean[:, 3],
|
173 |
-
1e-2 * np.ones_like(mean[:, 3]),
|
174 |
-
self._std_weight_position * mean[:, 3]]
|
175 |
-
std_vel = [
|
176 |
-
self._std_weight_velocity * mean[:, 3],
|
177 |
-
self._std_weight_velocity * mean[:, 3],
|
178 |
-
1e-5 * np.ones_like(mean[:, 3]),
|
179 |
-
self._std_weight_velocity * mean[:, 3]]
|
180 |
-
sqr = np.square(np.r_[std_pos, std_vel]).T
|
181 |
-
|
182 |
-
motion_cov = []
|
183 |
-
for i in range(len(mean)):
|
184 |
-
motion_cov.append(np.diag(sqr[i]))
|
185 |
-
motion_cov = np.asarray(motion_cov)
|
186 |
-
|
187 |
-
mean = np.dot(mean, self._motion_mat.T)
|
188 |
-
left = np.dot(self._motion_mat, covariance).transpose((1, 0, 2))
|
189 |
-
covariance = np.dot(left, self._motion_mat.T) + motion_cov
|
190 |
-
|
191 |
-
return mean, covariance
|
192 |
-
|
193 |
-
def update(self, mean, covariance, measurement):
|
194 |
-
"""Run Kalman filter correction step.
|
195 |
-
|
196 |
-
Parameters
|
197 |
-
----------
|
198 |
-
mean : ndarray
|
199 |
-
The predicted state's mean vector (8 dimensional).
|
200 |
-
covariance : ndarray
|
201 |
-
The state's covariance matrix (8x8 dimensional).
|
202 |
-
measurement : ndarray
|
203 |
-
The 4 dimensional measurement vector (x, y, a, h), where (x, y)
|
204 |
-
is the center position, a the aspect ratio, and h the height of the
|
205 |
-
bounding box.
|
206 |
-
|
207 |
-
Returns
|
208 |
-
-------
|
209 |
-
(ndarray, ndarray)
|
210 |
-
Returns the measurement-corrected state distribution.
|
211 |
-
|
212 |
-
"""
|
213 |
-
projected_mean, projected_cov = self.project(mean, covariance)
|
214 |
-
|
215 |
-
chol_factor, lower = scipy.linalg.cho_factor(
|
216 |
-
projected_cov, lower=True, check_finite=False)
|
217 |
-
kalman_gain = scipy.linalg.cho_solve(
|
218 |
-
(chol_factor, lower), np.dot(covariance, self._update_mat.T).T,
|
219 |
-
check_finite=False).T
|
220 |
-
innovation = measurement - projected_mean
|
221 |
-
|
222 |
-
new_mean = mean + np.dot(innovation, kalman_gain.T)
|
223 |
-
new_covariance = covariance - np.linalg.multi_dot((
|
224 |
-
kalman_gain, projected_cov, kalman_gain.T))
|
225 |
-
return new_mean, new_covariance
|
226 |
-
|
227 |
-
def gating_distance(self, mean, covariance, measurements,
|
228 |
-
only_position=False, metric='maha'):
|
229 |
-
"""Compute gating distance between state distribution and measurements.
|
230 |
-
A suitable distance threshold can be obtained from `chi2inv95`. If
|
231 |
-
`only_position` is False, the chi-square distribution has 4 degrees of
|
232 |
-
freedom, otherwise 2.
|
233 |
-
Parameters
|
234 |
-
----------
|
235 |
-
mean : ndarray
|
236 |
-
Mean vector over the state distribution (8 dimensional).
|
237 |
-
covariance : ndarray
|
238 |
-
Covariance of the state distribution (8x8 dimensional).
|
239 |
-
measurements : ndarray
|
240 |
-
An Nx4 dimensional matrix of N measurements, each in
|
241 |
-
format (x, y, a, h) where (x, y) is the bounding box center
|
242 |
-
position, a the aspect ratio, and h the height.
|
243 |
-
only_position : Optional[bool]
|
244 |
-
If True, distance computation is done with respect to the bounding
|
245 |
-
box center position only.
|
246 |
-
Returns
|
247 |
-
-------
|
248 |
-
ndarray
|
249 |
-
Returns an array of length N, where the i-th element contains the
|
250 |
-
squared Mahalanobis distance between (mean, covariance) and
|
251 |
-
`measurements[i]`.
|
252 |
-
"""
|
253 |
-
mean, covariance = self.project(mean, covariance)
|
254 |
-
if only_position:
|
255 |
-
mean, covariance = mean[:2], covariance[:2, :2]
|
256 |
-
measurements = measurements[:, :2]
|
257 |
-
|
258 |
-
d = measurements - mean
|
259 |
-
if metric == 'gaussian':
|
260 |
-
return np.sum(d * d, axis=1)
|
261 |
-
elif metric == 'maha':
|
262 |
-
cholesky_factor = np.linalg.cholesky(covariance)
|
263 |
-
z = scipy.linalg.solve_triangular(
|
264 |
-
cholesky_factor, d.T, lower=True, check_finite=False,
|
265 |
-
overwrite_b=True)
|
266 |
-
squared_maha = np.sum(z * z, axis=0)
|
267 |
-
return squared_maha
|
268 |
-
else:
|
269 |
-
raise ValueError('invalid distance metric')
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/ECCV2022/bytetrack/yolox/deepsort_tracker/iou_matching.py
DELETED
@@ -1,76 +0,0 @@
|
|
1 |
-
# vim: expandtab:ts=4:sw=4
|
2 |
-
from __future__ import absolute_import
|
3 |
-
import numpy as np
|
4 |
-
from yolox.deepsort_tracker import linear_assignment
|
5 |
-
|
6 |
-
|
7 |
-
def iou(bbox, candidates):
|
8 |
-
"""Computer intersection over union.
|
9 |
-
Parameters
|
10 |
-
----------
|
11 |
-
bbox : ndarray
|
12 |
-
A bounding box in format `(top left x, top left y, width, height)`.
|
13 |
-
candidates : ndarray
|
14 |
-
A matrix of candidate bounding boxes (one per row) in the same format
|
15 |
-
as `bbox`.
|
16 |
-
Returns
|
17 |
-
-------
|
18 |
-
ndarray
|
19 |
-
The intersection over union in [0, 1] between the `bbox` and each
|
20 |
-
candidate. A higher score means a larger fraction of the `bbox` is
|
21 |
-
occluded by the candidate.
|
22 |
-
"""
|
23 |
-
bbox_tl, bbox_br = bbox[:2], bbox[:2] + bbox[2:]
|
24 |
-
candidates_tl = candidates[:, :2]
|
25 |
-
candidates_br = candidates[:, :2] + candidates[:, 2:]
|
26 |
-
|
27 |
-
tl = np.c_[np.maximum(bbox_tl[0], candidates_tl[:, 0])[:, np.newaxis],
|
28 |
-
np.maximum(bbox_tl[1], candidates_tl[:, 1])[:, np.newaxis]]
|
29 |
-
br = np.c_[np.minimum(bbox_br[0], candidates_br[:, 0])[:, np.newaxis],
|
30 |
-
np.minimum(bbox_br[1], candidates_br[:, 1])[:, np.newaxis]]
|
31 |
-
wh = np.maximum(0., br - tl)
|
32 |
-
|
33 |
-
area_intersection = wh.prod(axis=1)
|
34 |
-
area_bbox = bbox[2:].prod()
|
35 |
-
area_candidates = candidates[:, 2:].prod(axis=1)
|
36 |
-
return area_intersection / (area_bbox + area_candidates - area_intersection)
|
37 |
-
|
38 |
-
|
39 |
-
def iou_cost(tracks, detections, track_indices=None,
|
40 |
-
detection_indices=None):
|
41 |
-
"""An intersection over union distance metric.
|
42 |
-
Parameters
|
43 |
-
----------
|
44 |
-
tracks : List[deep_sort.track.Track]
|
45 |
-
A list of tracks.
|
46 |
-
detections : List[deep_sort.detection.Detection]
|
47 |
-
A list of detections.
|
48 |
-
track_indices : Optional[List[int]]
|
49 |
-
A list of indices to tracks that should be matched. Defaults to
|
50 |
-
all `tracks`.
|
51 |
-
detection_indices : Optional[List[int]]
|
52 |
-
A list of indices to detections that should be matched. Defaults
|
53 |
-
to all `detections`.
|
54 |
-
Returns
|
55 |
-
-------
|
56 |
-
ndarray
|
57 |
-
Returns a cost matrix of shape
|
58 |
-
len(track_indices), len(detection_indices) where entry (i, j) is
|
59 |
-
`1 - iou(tracks[track_indices[i]], detections[detection_indices[j]])`.
|
60 |
-
"""
|
61 |
-
if track_indices is None:
|
62 |
-
track_indices = np.arange(len(tracks))
|
63 |
-
if detection_indices is None:
|
64 |
-
detection_indices = np.arange(len(detections))
|
65 |
-
|
66 |
-
cost_matrix = np.zeros((len(track_indices), len(detection_indices)))
|
67 |
-
for row, track_idx in enumerate(track_indices):
|
68 |
-
if tracks[track_idx].time_since_update > 1:
|
69 |
-
cost_matrix[row, :] = linear_assignment.INFTY_COST
|
70 |
-
continue
|
71 |
-
|
72 |
-
bbox = tracks[track_idx].to_tlwh()
|
73 |
-
candidates = np.asarray(
|
74 |
-
[detections[i].tlwh for i in detection_indices])
|
75 |
-
cost_matrix[row, :] = 1. - iou(bbox, candidates)
|
76 |
-
return cost_matrix
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/ECCV2022/bytetrack/yolox/models/__init__.py
DELETED
@@ -1,10 +0,0 @@
|
|
1 |
-
#!/usr/bin/env python3
|
2 |
-
# -*- coding:utf-8 -*-
|
3 |
-
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
|
4 |
-
|
5 |
-
from .darknet import CSPDarknet, Darknet
|
6 |
-
from .losses import IOUloss
|
7 |
-
from .yolo_fpn import YOLOFPN
|
8 |
-
from .yolo_head import YOLOXHead
|
9 |
-
from .yolo_pafpn import YOLOPAFPN
|
10 |
-
from .yolox import YOLOX
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/ECCV2022/bytetrack/yolox/models/losses.py
DELETED
@@ -1,81 +0,0 @@
|
|
1 |
-
#!/usr/bin/env python
|
2 |
-
# -*- encoding: utf-8 -*-
|
3 |
-
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
|
4 |
-
|
5 |
-
import torch
|
6 |
-
import torch.nn as nn
|
7 |
-
import torch.nn.functional as F
|
8 |
-
|
9 |
-
|
10 |
-
class IOUloss(nn.Module):
|
11 |
-
def __init__(self, reduction="none", loss_type="iou"):
|
12 |
-
super(IOUloss, self).__init__()
|
13 |
-
self.reduction = reduction
|
14 |
-
self.loss_type = loss_type
|
15 |
-
|
16 |
-
def forward(self, pred, target):
|
17 |
-
assert pred.shape[0] == target.shape[0]
|
18 |
-
|
19 |
-
pred = pred.view(-1, 4)
|
20 |
-
target = target.view(-1, 4)
|
21 |
-
tl = torch.max(
|
22 |
-
(pred[:, :2] - pred[:, 2:] / 2), (target[:, :2] - target[:, 2:] / 2)
|
23 |
-
)
|
24 |
-
br = torch.min(
|
25 |
-
(pred[:, :2] + pred[:, 2:] / 2), (target[:, :2] + target[:, 2:] / 2)
|
26 |
-
)
|
27 |
-
|
28 |
-
area_p = torch.prod(pred[:, 2:], 1)
|
29 |
-
area_g = torch.prod(target[:, 2:], 1)
|
30 |
-
|
31 |
-
en = (tl < br).type(tl.type()).prod(dim=1)
|
32 |
-
area_i = torch.prod(br - tl, 1) * en
|
33 |
-
iou = (area_i) / (area_p + area_g - area_i + 1e-16)
|
34 |
-
|
35 |
-
if self.loss_type == "iou":
|
36 |
-
loss = 1 - iou ** 2
|
37 |
-
elif self.loss_type == "giou":
|
38 |
-
c_tl = torch.min(
|
39 |
-
(pred[:, :2] - pred[:, 2:] / 2), (target[:, :2] - target[:, 2:] / 2)
|
40 |
-
)
|
41 |
-
c_br = torch.max(
|
42 |
-
(pred[:, :2] + pred[:, 2:] / 2), (target[:, :2] + target[:, 2:] / 2)
|
43 |
-
)
|
44 |
-
area_c = torch.prod(c_br - c_tl, 1)
|
45 |
-
giou = iou - (area_c - area_i) / area_c.clamp(1e-16)
|
46 |
-
loss = 1 - giou.clamp(min=-1.0, max=1.0)
|
47 |
-
|
48 |
-
if self.reduction == "mean":
|
49 |
-
loss = loss.mean()
|
50 |
-
elif self.reduction == "sum":
|
51 |
-
loss = loss.sum()
|
52 |
-
|
53 |
-
return loss
|
54 |
-
|
55 |
-
|
56 |
-
def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2):
|
57 |
-
"""
|
58 |
-
Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.
|
59 |
-
Args:
|
60 |
-
inputs: A float tensor of arbitrary shape.
|
61 |
-
The predictions for each example.
|
62 |
-
targets: A float tensor with the same shape as inputs. Stores the binary
|
63 |
-
classification label for each element in inputs
|
64 |
-
(0 for the negative class and 1 for the positive class).
|
65 |
-
alpha: (optional) Weighting factor in range (0,1) to balance
|
66 |
-
positive vs negative examples. Default = -1 (no weighting).
|
67 |
-
gamma: Exponent of the modulating factor (1 - p_t) to
|
68 |
-
balance easy vs hard examples.
|
69 |
-
Returns:
|
70 |
-
Loss tensor
|
71 |
-
"""
|
72 |
-
prob = inputs.sigmoid()
|
73 |
-
ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
|
74 |
-
p_t = prob * targets + (1 - prob) * (1 - targets)
|
75 |
-
loss = ce_loss * ((1 - p_t) ** gamma)
|
76 |
-
|
77 |
-
if alpha >= 0:
|
78 |
-
alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
|
79 |
-
loss = alpha_t * loss
|
80 |
-
#return loss.mean(0).sum() / num_boxes
|
81 |
-
return loss.sum() / num_boxes
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Egrt/LicenseGAN/plate.py
DELETED
@@ -1,47 +0,0 @@
|
|
1 |
-
import cv2
|
2 |
-
import matplotlib.pyplot as plt
|
3 |
-
import numpy as np
|
4 |
-
import torch
|
5 |
-
import torch.nn as nn
|
6 |
-
from torchvision import transforms
|
7 |
-
|
8 |
-
from esrgan import ESRGAN
|
9 |
-
|
10 |
-
esrgan = ESRGAN()
|
11 |
-
|
12 |
-
def viz(module, input):
|
13 |
-
x = input[0][0]
|
14 |
-
#最多显示4张图
|
15 |
-
min_num = np.minimum(4, x.size()[0])
|
16 |
-
for i in range(min_num):
|
17 |
-
plt.subplot(1, 4, i+1)
|
18 |
-
plt.imshow(x[i].cpu())
|
19 |
-
plt.xticks([]) #去掉横坐标值
|
20 |
-
plt.yticks([]) #去掉纵坐标值
|
21 |
-
plt.show()
|
22 |
-
|
23 |
-
def main():
|
24 |
-
t = transforms.Compose([transforms.ToPILImage(),
|
25 |
-
transforms.Resize((224, 224)),
|
26 |
-
transforms.ToTensor(),
|
27 |
-
transforms.Normalize(mean=[0.485, 0.456, 0.406],
|
28 |
-
std=[0.229, 0.224, 0.225])
|
29 |
-
])
|
30 |
-
|
31 |
-
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
|
32 |
-
|
33 |
-
model = esrgan.net
|
34 |
-
for name, m in model.named_modules():
|
35 |
-
# if not isinstance(m, torch.nn.ModuleList) and \
|
36 |
-
# not isinstance(m, torch.nn.Sequential) and \
|
37 |
-
# type(m) in torch.nn.__dict__.values():
|
38 |
-
# 这里只对卷积层的feature map进行显示
|
39 |
-
if isinstance(m, torch.nn.Conv2d):
|
40 |
-
m.register_forward_pre_hook(viz)
|
41 |
-
img = cv2.imread('image.png')
|
42 |
-
img = t(img).unsqueeze(0).to(device)
|
43 |
-
with torch.no_grad():
|
44 |
-
model(img)
|
45 |
-
|
46 |
-
if __name__ == '__main__':
|
47 |
-
main()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/EuroPython2022/mmocr-demo/configs/_base_/recog_datasets/ST_SA_MJ_real_train.py
DELETED
@@ -1,81 +0,0 @@
|
|
1 |
-
# Text Recognition Training set, including:
|
2 |
-
# Synthetic Datasets: SynthText, SynthAdd, Syn90k
|
3 |
-
# Real Dataset: IC11, IC13, IC15, COCO-Test, IIIT5k
|
4 |
-
|
5 |
-
train_prefix = 'data/mixture'
|
6 |
-
|
7 |
-
train_img_prefix1 = f'{train_prefix}/icdar_2011'
|
8 |
-
train_img_prefix2 = f'{train_prefix}/icdar_2013'
|
9 |
-
train_img_prefix3 = f'{train_prefix}/icdar_2015'
|
10 |
-
train_img_prefix4 = f'{train_prefix}/coco_text'
|
11 |
-
train_img_prefix5 = f'{train_prefix}/IIIT5K'
|
12 |
-
train_img_prefix6 = f'{train_prefix}/SynthText_Add'
|
13 |
-
train_img_prefix7 = f'{train_prefix}/SynthText'
|
14 |
-
train_img_prefix8 = f'{train_prefix}/Syn90k'
|
15 |
-
|
16 |
-
train_ann_file1 = f'{train_prefix}/icdar_2011/train_label.txt',
|
17 |
-
train_ann_file2 = f'{train_prefix}/icdar_2013/train_label.txt',
|
18 |
-
train_ann_file3 = f'{train_prefix}/icdar_2015/train_label.txt',
|
19 |
-
train_ann_file4 = f'{train_prefix}/coco_text/train_label.txt',
|
20 |
-
train_ann_file5 = f'{train_prefix}/IIIT5K/train_label.txt',
|
21 |
-
train_ann_file6 = f'{train_prefix}/SynthText_Add/label.txt',
|
22 |
-
train_ann_file7 = f'{train_prefix}/SynthText/shuffle_labels.txt',
|
23 |
-
train_ann_file8 = f'{train_prefix}/Syn90k/shuffle_labels.txt'
|
24 |
-
|
25 |
-
train1 = dict(
|
26 |
-
type='OCRDataset',
|
27 |
-
img_prefix=train_img_prefix1,
|
28 |
-
ann_file=train_ann_file1,
|
29 |
-
loader=dict(
|
30 |
-
type='AnnFileLoader',
|
31 |
-
repeat=20,
|
32 |
-
file_format='txt',
|
33 |
-
parser=dict(
|
34 |
-
type='LineStrParser',
|
35 |
-
keys=['filename', 'text'],
|
36 |
-
keys_idx=[0, 1],
|
37 |
-
separator=' ')),
|
38 |
-
pipeline=None,
|
39 |
-
test_mode=False)
|
40 |
-
|
41 |
-
train2 = {key: value for key, value in train1.items()}
|
42 |
-
train2['img_prefix'] = train_img_prefix2
|
43 |
-
train2['ann_file'] = train_ann_file2
|
44 |
-
|
45 |
-
train3 = {key: value for key, value in train1.items()}
|
46 |
-
train3['img_prefix'] = train_img_prefix3
|
47 |
-
train3['ann_file'] = train_ann_file3
|
48 |
-
|
49 |
-
train4 = {key: value for key, value in train1.items()}
|
50 |
-
train4['img_prefix'] = train_img_prefix4
|
51 |
-
train4['ann_file'] = train_ann_file4
|
52 |
-
|
53 |
-
train5 = {key: value for key, value in train1.items()}
|
54 |
-
train5['img_prefix'] = train_img_prefix5
|
55 |
-
train5['ann_file'] = train_ann_file5
|
56 |
-
|
57 |
-
train6 = dict(
|
58 |
-
type='OCRDataset',
|
59 |
-
img_prefix=train_img_prefix6,
|
60 |
-
ann_file=train_ann_file6,
|
61 |
-
loader=dict(
|
62 |
-
type='AnnFileLoader',
|
63 |
-
repeat=1,
|
64 |
-
file_format='txt',
|
65 |
-
parser=dict(
|
66 |
-
type='LineStrParser',
|
67 |
-
keys=['filename', 'text'],
|
68 |
-
keys_idx=[0, 1],
|
69 |
-
separator=' ')),
|
70 |
-
pipeline=None,
|
71 |
-
test_mode=False)
|
72 |
-
|
73 |
-
train7 = {key: value for key, value in train6.items()}
|
74 |
-
train7['img_prefix'] = train_img_prefix7
|
75 |
-
train7['ann_file'] = train_ann_file7
|
76 |
-
|
77 |
-
train8 = {key: value for key, value in train6.items()}
|
78 |
-
train8['img_prefix'] = train_img_prefix8
|
79 |
-
train8['ann_file'] = train_ann_file8
|
80 |
-
|
81 |
-
train_list = [train1, train2, train3, train4, train5, train6, train7, train8]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/EveryPizza/stabilityai-stable-diffusion-2/README.md
DELETED
@@ -1,12 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Stabilityai Stable Diffusion 2
|
3 |
-
emoji: 🐢
|
4 |
-
colorFrom: gray
|
5 |
-
colorTo: green
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.20.1
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
---
|
11 |
-
|
12 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|