Commit fb4b01f
Parent(s): eb780e7
Update parquet files (step 15 of 249)
This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
- spaces/101-5/gpt4free/g4f/.v1/CODE_OF_CONDUCT.md +0 -128
- spaces/1gistliPinn/ChatGPT4/Examples/Ativador Do Windows 8.1 Utorrent.md +0 -6
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/DRAGON BALL LEGENDS APK Actualizado 2022 Join Goku and Friends in Epic 3D Battles.md +0 -167
- spaces/232labs/VToonify/vtoonify/model/raft/alt_cuda_corr/correlation.cpp +0 -54
- spaces/2ndelement/voicevox/voicevox_engine/metas/MetasStore.py +0 -72
- spaces/7eu7d7/anime-ai-detect-fucker/README.md +0 -13
- spaces/AIFILMS/generate_human_motion/VQ-Trans/utils/eval_trans.py +0 -580
- spaces/AIGC-Audio/AudioGPT/NeuralSeq/data_gen/tts/wav_processors/__init__.py +0 -2
- spaces/AIGC-Audio/AudioGPT/NeuralSeq/tasks/tts/tts.py +0 -131
- spaces/ALSv/FSW/roop/__init__.py +0 -0
- spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-60e_deepfashion2_trousers_256x192/td_hm_res50_4xb64-60e_deepfashion2_trousers_256x192.py +0 -2861
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/canvasframemanager-plugin.js +0 -19
- spaces/Aki004/herta-so-vits/cluster/train_cluster.py +0 -89
- spaces/Alpaca233/SadTalker/src/facerender/modules/keypoint_detector.py +0 -179
- spaces/Andres99/Tune-A-Video-Training-UI/inference.py +0 -109
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/schedulers/test_scheduler_dpm_sde.py +0 -168
- spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/assigners/__init__.py +0 -16
- spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_d6_r101-d16_512x1024_80k_cityscapes.py +0 -2
- spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/superbooga/chromadb.py +0 -125
- spaces/Audio-AGI/AudioSep/models/CLAP/open_clip/feature_fusion.py +0 -192
- spaces/Awiny/Image2Paragraph/models/controlnet_model.py +0 -56
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/datasets/prepare_cocofied_lvis.py +0 -176
- spaces/Banbri/zcvzcv/src/app/engine/forbidden.ts +0 -6
- spaces/Bart92/RVC_HF/demucs/compressed.py +0 -115
- spaces/Benson/text-generation/Examples/Autobs Simulador Indonesia Apk ltima Versin.md +0 -71
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/ansi.py +0 -240
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/packages/backports/__init__.py +0 -0
- spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/filelist.py +0 -371
- spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/importlib_metadata/__init__.py +0 -1047
- spaces/BilalSardar/Voice-Cloning/app.py +0 -165
- spaces/CVPR/LIVE/pybind11/include/pybind11/eigen.h +0 -607
- spaces/CVPR/LIVE/setup.py +0 -98
- spaces/CVPR/LIVE/thrust/thrust/detail/type_traits/has_member_function.h +0 -118
- spaces/CVPR/LIVE/thrust/thrust/sort.h +0 -1362
- spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/gather.h +0 -23
- spaces/CVPR/LIVE/thrust/thrust/type_traits/integer_sequence.h +0 -262
- spaces/CVPR/lama-example/models/ade20k/segm_lib/nn/modules/comm.py +0 -131
- spaces/Chirag1994/Melanoma_Skin_Cancer_Detection_App/README.md +0 -13
- spaces/Chukwuka/FoodVision-Model/README.md +0 -275
- spaces/CikeyQI/Yunzai/Yunzai/plugins/ws-plugin/config/system/cfg_system.js +0 -201
- spaces/CikeyQI/meme-api/meme_generator/memes/karyl_point/__init__.py +0 -18
- spaces/CofAI/chat.v1/README.md +0 -11
- spaces/CofAI/chat/g4f/Provider/Providers/Aichat.py +0 -35
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fsspec/implementations/dbfs.py +0 -457
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/components/slider.py +0 -210
- spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/utils/writer.py +0 -56
- spaces/DeclK/pose/tools/inferencer.py +0 -160
- spaces/DeepLabCut/MegaDetector_DeepLabCut/fonts/read.md +0 -0
- spaces/Demosthene-OR/avr23-cds-translation/app.py +0 -80
- spaces/Detomo/ai-avatar-backend/app.js +0 -45
spaces/101-5/gpt4free/g4f/.v1/CODE_OF_CONDUCT.md
DELETED
@@ -1,128 +0,0 @@
# Contributor Covenant Code of Conduct

## Our Pledge

We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.

We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.

## Our Standards

Examples of behavior that contributes to a positive environment for our
community include:

* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
  and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
  overall community

Examples of unacceptable behavior include:

* The use of sexualized language or imagery, and sexual attention or
  advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
  address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
  professional setting

## Enforcement Responsibilities

Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.

Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.

## Scope

This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
https://t.me/xtekky.
All complaints will be reviewed and investigated promptly and fairly.

All community leaders are obligated to respect the privacy and security of the
reporter of any incident.

## Enforcement Guidelines

Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:

### 1. Correction

**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.

**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.

### 2. Warning

**Community Impact**: A violation through a single incident or series
of actions.

**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.

### 3. Temporary Ban

**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.

**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.

### 4. Permanent Ban

**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.

**Consequence**: A permanent ban from any sort of public interaction within
the community.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.

Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).

[homepage]: https://www.contributor-covenant.org

For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.
spaces/1gistliPinn/ChatGPT4/Examples/Ativador Do Windows 8.1 Utorrent.md
DELETED
@@ -1,6 +0,0 @@
<h2>Ativador do Windows 8.1 utorrent</h2><br /><p><b><b>DOWNLOAD</b> ->->->-> <a href="https://imgfil.com/2uxZSM">https://imgfil.com/2uxZSM</a></b></p><br /><br />

January 7, 2022 - Windows 8.1 Crack is one of the OS widely used by millions of people besides Windows. This OS is simple and easy to use. If you . NET developer, you will be able to create applications that use most of the technologies available on Windows. But if you are a .NET programmer, then I strongly recommend that you download Windows 8.1. It is a Windows operating system, but with a different architecture, which is an improved version of Windows. All security updates and bug fixes that Windows 8.1 is currently receiving are also available for Windows 8.1. 8a78ff9644<br />
<br />
<br />
<p></p>
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/DRAGON BALL LEGENDS APK Actualizado 2022 Join Goku and Friends in Epic 3D Battles.md
DELETED
@@ -1,167 +0,0 @@
<br />
<h1>Dragon Ball Legends APK Actualizado 2022: The Ultimate Anime Action RPG for Android</h1>
<p>If you are a fan of the Dragon Ball anime series, you will love playing Dragon Ball Legends APK, the latest game from Bandai Namco Entertainment. Dragon Ball Legends APK is an action-packed anime RPG that lets you summon your favorite DB characters for battle in stunning 3D graphics. You can enjoy a new original story based on the mysterious Saiyan Shallot, or relive the classic DB sagas with Goku, Vegeta, Frieza, and more. Dragon Ball Legends APK is free to download and play on your Android device, and it is updated regularly with new features and content. In this article, we will tell you everything you need to know about Dragon Ball Legends APK, including its features, how to download and install it, and why you should play it.</p>
<h2>dragon ball legends apk actualizado 2022</h2><br /><p><b><b>Download</b> > <a href="https://urlin.us/2uSX4z">https://urlin.us/2uSX4z</a></b></p><br /><br />
<h2>What is Dragon Ball Legends APK?</h2>
<p>Dragon Ball Legends APK is an Android game that is based on the popular Dragon Ball anime series. It is developed by Bandai Namco Entertainment, the same company that created other DB games such as Dragon Ball Z Dokkan Battle and Dragon Ball FighterZ. Dragon Ball Legends APK is an anime action RPG that combines fast-paced fighting with card-based strategy. You can control your favorite DB heroes in 3D battles, using your ability cards to unleash powerful combos and special moves. You can also enjoy a new original story that features a new character designed by Akira Toriyama, the creator of Dragon Ball. You can also join other players from around the world in live PVP matches, or test your skills in casual or ranked matches.</p>
<h3>Features of Dragon Ball Legends APK</h3>
<p>Dragon Ball Legends APK has many features that make it one of the best DB games for Android. Here are some of them:</p>
<h4>Epic 3D visuals and animations</h4>
<p>Dragon Ball Legends APK brings the anime to life on your mobile device with high-quality 3D characters and stages. You can see your favorite DB heroes and villains in action, with smooth animations and modern takes on their iconic moves. You can also witness the amazing team-based Rising Rush attack, which fills the screen with explosive effects.</p>
<h4>Intuitive fighting controls and card-based strategy</h4>
<p>Dragon Ball Legends APK has simple and intuitive fighting controls that let you dodge, counter, and attack your opponent in real time. You can also use your ability cards to build your own strategy, choosing when to use melee, ranged, or special attacks. You can also customize your deck of cards to suit your play style and preferences.</p>
<p>dragon ball legends apk mod 2022<br />
dragon ball legends apk download latest version<br />
dragon ball legends apk hack 2022<br />
dragon ball legends apk obb 2022<br />
dragon ball legends apk unlimited crystals 2022<br />
dragon ball legends apk offline 2022<br />
dragon ball legends apk update 2022<br />
dragon ball legends apk free download 2022<br />
dragon ball legends apk no verification 2022<br />
dragon ball legends apk android 2022<br />
dragon ball legends apk ios 2022<br />
dragon ball legends apk full 2022<br />
dragon ball legends apk english 2022<br />
dragon ball legends apk español 2022<br />
dragon ball legends apk mega 2022<br />
dragon ball legends apk mediafire 2022<br />
dragon ball legends apk uptodown 2022<br />
dragon ball legends apk revdl 2022<br />
dragon ball legends apk rexdl 2022<br />
dragon ball legends apk pure 2022<br />
dragon ball legends apk mirror 2022<br />
dragon ball legends apk mob.org 2022<br />
dragon ball legends apk apkpure 2022<br />
dragon ball legends apk apkmirror 2022<br />
dragon ball legends apk apkmody 2022<br />
dragon ball legends apk andropalace 2022<br />
dragon ball legends apk android republic 2022<br />
dragon ball legends apk android oyun club 2022<br />
dragon ball legends apk android game zone bd 2022<br />
dragon ball legends apk android gamespot net 2022<br />
dragon ball legends apk android gamespot com 2022<br />
dragon ball legends apk android gamespot in 2022<br />
dragon ball legends apk android gamespot pk 2022<br />
dragon ball legends apk android gamespot xyz 2022<br />
dragon ball legends apk android gamespot co uk 2022<br />
dragon ball legends apk android gamespot ca 2022<br />
dragon ball legends apk android gamespot au 2022<br />
dragon ball legends apk android gamespot nz 2022<br />
dragon ball legends apk android gamespot za 2022<br />
dragon ball legends apk android gamespot ng 2022<br />
dragon ball legends apk android gamespot ke 2022<br />
dragon ball legends apk android gamespot gh 2022<br />
dragon ball legends apk android gamespot eg 2022<br />
dragon ball legends apk android gamespot ma 2022<br />
dragon ball legends apk android gamespot dz 2022<br />
dragon ball legends apk android gamespot tn 2022<br />
dragon ball legends apk android gamespot ly 2022<br />
dragon ball legends apk android gamespot sd 2022<br />
dragon ball legends apk android gamespot et 2022</p>
<h4>Original RPG storyline and voice acting</h4>
<p>Dragon Ball Legends APK has an original RPG storyline that follows the adventures of Shallot, a new Saiyan character designed by Akira Toriyama. You can join Shallot and other DB characters as they try to save the world from a mysterious threat. You can also enjoy voice acting from the original anime cast, which adds more immersion and authenticity to the game.</p>
<h4>Iconic DB characters and sagas</h4>
<p>Dragon Ball Legends APK has more than 400 characters to collect and train, from various DB anime series such as DBZ, DBGT, and DBS. You can summon characters such as Goku, Vegeta, Trunks, Piccolo, Frieza, Broly, Majin Buu, and many more. You can also play through classic DB sagas such as the Saiyan Saga, the Frieza Saga, the Cell Saga, and the Tournament of Power Saga. You can also participate in special events and limited-time missions that feature exclusive characters and rewards.</p>
<h3>How to download and install Dragon Ball Legends APK?</h3>
<p>Dragon Ball Legends APK is easy to download and install on your Android device. Here are the steps you need to follow:</p>
<h4>Requirements and compatibility</h4>
<p>Before you download Dragon Ball Legends APK, you need to make sure that your device meets the following requirements:</p>
<ul>
<li>Your device must have Android 6.0 or higher.</li>
<li>Your device must have at least 2 GB of RAM and 1.5 GB of free storage space.</li>
<li>Your device must have a stable internet connection.</li>
<li>Your device must support OpenGL ES 3.0 or higher.</li>
</ul>
<p>You can check the compatibility of your device by visiting the official website of Dragon Ball Legends APK. You can also see the list of supported devices and the minimum specifications for the game.</p>
<h4>Steps to download and install</h4>
<p>Once you have confirmed that your device is compatible, you can follow these steps to download and install Dragon Ball Legends APK:</p>
<ol>
<li>Go to the official website of Dragon Ball Legends APK and click on the "Download" button.</li>
<li>You will be redirected to a third-party website where you can download the APK file. Click on the "Download APK" button and wait for the file to be downloaded.</li>
<li>After the file is downloaded, locate it in your device's file manager and tap on it to install it. You may need to enable "Unknown sources" in your device's settings to allow the installation of apps from sources other than Google Play Store.</li>
<li>Follow the on-screen instructions to complete the installation process. You may need to grant some permissions to the app to access your device's features.</li>
<li>Once the installation is done, you can launch the app and enjoy playing Dragon Ball Legends APK.</li>
</ol>
<h4>Tips and tricks to play Dragon Ball Legends APK</h4>
<p>If you want to improve your skills and performance in Dragon Ball Legends APK, here are some tips and tricks that you can use:</p>
<ul>
<li>Learn the basics of combat, such as how to dodge, counter, charge, switch, and use cards. You can practice in the training mode or watch the tutorial videos in the game.</li>
<li>Upgrade your characters by leveling them up, increasing their soul boost, equipping them with items, and unlocking their limit breaks. You can also use Z power and Z medals to increase their star rating and stats.</li>
<li>Build a balanced team that has a good synergy and covers different elements, roles, and abilities. You can also use tags and categories to boost your team's power and bonuses.</li>
<li>Use your Rising Rush wisely, as it can turn the tide of battle in your favor. You can activate it by using seven different cards in a row, and then choose one card for the final attack. Try to guess your opponent's card to cancel their Rising Rush.</li>
<li>Play online with other players in PVP mode, where you can compete in casual or ranked matches, or join guilds and co-op battles. You can also earn rewards such as crystals, coins, tickets, and items by completing missions and events.</li>
</ul>
<h3>Why should you play Dragon Ball Legends APK?</h3>
<p>Dragon Ball Legends APK is a game that will appeal to any DB fan or anime lover. It has many advantages and benefits that make it worth playing. Here are some of them:</p>
<h4>Pros and cons of Dragon Ball Legends APK</h4>
<p>Like any game, Dragon Ball Legends APK has its pros and cons. Here are some of them:</p>
<table>
<tr>
<th>Pros</th>
<th>Cons</th>
</tr>
<tr>
<td>- Free to download and play</td>
<td>- Requires internet connection</td>
</tr>
<tr>
<td>- Amazing 3D graphics and animations</td>
<td>- May drain battery and data</td>
</tr>
<tr>
<td>- Simple and fun fighting controls and strategy</td>
<td>- May be repetitive and grindy</td>
</tr>
<tr>
<td>- Original RPG story and voice acting</td>
<td>- May have some bugs and glitches</td>
</tr>
<tr>
<td>- Iconic DB characters and sagas</td>
<td>- May have some balance issues and power creep</td>
</tr>
<tr>
<td>- Online PVP and co-op modes</td>
<td>- May have some lag and disconnect issues</td>
</tr>
<tr>
<td>- Regular updates and events</td>
<td>- May be hard to get rare characters and items</td>
</tr>
</table>
<h4>Reviews and ratings of Dragon Ball Legends APK</h4>
<p>Dragon Ball Legends APK has received positive reviews and ratings from players and critics alike. It has a 4.2 out of 5 stars rating on Google Play Store, based on more than 1.5 million reviews. It also has a 4.6 out of 5 stars rating on App Store, based on more than 100 thousand reviews. Some of the comments from the users are:</p>
<ul>
<li>"This game is awesome! The graphics are amazing, the gameplay is smooth, and the story is engaging. I love the characters and the voice acting. It feels like I'm watching the anime."</li>
<li>"This game is very fun and addictive. I like the card system and the strategy involved. The PVP mode is challenging and rewarding. The events are exciting and generous."</li>
<li>"This game is good, but it could be better. I wish there were more modes and features. I also wish there were more ways to get crystals and items. The game can be frustrating sometimes."</li>
<li>"This game is bad, don't waste your time. The game is rigged and unfair. The PVP mode is full of hackers and cheaters. The game is boring and repetitive. The game is greedy and pay-to-win."</li>
</ul>
<h4>Comparison with other DB games for Android</h4>
<p>Dragon Ball Legends APK is not the only DB game for Android. There are other games that you can try, such as:</p>
<ul>
<li>Dragon Ball Z Dokkan Battle: A puzzle RPG game that lets you collect and awaken DB characters, create your own team, and fight in various events and modes.</li>
<li>Dragon Ball Z Kakarot: An action RPG game that lets you relive the DBZ story, explore the open world, train your characters, and fight in epic battles.</li>
<li>Dragon Ball FighterZ: A fighting game that lets you choose from a roster of DB characters, customize your team, and fight in online or offline matches.</li>
<li>Dragon Ball Legends APK vs Dragon Ball Z Dokkan Battle: Dragon Ball Legends APK has better graphics, animations, voice acting, and story than Dragon Ball Z Dokkan Battle. However, Dragon Ball Z Dokkan Battle has more characters, modes, events, and customization than Dragon Ball Legends APK.</li>
<li>Dragon Ball Legends APK vs Dragon Ball Z Kakarot: Dragon Ball Legends APK has more online features, PVP modes, co-op modes, and updates than Dragon Ball Z Kakarot. However, Dragon Ball Z Kakarot has more offline features, exploration, quests, side stories, and mini-games than Dragon Ball Legends APK.</li>
<li>Dragon Ball Legends APK vs Dragon Ball FighterZ: Dragon Ball Legends APK has more RPG elements, card-based strategy, original story, and character development than Dragon Ball FighterZ. However, Dragon Ball FighterZ has more fighting elements, combo system, competitive mode, and cross-platform support than Dragon Ball Legends APK.</li>
</ul>
<h2>Conclusion</h2>
<p>Dragon Ball Legends APK is a great game for anyone who loves DB or anime in general. It is a free-to-play anime action RPG that lets you summon your favorite DB characters for battle in stunning 3D graphics. You can enjoy a new original story based on the mysterious Saiyan Shallot, or relive the classic DB sagas with Goku, Vegeta, Frieza, and more. You can also join other players from around the world in live PVP matches, or test your skills in casual or ranked matches. Dragon Ball Legends APK has many features that make it one of the best DB games for Android, such as epic 3D visuals and animations, intuitive fighting controls and card-based strategy, original RPG storyline and voice acting, and iconic DB characters and sagas. Dragon Ball Legends APK is easy to download and install on your Android device, and it is updated regularly with new features and content. You can also use some tips and tricks to improve your skills and performance in the game. Dragon Ball Legends APK has its pros and cons, as well as reviews and ratings from other players and critics. You can also compare it with other DB games for Android, such as Dragon Ball Z Dokkan Battle, Dragon Ball Z Kakarot, and Dragon Ball FighterZ. We hope that this article has helped you learn more about Dragon Ball Legends APK, and that you will enjoy playing it on your Android device.</p>
<h3>FAQs</h3>
<p>Here are some frequently asked questions about Dragon Ball Legends APK:</p>
<ul>
<li>Q: Is Dragon Ball Legends APK safe to download and play?</li>
<li>A: Yes, Dragon Ball Legends APK is safe to download and play, as long as you download it from the official website or a trusted source. You should also scan the APK file with an antivirus program before installing it.</li>
<li>Q: How can I get more crystals and items in Dragon Ball Legends APK?</li>
<li>A: You can get more crystals and items in Dragon Ball Legends APK by completing missions, events, challenges, achievements, login bonuses, daily rewards, and summoning banners. You can also buy them with real money through in-app purchases.</li>
<li>Q: How can I contact the support team of Dragon Ball Legends APK?</li>
<li>A: You can contact the support team of Dragon Ball Legends APK by visiting the official website of the game and clicking on the "Support" button. You can also send them an email at [email protected] or use the in-game feedback option.</li>
<li>Q: How can I play Dragon Ball Legends APK on PC?</li>
<li>A: You can play Dragon Ball Legends APK on PC by using an Android emulator such as BlueStacks, NoxPlayer, or LDPlayer. You can download and install the emulator on your PC, then download and install the APK file of the game on the emulator. You can then launch the game and enjoy playing it on a bigger screen.</li>
<li>Q: How can I update Dragon Ball Legends APK?</li>
<li>A: You can update Dragon Ball Legends APK by visiting the official website of the game and downloading the latest version of the APK file. You can also check for updates in the Google Play Store or App Store, if you have installed the game from there.</li>
</ul></p> 197e85843d<br />
<br />
<br />
spaces/232labs/VToonify/vtoonify/model/raft/alt_cuda_corr/correlation.cpp
DELETED
@@ -1,54 +0,0 @@
#include <torch/extension.h>
#include <vector>

// CUDA forward declarations
std::vector<torch::Tensor> corr_cuda_forward(
  torch::Tensor fmap1,
  torch::Tensor fmap2,
  torch::Tensor coords,
  int radius);

std::vector<torch::Tensor> corr_cuda_backward(
  torch::Tensor fmap1,
  torch::Tensor fmap2,
  torch::Tensor coords,
  torch::Tensor corr_grad,
  int radius);

// C++ interface
#define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)

std::vector<torch::Tensor> corr_forward(
    torch::Tensor fmap1,
    torch::Tensor fmap2,
    torch::Tensor coords,
    int radius) {
  CHECK_INPUT(fmap1);
  CHECK_INPUT(fmap2);
  CHECK_INPUT(coords);

  return corr_cuda_forward(fmap1, fmap2, coords, radius);
}


std::vector<torch::Tensor> corr_backward(
    torch::Tensor fmap1,
    torch::Tensor fmap2,
    torch::Tensor coords,
    torch::Tensor corr_grad,
    int radius) {
  CHECK_INPUT(fmap1);
  CHECK_INPUT(fmap2);
  CHECK_INPUT(coords);
  CHECK_INPUT(corr_grad);

  return corr_cuda_backward(fmap1, fmap2, coords, corr_grad, radius);
}


PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("forward", &corr_forward, "CORR forward");
  m.def("backward", &corr_backward, "CORR backward");
}
spaces/2ndelement/voicevox/voicevox_engine/metas/MetasStore.py
DELETED
@@ -1,72 +0,0 @@
import json
from pathlib import Path
from typing import TYPE_CHECKING, Dict, List, Tuple

from voicevox_engine.metas.Metas import CoreSpeaker, EngineSpeaker, Speaker, StyleInfo

if TYPE_CHECKING:
    from voicevox_engine.synthesis_engine.synthesis_engine_base import (
        SynthesisEngineBase,
    )


class MetasStore:
    """
    話者やスタイルのメタ情報を管理する
    """

    def __init__(self, engine_speakers_path: Path) -> None:
        self._engine_speakers_path = engine_speakers_path
        self._loaded_metas: Dict[str, EngineSpeaker] = {
            folder.name: EngineSpeaker(
                **json.loads((folder / "metas.json").read_text(encoding="utf-8"))
            )
            for folder in engine_speakers_path.iterdir()
        }

    def speaker_engine_metas(self, speaker_uuid: str) -> EngineSpeaker:
        return self.loaded_metas[speaker_uuid]

    def combine_metas(self, core_metas: List[CoreSpeaker]) -> List[Speaker]:
        """
        与えられたmetaにエンジンのコア情報を付加して返す
        core_metas: コアのmetas()が返すJSONのModel
        """

        return [
            Speaker(
                **self.speaker_engine_metas(speaker_meta.speaker_uuid).dict(),
                **speaker_meta.dict(),
            )
            for speaker_meta in core_metas
        ]

    # FIXME: engineではなくList[CoreSpeaker]を渡す形にすることで
    # SynthesisEngineBaseによる循環importを修正する
    def load_combined_metas(self, engine: "SynthesisEngineBase") -> List[Speaker]:
        """
        与えられたエンジンから、コア・エンジン両方の情報を含んだMetasを返す
        """

        core_metas = [CoreSpeaker(**speaker) for speaker in json.loads(engine.speakers)]
        return self.combine_metas(core_metas)

    @property
    def engine_speakers_path(self) -> Path:
        return self._engine_speakers_path

    @property
    def loaded_metas(self) -> Dict[str, EngineSpeaker]:
        return self._loaded_metas


def construct_lookup(speakers: List[Speaker]) -> Dict[int, Tuple[Speaker, StyleInfo]]:
    """
    `{style.id: StyleInfo}`の変換テーブル
    """

    lookup_table = dict()
    for speaker in speakers:
        for style in speaker.styles:
            lookup_table[style.id] = (speaker, style)
    return lookup_table
spaces/7eu7d7/anime-ai-detect-fucker/README.md
DELETED
@@ -1,13 +0,0 @@
---
title: Anime Ai Detect Attacker
emoji: 🌖
colorFrom: yellow
colorTo: purple
sdk: gradio
sdk_version: 3.16.2
app_file: app.py
pinned: false
license: apache-2.0
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/AIFILMS/generate_human_motion/VQ-Trans/utils/eval_trans.py
DELETED
@@ -1,580 +0,0 @@
import os

import clip
import numpy as np
import torch
from scipy import linalg

import visualization.plot_3d_global as plot_3d
from utils.motion_process import recover_from_ric


def tensorborad_add_video_xyz(writer, xyz, nb_iter, tag, nb_vis=4, title_batch=None, outname=None):
    xyz = xyz[:1]
    bs, seq = xyz.shape[:2]
    xyz = xyz.reshape(bs, seq, -1, 3)
    plot_xyz = plot_3d.draw_to_batch(xyz.cpu().numpy(), title_batch, outname)
    plot_xyz = np.transpose(plot_xyz, (0, 1, 4, 2, 3))
    writer.add_video(tag, plot_xyz, nb_iter, fps=20)

@torch.no_grad()
def evaluation_vqvae(out_dir, val_loader, net, logger, writer, nb_iter, best_fid, best_iter, best_div, best_top1, best_top2, best_top3, best_matching, eval_wrapper, draw=True, save=True, savegif=False, savenpy=False):
    net.eval()
    nb_sample = 0

    draw_org = []
    draw_pred = []
    draw_text = []

    motion_annotation_list = []
    motion_pred_list = []

    R_precision_real = 0
    R_precision = 0

    nb_sample = 0
    matching_score_real = 0
    matching_score_pred = 0
    for batch in val_loader:
        word_embeddings, pos_one_hots, caption, sent_len, motion, m_length, token, name = batch

        motion = motion.cuda()
        et, em = eval_wrapper.get_co_embeddings(word_embeddings, pos_one_hots, sent_len, motion, m_length)
        bs, seq = motion.shape[0], motion.shape[1]

        num_joints = 21 if motion.shape[-1] == 251 else 22

        pred_pose_eval = torch.zeros((bs, seq, motion.shape[-1])).cuda()

        for i in range(bs):
            pose = val_loader.dataset.inv_transform(motion[i:i+1, :m_length[i], :].detach().cpu().numpy())
            pose_xyz = recover_from_ric(torch.from_numpy(pose).float().cuda(), num_joints)

            pred_pose, loss_commit, perplexity = net(motion[i:i+1, :m_length[i]])
            pred_denorm = val_loader.dataset.inv_transform(pred_pose.detach().cpu().numpy())
            pred_xyz = recover_from_ric(torch.from_numpy(pred_denorm).float().cuda(), num_joints)

            if savenpy:
                np.save(os.path.join(out_dir, name[i]+'_gt.npy'), pose_xyz[:, :m_length[i]].cpu().numpy())
                np.save(os.path.join(out_dir, name[i]+'_pred.npy'), pred_xyz.detach().cpu().numpy())

            pred_pose_eval[i:i+1, :m_length[i], :] = pred_pose

            if i < min(4, bs):
                draw_org.append(pose_xyz)
                draw_pred.append(pred_xyz)
                draw_text.append(caption[i])

        et_pred, em_pred = eval_wrapper.get_co_embeddings(word_embeddings, pos_one_hots, sent_len, pred_pose_eval, m_length)

        motion_pred_list.append(em_pred)
        motion_annotation_list.append(em)

        temp_R, temp_match = calculate_R_precision(et.cpu().numpy(), em.cpu().numpy(), top_k=3, sum_all=True)
        R_precision_real += temp_R
        matching_score_real += temp_match
        temp_R, temp_match = calculate_R_precision(et_pred.cpu().numpy(), em_pred.cpu().numpy(), top_k=3, sum_all=True)
        R_precision += temp_R
        matching_score_pred += temp_match

        nb_sample += bs

    motion_annotation_np = torch.cat(motion_annotation_list, dim=0).cpu().numpy()
    motion_pred_np = torch.cat(motion_pred_list, dim=0).cpu().numpy()
    gt_mu, gt_cov = calculate_activation_statistics(motion_annotation_np)
    mu, cov = calculate_activation_statistics(motion_pred_np)

    diversity_real = calculate_diversity(motion_annotation_np, 300 if nb_sample > 300 else 100)
    diversity = calculate_diversity(motion_pred_np, 300 if nb_sample > 300 else 100)

    R_precision_real = R_precision_real / nb_sample
    R_precision = R_precision / nb_sample

    matching_score_real = matching_score_real / nb_sample
    matching_score_pred = matching_score_pred / nb_sample

    fid = calculate_frechet_distance(gt_mu, gt_cov, mu, cov)

    msg = f"--> \t Eva. Iter {nb_iter} :, FID. {fid:.4f}, Diversity Real. {diversity_real:.4f}, Diversity. {diversity:.4f}, R_precision_real. {R_precision_real}, R_precision. {R_precision}, matching_score_real. {matching_score_real}, matching_score_pred. {matching_score_pred}"
    logger.info(msg)

    if draw:
        writer.add_scalar('./Test/FID', fid, nb_iter)
        writer.add_scalar('./Test/Diversity', diversity, nb_iter)
        writer.add_scalar('./Test/top1', R_precision[0], nb_iter)
        writer.add_scalar('./Test/top2', R_precision[1], nb_iter)
        writer.add_scalar('./Test/top3', R_precision[2], nb_iter)
        writer.add_scalar('./Test/matching_score', matching_score_pred, nb_iter)

        if nb_iter % 5000 == 0:
            for ii in range(4):
                tensorborad_add_video_xyz(writer, draw_org[ii], nb_iter, tag='./Vis/org_eval'+str(ii), nb_vis=1, title_batch=[draw_text[ii]], outname=[os.path.join(out_dir, 'gt'+str(ii)+'.gif')] if savegif else None)

        if nb_iter % 5000 == 0:
            for ii in range(4):
                tensorborad_add_video_xyz(writer, draw_pred[ii], nb_iter, tag='./Vis/pred_eval'+str(ii), nb_vis=1, title_batch=[draw_text[ii]], outname=[os.path.join(out_dir, 'pred'+str(ii)+'.gif')] if savegif else None)

    if fid < best_fid:
        msg = f"--> --> \t FID Improved from {best_fid:.5f} to {fid:.5f} !!!"
        logger.info(msg)
        best_fid, best_iter = fid, nb_iter
        if save:
            torch.save({'net': net.state_dict()}, os.path.join(out_dir, 'net_best_fid.pth'))

    if abs(diversity_real - diversity) < abs(diversity_real - best_div):
        msg = f"--> --> \t Diversity Improved from {best_div:.5f} to {diversity:.5f} !!!"
        logger.info(msg)
        best_div = diversity
        if save:
            torch.save({'net': net.state_dict()}, os.path.join(out_dir, 'net_best_div.pth'))

    if R_precision[0] > best_top1:
        msg = f"--> --> \t Top1 Improved from {best_top1:.4f} to {R_precision[0]:.4f} !!!"
        logger.info(msg)
        best_top1 = R_precision[0]
        if save:
            torch.save({'net': net.state_dict()}, os.path.join(out_dir, 'net_best_top1.pth'))

    if R_precision[1] > best_top2:
        msg = f"--> --> \t Top2 Improved from {best_top2:.4f} to {R_precision[1]:.4f} !!!"
        logger.info(msg)
        best_top2 = R_precision[1]

    if R_precision[2] > best_top3:
        msg = f"--> --> \t Top3 Improved from {best_top3:.4f} to {R_precision[2]:.4f} !!!"
        logger.info(msg)
        best_top3 = R_precision[2]

    if matching_score_pred < best_matching:
        msg = f"--> --> \t matching_score Improved from {best_matching:.5f} to {matching_score_pred:.5f} !!!"
        logger.info(msg)
        best_matching = matching_score_pred
        if save:
            torch.save({'net': net.state_dict()}, os.path.join(out_dir, 'net_best_matching.pth'))

    if save:
        torch.save({'net': net.state_dict()}, os.path.join(out_dir, 'net_last.pth'))

    net.train()
    return best_fid, best_iter, best_div, best_top1, best_top2, best_top3, best_matching, writer, logger


@torch.no_grad()
def evaluation_transformer(out_dir, val_loader, net, trans, logger, writer, nb_iter, best_fid, best_iter, best_div, best_top1, best_top2, best_top3, best_matching, clip_model, eval_wrapper, draw=True, save=True, savegif=False):

    trans.eval()
    nb_sample = 0

    draw_org = []
    draw_pred = []
    draw_text = []
    draw_text_pred = []

    motion_annotation_list = []
    motion_pred_list = []
    R_precision_real = 0
    R_precision = 0
    matching_score_real = 0
    matching_score_pred = 0

    nb_sample = 0
    for i in range(1):
        for batch in val_loader:
            word_embeddings, pos_one_hots, clip_text, sent_len, pose, m_length, token, name = batch

            bs, seq = pose.shape[:2]
            num_joints = 21 if pose.shape[-1] == 251 else 22

            text = clip.tokenize(clip_text, truncate=True).cuda()

            feat_clip_text = clip_model.encode_text(text).float()
            pred_pose_eval = torch.zeros((bs, seq, pose.shape[-1])).cuda()
            pred_len = torch.ones(bs).long()

            for k in range(bs):
                try:
                    index_motion = trans.sample(feat_clip_text[k:k+1], False)
                except:
                    index_motion = torch.ones(1, 1).cuda().long()

                pred_pose = net.forward_decoder(index_motion)
                cur_len = pred_pose.shape[1]

                pred_len[k] = min(cur_len, seq)
                pred_pose_eval[k:k+1, :cur_len] = pred_pose[:, :seq]

                if draw:
                    pred_denorm = val_loader.dataset.inv_transform(pred_pose.detach().cpu().numpy())
                    pred_xyz = recover_from_ric(torch.from_numpy(pred_denorm).float().cuda(), num_joints)

                    if i == 0 and k < 4:
                        draw_pred.append(pred_xyz)
                        draw_text_pred.append(clip_text[k])

            et_pred, em_pred = eval_wrapper.get_co_embeddings(word_embeddings, pos_one_hots, sent_len, pred_pose_eval, pred_len)

            if i == 0:
                pose = pose.cuda().float()

                et, em = eval_wrapper.get_co_embeddings(word_embeddings, pos_one_hots, sent_len, pose, m_length)
                motion_annotation_list.append(em)
                motion_pred_list.append(em_pred)

                if draw:
                    pose = val_loader.dataset.inv_transform(pose.detach().cpu().numpy())
                    pose_xyz = recover_from_ric(torch.from_numpy(pose).float().cuda(), num_joints)

                    for j in range(min(4, bs)):
                        draw_org.append(pose_xyz[j][:m_length[j]].unsqueeze(0))
                        draw_text.append(clip_text[j])

                temp_R, temp_match = calculate_R_precision(et.cpu().numpy(), em.cpu().numpy(), top_k=3, sum_all=True)
                R_precision_real += temp_R
                matching_score_real += temp_match
                temp_R, temp_match = calculate_R_precision(et_pred.cpu().numpy(), em_pred.cpu().numpy(), top_k=3, sum_all=True)
                R_precision += temp_R
                matching_score_pred += temp_match

                nb_sample += bs

    motion_annotation_np = torch.cat(motion_annotation_list, dim=0).cpu().numpy()
    motion_pred_np = torch.cat(motion_pred_list, dim=0).cpu().numpy()
    gt_mu, gt_cov = calculate_activation_statistics(motion_annotation_np)
    mu, cov = calculate_activation_statistics(motion_pred_np)

    diversity_real = calculate_diversity(motion_annotation_np, 300 if nb_sample > 300 else 100)
    diversity = calculate_diversity(motion_pred_np, 300 if nb_sample > 300 else 100)

    R_precision_real = R_precision_real / nb_sample
    R_precision = R_precision / nb_sample

    matching_score_real = matching_score_real / nb_sample
    matching_score_pred = matching_score_pred / nb_sample

    fid = calculate_frechet_distance(gt_mu, gt_cov, mu, cov)

    msg = f"--> \t Eva. Iter {nb_iter} :, FID. {fid:.4f}, Diversity Real. {diversity_real:.4f}, Diversity. {diversity:.4f}, R_precision_real. {R_precision_real}, R_precision. {R_precision}, matching_score_real. {matching_score_real}, matching_score_pred. {matching_score_pred}"
    logger.info(msg)

    if draw:
        writer.add_scalar('./Test/FID', fid, nb_iter)
        writer.add_scalar('./Test/Diversity', diversity, nb_iter)
        writer.add_scalar('./Test/top1', R_precision[0], nb_iter)
        writer.add_scalar('./Test/top2', R_precision[1], nb_iter)
        writer.add_scalar('./Test/top3', R_precision[2], nb_iter)
        writer.add_scalar('./Test/matching_score', matching_score_pred, nb_iter)

        if nb_iter % 10000 == 0:
            for ii in range(4):
                tensorborad_add_video_xyz(writer, draw_org[ii], nb_iter, tag='./Vis/org_eval'+str(ii), nb_vis=1, title_batch=[draw_text[ii]], outname=[os.path.join(out_dir, 'gt'+str(ii)+'.gif')] if savegif else None)

        if nb_iter % 10000 == 0:
            for ii in range(4):
                tensorborad_add_video_xyz(writer, draw_pred[ii], nb_iter, tag='./Vis/pred_eval'+str(ii), nb_vis=1, title_batch=[draw_text_pred[ii]], outname=[os.path.join(out_dir, 'pred'+str(ii)+'.gif')] if savegif else None)

    if fid < best_fid:
        msg = f"--> --> \t FID Improved from {best_fid:.5f} to {fid:.5f} !!!"
        logger.info(msg)
        best_fid, best_iter = fid, nb_iter
        if save:
            torch.save({'trans': trans.state_dict()}, os.path.join(out_dir, 'net_best_fid.pth'))

    if matching_score_pred < best_matching:
        msg = f"--> --> \t matching_score Improved from {best_matching:.5f} to {matching_score_pred:.5f} !!!"
        logger.info(msg)
        best_matching = matching_score_pred

    if abs(diversity_real - diversity) < abs(diversity_real - best_div):
        msg = f"--> --> \t Diversity Improved from {best_div:.5f} to {diversity:.5f} !!!"
        logger.info(msg)
        best_div = diversity

    if R_precision[0] > best_top1:
        msg = f"--> --> \t Top1 Improved from {best_top1:.4f} to {R_precision[0]:.4f} !!!"
        logger.info(msg)
        best_top1 = R_precision[0]

    if R_precision[1] > best_top2:
        msg = f"--> --> \t Top2 Improved from {best_top2:.4f} to {R_precision[1]:.4f} !!!"
        logger.info(msg)
        best_top2 = R_precision[1]

    if R_precision[2] > best_top3:
        msg = f"--> --> \t Top3 Improved from {best_top3:.4f} to {R_precision[2]:.4f} !!!"
        logger.info(msg)
        best_top3 = R_precision[2]

    if save:
        torch.save({'trans': trans.state_dict()}, os.path.join(out_dir, 'net_last.pth'))

    trans.train()
    return best_fid, best_iter, best_div, best_top1, best_top2, best_top3, best_matching, writer, logger


@torch.no_grad()
def evaluation_transformer_test(out_dir, val_loader, net, trans, logger, writer, nb_iter, best_fid, best_iter, best_div, best_top1, best_top2, best_top3, best_matching, best_multi, clip_model, eval_wrapper, draw=True, save=True, savegif=False, savenpy=False):

    trans.eval()
    nb_sample = 0

    draw_org = []
    draw_pred = []
    draw_text = []
    draw_text_pred = []
    draw_name = []

    motion_annotation_list = []
    motion_pred_list = []
    motion_multimodality = []
    R_precision_real = 0
    R_precision = 0
    matching_score_real = 0
    matching_score_pred = 0

    nb_sample = 0

    for batch in val_loader:

        word_embeddings, pos_one_hots, clip_text, sent_len, pose, m_length, token, name = batch
        bs, seq = pose.shape[:2]
        num_joints = 21 if pose.shape[-1] == 251 else 22

        text = clip.tokenize(clip_text, truncate=True).cuda()

        feat_clip_text = clip_model.encode_text(text).float()
        motion_multimodality_batch = []
        for i in range(30):
            pred_pose_eval = torch.zeros((bs, seq, pose.shape[-1])).cuda()
            pred_len = torch.ones(bs).long()

            for k in range(bs):
                try:
                    index_motion = trans.sample(feat_clip_text[k:k+1], True)
                except:
                    index_motion = torch.ones(1, 1).cuda().long()

                pred_pose = net.forward_decoder(index_motion)
                cur_len = pred_pose.shape[1]

                pred_len[k] = min(cur_len, seq)
                pred_pose_eval[k:k+1, :cur_len] = pred_pose[:, :seq]

                if i == 0 and (draw or savenpy):
                    pred_denorm = val_loader.dataset.inv_transform(pred_pose.detach().cpu().numpy())
                    pred_xyz = recover_from_ric(torch.from_numpy(pred_denorm).float().cuda(), num_joints)

                    if savenpy:
                        np.save(os.path.join(out_dir, name[k]+'_pred.npy'), pred_xyz.detach().cpu().numpy())

                    if draw:
                        if i == 0:
                            draw_pred.append(pred_xyz)
                            draw_text_pred.append(clip_text[k])
                            draw_name.append(name[k])

            et_pred, em_pred = eval_wrapper.get_co_embeddings(word_embeddings, pos_one_hots, sent_len, pred_pose_eval, pred_len)

            motion_multimodality_batch.append(em_pred.reshape(bs, 1, -1))

            if i == 0:
                pose = pose.cuda().float()

                et, em = eval_wrapper.get_co_embeddings(word_embeddings, pos_one_hots, sent_len, pose, m_length)
                motion_annotation_list.append(em)
                motion_pred_list.append(em_pred)

                if draw or savenpy:
                    pose = val_loader.dataset.inv_transform(pose.detach().cpu().numpy())
                    pose_xyz = recover_from_ric(torch.from_numpy(pose).float().cuda(), num_joints)

                    if savenpy:
                        for j in range(bs):
                            np.save(os.path.join(out_dir, name[j]+'_gt.npy'), pose_xyz[j][:m_length[j]].unsqueeze(0).cpu().numpy())

                    if draw:
                        for j in range(bs):
                            draw_org.append(pose_xyz[j][:m_length[j]].unsqueeze(0))
                            draw_text.append(clip_text[j])

                temp_R, temp_match = calculate_R_precision(et.cpu().numpy(), em.cpu().numpy(), top_k=3, sum_all=True)
                R_precision_real += temp_R
                matching_score_real += temp_match
                temp_R, temp_match = calculate_R_precision(et_pred.cpu().numpy(), em_pred.cpu().numpy(), top_k=3, sum_all=True)
                R_precision += temp_R
                matching_score_pred += temp_match

                nb_sample += bs

        motion_multimodality.append(torch.cat(motion_multimodality_batch, dim=1))

    motion_annotation_np = torch.cat(motion_annotation_list, dim=0).cpu().numpy()
    motion_pred_np = torch.cat(motion_pred_list, dim=0).cpu().numpy()
    gt_mu, gt_cov = calculate_activation_statistics(motion_annotation_np)
    mu, cov = calculate_activation_statistics(motion_pred_np)

    diversity_real = calculate_diversity(motion_annotation_np, 300 if nb_sample > 300 else 100)
    diversity = calculate_diversity(motion_pred_np, 300 if nb_sample > 300 else 100)

    R_precision_real = R_precision_real / nb_sample
    R_precision = R_precision / nb_sample

    matching_score_real = matching_score_real / nb_sample
    matching_score_pred = matching_score_pred / nb_sample

    multimodality = 0
    motion_multimodality = torch.cat(motion_multimodality, dim=0).cpu().numpy()
    multimodality = calculate_multimodality(motion_multimodality, 10)

    fid = calculate_frechet_distance(gt_mu, gt_cov, mu, cov)

    msg = f"--> \t Eva. Iter {nb_iter} :, FID. {fid:.4f}, Diversity Real. {diversity_real:.4f}, Diversity. {diversity:.4f}, R_precision_real. {R_precision_real}, R_precision. {R_precision}, matching_score_real. {matching_score_real}, matching_score_pred. {matching_score_pred}, multimodality. {multimodality:.4f}"
    logger.info(msg)

    if draw:
        for ii in range(len(draw_org)):
            tensorborad_add_video_xyz(writer, draw_org[ii], nb_iter, tag='./Vis/'+draw_name[ii]+'_org', nb_vis=1, title_batch=[draw_text[ii]], outname=[os.path.join(out_dir, draw_name[ii]+'_skel_gt.gif')] if savegif else None)

            tensorborad_add_video_xyz(writer, draw_pred[ii], nb_iter, tag='./Vis/'+draw_name[ii]+'_pred', nb_vis=1, title_batch=[draw_text_pred[ii]], outname=[os.path.join(out_dir, draw_name[ii]+'_skel_pred.gif')] if savegif else None)

    trans.train()
    return fid, best_iter, diversity, R_precision[0], R_precision[1], R_precision[2], matching_score_pred, multimodality, writer, logger

# (X - X_train)*(X - X_train) = -2X*X_train + X*X + X_train*X_train
def euclidean_distance_matrix(matrix1, matrix2):
    """
    Params:
    -- matrix1: N1 x D
    -- matrix2: N2 x D
    Returns:
    -- dist: N1 x N2
    dist[i, j] == distance(matrix1[i], matrix2[j])
    """
    assert matrix1.shape[1] == matrix2.shape[1]
    d1 = -2 * np.dot(matrix1, matrix2.T)    # shape (num_test, num_train)
    d2 = np.sum(np.square(matrix1), axis=1, keepdims=True)    # shape (num_test, 1)
    d3 = np.sum(np.square(matrix2), axis=1)    # shape (num_train, )
    dists = np.sqrt(d1 + d2 + d3)  # broadcasting
    return dists


def calculate_top_k(mat, top_k):
    size = mat.shape[0]
    gt_mat = np.expand_dims(np.arange(size), 1).repeat(size, 1)
    bool_mat = (mat == gt_mat)
    correct_vec = False
    top_k_list = []
    for i in range(top_k):
        # print(correct_vec, bool_mat[:, i])
        correct_vec = (correct_vec | bool_mat[:, i])
        # print(correct_vec)
        top_k_list.append(correct_vec[:, None])
    top_k_mat = np.concatenate(top_k_list, axis=1)
    return top_k_mat


def calculate_R_precision(embedding1, embedding2, top_k, sum_all=False):
    dist_mat = euclidean_distance_matrix(embedding1, embedding2)
    matching_score = dist_mat.trace()
    argmax = np.argsort(dist_mat, axis=1)
    top_k_mat = calculate_top_k(argmax, top_k)
    if sum_all:
        return top_k_mat.sum(axis=0), matching_score
    else:
        return top_k_mat, matching_score

def calculate_multimodality(activation, multimodality_times):
    assert len(activation.shape) == 3
    assert activation.shape[1] > multimodality_times
    num_per_sent = activation.shape[1]

    first_dices = np.random.choice(num_per_sent, multimodality_times, replace=False)
    second_dices = np.random.choice(num_per_sent, multimodality_times, replace=False)
    dist = linalg.norm(activation[:, first_dices] - activation[:, second_dices], axis=2)
    return dist.mean()


def calculate_diversity(activation, diversity_times):
    assert len(activation.shape) == 2
    assert activation.shape[0] > diversity_times
    num_samples = activation.shape[0]

    first_indices = np.random.choice(num_samples, diversity_times, replace=False)
    second_indices = np.random.choice(num_samples, diversity_times, replace=False)
    dist = linalg.norm(activation[first_indices] - activation[second_indices], axis=1)
    return dist.mean()


def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):

    mu1 = np.atleast_1d(mu1)
    mu2 = np.atleast_1d(mu2)

    sigma1 = np.atleast_2d(sigma1)
    sigma2 = np.atleast_2d(sigma2)

    assert mu1.shape == mu2.shape, \
        'Training and test mean vectors have different lengths'
    assert sigma1.shape == sigma2.shape, \
        'Training and test covariances have different dimensions'

    diff = mu1 - mu2

    # Product might be almost singular
    covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
    if not np.isfinite(covmean).all():
        msg = ('fid calculation produces singular product; '
|
538 |
-
'adding %s to diagonal of cov estimates') % eps
|
539 |
-
print(msg)
|
540 |
-
offset = np.eye(sigma1.shape[0]) * eps
|
541 |
-
covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
|
542 |
-
|
543 |
-
# Numerical error might give slight imaginary component
|
544 |
-
if np.iscomplexobj(covmean):
|
545 |
-
if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
|
546 |
-
m = np.max(np.abs(covmean.imag))
|
547 |
-
raise ValueError('Imaginary component {}'.format(m))
|
548 |
-
covmean = covmean.real
|
549 |
-
|
550 |
-
tr_covmean = np.trace(covmean)
|
551 |
-
|
552 |
-
return (diff.dot(diff) + np.trace(sigma1)
|
553 |
-
+ np.trace(sigma2) - 2 * tr_covmean)
|
554 |
-
|
555 |
-
|
556 |
-
|
557 |
-
def calculate_activation_statistics(activations):
|
558 |
-
|
559 |
-
mu = np.mean(activations, axis=0)
|
560 |
-
cov = np.cov(activations, rowvar=False)
|
561 |
-
return mu, cov
|
562 |
-
|
563 |
-
|
564 |
-
def calculate_frechet_feature_distance(feature_list1, feature_list2):
|
565 |
-
feature_list1 = np.stack(feature_list1)
|
566 |
-
feature_list2 = np.stack(feature_list2)
|
567 |
-
|
568 |
-
# normalize the scale
|
569 |
-
mean = np.mean(feature_list1, axis=0)
|
570 |
-
std = np.std(feature_list1, axis=0) + 1e-10
|
571 |
-
feature_list1 = (feature_list1 - mean) / std
|
572 |
-
feature_list2 = (feature_list2 - mean) / std
|
573 |
-
|
574 |
-
dist = calculate_frechet_distance(
|
575 |
-
mu1=np.mean(feature_list1, axis=0),
|
576 |
-
sigma1=np.cov(feature_list1, rowvar=False),
|
577 |
-
mu2=np.mean(feature_list2, axis=0),
|
578 |
-
sigma2=np.cov(feature_list2, rowvar=False),
|
579 |
-
)
|
580 |
-
return dist
|
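A minimal usage sketch for the metric helpers in the deleted file above, assuming those functions are in scope; the arrays real_feats and pred_feats are synthetic stand-ins for the text/motion embeddings produced by the evaluation loop, not data from this repository.

import numpy as np
from scipy import linalg  # the helpers above refer to scipy.linalg as `linalg`

rng = np.random.default_rng(0)
real_feats = rng.normal(size=(512, 256))            # stand-in for ground-truth embeddings
pred_feats = rng.normal(0.1, 1.0, size=(512, 256))  # stand-in for generated embeddings

# FID between the two feature distributions, mirroring the evaluation loop above
gt_mu, gt_cov = calculate_activation_statistics(real_feats)
mu, cov = calculate_activation_statistics(pred_feats)
fid = calculate_frechet_distance(gt_mu, gt_cov, mu, cov)

# Mean pairwise distance over 100 random pairs, as calculate_diversity computes it
diversity = calculate_diversity(pred_feats, 100)

# Top-3 R-precision over one 32-row block: counts of queries whose matching
# entry appears among the 3 nearest neighbours of the distance matrix
top_k_counts, matching_score = calculate_R_precision(real_feats[:32], pred_feats[:32], top_k=3, sum_all=True)
r_precision_at_3 = top_k_counts / 32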
spaces/AIGC-Audio/AudioGPT/NeuralSeq/data_gen/tts/wav_processors/__init__.py
DELETED
@@ -1,2 +0,0 @@
-from . import base_processor
-from . import common_processors
spaces/AIGC-Audio/AudioGPT/NeuralSeq/tasks/tts/tts.py
DELETED
@@ -1,131 +0,0 @@
-from multiprocessing.pool import Pool
-
-import matplotlib
-
-from utils.pl_utils import data_loader
-from utils.training_utils import RSQRTSchedule
-from vocoders.base_vocoder import get_vocoder_cls, BaseVocoder
-from modules.fastspeech.pe import PitchExtractor
-
-matplotlib.use('Agg')
-import os
-import numpy as np
-from tqdm import tqdm
-import torch.distributed as dist
-
-from tasks.base_task import BaseTask
-from utils.hparams import hparams
-from utils.text_encoder import TokenTextEncoder
-import json
-
-import torch
-import torch.optim
-import torch.utils.data
-import utils
-
-
-class TtsTask(BaseTask):
-    def __init__(self, *args, **kwargs):
-        self.vocoder = None
-        self.phone_encoder = self.build_phone_encoder(hparams['binary_data_dir'])
-        self.padding_idx = self.phone_encoder.pad()
-        self.eos_idx = self.phone_encoder.eos()
-        self.seg_idx = self.phone_encoder.seg()
-        self.saving_result_pool = None
-        self.saving_results_futures = None
-        self.stats = {}
-        super().__init__(*args, **kwargs)
-
-    def build_scheduler(self, optimizer):
-        return RSQRTSchedule(optimizer)
-
-    def build_optimizer(self, model):
-        self.optimizer = optimizer = torch.optim.AdamW(
-            model.parameters(),
-            lr=hparams['lr'])
-        return optimizer
-
-    def build_dataloader(self, dataset, shuffle, max_tokens=None, max_sentences=None,
-                         required_batch_size_multiple=-1, endless=False, batch_by_size=True):
-        devices_cnt = torch.cuda.device_count()
-        if devices_cnt == 0:
-            devices_cnt = 1
-        if required_batch_size_multiple == -1:
-            required_batch_size_multiple = devices_cnt
-
-        def shuffle_batches(batches):
-            np.random.shuffle(batches)
-            return batches
-
-        if max_tokens is not None:
-            max_tokens *= devices_cnt
-        if max_sentences is not None:
-            max_sentences *= devices_cnt
-        indices = dataset.ordered_indices()
-        if batch_by_size:
-            batch_sampler = utils.batch_by_size(
-                indices, dataset.num_tokens, max_tokens=max_tokens, max_sentences=max_sentences,
-                required_batch_size_multiple=required_batch_size_multiple,
-            )
-        else:
-            batch_sampler = []
-            for i in range(0, len(indices), max_sentences):
-                batch_sampler.append(indices[i:i + max_sentences])
-
-        if shuffle:
-            batches = shuffle_batches(list(batch_sampler))
-            if endless:
-                batches = [b for _ in range(1000) for b in shuffle_batches(list(batch_sampler))]
-        else:
-            batches = batch_sampler
-            if endless:
-                batches = [b for _ in range(1000) for b in batches]
-        num_workers = dataset.num_workers
-        if self.trainer.use_ddp:
-            num_replicas = dist.get_world_size()
-            rank = dist.get_rank()
-            batches = [x[rank::num_replicas] for x in batches if len(x) % num_replicas == 0]
-        return torch.utils.data.DataLoader(dataset,
-                                           collate_fn=dataset.collater,
-                                           batch_sampler=batches,
-                                           num_workers=num_workers,
-                                           pin_memory=False)
-
-    def build_phone_encoder(self, data_dir):
-        phone_list_file = os.path.join(data_dir, 'phone_set.json')
-
-        phone_list = json.load(open(phone_list_file))
-        return TokenTextEncoder(None, vocab_list=phone_list, replace_oov=',')
-
-    def build_optimizer(self, model):
-        self.optimizer = optimizer = torch.optim.AdamW(
-            model.parameters(),
-            lr=hparams['lr'])
-        return optimizer
-
-    def test_start(self):
-        self.saving_result_pool = Pool(8)
-        self.saving_results_futures = []
-        self.vocoder: BaseVocoder = get_vocoder_cls(hparams)()
-        if hparams.get('pe_enable') is not None and hparams['pe_enable']:
-            self.pe = PitchExtractor().cuda()
-            utils.load_ckpt(self.pe, hparams['pe_ckpt'], 'model', strict=True)
-            self.pe.eval()
-
-    def test_end(self, outputs):
-        self.saving_result_pool.close()
-        [f.get() for f in tqdm(self.saving_results_futures)]
-        self.saving_result_pool.join()
-        return {}
-
-    ##########
-    # utils
-    ##########
-    def weights_nonzero_speech(self, target):
-        # target : B x T x mel
-        # Assign weight 1.0 to all labels except for padding (id=0).
-        dim = target.size(-1)
-        return target.abs().sum(-1, keepdim=True).ne(0).float().repeat(1, 1, dim)
-
-if __name__ == '__main__':
-    TtsTask.start()
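A standalone sketch, with made-up index batches, of the DDP sharding rule in build_dataloader above: every batch whose length is divisible by the world size is striped across ranks, and batches that cannot be split evenly are dropped on all ranks.

# Hypothetical batches of dataset indices, world size 2 (e.g. a 2-GPU run).
batches = [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9, 10]]
num_replicas = 2
for rank in range(num_replicas):
    shard = [x[rank::num_replicas] for x in batches if len(x) % num_replicas == 0]
    print(rank, shard)
# rank 0 -> [[0, 2], [7, 9]]; rank 1 -> [[1, 3], [8, 10]]
# the 3-item batch is skipped on both ranks because it cannot be split evenly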
spaces/ALSv/FSW/roop/__init__.py
DELETED
File without changes
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-60e_deepfashion2_trousers_256x192/td_hm_res50_4xb64-60e_deepfashion2_trousers_256x192.py
DELETED
@@ -1,2861 +0,0 @@
-default_scope = 'mmpose'
-default_hooks = dict(
-    timer=dict(type='IterTimerHook'),
-    logger=dict(type='LoggerHook', interval=50),
-    param_scheduler=dict(type='ParamSchedulerHook'),
-    checkpoint=dict(type='CheckpointHook', interval=10, save_best='PCK', rule='greater'),
-    sampler_seed=dict(type='DistSamplerSeedHook'),
-    visualization=dict(type='PoseVisualizationHook', enable=False))
-custom_hooks = [dict(type='SyncBuffersHook')]
-env_cfg = dict(
-    cudnn_benchmark=False,
-    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
-    dist_cfg=dict(backend='nccl'))
-vis_backends = [dict(type='LocalVisBackend')]
-visualizer = dict(
-    type='PoseLocalVisualizer',
-    vis_backends=[dict(type='LocalVisBackend'), dict(type='WandbVisBackend')],
-    name='visualizer')
-log_processor = dict(type='LogProcessor', window_size=50, by_epoch=True, num_digits=6)
-log_level = 'INFO'
-load_from = None
-resume = False
-backend_args = dict(backend='local')
-train_cfg = dict(by_epoch=True, max_epochs=60, val_interval=10)
-val_cfg = dict()
-test_cfg = dict()
-colors = dict(
-    sss=[255, 128, 0],
-    lss=[255, 0, 128],
-    sso=[128, 0, 255],
-    lso=[0, 128, 255],
-    vest=[0, 128, 128],
-    sling=[0, 0, 128],
-    shorts=[128, 128, 128],
-    trousers=[128, 0, 128],
-    skirt=[64, 128, 128],
-    ssd=[64, 64, 128],
-    lsd=[128, 64, 0],
-    vd=[128, 64, 255],
-    sd=[128, 64, 0])
-dataset_info = dict(
-    dataset_name='deepfashion2',
-    paper_info=dict(
-        author='Yuying Ge and Ruimao Zhang and Lingyun Wu and Xiaogang Wang and Xiaoou Tang and Ping Luo',
-        title='DeepFashion2: A Versatile Benchmark for Detection, Pose Estimation, Segmentation and Re-Identification of Clothing Images',
-        container='Proceedings of IEEE Conference on Computer Vision and Pattern Recognition (CVPR)',
-        year='2019',
-        homepage='https://github.com/switchablenorms/DeepFashion2'),
-    keypoint_info=dict({
-        0: dict(name='sss_kpt1', id=0, color=[255, 128, 0], type='', swap=''),
-        1: dict(name='sss_kpt2', id=1, color=[255, 128, 0], type='', swap='sss_kpt6'),
-        2: dict(name='sss_kpt3', id=2, color=[255, 128, 0], type='', swap='sss_kpt5'),
-        3: dict(name='sss_kpt4', id=3, color=[255, 128, 0], type='', swap=''),
-        4: dict(name='sss_kpt5', id=4, color=[255, 128, 0], type='', swap='sss_kpt3'),
-        5: dict(name='sss_kpt6', id=5, color=[255, 128, 0], type='', swap='sss_kpt2'),
-        6: dict(name='sss_kpt7', id=6, color=[255, 128, 0], type='', swap='sss_kpt25'),
-        7: dict(name='sss_kpt8', id=7, color=[255, 128, 0], type='', swap='sss_kpt24'),
-        8: dict(name='sss_kpt9', id=8, color=[255, 128, 0], type='', swap='sss_kpt23'),
-        9: dict(name='sss_kpt10', id=9, color=[255, 128, 0], type='', swap='sss_kpt22'),
-        10: dict(name='sss_kpt11', id=10, color=[255, 128, 0], type='', swap='sss_kpt21'),
-        11: dict(name='sss_kpt12', id=11, color=[255, 128, 0], type='', swap='sss_kpt20'),
-        12: dict(name='sss_kpt13', id=12, color=[255, 128, 0], type='', swap='sss_kpt19'),
-        13: dict(name='sss_kpt14', id=13, color=[255, 128, 0], type='', swap='sss_kpt18'),
-        14: dict(name='sss_kpt15', id=14, color=[255, 128, 0], type='', swap='sss_kpt17'),
-        15: dict(name='sss_kpt16', id=15, color=[255, 128, 0], type='', swap=''),
-        16: dict(name='sss_kpt17', id=16, color=[255, 128, 0], type='', swap='sss_kpt15'),
-        17: dict(name='sss_kpt18', id=17, color=[255, 128, 0], type='', swap='sss_kpt14'),
-        18: dict(name='sss_kpt19', id=18, color=[255, 128, 0], type='', swap='sss_kpt13'),
-        19: dict(name='sss_kpt20', id=19, color=[255, 128, 0], type='', swap='sss_kpt12'),
-        20: dict(name='sss_kpt21', id=20, color=[255, 128, 0], type='', swap='sss_kpt11'),
-        21: dict(name='sss_kpt22', id=21, color=[255, 128, 0], type='', swap='sss_kpt10'),
-        22: dict(name='sss_kpt23', id=22, color=[255, 128, 0], type='', swap='sss_kpt9'),
-        23: dict(name='sss_kpt24', id=23, color=[255, 128, 0], type='', swap='sss_kpt8'),
-        24: dict(name='sss_kpt25', id=24, color=[255, 128, 0], type='', swap='sss_kpt7'),
-        25: dict(name='lss_kpt1', id=25, color=[255, 0, 128], type='', swap=''),
-        26: dict(name='lss_kpt2', id=26, color=[255, 0, 128], type='', swap='lss_kpt6'),
-        27: dict(name='lss_kpt3', id=27, color=[255, 0, 128], type='', swap='lss_kpt5'),
-        28: dict(name='lss_kpt4', id=28, color=[255, 0, 128], type='', swap=''),
-        29: dict(name='lss_kpt5', id=29, color=[255, 0, 128], type='', swap='lss_kpt3'),
-        30: dict(name='lss_kpt6', id=30, color=[255, 0, 128], type='', swap='lss_kpt2'),
-        31: dict(name='lss_kpt7', id=31, color=[255, 0, 128], type='', swap='lss_kpt33'),
-        32: dict(name='lss_kpt8', id=32, color=[255, 0, 128], type='', swap='lss_kpt32'),
-        33: dict(name='lss_kpt9', id=33, color=[255, 0, 128], type='', swap='lss_kpt31'),
-        34: dict(name='lss_kpt10', id=34, color=[255, 0, 128], type='', swap='lss_kpt30'),
-        35: dict(name='lss_kpt11', id=35, color=[255, 0, 128], type='', swap='lss_kpt29'),
-        36: dict(name='lss_kpt12', id=36, color=[255, 0, 128], type='', swap='lss_kpt28'),
-        37: dict(name='lss_kpt13', id=37, color=[255, 0, 128], type='', swap='lss_kpt27'),
-        38: dict(name='lss_kpt14', id=38, color=[255, 0, 128], type='', swap='lss_kpt26'),
-        39: dict(name='lss_kpt15', id=39, color=[255, 0, 128], type='', swap='lss_kpt25'),
-        40: dict(name='lss_kpt16', id=40, color=[255, 0, 128], type='', swap='lss_kpt24'),
-        41: dict(name='lss_kpt17', id=41, color=[255, 0, 128], type='', swap='lss_kpt23'),
-        42: dict(name='lss_kpt18', id=42, color=[255, 0, 128], type='', swap='lss_kpt22'),
-        43: dict(name='lss_kpt19', id=43, color=[255, 0, 128], type='', swap='lss_kpt21'),
-        44: dict(name='lss_kpt20', id=44, color=[255, 0, 128], type='', swap=''),
-        45: dict(name='lss_kpt21', id=45, color=[255, 0, 128], type='', swap='lss_kpt19'),
-        46: dict(name='lss_kpt22', id=46, color=[255, 0, 128], type='', swap='lss_kpt18'),
-        47: dict(name='lss_kpt23', id=47, color=[255, 0, 128], type='', swap='lss_kpt17'),
-        48: dict(name='lss_kpt24', id=48, color=[255, 0, 128], type='', swap='lss_kpt16'),
-        49: dict(name='lss_kpt25', id=49, color=[255, 0, 128], type='', swap='lss_kpt15'),
-        50: dict(name='lss_kpt26', id=50, color=[255, 0, 128], type='', swap='lss_kpt14'),
-        51: dict(name='lss_kpt27', id=51, color=[255, 0, 128], type='', swap='lss_kpt13'),
-        52: dict(name='lss_kpt28', id=52, color=[255, 0, 128], type='', swap='lss_kpt12'),
-        53: dict(name='lss_kpt29', id=53, color=[255, 0, 128], type='', swap='lss_kpt11'),
-        54: dict(name='lss_kpt30', id=54, color=[255, 0, 128], type='', swap='lss_kpt10'),
-        55: dict(name='lss_kpt31', id=55, color=[255, 0, 128], type='', swap='lss_kpt9'),
-        56: dict(name='lss_kpt32', id=56, color=[255, 0, 128], type='', swap='lss_kpt8'),
-        57: dict(name='lss_kpt33', id=57, color=[255, 0, 128], type='', swap='lss_kpt7'),
-        58: dict(name='sso_kpt1', id=58, color=[128, 0, 255], type='', swap=''),
-        59: dict(name='sso_kpt2', id=59, color=[128, 0, 255], type='', swap='sso_kpt26'),
-        60: dict(name='sso_kpt3', id=60, color=[128, 0, 255], type='', swap='sso_kpt5'),
-        61: dict(name='sso_kpt4', id=61, color=[128, 0, 255], type='', swap='sso_kpt6'),
-        62: dict(name='sso_kpt5', id=62, color=[128, 0, 255], type='', swap='sso_kpt3'),
-        63: dict(name='sso_kpt6', id=63, color=[128, 0, 255], type='', swap='sso_kpt4'),
-        64: dict(name='sso_kpt7', id=64, color=[128, 0, 255], type='', swap='sso_kpt25'),
-        65: dict(name='sso_kpt8', id=65, color=[128, 0, 255], type='', swap='sso_kpt24'),
-        66: dict(name='sso_kpt9', id=66, color=[128, 0, 255], type='', swap='sso_kpt23'),
-        67: dict(name='sso_kpt10', id=67, color=[128, 0, 255], type='', swap='sso_kpt22'),
-        68: dict(name='sso_kpt11', id=68, color=[128, 0, 255], type='', swap='sso_kpt21'),
-        69: dict(name='sso_kpt12', id=69, color=[128, 0, 255], type='', swap='sso_kpt20'),
-        70: dict(name='sso_kpt13', id=70, color=[128, 0, 255], type='', swap='sso_kpt19'),
-        71: dict(name='sso_kpt14', id=71, color=[128, 0, 255], type='', swap='sso_kpt18'),
-        72: dict(name='sso_kpt15', id=72, color=[128, 0, 255], type='', swap='sso_kpt17'),
-        73: dict(name='sso_kpt16', id=73, color=[128, 0, 255], type='', swap='sso_kpt29'),
-        74: dict(name='sso_kpt17', id=74, color=[128, 0, 255], type='', swap='sso_kpt15'),
-        75: dict(name='sso_kpt18', id=75, color=[128, 0, 255], type='', swap='sso_kpt14'),
-        76: dict(name='sso_kpt19', id=76, color=[128, 0, 255], type='', swap='sso_kpt13'),
-        77: dict(name='sso_kpt20', id=77, color=[128, 0, 255], type='', swap='sso_kpt12'),
-        78: dict(name='sso_kpt21', id=78, color=[128, 0, 255], type='', swap='sso_kpt11'),
-        79: dict(name='sso_kpt22', id=79, color=[128, 0, 255], type='', swap='sso_kpt10'),
-        80: dict(name='sso_kpt23', id=80, color=[128, 0, 255], type='', swap='sso_kpt9'),
-        81: dict(name='sso_kpt24', id=81, color=[128, 0, 255], type='', swap='sso_kpt8'),
-        82: dict(name='sso_kpt25', id=82, color=[128, 0, 255], type='', swap='sso_kpt7'),
-        83: dict(name='sso_kpt26', id=83, color=[128, 0, 255], type='', swap='sso_kpt2'),
-        84: dict(name='sso_kpt27', id=84, color=[128, 0, 255], type='', swap='sso_kpt30'),
-        85: dict(name='sso_kpt28', id=85, color=[128, 0, 255], type='', swap='sso_kpt31'),
-        86: dict(name='sso_kpt29', id=86, color=[128, 0, 255], type='', swap='sso_kpt16'),
-        87: dict(name='sso_kpt30', id=87, color=[128, 0, 255], type='', swap='sso_kpt27'),
-        88: dict(name='sso_kpt31', id=88, color=[128, 0, 255], type='', swap='sso_kpt28'),
-        89: dict(name='lso_kpt1', id=89, color=[0, 128, 255], type='', swap=''),
-        90: dict(name='lso_kpt2', id=90, color=[0, 128, 255], type='', swap='lso_kpt6'),
-        91: dict(name='lso_kpt3', id=91, color=[0, 128, 255], type='', swap='lso_kpt5'),
-        92: dict(name='lso_kpt4', id=92, color=[0, 128, 255], type='', swap='lso_kpt34'),
-        93: dict(name='lso_kpt5', id=93, color=[0, 128, 255], type='', swap='lso_kpt3'),
-        94: dict(name='lso_kpt6', id=94, color=[0, 128, 255], type='', swap='lso_kpt2'),
-        95: dict(name='lso_kpt7', id=95, color=[0, 128, 255], type='', swap='lso_kpt33'),
-        96: dict(name='lso_kpt8', id=96, color=[0, 128, 255], type='', swap='lso_kpt32'),
-        97: dict(name='lso_kpt9', id=97, color=[0, 128, 255], type='', swap='lso_kpt31'),
-        98: dict(name='lso_kpt10', id=98, color=[0, 128, 255], type='', swap='lso_kpt30'),
-        99: dict(name='lso_kpt11', id=99, color=[0, 128, 255], type='', swap='lso_kpt29'),
-        100: dict(name='lso_kpt12', id=100, color=[0, 128, 255], type='', swap='lso_kpt28'),
-        101: dict(name='lso_kpt13', id=101, color=[0, 128, 255], type='', swap='lso_kpt27'),
-        102: dict(name='lso_kpt14', id=102, color=[0, 128, 255], type='', swap='lso_kpt26'),
-        103: dict(name='lso_kpt15', id=103, color=[0, 128, 255], type='', swap='lso_kpt25'),
-        104: dict(name='lso_kpt16', id=104, color=[0, 128, 255], type='', swap='lso_kpt24'),
-        105: dict(name='lso_kpt17', id=105, color=[0, 128, 255], type='', swap='lso_kpt23'),
-        106: dict(name='lso_kpt18', id=106, color=[0, 128, 255], type='', swap='lso_kpt22'),
-        107: dict(name='lso_kpt19', id=107, color=[0, 128, 255], type='', swap='lso_kpt21'),
-        108: dict(name='lso_kpt20', id=108, color=[0, 128, 255], type='', swap='lso_kpt37'),
-        109: dict(name='lso_kpt21', id=109, color=[0, 128, 255], type='', swap='lso_kpt19'),
-        110: dict(name='lso_kpt22', id=110, color=[0, 128, 255], type='', swap='lso_kpt18'),
-        111: dict(name='lso_kpt23', id=111, color=[0, 128, 255], type='', swap='lso_kpt17'),
-        112: dict(name='lso_kpt24', id=112, color=[0, 128, 255], type='', swap='lso_kpt16'),
-        113: dict(name='lso_kpt25', id=113, color=[0, 128, 255], type='', swap='lso_kpt15'),
-        114: dict(name='lso_kpt26', id=114, color=[0, 128, 255], type='', swap='lso_kpt14'),
-        115: dict(name='lso_kpt27', id=115, color=[0, 128, 255], type='', swap='lso_kpt13'),
-        116: dict(name='lso_kpt28', id=116, color=[0, 128, 255], type='', swap='lso_kpt12'),
-        117: dict(name='lso_kpt29', id=117, color=[0, 128, 255], type='', swap='lso_kpt11'),
-        118: dict(name='lso_kpt30', id=118, color=[0, 128, 255], type='', swap='lso_kpt10'),
-        119: dict(name='lso_kpt31', id=119, color=[0, 128, 255], type='', swap='lso_kpt9'),
-        120: dict(name='lso_kpt32', id=120, color=[0, 128, 255], type='', swap='lso_kpt8'),
-        121: dict(name='lso_kpt33', id=121, color=[0, 128, 255], type='', swap='lso_kpt7'),
-        122: dict(name='lso_kpt34', id=122, color=[0, 128, 255], type='', swap='lso_kpt4'),
-        123: dict(name='lso_kpt35', id=123, color=[0, 128, 255], type='', swap='lso_kpt38'),
-        124: dict(name='lso_kpt36', id=124, color=[0, 128, 255], type='', swap='lso_kpt39'),
-        125: dict(name='lso_kpt37', id=125, color=[0, 128, 255], type='', swap='lso_kpt20'),
-        126: dict(name='lso_kpt38', id=126, color=[0, 128, 255], type='', swap='lso_kpt35'),
-        127: dict(name='lso_kpt39', id=127, color=[0, 128, 255], type='', swap='lso_kpt36'),
-        128: dict(name='vest_kpt1', id=128, color=[0, 128, 128], type='', swap=''),
-        129: dict(name='vest_kpt2', id=129, color=[0, 128, 128], type='', swap='vest_kpt6'),
-        130: dict(name='vest_kpt3', id=130, color=[0, 128, 128], type='', swap='vest_kpt5'),
-        131: dict(name='vest_kpt4', id=131, color=[0, 128, 128], type='', swap=''),
-        132: dict(name='vest_kpt5', id=132, color=[0, 128, 128], type='', swap='vest_kpt3'),
-        133: dict(name='vest_kpt6', id=133, color=[0, 128, 128], type='', swap='vest_kpt2'),
-        134: dict(name='vest_kpt7', id=134, color=[0, 128, 128], type='', swap='vest_kpt15'),
-        135: dict(name='vest_kpt8', id=135, color=[0, 128, 128], type='', swap='vest_kpt14'),
-        136: dict(name='vest_kpt9', id=136, color=[0, 128, 128], type='', swap='vest_kpt13'),
-        137: dict(name='vest_kpt10', id=137, color=[0, 128, 128], type='', swap='vest_kpt12'),
-        138: dict(name='vest_kpt11', id=138, color=[0, 128, 128], type='', swap=''),
-        139: dict(name='vest_kpt12', id=139, color=[0, 128, 128], type='', swap='vest_kpt10'),
-        140: dict(name='vest_kpt13', id=140, color=[0, 128, 128], type='', swap=''),
-        141: dict(name='vest_kpt14', id=141, color=[0, 128, 128], type='', swap='vest_kpt8'),
-        142: dict(name='vest_kpt15', id=142, color=[0, 128, 128], type='', swap='vest_kpt7'),
-        143: dict(name='sling_kpt1', id=143, color=[0, 0, 128], type='', swap=''),
-        144: dict(name='sling_kpt2', id=144, color=[0, 0, 128], type='', swap='sling_kpt6'),
-        145: dict(name='sling_kpt3', id=145, color=[0, 0, 128], type='', swap='sling_kpt5'),
-        146: dict(name='sling_kpt4', id=146, color=[0, 0, 128], type='', swap=''),
-        147: dict(name='sling_kpt5', id=147, color=[0, 0, 128], type='', swap='sling_kpt3'),
-        148: dict(name='sling_kpt6', id=148, color=[0, 0, 128], type='', swap='sling_kpt2'),
-        149: dict(name='sling_kpt7', id=149, color=[0, 0, 128], type='', swap='sling_kpt15'),
-        150: dict(name='sling_kpt8', id=150, color=[0, 0, 128], type='', swap='sling_kpt14'),
-        151: dict(name='sling_kpt9', id=151, color=[0, 0, 128], type='', swap='sling_kpt13'),
-        152: dict(name='sling_kpt10', id=152, color=[0, 0, 128], type='', swap='sling_kpt12'),
-        153: dict(name='sling_kpt11', id=153, color=[0, 0, 128], type='', swap=''),
-        154: dict(name='sling_kpt12', id=154, color=[0, 0, 128], type='', swap='sling_kpt10'),
-        155: dict(name='sling_kpt13', id=155, color=[0, 0, 128], type='', swap='sling_kpt9'),
-        156: dict(name='sling_kpt14', id=156, color=[0, 0, 128], type='', swap='sling_kpt8'),
-        157: dict(name='sling_kpt15', id=157, color=[0, 0, 128], type='', swap='sling_kpt7'),
-        158: dict(name='shorts_kpt1', id=158, color=[128, 128, 128], type='', swap='shorts_kpt3'),
-        159: dict(name='shorts_kpt2', id=159, color=[128, 128, 128], type='', swap=''),
-        160: dict(name='shorts_kpt3', id=160, color=[128, 128, 128], type='', swap='shorts_kpt1'),
-        161: dict(name='shorts_kpt4', id=161, color=[128, 128, 128], type='', swap='shorts_kpt10'),
-        162: dict(name='shorts_kpt5', id=162, color=[128, 128, 128], type='', swap='shorts_kpt9'),
-        163: dict(name='shorts_kpt6', id=163, color=[128, 128, 128], type='', swap='shorts_kpt8'),
-        164: dict(name='shorts_kpt7', id=164, color=[128, 128, 128], type='', swap=''),
-        165: dict(name='shorts_kpt8', id=165, color=[128, 128, 128], type='', swap='shorts_kpt6'),
-        166: dict(name='shorts_kpt9', id=166, color=[128, 128, 128], type='', swap='shorts_kpt5'),
-        167: dict(name='shorts_kpt10', id=167, color=[128, 128, 128], type='', swap='shorts_kpt4'),
-        168: dict(name='trousers_kpt1', id=168, color=[128, 0, 128], type='', swap='trousers_kpt3'),
-        169: dict(name='trousers_kpt2', id=169, color=[128, 0, 128], type='', swap=''),
-        170: dict(name='trousers_kpt3', id=170, color=[128, 0, 128], type='', swap='trousers_kpt1'),
-        171: dict(name='trousers_kpt4', id=171, color=[128, 0, 128], type='', swap='trousers_kpt14'),
-        172: dict(name='trousers_kpt5', id=172, color=[128, 0, 128], type='', swap='trousers_kpt13'),
-        173: dict(name='trousers_kpt6', id=173, color=[128, 0, 128], type='', swap='trousers_kpt12'),
-        174: dict(name='trousers_kpt7', id=174, color=[128, 0, 128], type='', swap='trousers_kpt11'),
-        175: dict(name='trousers_kpt8', id=175, color=[128, 0, 128], type='', swap='trousers_kpt10'),
-        176: dict(name='trousers_kpt9', id=176, color=[128, 0, 128], type='', swap=''),
-        177: dict(name='trousers_kpt10', id=177, color=[128, 0, 128], type='', swap='trousers_kpt8'),
-        178: dict(name='trousers_kpt11', id=178, color=[128, 0, 128], type='', swap='trousers_kpt7'),
-        179: dict(name='trousers_kpt12', id=179, color=[128, 0, 128], type='', swap='trousers_kpt6'),
-        180: dict(name='trousers_kpt13', id=180, color=[128, 0, 128], type='', swap='trousers_kpt5'),
-        181: dict(name='trousers_kpt14', id=181, color=[128, 0, 128], type='', swap='trousers_kpt4'),
-        182: dict(name='skirt_kpt1', id=182, color=[64, 128, 128], type='', swap='skirt_kpt3'),
-        183: dict(name='skirt_kpt2', id=183, color=[64, 128, 128], type='', swap=''),
-        184: dict(name='skirt_kpt3', id=184, color=[64, 128, 128], type='', swap='skirt_kpt1'),
-        185: dict(name='skirt_kpt4', id=185, color=[64, 128, 128], type='', swap='skirt_kpt8'),
-        186: dict(name='skirt_kpt5', id=186, color=[64, 128, 128], type='', swap='skirt_kpt7'),
-        187: dict(name='skirt_kpt6', id=187, color=[64, 128, 128], type='', swap=''),
-        188: dict(name='skirt_kpt7', id=188, color=[64, 128, 128], type='', swap='skirt_kpt5'),
-        189: dict(name='skirt_kpt8', id=189, color=[64, 128, 128], type='', swap='skirt_kpt4'),
-        190: dict(name='ssd_kpt1', id=190, color=[64, 64, 128], type='', swap=''),
-        191: dict(name='ssd_kpt2', id=191, color=[64, 64, 128], type='', swap='ssd_kpt6'),
-        192: dict(name='ssd_kpt3', id=192, color=[64, 64, 128], type='', swap='ssd_kpt5'),
-        193: dict(name='ssd_kpt4', id=193, color=[64, 64, 128], type='', swap=''),
-        194: dict(name='ssd_kpt5', id=194, color=[64, 64, 128], type='', swap='ssd_kpt3'),
-        195: dict(name='ssd_kpt6', id=195, color=[64, 64, 128], type='', swap='ssd_kpt2'),
-        196: dict(name='ssd_kpt7', id=196, color=[64, 64, 128], type='', swap='ssd_kpt29'),
-        197: dict(name='ssd_kpt8', id=197, color=[64, 64, 128], type='', swap='ssd_kpt28'),
-        198: dict(name='ssd_kpt9', id=198, color=[64, 64, 128], type='', swap='ssd_kpt27'),
-        199: dict(name='ssd_kpt10', id=199, color=[64, 64, 128], type='', swap='ssd_kpt26'),
-        200: dict(name='ssd_kpt11', id=200, color=[64, 64, 128], type='', swap='ssd_kpt25'),
-        201: dict(name='ssd_kpt12', id=201, color=[64, 64, 128], type='', swap='ssd_kpt24'),
-        202: dict(name='ssd_kpt13', id=202, color=[64, 64, 128], type='', swap='ssd_kpt23'),
-        203: dict(name='ssd_kpt14', id=203, color=[64, 64, 128], type='', swap='ssd_kpt22'),
-        204: dict(name='ssd_kpt15', id=204, color=[64, 64, 128], type='', swap='ssd_kpt21'),
-        205: dict(name='ssd_kpt16', id=205, color=[64, 64, 128], type='', swap='ssd_kpt20'),
-        206: dict(name='ssd_kpt17', id=206, color=[64, 64, 128], type='', swap='ssd_kpt19'),
-        207: dict(name='ssd_kpt18', id=207, color=[64, 64, 128], type='', swap=''),
-        208: dict(name='ssd_kpt19', id=208, color=[64, 64, 128], type='', swap='ssd_kpt17'),
-        209: dict(name='ssd_kpt20', id=209, color=[64, 64, 128], type='', swap='ssd_kpt16'),
-        210: dict(name='ssd_kpt21', id=210, color=[64, 64, 128], type='', swap='ssd_kpt15'),
-        211: dict(name='ssd_kpt22', id=211, color=[64, 64, 128], type='', swap='ssd_kpt14'),
-        212: dict(name='ssd_kpt23', id=212, color=[64, 64, 128], type='', swap='ssd_kpt13'),
-        213: dict(name='ssd_kpt24', id=213, color=[64, 64, 128], type='', swap='ssd_kpt12'),
-        214: dict(name='ssd_kpt25', id=214, color=[64, 64, 128], type='', swap='ssd_kpt11'),
-        215: dict(name='ssd_kpt26', id=215, color=[64, 64, 128], type='', swap='ssd_kpt10'),
-        216: dict(name='ssd_kpt27', id=216, color=[64, 64, 128], type='', swap='ssd_kpt9'),
-        217: dict(name='ssd_kpt28', id=217, color=[64, 64, 128], type='', swap='ssd_kpt8'),
-        218: dict(name='ssd_kpt29', id=218, color=[64, 64, 128], type='', swap='ssd_kpt7'),
-        219: dict(name='lsd_kpt1', id=219, color=[128, 64, 0], type='', swap=''),
-        220: dict(name='lsd_kpt2', id=220, color=[128, 64, 0], type='', swap='lsd_kpt6'),
-        221: dict(name='lsd_kpt3', id=221, color=[128, 64, 0], type='', swap='lsd_kpt5'),
-        222: dict(name='lsd_kpt4', id=222, color=[128, 64, 0], type='', swap=''),
-        223: dict(name='lsd_kpt5', id=223, color=[128, 64, 0], type='', swap='lsd_kpt3'),
-        224: dict(name='lsd_kpt6', id=224, color=[128, 64, 0], type='', swap='lsd_kpt2'),
-        225: dict(name='lsd_kpt7', id=225, color=[128, 64, 0], type='', swap='lsd_kpt37'),
-        226: dict(name='lsd_kpt8', id=226, color=[128, 64, 0], type='', swap='lsd_kpt36'),
-        227: dict(name='lsd_kpt9', id=227, color=[128, 64, 0], type='', swap='lsd_kpt35'),
-        228: dict(name='lsd_kpt10', id=228, color=[128, 64, 0], type='', swap='lsd_kpt34'),
-        229: dict(name='lsd_kpt11', id=229, color=[128, 64, 0], type='', swap='lsd_kpt33'),
-        230: dict(name='lsd_kpt12', id=230, color=[128, 64, 0], type='', swap='lsd_kpt32'),
-        231: dict(name='lsd_kpt13', id=231, color=[128, 64, 0], type='', swap='lsd_kpt31'),
-        232: dict(name='lsd_kpt14', id=232, color=[128, 64, 0], type='', swap='lsd_kpt30'),
-        233: dict(name='lsd_kpt15', id=233, color=[128, 64, 0], type='', swap='lsd_kpt29'),
-        234: dict(name='lsd_kpt16', id=234, color=[128, 64, 0], type='', swap='lsd_kpt28'),
-        235: dict(name='lsd_kpt17', id=235, color=[128, 64, 0], type='', swap='lsd_kpt27'),
-        236: dict(name='lsd_kpt18', id=236, color=[128, 64, 0], type='', swap='lsd_kpt26'),
-        237: dict(name='lsd_kpt19', id=237, color=[128, 64, 0], type='', swap='lsd_kpt25'),
-        238: dict(name='lsd_kpt20', id=238, color=[128, 64, 0], type='', swap='lsd_kpt24'),
-        239: dict(name='lsd_kpt21', id=239, color=[128, 64, 0], type='', swap='lsd_kpt23'),
-        240: dict(name='lsd_kpt22', id=240, color=[128, 64, 0], type='', swap=''),
-        241: dict(name='lsd_kpt23', id=241, color=[128, 64, 0], type='', swap='lsd_kpt21'),
-        242: dict(name='lsd_kpt24', id=242, color=[128, 64, 0], type='', swap='lsd_kpt20'),
-        243: dict(name='lsd_kpt25', id=243, color=[128, 64, 0], type='', swap='lsd_kpt19'),
-        244: dict(name='lsd_kpt26', id=244, color=[128, 64, 0], type='', swap='lsd_kpt18'),
-        245: dict(name='lsd_kpt27', id=245, color=[128, 64, 0], type='', swap='lsd_kpt17'),
-        246: dict(name='lsd_kpt28', id=246, color=[128, 64, 0], type='', swap='lsd_kpt16'),
-        247: dict(name='lsd_kpt29', id=247, color=[128, 64, 0], type='', swap='lsd_kpt15'),
-        248: dict(name='lsd_kpt30', id=248, color=[128, 64, 0], type='', swap='lsd_kpt14'),
-        249: dict(name='lsd_kpt31', id=249, color=[128, 64, 0], type='', swap='lsd_kpt13'),
-        250: dict(name='lsd_kpt32', id=250, color=[128, 64, 0], type='', swap='lsd_kpt12'),
-        251: dict(name='lsd_kpt33', id=251, color=[128, 64, 0], type='', swap='lsd_kpt11'),
-        252: dict(name='lsd_kpt34', id=252, color=[128, 64, 0], type='', swap='lsd_kpt10'),
-        253: dict(name='lsd_kpt35', id=253, color=[128, 64, 0], type='', swap='lsd_kpt9'),
-        254: dict(name='lsd_kpt36', id=254, color=[128, 64, 0], type='', swap='lsd_kpt8'),
-        255: dict(name='lsd_kpt37', id=255, color=[128, 64, 0], type='', swap='lsd_kpt7'),
-        256: dict(name='vd_kpt1', id=256, color=[128, 64, 255], type='', swap=''),
-        257: dict(name='vd_kpt2', id=257, color=[128, 64, 255], type='', swap='vd_kpt6'),
-        258: dict(name='vd_kpt3', id=258, color=[128, 64, 255], type='', swap='vd_kpt5'),
-        259: dict(name='vd_kpt4', id=259, color=[128, 64, 255], type='', swap=''),
-        260: dict(name='vd_kpt5', id=260, color=[128, 64, 255], type='', swap='vd_kpt3'),
-        261: dict(name='vd_kpt6', id=261, color=[128, 64, 255], type='', swap='vd_kpt2'),
-        262: dict(name='vd_kpt7', id=262, color=[128, 64, 255], type='', swap='vd_kpt19'),
-        263: dict(name='vd_kpt8', id=263, color=[128, 64, 255], type='', swap='vd_kpt18'),
-        264: dict(name='vd_kpt9', id=264, color=[128, 64, 255], type='', swap='vd_kpt17'),
-        265: dict(name='vd_kpt10', id=265, color=[128, 64, 255], type='', swap='vd_kpt16'),
-        266: dict(name='vd_kpt11', id=266, color=[128, 64, 255], type='', swap='vd_kpt15'),
-        267: dict(name='vd_kpt12', id=267, color=[128, 64, 255], type='', swap='vd_kpt14'),
-        268: dict(name='vd_kpt13', id=268, color=[128, 64, 255], type='', swap=''),
-        269: dict(name='vd_kpt14', id=269, color=[128, 64, 255], type='', swap='vd_kpt12'),
-        270: dict(name='vd_kpt15', id=270, color=[128, 64, 255], type='', swap='vd_kpt11'),
-        271: dict(name='vd_kpt16', id=271, color=[128, 64, 255], type='', swap='vd_kpt10'),
-        272: dict(name='vd_kpt17', id=272, color=[128, 64, 255], type='', swap='vd_kpt9'),
-        273: dict(name='vd_kpt18', id=273, color=[128, 64, 255], type='', swap='vd_kpt8'),
-        274: dict(name='vd_kpt19', id=274, color=[128, 64, 255], type='', swap='vd_kpt7'),
-        275: dict(name='sd_kpt1', id=275, color=[128, 64, 0], type='', swap=''),
-        276: dict(name='sd_kpt2', id=276, color=[128, 64, 0], type='', swap='sd_kpt6'),
-        277: dict(name='sd_kpt3', id=277, color=[128, 64, 0], type='', swap='sd_kpt5'),
-        278: dict(name='sd_kpt4', id=278, color=[128, 64, 0], type='', swap=''),
-        279: dict(name='sd_kpt5', id=279, color=[128, 64, 0], type='', swap='sd_kpt3'),
-        280: dict(name='sd_kpt6', id=280, color=[128, 64, 0], type='', swap='sd_kpt2'),
-        281: dict(name='sd_kpt7', id=281, color=[128, 64, 0], type='', swap='sd_kpt19'),
-        282: dict(name='sd_kpt8', id=282, color=[128, 64, 0], type='', swap='sd_kpt18'),
-        283: dict(name='sd_kpt9', id=283, color=[128, 64, 0], type='', swap='sd_kpt17'),
-        284: dict(name='sd_kpt10', id=284, color=[128, 64, 0], type='', swap='sd_kpt16'),
-        285: dict(name='sd_kpt11', id=285, color=[128, 64, 0], type='', swap='sd_kpt15'),
-        286: dict(name='sd_kpt12', id=286, color=[128, 64, 0], type='', swap='sd_kpt14'),
-        287: dict(name='sd_kpt13', id=287, color=[128, 64, 0], type='', swap=''),
-        288: dict(name='sd_kpt14', id=288, color=[128, 64, 0], type='', swap='sd_kpt12'),
-        289: dict(name='sd_kpt15', id=289, color=[128, 64, 0], type='', swap='sd_kpt11'),
-        290: dict(name='sd_kpt16', id=290, color=[128, 64, 0], type='', swap='sd_kpt10'),
-        291: dict(name='sd_kpt17', id=291, color=[128, 64, 0], type='', swap='sd_kpt9'),
-        292: dict(name='sd_kpt18', id=292, color=[128, 64, 0], type='',
1963 |
-
swap='sd_kpt8'),
|
1964 |
-
293:
|
1965 |
-
dict(
|
1966 |
-
name='sd_kpt19',
|
1967 |
-
id=293,
|
1968 |
-
color=[128, 64, 0],
|
1969 |
-
type='',
|
1970 |
-
swap='sd_kpt7')
|
1971 |
-
}),
|
    skeleton_info=dict({
        0: dict(link=('sss_kpt1', 'sss_kpt2'), id=0, color=[255, 128, 0]),
        1: dict(link=('sss_kpt2', 'sss_kpt7'), id=1, color=[255, 128, 0]),
        2: dict(link=('sss_kpt7', 'sss_kpt8'), id=2, color=[255, 128, 0]),
        3: dict(link=('sss_kpt8', 'sss_kpt9'), id=3, color=[255, 128, 0]),
        4: dict(link=('sss_kpt9', 'sss_kpt10'), id=4, color=[255, 128, 0]),
        5: dict(link=('sss_kpt10', 'sss_kpt11'), id=5, color=[255, 128, 0]),
        6: dict(link=('sss_kpt11', 'sss_kpt12'), id=6, color=[255, 128, 0]),
        7: dict(link=('sss_kpt12', 'sss_kpt13'), id=7, color=[255, 128, 0]),
        8: dict(link=('sss_kpt13', 'sss_kpt14'), id=8, color=[255, 128, 0]),
        9: dict(link=('sss_kpt14', 'sss_kpt15'), id=9, color=[255, 128, 0]),
        10: dict(link=('sss_kpt15', 'sss_kpt16'), id=10, color=[255, 128, 0]),
        11: dict(link=('sss_kpt16', 'sss_kpt17'), id=11, color=[255, 128, 0]),
        12: dict(link=('sss_kpt17', 'sss_kpt18'), id=12, color=[255, 128, 0]),
        13: dict(link=('sss_kpt18', 'sss_kpt19'), id=13, color=[255, 128, 0]),
        14: dict(link=('sss_kpt19', 'sss_kpt20'), id=14, color=[255, 128, 0]),
        15: dict(link=('sss_kpt20', 'sss_kpt21'), id=15, color=[255, 128, 0]),
        16: dict(link=('sss_kpt21', 'sss_kpt22'), id=16, color=[255, 128, 0]),
        17: dict(link=('sss_kpt22', 'sss_kpt23'), id=17, color=[255, 128, 0]),
        18: dict(link=('sss_kpt23', 'sss_kpt24'), id=18, color=[255, 128, 0]),
        19: dict(link=('sss_kpt24', 'sss_kpt25'), id=19, color=[255, 128, 0]),
        20: dict(link=('sss_kpt25', 'sss_kpt6'), id=20, color=[255, 128, 0]),
        21: dict(link=('sss_kpt6', 'sss_kpt1'), id=21, color=[255, 128, 0]),
        22: dict(link=('sss_kpt2', 'sss_kpt3'), id=22, color=[255, 128, 0]),
        23: dict(link=('sss_kpt3', 'sss_kpt4'), id=23, color=[255, 128, 0]),
        24: dict(link=('sss_kpt4', 'sss_kpt5'), id=24, color=[255, 128, 0]),
        25: dict(link=('sss_kpt5', 'sss_kpt6'), id=25, color=[255, 128, 0]),
        26: dict(link=('lss_kpt1', 'lss_kpt2'), id=26, color=[255, 0, 128]),
        27: dict(link=('lss_kpt2', 'lss_kpt7'), id=27, color=[255, 0, 128]),
        28: dict(link=('lss_kpt7', 'lss_kpt8'), id=28, color=[255, 0, 128]),
        29: dict(link=('lss_kpt8', 'lss_kpt9'), id=29, color=[255, 0, 128]),
        30: dict(link=('lss_kpt9', 'lss_kpt10'), id=30, color=[255, 0, 128]),
        31: dict(link=('lss_kpt10', 'lss_kpt11'), id=31, color=[255, 0, 128]),
        32: dict(link=('lss_kpt11', 'lss_kpt12'), id=32, color=[255, 0, 128]),
        33: dict(link=('lss_kpt12', 'lss_kpt13'), id=33, color=[255, 0, 128]),
        34: dict(link=('lss_kpt13', 'lss_kpt14'), id=34, color=[255, 0, 128]),
        35: dict(link=('lss_kpt14', 'lss_kpt15'), id=35, color=[255, 0, 128]),
        36: dict(link=('lss_kpt15', 'lss_kpt16'), id=36, color=[255, 0, 128]),
        37: dict(link=('lss_kpt16', 'lss_kpt17'), id=37, color=[255, 0, 128]),
        38: dict(link=('lss_kpt17', 'lss_kpt18'), id=38, color=[255, 0, 128]),
        39: dict(link=('lss_kpt18', 'lss_kpt19'), id=39, color=[255, 0, 128]),
        40: dict(link=('lss_kpt19', 'lss_kpt20'), id=40, color=[255, 0, 128]),
        41: dict(link=('lss_kpt20', 'lss_kpt21'), id=41, color=[255, 0, 128]),
        42: dict(link=('lss_kpt21', 'lss_kpt22'), id=42, color=[255, 0, 128]),
        43: dict(link=('lss_kpt22', 'lss_kpt23'), id=43, color=[255, 0, 128]),
        44: dict(link=('lss_kpt23', 'lss_kpt24'), id=44, color=[255, 0, 128]),
        45: dict(link=('lss_kpt24', 'lss_kpt25'), id=45, color=[255, 0, 128]),
        46: dict(link=('lss_kpt25', 'lss_kpt26'), id=46, color=[255, 0, 128]),
        47: dict(link=('lss_kpt26', 'lss_kpt27'), id=47, color=[255, 0, 128]),
        48: dict(link=('lss_kpt27', 'lss_kpt28'), id=48, color=[255, 0, 128]),
        49: dict(link=('lss_kpt28', 'lss_kpt29'), id=49, color=[255, 0, 128]),
        50: dict(link=('lss_kpt29', 'lss_kpt30'), id=50, color=[255, 0, 128]),
        51: dict(link=('lss_kpt30', 'lss_kpt31'), id=51, color=[255, 0, 128]),
        52: dict(link=('lss_kpt31', 'lss_kpt32'), id=52, color=[255, 0, 128]),
        53: dict(link=('lss_kpt32', 'lss_kpt33'), id=53, color=[255, 0, 128]),
        54: dict(link=('lss_kpt33', 'lss_kpt6'), id=54, color=[255, 0, 128]),
        55: dict(link=('lss_kpt6', 'lss_kpt5'), id=55, color=[255, 0, 128]),
        56: dict(link=('lss_kpt5', 'lss_kpt4'), id=56, color=[255, 0, 128]),
        57: dict(link=('lss_kpt4', 'lss_kpt3'), id=57, color=[255, 0, 128]),
        58: dict(link=('lss_kpt3', 'lss_kpt2'), id=58, color=[255, 0, 128]),
        59: dict(link=('lss_kpt6', 'lss_kpt1'), id=59, color=[255, 0, 128]),
        60: dict(link=('sso_kpt1', 'sso_kpt4'), id=60, color=[128, 0, 255]),
        61: dict(link=('sso_kpt4', 'sso_kpt7'), id=61, color=[128, 0, 255]),
        62: dict(link=('sso_kpt7', 'sso_kpt8'), id=62, color=[128, 0, 255]),
        63: dict(link=('sso_kpt8', 'sso_kpt9'), id=63, color=[128, 0, 255]),
        64: dict(link=('sso_kpt9', 'sso_kpt10'), id=64, color=[128, 0, 255]),
        65: dict(link=('sso_kpt10', 'sso_kpt11'), id=65, color=[128, 0, 255]),
        66: dict(link=('sso_kpt11', 'sso_kpt12'), id=66, color=[128, 0, 255]),
        67: dict(link=('sso_kpt12', 'sso_kpt13'), id=67, color=[128, 0, 255]),
        68: dict(link=('sso_kpt13', 'sso_kpt14'), id=68, color=[128, 0, 255]),
        69: dict(link=('sso_kpt14', 'sso_kpt15'), id=69, color=[128, 0, 255]),
        70: dict(link=('sso_kpt15', 'sso_kpt16'), id=70, color=[128, 0, 255]),
        71: dict(link=('sso_kpt16', 'sso_kpt31'), id=71, color=[128, 0, 255]),
        72: dict(link=('sso_kpt31', 'sso_kpt30'), id=72, color=[128, 0, 255]),
        73: dict(link=('sso_kpt30', 'sso_kpt2'), id=73, color=[128, 0, 255]),
        74: dict(link=('sso_kpt2', 'sso_kpt3'), id=74, color=[128, 0, 255]),
        75: dict(link=('sso_kpt3', 'sso_kpt4'), id=75, color=[128, 0, 255]),
        76: dict(link=('sso_kpt1', 'sso_kpt6'), id=76, color=[128, 0, 255]),
        77: dict(link=('sso_kpt6', 'sso_kpt25'), id=77, color=[128, 0, 255]),
        78: dict(link=('sso_kpt25', 'sso_kpt24'), id=78, color=[128, 0, 255]),
        79: dict(link=('sso_kpt24', 'sso_kpt23'), id=79, color=[128, 0, 255]),
        80: dict(link=('sso_kpt23', 'sso_kpt22'), id=80, color=[128, 0, 255]),
        81: dict(link=('sso_kpt22', 'sso_kpt21'), id=81, color=[128, 0, 255]),
        82: dict(link=('sso_kpt21', 'sso_kpt20'), id=82, color=[128, 0, 255]),
        83: dict(link=('sso_kpt20', 'sso_kpt19'), id=83, color=[128, 0, 255]),
        84: dict(link=('sso_kpt19', 'sso_kpt18'), id=84, color=[128, 0, 255]),
        85: dict(link=('sso_kpt18', 'sso_kpt17'), id=85, color=[128, 0, 255]),
        86: dict(link=('sso_kpt17', 'sso_kpt29'), id=86, color=[128, 0, 255]),
        87: dict(link=('sso_kpt29', 'sso_kpt28'), id=87, color=[128, 0, 255]),
        88: dict(link=('sso_kpt28', 'sso_kpt27'), id=88, color=[128, 0, 255]),
        89: dict(link=('sso_kpt27', 'sso_kpt26'), id=89, color=[128, 0, 255]),
        90: dict(link=('sso_kpt26', 'sso_kpt5'), id=90, color=[128, 0, 255]),
        91: dict(link=('sso_kpt5', 'sso_kpt6'), id=91, color=[128, 0, 255]),
        92: dict(link=('lso_kpt1', 'lso_kpt2'), id=92, color=[0, 128, 255]),
        93: dict(link=('lso_kpt2', 'lso_kpt7'), id=93, color=[0, 128, 255]),
        94: dict(link=('lso_kpt7', 'lso_kpt8'), id=94, color=[0, 128, 255]),
        95: dict(link=('lso_kpt8', 'lso_kpt9'), id=95, color=[0, 128, 255]),
        96: dict(link=('lso_kpt9', 'lso_kpt10'), id=96, color=[0, 128, 255]),
        97: dict(link=('lso_kpt10', 'lso_kpt11'), id=97, color=[0, 128, 255]),
        98: dict(link=('lso_kpt11', 'lso_kpt12'), id=98, color=[0, 128, 255]),
        99: dict(link=('lso_kpt12', 'lso_kpt13'), id=99, color=[0, 128, 255]),
        100: dict(link=('lso_kpt13', 'lso_kpt14'), id=100, color=[0, 128, 255]),
        101: dict(link=('lso_kpt14', 'lso_kpt15'), id=101, color=[0, 128, 255]),
        102: dict(link=('lso_kpt15', 'lso_kpt16'), id=102, color=[0, 128, 255]),
        103: dict(link=('lso_kpt16', 'lso_kpt17'), id=103, color=[0, 128, 255]),
        104: dict(link=('lso_kpt17', 'lso_kpt18'), id=104, color=[0, 128, 255]),
        105: dict(link=('lso_kpt18', 'lso_kpt19'), id=105, color=[0, 128, 255]),
        106: dict(link=('lso_kpt19', 'lso_kpt20'), id=106, color=[0, 128, 255]),
        107: dict(link=('lso_kpt20', 'lso_kpt39'), id=107, color=[0, 128, 255]),
        108: dict(link=('lso_kpt39', 'lso_kpt38'), id=108, color=[0, 128, 255]),
        109: dict(link=('lso_kpt38', 'lso_kpt4'), id=109, color=[0, 128, 255]),
        110: dict(link=('lso_kpt4', 'lso_kpt3'), id=110, color=[0, 128, 255]),
        111: dict(link=('lso_kpt3', 'lso_kpt2'), id=111, color=[0, 128, 255]),
        112: dict(link=('lso_kpt1', 'lso_kpt6'), id=112, color=[0, 128, 255]),
        113: dict(link=('lso_kpt6', 'lso_kpt33'), id=113, color=[0, 128, 255]),
        114: dict(link=('lso_kpt33', 'lso_kpt32'), id=114, color=[0, 128, 255]),
        115: dict(link=('lso_kpt32', 'lso_kpt31'), id=115, color=[0, 128, 255]),
        116: dict(link=('lso_kpt31', 'lso_kpt30'), id=116, color=[0, 128, 255]),
        117: dict(link=('lso_kpt30', 'lso_kpt29'), id=117, color=[0, 128, 255]),
        118: dict(link=('lso_kpt29', 'lso_kpt28'), id=118, color=[0, 128, 255]),
        119: dict(link=('lso_kpt28', 'lso_kpt27'), id=119, color=[0, 128, 255]),
        120: dict(link=('lso_kpt27', 'lso_kpt26'), id=120, color=[0, 128, 255]),
        121: dict(link=('lso_kpt26', 'lso_kpt25'), id=121, color=[0, 128, 255]),
        122: dict(link=('lso_kpt25', 'lso_kpt24'), id=122, color=[0, 128, 255]),
        123: dict(link=('lso_kpt24', 'lso_kpt23'), id=123, color=[0, 128, 255]),
        124: dict(link=('lso_kpt23', 'lso_kpt22'), id=124, color=[0, 128, 255]),
        125: dict(link=('lso_kpt22', 'lso_kpt21'), id=125, color=[0, 128, 255]),
        126: dict(link=('lso_kpt21', 'lso_kpt37'), id=126, color=[0, 128, 255]),
        127: dict(link=('lso_kpt37', 'lso_kpt36'), id=127, color=[0, 128, 255]),
        128: dict(link=('lso_kpt36', 'lso_kpt35'), id=128, color=[0, 128, 255]),
        129: dict(link=('lso_kpt35', 'lso_kpt34'), id=129, color=[0, 128, 255]),
        130: dict(link=('lso_kpt34', 'lso_kpt5'), id=130, color=[0, 128, 255]),
        131: dict(link=('lso_kpt5', 'lso_kpt6'), id=131, color=[0, 128, 255]),
        132: dict(link=('vest_kpt1', 'vest_kpt2'), id=132, color=[0, 128, 128]),
        133: dict(link=('vest_kpt2', 'vest_kpt7'), id=133, color=[0, 128, 128]),
        134: dict(link=('vest_kpt7', 'vest_kpt8'), id=134, color=[0, 128, 128]),
        135: dict(link=('vest_kpt8', 'vest_kpt9'), id=135, color=[0, 128, 128]),
        136: dict(link=('vest_kpt9', 'vest_kpt10'), id=136, color=[0, 128, 128]),
        137: dict(link=('vest_kpt10', 'vest_kpt11'), id=137, color=[0, 128, 128]),
        138: dict(link=('vest_kpt11', 'vest_kpt12'), id=138, color=[0, 128, 128]),
        139: dict(link=('vest_kpt12', 'vest_kpt13'), id=139, color=[0, 128, 128]),
        140: dict(link=('vest_kpt13', 'vest_kpt14'), id=140, color=[0, 128, 128]),
        141: dict(link=('vest_kpt14', 'vest_kpt15'), id=141, color=[0, 128, 128]),
        142: dict(link=('vest_kpt15', 'vest_kpt6'), id=142, color=[0, 128, 128]),
        143: dict(link=('vest_kpt6', 'vest_kpt1'), id=143, color=[0, 128, 128]),
        144: dict(link=('vest_kpt2', 'vest_kpt3'), id=144, color=[0, 128, 128]),
        145: dict(link=('vest_kpt3', 'vest_kpt4'), id=145, color=[0, 128, 128]),
        146: dict(link=('vest_kpt4', 'vest_kpt5'), id=146, color=[0, 128, 128]),
        147: dict(link=('vest_kpt5', 'vest_kpt6'), id=147, color=[0, 128, 128]),
        148: dict(link=('sling_kpt1', 'sling_kpt2'), id=148, color=[0, 0, 128]),
        149: dict(link=('sling_kpt2', 'sling_kpt8'), id=149, color=[0, 0, 128]),
        150: dict(link=('sling_kpt8', 'sling_kpt9'), id=150, color=[0, 0, 128]),
        151: dict(link=('sling_kpt9', 'sling_kpt10'), id=151, color=[0, 0, 128]),
        152: dict(link=('sling_kpt10', 'sling_kpt11'), id=152, color=[0, 0, 128]),
        153: dict(link=('sling_kpt11', 'sling_kpt12'), id=153, color=[0, 0, 128]),
        154: dict(link=('sling_kpt12', 'sling_kpt13'), id=154, color=[0, 0, 128]),
        155: dict(link=('sling_kpt13', 'sling_kpt14'), id=155, color=[0, 0, 128]),
        156: dict(link=('sling_kpt14', 'sling_kpt6'), id=156, color=[0, 0, 128]),
        157: dict(link=('sling_kpt2', 'sling_kpt7'), id=157, color=[0, 0, 128]),
        158: dict(link=('sling_kpt6', 'sling_kpt15'), id=158, color=[0, 0, 128]),
        159: dict(link=('sling_kpt2', 'sling_kpt3'), id=159, color=[0, 0, 128]),
        160: dict(link=('sling_kpt3', 'sling_kpt4'), id=160, color=[0, 0, 128]),
        161: dict(link=('sling_kpt4', 'sling_kpt5'), id=161, color=[0, 0, 128]),
        162: dict(link=('sling_kpt5', 'sling_kpt6'), id=162, color=[0, 0, 128]),
        163: dict(link=('sling_kpt1', 'sling_kpt6'), id=163, color=[0, 0, 128]),
        164: dict(link=('shorts_kpt1', 'shorts_kpt4'), id=164, color=[128, 128, 128]),
        165: dict(link=('shorts_kpt4', 'shorts_kpt5'), id=165, color=[128, 128, 128]),
        166: dict(link=('shorts_kpt5', 'shorts_kpt6'), id=166, color=[128, 128, 128]),
        167: dict(link=('shorts_kpt6', 'shorts_kpt7'), id=167, color=[128, 128, 128]),
        168: dict(link=('shorts_kpt7', 'shorts_kpt8'), id=168, color=[128, 128, 128]),
        169: dict(link=('shorts_kpt8', 'shorts_kpt9'), id=169, color=[128, 128, 128]),
        170: dict(link=('shorts_kpt9', 'shorts_kpt10'), id=170, color=[128, 128, 128]),
        171: dict(link=('shorts_kpt10', 'shorts_kpt3'), id=171, color=[128, 128, 128]),
        172: dict(link=('shorts_kpt3', 'shorts_kpt2'), id=172, color=[128, 128, 128]),
        173: dict(link=('shorts_kpt2', 'shorts_kpt1'), id=173, color=[128, 128, 128]),
        174: dict(link=('trousers_kpt1', 'trousers_kpt4'), id=174, color=[128, 0, 128]),
        175: dict(link=('trousers_kpt4', 'trousers_kpt5'), id=175, color=[128, 0, 128]),
        176: dict(link=('trousers_kpt5', 'trousers_kpt6'), id=176, color=[128, 0, 128]),
        177: dict(link=('trousers_kpt6', 'trousers_kpt7'), id=177, color=[128, 0, 128]),
        178: dict(link=('trousers_kpt7', 'trousers_kpt8'), id=178, color=[128, 0, 128]),
        179: dict(link=('trousers_kpt8', 'trousers_kpt9'), id=179, color=[128, 0, 128]),
        180: dict(link=('trousers_kpt9', 'trousers_kpt10'), id=180, color=[128, 0, 128]),
        181: dict(link=('trousers_kpt10', 'trousers_kpt11'), id=181, color=[128, 0, 128]),
        182: dict(link=('trousers_kpt11', 'trousers_kpt12'), id=182, color=[128, 0, 128]),
        183: dict(link=('trousers_kpt12', 'trousers_kpt13'), id=183, color=[128, 0, 128]),
        184: dict(link=('trousers_kpt13', 'trousers_kpt14'), id=184, color=[128, 0, 128]),
        185: dict(link=('trousers_kpt14', 'trousers_kpt3'), id=185, color=[128, 0, 128]),
        186: dict(link=('trousers_kpt3', 'trousers_kpt2'), id=186, color=[128, 0, 128]),
        187: dict(link=('trousers_kpt2', 'trousers_kpt1'), id=187, color=[128, 0, 128]),
        188: dict(link=('skirt_kpt1', 'skirt_kpt4'), id=188, color=[64, 128, 128]),
        189: dict(link=('skirt_kpt4', 'skirt_kpt5'), id=189, color=[64, 128, 128]),
        190: dict(link=('skirt_kpt5', 'skirt_kpt6'), id=190, color=[64, 128, 128]),
        191: dict(link=('skirt_kpt6', 'skirt_kpt7'), id=191, color=[64, 128, 128]),
        192: dict(link=('skirt_kpt7', 'skirt_kpt8'), id=192, color=[64, 128, 128]),
        193: dict(link=('skirt_kpt8', 'skirt_kpt3'), id=193, color=[64, 128, 128]),
        194: dict(link=('skirt_kpt3', 'skirt_kpt2'), id=194, color=[64, 128, 128]),
        195: dict(link=('skirt_kpt2', 'skirt_kpt1'), id=195, color=[64, 128, 128]),
        196: dict(link=('ssd_kpt1', 'ssd_kpt2'), id=196, color=[64, 64, 128]),
        197: dict(link=('ssd_kpt2', 'ssd_kpt7'), id=197, color=[64, 64, 128]),
        198: dict(link=('ssd_kpt7', 'ssd_kpt8'), id=198, color=[64, 64, 128]),
        199: dict(link=('ssd_kpt8', 'ssd_kpt9'), id=199, color=[64, 64, 128]),
        200: dict(link=('ssd_kpt9', 'ssd_kpt10'), id=200, color=[64, 64, 128]),
        201: dict(link=('ssd_kpt10', 'ssd_kpt11'), id=201, color=[64, 64, 128]),
        202: dict(link=('ssd_kpt11', 'ssd_kpt12'), id=202, color=[64, 64, 128]),
        203: dict(link=('ssd_kpt12', 'ssd_kpt13'), id=203, color=[64, 64, 128]),
        204: dict(link=('ssd_kpt13', 'ssd_kpt14'), id=204, color=[64, 64, 128]),
        205: dict(link=('ssd_kpt14', 'ssd_kpt15'), id=205, color=[64, 64, 128]),
        206: dict(link=('ssd_kpt15', 'ssd_kpt16'), id=206, color=[64, 64, 128]),
        207: dict(link=('ssd_kpt16', 'ssd_kpt17'), id=207, color=[64, 64, 128]),
        208: dict(link=('ssd_kpt17', 'ssd_kpt18'), id=208, color=[64, 64, 128]),
        209: dict(link=('ssd_kpt18', 'ssd_kpt19'), id=209, color=[64, 64, 128]),
        210: dict(link=('ssd_kpt19', 'ssd_kpt20'), id=210, color=[64, 64, 128]),
        211: dict(link=('ssd_kpt20', 'ssd_kpt21'), id=211, color=[64, 64, 128]),
        212: dict(link=('ssd_kpt21', 'ssd_kpt22'), id=212, color=[64, 64, 128]),
        213: dict(link=('ssd_kpt22', 'ssd_kpt23'), id=213, color=[64, 64, 128]),
        214: dict(link=('ssd_kpt23', 'ssd_kpt24'), id=214, color=[64, 64, 128]),
        215: dict(link=('ssd_kpt24', 'ssd_kpt25'), id=215, color=[64, 64, 128]),
        216: dict(link=('ssd_kpt25', 'ssd_kpt26'), id=216, color=[64, 64, 128]),
        217: dict(link=('ssd_kpt26', 'ssd_kpt27'), id=217, color=[64, 64, 128]),
        218: dict(link=('ssd_kpt27', 'ssd_kpt28'), id=218, color=[64, 64, 128]),
        219: dict(link=('ssd_kpt28', 'ssd_kpt29'), id=219, color=[64, 64, 128]),
        220: dict(link=('ssd_kpt29', 'ssd_kpt6'), id=220, color=[64, 64, 128]),
        221: dict(link=('ssd_kpt6', 'ssd_kpt5'), id=221, color=[64, 64, 128]),
        222: dict(link=('ssd_kpt5', 'ssd_kpt4'), id=222, color=[64, 64, 128]),
        223: dict(link=('ssd_kpt4', 'ssd_kpt3'), id=223, color=[64, 64, 128]),
        224: dict(link=('ssd_kpt3', 'ssd_kpt2'), id=224, color=[64, 64, 128]),
        225: dict(link=('ssd_kpt6', 'ssd_kpt1'), id=225, color=[64, 64, 128]),
        226: dict(link=('lsd_kpt1', 'lsd_kpt2'), id=226, color=[128, 64, 0]),
        227: dict(link=('lsd_kpt2', 'lsd_kpt7'), id=227, color=[128, 64, 0]),
        228: dict(link=('lsd_kpt7', 'lsd_kpt8'), id=228, color=[128, 64, 0]),
        229: dict(link=('lsd_kpt8', 'lsd_kpt9'), id=229, color=[128, 64, 0]),
        230: dict(link=('lsd_kpt9', 'lsd_kpt10'), id=230, color=[128, 64, 0]),
        231: dict(link=('lsd_kpt10', 'lsd_kpt11'), id=231, color=[128, 64, 0]),
        232: dict(link=('lsd_kpt11', 'lsd_kpt12'), id=232, color=[128, 64, 0]),
        233: dict(link=('lsd_kpt12', 'lsd_kpt13'), id=233, color=[128, 64, 0]),
        234: dict(link=('lsd_kpt13', 'lsd_kpt14'), id=234, color=[128, 64, 0]),
        235: dict(link=('lsd_kpt14', 'lsd_kpt15'), id=235, color=[128, 64, 0]),
        236: dict(link=('lsd_kpt15', 'lsd_kpt16'), id=236, color=[128, 64, 0]),
        237: dict(link=('lsd_kpt16', 'lsd_kpt17'), id=237, color=[128, 64, 0]),
        238: dict(link=('lsd_kpt17', 'lsd_kpt18'), id=238, color=[128, 64, 0]),
        239: dict(link=('lsd_kpt18', 'lsd_kpt19'), id=239, color=[128, 64, 0]),
        240: dict(link=('lsd_kpt19', 'lsd_kpt20'), id=240, color=[128, 64, 0]),
        241: dict(link=('lsd_kpt20', 'lsd_kpt21'), id=241, color=[128, 64, 0]),
        242: dict(link=('lsd_kpt21', 'lsd_kpt22'), id=242, color=[128, 64, 0]),
        243: dict(link=('lsd_kpt22', 'lsd_kpt23'), id=243, color=[128, 64, 0]),
        244: dict(link=('lsd_kpt23', 'lsd_kpt24'), id=244, color=[128, 64, 0]),
        245: dict(link=('lsd_kpt24', 'lsd_kpt25'), id=245, color=[128, 64, 0]),
        246: dict(link=('lsd_kpt25', 'lsd_kpt26'), id=246, color=[128, 64, 0]),
        247: dict(link=('lsd_kpt26', 'lsd_kpt27'), id=247, color=[128, 64, 0]),
        248: dict(link=('lsd_kpt27', 'lsd_kpt28'), id=248, color=[128, 64, 0]),
        249: dict(link=('lsd_kpt28', 'lsd_kpt29'), id=249, color=[128, 64, 0]),
        250: dict(link=('lsd_kpt29', 'lsd_kpt30'), id=250, color=[128, 64, 0]),
        251: dict(link=('lsd_kpt30', 'lsd_kpt31'), id=251, color=[128, 64, 0]),
        252: dict(link=('lsd_kpt31', 'lsd_kpt32'), id=252, color=[128, 64, 0]),
        253: dict(link=('lsd_kpt32', 'lsd_kpt33'), id=253, color=[128, 64, 0]),
        254: dict(link=('lsd_kpt33', 'lsd_kpt34'), id=254, color=[128, 64, 0]),
        255: dict(link=('lsd_kpt34', 'lsd_kpt35'), id=255, color=[128, 64, 0]),
        256: dict(link=('lsd_kpt35', 'lsd_kpt36'), id=256, color=[128, 64, 0]),
        257: dict(link=('lsd_kpt36', 'lsd_kpt37'), id=257, color=[128, 64, 0]),
        258: dict(link=('lsd_kpt37', 'lsd_kpt6'), id=258, color=[128, 64, 0]),
        259: dict(link=('lsd_kpt6', 'lsd_kpt5'), id=259, color=[128, 64, 0]),
        260: dict(link=('lsd_kpt5', 'lsd_kpt4'), id=260, color=[128, 64, 0]),
        261: dict(link=('lsd_kpt4', 'lsd_kpt3'), id=261, color=[128, 64, 0]),
        262: dict(link=('lsd_kpt3', 'lsd_kpt2'), id=262, color=[128, 64, 0]),
        263: dict(link=('lsd_kpt6', 'lsd_kpt1'), id=263, color=[128, 64, 0]),
        264: dict(link=('vd_kpt1', 'vd_kpt2'), id=264, color=[128, 64, 255]),
        265: dict(link=('vd_kpt2', 'vd_kpt7'), id=265, color=[128, 64, 255]),
        266: dict(link=('vd_kpt7', 'vd_kpt8'), id=266, color=[128, 64, 255]),
        267: dict(link=('vd_kpt8', 'vd_kpt9'), id=267, color=[128, 64, 255]),
        268: dict(link=('vd_kpt9', 'vd_kpt10'), id=268, color=[128, 64, 255]),
        269: dict(link=('vd_kpt10', 'vd_kpt11'), id=269, color=[128, 64, 255]),
        270: dict(link=('vd_kpt11', 'vd_kpt12'), id=270, color=[128, 64, 255]),
        271: dict(link=('vd_kpt12', 'vd_kpt13'), id=271, color=[128, 64, 255]),
        272: dict(link=('vd_kpt13', 'vd_kpt14'), id=272, color=[128, 64, 255]),
        273: dict(link=('vd_kpt14', 'vd_kpt15'), id=273, color=[128, 64, 255]),
        274: dict(link=('vd_kpt15', 'vd_kpt16'), id=274, color=[128, 64, 255]),
        275: dict(link=('vd_kpt16', 'vd_kpt17'), id=275, color=[128, 64, 255]),
        276: dict(link=('vd_kpt17', 'vd_kpt18'), id=276, color=[128, 64, 255]),
        277: dict(link=('vd_kpt18', 'vd_kpt19'), id=277, color=[128, 64, 255]),
        278: dict(link=('vd_kpt19', 'vd_kpt6'), id=278, color=[128, 64, 255]),
        279: dict(link=('vd_kpt6', 'vd_kpt5'), id=279, color=[128, 64, 255]),
        280: dict(link=('vd_kpt5', 'vd_kpt4'), id=280, color=[128, 64, 255]),
        281: dict(link=('vd_kpt4', 'vd_kpt3'), id=281, color=[128, 64, 255]),
        282: dict(link=('vd_kpt3', 'vd_kpt2'), id=282, color=[128, 64, 255]),
        283: dict(link=('vd_kpt6', 'vd_kpt1'), id=283, color=[128, 64, 255]),
        284: dict(link=('sd_kpt1', 'sd_kpt2'), id=284, color=[128, 64, 0]),
        285: dict(link=('sd_kpt2', 'sd_kpt8'), id=285, color=[128, 64, 0]),
        286: dict(link=('sd_kpt8', 'sd_kpt9'), id=286, color=[128, 64, 0]),
        287: dict(link=('sd_kpt9', 'sd_kpt10'), id=287, color=[128, 64, 0]),
        288: dict(link=('sd_kpt10', 'sd_kpt11'), id=288, color=[128, 64, 0]),
        289: dict(link=('sd_kpt11', 'sd_kpt12'), id=289, color=[128, 64, 0]),
        290: dict(link=('sd_kpt12', 'sd_kpt13'), id=290, color=[128, 64, 0]),
        291: dict(link=('sd_kpt13', 'sd_kpt14'), id=291, color=[128, 64, 0]),
        292: dict(link=('sd_kpt14', 'sd_kpt15'), id=292, color=[128, 64, 0]),
        293: dict(link=('sd_kpt15', 'sd_kpt16'), id=293, color=[128, 64, 0]),
        294: dict(link=('sd_kpt16', 'sd_kpt17'), id=294, color=[128, 64, 0]),
        295: dict(link=('sd_kpt17', 'sd_kpt18'), id=295, color=[128, 64, 0]),
        296: dict(link=('sd_kpt18', 'sd_kpt6'), id=296, color=[128, 64, 0]),
        297: dict(link=('sd_kpt6', 'sd_kpt5'), id=297, color=[128, 64, 0]),
        298: dict(link=('sd_kpt5', 'sd_kpt4'), id=298, color=[128, 64, 0]),
        299: dict(link=('sd_kpt4', 'sd_kpt3'), id=299, color=[128, 64, 0]),
        300: dict(link=('sd_kpt3', 'sd_kpt2'), id=300, color=[128, 64, 0]),
        301: dict(link=('sd_kpt2', 'sd_kpt7'), id=301, color=[128, 64, 0]),
        302: dict(link=('sd_kpt6', 'sd_kpt19'), id=302, color=[128, 64, 0]),
        303: dict(link=('sd_kpt6', 'sd_kpt1'), id=303, color=[128, 64, 0])
    }),
    joint_weights=[1.0] * 294,
    sigmas=[])
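# sigmas is left empty: the PCK/AUC/EPE evaluators used below do not need
# per-keypoint OKS sigmas; an OKS-based metric such as CocoMetric would
# require 294 sigma values here.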
param_scheduler = [
    dict(
        type='LinearLR', begin=0, end=500, start_factor=0.001, by_epoch=False),
    dict(
        type='MultiStepLR',
        begin=0,
        end=60,
        milestones=[20, 40],
        gamma=0.1,
        by_epoch=True)
]
optim_wrapper = dict(optimizer=dict(type='Adam', lr=0.0005))
auto_scale_lr = dict(base_batch_size=512)
dataset_type = 'DeepFashion2Dataset'
data_mode = 'topdown'
data_root = 'data/deepfashion2/'
codec = dict(
    type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2)
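# The MSRAHeatmap codec pairs a (w=192, h=256) input with a (48, 64) heatmap,
# i.e. an overall output stride of 4 (192 / 48 = 256 / 64 = 4), which is the
# usual stride of a ResNet backbone followed by a deconvolutional heatmap head.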
train_pipeline = [
    dict(type='LoadImage'),
    dict(type='GetBBoxCenterScale'),
    dict(type='RandomFlip', direction='horizontal'),
    dict(
        type='RandomBBoxTransform',
        shift_prob=0,
        rotate_factor=60,
        scale_factor=(0.75, 1.25)),
    dict(type='TopdownAffine', input_size=(192, 256)),
    dict(type='GenerateTarget', encoder=codec),
    dict(type='PackPoseInputs')
]
val_pipeline = [
    dict(type='LoadImage', backend_args=dict(backend='local')),
    dict(type='GetBBoxCenterScale'),
    dict(type='TopdownAffine', input_size=(192, 256)),
    dict(type='PackPoseInputs')
]
train_dataloader = dict(
    batch_size=64,
    num_workers=6,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type='DeepFashion2Dataset',
        data_root='data/deepfashion2/',
        data_mode='topdown',
        ann_file='train/deepfashion2_trousers.json',
        data_prefix=dict(img='train/image/'),
        pipeline=train_pipeline))
val_dataloader = dict(
    batch_size=32,
    num_workers=4,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type='DeepFashion2Dataset',
        data_root='data/deepfashion2/',
        data_mode='topdown',
        ann_file='validation/deepfashion2_trousers.json',
        data_prefix=dict(img='validation/image/'),
        test_mode=True,
        pipeline=val_pipeline))
test_dataloader = val_dataloader
channel_cfg = dict(
    num_output_channels=294,
    dataset_joints=294,
    dataset_channel=[list(range(294))],
    inference_channel=list(range(294)))
model = dict(
    type='TopdownPoseEstimator',
    data_preprocessor=dict(
        type='PoseDataPreprocessor',
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        bgr_to_rgb=True),
    backbone=dict(
        type='ResNet',
        depth=50,
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    head=dict(
        type='HeatmapHead',
        in_channels=2048,
        out_channels=294,
        loss=dict(type='KeypointMSELoss', use_target_weight=True),
        decoder=codec),
    test_cfg=dict(flip_test=True, flip_mode='heatmap', shift_heatmap=True))
val_evaluator = [
    dict(type='PCKAccuracy', thr=0.2),
    dict(type='AUC'),
    dict(type='EPE')
]
test_evaluator = val_evaluator
launcher = 'pytorch'
work_dir = './work_dirs/td_hm_res50_4xb64-60e_deepfashion2_trousers_256x192'
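Hand-written keypoint tables of this size are easy to get subtly wrong (mismatched ids, non-reciprocal flip pairs, duplicated link ids), so a small consistency check is worth running whenever a file like this is edited. The sketch below is illustrative only: it assumes the config is loadable with mmengine's Config.fromfile (the standard mechanism for MMPose configs), and check_dataset_info / cfg_path are hypothetical names, not part of MMPose.

from mmengine.config import Config


def check_dataset_info(cfg_path):
    """Sanity-check keypoint_info / skeleton_info consistency (sketch)."""
    info = Config.fromfile(cfg_path).dataset_info
    kpts = info['keypoint_info']
    by_name = {v['name']: v for v in kpts.values()}
    for key, kpt in kpts.items():
        # every keypoint id must equal its dict key
        assert kpt['id'] == key, f"{kpt['name']}: id {kpt['id']} != key {key}"
        # flip pairs must be reciprocal: a swaps to b implies b swaps to a
        if kpt['swap']:
            partner = by_name[kpt['swap']]
            assert partner['swap'] == kpt['name'], (
                f"non-reciprocal swap: {kpt['name']} <-> {kpt['swap']}")
    # skeleton link ids must be unique
    link_ids = [v['id'] for v in info['skeleton_info'].values()]
    assert len(link_ids) == len(set(link_ids)), 'duplicate skeleton link id'
    # one weight per keypoint
    assert len(info['joint_weights']) == len(kpts), 'joint_weights mismatch'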
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/canvasframemanager-plugin.js
DELETED
@@ -1,19 +0,0 @@
import CanvasFrameManager from './canvasframemanager.js';

class CanvasFrameManagerPlugin extends Phaser.Plugins.BasePlugin {

    constructor(pluginManager) {
        super(pluginManager);
    }

    start() {
        var eventEmitter = this.game.events;
        eventEmitter.on('destroy', this.destroy, this);
    }

    add(scene, key, width, height, cellWidth, cellHeight, fillColor) {
        return new CanvasFrameManager(scene, key, width, height, cellWidth, cellHeight, fillColor);
    }
}

export default CanvasFrameManagerPlugin;
spaces/Aki004/herta-so-vits/cluster/train_cluster.py
DELETED
@@ -1,89 +0,0 @@
-import os
-from glob import glob
-from pathlib import Path
-import torch
-import logging
-import argparse
-import torch
-import numpy as np
-from sklearn.cluster import KMeans, MiniBatchKMeans
-import tqdm
-logging.basicConfig(level=logging.INFO)
-logger = logging.getLogger(__name__)
-import time
-import random
-
-def train_cluster(in_dir, n_clusters, use_minibatch=True, verbose=False):
-
-    logger.info(f"Loading features from {in_dir}")
-    features = []
-    nums = 0
-    for path in tqdm.tqdm(in_dir.glob("*.soft.pt")):
-        features.append(torch.load(path).squeeze(0).numpy().T)
-        # print(features[-1].shape)
-    features = np.concatenate(features, axis=0)
-    print(nums, features.nbytes / 1024**2, "MB , shape:", features.shape, features.dtype)
-    features = features.astype(np.float32)
-    logger.info(f"Clustering features of shape: {features.shape}")
-    t = time.time()
-    if use_minibatch:
-        kmeans = MiniBatchKMeans(n_clusters=n_clusters, verbose=verbose, batch_size=4096, max_iter=80).fit(features)
-    else:
-        kmeans = KMeans(n_clusters=n_clusters, verbose=verbose).fit(features)
-    print(time.time() - t, "s")
-
-    x = {
-        "n_features_in_": kmeans.n_features_in_,
-        "_n_threads": kmeans._n_threads,
-        "cluster_centers_": kmeans.cluster_centers_,
-    }
-    print("end")
-
-    return x
-
-
-if __name__ == "__main__":
-
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--dataset', type=Path, default="./dataset/44k",
-                        help='path of training data directory')
-    parser.add_argument('--output', type=Path, default="logs/44k",
-                        help='path of model output directory')
-
-    args = parser.parse_args()
-
-    checkpoint_dir = args.output
-    dataset = args.dataset
-    n_clusters = 10000
-
-    ckpt = {}
-    for spk in os.listdir(dataset):
-        if os.path.isdir(dataset / spk):
-            print(f"train kmeans for {spk}...")
-            in_dir = dataset / spk
-            x = train_cluster(in_dir, n_clusters, verbose=False)
-            ckpt[spk] = x
-
-    checkpoint_path = checkpoint_dir / f"kmeans_{n_clusters}.pt"
-    checkpoint_path.parent.mkdir(exist_ok=True, parents=True)
-    torch.save(
-        ckpt,
-        checkpoint_path,
-    )
-
-
-    # import cluster
-    # for spk in tqdm.tqdm(os.listdir("dataset")):
-    #     if os.path.isdir(f"dataset/{spk}"):
-    #         print(f"start kmeans inference for {spk}...")
-    #         for feature_path in tqdm.tqdm(glob(f"dataset/{spk}/*.discrete.npy", recursive=True)):
-    #             mel_path = feature_path.replace(".discrete.npy", ".mel.npy")
-    #             mel_spectrogram = np.load(mel_path)
-    #             feature_len = mel_spectrogram.shape[-1]
-    #             c = np.load(feature_path)
-    #             c = utils.tools.repeat_expand_2d(torch.FloatTensor(c), feature_len).numpy()
-    #             feature = c.T
-    #             feature_class = cluster.get_cluster_result(feature, spk)
-    #             np.save(feature_path.replace(".discrete.npy", ".discrete_class.npy"), feature_class)
-
-
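
The script stores one dict of KMeans attributes per speaker in `kmeans_10000.pt`. A minimal sketch of how such a checkpoint can be consumed afterwards (the speaker key is a placeholder; only the `ckpt` layout comes from the script above):

import torch
import numpy as np

# Load the per-speaker checkpoint written by train_cluster.py.
ckpt = torch.load("logs/44k/kmeans_10000.pt")
centers = ckpt["speaker0"]["cluster_centers_"]  # hypothetical speaker key, shape (n_clusters, feat_dim)

def nearest_cluster(features: np.ndarray) -> np.ndarray:
    """Assign each row of `features` to its closest k-means center."""
    # (n, 1, d) - (1, k, d) -> squared distances of shape (n, k)
    d2 = ((features[:, None, :] - centers[None, :, :]) ** 2).sum(-1)
    return d2.argmin(axis=1)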
spaces/Alpaca233/SadTalker/src/facerender/modules/keypoint_detector.py
DELETED
@@ -1,179 +0,0 @@
-from torch import nn
-import torch
-import torch.nn.functional as F
-
-from src.facerender.sync_batchnorm import SynchronizedBatchNorm2d as BatchNorm2d
-from src.facerender.modules.util import KPHourglass, make_coordinate_grid, AntiAliasInterpolation2d, ResBottleneck
-
-
-class KPDetector(nn.Module):
-    """
-    Detecting canonical keypoints. Return keypoint position and jacobian near each keypoint.
-    """
-
-    def __init__(self, block_expansion, feature_channel, num_kp, image_channel, max_features, reshape_channel, reshape_depth,
-                 num_blocks, temperature, estimate_jacobian=False, scale_factor=1, single_jacobian_map=False):
-        super(KPDetector, self).__init__()
-
-        self.predictor = KPHourglass(block_expansion, in_features=image_channel,
-                                     max_features=max_features, reshape_features=reshape_channel, reshape_depth=reshape_depth, num_blocks=num_blocks)
-
-        # self.kp = nn.Conv3d(in_channels=self.predictor.out_filters, out_channels=num_kp, kernel_size=7, padding=3)
-        self.kp = nn.Conv3d(in_channels=self.predictor.out_filters, out_channels=num_kp, kernel_size=3, padding=1)
-
-        if estimate_jacobian:
-            self.num_jacobian_maps = 1 if single_jacobian_map else num_kp
-            # self.jacobian = nn.Conv3d(in_channels=self.predictor.out_filters, out_channels=9 * self.num_jacobian_maps, kernel_size=7, padding=3)
-            self.jacobian = nn.Conv3d(in_channels=self.predictor.out_filters, out_channels=9 * self.num_jacobian_maps, kernel_size=3, padding=1)
-            '''
-            initial as:
-            [[1 0 0]
-             [0 1 0]
-             [0 0 1]]
-            '''
-            self.jacobian.weight.data.zero_()
-            self.jacobian.bias.data.copy_(torch.tensor([1, 0, 0, 0, 1, 0, 0, 0, 1] * self.num_jacobian_maps, dtype=torch.float))
-        else:
-            self.jacobian = None
-
-        self.temperature = temperature
-        self.scale_factor = scale_factor
-        if self.scale_factor != 1:
-            self.down = AntiAliasInterpolation2d(image_channel, self.scale_factor)
-
-    def gaussian2kp(self, heatmap):
-        """
-        Extract the mean from a heatmap
-        """
-        shape = heatmap.shape
-        heatmap = heatmap.unsqueeze(-1)
-        grid = make_coordinate_grid(shape[2:], heatmap.type()).unsqueeze_(0).unsqueeze_(0)
-        value = (heatmap * grid).sum(dim=(2, 3, 4))
-        kp = {'value': value}
-
-        return kp
-
-    def forward(self, x):
-        if self.scale_factor != 1:
-            x = self.down(x)
-
-        feature_map = self.predictor(x)
-        prediction = self.kp(feature_map)
-
-        final_shape = prediction.shape
-        heatmap = prediction.view(final_shape[0], final_shape[1], -1)
-        heatmap = F.softmax(heatmap / self.temperature, dim=2)
-        heatmap = heatmap.view(*final_shape)
-
-        out = self.gaussian2kp(heatmap)
-
-        if self.jacobian is not None:
-            jacobian_map = self.jacobian(feature_map)
-            jacobian_map = jacobian_map.reshape(final_shape[0], self.num_jacobian_maps, 9, final_shape[2],
-                                                final_shape[3], final_shape[4])
-            heatmap = heatmap.unsqueeze(2)
-
-            jacobian = heatmap * jacobian_map
-            jacobian = jacobian.view(final_shape[0], final_shape[1], 9, -1)
-            jacobian = jacobian.sum(dim=-1)
-            jacobian = jacobian.view(jacobian.shape[0], jacobian.shape[1], 3, 3)
-            out['jacobian'] = jacobian
-
-        return out
-
-
-class HEEstimator(nn.Module):
-    """
-    Estimating head pose and expression.
-    """
-
-    def __init__(self, block_expansion, feature_channel, num_kp, image_channel, max_features, num_bins=66, estimate_jacobian=True):
-        super(HEEstimator, self).__init__()
-
-        self.conv1 = nn.Conv2d(in_channels=image_channel, out_channels=block_expansion, kernel_size=7, padding=3, stride=2)
-        self.norm1 = BatchNorm2d(block_expansion, affine=True)
-        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
-
-        self.conv2 = nn.Conv2d(in_channels=block_expansion, out_channels=256, kernel_size=1)
-        self.norm2 = BatchNorm2d(256, affine=True)
-
-        self.block1 = nn.Sequential()
-        for i in range(3):
-            self.block1.add_module('b1_' + str(i), ResBottleneck(in_features=256, stride=1))
-
-        self.conv3 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=1)
-        self.norm3 = BatchNorm2d(512, affine=True)
-        self.block2 = ResBottleneck(in_features=512, stride=2)
-
-        self.block3 = nn.Sequential()
-        for i in range(3):
-            self.block3.add_module('b3_' + str(i), ResBottleneck(in_features=512, stride=1))
-
-        self.conv4 = nn.Conv2d(in_channels=512, out_channels=1024, kernel_size=1)
-        self.norm4 = BatchNorm2d(1024, affine=True)
-        self.block4 = ResBottleneck(in_features=1024, stride=2)
-
-        self.block5 = nn.Sequential()
-        for i in range(5):
-            self.block5.add_module('b5_' + str(i), ResBottleneck(in_features=1024, stride=1))
-
-        self.conv5 = nn.Conv2d(in_channels=1024, out_channels=2048, kernel_size=1)
-        self.norm5 = BatchNorm2d(2048, affine=True)
-        self.block6 = ResBottleneck(in_features=2048, stride=2)
-
-        self.block7 = nn.Sequential()
-        for i in range(2):
-            self.block7.add_module('b7_' + str(i), ResBottleneck(in_features=2048, stride=1))
-
-        self.fc_roll = nn.Linear(2048, num_bins)
-        self.fc_pitch = nn.Linear(2048, num_bins)
-        self.fc_yaw = nn.Linear(2048, num_bins)
-
-        self.fc_t = nn.Linear(2048, 3)
-
-        self.fc_exp = nn.Linear(2048, 3 * num_kp)
-
-    def forward(self, x):
-        out = self.conv1(x)
-        out = self.norm1(out)
-        out = F.relu(out)
-        out = self.maxpool(out)
-
-        out = self.conv2(out)
-        out = self.norm2(out)
-        out = F.relu(out)
-
-        out = self.block1(out)
-
-        out = self.conv3(out)
-        out = self.norm3(out)
-        out = F.relu(out)
-        out = self.block2(out)
-
-        out = self.block3(out)
-
-        out = self.conv4(out)
-        out = self.norm4(out)
-        out = F.relu(out)
-        out = self.block4(out)
-
-        out = self.block5(out)
-
-        out = self.conv5(out)
-        out = self.norm5(out)
-        out = F.relu(out)
-        out = self.block6(out)
-
-        out = self.block7(out)
-
-        out = F.adaptive_avg_pool2d(out, 1)
-        out = out.view(out.shape[0], -1)
-
-        yaw = self.fc_roll(out)
-        pitch = self.fc_pitch(out)
-        roll = self.fc_yaw(out)
-        t = self.fc_t(out)
-        exp = self.fc_exp(out)
-
-        return {'yaw': yaw, 'pitch': pitch, 'roll': roll, 't': t, 'exp': exp}
-
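
`gaussian2kp` is a soft-argmax: the softmaxed heatmap is treated as a probability mass over a coordinate grid and the keypoint is its expectation. A minimal, self-contained 1-D sketch of the same idea (not the module above):

import torch

logits = torch.tensor([0.1, 2.0, 0.3])       # per-position scores
prob = torch.softmax(logits / 0.1, dim=0)    # a low temperature sharpens the peak
grid = torch.linspace(-1, 1, steps=3)        # normalized coordinates
keypoint = (prob * grid).sum()               # expectation, close to the peak position 0.0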
spaces/Andres99/Tune-A-Video-Training-UI/inference.py
DELETED
@@ -1,109 +0,0 @@
-from __future__ import annotations
-
-import gc
-import pathlib
-import sys
-import tempfile
-
-import gradio as gr
-import imageio
-import PIL.Image
-import torch
-from diffusers.utils.import_utils import is_xformers_available
-from einops import rearrange
-from huggingface_hub import ModelCard
-
-sys.path.append('Tune-A-Video')
-
-from tuneavideo.models.unet import UNet3DConditionModel
-from tuneavideo.pipelines.pipeline_tuneavideo import TuneAVideoPipeline
-
-
-class InferencePipeline:
-    def __init__(self, hf_token: str | None = None):
-        self.hf_token = hf_token
-        self.pipe = None
-        self.device = torch.device(
-            'cuda:0' if torch.cuda.is_available() else 'cpu')
-        self.model_id = None
-
-    def clear(self) -> None:
-        self.model_id = None
-        del self.pipe
-        self.pipe = None
-        torch.cuda.empty_cache()
-        gc.collect()
-
-    @staticmethod
-    def check_if_model_is_local(model_id: str) -> bool:
-        return pathlib.Path(model_id).exists()
-
-    @staticmethod
-    def get_model_card(model_id: str,
-                       hf_token: str | None = None) -> ModelCard:
-        if InferencePipeline.check_if_model_is_local(model_id):
-            card_path = (pathlib.Path(model_id) / 'README.md').as_posix()
-        else:
-            card_path = model_id
-        return ModelCard.load(card_path, token=hf_token)
-
-    @staticmethod
-    def get_base_model_info(model_id: str, hf_token: str | None = None) -> str:
-        card = InferencePipeline.get_model_card(model_id, hf_token)
-        return card.data.base_model
-
-    def load_pipe(self, model_id: str) -> None:
-        if model_id == self.model_id:
-            return
-        base_model_id = self.get_base_model_info(model_id, self.hf_token)
-        unet = UNet3DConditionModel.from_pretrained(
-            model_id,
-            subfolder='unet',
-            torch_dtype=torch.float16,
-            use_auth_token=self.hf_token)
-        pipe = TuneAVideoPipeline.from_pretrained(base_model_id,
-                                                  unet=unet,
-                                                  torch_dtype=torch.float16,
-                                                  use_auth_token=self.hf_token)
-        pipe = pipe.to(self.device)
-        if is_xformers_available():
-            pipe.unet.enable_xformers_memory_efficient_attention()
-        self.pipe = pipe
-        self.model_id = model_id  # type: ignore
-
-    def run(
-        self,
-        model_id: str,
-        prompt: str,
-        video_length: int,
-        fps: int,
-        seed: int,
-        n_steps: int,
-        guidance_scale: float,
-    ) -> PIL.Image.Image:
-        if not torch.cuda.is_available():
-            raise gr.Error('CUDA is not available.')
-
-        self.load_pipe(model_id)
-
-        generator = torch.Generator(device=self.device).manual_seed(seed)
-        out = self.pipe(
-            prompt,
-            video_length=video_length,
-            width=512,
-            height=512,
-            num_inference_steps=n_steps,
-            guidance_scale=guidance_scale,
-            generator=generator,
-        )  # type: ignore
-
-        frames = rearrange(out.videos[0], 'c t h w -> t h w c')
-        frames = (frames * 255).to(torch.uint8).numpy()
-
-        out_file = tempfile.NamedTemporaryFile(suffix='.mp4', delete=False)
-        writer = imageio.get_writer(out_file.name, fps=fps)
-        for frame in frames:
-            writer.append_data(frame)
-        writer.close()
-
-        return out_file.name
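
A minimal sketch of driving this class (the model id and prompt are placeholders; the argument order follows `run` above, and a CUDA device is required):

pipe = InferencePipeline(hf_token=None)
video_path = pipe.run(
    model_id="user/tuned-model",  # hypothetical Hub repo produced by Tune-A-Video training
    prompt="a cat surfing",
    video_length=8, fps=8, seed=0, n_steps=50, guidance_scale=7.5,
)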
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/schedulers/test_scheduler_dpm_sde.py
DELETED
@@ -1,168 +0,0 @@
-import torch
-
-from diffusers import DPMSolverSDEScheduler
-from diffusers.utils import torch_device
-from diffusers.utils.testing_utils import require_torchsde
-
-from .test_schedulers import SchedulerCommonTest
-
-
-@require_torchsde
-class DPMSolverSDESchedulerTest(SchedulerCommonTest):
-    scheduler_classes = (DPMSolverSDEScheduler,)
-    num_inference_steps = 10
-
-    def get_scheduler_config(self, **kwargs):
-        config = {
-            "num_train_timesteps": 1100,
-            "beta_start": 0.0001,
-            "beta_end": 0.02,
-            "beta_schedule": "linear",
-            "noise_sampler_seed": 0,
-        }
-
-        config.update(**kwargs)
-        return config
-
-    def test_timesteps(self):
-        for timesteps in [10, 50, 100, 1000]:
-            self.check_over_configs(num_train_timesteps=timesteps)
-
-    def test_betas(self):
-        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
-            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
-
-    def test_schedules(self):
-        for schedule in ["linear", "scaled_linear"]:
-            self.check_over_configs(beta_schedule=schedule)
-
-    def test_prediction_type(self):
-        for prediction_type in ["epsilon", "v_prediction"]:
-            self.check_over_configs(prediction_type=prediction_type)
-
-    def test_full_loop_no_noise(self):
-        scheduler_class = self.scheduler_classes[0]
-        scheduler_config = self.get_scheduler_config()
-        scheduler = scheduler_class(**scheduler_config)
-
-        scheduler.set_timesteps(self.num_inference_steps)
-
-        model = self.dummy_model()
-        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
-        sample = sample.to(torch_device)
-
-        for i, t in enumerate(scheduler.timesteps):
-            sample = scheduler.scale_model_input(sample, t)
-
-            model_output = model(sample, t)
-
-            output = scheduler.step(model_output, t, sample)
-            sample = output.prev_sample
-
-        result_sum = torch.sum(torch.abs(sample))
-        result_mean = torch.mean(torch.abs(sample))
-
-        if torch_device in ["mps"]:
-            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
-            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
-        elif torch_device in ["cuda"]:
-            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
-            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
-        else:
-            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
-            assert abs(result_mean.item() - 0.211619570851326) < 1e-3
-
-    def test_full_loop_with_v_prediction(self):
-        scheduler_class = self.scheduler_classes[0]
-        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
-        scheduler = scheduler_class(**scheduler_config)
-
-        scheduler.set_timesteps(self.num_inference_steps)
-
-        model = self.dummy_model()
-        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
-        sample = sample.to(torch_device)
-
-        for i, t in enumerate(scheduler.timesteps):
-            sample = scheduler.scale_model_input(sample, t)
-
-            model_output = model(sample, t)
-
-            output = scheduler.step(model_output, t, sample)
-            sample = output.prev_sample
-
-        result_sum = torch.sum(torch.abs(sample))
-        result_mean = torch.mean(torch.abs(sample))
-
-        if torch_device in ["mps"]:
-            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
-            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
-        elif torch_device in ["cuda"]:
-            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
-            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
-        else:
-            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
-            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3
-
-    def test_full_loop_device(self):
-        scheduler_class = self.scheduler_classes[0]
-        scheduler_config = self.get_scheduler_config()
-        scheduler = scheduler_class(**scheduler_config)
-
-        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
-
-        model = self.dummy_model()
-        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
-
-        for t in scheduler.timesteps:
-            sample = scheduler.scale_model_input(sample, t)
-
-            model_output = model(sample, t)
-
-            output = scheduler.step(model_output, t, sample)
-            sample = output.prev_sample
-
-        result_sum = torch.sum(torch.abs(sample))
-        result_mean = torch.mean(torch.abs(sample))
-
-        if torch_device in ["mps"]:
-            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
-            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
-        elif torch_device in ["cuda"]:
-            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
-            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
-        else:
-            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
-            assert abs(result_mean.item() - 0.211619570851326) < 1e-3
-
-    def test_full_loop_device_karras_sigmas(self):
-        scheduler_class = self.scheduler_classes[0]
-        scheduler_config = self.get_scheduler_config()
-        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)
-
-        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
-
-        model = self.dummy_model()
-        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
-        sample = sample.to(torch_device)
-
-        for t in scheduler.timesteps:
-            sample = scheduler.scale_model_input(sample, t)
-
-            model_output = model(sample, t)
-
-            output = scheduler.step(model_output, t, sample)
-            sample = output.prev_sample
-
-        result_sum = torch.sum(torch.abs(sample))
-        result_mean = torch.mean(torch.abs(sample))
-
-        if torch_device in ["mps"]:
-            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
-            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
-        elif torch_device in ["cuda"]:
-            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
-            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
-        else:
-            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
-            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
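
The loop the tests exercise (scale_model_input, predict, step) is the standard diffusers denoising loop. Outside the test harness, the scheduler is usually swapped into a pipeline from the existing scheduler's config; a sketch (model id illustrative):

from diffusers import DiffusionPipeline, DPMSolverSDEScheduler

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
# Rebuild the SDE scheduler from the pipeline's current scheduler config.
pipe.scheduler = DPMSolverSDEScheduler.from_config(pipe.scheduler.config)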
spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/assigners/__init__.py
DELETED
@@ -1,16 +0,0 @@
-from .approx_max_iou_assigner import ApproxMaxIoUAssigner
-from .assign_result import AssignResult
-from .atss_assigner import ATSSAssigner
-from .base_assigner import BaseAssigner
-from .center_region_assigner import CenterRegionAssigner
-from .grid_assigner import GridAssigner
-from .hungarian_assigner import HungarianAssigner
-from .max_iou_assigner import MaxIoUAssigner
-from .point_assigner import PointAssigner
-from .region_assigner import RegionAssigner
-
-__all__ = [
-    'BaseAssigner', 'MaxIoUAssigner', 'ApproxMaxIoUAssigner', 'AssignResult',
-    'PointAssigner', 'ATSSAssigner', 'CenterRegionAssigner', 'GridAssigner',
-    'HungarianAssigner', 'RegionAssigner'
-]
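
In mmdet these assigners are usually instantiated from a config dict rather than imported directly; a sketch of that pattern (the thresholds are illustrative):

from mmdet.core.bbox import build_assigner

assigner = build_assigner(dict(
    type='MaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.4, min_pos_iou=0.4))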
spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_d6_r101-d16_512x1024_80k_cityscapes.py
DELETED
@@ -1,2 +0,0 @@
-_base_ = './fcn_d6_r50-d16_512x1024_80k_cityscapes.py'
-model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
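
This two-line config works through `_base_` inheritance: every key from the referenced ResNet-50 config is loaded first, then the `model` dict here is merged on top. A sketch of inspecting the merged result with mmcv (path relative to the repo root):

from mmcv import Config

cfg = Config.fromfile('configs/fcn/fcn_d6_r101-d16_512x1024_80k_cityscapes.py')
print(cfg.model.backbone.depth)  # 101, overriding the ResNet-50 base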
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/superbooga/chromadb.py
DELETED
@@ -1,125 +0,0 @@
-import chromadb
-import posthog
-import torch
-from chromadb.config import Settings
-from sentence_transformers import SentenceTransformer
-
-from modules.logging_colors import logger
-
-logger.info('Intercepting all calls to posthog :)')
-posthog.capture = lambda *args, **kwargs: None
-
-
-class Collecter():
-    def __init__(self):
-        pass
-
-    def add(self, texts: list[str]):
-        pass
-
-    def get(self, search_strings: list[str], n_results: int) -> list[str]:
-        pass
-
-    def clear(self):
-        pass
-
-
-class Embedder():
-    def __init__(self):
-        pass
-
-    def embed(self, text: str) -> list[torch.Tensor]:
-        pass
-
-
-class ChromaCollector(Collecter):
-    def __init__(self, embedder: Embedder):
-        super().__init__()
-        self.chroma_client = chromadb.Client(Settings(anonymized_telemetry=False))
-        self.embedder = embedder
-        self.collection = self.chroma_client.create_collection(name="context", embedding_function=embedder.embed)
-        self.ids = []
-
-    def add(self, texts: list[str]):
-        if len(texts) == 0:
-            return
-
-        self.ids = [f"id{i}" for i in range(len(texts))]
-        self.collection.add(documents=texts, ids=self.ids)
-
-    def get_documents_ids_distances(self, search_strings: list[str], n_results: int):
-        n_results = min(len(self.ids), n_results)
-        if n_results == 0:
-            return [], [], []
-
-        result = self.collection.query(query_texts=search_strings, n_results=n_results, include=['documents', 'distances'])
-        documents = result['documents'][0]
-        ids = list(map(lambda x: int(x[2:]), result['ids'][0]))
-        distances = result['distances'][0]
-        return documents, ids, distances
-
-    # Get chunks by similarity
-    def get(self, search_strings: list[str], n_results: int) -> list[str]:
-        documents, _, _ = self.get_documents_ids_distances(search_strings, n_results)
-        return documents
-
-    # Get ids by similarity
-    def get_ids(self, search_strings: list[str], n_results: int) -> list[int]:
-        _, ids, _ = self.get_documents_ids_distances(search_strings, n_results)
-        return ids
-
-    # Get chunks by similarity and then sort by insertion order
-    def get_sorted(self, search_strings: list[str], n_results: int) -> list[str]:
-        documents, ids, _ = self.get_documents_ids_distances(search_strings, n_results)
-        return [x for _, x in sorted(zip(ids, documents))]
-
-    # Multiply distance by factor within [0, time_weight] where more recent is lower
-    def apply_time_weight_to_distances(self, ids: list[int], distances: list[float], time_weight: float = 1.0) -> list[float]:
-        if len(self.ids) <= 1:
-            return distances.copy()
-
-        return [distance * (1 - _id / (len(self.ids) - 1) * time_weight) for _id, distance in zip(ids, distances)]
-
-    # Get ids by similarity and then sort by insertion order
-    def get_ids_sorted(self, search_strings: list[str], n_results: int, n_initial: int = None, time_weight: float = 1.0) -> list[int]:
-        do_time_weight = time_weight > 0
-        if not (do_time_weight and n_initial is not None):
-            n_initial = n_results
-        elif n_initial == -1:
-            n_initial = len(self.ids)
-
-        if n_initial < n_results:
-            raise ValueError(f"n_initial {n_initial} should be >= n_results {n_results}")
-
-        _, ids, distances = self.get_documents_ids_distances(search_strings, n_initial)
-        if do_time_weight:
-            distances_w = self.apply_time_weight_to_distances(ids, distances, time_weight=time_weight)
-            results = zip(ids, distances, distances_w)
-            results = sorted(results, key=lambda x: x[2])[:n_results]
-            results = sorted(results, key=lambda x: x[0])
-            ids = [x[0] for x in results]
-
-        return sorted(ids)
-
-    def clear(self):
-        self.collection.delete(ids=self.ids)
-        self.ids = []
-
-
-class SentenceTransformerEmbedder(Embedder):
-    def __init__(self) -> None:
-        self.model = SentenceTransformer("sentence-transformers/all-mpnet-base-v2")
-        self.embed = self.model.encode
-
-
-def make_collector():
-    global embedder
-    return ChromaCollector(embedder)
-
-
-def add_chunks_to_collector(chunks, collector):
-    collector.clear()
-    collector.add(chunks)
-
-
-embedder = SentenceTransformerEmbedder()
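
A minimal sketch of driving the collector defined above (the chunk strings are illustrative):

collector = make_collector()
add_chunks_to_collector(["chunk one", "chunk two", "chunk three"], collector)
# Retrieve the 2 most similar chunks, re-ordered by insertion position.
print(collector.get_sorted(["query text"], n_results=2))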
spaces/Audio-AGI/AudioSep/models/CLAP/open_clip/feature_fusion.py
DELETED
@@ -1,192 +0,0 @@
-"""
-Feature Fusion for Variable-Length Data Processing
-AFF/iAFF is referred and modified from https://github.com/YimianDai/open-aff/blob/master/aff_pytorch/aff_net/fusion.py
-According to the paper: Yimian Dai et al, Attentional Feature Fusion, IEEE Winter Conference on Applications of Computer Vision, WACV 2021
-"""
-
-import torch
-import torch.nn as nn
-
-
-class DAF(nn.Module):
-    """
-    Direct addition (DirectAddFuse)
-    """
-
-    def __init__(self):
-        super(DAF, self).__init__()
-
-    def forward(self, x, residual):
-        return x + residual
-
-
-class iAFF(nn.Module):
-    """
-    Multi-feature fusion (iAFF)
-    """
-
-    def __init__(self, channels=64, r=4, type="2D"):
-        super(iAFF, self).__init__()
-        inter_channels = int(channels // r)
-
-        if type == "1D":
-            # local attention
-            self.local_att = nn.Sequential(
-                nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
-                nn.BatchNorm1d(inter_channels),
-                nn.ReLU(inplace=True),
-                nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
-                nn.BatchNorm1d(channels),
-            )
-
-            # global attention
-            self.global_att = nn.Sequential(
-                nn.AdaptiveAvgPool1d(1),
-                nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
-                nn.BatchNorm1d(inter_channels),
-                nn.ReLU(inplace=True),
-                nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
-                nn.BatchNorm1d(channels),
-            )
-
-            # second local attention
-            self.local_att2 = nn.Sequential(
-                nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
-                nn.BatchNorm1d(inter_channels),
-                nn.ReLU(inplace=True),
-                nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
-                nn.BatchNorm1d(channels),
-            )
-            # second global attention
-            self.global_att2 = nn.Sequential(
-                nn.AdaptiveAvgPool1d(1),
-                nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
-                nn.BatchNorm1d(inter_channels),
-                nn.ReLU(inplace=True),
-                nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
-                nn.BatchNorm1d(channels),
-            )
-        elif type == "2D":
-            # local attention
-            self.local_att = nn.Sequential(
-                nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
-                nn.BatchNorm2d(inter_channels),
-                nn.ReLU(inplace=True),
-                nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
-                nn.BatchNorm2d(channels),
-            )
-
-            # global attention
-            self.global_att = nn.Sequential(
-                nn.AdaptiveAvgPool2d(1),
-                nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
-                nn.BatchNorm2d(inter_channels),
-                nn.ReLU(inplace=True),
-                nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
-                nn.BatchNorm2d(channels),
-            )
-
-            # second local attention
-            self.local_att2 = nn.Sequential(
-                nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
-                nn.BatchNorm2d(inter_channels),
-                nn.ReLU(inplace=True),
-                nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
-                nn.BatchNorm2d(channels),
-            )
-            # second global attention
-            self.global_att2 = nn.Sequential(
-                nn.AdaptiveAvgPool2d(1),
-                nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
-                nn.BatchNorm2d(inter_channels),
-                nn.ReLU(inplace=True),
-                nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
-                nn.BatchNorm2d(channels),
-            )
-        else:
-            raise ValueError("the type is not supported")
-
-        self.sigmoid = nn.Sigmoid()
-
-    def forward(self, x, residual):
-        flag = False
-        xa = x + residual
-        if xa.size(0) == 1:
-            xa = torch.cat([xa, xa], dim=0)
-            flag = True
-        xl = self.local_att(xa)
-        xg = self.global_att(xa)
-        xlg = xl + xg
-        wei = self.sigmoid(xlg)
-        xi = x * wei + residual * (1 - wei)
-
-        xl2 = self.local_att2(xi)
-        xg2 = self.global_att2(xi)  # second attention round uses the dedicated *_att2 branches
-        xlg2 = xl2 + xg2
-        wei2 = self.sigmoid(xlg2)
-        xo = x * wei2 + residual * (1 - wei2)
-        if flag:
-            xo = xo[0].unsqueeze(0)
-        return xo
-
-
-class AFF(nn.Module):
-    """
-    Multi-feature fusion (AFF)
-    """
-
-    def __init__(self, channels=64, r=4, type="2D"):
-        super(AFF, self).__init__()
-        inter_channels = int(channels // r)
-
-        if type == "1D":
-            self.local_att = nn.Sequential(
-                nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
-                nn.BatchNorm1d(inter_channels),
-                nn.ReLU(inplace=True),
-                nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
-                nn.BatchNorm1d(channels),
-            )
-            self.global_att = nn.Sequential(
-                nn.AdaptiveAvgPool1d(1),
-                nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
-                nn.BatchNorm1d(inter_channels),
-                nn.ReLU(inplace=True),
-                nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
-                nn.BatchNorm1d(channels),
-            )
-        elif type == "2D":
-            self.local_att = nn.Sequential(
-                nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
-                nn.BatchNorm2d(inter_channels),
-                nn.ReLU(inplace=True),
-                nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
-                nn.BatchNorm2d(channels),
-            )
-            self.global_att = nn.Sequential(
-                nn.AdaptiveAvgPool2d(1),
-                nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
-                nn.BatchNorm2d(inter_channels),
-                nn.ReLU(inplace=True),
-                nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
-                nn.BatchNorm2d(channels),
-            )
-        else:
-            raise ValueError("the type is not supported.")
-
-        self.sigmoid = nn.Sigmoid()
-
-    def forward(self, x, residual):
-        flag = False
-        xa = x + residual
-        if xa.size(0) == 1:
-            xa = torch.cat([xa, xa], dim=0)
-            flag = True
-        xl = self.local_att(xa)
-        xg = self.global_att(xa)
-        xlg = xl + xg
-        wei = self.sigmoid(xlg)
-        xo = 2 * x * wei + 2 * residual * (1 - wei)
-        if flag:
-            xo = xo[0].unsqueeze(0)
-        return xo
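
A minimal sketch of fusing two feature maps with the 2D AFF block defined above (shapes illustrative):

import torch

aff = AFF(channels=64, r=4, type="2D")
x = torch.randn(2, 64, 32, 32)         # main branch
residual = torch.randn(2, 64, 32, 32)  # branch to fuse in
fused = aff(x, residual)               # attention-weighted blend, same shape as the inputs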
spaces/Awiny/Image2Paragraph/models/controlnet_model.py
DELETED
@@ -1,56 +0,0 @@
-import cv2
-import torch
-import numpy as np
-from PIL import Image
-from diffusers import (
-    StableDiffusionControlNetPipeline,
-    ControlNetModel,
-    UniPCMultistepScheduler,
-)
-
-
-class TextToImage:
-    def __init__(self, device):
-        self.device = device
-        self.model = self.initialize_model()
-
-    def initialize_model(self):
-        controlnet = ControlNetModel.from_pretrained(
-            "fusing/stable-diffusion-v1-5-controlnet-canny",
-            torch_dtype=torch.float16,
-        )
-        pipeline = StableDiffusionControlNetPipeline.from_pretrained(
-            "runwayml/stable-diffusion-v1-5",
-            controlnet=controlnet,
-            safety_checker=None,
-            torch_dtype=torch.float16,
-        )
-        pipeline.scheduler = UniPCMultistepScheduler.from_config(
-            pipeline.scheduler.config
-        )
-        pipeline.enable_model_cpu_offload()
-        pipeline.to(self.device)
-        return pipeline
-
-    @staticmethod
-    def preprocess_image(image):
-        image = np.array(image)
-        low_threshold = 100
-        high_threshold = 200
-        image = cv2.Canny(image, low_threshold, high_threshold)
-        image = np.stack([image, image, image], axis=2)
-        image = Image.fromarray(image)
-        return image
-
-    def text_to_image(self, text, image):
-        print('\033[1;35m' + '*' * 100 + '\033[0m')
-        print('\nStep5, Text to Image:')
-        image = self.preprocess_image(image)
-        generated_image = self.model(text, image, num_inference_steps=20).images[0]
-        print("Generated image has been saved.")
-        print('\033[1;35m' + '*' * 100 + '\033[0m')
-        return generated_image
-
-    def text_to_image_debug(self, text, image):
-        print("text_to_image_debug")
-        return image
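
A minimal usage sketch (the image path is illustrative; the class runs Canny edge extraction internally before conditioning the ControlNet):

from PIL import Image

tti = TextToImage(device="cuda")
result = tti.text_to_image("a futuristic city at night", Image.open("input.jpg"))
result.save("output.png")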
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/datasets/prepare_cocofied_lvis.py
DELETED
@@ -1,176 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-# Copyright (c) Facebook, Inc. and its affiliates.
-
-import copy
-import json
-import os
-from collections import defaultdict
-
-# This mapping is extracted from the official LVIS mapping:
-# https://github.com/lvis-dataset/lvis-api/blob/master/data/coco_to_synset.json
-COCO_SYNSET_CATEGORIES = [
-    {"synset": "person.n.01", "coco_cat_id": 1},
-    {"synset": "bicycle.n.01", "coco_cat_id": 2},
-    {"synset": "car.n.01", "coco_cat_id": 3},
-    {"synset": "motorcycle.n.01", "coco_cat_id": 4},
-    {"synset": "airplane.n.01", "coco_cat_id": 5},
-    {"synset": "bus.n.01", "coco_cat_id": 6},
-    {"synset": "train.n.01", "coco_cat_id": 7},
-    {"synset": "truck.n.01", "coco_cat_id": 8},
-    {"synset": "boat.n.01", "coco_cat_id": 9},
-    {"synset": "traffic_light.n.01", "coco_cat_id": 10},
-    {"synset": "fireplug.n.01", "coco_cat_id": 11},
-    {"synset": "stop_sign.n.01", "coco_cat_id": 13},
-    {"synset": "parking_meter.n.01", "coco_cat_id": 14},
-    {"synset": "bench.n.01", "coco_cat_id": 15},
-    {"synset": "bird.n.01", "coco_cat_id": 16},
-    {"synset": "cat.n.01", "coco_cat_id": 17},
-    {"synset": "dog.n.01", "coco_cat_id": 18},
-    {"synset": "horse.n.01", "coco_cat_id": 19},
-    {"synset": "sheep.n.01", "coco_cat_id": 20},
-    {"synset": "beef.n.01", "coco_cat_id": 21},
-    {"synset": "elephant.n.01", "coco_cat_id": 22},
-    {"synset": "bear.n.01", "coco_cat_id": 23},
-    {"synset": "zebra.n.01", "coco_cat_id": 24},
-    {"synset": "giraffe.n.01", "coco_cat_id": 25},
-    {"synset": "backpack.n.01", "coco_cat_id": 27},
-    {"synset": "umbrella.n.01", "coco_cat_id": 28},
-    {"synset": "bag.n.04", "coco_cat_id": 31},
-    {"synset": "necktie.n.01", "coco_cat_id": 32},
-    {"synset": "bag.n.06", "coco_cat_id": 33},
-    {"synset": "frisbee.n.01", "coco_cat_id": 34},
-    {"synset": "ski.n.01", "coco_cat_id": 35},
-    {"synset": "snowboard.n.01", "coco_cat_id": 36},
-    {"synset": "ball.n.06", "coco_cat_id": 37},
-    {"synset": "kite.n.03", "coco_cat_id": 38},
-    {"synset": "baseball_bat.n.01", "coco_cat_id": 39},
-    {"synset": "baseball_glove.n.01", "coco_cat_id": 40},
-    {"synset": "skateboard.n.01", "coco_cat_id": 41},
-    {"synset": "surfboard.n.01", "coco_cat_id": 42},
-    {"synset": "tennis_racket.n.01", "coco_cat_id": 43},
-    {"synset": "bottle.n.01", "coco_cat_id": 44},
-    {"synset": "wineglass.n.01", "coco_cat_id": 46},
-    {"synset": "cup.n.01", "coco_cat_id": 47},
-    {"synset": "fork.n.01", "coco_cat_id": 48},
-    {"synset": "knife.n.01", "coco_cat_id": 49},
-    {"synset": "spoon.n.01", "coco_cat_id": 50},
-    {"synset": "bowl.n.03", "coco_cat_id": 51},
-    {"synset": "banana.n.02", "coco_cat_id": 52},
-    {"synset": "apple.n.01", "coco_cat_id": 53},
-    {"synset": "sandwich.n.01", "coco_cat_id": 54},
-    {"synset": "orange.n.01", "coco_cat_id": 55},
-    {"synset": "broccoli.n.01", "coco_cat_id": 56},
-    {"synset": "carrot.n.01", "coco_cat_id": 57},
-    {"synset": "frank.n.02", "coco_cat_id": 58},
-    {"synset": "pizza.n.01", "coco_cat_id": 59},
-    {"synset": "doughnut.n.02", "coco_cat_id": 60},
-    {"synset": "cake.n.03", "coco_cat_id": 61},
-    {"synset": "chair.n.01", "coco_cat_id": 62},
-    {"synset": "sofa.n.01", "coco_cat_id": 63},
-    {"synset": "pot.n.04", "coco_cat_id": 64},
-    {"synset": "bed.n.01", "coco_cat_id": 65},
-    {"synset": "dining_table.n.01", "coco_cat_id": 67},
-    {"synset": "toilet.n.02", "coco_cat_id": 70},
-    {"synset": "television_receiver.n.01", "coco_cat_id": 72},
-    {"synset": "laptop.n.01", "coco_cat_id": 73},
-    {"synset": "mouse.n.04", "coco_cat_id": 74},
-    {"synset": "remote_control.n.01", "coco_cat_id": 75},
-    {"synset": "computer_keyboard.n.01", "coco_cat_id": 76},
-    {"synset": "cellular_telephone.n.01", "coco_cat_id": 77},
-    {"synset": "microwave.n.02", "coco_cat_id": 78},
-    {"synset": "oven.n.01", "coco_cat_id": 79},
-    {"synset": "toaster.n.02", "coco_cat_id": 80},
-    {"synset": "sink.n.01", "coco_cat_id": 81},
-    {"synset": "electric_refrigerator.n.01", "coco_cat_id": 82},
-    {"synset": "book.n.01", "coco_cat_id": 84},
-    {"synset": "clock.n.01", "coco_cat_id": 85},
-    {"synset": "vase.n.01", "coco_cat_id": 86},
-    {"synset": "scissors.n.01", "coco_cat_id": 87},
-    {"synset": "teddy.n.01", "coco_cat_id": 88},
-    {"synset": "hand_blower.n.01", "coco_cat_id": 89},
-    {"synset": "toothbrush.n.01", "coco_cat_id": 90},
-]
-
-
-def cocofy_lvis(input_filename, output_filename):
-    """
-    Filter LVIS instance segmentation annotations to remove all categories that are not included in
-    COCO. The new json files can be used to evaluate COCO AP using `lvis-api`. The category ids in
-    the output json are the incontiguous COCO dataset ids.
-
-    Args:
-        input_filename (str): path to the LVIS json file.
-        output_filename (str): path to the COCOfied json file.
-    """
-
-    with open(input_filename, "r") as f:
-        lvis_json = json.load(f)
-
-    lvis_annos = lvis_json.pop("annotations")
-    cocofied_lvis = copy.deepcopy(lvis_json)
-    lvis_json["annotations"] = lvis_annos
-
-    # Mapping from lvis cat id to coco cat id via synset
-    lvis_cat_id_to_synset = {cat["id"]: cat["synset"] for cat in lvis_json["categories"]}
-    synset_to_coco_cat_id = {x["synset"]: x["coco_cat_id"] for x in COCO_SYNSET_CATEGORIES}
-    # Synsets that we will keep in the dataset
-    synsets_to_keep = set(synset_to_coco_cat_id.keys())
-    coco_cat_id_with_instances = defaultdict(int)
-
-    new_annos = []
-    ann_id = 1
-    for ann in lvis_annos:
-        lvis_cat_id = ann["category_id"]
-        synset = lvis_cat_id_to_synset[lvis_cat_id]
-        if synset not in synsets_to_keep:
-            continue
-        coco_cat_id = synset_to_coco_cat_id[synset]
-        new_ann = copy.deepcopy(ann)
-        new_ann["category_id"] = coco_cat_id
-        new_ann["id"] = ann_id
-        ann_id += 1
-        new_annos.append(new_ann)
-        coco_cat_id_with_instances[coco_cat_id] += 1
-    cocofied_lvis["annotations"] = new_annos
-
-    for image in cocofied_lvis["images"]:
-        for key in ["not_exhaustive_category_ids", "neg_category_ids"]:
-            new_category_list = []
-            for lvis_cat_id in image[key]:
-                synset = lvis_cat_id_to_synset[lvis_cat_id]
-                if synset not in synsets_to_keep:
-                    continue
-                coco_cat_id = synset_to_coco_cat_id[synset]
-                new_category_list.append(coco_cat_id)
-                coco_cat_id_with_instances[coco_cat_id] += 1
-            image[key] = new_category_list
-
-    coco_cat_id_with_instances = set(coco_cat_id_with_instances.keys())
-
-    new_categories = []
-    for cat in lvis_json["categories"]:
-        synset = cat["synset"]
-        if synset not in synsets_to_keep:
-            continue
-        coco_cat_id = synset_to_coco_cat_id[synset]
-        if coco_cat_id not in coco_cat_id_with_instances:
-            continue
-        new_cat = copy.deepcopy(cat)
-        new_cat["id"] = coco_cat_id
-        new_categories.append(new_cat)
-    cocofied_lvis["categories"] = new_categories
-
-    with open(output_filename, "w") as f:
-        json.dump(cocofied_lvis, f)
-    print("{} is COCOfied and stored in {}.".format(input_filename, output_filename))
-
-
-if __name__ == "__main__":
-    dataset_dir = os.path.join(os.getenv("DETECTRON2_DATASETS", "datasets"), "lvis")
-    for s in ["lvis_v0.5_train", "lvis_v0.5_val"]:
-        print("Start COCOfing {}.".format(s))
-        cocofy_lvis(
-            os.path.join(dataset_dir, "{}.json".format(s)),
-            os.path.join(dataset_dir, "{}_cocofied.json".format(s)),
-        )
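
The script expects the LVIS jsons under `$DETECTRON2_DATASETS/lvis` and converts both v0.5 splits; a sketch of invoking the conversion directly for a single split (paths illustrative):

cocofy_lvis(
    "datasets/lvis/lvis_v0.5_val.json",
    "datasets/lvis/lvis_v0.5_val_cocofied.json",
)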
spaces/Banbri/zcvzcv/src/app/engine/forbidden.ts
DELETED
@@ -1,6 +0,0 @@
-
-// the NSFW has to contain bad words, but doing so might get the code flagged
-// or attract unwanted attention, so we hash them
-export const forbidden = [
-  // TODO implement this
-]
spaces/Bart92/RVC_HF/demucs/compressed.py
DELETED
@@ -1,115 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-import json
-from fractions import Fraction
-from concurrent import futures
-
-import musdb
-from torch import distributed
-
-from .audio import AudioFile
-
-
-def get_musdb_tracks(root, *args, **kwargs):
-    mus = musdb.DB(root, *args, **kwargs)
-    return {track.name: track.path for track in mus}
-
-
-class StemsSet:
-    def __init__(self, tracks, metadata, duration=None, stride=1,
-                 samplerate=44100, channels=2, streams=slice(None)):
-
-        self.metadata = []
-        for name, path in tracks.items():
-            meta = dict(metadata[name])
-            meta["path"] = path
-            meta["name"] = name
-            self.metadata.append(meta)
-            if duration is not None and meta["duration"] < duration:
-                raise ValueError(f"Track {name} duration is too small {meta['duration']}")
-        self.metadata.sort(key=lambda x: x["name"])
-        self.duration = duration
-        self.stride = stride
-        self.channels = channels
-        self.samplerate = samplerate
-        self.streams = streams
-
-    def __len__(self):
-        return sum(self._examples_count(m) for m in self.metadata)
-
-    def _examples_count(self, meta):
-        if self.duration is None:
-            return 1
-        else:
-            return int((meta["duration"] - self.duration) // self.stride + 1)
-
-    def track_metadata(self, index):
-        for meta in self.metadata:
-            examples = self._examples_count(meta)
-            if index >= examples:
-                index -= examples
-                continue
-            return meta
-
-    def __getitem__(self, index):
-        for meta in self.metadata:
-            examples = self._examples_count(meta)
-            if index >= examples:
-                index -= examples
-                continue
-            streams = AudioFile(meta["path"]).read(seek_time=index * self.stride,
-                                                   duration=self.duration,
-                                                   channels=self.channels,
-                                                   samplerate=self.samplerate,
-                                                   streams=self.streams)
-            return (streams - meta["mean"]) / meta["std"]
-
-
-def _get_track_metadata(path):
-    # use mono at 44kHz as reference. For any other settings data won't be perfectly
-    # normalized but it should be good enough.
-    audio = AudioFile(path)
-    mix = audio.read(streams=0, channels=1, samplerate=44100)
-    return {"duration": audio.duration, "std": mix.std().item(), "mean": mix.mean().item()}
-
-
-def _build_metadata(tracks, workers=10):
-    pendings = []
-    with futures.ProcessPoolExecutor(workers) as pool:
-        for name, path in tracks.items():
-            pendings.append((name, pool.submit(_get_track_metadata, path)))
-    return {name: p.result() for name, p in pendings}
-
-
-def _build_musdb_metadata(path, musdb, workers):
-    tracks = get_musdb_tracks(musdb)
-    metadata = _build_metadata(tracks, workers)
-    path.parent.mkdir(exist_ok=True, parents=True)
-    json.dump(metadata, open(path, "w"))
-
-
-def get_compressed_datasets(args, samples):
-    metadata_file = args.metadata / "musdb.json"
-    if not metadata_file.is_file() and args.rank == 0:
-        _build_musdb_metadata(metadata_file, args.musdb, args.workers)
-    if args.world_size > 1:
-        distributed.barrier()
-    metadata = json.load(open(metadata_file))
-    duration = Fraction(samples, args.samplerate)
-    stride = Fraction(args.data_stride, args.samplerate)
-    train_set = StemsSet(get_musdb_tracks(args.musdb, subsets=["train"], split="train"),
-                         metadata,
-                         duration=duration,
-                         stride=stride,
-                         streams=slice(1, None),
-                         samplerate=args.samplerate,
-                         channels=args.audio_channels)
-    valid_set = StemsSet(get_musdb_tracks(args.musdb, subsets=["train"], split="valid"),
-                         metadata,
-                         samplerate=args.samplerate,
-                         channels=args.audio_channels)
-    return train_set, valid_set
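
A minimal sketch of composing the pieces above without the full training `args` object (the musdb root and excerpt length are illustrative, and `_build_metadata` is module-private):

tracks = get_musdb_tracks("/data/musdb", subsets=["train"], split="train")
metadata = _build_metadata(tracks, workers=4)
train_set = StemsSet(tracks, metadata, duration=5, stride=1,
                     samplerate=44100, channels=2, streams=slice(1, None))
example = train_set[0]  # normalized stems tensor for the first 5-second excerpt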
spaces/Benson/text-generation/Examples/Autobs Simulador Indonesia Apk ltima Versin.md
DELETED
@@ -1,71 +0,0 @@
-<br />
-<h1>Special Forces Group 2 Mod APK Old Version: A Guide for Players</h1>
-<p>If you are a fan of first-person shooter games, you may have heard of Special Forces Group 2. This is a popular online multiplayer game that lets you experience intense battles with different modes, weapons, maps, and characters. But did you know that you can also download the old mod apk version of this game and enjoy some extra features and advantages? In this article, we will tell you everything you need to know about the Special Forces Group 2 mod apk old version, including what it is, why you should download it, how to download it, and how to use it. Let's get started!</p>
-<h2>bus simulator indonesia apk latest version</h2><br /><p><b><b>Download</b> ✅ <a href="https://bltlly.com/2v6Jxg">https://bltlly.com/2v6Jxg</a></b></p><br /><br />
-<h2>What is Special Forces Group 2?</h2>
-<h3>A brief introduction to the game</h3>
-<p>Special Forces Group 2 is a 3D action game developed by ForgeGames. It was released in 2016 for Android and iOS devices. The game has more than 100 million downloads on the Google Play Store and a rating of 4.5 out of 5 stars. The game is inspired by the famous Counter-Strike series and offers similar gameplay and graphics.</p>
-<h3>The features and gameplay of Special Forces Group 2</h3>
-<p>The game has many features that make it fun and addictive. Some of them are:</p>
-<ul>
-<li>9 game modes: Classic, Resurrection, Capture the Flag, Zombie Mode, Bomb Mode, Knives, Deathmatch, Arms Race, and Sniper.</li>
-<li>30 maps: You can choose from different locations such as desert, city, snow, forest, etc.</li>
-<li>25 weapons: You can equip yourself with various weapons such as pistols, rifles, shotguns, snipers, grenades, etc.</li>
-<li>8 characters per team: You can customize your appearance and choose from different skins and outfits.</li>
-<li>Multiplayer mode: You can play online with your friends or other players from around the world.</li>
-<li>Offline mode: You can also play offline with bots or practice your skills.</li>
-<li>Voice chat: You can communicate with your teammates using voice chat.</li>
-
-</ul>
-<p>The gameplay of Special Forces Group 2 is simple and straightforward. You have to join a team (either terrorists or counter-terrorists) and complete the objectives of each mode. For example, in Classic mode you have to eliminate all enemies or defuse the bomb. In Zombie mode you have to survive the zombie attacks or infect other players. In Capture the Flag mode you have to capture the enemy flag and bring it back to your base. And so on.</p>
-<h2>Why download the old mod apk version of Special Forces Group 2?</h2>
-<h3>The benefits of using the old mod apk version</h3>
-<p>The old mod apk version of Special Forces Group 2 is a modified version of the original game that has some extra features and advantages. Some of them are:</p>
-<ul>
-<li>Unlimited money: You can buy any weapon or item without worrying about the cost.</li>
-<li>Unlocked skins and outfits: You can access all skins and outfits for free.</li>
-<li>No ads: You can enjoy the game without annoying ads.</li>
-<li>No root required: You do not need to root your device to install the old mod apk version.</li>
-</ul>
-<h3>The drawbacks and risks of using the old mod apk version</h3>
-<p>However, using the old mod apk version also has some drawbacks and risks that you should be aware of. Some of them are:</p><p>- Compatibility issues: The old mod apk version may not work properly on some devices or with some updates.</p>
-<p>- Ban risk: The old mod apk version may be detected by the game developers and lead to a ban from online mode.</p>
-<p></p>
-<p>- Virus risk: The old mod apk version may contain malware or spyware that can damage your device or steal your data.</p>
-<p>- Ethical issues: The old mod apk version may give you an unfair advantage over other players and ruin the balance and fun of the game.</p>
-
-<h2>How to download and install the old mod apk version of Special Forces Group 2?</h2>
-<h3>The steps to download and install the old mod apk version</h3>
-<p>If you still want to try the old mod apk version of Special Forces Group 2, here are the steps to download and install it:</p>
-<ol>
-<li>Go to a trusted website that provides the old mod apk version of Special Forces Group 2. For example, you can visit [this link] to download mod apk old version 4.21 of the game.</li>
-<li>Download the mod apk file and the obb file to your device. Make sure you have enough storage space and a stable internet connection.</li>
-<li>Enable the installation of apps from unknown sources on your device. To do this, go to Settings > Security > Unknown sources and turn it on.</li>
-<li>Locate the downloaded files on your device and install the mod apk file. Do not open it yet.</li>
-<li>Extract the obb file using a file manager app and copy the folder named "com.ForgeGames.SpecialForcesGroup2" to the Android/obb directory on your device.</li>
-<li>Launch the game and enjoy the old mod apk version of Special Forces Group 2.</li>
-</ol>
-<h3>Tips and tricks to enjoy the old mod apk version</h3>
-<p>Here are some tips and tricks to enjoy the old mod apk version of Special Forces Group 2:</p>
-<ul>
-<li>Use different weapons and modes to explore the game and have fun.</li>
-<li>Play with your friends or join a clan to cooperate and compete with other players.</li>
-<li>Adjust the graphics settings and controls according to your preference and your device's performance.</li>
-<li>Be respectful and friendly to other players and avoid cheating or abusing the game.</li>
-<li>Update the game regularly to get new features and bug fixes.</li>
-</ul>
-<h2>Conclusion</h2>
-
-<h2>FAQ</h2>
-<h4>What is Special Forces Group 2?</h4>
-<p>Special Forces Group 2 is a popular first-person multiplayer game developed by ForgeGames. It was released in 2016 for Android and iOS devices.</p>
-<h4>What is the Special Forces Group 2 mod apk old version?</h4>
-<p>The Special Forces Group 2 mod apk old version is a modified version of the original game that has some extra features and advantages such as unlimited money, unlocked skins, no ads, etc.</p>
-<h4>How to download the Special Forces Group 2 mod apk old version?</h4>
-<p>You can download the Special Forces Group 2 mod apk old version from a trusted website that provides it. You also need to download the obb file and follow a few steps to install it on your device.</p>
-<h4>Is the Special Forces Group 2 mod apk old version safe?</h4>
-<p>No, the Special Forces Group 2 mod apk old version is not safe. It can have compatibility issues, ban risk, virus risk, ethical issues, etc. You should use it at your own risk and discretion.</p>
-<h4>How to enjoy the Special Forces Group 2 mod apk old version?</h4>
-<p>You can enjoy the Special Forces Group 2 mod apk old version by using different weapons and modes, playing with your friends or joining a clan, adjusting the graphics settings and controls, being respectful and friendly to other players, and updating the game regularly.</p> 64aa2da5cf<br />
-<br />
-<br />
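
The installation walkthrough in the deleted article above boils down to two device operations: install the APK, then place the obb data folder under `Android/obb`. Below is a minimal, hypothetical sketch of the same steps automated over `adb`, assuming adb is on PATH and USB debugging is enabled; the local APK file name is illustrative (it is not a real release), and only the obb folder name comes from the article itself.

```python
# Hypothetical automation of the manual steps above; the APK file name is an
# assumption, the obb folder name is the one given in step 5 of the article.
import subprocess

APK = "special-forces-group-2-mod.apk"       # assumed local download name
OBB = "com.ForgeGames.SpecialForcesGroup2"   # folder named in the article

subprocess.run(["adb", "install", APK], check=True)
subprocess.run(["adb", "push", OBB, f"/sdcard/Android/obb/{OBB}"], check=True)
```
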
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/ansi.py
DELETED
@@ -1,240 +0,0 @@
-import re
-import sys
-from contextlib import suppress
-from typing import Iterable, NamedTuple, Optional
-
-from .color import Color
-from .style import Style
-from .text import Text
-
-re_ansi = re.compile(
-    r"""
-    (?:\x1b\](.*?)\x1b\\)|
-    (?:\x1b([(@-Z\\-_]|\[[0-?]*[ -/]*[@-~]))
-    """,
-    re.VERBOSE,
-)
-
-
-class _AnsiToken(NamedTuple):
-    """Result of ansi tokenized string."""
-
-    plain: str = ""
-    sgr: Optional[str] = ""
-    osc: Optional[str] = ""
-
-
-def _ansi_tokenize(ansi_text: str) -> Iterable[_AnsiToken]:
-    """Tokenize a string in to plain text and ANSI codes.
-
-    Args:
-        ansi_text (str): A String containing ANSI codes.
-
-    Yields:
-        AnsiToken: A named tuple of (plain, sgr, osc)
-    """
-
-    position = 0
-    sgr: Optional[str]
-    osc: Optional[str]
-    for match in re_ansi.finditer(ansi_text):
-        start, end = match.span(0)
-        osc, sgr = match.groups()
-        if start > position:
-            yield _AnsiToken(ansi_text[position:start])
-        if sgr:
-            if sgr == "(":
-                position = end + 1
-                continue
-            if sgr.endswith("m"):
-                yield _AnsiToken("", sgr[1:-1], osc)
-        else:
-            yield _AnsiToken("", sgr, osc)
-        position = end
-    if position < len(ansi_text):
-        yield _AnsiToken(ansi_text[position:])
-
-
-SGR_STYLE_MAP = {
-    1: "bold",
-    2: "dim",
-    3: "italic",
-    4: "underline",
-    5: "blink",
-    6: "blink2",
-    7: "reverse",
-    8: "conceal",
-    9: "strike",
-    21: "underline2",
-    22: "not dim not bold",
-    23: "not italic",
-    24: "not underline",
-    25: "not blink",
-    26: "not blink2",
-    27: "not reverse",
-    28: "not conceal",
-    29: "not strike",
-    30: "color(0)",
-    31: "color(1)",
-    32: "color(2)",
-    33: "color(3)",
-    34: "color(4)",
-    35: "color(5)",
-    36: "color(6)",
-    37: "color(7)",
-    39: "default",
-    40: "on color(0)",
-    41: "on color(1)",
-    42: "on color(2)",
-    43: "on color(3)",
-    44: "on color(4)",
-    45: "on color(5)",
-    46: "on color(6)",
-    47: "on color(7)",
-    49: "on default",
-    51: "frame",
-    52: "encircle",
-    53: "overline",
-    54: "not frame not encircle",
-    55: "not overline",
-    90: "color(8)",
-    91: "color(9)",
-    92: "color(10)",
-    93: "color(11)",
-    94: "color(12)",
-    95: "color(13)",
-    96: "color(14)",
-    97: "color(15)",
-    100: "on color(8)",
-    101: "on color(9)",
-    102: "on color(10)",
-    103: "on color(11)",
-    104: "on color(12)",
-    105: "on color(13)",
-    106: "on color(14)",
-    107: "on color(15)",
-}
-
-
-class AnsiDecoder:
-    """Translate ANSI code in to styled Text."""
-
-    def __init__(self) -> None:
-        self.style = Style.null()
-
-    def decode(self, terminal_text: str) -> Iterable[Text]:
-        """Decode ANSI codes in an iterable of lines.
-
-        Args:
-            lines (Iterable[str]): An iterable of lines of terminal output.
-
-        Yields:
-            Text: Marked up Text.
-        """
-        for line in terminal_text.splitlines():
-            yield self.decode_line(line)
-
-    def decode_line(self, line: str) -> Text:
-        """Decode a line containing ansi codes.
-
-        Args:
-            line (str): A line of terminal output.
-
-        Returns:
-            Text: A Text instance marked up according to ansi codes.
-        """
-        from_ansi = Color.from_ansi
-        from_rgb = Color.from_rgb
-        _Style = Style
-        text = Text()
-        append = text.append
-        line = line.rsplit("\r", 1)[-1]
-        for plain_text, sgr, osc in _ansi_tokenize(line):
-            if plain_text:
-                append(plain_text, self.style or None)
-            elif osc is not None:
-                if osc.startswith("8;"):
-                    _params, semicolon, link = osc[2:].partition(";")
-                    if semicolon:
-                        self.style = self.style.update_link(link or None)
-            elif sgr is not None:
-                # Translate in to semi-colon separated codes
-                # Ignore invalid codes, because we want to be lenient
-                codes = [
-                    min(255, int(_code) if _code else 0)
-                    for _code in sgr.split(";")
-                    if _code.isdigit() or _code == ""
-                ]
-                iter_codes = iter(codes)
-                for code in iter_codes:
-                    if code == 0:
-                        # reset
-                        self.style = _Style.null()
-                    elif code in SGR_STYLE_MAP:
-                        # styles
-                        self.style += _Style.parse(SGR_STYLE_MAP[code])
-                    elif code == 38:
-                        # Foreground
-                        with suppress(StopIteration):
-                            color_type = next(iter_codes)
-                            if color_type == 5:
-                                self.style += _Style.from_color(
-                                    from_ansi(next(iter_codes))
-                                )
-                            elif color_type == 2:
-                                self.style += _Style.from_color(
-                                    from_rgb(
-                                        next(iter_codes),
-                                        next(iter_codes),
-                                        next(iter_codes),
-                                    )
-                                )
-                    elif code == 48:
-                        # Background
-                        with suppress(StopIteration):
-                            color_type = next(iter_codes)
-                            if color_type == 5:
-                                self.style += _Style.from_color(
-                                    None, from_ansi(next(iter_codes))
-                                )
-                            elif color_type == 2:
-                                self.style += _Style.from_color(
-                                    None,
-                                    from_rgb(
-                                        next(iter_codes),
-                                        next(iter_codes),
-                                        next(iter_codes),
-                                    ),
-                                )
-
-        return text
-
-
-if sys.platform != "win32" and __name__ == "__main__":  # pragma: no cover
-    import io
-    import os
-    import pty
-    import sys
-
-    decoder = AnsiDecoder()
-
-    stdout = io.BytesIO()
-
-    def read(fd: int) -> bytes:
-        data = os.read(fd, 1024)
-        stdout.write(data)
-        return data
-
-    pty.spawn(sys.argv[1:], read)
-
-    from .console import Console
-
-    console = Console(record=True)
-
-    stdout_result = stdout.getvalue().decode("utf-8")
-    print(stdout_result)
-
-    for line in decoder.decode(stdout_result):
-        console.print(line)
-
-    console.save_html("stdout.html")
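
For context on the module deleted above: `AnsiDecoder` turns raw terminal output containing SGR escape codes into styled rich `Text` objects. A minimal usage sketch, assuming the upstream `rich` package (which this vendored copy mirrors) is installed rather than importing from pip's private vendor tree:

```python
# Sketch only: uses the public rich package instead of pip's vendored copy.
from rich.ansi import AnsiDecoder
from rich.console import Console

decoder = AnsiDecoder()
# "\x1b[1;31m" carries SGR codes 1 (bold) and 31 (red); "\x1b[0m" resets,
# exercising the code-0 branch and SGR_STYLE_MAP lookups in decode_line.
text = decoder.decode_line("\x1b[1;31merror:\x1b[0m disk full")
Console().print(text)
```
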
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/packages/backports/__init__.py
DELETED
File without changes

spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/filelist.py
DELETED
@@ -1,371 +0,0 @@
-"""distutils.filelist
-
-Provides the FileList class, used for poking about the filesystem
-and building lists of files.
-"""
-
-import os
-import re
-import fnmatch
-import functools
-
-from distutils.util import convert_path
-from distutils.errors import DistutilsTemplateError, DistutilsInternalError
-from distutils import log
-
-
-class FileList:
-    """A list of files built by on exploring the filesystem and filtered by
-    applying various patterns to what we find there.
-
-    Instance attributes:
-      dir
-        directory from which files will be taken -- only used if
-        'allfiles' not supplied to constructor
-      files
-        list of filenames currently being built/filtered/manipulated
-      allfiles
-        complete list of files under consideration (ie. without any
-        filtering applied)
-    """
-
-    def __init__(self, warn=None, debug_print=None):
-        # ignore argument to FileList, but keep them for backwards
-        # compatibility
-        self.allfiles = None
-        self.files = []
-
-    def set_allfiles(self, allfiles):
-        self.allfiles = allfiles
-
-    def findall(self, dir=os.curdir):
-        self.allfiles = findall(dir)
-
-    def debug_print(self, msg):
-        """Print 'msg' to stdout if the global DEBUG (taken from the
-        DISTUTILS_DEBUG environment variable) flag is true.
-        """
-        from distutils.debug import DEBUG
-
-        if DEBUG:
-            print(msg)
-
-    # Collection methods
-
-    def append(self, item):
-        self.files.append(item)
-
-    def extend(self, items):
-        self.files.extend(items)
-
-    def sort(self):
-        # Not a strict lexical sort!
-        sortable_files = sorted(map(os.path.split, self.files))
-        self.files = []
-        for sort_tuple in sortable_files:
-            self.files.append(os.path.join(*sort_tuple))
-
-    # Other miscellaneous utility methods
-
-    def remove_duplicates(self):
-        # Assumes list has been sorted!
-        for i in range(len(self.files) - 1, 0, -1):
-            if self.files[i] == self.files[i - 1]:
-                del self.files[i]
-
-    # "File template" methods
-
-    def _parse_template_line(self, line):
-        words = line.split()
-        action = words[0]
-
-        patterns = dir = dir_pattern = None
-
-        if action in ('include', 'exclude', 'global-include', 'global-exclude'):
-            if len(words) < 2:
-                raise DistutilsTemplateError(
-                    "'%s' expects <pattern1> <pattern2> ..." % action
-                )
-            patterns = [convert_path(w) for w in words[1:]]
-        elif action in ('recursive-include', 'recursive-exclude'):
-            if len(words) < 3:
-                raise DistutilsTemplateError(
-                    "'%s' expects <dir> <pattern1> <pattern2> ..." % action
-                )
-            dir = convert_path(words[1])
-            patterns = [convert_path(w) for w in words[2:]]
-        elif action in ('graft', 'prune'):
-            if len(words) != 2:
-                raise DistutilsTemplateError(
-                    "'%s' expects a single <dir_pattern>" % action
-                )
-            dir_pattern = convert_path(words[1])
-        else:
-            raise DistutilsTemplateError("unknown action '%s'" % action)
-
-        return (action, patterns, dir, dir_pattern)
-
-    def process_template_line(self, line):  # noqa: C901
-        # Parse the line: split it up, make sure the right number of words
-        # is there, and return the relevant words.  'action' is always
-        # defined: it's the first word of the line.  Which of the other
-        # three are defined depends on the action; it'll be either
-        # patterns, (dir and patterns), or (dir_pattern).
-        (action, patterns, dir, dir_pattern) = self._parse_template_line(line)
-
-        # OK, now we know that the action is valid and we have the
-        # right number of words on the line for that action -- so we
-        # can proceed with minimal error-checking.
-        if action == 'include':
-            self.debug_print("include " + ' '.join(patterns))
-            for pattern in patterns:
-                if not self.include_pattern(pattern, anchor=1):
-                    log.warn("warning: no files found matching '%s'", pattern)
-
-        elif action == 'exclude':
-            self.debug_print("exclude " + ' '.join(patterns))
-            for pattern in patterns:
-                if not self.exclude_pattern(pattern, anchor=1):
-                    log.warn(
-                        (
-                            "warning: no previously-included files "
-                            "found matching '%s'"
-                        ),
-                        pattern,
-                    )
-
-        elif action == 'global-include':
-            self.debug_print("global-include " + ' '.join(patterns))
-            for pattern in patterns:
-                if not self.include_pattern(pattern, anchor=0):
-                    log.warn(
-                        (
-                            "warning: no files found matching '%s' "
-                            "anywhere in distribution"
-                        ),
-                        pattern,
-                    )
-
-        elif action == 'global-exclude':
-            self.debug_print("global-exclude " + ' '.join(patterns))
-            for pattern in patterns:
-                if not self.exclude_pattern(pattern, anchor=0):
-                    log.warn(
-                        (
-                            "warning: no previously-included files matching "
-                            "'%s' found anywhere in distribution"
-                        ),
-                        pattern,
-                    )
-
-        elif action == 'recursive-include':
-            self.debug_print("recursive-include {} {}".format(dir, ' '.join(patterns)))
-            for pattern in patterns:
-                if not self.include_pattern(pattern, prefix=dir):
-                    msg = (
-                        "warning: no files found matching '%s' " "under directory '%s'"
-                    )
-                    log.warn(msg, pattern, dir)
-
-        elif action == 'recursive-exclude':
-            self.debug_print("recursive-exclude {} {}".format(dir, ' '.join(patterns)))
-            for pattern in patterns:
-                if not self.exclude_pattern(pattern, prefix=dir):
-                    log.warn(
-                        (
-                            "warning: no previously-included files matching "
-                            "'%s' found under directory '%s'"
-                        ),
-                        pattern,
-                        dir,
-                    )
-
-        elif action == 'graft':
-            self.debug_print("graft " + dir_pattern)
-            if not self.include_pattern(None, prefix=dir_pattern):
-                log.warn("warning: no directories found matching '%s'", dir_pattern)
-
-        elif action == 'prune':
-            self.debug_print("prune " + dir_pattern)
-            if not self.exclude_pattern(None, prefix=dir_pattern):
-                log.warn(
-                    ("no previously-included directories found " "matching '%s'"),
-                    dir_pattern,
-                )
-        else:
-            raise DistutilsInternalError(
-                "this cannot happen: invalid action '%s'" % action
-            )
-
-    # Filtering/selection methods
-
-    def include_pattern(self, pattern, anchor=1, prefix=None, is_regex=0):
-        """Select strings (presumably filenames) from 'self.files' that
-        match 'pattern', a Unix-style wildcard (glob) pattern.  Patterns
-        are not quite the same as implemented by the 'fnmatch' module: '*'
-        and '?'  match non-special characters, where "special" is platform-
-        dependent: slash on Unix; colon, slash, and backslash on
-        DOS/Windows; and colon on Mac OS.
-
-        If 'anchor' is true (the default), then the pattern match is more
-        stringent: "*.py" will match "foo.py" but not "foo/bar.py".  If
-        'anchor' is false, both of these will match.
-
-        If 'prefix' is supplied, then only filenames starting with 'prefix'
-        (itself a pattern) and ending with 'pattern', with anything in between
-        them, will match.  'anchor' is ignored in this case.
-
-        If 'is_regex' is true, 'anchor' and 'prefix' are ignored, and
-        'pattern' is assumed to be either a string containing a regex or a
-        regex object -- no translation is done, the regex is just compiled
-        and used as-is.
-
-        Selected strings will be added to self.files.
-
-        Return True if files are found, False otherwise.
-        """
-        # XXX docstring lying about what the special chars are?
-        files_found = False
-        pattern_re = translate_pattern(pattern, anchor, prefix, is_regex)
-        self.debug_print("include_pattern: applying regex r'%s'" % pattern_re.pattern)
-
-        # delayed loading of allfiles list
-        if self.allfiles is None:
-            self.findall()
-
-        for name in self.allfiles:
-            if pattern_re.search(name):
-                self.debug_print(" adding " + name)
-                self.files.append(name)
-                files_found = True
-        return files_found
-
-    def exclude_pattern(self, pattern, anchor=1, prefix=None, is_regex=0):
-        """Remove strings (presumably filenames) from 'files' that match
-        'pattern'.  Other parameters are the same as for
-        'include_pattern()', above.
-        The list 'self.files' is modified in place.
-        Return True if files are found, False otherwise.
-        """
-        files_found = False
-        pattern_re = translate_pattern(pattern, anchor, prefix, is_regex)
-        self.debug_print("exclude_pattern: applying regex r'%s'" % pattern_re.pattern)
-        for i in range(len(self.files) - 1, -1, -1):
-            if pattern_re.search(self.files[i]):
-                self.debug_print(" removing " + self.files[i])
-                del self.files[i]
-                files_found = True
-        return files_found
-
-
-# Utility functions
-
-
-def _find_all_simple(path):
-    """
-    Find all files under 'path'
-    """
-    all_unique = _UniqueDirs.filter(os.walk(path, followlinks=True))
-    results = (
-        os.path.join(base, file) for base, dirs, files in all_unique for file in files
-    )
-    return filter(os.path.isfile, results)
-
-
-class _UniqueDirs(set):
-    """
-    Exclude previously-seen dirs from walk results,
-    avoiding infinite recursion.
-    Ref https://bugs.python.org/issue44497.
-    """
-
-    def __call__(self, walk_item):
-        """
-        Given an item from an os.walk result, determine
-        if the item represents a unique dir for this instance
-        and if not, prevent further traversal.
-        """
-        base, dirs, files = walk_item
-        stat = os.stat(base)
-        candidate = stat.st_dev, stat.st_ino
-        found = candidate in self
-        if found:
-            del dirs[:]
-        self.add(candidate)
-        return not found
-
-    @classmethod
-    def filter(cls, items):
-        return filter(cls(), items)
-
-
-def findall(dir=os.curdir):
-    """
-    Find all files under 'dir' and return the list of full filenames.
-    Unless dir is '.', return full filenames with dir prepended.
-    """
-    files = _find_all_simple(dir)
-    if dir == os.curdir:
-        make_rel = functools.partial(os.path.relpath, start=dir)
-        files = map(make_rel, files)
-    return list(files)
-
-
-def glob_to_re(pattern):
-    """Translate a shell-like glob pattern to a regular expression; return
-    a string containing the regex.  Differs from 'fnmatch.translate()' in
-    that '*' does not match "special characters" (which are
-    platform-specific).
-    """
-    pattern_re = fnmatch.translate(pattern)
-
-    # '?' and '*' in the glob pattern become '.' and '.*' in the RE, which
-    # IMHO is wrong -- '?' and '*' aren't supposed to match slash in Unix,
-    # and by extension they shouldn't match such "special characters" under
-    # any OS.  So change all non-escaped dots in the RE to match any
-    # character except the special characters (currently: just os.sep).
-    sep = os.sep
-    if os.sep == '\\':
-        # we're using a regex to manipulate a regex, so we need
-        # to escape the backslash twice
-        sep = r'\\\\'
-    escaped = r'\1[^%s]' % sep
-    pattern_re = re.sub(r'((?<!\\)(\\\\)*)\.', escaped, pattern_re)
-    return pattern_re
-
-
-def translate_pattern(pattern, anchor=1, prefix=None, is_regex=0):
-    """Translate a shell-like wildcard pattern to a compiled regular
-    expression.  Return the compiled regex.  If 'is_regex' true,
-    then 'pattern' is directly compiled to a regex (if it's a string)
-    or just returned as-is (assumes it's a regex object).
-    """
-    if is_regex:
-        if isinstance(pattern, str):
-            return re.compile(pattern)
-        else:
-            return pattern
-
-    # ditch start and end characters
-    start, _, end = glob_to_re('_').partition('_')
-
-    if pattern:
-        pattern_re = glob_to_re(pattern)
-        assert pattern_re.startswith(start) and pattern_re.endswith(end)
-    else:
-        pattern_re = ''
-
-    if prefix is not None:
-        prefix_re = glob_to_re(prefix)
-        assert prefix_re.startswith(start) and prefix_re.endswith(end)
-        prefix_re = prefix_re[len(start) : len(prefix_re) - len(end)]
-        sep = os.sep
-        if os.sep == '\\':
-            sep = r'\\'
-        pattern_re = pattern_re[len(start) : len(pattern_re) - len(end)]
-        pattern_re = r'{}\A{}{}.*{}{}'.format(start, prefix_re, sep, pattern_re, end)
-    else:  # no prefix -- respect anchor flag
-        if anchor:
-            pattern_re = r'{}\A{}'.format(start, pattern_re[len(start) :])
-
-    return re.compile(pattern_re)
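
For context on the module deleted above: `process_template_line` accepts the same directives as a `MANIFEST.in` file, and `translate_pattern` is the glob-to-regex core behind `include_pattern`/`exclude_pattern`. A short sketch, assuming a Python version that still ships `distutils` (it was removed in 3.12):

```python
# Sketch of the template machinery above; run inside any project directory.
from distutils.filelist import FileList, translate_pattern

fl = FileList()
fl.findall(".")                            # populate fl.allfiles from the cwd
fl.process_template_line("include *.py")   # MANIFEST.in-style directive
print(fl.files)

# With anchor=1 the regex is \A-anchored and '*' will not cross os.sep,
# so a top-level setup.py matches while pkg/mod.py does not.
regex = translate_pattern("*.py", anchor=1)
print(bool(regex.search("setup.py")), bool(regex.search("pkg/mod.py")))
```
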
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/importlib_metadata/__init__.py
DELETED
@@ -1,1047 +0,0 @@
-import os
-import re
-import abc
-import csv
-import sys
-from .. import zipp
-import email
-import pathlib
-import operator
-import textwrap
-import warnings
-import functools
-import itertools
-import posixpath
-import collections
-
-from . import _adapters, _meta
-from ._collections import FreezableDefaultDict, Pair
-from ._compat import (
-    NullFinder,
-    install,
-    pypy_partial,
-)
-from ._functools import method_cache, pass_none
-from ._itertools import always_iterable, unique_everseen
-from ._meta import PackageMetadata, SimplePath
-
-from contextlib import suppress
-from importlib import import_module
-from importlib.abc import MetaPathFinder
-from itertools import starmap
-from typing import List, Mapping, Optional, Union
-
-
-__all__ = [
-    'Distribution',
-    'DistributionFinder',
-    'PackageMetadata',
-    'PackageNotFoundError',
-    'distribution',
-    'distributions',
-    'entry_points',
-    'files',
-    'metadata',
-    'packages_distributions',
-    'requires',
-    'version',
-]
-
-
-class PackageNotFoundError(ModuleNotFoundError):
-    """The package was not found."""
-
-    def __str__(self):
-        return f"No package metadata was found for {self.name}"
-
-    @property
-    def name(self):
-        (name,) = self.args
-        return name
-
-
-class Sectioned:
-    """
-    A simple entry point config parser for performance
-
-    >>> for item in Sectioned.read(Sectioned._sample):
-    ...     print(item)
-    Pair(name='sec1', value='# comments ignored')
-    Pair(name='sec1', value='a = 1')
-    Pair(name='sec1', value='b = 2')
-    Pair(name='sec2', value='a = 2')
-
-    >>> res = Sectioned.section_pairs(Sectioned._sample)
-    >>> item = next(res)
-    >>> item.name
-    'sec1'
-    >>> item.value
-    Pair(name='a', value='1')
-    >>> item = next(res)
-    >>> item.value
-    Pair(name='b', value='2')
-    >>> item = next(res)
-    >>> item.name
-    'sec2'
-    >>> item.value
-    Pair(name='a', value='2')
-    >>> list(res)
-    []
-    """
-
-    _sample = textwrap.dedent(
-        """
-        [sec1]
-        # comments ignored
-        a = 1
-        b = 2
-
-        [sec2]
-        a = 2
-        """
-    ).lstrip()
-
-    @classmethod
-    def section_pairs(cls, text):
-        return (
-            section._replace(value=Pair.parse(section.value))
-            for section in cls.read(text, filter_=cls.valid)
-            if section.name is not None
-        )
-
-    @staticmethod
-    def read(text, filter_=None):
-        lines = filter(filter_, map(str.strip, text.splitlines()))
-        name = None
-        for value in lines:
-            section_match = value.startswith('[') and value.endswith(']')
-            if section_match:
-                name = value.strip('[]')
-                continue
-            yield Pair(name, value)
-
-    @staticmethod
-    def valid(line):
-        return line and not line.startswith('#')
-
-
-class DeprecatedTuple:
-    """
-    Provide subscript item access for backward compatibility.
-
-    >>> recwarn = getfixture('recwarn')
-    >>> ep = EntryPoint(name='name', value='value', group='group')
-    >>> ep[:]
-    ('name', 'value', 'group')
-    >>> ep[0]
-    'name'
-    >>> len(recwarn)
-    1
-    """
-
-    _warn = functools.partial(
-        warnings.warn,
-        "EntryPoint tuple interface is deprecated. Access members by name.",
-        DeprecationWarning,
-        stacklevel=pypy_partial(2),
-    )
-
-    def __getitem__(self, item):
-        self._warn()
-        return self._key()[item]
-
-
-class EntryPoint(DeprecatedTuple):
-    """An entry point as defined by Python packaging conventions.
-
-    See `the packaging docs on entry points
-    <https://packaging.python.org/specifications/entry-points/>`_
-    for more information.
-    """
-
-    pattern = re.compile(
-        r'(?P<module>[\w.]+)\s*'
-        r'(:\s*(?P<attr>[\w.]+)\s*)?'
-        r'((?P<extras>\[.*\])\s*)?$'
-    )
-    """
-    A regular expression describing the syntax for an entry point,
-    which might look like:
-
-        - module
-        - package.module
-        - package.module:attribute
-        - package.module:object.attribute
-        - package.module:attr [extra1, extra2]
-
-    Other combinations are possible as well.
-
-    The expression is lenient about whitespace around the ':',
-    following the attr, and following any extras.
-    """
-
-    dist: Optional['Distribution'] = None
-
-    def __init__(self, name, value, group):
-        vars(self).update(name=name, value=value, group=group)
-
-    def load(self):
-        """Load the entry point from its definition. If only a module
-        is indicated by the value, return that module. Otherwise,
-        return the named object.
-        """
-        match = self.pattern.match(self.value)
-        module = import_module(match.group('module'))
-        attrs = filter(None, (match.group('attr') or '').split('.'))
-        return functools.reduce(getattr, attrs, module)
-
-    @property
-    def module(self):
-        match = self.pattern.match(self.value)
-        return match.group('module')
-
-    @property
-    def attr(self):
-        match = self.pattern.match(self.value)
-        return match.group('attr')
-
-    @property
-    def extras(self):
-        match = self.pattern.match(self.value)
-        return list(re.finditer(r'\w+', match.group('extras') or ''))
-
-    def _for(self, dist):
-        vars(self).update(dist=dist)
-        return self
-
-    def __iter__(self):
-        """
-        Supply iter so one may construct dicts of EntryPoints by name.
-        """
-        msg = (
-            "Construction of dict of EntryPoints is deprecated in "
-            "favor of EntryPoints."
-        )
-        warnings.warn(msg, DeprecationWarning)
-        return iter((self.name, self))
-
-    def matches(self, **params):
-        attrs = (getattr(self, param) for param in params)
-        return all(map(operator.eq, params.values(), attrs))
-
-    def _key(self):
-        return self.name, self.value, self.group
-
-    def __lt__(self, other):
-        return self._key() < other._key()
-
-    def __eq__(self, other):
-        return self._key() == other._key()
-
-    def __setattr__(self, name, value):
-        raise AttributeError("EntryPoint objects are immutable.")
-
-    def __repr__(self):
-        return (
-            f'EntryPoint(name={self.name!r}, value={self.value!r}, '
-            f'group={self.group!r})'
-        )
-
-    def __hash__(self):
-        return hash(self._key())
-
-
-class DeprecatedList(list):
-    """
-    Allow an otherwise immutable object to implement mutability
-    for compatibility.
-
-    >>> recwarn = getfixture('recwarn')
-    >>> dl = DeprecatedList(range(3))
-    >>> dl[0] = 1
-    >>> dl.append(3)
-    >>> del dl[3]
-    >>> dl.reverse()
-    >>> dl.sort()
-    >>> dl.extend([4])
-    >>> dl.pop(-1)
-    4
-    >>> dl.remove(1)
-    >>> dl += [5]
-    >>> dl + [6]
-    [1, 2, 5, 6]
-    >>> dl + (6,)
-    [1, 2, 5, 6]
-    >>> dl.insert(0, 0)
-    >>> dl
-    [0, 1, 2, 5]
-    >>> dl == [0, 1, 2, 5]
-    True
-    >>> dl == (0, 1, 2, 5)
-    True
-    >>> len(recwarn)
-    1
-    """
-
-    __slots__ = ()
-
-    _warn = functools.partial(
-        warnings.warn,
-        "EntryPoints list interface is deprecated. Cast to list if needed.",
-        DeprecationWarning,
-        stacklevel=pypy_partial(2),
-    )
-
-    def _wrap_deprecated_method(method_name: str):  # type: ignore
-        def wrapped(self, *args, **kwargs):
-            self._warn()
-            return getattr(super(), method_name)(*args, **kwargs)
-
-        return method_name, wrapped
-
-    locals().update(
-        map(
-            _wrap_deprecated_method,
-            '__setitem__ __delitem__ append reverse extend pop remove '
-            '__iadd__ insert sort'.split(),
-        )
-    )
-
-    def __add__(self, other):
-        if not isinstance(other, tuple):
-            self._warn()
-            other = tuple(other)
-        return self.__class__(tuple(self) + other)
-
-    def __eq__(self, other):
-        if not isinstance(other, tuple):
-            self._warn()
-            other = tuple(other)
-
-        return tuple(self).__eq__(other)
-
-
-class EntryPoints(DeprecatedList):
-    """
-    An immutable collection of selectable EntryPoint objects.
-    """
-
-    __slots__ = ()
-
-    def __getitem__(self, name):  # -> EntryPoint:
-        """
-        Get the EntryPoint in self matching name.
-        """
-        if isinstance(name, int):
-            warnings.warn(
-                "Accessing entry points by index is deprecated. "
-                "Cast to tuple if needed.",
-                DeprecationWarning,
-                stacklevel=2,
-            )
-            return super().__getitem__(name)
-        try:
-            return next(iter(self.select(name=name)))
-        except StopIteration:
-            raise KeyError(name)
-
-    def select(self, **params):
-        """
-        Select entry points from self that match the
-        given parameters (typically group and/or name).
-        """
-        return EntryPoints(ep for ep in self if ep.matches(**params))
-
-    @property
-    def names(self):
-        """
-        Return the set of all names of all entry points.
-        """
-        return {ep.name for ep in self}
-
-    @property
-    def groups(self):
-        """
-        Return the set of all groups of all entry points.
-
-        For coverage while SelectableGroups is present.
-        >>> EntryPoints().groups
-        set()
-        """
-        return {ep.group for ep in self}
-
-    @classmethod
-    def _from_text_for(cls, text, dist):
-        return cls(ep._for(dist) for ep in cls._from_text(text))
-
-    @staticmethod
-    def _from_text(text):
-        return (
-            EntryPoint(name=item.value.name, value=item.value.value, group=item.name)
-            for item in Sectioned.section_pairs(text or '')
-        )
-
-
-class Deprecated:
-    """
-    Compatibility add-in for mapping to indicate that
-    mapping behavior is deprecated.
-
-    >>> recwarn = getfixture('recwarn')
-    >>> class DeprecatedDict(Deprecated, dict): pass
-    >>> dd = DeprecatedDict(foo='bar')
-    >>> dd.get('baz', None)
-    >>> dd['foo']
-    'bar'
-    >>> list(dd)
-    ['foo']
-    >>> list(dd.keys())
-    ['foo']
-    >>> 'foo' in dd
-    True
-    >>> list(dd.values())
-    ['bar']
-    >>> len(recwarn)
-    1
-    """
-
-    _warn = functools.partial(
-        warnings.warn,
-        "SelectableGroups dict interface is deprecated. Use select.",
-        DeprecationWarning,
-        stacklevel=pypy_partial(2),
-    )
-
-    def __getitem__(self, name):
-        self._warn()
-        return super().__getitem__(name)
-
-    def get(self, name, default=None):
-        self._warn()
-        return super().get(name, default)
-
-    def __iter__(self):
-        self._warn()
-        return super().__iter__()
-
-    def __contains__(self, *args):
-        self._warn()
-        return super().__contains__(*args)
-
-    def keys(self):
-        self._warn()
-        return super().keys()
-
-    def values(self):
-        self._warn()
-        return super().values()
-
-
-class SelectableGroups(Deprecated, dict):
-    """
-    A backward- and forward-compatible result from
-    entry_points that fully implements the dict interface.
-    """
-
-    @classmethod
-    def load(cls, eps):
-        by_group = operator.attrgetter('group')
-        ordered = sorted(eps, key=by_group)
-        grouped = itertools.groupby(ordered, by_group)
-        return cls((group, EntryPoints(eps)) for group, eps in grouped)
-
-    @property
-    def _all(self):
-        """
-        Reconstruct a list of all entrypoints from the groups.
-        """
-        groups = super(Deprecated, self).values()
-        return EntryPoints(itertools.chain.from_iterable(groups))
-
-    @property
-    def groups(self):
-        return self._all.groups
-
-    @property
-    def names(self):
-        """
-        for coverage:
-        >>> SelectableGroups().names
-        set()
-        """
-        return self._all.names
-
-    def select(self, **params):
-        if not params:
-            return self
-        return self._all.select(**params)
-
-
-class PackagePath(pathlib.PurePosixPath):
-    """A reference to a path in a package"""
-
-    def read_text(self, encoding='utf-8'):
-        with self.locate().open(encoding=encoding) as stream:
-            return stream.read()
-
-    def read_binary(self):
-        with self.locate().open('rb') as stream:
-            return stream.read()
-
-    def locate(self):
-        """Return a path-like object for this path"""
-        return self.dist.locate_file(self)
-
-
-class FileHash:
-    def __init__(self, spec):
-        self.mode, _, self.value = spec.partition('=')
-
-    def __repr__(self):
-        return f'<FileHash mode: {self.mode} value: {self.value}>'
-
-
-class Distribution:
-    """A Python distribution package."""
-
-    @abc.abstractmethod
-    def read_text(self, filename):
-        """Attempt to load metadata file given by the name.
-
-        :param filename: The name of the file in the distribution info.
-        :return: The text if found, otherwise None.
-        """
-
-    @abc.abstractmethod
-    def locate_file(self, path):
-        """
-        Given a path to a file in this distribution, return a path
-        to it.
-        """
-
-    @classmethod
-    def from_name(cls, name):
-        """Return the Distribution for the given package name.
-
-        :param name: The name of the distribution package to search for.
-        :return: The Distribution instance (or subclass thereof) for the named
-            package, if found.
-        :raises PackageNotFoundError: When the named package's distribution
-            metadata cannot be found.
-        """
-        for resolver in cls._discover_resolvers():
-            dists = resolver(DistributionFinder.Context(name=name))
-            dist = next(iter(dists), None)
-            if dist is not None:
-                return dist
-        else:
-            raise PackageNotFoundError(name)
-
-    @classmethod
-    def discover(cls, **kwargs):
-        """Return an iterable of Distribution objects for all packages.
-
-        Pass a ``context`` or pass keyword arguments for constructing
-        a context.
-
-        :context: A ``DistributionFinder.Context`` object.
-        :return: Iterable of Distribution objects for all packages.
-        """
-        context = kwargs.pop('context', None)
-        if context and kwargs:
-            raise ValueError("cannot accept context and kwargs")
-        context = context or DistributionFinder.Context(**kwargs)
-        return itertools.chain.from_iterable(
-            resolver(context) for resolver in cls._discover_resolvers()
-        )
-
-    @staticmethod
-    def at(path):
-        """Return a Distribution for the indicated metadata path
-
-        :param path: a string or path-like object
-        :return: a concrete Distribution instance for the path
-        """
-        return PathDistribution(pathlib.Path(path))
-
-    @staticmethod
-    def _discover_resolvers():
-        """Search the meta_path for resolvers."""
-        declared = (
-            getattr(finder, 'find_distributions', None) for finder in sys.meta_path
-        )
-        return filter(None, declared)
-
-    @property
-    def metadata(self) -> _meta.PackageMetadata:
-        """Return the parsed metadata for this Distribution.
-
-        The returned object will have keys that name the various bits of
-        metadata.  See PEP 566 for details.
-        """
-        text = (
-            self.read_text('METADATA')
-            or self.read_text('PKG-INFO')
-            # This last clause is here to support old egg-info files.  Its
-            # effect is to just end up using the PathDistribution's self._path
-            # (which points to the egg-info file) attribute unchanged.
-            or self.read_text('')
-        )
-        return _adapters.Message(email.message_from_string(text))
-
-    @property
-    def name(self):
-        """Return the 'Name' metadata for the distribution package."""
-        return self.metadata['Name']
-
-    @property
-    def _normalized_name(self):
-        """Return a normalized version of the name."""
-        return Prepared.normalize(self.name)
-
-    @property
-    def version(self):
-        """Return the 'Version' metadata for the distribution package."""
-        return self.metadata['Version']
-
-    @property
-    def entry_points(self):
-        return EntryPoints._from_text_for(self.read_text('entry_points.txt'), self)
-
-    @property
-    def files(self):
-        """Files in this distribution.
-
-        :return: List of PackagePath for this distribution or None
-
-        Result is `None` if the metadata file that enumerates files
-        (i.e. RECORD for dist-info or SOURCES.txt for egg-info) is
-        missing.
-        Result may be empty if the metadata exists but is empty.
-        """
-
-        def make_file(name, hash=None, size_str=None):
-            result = PackagePath(name)
-            result.hash = FileHash(hash) if hash else None
-            result.size = int(size_str) if size_str else None
-            result.dist = self
-            return result
-
-        @pass_none
-        def make_files(lines):
-            return list(starmap(make_file, csv.reader(lines)))
-
-        return make_files(self._read_files_distinfo() or self._read_files_egginfo())
-
-    def _read_files_distinfo(self):
-        """
-        Read the lines of RECORD
-        """
-        text = self.read_text('RECORD')
-        return text and text.splitlines()
-
-    def _read_files_egginfo(self):
-        """
-        SOURCES.txt might contain literal commas, so wrap each line
-        in quotes.
-        """
-        text = self.read_text('SOURCES.txt')
-        return text and map('"{}"'.format, text.splitlines())
-
-    @property
-    def requires(self):
-        """Generated requirements specified for this Distribution"""
-        reqs = self._read_dist_info_reqs() or self._read_egg_info_reqs()
-        return reqs and list(reqs)
-
-    def _read_dist_info_reqs(self):
-        return self.metadata.get_all('Requires-Dist')
-
-    def _read_egg_info_reqs(self):
-        source = self.read_text('requires.txt')
-        return pass_none(self._deps_from_requires_text)(source)
-
-    @classmethod
-    def _deps_from_requires_text(cls, source):
-        return cls._convert_egg_info_reqs_to_simple_reqs(Sectioned.read(source))
-
-    @staticmethod
-    def _convert_egg_info_reqs_to_simple_reqs(sections):
-        """
-        Historically, setuptools would solicit and store 'extra'
-        requirements, including those with environment markers,
-        in separate sections. More modern tools expect each
-        dependency to be defined separately, with any relevant
-        extras and environment markers attached directly to that
-        requirement. This method converts the former to the
-        latter. See _test_deps_from_requires_text for an example.
-        """
-
-        def make_condition(name):
-            return name and f'extra == "{name}"'
-
-        def quoted_marker(section):
-            section = section or ''
-            extra, sep, markers = section.partition(':')
-            if extra and markers:
-                markers = f'({markers})'
-            conditions = list(filter(None, [markers, make_condition(extra)]))
-            return '; ' + ' and '.join(conditions) if conditions else ''
-
-        def url_req_space(req):
-            """
-            PEP 508 requires a space between the url_spec and the quoted_marker.
-            Ref python/importlib_metadata#357.
-            """
-            # '@' is uniquely indicative of a url_req.
-            return ' ' * ('@' in req)
-
-        for section in sections:
-            space = url_req_space(section.value)
-            yield section.value + space + quoted_marker(section.name)
-
-
-class DistributionFinder(MetaPathFinder):
-    """
-    A MetaPathFinder capable of discovering installed distributions.
-    """
-
-    class Context:
-        """
-        Keyword arguments presented by the caller to
-        ``distributions()`` or ``Distribution.discover()``
-        to narrow the scope of a search for distributions
-        in all DistributionFinders.
-
-        Each DistributionFinder may expect any parameters
-        and should attempt to honor the canonical
-        parameters defined below when appropriate.
-        """
-
-        name = None
-        """
-        Specific name for which a distribution finder should match.
-        A name of ``None`` matches all distributions.
-        """
-
-        def __init__(self, **kwargs):
-            vars(self).update(kwargs)
-
-        @property
-        def path(self):
-            """
-            The sequence of directory path that a distribution finder
-            should search.
-
-            Typically refers to Python installed package paths such as
-            "site-packages" directories and defaults to ``sys.path``.
-            """
-            return vars(self).get('path', sys.path)
-
-    @abc.abstractmethod
-    def find_distributions(self, context=Context()):
-        """
-        Find distributions.
-
-        Return an iterable of all Distribution instances capable of
-        loading the metadata for packages matching the ``context``,
-        a DistributionFinder.Context instance.
-        """
-
-
-class FastPath:
-    """
-    Micro-optimized class for searching a path for
-    children.
-
-    >>> FastPath('').children()
-    ['...']
-    """
-
-    @functools.lru_cache()  # type: ignore
-    def __new__(cls, root):
-        return super().__new__(cls)
-
-    def __init__(self, root):
-        self.root = str(root)
-
-    def joinpath(self, child):
-        return pathlib.Path(self.root, child)
-
-    def children(self):
-        with suppress(Exception):
-            return os.listdir(self.root or '.')
-        with suppress(Exception):
-            return self.zip_children()
-        return []
-
-    def zip_children(self):
-        zip_path = zipp.Path(self.root)
-        names = zip_path.root.namelist()
-        self.joinpath = zip_path.joinpath
-
-        return dict.fromkeys(child.split(posixpath.sep, 1)[0] for child in names)
-
-    def search(self, name):
-        return self.lookup(self.mtime).search(name)
-
-    @property
-    def mtime(self):
-        with suppress(OSError):
-            return os.stat(self.root).st_mtime
-        self.lookup.cache_clear()
-
-    @method_cache
-    def lookup(self, mtime):
-        return Lookup(self)
-
-
-class Lookup:
-    def __init__(self, path: FastPath):
-        base = os.path.basename(path.root).lower()
-        base_is_egg = base.endswith(".egg")
-        self.infos = FreezableDefaultDict(list)
-        self.eggs = FreezableDefaultDict(list)
-
-        for child in path.children():
-            low = child.lower()
-            if low.endswith((".dist-info", ".egg-info")):
-                # rpartition is faster than splitext and suitable for this purpose.
-                name = low.rpartition(".")[0].partition("-")[0]
-                normalized = Prepared.normalize(name)
-                self.infos[normalized].append(path.joinpath(child))
-            elif base_is_egg and low == "egg-info":
-                name = base.rpartition(".")[0].partition("-")[0]
-                legacy_normalized = Prepared.legacy_normalize(name)
-                self.eggs[legacy_normalized].append(path.joinpath(child))
-
-        self.infos.freeze()
-        self.eggs.freeze()
-
-    def search(self, prepared):
-        infos = (
-            self.infos[prepared.normalized]
-            if prepared
-            else itertools.chain.from_iterable(self.infos.values())
-        )
-        eggs = (
-            self.eggs[prepared.legacy_normalized]
-            if prepared
-            else itertools.chain.from_iterable(self.eggs.values())
-        )
-        return itertools.chain(infos, eggs)
-
-
-class Prepared:
-    """
-    A prepared search for metadata on a possibly-named package.
-    """
-
-    normalized = None
-    legacy_normalized = None
-
-    def __init__(self, name):
-        self.name = name
-        if name is None:
-            return
-        self.normalized = self.normalize(name)
-        self.legacy_normalized = self.legacy_normalize(name)
-
-    @staticmethod
-    def normalize(name):
-        """
-        PEP 503 normalization plus dashes as underscores.
-        """
-        return re.sub(r"[-_.]+", "-", name).lower().replace('-', '_')
-
-    @staticmethod
-    def legacy_normalize(name):
-        """
-        Normalize the package name as found in the convention in
-        older packaging tools versions and specs.
-        """
-        return name.lower().replace('-', '_')
-
-    def __bool__(self):
-        return bool(self.name)
-
-
-@install
-class MetadataPathFinder(NullFinder, DistributionFinder):
-    """A degenerate finder for distribution packages on the file system.
-
-    This finder supplies only a find_distributions() method for versions
-    of Python that do not have a PathFinder find_distributions().
-    """
-
-    def find_distributions(self, context=DistributionFinder.Context()):
-        """
-        Find distributions.
-
-        Return an iterable of all Distribution instances capable of
-        loading the metadata for packages matching ``context.name``
-        (or all names if ``None`` indicated) along the paths in the list
-        of directories ``context.path``.
-        """
-        found = self._search_paths(context.name, context.path)
-        return map(PathDistribution, found)
-
-    @classmethod
-    def _search_paths(cls, name, paths):
-        """Find metadata directories in paths heuristically."""
-        prepared = Prepared(name)
-        return itertools.chain.from_iterable(
-            path.search(prepared) for path in map(FastPath, paths)
-        )
-
-    def invalidate_caches(cls):
-        FastPath.__new__.cache_clear()
-
-
-class PathDistribution(Distribution):
-    def __init__(self, path: SimplePath):
-        """Construct a distribution.
-
-        :param path: SimplePath indicating the metadata directory.
-        """
-        self._path = path
-
-    def read_text(self, filename):
-        with suppress(
-            FileNotFoundError,
-            IsADirectoryError,
-            KeyError,
-            NotADirectoryError,
-            PermissionError,
-        ):
-            return self._path.joinpath(filename).read_text(encoding='utf-8')
-
-    read_text.__doc__ = Distribution.read_text.__doc__
-
-    def locate_file(self, path):
-        return self._path.parent / path
-
-    @property
-    def _normalized_name(self):
-        """
-        Performance optimization: where possible, resolve the
-        normalized name from the file system path.
-        """
-        stem = os.path.basename(str(self._path))
-        return self._name_from_stem(stem) or super()._normalized_name
-
-    def _name_from_stem(self, stem):
-        name, ext = os.path.splitext(stem)
-        if ext not in ('.dist-info', '.egg-info'):
|
936 |
-
return
|
937 |
-
name, sep, rest = stem.partition('-')
|
938 |
-
return name
|
939 |
-
|
940 |
-
|
941 |
-
def distribution(distribution_name):
|
942 |
-
"""Get the ``Distribution`` instance for the named package.
|
943 |
-
|
944 |
-
:param distribution_name: The name of the distribution package as a string.
|
945 |
-
:return: A ``Distribution`` instance (or subclass thereof).
|
946 |
-
"""
|
947 |
-
return Distribution.from_name(distribution_name)
|
948 |
-
|
949 |
-
|
950 |
-
def distributions(**kwargs):
|
951 |
-
"""Get all ``Distribution`` instances in the current environment.
|
952 |
-
|
953 |
-
:return: An iterable of ``Distribution`` instances.
|
954 |
-
"""
|
955 |
-
return Distribution.discover(**kwargs)
|
956 |
-
|
957 |
-
|
958 |
-
def metadata(distribution_name) -> _meta.PackageMetadata:
|
959 |
-
"""Get the metadata for the named package.
|
960 |
-
|
961 |
-
:param distribution_name: The name of the distribution package to query.
|
962 |
-
:return: A PackageMetadata containing the parsed metadata.
|
963 |
-
"""
|
964 |
-
return Distribution.from_name(distribution_name).metadata
|
965 |
-
|
966 |
-
|
967 |
-
def version(distribution_name):
|
968 |
-
"""Get the version string for the named package.
|
969 |
-
|
970 |
-
:param distribution_name: The name of the distribution package to query.
|
971 |
-
:return: The version string for the package as defined in the package's
|
972 |
-
"Version" metadata key.
|
973 |
-
"""
|
974 |
-
return distribution(distribution_name).version
|
975 |
-
|
976 |
-
|
977 |
-
def entry_points(**params) -> Union[EntryPoints, SelectableGroups]:
|
978 |
-
"""Return EntryPoint objects for all installed packages.
|
979 |
-
|
980 |
-
Pass selection parameters (group or name) to filter the
|
981 |
-
result to entry points matching those properties (see
|
982 |
-
EntryPoints.select()).
|
983 |
-
|
984 |
-
For compatibility, returns ``SelectableGroups`` object unless
|
985 |
-
selection parameters are supplied. In the future, this function
|
986 |
-
will return ``EntryPoints`` instead of ``SelectableGroups``
|
987 |
-
even when no selection parameters are supplied.
|
988 |
-
|
989 |
-
For maximum future compatibility, pass selection parameters
|
990 |
-
or invoke ``.select`` with parameters on the result.
|
991 |
-
|
992 |
-
:return: EntryPoints or SelectableGroups for all installed packages.
|
993 |
-
"""
|
994 |
-
norm_name = operator.attrgetter('_normalized_name')
|
995 |
-
unique = functools.partial(unique_everseen, key=norm_name)
|
996 |
-
eps = itertools.chain.from_iterable(
|
997 |
-
dist.entry_points for dist in unique(distributions())
|
998 |
-
)
|
999 |
-
return SelectableGroups.load(eps).select(**params)
|
1000 |
-
|
1001 |
-
|
1002 |
-
def files(distribution_name):
|
1003 |
-
"""Return a list of files for the named package.
|
1004 |
-
|
1005 |
-
:param distribution_name: The name of the distribution package to query.
|
1006 |
-
:return: List of files composing the distribution.
|
1007 |
-
"""
|
1008 |
-
return distribution(distribution_name).files
|
1009 |
-
|
1010 |
-
|
1011 |
-
def requires(distribution_name):
|
1012 |
-
"""
|
1013 |
-
Return a list of requirements for the named package.
|
1014 |
-
|
1015 |
-
:return: An iterator of requirements, suitable for
|
1016 |
-
packaging.requirement.Requirement.
|
1017 |
-
"""
|
1018 |
-
return distribution(distribution_name).requires
|
1019 |
-
|
1020 |
-
|
1021 |
-
def packages_distributions() -> Mapping[str, List[str]]:
|
1022 |
-
"""
|
1023 |
-
Return a mapping of top-level packages to their
|
1024 |
-
distributions.
|
1025 |
-
|
1026 |
-
>>> import collections.abc
|
1027 |
-
>>> pkgs = packages_distributions()
|
1028 |
-
>>> all(isinstance(dist, collections.abc.Sequence) for dist in pkgs.values())
|
1029 |
-
True
|
1030 |
-
"""
|
1031 |
-
pkg_to_dist = collections.defaultdict(list)
|
1032 |
-
for dist in distributions():
|
1033 |
-
for pkg in _top_level_declared(dist) or _top_level_inferred(dist):
|
1034 |
-
pkg_to_dist[pkg].append(dist.metadata['Name'])
|
1035 |
-
return dict(pkg_to_dist)
|
1036 |
-
|
1037 |
-
|
1038 |
-
def _top_level_declared(dist):
|
1039 |
-
return (dist.read_text('top_level.txt') or '').split()
|
1040 |
-
|
1041 |
-
|
1042 |
-
def _top_level_inferred(dist):
|
1043 |
-
return {
|
1044 |
-
f.parts[0] if len(f.parts) > 1 else f.with_suffix('').name
|
1045 |
-
for f in always_iterable(dist.files)
|
1046 |
-
if f.suffix == ".py"
|
1047 |
-
}
|
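The module-level helpers above (distribution, version, metadata, entry_points, files, requires, packages_distributions) mirror the standard library's importlib.metadata. A minimal usage sketch against the stdlib equivalent (packages_distributions needs Python 3.10+ there; "pip" is just an illustrative installed package):

    from importlib.metadata import (
        distribution, version, metadata, packages_distributions,
    )

    print(version("pip"))                # version string from the "Version" key
    dist = distribution("pip")           # a Distribution (PathDistribution) instance
    print(dist.metadata["Name"])         # "pip"
    print(metadata("pip")["Summary"])    # mapping-style access to parsed METADATA

    # top-level import names mapped to the distributions that provide them
    print(packages_distributions().get("pip"))   # ["pip"]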
spaces/BilalSardar/Voice-Cloning/app.py
DELETED
@@ -1,165 +0,0 @@
from turtle import title
import gradio as gr

import git
import os
os.system('git clone https://github.com/Edresson/Coqui-TTS -b multilingual-torchaudio-SE TTS')
os.system('pip install -q -e TTS/')
os.system('pip install -q torchaudio==0.9.0')

import sys
TTS_PATH = "TTS/"

# add libraries into environment
sys.path.append(TTS_PATH) # set this if TTS is not installed globally

import os
import string
import time
import argparse
import json

import numpy as np
import IPython
from IPython.display import Audio


import torch

from TTS.tts.utils.synthesis import synthesis
#from TTS.tts.utils.text.symbols import make_symbols, phonemes, symbols
try:
    from TTS.utils.audio import AudioProcessor
except:
    from TTS.utils.audio import AudioProcessor


from TTS.tts.models import setup_model
from TTS.config import load_config
from TTS.tts.models.vits import *

OUT_PATH = 'out/'

# create output path
os.makedirs(OUT_PATH, exist_ok=True)

# model vars
MODEL_PATH = '/home/user/app/best_model_latest.pth.tar'
CONFIG_PATH = '/home/user/app/config.json'
TTS_LANGUAGES = "/home/user/app/language_ids.json"
TTS_SPEAKERS = "/home/user/app/speakers.json"
USE_CUDA = torch.cuda.is_available()

# load the config
C = load_config(CONFIG_PATH)


# load the audio processor
ap = AudioProcessor(**C.audio)

speaker_embedding = None

C.model_args['d_vector_file'] = TTS_SPEAKERS
C.model_args['use_speaker_encoder_as_loss'] = False

model = setup_model(C)
model.language_manager.set_language_ids_from_file(TTS_LANGUAGES)
# print(model.language_manager.num_languages, model.embedded_language_dim)
# print(model.emb_l)
cp = torch.load(MODEL_PATH, map_location=torch.device('cpu'))
# remove speaker encoder
model_weights = cp['model'].copy()
for key in list(model_weights.keys()):
    if "speaker_encoder" in key:
        del model_weights[key]

model.load_state_dict(model_weights)


model.eval()

if USE_CUDA:
    model = model.cuda()

# synthesize voice
use_griffin_lim = False

os.system('pip install -q pydub ffmpeg-normalize')

CONFIG_SE_PATH = "config_se.json"
CHECKPOINT_SE_PATH = "SE_checkpoint.pth.tar"

from TTS.tts.utils.speakers import SpeakerManager
from pydub import AudioSegment
import librosa

SE_speaker_manager = SpeakerManager(encoder_model_path=CHECKPOINT_SE_PATH, encoder_config_path=CONFIG_SE_PATH, use_cuda=USE_CUDA)

def compute_spec(ref_file):
    y, sr = librosa.load(ref_file, sr=ap.sample_rate)
    spec = ap.spectrogram(y)
    spec = torch.FloatTensor(spec).unsqueeze(0)
    return spec



def greet(Text,Voicetoclone,VoiceMicrophone):
    text= "%s" % (Text)
    if Voicetoclone is not None:
        reference_files= "%s" % (Voicetoclone)
        print("path url")
        print(Voicetoclone)
        sample= str(Voicetoclone)
    else:
        reference_files= "%s" % (VoiceMicrophone)
        print("path url")
        print(VoiceMicrophone)
        sample= str(VoiceMicrophone)
    size= len(reference_files)*sys.getsizeof(reference_files)
    size2= size / 1000000
    if (size2 > 0.012) or len(text)>2000:
        message="File is greater than 30mb or Text inserted is longer than 2000 characters. Please re-try with smaller sizes."
        print(message)
        raise SystemExit("File is greater than 30mb. Please re-try or Text inserted is longer than 2000 characters. Please re-try with smaller sizes.")
    else:
        os.system('ffmpeg-normalize $sample -nt rms -t=-27 -o $sample -ar 16000 -f')
        reference_emb = SE_speaker_manager.compute_d_vector_from_clip(reference_files)
        model.length_scale = 1  # scaler for the duration predictor. The larger it is, the slower the speech.
        model.inference_noise_scale = 0.3 # defines the noise variance applied to the random z vector at inference.
        model.inference_noise_scale_dp = 0.3 # defines the noise variance applied to the duration predictor z vector at inference.
        text = text
        model.language_manager.language_id_mapping
        language_id = 0

        print(" > text: {}".format(text))
        wav, alignment, _, _ = synthesis(
                            model,
                            text,
                            C,
                            "cuda" in str(next(model.parameters()).device),
                            ap,
                            speaker_id=None,
                            d_vector=reference_emb,
                            style_wav=None,
                            language_id=language_id,
                            enable_eos_bos_chars=C.enable_eos_bos_chars,
                            use_griffin_lim=True,
                            do_trim_silence=False,
                        ).values()
        print("Generated Audio")
        IPython.display.display(Audio(wav, rate=ap.sample_rate))
        #file_name = text.replace(" ", "_")
        #file_name = file_name.translate(str.maketrans('', '', string.punctuation.replace('_', ''))) + '.wav'
        file_name="Audio.wav"
        out_path = os.path.join(OUT_PATH, file_name)
        print(" > Saving output to {}".format(out_path))
        ap.save_wav(wav, out_path)
        return out_path

demo = gr.Interface(
    fn=greet,
    inputs=[gr.inputs.Textbox(label='What would you like the voice to say? (max. 2000 characters per request)'),gr.Audio(type="filepath", source="upload",label='Please upload a voice to clone (max. 30mb)'),gr.Audio(source="microphone", type="filepath", streaming=True)],
    outputs="audio",
    title="Bilal's Voice Cloning Tool"
)
demo.launch()
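One latent issue worth flagging in the deleted app above: os.system('ffmpeg-normalize $sample ...') passes a literal shell variable $sample that was never exported, so the Python variable sample is not actually substituted into the command. A minimal sketch of the presumably intended call, assuming the ffmpeg-normalize CLI is installed (flag values copied from the original, with the target level passed as a separate argument):

    import subprocess

    def normalize_reference(sample: str) -> None:
        # Explicitly pass the Python value instead of a shell variable; mirrors
        # the original flags: RMS normalization to -27 dB, resample to 16 kHz,
        # force-overwrite the input file in place.
        subprocess.run(
            ["ffmpeg-normalize", sample, "-nt", "rms", "-t", "-27",
             "-o", sample, "-ar", "16000", "-f"],
            check=True,
        )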
spaces/CVPR/LIVE/pybind11/include/pybind11/eigen.h
DELETED
@@ -1,607 +0,0 @@
/*
    pybind11/eigen.h: Transparent conversion for dense and sparse Eigen matrices

    Copyright (c) 2016 Wenzel Jakob <[email protected]>

    All rights reserved. Use of this source code is governed by a
    BSD-style license that can be found in the LICENSE file.
*/

#pragma once

#include "numpy.h"

#if defined(__INTEL_COMPILER)
#  pragma warning(disable: 1682) // implicit conversion of a 64-bit integral type to a smaller integral type (potential portability problem)
#elif defined(__GNUG__) || defined(__clang__)
#  pragma GCC diagnostic push
#  pragma GCC diagnostic ignored "-Wconversion"
#  pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#  ifdef __clang__
     // Eigen generates a bunch of implicit-copy-constructor-is-deprecated warnings with -Wdeprecated
     // under Clang, so disable that warning here:
#    pragma GCC diagnostic ignored "-Wdeprecated"
#  endif
#  if __GNUC__ >= 7
#    pragma GCC diagnostic ignored "-Wint-in-bool-context"
#  endif
#endif

#if defined(_MSC_VER)
#  pragma warning(push)
#  pragma warning(disable: 4127) // warning C4127: Conditional expression is constant
#  pragma warning(disable: 4996) // warning C4996: std::unary_negate is deprecated in C++17
#endif

#include <Eigen/Core>
#include <Eigen/SparseCore>

// Eigen prior to 3.2.7 doesn't have proper move constructors--but worse, some classes get implicit
// move constructors that break things. We could detect this an explicitly copy, but an extra copy
// of matrices seems highly undesirable.
static_assert(EIGEN_VERSION_AT_LEAST(3,2,7), "Eigen support in pybind11 requires Eigen >= 3.2.7");

PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)

// Provide a convenience alias for easier pass-by-ref usage with fully dynamic strides:
using EigenDStride = Eigen::Stride<Eigen::Dynamic, Eigen::Dynamic>;
template <typename MatrixType> using EigenDRef = Eigen::Ref<MatrixType, 0, EigenDStride>;
template <typename MatrixType> using EigenDMap = Eigen::Map<MatrixType, 0, EigenDStride>;

PYBIND11_NAMESPACE_BEGIN(detail)

#if EIGEN_VERSION_AT_LEAST(3,3,0)
using EigenIndex = Eigen::Index;
#else
using EigenIndex = EIGEN_DEFAULT_DENSE_INDEX_TYPE;
#endif

// Matches Eigen::Map, Eigen::Ref, blocks, etc:
template <typename T> using is_eigen_dense_map = all_of<is_template_base_of<Eigen::DenseBase, T>, std::is_base_of<Eigen::MapBase<T, Eigen::ReadOnlyAccessors>, T>>;
template <typename T> using is_eigen_mutable_map = std::is_base_of<Eigen::MapBase<T, Eigen::WriteAccessors>, T>;
template <typename T> using is_eigen_dense_plain = all_of<negation<is_eigen_dense_map<T>>, is_template_base_of<Eigen::PlainObjectBase, T>>;
template <typename T> using is_eigen_sparse = is_template_base_of<Eigen::SparseMatrixBase, T>;
// Test for objects inheriting from EigenBase<Derived> that aren't captured by the above. This
// basically covers anything that can be assigned to a dense matrix but that don't have a typical
// matrix data layout that can be copied from their .data(). For example, DiagonalMatrix and
// SelfAdjointView fall into this category.
template <typename T> using is_eigen_other = all_of<
    is_template_base_of<Eigen::EigenBase, T>,
    negation<any_of<is_eigen_dense_map<T>, is_eigen_dense_plain<T>, is_eigen_sparse<T>>>
>;

// Captures numpy/eigen conformability status (returned by EigenProps::conformable()):
template <bool EigenRowMajor> struct EigenConformable {
    bool conformable = false;
    EigenIndex rows = 0, cols = 0;
    EigenDStride stride{0, 0};      // Only valid if negativestrides is false!
    bool negativestrides = false;   // If true, do not use stride!

    EigenConformable(bool fits = false) : conformable{fits} {}
    // Matrix type:
    EigenConformable(EigenIndex r, EigenIndex c,
                     EigenIndex rstride, EigenIndex cstride) :
        conformable{true}, rows{r}, cols{c} {
        // TODO: when Eigen bug #747 is fixed, remove the tests for non-negativity. http://eigen.tuxfamily.org/bz/show_bug.cgi?id=747
        if (rstride < 0 || cstride < 0) {
            negativestrides = true;
        } else {
            stride = {EigenRowMajor ? rstride : cstride /* outer stride */,
                      EigenRowMajor ? cstride : rstride /* inner stride */ };
        }
    }
    // Vector type:
    EigenConformable(EigenIndex r, EigenIndex c, EigenIndex stride)
        : EigenConformable(r, c, r == 1 ? c*stride : stride, c == 1 ? r : r*stride) {}

    template <typename props> bool stride_compatible() const {
        // To have compatible strides, we need (on both dimensions) one of fully dynamic strides,
        // matching strides, or a dimension size of 1 (in which case the stride value is irrelevant)
        return
            !negativestrides &&
            (props::inner_stride == Eigen::Dynamic || props::inner_stride == stride.inner() ||
                (EigenRowMajor ? cols : rows) == 1) &&
            (props::outer_stride == Eigen::Dynamic || props::outer_stride == stride.outer() ||
                (EigenRowMajor ? rows : cols) == 1);
    }
    operator bool() const { return conformable; }
};

template <typename Type> struct eigen_extract_stride { using type = Type; };
template <typename PlainObjectType, int MapOptions, typename StrideType>
struct eigen_extract_stride<Eigen::Map<PlainObjectType, MapOptions, StrideType>> { using type = StrideType; };
template <typename PlainObjectType, int Options, typename StrideType>
struct eigen_extract_stride<Eigen::Ref<PlainObjectType, Options, StrideType>> { using type = StrideType; };

// Helper struct for extracting information from an Eigen type
template <typename Type_> struct EigenProps {
    using Type = Type_;
    using Scalar = typename Type::Scalar;
    using StrideType = typename eigen_extract_stride<Type>::type;
    static constexpr EigenIndex
        rows = Type::RowsAtCompileTime,
        cols = Type::ColsAtCompileTime,
        size = Type::SizeAtCompileTime;
    static constexpr bool
        row_major = Type::IsRowMajor,
        vector = Type::IsVectorAtCompileTime, // At least one dimension has fixed size 1
        fixed_rows = rows != Eigen::Dynamic,
        fixed_cols = cols != Eigen::Dynamic,
        fixed = size != Eigen::Dynamic, // Fully-fixed size
        dynamic = !fixed_rows && !fixed_cols; // Fully-dynamic size

    template <EigenIndex i, EigenIndex ifzero> using if_zero = std::integral_constant<EigenIndex, i == 0 ? ifzero : i>;
    static constexpr EigenIndex inner_stride = if_zero<StrideType::InnerStrideAtCompileTime, 1>::value,
                                outer_stride = if_zero<StrideType::OuterStrideAtCompileTime,
                                                       vector ? size : row_major ? cols : rows>::value;
    static constexpr bool dynamic_stride = inner_stride == Eigen::Dynamic && outer_stride == Eigen::Dynamic;
    static constexpr bool requires_row_major = !dynamic_stride && !vector && (row_major ? inner_stride : outer_stride) == 1;
    static constexpr bool requires_col_major = !dynamic_stride && !vector && (row_major ? outer_stride : inner_stride) == 1;

    // Takes an input array and determines whether we can make it fit into the Eigen type. If
    // the array is a vector, we attempt to fit it into either an Eigen 1xN or Nx1 vector
    // (preferring the latter if it will fit in either, i.e. for a fully dynamic matrix type).
    static EigenConformable<row_major> conformable(const array &a) {
        const auto dims = a.ndim();
        if (dims < 1 || dims > 2)
            return false;

        if (dims == 2) { // Matrix type: require exact match (or dynamic)

            EigenIndex
                np_rows = a.shape(0),
                np_cols = a.shape(1),
                np_rstride = a.strides(0) / static_cast<ssize_t>(sizeof(Scalar)),
                np_cstride = a.strides(1) / static_cast<ssize_t>(sizeof(Scalar));
            if ((fixed_rows && np_rows != rows) || (fixed_cols && np_cols != cols))
                return false;

            return {np_rows, np_cols, np_rstride, np_cstride};
        }

        // Otherwise we're storing an n-vector.  Only one of the strides will be used, but whichever
        // is used, we want the (single) numpy stride value.
        const EigenIndex n = a.shape(0),
              stride = a.strides(0) / static_cast<ssize_t>(sizeof(Scalar));

        if (vector) { // Eigen type is a compile-time vector
            if (fixed && size != n)
                return false; // Vector size mismatch
            return {rows == 1 ? 1 : n, cols == 1 ? 1 : n, stride};
        }
        else if (fixed) {
            // The type has a fixed size, but is not a vector: abort
            return false;
        }
        else if (fixed_cols) {
            // Since this isn't a vector, cols must be != 1.  We allow this only if it exactly
            // equals the number of elements (rows is Dynamic, and so 1 row is allowed).
            if (cols != n) return false;
            return {1, n, stride};
        }
        else {
            // Otherwise it's either fully dynamic, or column dynamic; both become a column vector
            if (fixed_rows && rows != n) return false;
            return {n, 1, stride};
        }
    }

    static constexpr bool show_writeable = is_eigen_dense_map<Type>::value && is_eigen_mutable_map<Type>::value;
    static constexpr bool show_order = is_eigen_dense_map<Type>::value;
    static constexpr bool show_c_contiguous = show_order && requires_row_major;
    static constexpr bool show_f_contiguous = !show_c_contiguous && show_order && requires_col_major;

    static constexpr auto descriptor =
        _("numpy.ndarray[") + npy_format_descriptor<Scalar>::name +
        _("[") + _<fixed_rows>(_<(size_t) rows>(), _("m")) +
        _(", ") + _<fixed_cols>(_<(size_t) cols>(), _("n")) +
        _("]") +
        // For a reference type (e.g. Ref<MatrixXd>) we have other constraints that might need to be
        // satisfied: writeable=True (for a mutable reference), and, depending on the map's stride
        // options, possibly f_contiguous or c_contiguous.  We include them in the descriptor output
        // to provide some hint as to why a TypeError is occurring (otherwise it can be confusing to
        // see that a function accepts a 'numpy.ndarray[float64[3,2]]' and an error message that you
        // *gave* a numpy.ndarray of the right type and dimensions.
        _<show_writeable>(", flags.writeable", "") +
        _<show_c_contiguous>(", flags.c_contiguous", "") +
        _<show_f_contiguous>(", flags.f_contiguous", "") +
        _("]");
};

// Casts an Eigen type to numpy array.  If given a base, the numpy array references the src data,
// otherwise it'll make a copy.  writeable lets you turn off the writeable flag for the array.
template <typename props> handle eigen_array_cast(typename props::Type const &src, handle base = handle(), bool writeable = true) {
    constexpr ssize_t elem_size = sizeof(typename props::Scalar);
    array a;
    if (props::vector)
        a = array({ src.size() }, { elem_size * src.innerStride() }, src.data(), base);
    else
        a = array({ src.rows(), src.cols() }, { elem_size * src.rowStride(), elem_size * src.colStride() },
                  src.data(), base);

    if (!writeable)
        array_proxy(a.ptr())->flags &= ~detail::npy_api::NPY_ARRAY_WRITEABLE_;

    return a.release();
}

// Takes an lvalue ref to some Eigen type and a (python) base object, creating a numpy array that
// reference the Eigen object's data with `base` as the python-registered base class (if omitted,
// the base will be set to None, and lifetime management is up to the caller).  The numpy array is
// non-writeable if the given type is const.
template <typename props, typename Type>
handle eigen_ref_array(Type &src, handle parent = none()) {
    // none here is to get past array's should-we-copy detection, which currently always
    // copies when there is no base.  Setting the base to None should be harmless.
    return eigen_array_cast<props>(src, parent, !std::is_const<Type>::value);
}

// Takes a pointer to some dense, plain Eigen type, builds a capsule around it, then returns a numpy
// array that references the encapsulated data with a python-side reference to the capsule to tie
// its destruction to that of any dependent python objects.  Const-ness is determined by whether or
// not the Type of the pointer given is const.
template <typename props, typename Type, typename = enable_if_t<is_eigen_dense_plain<Type>::value>>
handle eigen_encapsulate(Type *src) {
    capsule base(src, [](void *o) { delete static_cast<Type *>(o); });
    return eigen_ref_array<props>(*src, base);
}

// Type caster for regular, dense matrix types (e.g. MatrixXd), but not maps/refs/etc. of dense
// types.
template<typename Type>
struct type_caster<Type, enable_if_t<is_eigen_dense_plain<Type>::value>> {
    using Scalar = typename Type::Scalar;
    using props = EigenProps<Type>;

    bool load(handle src, bool convert) {
        // If we're in no-convert mode, only load if given an array of the correct type
        if (!convert && !isinstance<array_t<Scalar>>(src))
            return false;

        // Coerce into an array, but don't do type conversion yet; the copy below handles it.
        auto buf = array::ensure(src);

        if (!buf)
            return false;

        auto dims = buf.ndim();
        if (dims < 1 || dims > 2)
            return false;

        auto fits = props::conformable(buf);
        if (!fits)
            return false;

        // Allocate the new type, then build a numpy reference into it
        value = Type(fits.rows, fits.cols);
        auto ref = reinterpret_steal<array>(eigen_ref_array<props>(value));
        if (dims == 1) ref = ref.squeeze();
        else if (ref.ndim() == 1) buf = buf.squeeze();

        int result = detail::npy_api::get().PyArray_CopyInto_(ref.ptr(), buf.ptr());

        if (result < 0) { // Copy failed!
            PyErr_Clear();
            return false;
        }

        return true;
    }

private:

    // Cast implementation
    template <typename CType>
    static handle cast_impl(CType *src, return_value_policy policy, handle parent) {
        switch (policy) {
            case return_value_policy::take_ownership:
            case return_value_policy::automatic:
                return eigen_encapsulate<props>(src);
            case return_value_policy::move:
                return eigen_encapsulate<props>(new CType(std::move(*src)));
            case return_value_policy::copy:
                return eigen_array_cast<props>(*src);
            case return_value_policy::reference:
            case return_value_policy::automatic_reference:
                return eigen_ref_array<props>(*src);
            case return_value_policy::reference_internal:
                return eigen_ref_array<props>(*src, parent);
            default:
                throw cast_error("unhandled return_value_policy: should not happen!");
        };
    }

public:

    // Normal returned non-reference, non-const value:
    static handle cast(Type &&src, return_value_policy /* policy */, handle parent) {
        return cast_impl(&src, return_value_policy::move, parent);
    }
    // If you return a non-reference const, we mark the numpy array readonly:
    static handle cast(const Type &&src, return_value_policy /* policy */, handle parent) {
        return cast_impl(&src, return_value_policy::move, parent);
    }
    // lvalue reference return; default (automatic) becomes copy
    static handle cast(Type &src, return_value_policy policy, handle parent) {
        if (policy == return_value_policy::automatic || policy == return_value_policy::automatic_reference)
            policy = return_value_policy::copy;
        return cast_impl(&src, policy, parent);
    }
    // const lvalue reference return; default (automatic) becomes copy
    static handle cast(const Type &src, return_value_policy policy, handle parent) {
        if (policy == return_value_policy::automatic || policy == return_value_policy::automatic_reference)
            policy = return_value_policy::copy;
        return cast(&src, policy, parent);
    }
    // non-const pointer return
    static handle cast(Type *src, return_value_policy policy, handle parent) {
        return cast_impl(src, policy, parent);
    }
    // const pointer return
    static handle cast(const Type *src, return_value_policy policy, handle parent) {
        return cast_impl(src, policy, parent);
    }

    static constexpr auto name = props::descriptor;

    operator Type*() { return &value; }
    operator Type&() { return value; }
    operator Type&&() && { return std::move(value); }
    template <typename T> using cast_op_type = movable_cast_op_type<T>;

private:
    Type value;
};

// Base class for casting reference/map/block/etc. objects back to python.
template <typename MapType> struct eigen_map_caster {
private:
    using props = EigenProps<MapType>;

public:

    // Directly referencing a ref/map's data is a bit dangerous (whatever the map/ref points to has
    // to stay around), but we'll allow it under the assumption that you know what you're doing (and
    // have an appropriate keep_alive in place).  We return a numpy array pointing directly at the
    // ref's data (The numpy array ends up read-only if the ref was to a const matrix type.)  Note
    // that this means you need to ensure you don't destroy the object in some other way (e.g. with
    // an appropriate keep_alive, or with a reference to a statically allocated matrix).
    static handle cast(const MapType &src, return_value_policy policy, handle parent) {
        switch (policy) {
            case return_value_policy::copy:
                return eigen_array_cast<props>(src);
            case return_value_policy::reference_internal:
                return eigen_array_cast<props>(src, parent, is_eigen_mutable_map<MapType>::value);
            case return_value_policy::reference:
            case return_value_policy::automatic:
            case return_value_policy::automatic_reference:
                return eigen_array_cast<props>(src, none(), is_eigen_mutable_map<MapType>::value);
            default:
                // move, take_ownership don't make any sense for a ref/map:
                pybind11_fail("Invalid return_value_policy for Eigen Map/Ref/Block type");
        }
    }

    static constexpr auto name = props::descriptor;

    // Explicitly delete these: support python -> C++ conversion on these (i.e. these can be return
    // types but not bound arguments).  We still provide them (with an explicitly delete) so that
    // you end up here if you try anyway.
    bool load(handle, bool) = delete;
    operator MapType() = delete;
    template <typename> using cast_op_type = MapType;
};

// We can return any map-like object (but can only load Refs, specialized next):
template <typename Type> struct type_caster<Type, enable_if_t<is_eigen_dense_map<Type>::value>>
    : eigen_map_caster<Type> {};

// Loader for Ref<...> arguments.  See the documentation for info on how to make this work without
// copying (it requires some extra effort in many cases).
template <typename PlainObjectType, typename StrideType>
struct type_caster<
    Eigen::Ref<PlainObjectType, 0, StrideType>,
    enable_if_t<is_eigen_dense_map<Eigen::Ref<PlainObjectType, 0, StrideType>>::value>
> : public eigen_map_caster<Eigen::Ref<PlainObjectType, 0, StrideType>> {
private:
    using Type = Eigen::Ref<PlainObjectType, 0, StrideType>;
    using props = EigenProps<Type>;
    using Scalar = typename props::Scalar;
    using MapType = Eigen::Map<PlainObjectType, 0, StrideType>;
    using Array = array_t<Scalar, array::forcecast |
                          ((props::row_major ? props::inner_stride : props::outer_stride) == 1 ? array::c_style :
                           (props::row_major ? props::outer_stride : props::inner_stride) == 1 ? array::f_style : 0)>;
    static constexpr bool need_writeable = is_eigen_mutable_map<Type>::value;
    // Delay construction (these have no default constructor)
    std::unique_ptr<MapType> map;
    std::unique_ptr<Type> ref;
    // Our array.  When possible, this is just a numpy array pointing to the source data, but
    // sometimes we can't avoid copying (e.g. input is not a numpy array at all, has an incompatible
    // layout, or is an array of a type that needs to be converted).  Using a numpy temporary
    // (rather than an Eigen temporary) saves an extra copy when we need both type conversion and
    // storage order conversion.  (Note that we refuse to use this temporary copy when loading an
    // argument for a Ref<M> with M non-const, i.e. a read-write reference).
    Array copy_or_ref;
public:
    bool load(handle src, bool convert) {
        // First check whether what we have is already an array of the right type.  If not, we can't
        // avoid a copy (because the copy is also going to do type conversion).
        bool need_copy = !isinstance<Array>(src);

        EigenConformable<props::row_major> fits;
        if (!need_copy) {
            // We don't need a converting copy, but we also need to check whether the strides are
            // compatible with the Ref's stride requirements
            Array aref = reinterpret_borrow<Array>(src);

            if (aref && (!need_writeable || aref.writeable())) {
                fits = props::conformable(aref);
                if (!fits) return false; // Incompatible dimensions
                if (!fits.template stride_compatible<props>())
                    need_copy = true;
                else
                    copy_or_ref = std::move(aref);
            }
            else {
                need_copy = true;
            }
        }

        if (need_copy) {
            // We need to copy: If we need a mutable reference, or we're not supposed to convert
            // (either because we're in the no-convert overload pass, or because we're explicitly
            // instructed not to copy (via `py::arg().noconvert()`) we have to fail loading.
            if (!convert || need_writeable) return false;

            Array copy = Array::ensure(src);
            if (!copy) return false;
            fits = props::conformable(copy);
            if (!fits || !fits.template stride_compatible<props>())
                return false;
            copy_or_ref = std::move(copy);
            loader_life_support::add_patient(copy_or_ref);
        }

        ref.reset();
        map.reset(new MapType(data(copy_or_ref), fits.rows, fits.cols, make_stride(fits.stride.outer(), fits.stride.inner())));
        ref.reset(new Type(*map));

        return true;
    }

    operator Type*() { return ref.get(); }
    operator Type&() { return *ref; }
    template <typename _T> using cast_op_type = pybind11::detail::cast_op_type<_T>;

private:
    template <typename T = Type, enable_if_t<is_eigen_mutable_map<T>::value, int> = 0>
    Scalar *data(Array &a) { return a.mutable_data(); }

    template <typename T = Type, enable_if_t<!is_eigen_mutable_map<T>::value, int> = 0>
    const Scalar *data(Array &a) { return a.data(); }

    // Attempt to figure out a constructor of `Stride` that will work.
    // If both strides are fixed, use a default constructor:
    template <typename S> using stride_ctor_default = bool_constant<
        S::InnerStrideAtCompileTime != Eigen::Dynamic && S::OuterStrideAtCompileTime != Eigen::Dynamic &&
        std::is_default_constructible<S>::value>;
    // Otherwise, if there is a two-index constructor, assume it is (outer,inner) like
    // Eigen::Stride, and use it:
    template <typename S> using stride_ctor_dual = bool_constant<
        !stride_ctor_default<S>::value && std::is_constructible<S, EigenIndex, EigenIndex>::value>;
    // Otherwise, if there is a one-index constructor, and just one of the strides is dynamic, use
    // it (passing whichever stride is dynamic).
    template <typename S> using stride_ctor_outer = bool_constant<
        !any_of<stride_ctor_default<S>, stride_ctor_dual<S>>::value &&
        S::OuterStrideAtCompileTime == Eigen::Dynamic && S::InnerStrideAtCompileTime != Eigen::Dynamic &&
        std::is_constructible<S, EigenIndex>::value>;
    template <typename S> using stride_ctor_inner = bool_constant<
        !any_of<stride_ctor_default<S>, stride_ctor_dual<S>>::value &&
        S::InnerStrideAtCompileTime == Eigen::Dynamic && S::OuterStrideAtCompileTime != Eigen::Dynamic &&
        std::is_constructible<S, EigenIndex>::value>;

    template <typename S = StrideType, enable_if_t<stride_ctor_default<S>::value, int> = 0>
    static S make_stride(EigenIndex, EigenIndex) { return S(); }
    template <typename S = StrideType, enable_if_t<stride_ctor_dual<S>::value, int> = 0>
    static S make_stride(EigenIndex outer, EigenIndex inner) { return S(outer, inner); }
    template <typename S = StrideType, enable_if_t<stride_ctor_outer<S>::value, int> = 0>
    static S make_stride(EigenIndex outer, EigenIndex) { return S(outer); }
    template <typename S = StrideType, enable_if_t<stride_ctor_inner<S>::value, int> = 0>
    static S make_stride(EigenIndex, EigenIndex inner) { return S(inner); }

};

// type_caster for special matrix types (e.g. DiagonalMatrix), which are EigenBase, but not
// EigenDense (i.e. they don't have a data(), at least not with the usual matrix layout).
// load() is not supported, but we can cast them into the python domain by first copying to a
// regular Eigen::Matrix, then casting that.
template <typename Type>
struct type_caster<Type, enable_if_t<is_eigen_other<Type>::value>> {
protected:
    using Matrix = Eigen::Matrix<typename Type::Scalar, Type::RowsAtCompileTime, Type::ColsAtCompileTime>;
    using props = EigenProps<Matrix>;
public:
    static handle cast(const Type &src, return_value_policy /* policy */, handle /* parent */) {
        handle h = eigen_encapsulate<props>(new Matrix(src));
        return h;
    }
    static handle cast(const Type *src, return_value_policy policy, handle parent) { return cast(*src, policy, parent); }

    static constexpr auto name = props::descriptor;

    // Explicitly delete these: support python -> C++ conversion on these (i.e. these can be return
    // types but not bound arguments).  We still provide them (with an explicitly delete) so that
    // you end up here if you try anyway.
    bool load(handle, bool) = delete;
    operator Type() = delete;
    template <typename> using cast_op_type = Type;
};

template<typename Type>
struct type_caster<Type, enable_if_t<is_eigen_sparse<Type>::value>> {
    typedef typename Type::Scalar Scalar;
    typedef remove_reference_t<decltype(*std::declval<Type>().outerIndexPtr())> StorageIndex;
    typedef typename Type::Index Index;
    static constexpr bool rowMajor = Type::IsRowMajor;

    bool load(handle src, bool) {
        if (!src)
            return false;

        auto obj = reinterpret_borrow<object>(src);
        object sparse_module = module::import("scipy.sparse");
        object matrix_type = sparse_module.attr(
            rowMajor ? "csr_matrix" : "csc_matrix");

        if (!obj.get_type().is(matrix_type)) {
            try {
                obj = matrix_type(obj);
            } catch (const error_already_set &) {
                return false;
            }
        }

        auto values = array_t<Scalar>((object) obj.attr("data"));
        auto innerIndices = array_t<StorageIndex>((object) obj.attr("indices"));
        auto outerIndices = array_t<StorageIndex>((object) obj.attr("indptr"));
        auto shape = pybind11::tuple((pybind11::object) obj.attr("shape"));
        auto nnz = obj.attr("nnz").cast<Index>();

        if (!values || !innerIndices || !outerIndices)
            return false;

        value = Eigen::MappedSparseMatrix<Scalar, Type::Flags, StorageIndex>(
            shape[0].cast<Index>(), shape[1].cast<Index>(), nnz,
            outerIndices.mutable_data(), innerIndices.mutable_data(), values.mutable_data());

        return true;
    }

    static handle cast(const Type &src, return_value_policy /* policy */, handle /* parent */) {
        const_cast<Type&>(src).makeCompressed();

        object matrix_type = module::import("scipy.sparse").attr(
            rowMajor ? "csr_matrix" : "csc_matrix");

        array data(src.nonZeros(), src.valuePtr());
        array outerIndices((rowMajor ? src.rows() : src.cols()) + 1, src.outerIndexPtr());
        array innerIndices(src.nonZeros(), src.innerIndexPtr());

        return matrix_type(
            std::make_tuple(data, innerIndices, outerIndices),
            std::make_pair(src.rows(), src.cols())
        ).release();
    }

    PYBIND11_TYPE_CASTER(Type, _<(Type::IsRowMajor) != 0>("scipy.sparse.csr_matrix[", "scipy.sparse.csc_matrix[")
            + npy_format_descriptor<Scalar>::name + _("]"));
};

PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)

#if defined(__GNUG__) || defined(__clang__)
#  pragma GCC diagnostic pop
#elif defined(_MSC_VER)
#  pragma warning(pop)
#endif
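As a NumPy-side illustration of what EigenProps::conformable() above computes: it divides the array's byte strides by the element size to get element strides, then matches those against the Eigen type's compile-time stride requirements. A runnable sketch of those quantities (plain NumPy, no compiled module needed):

    import numpy as np

    a = np.arange(12, dtype=np.float64).reshape(3, 4)   # C-contiguous 3x4
    elem = a.itemsize                                    # 8 bytes per float64

    # Byte strides divided by the element size -- the np_rstride/np_cstride
    # computation inside conformable().
    print(tuple(s // elem for s in a.strides))           # (4, 1)

    # A transposed view is F-contiguous: the inner (row) stride becomes 1,
    # which is what a column-major Eigen Ref's stride check would require.
    print(tuple(s // elem for s in a.T.strides))         # (1, 4)
    print(a.flags["C_CONTIGUOUS"], a.T.flags["F_CONTIGUOUS"])  # True True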
spaces/CVPR/LIVE/setup.py
DELETED
@@ -1,98 +0,0 @@
-# Adapted from https://github.com/pybind/cmake_example/blob/master/setup.py
-import os
-import re
-import sys
-import platform
-import subprocess
-import importlib
-from sysconfig import get_paths
-
-import importlib
-from setuptools import setup, Extension
-from setuptools.command.build_ext import build_ext
-from setuptools.command.install import install
-from distutils.sysconfig import get_config_var
-from distutils.version import LooseVersion
-
-class CMakeExtension(Extension):
-    def __init__(self, name, sourcedir, build_with_cuda):
-        Extension.__init__(self, name, sources=[])
-        self.sourcedir = os.path.abspath(sourcedir)
-        self.build_with_cuda = build_with_cuda
-
-class Build(build_ext):
-    def run(self):
-        try:
-            out = subprocess.check_output(['cmake', '--version'])
-        except OSError:
-            raise RuntimeError("CMake must be installed to build the following extensions: " +
-                               ", ".join(e.name for e in self.extensions))
-
-        super().run()
-
-    def build_extension(self, ext):
-        if isinstance(ext, CMakeExtension):
-            extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
-            info = get_paths()
-            include_path = info['include']
-            cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
-                          '-DPYTHON_INCLUDE_PATH=' + include_path]
-
-            cfg = 'Debug' if self.debug else 'Release'
-            build_args = ['--config', cfg]
-
-            if platform.system() == "Windows":
-                cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir),
-                               '-DCMAKE_RUNTIME_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir)]
-                if sys.maxsize > 2**32:
-                    cmake_args += ['-A', 'x64']
-                build_args += ['--', '/m']
-            else:
-                cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]
-                build_args += ['--', '-j8']
-
-            if ext.build_with_cuda:
-                cmake_args += ['-DDIFFVG_CUDA=1']
-            else:
-                cmake_args += ['-DDIFFVG_CUDA=0']
-
-            env = os.environ.copy()
-            env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(env.get('CXXFLAGS', ''),
-                                                                  self.distribution.get_version())
-            if not os.path.exists(self.build_temp):
-                os.makedirs(self.build_temp)
-            subprocess.check_call(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env)
-            subprocess.check_call(['cmake', '--build', '.'] + build_args, cwd=self.build_temp)
-        else:
-            super().build_extension(ext)
-
-torch_spec = importlib.util.find_spec("torch")
-tf_spec = importlib.util.find_spec("tensorflow")
-packages = []
-build_with_cuda = False
-if torch_spec is not None:
-    packages.append('pydiffvg')
-    import torch
-    if torch.cuda.is_available():
-        build_with_cuda = True
-if tf_spec is not None and sys.platform != 'win32':
-    packages.append('pydiffvg_tensorflow')
-    if not build_with_cuda:
-        import tensorflow as tf
-        if tf.test.is_gpu_available(cuda_only=True, min_cuda_compute_capability=None):
-            build_with_cuda = True
-if len(packages) == 0:
-    print('Error: PyTorch or Tensorflow must be installed. For Windows platform only PyTorch is supported.')
-    exit()
-# Override build_with_cuda with environment variable
-if 'DIFFVG_CUDA' in os.environ:
-    build_with_cuda = os.environ['DIFFVG_CUDA'] == '1'
-
-setup(name = 'diffvg',
-      version = '0.0.1',
-      install_requires = ["svgpathtools"],
-      description = 'Differentiable Vector Graphics',
-      ext_modules = [CMakeExtension('diffvg', '', build_with_cuda)],
-      cmdclass = dict(build_ext=Build, install=install),
-      packages = packages,
-      zip_safe = False)
spaces/CVPR/LIVE/thrust/thrust/detail/type_traits/has_member_function.h
DELETED
@@ -1,118 +0,0 @@
-/*
- *  Copyright 2008-2013 NVIDIA Corporation
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-#pragma once
-
-#include <thrust/detail/type_traits.h>
-
-#define __THRUST_DEFINE_HAS_MEMBER_FUNCTION(trait_name, member_function_name) \
-template<typename T, typename Signature> class trait_name; \
- \
-template<typename T, typename Result> \
-class trait_name<T, Result(void)> \
-{ \
-   class yes { char m; }; \
-   class no { yes m[2]; }; \
-   struct base_mixin \
-   { \
-     Result member_function_name(); \
-   }; \
-   struct base : public T, public base_mixin {}; \
-   template <typename U, U t> class helper{}; \
-   template <typename U> \
-   static no deduce(U*, helper<Result (base_mixin::*)(), &U::member_function_name>* = 0); \
-   static yes deduce(...); \
- public: \
-   static const bool value = sizeof(yes) == sizeof(deduce(static_cast<base*>(0))); \
-   typedef thrust::detail::integral_constant<bool,value> type; \
-}; \
- \
-template<typename T, typename Result, typename Arg> \
-class trait_name<T, Result(Arg)> \
-{ \
-   class yes { char m; }; \
-   class no { yes m[2]; }; \
-   struct base_mixin \
-   { \
-     Result member_function_name(Arg); \
-   }; \
-   struct base : public T, public base_mixin {}; \
-   template <typename U, U t> class helper{}; \
-   template <typename U> \
-   static no deduce(U*, helper<Result (base_mixin::*)(Arg), &U::member_function_name>* = 0); \
-   static yes deduce(...); \
- public: \
-   static const bool value = sizeof(yes) == sizeof(deduce(static_cast<base*>(0))); \
-   typedef thrust::detail::integral_constant<bool,value> type; \
-}; \
- \
-template<typename T, typename Result, typename Arg1, typename Arg2> \
-class trait_name<T, Result(Arg1,Arg2)> \
-{ \
-   class yes { char m; }; \
-   class no { yes m[2]; }; \
-   struct base_mixin \
-   { \
-     Result member_function_name(Arg1,Arg2); \
-   }; \
-   struct base : public T, public base_mixin {}; \
-   template <typename U, U t> class helper{}; \
-   template <typename U> \
-   static no deduce(U*, helper<Result (base_mixin::*)(Arg1,Arg2), &U::member_function_name>* = 0); \
-   static yes deduce(...); \
- public: \
-   static const bool value = sizeof(yes) == sizeof(deduce(static_cast<base*>(0))); \
-   typedef thrust::detail::integral_constant<bool,value> type; \
-}; \
- \
-template<typename T, typename Result, typename Arg1, typename Arg2, typename Arg3> \
-class trait_name<T, Result(Arg1,Arg2,Arg3)> \
-{ \
-   class yes { char m; }; \
-   class no { yes m[2]; }; \
-   struct base_mixin \
-   { \
-     Result member_function_name(Arg1,Arg2,Arg3); \
-   }; \
-   struct base : public T, public base_mixin {}; \
-   template <typename U, U t> class helper{}; \
-   template <typename U> \
-   static no deduce(U*, helper<Result (base_mixin::*)(Arg1,Arg2,Arg3), &U::member_function_name>* = 0); \
-   static yes deduce(...); \
- public: \
-   static const bool value = sizeof(yes) == sizeof(deduce(static_cast<base*>(0))); \
-   typedef thrust::detail::integral_constant<bool,value> type; \
-}; \
- \
-template<typename T, typename Result, typename Arg1, typename Arg2, typename Arg3, typename Arg4> \
-class trait_name<T, Result(Arg1,Arg2,Arg3,Arg4)> \
-{ \
-   class yes { char m; }; \
-   class no { yes m[2]; }; \
-   struct base_mixin \
-   { \
-     Result member_function_name(Arg1,Arg2,Arg3,Arg4); \
-   }; \
-   struct base : public T, public base_mixin {}; \
-   template <typename U, U t> class helper{}; \
-   template <typename U> \
-   static no deduce(U*, helper<Result (base_mixin::*)(Arg1,Arg2,Arg3,Arg4), &U::member_function_name>* = 0); \
-   static yes deduce(...); \
- public: \
-   static const bool value = sizeof(yes) == sizeof(deduce(static_cast<base*>(0))); \
-   typedef thrust::detail::integral_constant<bool,value> type; \
-};
-
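
The deleted header above is a classic sizeof-based SFINAE member-function detector: the macro injects a `base_mixin` that declares a member with the requested name, and taking `&U::member_function_name` becomes ambiguous (a substitution failure) exactly when `T` itself also declares that member. A minimal usage sketch follows; `has_clear`, `Widget`, and `Gadget` are made-up illustrative names, not part of Thrust, and the include path assumes the tree as it existed before this commit.

```cpp
// Hypothetical usage sketch of the deleted detection macro.
#include <cstdio>
#include <thrust/detail/type_traits/has_member_function.h>

// Expands to a trait has_clear<T, Signature> whose ::value is true
// exactly when T declares a member function 'clear' with that signature.
__THRUST_DEFINE_HAS_MEMBER_FUNCTION(has_clear, clear)

struct Widget { void clear() {} };  // has the member
struct Gadget {};                   // does not

int main()
{
  std::printf("%d\n", int(has_clear<Widget, void(void)>::value));  // prints 1
  std::printf("%d\n", int(has_clear<Gadget, void(void)>::value));  // prints 0
  return 0;
}
```

Note the macro only covers signatures of zero to four arguments, one specialization per arity, which is why the header repeats the same block five times.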
spaces/CVPR/LIVE/thrust/thrust/sort.h
DELETED
@@ -1,1362 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
* Copyright 2008-2013 NVIDIA Corporation
|
3 |
-
*
|
4 |
-
* Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
* you may not use this file except in compliance with the License.
|
6 |
-
* You may obtain a copy of the License at
|
7 |
-
*
|
8 |
-
* http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
*
|
10 |
-
* Unless required by applicable law or agreed to in writing, software
|
11 |
-
* distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
* See the License for the specific language governing permissions and
|
14 |
-
* limitations under the License.
|
15 |
-
*/
|
16 |
-
|
17 |
-
|
18 |
-
/*! \file thrust/sort.h
|
19 |
-
* \brief Functions for reorganizing ranges into sorted order
|
20 |
-
*/
|
21 |
-
|
22 |
-
#pragma once
|
23 |
-
|
24 |
-
#include <thrust/detail/config.h>
|
25 |
-
#include <thrust/detail/execution_policy.h>
|
26 |
-
|
27 |
-
namespace thrust
|
28 |
-
{
|
29 |
-
|
30 |
-
|
31 |
-
/*! \addtogroup sorting
|
32 |
-
* \ingroup algorithms
|
33 |
-
* \{
|
34 |
-
*/
|
35 |
-
|
36 |
-
|
37 |
-
/*! \p sort sorts the elements in <tt>[first, last)</tt> into
|
38 |
-
* ascending order, meaning that if \c i and \c j are any two valid
|
39 |
-
* iterators in <tt>[first, last)</tt> such that \c i precedes \c j,
|
40 |
-
* then \c *j is not less than \c *i. Note: \c sort is not guaranteed
|
41 |
-
* to be stable. That is, suppose that \c *i and \c *j are equivalent:
|
42 |
-
* neither one is less than the other. It is not guaranteed that the
|
43 |
-
* relative order of these two elements will be preserved by \p sort.
|
44 |
-
*
|
45 |
-
* This version of \p sort compares objects using \c operator<.
|
46 |
-
*
|
47 |
-
* The algorithm's execution is parallelized as determined by \p exec.
|
48 |
-
*
|
49 |
-
* \param exec The execution policy to use for parallelization.
|
50 |
-
* \param first The beginning of the sequence.
|
51 |
-
* \param last The end of the sequence.
|
52 |
-
*
|
53 |
-
* \tparam DerivedPolicy The name of the derived execution policy.
|
54 |
-
* \tparam RandomAccessIterator is a model of <a href="http://www.sgi.com/tech/stl/RandomAccessIterator.html">Random Access Iterator</a>,
|
55 |
-
* \p RandomAccessIterator is mutable,
|
56 |
-
* and \p RandomAccessIterator's \c value_type is a model of <a href="http://www.sgi.com/tech/stl/LessThanComparable.html">LessThan Comparable</a>,
|
57 |
-
* and the ordering relation on \p RandomAccessIterator's \c value_type is a <em>strict weak ordering</em>, as defined in the
|
58 |
-
* <a href="http://www.sgi.com/tech/stl/LessThanComparable.html">LessThan Comparable</a> requirements.
|
59 |
-
*
|
60 |
-
* The following code snippet demonstrates how to use \p sort to sort
|
61 |
-
* a sequence of integers using the \p thrust::host execution policy for parallelization:
|
62 |
-
*
|
63 |
-
* \code
|
64 |
-
* #include <thrust/sort.h>
|
65 |
-
* #include <thrust/execution_policy.h>
|
66 |
-
* ...
|
67 |
-
* const int N = 6;
|
68 |
-
* int A[N] = {1, 4, 2, 8, 5, 7};
|
69 |
-
* thrust::sort(thrust::host, A, A + N);
|
70 |
-
* // A is now {1, 2, 4, 5, 7, 8}
|
71 |
-
* \endcode
|
72 |
-
*
|
73 |
-
* \see http://www.sgi.com/tech/stl/sort.html
|
74 |
-
* \see \p stable_sort
|
75 |
-
* \see \p sort_by_key
|
76 |
-
*/
|
77 |
-
template<typename DerivedPolicy, typename RandomAccessIterator>
|
78 |
-
__host__ __device__
|
79 |
-
void sort(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
|
80 |
-
RandomAccessIterator first,
|
81 |
-
RandomAccessIterator last);
|
82 |
-
|
83 |
-
|
84 |
-
/*! \p sort sorts the elements in <tt>[first, last)</tt> into
|
85 |
-
* ascending order, meaning that if \c i and \c j are any two valid
|
86 |
-
* iterators in <tt>[first, last)</tt> such that \c i precedes \c j,
|
87 |
-
* then \c *j is not less than \c *i. Note: \c sort is not guaranteed
|
88 |
-
* to be stable. That is, suppose that \c *i and \c *j are equivalent:
|
89 |
-
* neither one is less than the other. It is not guaranteed that the
|
90 |
-
* relative order of these two elements will be preserved by \p sort.
|
91 |
-
*
|
92 |
-
* This version of \p sort compares objects using \c operator<.
|
93 |
-
*
|
94 |
-
* \param first The beginning of the sequence.
|
95 |
-
* \param last The end of the sequence.
|
96 |
-
*
|
97 |
-
* \tparam RandomAccessIterator is a model of <a href="http://www.sgi.com/tech/stl/RandomAccessIterator.html">Random Access Iterator</a>,
|
98 |
-
* \p RandomAccessIterator is mutable,
|
99 |
-
* and \p RandomAccessIterator's \c value_type is a model of <a href="http://www.sgi.com/tech/stl/LessThanComparable.html">LessThan Comparable</a>,
|
100 |
-
* and the ordering relation on \p RandomAccessIterator's \c value_type is a <em>strict weak ordering</em>, as defined in the
|
101 |
-
* <a href="http://www.sgi.com/tech/stl/LessThanComparable.html">LessThan Comparable</a> requirements.
|
102 |
-
*
|
103 |
-
* The following code snippet demonstrates how to use \p sort to sort
|
104 |
-
* a sequence of integers.
|
105 |
-
*
|
106 |
-
* \code
|
107 |
-
* #include <thrust/sort.h>
|
108 |
-
* ...
|
109 |
-
* const int N = 6;
|
110 |
-
* int A[N] = {1, 4, 2, 8, 5, 7};
|
111 |
-
* thrust::sort(A, A + N);
|
112 |
-
* // A is now {1, 2, 4, 5, 7, 8}
|
113 |
-
* \endcode
|
114 |
-
*
|
115 |
-
* \see http://www.sgi.com/tech/stl/sort.html
|
116 |
-
* \see \p stable_sort
|
117 |
-
* \see \p sort_by_key
|
118 |
-
*/
|
119 |
-
template<typename RandomAccessIterator>
|
120 |
-
void sort(RandomAccessIterator first,
|
121 |
-
RandomAccessIterator last);
|
122 |
-
|
123 |
-
|
124 |
-
/*! \p sort sorts the elements in <tt>[first, last)</tt> into
|
125 |
-
* ascending order, meaning that if \c i and \c j are any two valid
|
126 |
-
* iterators in <tt>[first, last)</tt> such that \c i precedes \c j,
|
127 |
-
* then \c *j is not less than \c *i. Note: \c sort is not guaranteed
|
128 |
-
* to be stable. That is, suppose that \c *i and \c *j are equivalent:
|
129 |
-
* neither one is less than the other. It is not guaranteed that the
|
130 |
-
* relative order of these two elements will be preserved by \p sort.
|
131 |
-
*
|
132 |
-
* This version of \p sort compares objects using a function object
|
133 |
-
* \p comp.
|
134 |
-
*
|
135 |
-
* The algorithm's execution is parallelized as determined by \p exec.
|
136 |
-
*
|
137 |
-
* \param exec The execution policy to use for parallelization.
|
138 |
-
* \param first The beginning of the sequence.
|
139 |
-
* \param last The end of the sequence.
|
140 |
-
* \param comp Comparison operator.
|
141 |
-
*
|
142 |
-
* \tparam DerivedPolicy The name of the derived execution policy.
|
143 |
-
* \tparam RandomAccessIterator is a model of <a href="http://www.sgi.com/tech/stl/RandomAccessIterator.html">Random Access Iterator</a>,
|
144 |
-
* \p RandomAccessIterator is mutable,
|
145 |
-
* and \p RandomAccessIterator's \c value_type is convertible to \p StrictWeakOrdering's
|
146 |
-
* \c first_argument_type and \c second_argument_type.
|
147 |
-
* \tparam StrictWeakOrdering is a model of <a href="http://www.sgi.com/tech/stl/StrictWeakOrdering.html">Strict Weak Ordering</a>.
|
148 |
-
*
|
149 |
-
* The following code demonstrates how to sort integers in descending order
|
150 |
-
* using the greater<int> comparison operator using the \p thrust::host execution policy for parallelization:
|
151 |
-
*
|
152 |
-
* \code
|
153 |
-
* #include <thrust/sort.h>
|
154 |
-
* #include <thrust/functional.h>
|
155 |
-
* #include <thrust/execution_policy.h>
|
156 |
-
* ...
|
157 |
-
* const int N = 6;
|
158 |
-
* int A[N] = {1, 4, 2, 8, 5, 7};
|
159 |
-
* thrust::sort(thrust::host, A, A + N, thrust::greater<int>());
|
160 |
-
* // A is now {8, 7, 5, 4, 2, 1};
|
161 |
-
* \endcode
|
162 |
-
*
|
163 |
-
* \see http://www.sgi.com/tech/stl/sort.html
|
164 |
-
* \see \p stable_sort
|
165 |
-
* \see \p sort_by_key
|
166 |
-
*/
|
167 |
-
template<typename DerivedPolicy,
|
168 |
-
typename RandomAccessIterator,
|
169 |
-
typename StrictWeakOrdering>
|
170 |
-
__host__ __device__
|
171 |
-
void sort(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
|
172 |
-
RandomAccessIterator first,
|
173 |
-
RandomAccessIterator last,
|
174 |
-
StrictWeakOrdering comp);
|
175 |
-
|
176 |
-
|
177 |
-
/*! \p sort sorts the elements in <tt>[first, last)</tt> into
|
178 |
-
* ascending order, meaning that if \c i and \c j are any two valid
|
179 |
-
* iterators in <tt>[first, last)</tt> such that \c i precedes \c j,
|
180 |
-
* then \c *j is not less than \c *i. Note: \c sort is not guaranteed
|
181 |
-
* to be stable. That is, suppose that \c *i and \c *j are equivalent:
|
182 |
-
* neither one is less than the other. It is not guaranteed that the
|
183 |
-
* relative order of these two elements will be preserved by \p sort.
|
184 |
-
*
|
185 |
-
* This version of \p sort compares objects using a function object
|
186 |
-
* \p comp.
|
187 |
-
*
|
188 |
-
* \param first The beginning of the sequence.
|
189 |
-
* \param last The end of the sequence.
|
190 |
-
* \param comp Comparison operator.
|
191 |
-
*
|
192 |
-
* \tparam RandomAccessIterator is a model of <a href="http://www.sgi.com/tech/stl/RandomAccessIterator.html">Random Access Iterator</a>,
|
193 |
-
* \p RandomAccessIterator is mutable,
|
194 |
-
* and \p RandomAccessIterator's \c value_type is convertible to \p StrictWeakOrdering's
|
195 |
-
* \c first_argument_type and \c second_argument_type.
|
196 |
-
* \tparam StrictWeakOrdering is a model of <a href="http://www.sgi.com/tech/stl/StrictWeakOrdering.html">Strict Weak Ordering</a>.
|
197 |
-
*
|
198 |
-
* The following code demonstrates how to sort integers in descending order
|
199 |
-
* using the greater<int> comparison operator.
|
200 |
-
*
|
201 |
-
* \code
|
202 |
-
* #include <thrust/sort.h>
|
203 |
-
* #include <thrust/functional.h>
|
204 |
-
* ...
|
205 |
-
* const int N = 6;
|
206 |
-
* int A[N] = {1, 4, 2, 8, 5, 7};
|
207 |
-
* thrust::sort(A, A + N, thrust::greater<int>());
|
208 |
-
* // A is now {8, 7, 5, 4, 2, 1};
|
209 |
-
* \endcode
|
210 |
-
*
|
211 |
-
* \see http://www.sgi.com/tech/stl/sort.html
|
212 |
-
* \see \p stable_sort
|
213 |
-
* \see \p sort_by_key
|
214 |
-
*/
|
215 |
-
template<typename RandomAccessIterator,
|
216 |
-
typename StrictWeakOrdering>
|
217 |
-
__host__ __device__
|
218 |
-
void sort(RandomAccessIterator first,
|
219 |
-
RandomAccessIterator last,
|
220 |
-
StrictWeakOrdering comp);
|
221 |
-
|
222 |
-
|
223 |
-
/*! \p stable_sort is much like \c sort: it sorts the elements in
|
224 |
-
* <tt>[first, last)</tt> into ascending order, meaning that if \c i
|
225 |
-
* and \c j are any two valid iterators in <tt>[first, last)</tt> such
|
226 |
-
* that \c i precedes \c j, then \c *j is not less than \c *i.
|
227 |
-
*
|
228 |
-
* As the name suggests, \p stable_sort is stable: it preserves the
|
229 |
-
* relative ordering of equivalent elements. That is, if \c x and \c y
|
230 |
-
* are elements in <tt>[first, last)</tt> such that \c x precedes \c y,
|
231 |
-
* and if the two elements are equivalent (neither <tt>x < y</tt> nor
|
232 |
-
* <tt>y < x</tt>) then a postcondition of \p stable_sort is that \c x
|
233 |
-
* still precedes \c y.
|
234 |
-
*
|
235 |
-
* This version of \p stable_sort compares objects using \c operator<.
|
236 |
-
*
|
237 |
-
* The algorithm's execution is parallelized as determined by \p exec.
|
238 |
-
*
|
239 |
-
* \param exec The execution policy to use for parallelization.
|
240 |
-
* \param first The beginning of the sequence.
|
241 |
-
* \param last The end of the sequence.
|
242 |
-
*
|
243 |
-
* \tparam DerivedPolicy The name of the derived execution policy.
|
244 |
-
* \tparam RandomAccessIterator is a model of <a href="http://www.sgi.com/tech/stl/RandomAccessIterator.html">Random Access Iterator</a>,
|
245 |
-
* \p RandomAccessIterator is mutable,
|
246 |
-
* and \p RandomAccessIterator's \c value_type is a model of <a href="http://www.sgi.com/tech/stl/LessThanComparable.html">LessThan Comparable</a>,
|
247 |
-
* and the ordering relation on \p RandomAccessIterator's \c value_type is a <em>strict weak ordering</em>, as defined in the
|
248 |
-
* <a href="http://www.sgi.com/tech/stl/LessThanComparable.html">LessThan Comparable</a> requirements.
|
249 |
-
*
|
250 |
-
* The following code snippet demonstrates how to use \p sort to sort
|
251 |
-
* a sequence of integers using the \p thrust::host execution policy for parallelization:
|
252 |
-
*
|
253 |
-
* \code
|
254 |
-
* #include <thrust/sort.h>
|
255 |
-
* #include <thrust/execution_policy.h>
|
256 |
-
* ...
|
257 |
-
* const int N = 6;
|
258 |
-
* int A[N] = {1, 4, 2, 8, 5, 7};
|
259 |
-
* thrust::stable_sort(thrust::host, A, A + N);
|
260 |
-
* // A is now {1, 2, 4, 5, 7, 8}
|
261 |
-
* \endcode
|
262 |
-
*
|
263 |
-
* \see http://www.sgi.com/tech/stl/stable_sort.html
|
264 |
-
* \see \p sort
|
265 |
-
* \see \p stable_sort_by_key
|
266 |
-
*/
|
267 |
-
template<typename DerivedPolicy, typename RandomAccessIterator>
|
268 |
-
__host__ __device__
|
269 |
-
void stable_sort(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
|
270 |
-
RandomAccessIterator first,
|
271 |
-
RandomAccessIterator last);
|
272 |
-
|
273 |
-
|
274 |
-
/*! \p stable_sort is much like \c sort: it sorts the elements in
|
275 |
-
* <tt>[first, last)</tt> into ascending order, meaning that if \c i
|
276 |
-
* and \c j are any two valid iterators in <tt>[first, last)</tt> such
|
277 |
-
* that \c i precedes \c j, then \c *j is not less than \c *i.
|
278 |
-
*
|
279 |
-
* As the name suggests, \p stable_sort is stable: it preserves the
|
280 |
-
* relative ordering of equivalent elements. That is, if \c x and \c y
|
281 |
-
* are elements in <tt>[first, last)</tt> such that \c x precedes \c y,
|
282 |
-
* and if the two elements are equivalent (neither <tt>x < y</tt> nor
|
283 |
-
* <tt>y < x</tt>) then a postcondition of \p stable_sort is that \c x
|
284 |
-
* still precedes \c y.
|
285 |
-
*
|
286 |
-
* This version of \p stable_sort compares objects using \c operator<.
|
287 |
-
*
|
288 |
-
* \param first The beginning of the sequence.
|
289 |
-
* \param last The end of the sequence.
|
290 |
-
*
|
291 |
-
* \tparam RandomAccessIterator is a model of <a href="http://www.sgi.com/tech/stl/RandomAccessIterator.html">Random Access Iterator</a>,
|
292 |
-
* \p RandomAccessIterator is mutable,
|
293 |
-
* and \p RandomAccessIterator's \c value_type is a model of <a href="http://www.sgi.com/tech/stl/LessThanComparable.html">LessThan Comparable</a>,
|
294 |
-
* and the ordering relation on \p RandomAccessIterator's \c value_type is a <em>strict weak ordering</em>, as defined in the
|
295 |
-
* <a href="http://www.sgi.com/tech/stl/LessThanComparable.html">LessThan Comparable</a> requirements.
|
296 |
-
*
|
297 |
-
* The following code snippet demonstrates how to use \p sort to sort
|
298 |
-
* a sequence of integers.
|
299 |
-
*
|
300 |
-
* \code
|
301 |
-
* #include <thrust/sort.h>
|
302 |
-
* ...
|
303 |
-
* const int N = 6;
|
304 |
-
* int A[N] = {1, 4, 2, 8, 5, 7};
|
305 |
-
* thrust::stable_sort(A, A + N);
|
306 |
-
* // A is now {1, 2, 4, 5, 7, 8}
|
307 |
-
* \endcode
|
308 |
-
*
|
309 |
-
* \see http://www.sgi.com/tech/stl/stable_sort.html
|
310 |
-
* \see \p sort
|
311 |
-
* \see \p stable_sort_by_key
|
312 |
-
*/
|
313 |
-
template<typename RandomAccessIterator>
|
314 |
-
void stable_sort(RandomAccessIterator first,
|
315 |
-
RandomAccessIterator last);
|
316 |
-
|
317 |
-
|
318 |
-
/*! \p stable_sort is much like \c sort: it sorts the elements in
|
319 |
-
* <tt>[first, last)</tt> into ascending order, meaning that if \c i
|
320 |
-
* and \c j are any two valid iterators in <tt>[first, last)</tt> such
|
321 |
-
* that \c i precedes \c j, then \c *j is not less than \c *i.
|
322 |
-
*
|
323 |
-
* As the name suggests, \p stable_sort is stable: it preserves the
|
324 |
-
* relative ordering of equivalent elements. That is, if \c x and \c y
|
325 |
-
* are elements in <tt>[first, last)</tt> such that \c x precedes \c y,
|
326 |
-
* and if the two elements are equivalent (neither <tt>x < y</tt> nor
|
327 |
-
* <tt>y < x</tt>) then a postcondition of \p stable_sort is that \c x
|
328 |
-
* still precedes \c y.
|
329 |
-
*
|
330 |
-
* This version of \p stable_sort compares objects using a function object
|
331 |
-
* \p comp.
|
332 |
-
*
|
333 |
-
* The algorithm's execution is parallelized as determined by \p exec.
|
334 |
-
*
|
335 |
-
* \param exec The execution policy to use for parallelization.
|
336 |
-
* \param first The beginning of the sequence.
|
337 |
-
* \param last The end of the sequence.
|
338 |
-
* \param comp Comparison operator.
|
339 |
-
*
|
340 |
-
* \tparam DerivedPolicy The name of the derived execution policy.
|
341 |
-
* \tparam RandomAccessIterator is a model of <a href="http://www.sgi.com/tech/stl/RandomAccessIterator.html">Random Access Iterator</a>,
|
342 |
-
* \p RandomAccessIterator is mutable,
|
343 |
-
* and \p RandomAccessIterator's \c value_type is convertible to \p StrictWeakOrdering's
|
344 |
-
* \c first_argument_type and \c second_argument_type.
|
345 |
-
* \tparam StrictWeakOrdering is a model of <a href="http://www.sgi.com/tech/stl/StrictWeakOrdering.html">Strict Weak Ordering</a>.
|
346 |
-
*
|
347 |
-
* The following code demonstrates how to sort integers in descending order
|
348 |
-
* using the greater<int> comparison operator using the \p thrust::host execution policy for parallelization:
|
349 |
-
*
|
350 |
-
* \code
|
351 |
-
* #include <thrust/sort.h>
|
352 |
-
* #include <thrust/functional.h>
|
353 |
-
* #include <thrust/execution_policy.h>
|
354 |
-
* ...
|
355 |
-
* const int N = 6;
|
356 |
-
* int A[N] = {1, 4, 2, 8, 5, 7};
|
357 |
-
* thrust::sort(A, A + N, thrust::greater<int>());
|
358 |
-
* // A is now {8, 7, 5, 4, 2, 1};
|
359 |
-
* \endcode
|
360 |
-
*
|
361 |
-
* \see http://www.sgi.com/tech/stl/stable_sort.html
|
362 |
-
* \see \p sort
|
363 |
-
* \see \p stable_sort_by_key
|
364 |
-
*/
|
365 |
-
template<typename DerivedPolicy,
|
366 |
-
typename RandomAccessIterator,
|
367 |
-
typename StrictWeakOrdering>
|
368 |
-
__host__ __device__
|
369 |
-
void stable_sort(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
|
370 |
-
RandomAccessIterator first,
|
371 |
-
RandomAccessIterator last,
|
372 |
-
StrictWeakOrdering comp);
|
373 |
-
|
374 |
-
|
375 |
-
/*! \p stable_sort is much like \c sort: it sorts the elements in
|
376 |
-
* <tt>[first, last)</tt> into ascending order, meaning that if \c i
|
377 |
-
* and \c j are any two valid iterators in <tt>[first, last)</tt> such
|
378 |
-
* that \c i precedes \c j, then \c *j is not less than \c *i.
|
379 |
-
*
|
380 |
-
* As the name suggests, \p stable_sort is stable: it preserves the
|
381 |
-
* relative ordering of equivalent elements. That is, if \c x and \c y
|
382 |
-
* are elements in <tt>[first, last)</tt> such that \c x precedes \c y,
|
383 |
-
* and if the two elements are equivalent (neither <tt>x < y</tt> nor
|
384 |
-
* <tt>y < x</tt>) then a postcondition of \p stable_sort is that \c x
|
385 |
-
* still precedes \c y.
|
386 |
-
*
|
387 |
-
* This version of \p stable_sort compares objects using a function object
|
388 |
-
* \p comp.
|
389 |
-
*
|
390 |
-
* \param first The beginning of the sequence.
|
391 |
-
* \param last The end of the sequence.
|
392 |
-
* \param comp Comparison operator.
|
393 |
-
*
|
394 |
-
* \tparam RandomAccessIterator is a model of <a href="http://www.sgi.com/tech/stl/RandomAccessIterator.html">Random Access Iterator</a>,
|
395 |
-
* \p RandomAccessIterator is mutable,
|
396 |
-
* and \p RandomAccessIterator's \c value_type is convertible to \p StrictWeakOrdering's
|
397 |
-
* \c first_argument_type and \c second_argument_type.
|
398 |
-
* \tparam StrictWeakOrdering is a model of <a href="http://www.sgi.com/tech/stl/StrictWeakOrdering.html">Strict Weak Ordering</a>.
|
399 |
-
*
|
400 |
-
* The following code demonstrates how to sort integers in descending order
|
401 |
-
* using the greater<int> comparison operator.
|
402 |
-
*
|
403 |
-
* \code
|
404 |
-
* #include <thrust/sort.h>
|
405 |
-
* #include <thrust/functional.h>
|
406 |
-
* ...
|
407 |
-
* const int N = 6;
|
408 |
-
* int A[N] = {1, 4, 2, 8, 5, 7};
|
409 |
-
* thrust::sort(A, A + N, thrust::greater<int>());
|
410 |
-
* // A is now {8, 7, 5, 4, 2, 1};
|
411 |
-
* \endcode
|
412 |
-
*
|
413 |
-
* \see http://www.sgi.com/tech/stl/stable_sort.html
|
414 |
-
* \see \p sort
|
415 |
-
* \see \p stable_sort_by_key
|
416 |
-
*/
|
417 |
-
template<typename RandomAccessIterator,
|
418 |
-
typename StrictWeakOrdering>
|
419 |
-
void stable_sort(RandomAccessIterator first,
|
420 |
-
RandomAccessIterator last,
|
421 |
-
StrictWeakOrdering comp);
|
422 |
-
|
423 |
-
|
424 |
-
///////////////
|
425 |
-
// Key Value //
|
426 |
-
///////////////
|
427 |
-
|
428 |
-
|
429 |
-
/*! \p sort_by_key performs a key-value sort. That is, \p sort_by_key sorts the
|
430 |
-
* elements in <tt>[keys_first, keys_last)</tt> and <tt>[values_first,
|
431 |
-
* values_first + (keys_last - keys_first))</tt> into ascending key order,
|
432 |
-
* meaning that if \c i and \c j are any two valid iterators in <tt>[keys_first,
|
433 |
-
* keys_last)</tt> such that \c i precedes \c j, and \c p and \c q are iterators
|
434 |
-
* in <tt>[values_first, values_first + (keys_last - keys_first))</tt>
|
435 |
-
* corresponding to \c i and \c j respectively, then \c *j is not less than
|
436 |
-
* \c *i.
|
437 |
-
*
|
438 |
-
* Note: \c sort_by_key is not guaranteed to be stable. That is, suppose that
|
439 |
-
* \c *i and \c *j are equivalent: neither one is less than the other. It is not
|
440 |
-
* guaranteed that the relative order of these two keys or the relative
|
441 |
-
* order of their corresponding values will be preserved by \p sort_by_key.
|
442 |
-
*
|
443 |
-
* This version of \p sort_by_key compares key objects using \c operator<.
|
444 |
-
*
|
445 |
-
* The algorithm's execution is parallelized as determined by \p exec.
|
446 |
-
*
|
447 |
-
* \param exec The execution policy to use for parallelization.
|
448 |
-
* \param keys_first The beginning of the key sequence.
|
449 |
-
* \param keys_last The end of the key sequence.
|
450 |
-
* \param values_first The beginning of the value sequence.
|
451 |
-
*
|
452 |
-
* \tparam DerivedPolicy The name of the derived execution policy.
|
453 |
-
* \tparam RandomAccessIterator1 is a model of <a href="http://www.sgi.com/tech/stl/RandomAccessIterator.html">Random Access Iterator</a>,
|
454 |
-
* \p RandomAccessIterator1 is mutable,
|
455 |
-
* and \p RandomAccessIterator1's \c value_type is a model of <a href="http://www.sgi.com/tech/stl/LessThanComparable.html">LessThan Comparable</a>,
|
456 |
-
* and the ordering relation on \p RandomAccessIterator1's \c value_type is a <em>strict weak ordering</em>, as defined in the
|
457 |
-
* <a href="http://www.sgi.com/tech/stl/LessThanComparable.html">LessThan Comparable</a> requirements.
|
458 |
-
* \tparam RandomAccessIterator2 is a model of <a href="http://www.sgi.com/tech/stl/RandomAccessIterator.hml">Random Access Iterator</a>,
|
459 |
-
* and \p RandomAccessIterator2 is mutable.
|
460 |
-
*
|
461 |
-
* \pre The range <tt>[keys_first, keys_last))</tt> shall not overlap the range <tt>[values_first, values_first + (keys_last - keys_first))</tt>.
|
462 |
-
*
|
463 |
-
* The following code snippet demonstrates how to use \p sort_by_key to sort
|
464 |
-
* an array of character values using integers as sorting keys using the \p thrust::host execution policy
|
465 |
-
* for parallelization:
|
466 |
-
*
|
467 |
-
* \code
|
468 |
-
* #include <thrust/sort.h>
|
469 |
-
* #include <thrust/execution_policy.h>
|
470 |
-
* ...
|
471 |
-
* const int N = 6;
|
472 |
-
* int keys[N] = { 1, 4, 2, 8, 5, 7};
|
473 |
-
* char values[N] = {'a', 'b', 'c', 'd', 'e', 'f'};
|
474 |
-
* thrust::sort_by_key(thrust::host, keys, keys + N, values);
|
475 |
-
* // keys is now { 1, 2, 4, 5, 7, 8}
|
476 |
-
* // values is now {'a', 'c', 'b', 'e', 'f', 'd'}
|
477 |
-
* \endcode
|
478 |
-
*
|
479 |
-
* \see http://www.sgi.com/tech/stl/sort.html
|
480 |
-
* \see \p stable_sort_by_key
|
481 |
-
* \see \p sort
|
482 |
-
*/
|
483 |
-
template<typename DerivedPolicy,
|
484 |
-
typename RandomAccessIterator1,
|
485 |
-
typename RandomAccessIterator2>
|
486 |
-
__host__ __device__
|
487 |
-
void sort_by_key(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
|
488 |
-
RandomAccessIterator1 keys_first,
|
489 |
-
RandomAccessIterator1 keys_last,
|
490 |
-
RandomAccessIterator2 values_first);
|
491 |
-
|
492 |
-
|
493 |
-
/*! \p sort_by_key performs a key-value sort. That is, \p sort_by_key sorts the
|
494 |
-
* elements in <tt>[keys_first, keys_last)</tt> and <tt>[values_first,
|
495 |
-
* values_first + (keys_last - keys_first))</tt> into ascending key order,
|
496 |
-
* meaning that if \c i and \c j are any two valid iterators in <tt>[keys_first,
|
497 |
-
* keys_last)</tt> such that \c i precedes \c j, and \c p and \c q are iterators
|
498 |
-
* in <tt>[values_first, values_first + (keys_last - keys_first))</tt>
|
499 |
-
* corresponding to \c i and \c j respectively, then \c *j is not less than
|
500 |
-
* \c *i.
|
501 |
-
*
|
502 |
-
* Note: \c sort_by_key is not guaranteed to be stable. That is, suppose that
|
503 |
-
* \c *i and \c *j are equivalent: neither one is less than the other. It is not
|
504 |
-
* guaranteed that the relative order of these two keys or the relative
|
505 |
-
* order of their corresponding values will be preserved by \p sort_by_key.
|
506 |
-
*
|
507 |
-
* This version of \p sort_by_key compares key objects using \c operator<.
|
508 |
-
*
|
509 |
-
* \param keys_first The beginning of the key sequence.
|
510 |
-
* \param keys_last The end of the key sequence.
|
511 |
-
* \param values_first The beginning of the value sequence.
|
512 |
-
*
|
513 |
-
* \tparam RandomAccessIterator1 is a model of <a href="http://www.sgi.com/tech/stl/RandomAccessIterator.html">Random Access Iterator</a>,
|
514 |
-
* \p RandomAccessIterator1 is mutable,
|
515 |
-
* and \p RandomAccessIterator1's \c value_type is a model of <a href="http://www.sgi.com/tech/stl/LessThanComparable.html">LessThan Comparable</a>,
|
516 |
-
* and the ordering relation on \p RandomAccessIterator1's \c value_type is a <em>strict weak ordering</em>, as defined in the
|
517 |
-
* <a href="http://www.sgi.com/tech/stl/LessThanComparable.html">LessThan Comparable</a> requirements.
|
518 |
-
* \tparam RandomAccessIterator2 is a model of <a href="http://www.sgi.com/tech/stl/RandomAccessIterator.hml">Random Access Iterator</a>,
|
519 |
-
* and \p RandomAccessIterator2 is mutable.
|
520 |
-
*
|
521 |
-
* \pre The range <tt>[keys_first, keys_last))</tt> shall not overlap the range <tt>[values_first, values_first + (keys_last - keys_first))</tt>.
|
522 |
-
*
|
523 |
-
* The following code snippet demonstrates how to use \p sort_by_key to sort
|
524 |
-
* an array of character values using integers as sorting keys.
|
525 |
-
*
|
526 |
-
* \code
|
527 |
-
* #include <thrust/sort.h>
|
528 |
-
* ...
|
529 |
-
* const int N = 6;
|
530 |
-
* int keys[N] = { 1, 4, 2, 8, 5, 7};
|
531 |
-
* char values[N] = {'a', 'b', 'c', 'd', 'e', 'f'};
|
532 |
-
* thrust::sort_by_key(keys, keys + N, values);
|
533 |
-
* // keys is now { 1, 2, 4, 5, 7, 8}
|
534 |
-
* // values is now {'a', 'c', 'b', 'e', 'f', 'd'}
|
535 |
-
* \endcode
|
536 |
-
*
|
537 |
-
* \see http://www.sgi.com/tech/stl/sort.html
|
538 |
-
* \see \p stable_sort_by_key
|
539 |
-
* \see \p sort
|
540 |
-
*/
|
541 |
-
template<typename RandomAccessIterator1,
|
542 |
-
typename RandomAccessIterator2>
|
543 |
-
void sort_by_key(RandomAccessIterator1 keys_first,
|
544 |
-
RandomAccessIterator1 keys_last,
|
545 |
-
RandomAccessIterator2 values_first);
|
546 |
-
|
547 |
-
|
548 |
-
/*! \p sort_by_key performs a key-value sort. That is, \p sort_by_key sorts the
|
549 |
-
* elements in <tt>[keys_first, keys_last)</tt> and <tt>[values_first,
|
550 |
-
* values_first + (keys_last - keys_first))</tt> into ascending key order,
|
551 |
-
* meaning that if \c i and \c j are any two valid iterators in <tt>[keys_first,
|
552 |
-
* keys_last)</tt> such that \c i precedes \c j, and \c p and \c q are iterators
|
553 |
-
* in <tt>[values_first, values_first + (keys_last - keys_first))</tt>
|
554 |
-
* corresponding to \c i and \c j respectively, then \c *j is not less than
|
555 |
-
* \c *i.
|
556 |
-
*
|
557 |
-
* Note: \c sort_by_key is not guaranteed to be stable. That is, suppose that
|
558 |
-
* \c *i and \c *j are equivalent: neither one is less than the other. It is not
|
559 |
-
* guaranteed that the relative order of these two keys or the relative
|
560 |
-
* order of their corresponding values will be preserved by \p sort_by_key.
|
561 |
-
*
|
562 |
-
* This version of \p sort_by_key compares key objects using a function object
|
563 |
-
* \c comp.
|
564 |
-
*
|
565 |
-
* The algorithm's execution is parallelized as determined by \p exec.
|
566 |
-
*
|
567 |
-
* \param exec The execution policy to use for parallelization.
|
568 |
-
* \param keys_first The beginning of the key sequence.
|
569 |
-
* \param keys_last The end of the key sequence.
|
570 |
-
* \param values_first The beginning of the value sequence.
|
571 |
-
* \param comp Comparison operator.
|
572 |
-
*
|
573 |
-
* \tparam DerivedPolicy The name of the derived execution policy.
|
574 |
-
* \tparam RandomAccessIterator1 is a model of <a href="http://www.sgi.com/tech/stl/RandomAccessIterator.html">Random Access Iterator</a>,
|
575 |
-
* \p RandomAccessIterator1 is mutable,
|
576 |
-
* and \p RandomAccessIterator1's \c value_type is convertible to \p StrictWeakOrdering's
|
577 |
-
* \c first_argument_type and \c second_argument_type.
|
578 |
-
* \tparam RandomAccessIterator2 is a model of <a href="http://www.sgi.com/tech/stl/RandomAccessIterator.hml">Random Access Iterator</a>,
|
579 |
-
* and \p RandomAccessIterator2 is mutable.
|
580 |
-
* \tparam StrictWeakOrdering is a model of <a href="http://www.sgi.com/tech/stl/StrictWeakOrdering.html">Strict Weak Ordering</a>.
|
581 |
-
*
|
582 |
-
* \pre The range <tt>[keys_first, keys_last))</tt> shall not overlap the range <tt>[values_first, values_first + (keys_last - keys_first))</tt>.
|
583 |
-
*
|
584 |
-
* The following code snippet demonstrates how to use \p sort_by_key to sort
|
585 |
-
* an array of character values using integers as sorting keys using the \p thrust::host execution policy
|
586 |
-
* for parallelization.The keys are sorted in descending order using the <tt>greater<int></tt> comparison operator.
|
587 |
-
*
|
588 |
-
* \code
|
589 |
-
* #include <thrust/sort.h>
|
590 |
-
* #include <thrust/execution_policy.h>
|
591 |
-
* ...
|
592 |
-
* const int N = 6;
|
593 |
-
* int keys[N] = { 1, 4, 2, 8, 5, 7};
|
594 |
-
* char values[N] = {'a', 'b', 'c', 'd', 'e', 'f'};
|
595 |
-
* thrust::sort_by_key(thrust::host, keys, keys + N, values, thrust::greater<int>());
|
596 |
-
* // keys is now { 8, 7, 5, 4, 2, 1}
|
597 |
-
* // values is now {'d', 'f', 'e', 'b', 'c', 'a'}
|
598 |
-
* \endcode
|
599 |
-
*
|
600 |
-
* \see http://www.sgi.com/tech/stl/sort.html
|
601 |
-
* \see \p stable_sort_by_key
|
602 |
-
* \see \p sort
|
603 |
-
*/
|
604 |
-
template<typename DerivedPolicy,
|
605 |
-
typename RandomAccessIterator1,
|
606 |
-
typename RandomAccessIterator2,
|
607 |
-
typename StrictWeakOrdering>
|
608 |
-
__host__ __device__
|
609 |
-
void sort_by_key(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
|
610 |
-
RandomAccessIterator1 keys_first,
|
611 |
-
RandomAccessIterator1 keys_last,
|
612 |
-
RandomAccessIterator2 values_first,
|
613 |
-
StrictWeakOrdering comp);
|
614 |
-
|
615 |
-
|
616 |
-
/*! \p sort_by_key performs a key-value sort. That is, \p sort_by_key sorts the
|
617 |
-
* elements in <tt>[keys_first, keys_last)</tt> and <tt>[values_first,
|
618 |
-
* values_first + (keys_last - keys_first))</tt> into ascending key order,
|
619 |
-
* meaning that if \c i and \c j are any two valid iterators in <tt>[keys_first,
|
620 |
-
* keys_last)</tt> such that \c i precedes \c j, and \c p and \c q are iterators
|
621 |
-
* in <tt>[values_first, values_first + (keys_last - keys_first))</tt>
|
622 |
-
* corresponding to \c i and \c j respectively, then \c *j is not less than
|
623 |
-
* \c *i.
|
624 |
-
*
|
625 |
-
* Note: \c sort_by_key is not guaranteed to be stable. That is, suppose that
|
626 |
-
* \c *i and \c *j are equivalent: neither one is less than the other. It is not
|
627 |
-
* guaranteed that the relative order of these two keys or the relative
|
628 |
-
* order of their corresponding values will be preserved by \p sort_by_key.
|
629 |
-
*
|
630 |
-
* This version of \p sort_by_key compares key objects using a function object
|
631 |
-
* \c comp.
|
632 |
-
*
|
633 |
-
* \param keys_first The beginning of the key sequence.
|
634 |
-
* \param keys_last The end of the key sequence.
|
635 |
-
* \param values_first The beginning of the value sequence.
|
636 |
-
* \param comp Comparison operator.
|
637 |
-
*
|
638 |
-
* \tparam RandomAccessIterator1 is a model of <a href="http://www.sgi.com/tech/stl/RandomAccessIterator.html">Random Access Iterator</a>,
|
639 |
-
* \p RandomAccessIterator1 is mutable,
|
640 |
-
* and \p RandomAccessIterator1's \c value_type is convertible to \p StrictWeakOrdering's
|
641 |
-
* \c first_argument_type and \c second_argument_type.
|
642 |
-
* \tparam RandomAccessIterator2 is a model of <a href="http://www.sgi.com/tech/stl/RandomAccessIterator.hml">Random Access Iterator</a>,
|
643 |
-
* and \p RandomAccessIterator2 is mutable.
|
644 |
-
* \tparam StrictWeakOrdering is a model of <a href="http://www.sgi.com/tech/stl/StrictWeakOrdering.html">Strict Weak Ordering</a>.
|
645 |
-
*
|
646 |
-
* \pre The range <tt>[keys_first, keys_last))</tt> shall not overlap the range <tt>[values_first, values_first + (keys_last - keys_first))</tt>.
|
647 |
-
*
|
648 |
-
* The following code snippet demonstrates how to use \p sort_by_key to sort
|
649 |
-
* an array of character values using integers as sorting keys. The keys
|
650 |
-
* are sorted in descending order using the greater<int> comparison operator.
|
651 |
-
*
|
652 |
-
* \code
|
653 |
-
* #include <thrust/sort.h>
|
654 |
-
* ...
|
655 |
-
* const int N = 6;
|
656 |
-
* int keys[N] = { 1, 4, 2, 8, 5, 7};
|
657 |
-
* char values[N] = {'a', 'b', 'c', 'd', 'e', 'f'};
|
658 |
-
* thrust::sort_by_key(keys, keys + N, values, thrust::greater<int>());
|
659 |
-
* // keys is now { 8, 7, 5, 4, 2, 1}
|
660 |
-
* // values is now {'d', 'f', 'e', 'b', 'c', 'a'}
|
661 |
-
* \endcode
|
662 |
-
*
|
663 |
-
* \see http://www.sgi.com/tech/stl/sort.html
|
664 |
-
* \see \p stable_sort_by_key
|
665 |
-
* \see \p sort
|
666 |
-
*/
|
667 |
-
template<typename RandomAccessIterator1,
|
668 |
-
typename RandomAccessIterator2,
|
669 |
-
typename StrictWeakOrdering>
|
670 |
-
void sort_by_key(RandomAccessIterator1 keys_first,
|
671 |
-
RandomAccessIterator1 keys_last,
|
672 |
-
RandomAccessIterator2 values_first,
|
673 |
-
StrictWeakOrdering comp);
|
674 |
-
|
675 |
-
|
676 |
-
/*! \p stable_sort_by_key performs a key-value sort. That is, \p stable_sort_by_key
|
677 |
-
* sorts the elements in <tt>[keys_first, keys_last)</tt> and <tt>[values_first,
|
678 |
-
* values_first + (keys_last - keys_first))</tt> into ascending key order,
|
679 |
-
* meaning that if \c i and \c j are any two valid iterators in <tt>[keys_first,
|
680 |
-
* keys_last)</tt> such that \c i precedes \c j, and \c p and \c q are iterators
|
681 |
-
* in <tt>[values_first, values_first + (keys_last - keys_first))</tt>
|
682 |
-
* corresponding to \c i and \c j respectively, then \c *j is not less than
|
683 |
-
* \c *i.
|
684 |
-
*
|
685 |
-
* As the name suggests, \p stable_sort_by_key is stable: it preserves the
|
686 |
-
* relative ordering of equivalent elements. That is, if \c x and \c y
|
687 |
-
* are elements in <tt>[keys_first, keys_last)</tt> such that \c x precedes \c y,
|
688 |
-
* and if the two elements are equivalent (neither <tt>x < y</tt> nor
|
689 |
-
* <tt>y < x</tt>) then a postcondition of \p stable_sort_by_key is that \c x
|
690 |
-
* still precedes \c y.
|
691 |
-
*
|
692 |
-
* This version of \p stable_sort_by_key compares key objects using \c operator<.
|
693 |
-
*
|
694 |
-
* The algorithm's execution is parallelized as determined by \p exec.
|
695 |
-
*
|
696 |
-
* \param exec The execution policy to use for parallelization.
|
697 |
-
* \param keys_first The beginning of the key sequence.
|
698 |
-
* \param keys_last The end of the key sequence.
|
699 |
-
* \param values_first The beginning of the value sequence.
|
700 |
-
*
|
701 |
-
* \tparam DerivedPolicy The name of the derived execution policy.
|
702 |
-
* \tparam RandomAccessIterator1 is a model of <a href="http://www.sgi.com/tech/stl/RandomAccessIterator.html">Random Access Iterator</a>,
|
703 |
-
* \p RandomAccessIterator1 is mutable,
|
704 |
-
* and \p RandomAccessIterator1's \c value_type is a model of <a href="http://www.sgi.com/tech/stl/LessThanComparable.html">LessThan Comparable</a>,
|
705 |
-
* and the ordering relation on \p RandomAccessIterator1's \c value_type is a <em>strict weak ordering</em>, as defined in the
|
706 |
-
* <a href="http://www.sgi.com/tech/stl/LessThanComparable.html">LessThan Comparable</a> requirements.
|
707 |
-
* \tparam RandomAccessIterator2 is a model of <a href="http://www.sgi.com/tech/stl/RandomAccessIterator.hml">Random Access Iterator</a>,
|
708 |
-
* and \p RandomAccessIterator2 is mutable.
|
709 |
-
*
|
710 |
-
* \pre The range <tt>[keys_first, keys_last))</tt> shall not overlap the range <tt>[values_first, values_first + (keys_last - keys_first))</tt>.
|
711 |
-
*
|
712 |
-
* The following code snippet demonstrates how to use \p stable_sort_by_key to sort
|
713 |
-
* an array of characters using integers as sorting keys using the \p thrust::host execution policy for
|
714 |
-
* parallelization:
|
715 |
-
*
|
716 |
-
* \code
|
717 |
-
* #include <thrust/sort.h>
|
718 |
-
* #include <thrust/execution_policy.h>
|
719 |
-
* ...
|
720 |
-
* const int N = 6;
|
721 |
-
* int keys[N] = { 1, 4, 2, 8, 5, 7};
|
722 |
-
* char values[N] = {'a', 'b', 'c', 'd', 'e', 'f'};
|
723 |
-
* thrust::stable_sort_by_key(thrust::host, keys, keys + N, values);
|
724 |
-
* // keys is now { 1, 2, 4, 5, 7, 8}
|
725 |
-
* // values is now {'a', 'c', 'b', 'e', 'f', 'd'}
|
726 |
-
* \endcode
|
727 |
-
*
|
728 |
-
* \see http://www.sgi.com/tech/stl/sort.html
|
729 |
-
* \see \p sort_by_key
|
730 |
-
* \see \p stable_sort
|
731 |
-
*/
|
732 |
-
template<typename DerivedPolicy,
|
733 |
-
typename RandomAccessIterator1,
|
734 |
-
typename RandomAccessIterator2>
|
735 |
-
__host__ __device__
|
736 |
-
void stable_sort_by_key(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
|
737 |
-
RandomAccessIterator1 keys_first,
|
738 |
-
RandomAccessIterator1 keys_last,
|
739 |
-
RandomAccessIterator2 values_first);
|
740 |
-
|
741 |
-
|
742 |
-
/*! \p stable_sort_by_key performs a key-value sort. That is, \p stable_sort_by_key
|
743 |
-
* sorts the elements in <tt>[keys_first, keys_last)</tt> and <tt>[values_first,
|
744 |
-
* values_first + (keys_last - keys_first))</tt> into ascending key order,
|
745 |
-
* meaning that if \c i and \c j are any two valid iterators in <tt>[keys_first,
|
746 |
-
* keys_last)</tt> such that \c i precedes \c j, and \c p and \c q are iterators
|
747 |
-
* in <tt>[values_first, values_first + (keys_last - keys_first))</tt>
|
748 |
-
* corresponding to \c i and \c j respectively, then \c *j is not less than
|
749 |
-
* \c *i.
|
750 |
-
*
|
751 |
-
* As the name suggests, \p stable_sort_by_key is stable: it preserves the
|
752 |
-
* relative ordering of equivalent elements. That is, if \c x and \c y
|
753 |
-
* are elements in <tt>[keys_first, keys_last)</tt> such that \c x precedes \c y,
|
754 |
-
* and if the two elements are equivalent (neither <tt>x < y</tt> nor
|
755 |
-
* <tt>y < x</tt>) then a postcondition of \p stable_sort_by_key is that \c x
|
756 |
-
* still precedes \c y.
|
757 |
-
*
|
758 |
-
* This version of \p stable_sort_by_key compares key objects using \c operator<.
|
759 |
-
*
|
760 |
-
* \param keys_first The beginning of the key sequence.
|
761 |
-
* \param keys_last The end of the key sequence.
|
762 |
-
* \param values_first The beginning of the value sequence.
|
763 |
-
*
|
764 |
-
* \tparam RandomAccessIterator1 is a model of <a href="http://www.sgi.com/tech/stl/RandomAccessIterator.html">Random Access Iterator</a>,
|
765 |
-
* \p RandomAccessIterator1 is mutable,
|
766 |
-
* and \p RandomAccessIterator1's \c value_type is a model of <a href="http://www.sgi.com/tech/stl/LessThanComparable.html">LessThan Comparable</a>,
|
767 |
-
* and the ordering relation on \p RandomAccessIterator1's \c value_type is a <em>strict weak ordering</em>, as defined in the
|
768 |
-
* <a href="http://www.sgi.com/tech/stl/LessThanComparable.html">LessThan Comparable</a> requirements.
|
769 |
-
* \tparam RandomAccessIterator2 is a model of <a href="http://www.sgi.com/tech/stl/RandomAccessIterator.hml">Random Access Iterator</a>,
|
770 |
-
* and \p RandomAccessIterator2 is mutable.
|
771 |
-
*
|
772 |
-
* \pre The range <tt>[keys_first, keys_last))</tt> shall not overlap the range <tt>[values_first, values_first + (keys_last - keys_first))</tt>.
|
773 |
-
*
|
774 |
-
* The following code snippet demonstrates how to use \p stable_sort_by_key to sort
|
775 |
-
* an array of characters using integers as sorting keys.
|
776 |
-
*
|
777 |
-
* \code
|
778 |
-
* #include <thrust/sort.h>
|
779 |
-
* ...
|
780 |
-
* const int N = 6;
|
781 |
-
* int keys[N] = { 1, 4, 2, 8, 5, 7};
|
782 |
-
* char values[N] = {'a', 'b', 'c', 'd', 'e', 'f'};
|
783 |
-
* thrust::stable_sort_by_key(keys, keys + N, values);
|
784 |
-
* // keys is now { 1, 2, 4, 5, 7, 8}
|
785 |
-
* // values is now {'a', 'c', 'b', 'e', 'f', 'd'}
|
786 |
-
* \endcode
|
787 |
-
*
|
788 |
-
* \see http://www.sgi.com/tech/stl/sort.html
|
789 |
-
* \see \p sort_by_key
|
790 |
-
* \see \p stable_sort
|
791 |
-
*/
|
792 |
-
template<typename RandomAccessIterator1,
|
793 |
-
typename RandomAccessIterator2>
|
794 |
-
void stable_sort_by_key(RandomAccessIterator1 keys_first,
|
795 |
-
RandomAccessIterator1 keys_last,
|
796 |
-
RandomAccessIterator2 values_first);
|
797 |
-
|
798 |
-
|
799 |
-
/*! \p stable_sort_by_key performs a key-value sort. That is, \p stable_sort_by_key
|
800 |
-
* sorts the elements in <tt>[keys_first, keys_last)</tt> and <tt>[values_first,
|
801 |
-
* values_first + (keys_last - keys_first))</tt> into ascending key order,
|
802 |
-
* meaning that if \c i and \c j are any two valid iterators in <tt>[keys_first,
|
803 |
-
* keys_last)</tt> such that \c i precedes \c j, and \c p and \c q are iterators
|
804 |
-
* in <tt>[values_first, values_first + (keys_last - keys_first))</tt>
|
805 |
-
* corresponding to \c i and \c j respectively, then \c *j is not less than
|
806 |
-
* \c *i.
|
807 |
-
*
|
808 |
-
* As the name suggests, \p stable_sort_by_key is stable: it preserves the
|
809 |
-
* relative ordering of equivalent elements. That is, if \c x and \c y
|
810 |
-
* are elements in <tt>[keys_first, keys_last)</tt> such that \c x precedes \c y,
|
811 |
-
* and if the two elements are equivalent (neither <tt>x < y</tt> nor
|
812 |
-
* <tt>y < x</tt>) then a postcondition of \p stable_sort_by_key is that \c x
|
813 |
-
* still precedes \c y.
|
814 |
-
*
|
815 |
-
* This version of \p stable_sort_by_key compares key objects using the function
|
816 |
-
* object \p comp.
|
817 |
-
*
|
818 |
-
* The algorithm's execution is parallelized as determined by \p exec.
|
819 |
-
*
|
820 |
-
* \param exec The execution policy to use for parallelization.
|
821 |
-
* \param keys_first The beginning of the key sequence.
|
822 |
-
* \param keys_last The end of the key sequence.
|
823 |
-
* \param values_first The beginning of the value sequence.
|
824 |
-
* \param comp Comparison operator.
|
825 |
-
*
|
826 |
-
* \tparam DerivedPolicy The name of the derived execution policy.
|
827 |
-
* \tparam RandomAccessIterator1 is a model of <a href="http://www.sgi.com/tech/stl/RandomAccessIterator.html">Random Access Iterator</a>,
|
828 |
-
* \p RandomAccessIterator1 is mutable,
|
829 |
-
* and \p RandomAccessIterator1's \c value_type is convertible to \p StrictWeakOrdering's
|
830 |
-
* \c first_argument_type and \c second_argument_type.
|
831 |
-
* \tparam RandomAccessIterator2 is a model of <a href="http://www.sgi.com/tech/stl/RandomAccessIterator.hml">Random Access Iterator</a>,
|
832 |
-
* and \p RandomAccessIterator2 is mutable.
|
833 |
-
* \tparam StrictWeakOrdering is a model of <a href="http://www.sgi.com/tech/stl/StrictWeakOrdering.html">Strict Weak Ordering</a>.
|
834 |
-
*
|
835 |
-
* \pre The range <tt>[keys_first, keys_last))</tt> shall not overlap the range <tt>[values_first, values_first + (keys_last - keys_first))</tt>.
|
836 |
-
*
|
837 |
-
* The following code snippet demonstrates how to use \p sort_by_key to sort
|
838 |
-
* an array of character values using integers as sorting keys using the \p thrust::host execution policy for
|
839 |
-
* parallelization. The keys are sorted in descending order using the <tt>greater<int></tt> comparison operator.
|
840 |
-
*
|
841 |
-
* \code
|
842 |
-
* #include <thrust/sort.h>
|
843 |
-
* #include <thrust/execution_policy.h>
|
844 |
-
* ...
|
845 |
-
* const int N = 6;
|
846 |
-
* int keys[N] = { 1, 4, 2, 8, 5, 7};
|
847 |
-
* char values[N] = {'a', 'b', 'c', 'd', 'e', 'f'};
|
848 |
-
* thrust::stable_sort_by_key(thrust::host, keys, keys + N, values, thrust::greater<int>());
|
849 |
-
* // keys is now { 8, 7, 5, 4, 2, 1}
|
850 |
-
* // values is now {'d', 'f', 'e', 'b', 'c', 'a'}
|
851 |
-
* \endcode
|
852 |
-
*
|
853 |
-
*
|
854 |
-
* \see http://www.sgi.com/tech/stl/sort.html
|
855 |
-
* \see \p sort_by_key
|
856 |
-
* \see \p stable_sort
|
857 |
-
*/
|
858 |
-
template<typename DerivedPolicy,
|
859 |
-
typename RandomAccessIterator1,
|
860 |
-
typename RandomAccessIterator2,
|
861 |
-
typename StrictWeakOrdering>
|
862 |
-
__host__ __device__
|
863 |
-
void stable_sort_by_key(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
|
864 |
-
RandomAccessIterator1 keys_first,
|
                          RandomAccessIterator1 keys_last,
                          RandomAccessIterator2 values_first,
                          StrictWeakOrdering comp);


/*! \p stable_sort_by_key performs a key-value sort. That is, \p stable_sort_by_key
 *  sorts the elements in <tt>[keys_first, keys_last)</tt> and <tt>[values_first,
 *  values_first + (keys_last - keys_first))</tt> into ascending key order,
 *  meaning that if \c i and \c j are any two valid iterators in <tt>[keys_first,
 *  keys_last)</tt> such that \c i precedes \c j, and \c p and \c q are iterators
 *  in <tt>[values_first, values_first + (keys_last - keys_first))</tt>
 *  corresponding to \c i and \c j respectively, then \c *j is not less than
 *  \c *i.
 *
 *  As the name suggests, \p stable_sort_by_key is stable: it preserves the
 *  relative ordering of equivalent elements. That is, if \c x and \c y
 *  are elements in <tt>[keys_first, keys_last)</tt> such that \c x precedes \c y,
 *  and if the two elements are equivalent (neither <tt>x < y</tt> nor
 *  <tt>y < x</tt>) then a postcondition of \p stable_sort_by_key is that \c x
 *  still precedes \c y.
 *
 *  This version of \p stable_sort_by_key compares key objects using the function
 *  object \p comp.
 *
 *  \param keys_first The beginning of the key sequence.
 *  \param keys_last The end of the key sequence.
 *  \param values_first The beginning of the value sequence.
 *  \param comp Comparison operator.
 *
 *  \tparam RandomAccessIterator1 is a model of <a href="http://www.sgi.com/tech/stl/RandomAccessIterator.html">Random Access Iterator</a>,
 *          \p RandomAccessIterator1 is mutable,
 *          and \p RandomAccessIterator1's \c value_type is convertible to \p StrictWeakOrdering's
 *          \c first_argument_type and \c second_argument_type.
 *  \tparam RandomAccessIterator2 is a model of <a href="http://www.sgi.com/tech/stl/RandomAccessIterator.html">Random Access Iterator</a>,
 *          and \p RandomAccessIterator2 is mutable.
 *  \tparam StrictWeakOrdering is a model of <a href="http://www.sgi.com/tech/stl/StrictWeakOrdering.html">Strict Weak Ordering</a>.
 *
 *  \pre The range <tt>[keys_first, keys_last)</tt> shall not overlap the range <tt>[values_first, values_first + (keys_last - keys_first))</tt>.
 *
 *  The following code snippet demonstrates how to use \p stable_sort_by_key to sort
 *  an array of character values using integers as sorting keys. The keys
 *  are sorted in descending order using the greater<int> comparison operator.
 *
 *  \code
 *  #include <thrust/sort.h>
 *  ...
 *  const int N = 6;
 *  int  keys[N]   = {  1,   4,   2,   8,   5,   7};
 *  char values[N] = {'a', 'b', 'c', 'd', 'e', 'f'};
 *  thrust::stable_sort_by_key(keys, keys + N, values, thrust::greater<int>());
 *  // keys is now   {  8,   7,   5,   4,   2,   1}
 *  // values is now {'d', 'f', 'e', 'b', 'c', 'a'}
 *  \endcode
 *
 *  \see http://www.sgi.com/tech/stl/sort.html
 *  \see \p sort_by_key
 *  \see \p stable_sort
 */
template<typename RandomAccessIterator1,
         typename RandomAccessIterator2,
         typename StrictWeakOrdering>
  void stable_sort_by_key(RandomAccessIterator1 keys_first,
                          RandomAccessIterator1 keys_last,
                          RandomAccessIterator2 values_first,
                          StrictWeakOrdering comp);

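The snippet in the documentation above uses distinct keys, so it never exercises the stability guarantee. A minimal sketch with duplicate keys (illustration only, not part of the deleted header) makes the postcondition concrete:

```cpp
#include <thrust/sort.h>
#include <thrust/functional.h>

int main()
{
  const int N = 6;
  int  keys[N]   = {  1,   4,   1,   8,   1,   7};
  char values[N] = {'a', 'b', 'c', 'd', 'e', 'f'};

  // Sort ascending; the three entries with key 1 compare equivalent, so
  // stable_sort_by_key must keep them in their original order: 'a', 'c', 'e'.
  thrust::stable_sort_by_key(keys, keys + N, values, thrust::less<int>());

  // keys is now   {  1,   1,   1,   4,   7,   8}
  // values is now {'a', 'c', 'e', 'b', 'f', 'd'}
  return 0;
}
```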

/*! \} // end sorting
 */


/*! \addtogroup reductions
 *  \{
 *  \addtogroup predicates
 *  \{
 */


/*! \p is_sorted returns \c true if the range <tt>[first, last)</tt> is
 *  sorted in ascending order, and \c false otherwise.
 *
 *  Specifically, this version of \p is_sorted returns \c false if for
 *  some iterator \c i in the range <tt>[first, last - 1)</tt> the
 *  expression <tt>*(i + 1) < *i</tt> is \c true.
 *
 *  The algorithm's execution is parallelized as determined by \p exec.
 *
 *  \param exec The execution policy to use for parallelization.
 *  \param first The beginning of the sequence.
 *  \param last The end of the sequence.
 *  \return \c true, if the sequence is sorted; \c false, otherwise.
 *
 *  \tparam DerivedPolicy The name of the derived execution policy.
 *  \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a>,
 *          \p ForwardIterator's \c value_type is a model of <a href="http://www.sgi.com/tech/stl/LessThanComparable.html">LessThan Comparable</a>,
 *          and the ordering on objects of \p ForwardIterator's \c value_type is a <em>strict weak ordering</em>, as defined
 *          in the <a href="http://www.sgi.com/tech/stl/LessThanComparable.html">LessThan Comparable</a> requirements.
 *
 *  The following code demonstrates how to use \p is_sorted to test whether the
 *  contents of a \c device_vector are stored in ascending order using the \p thrust::device execution policy
 *  for parallelization:
 *
 *  \code
 *  #include <thrust/sort.h>
 *  #include <thrust/device_vector.h>
 *  #include <thrust/execution_policy.h>
 *  ...
 *  thrust::device_vector<int> v(6);
 *  v[0] = 1;
 *  v[1] = 4;
 *  v[2] = 2;
 *  v[3] = 8;
 *  v[4] = 5;
 *  v[5] = 7;
 *
 *  bool result = thrust::is_sorted(thrust::device, v.begin(), v.end());
 *
 *  // result == false
 *
 *  thrust::sort(v.begin(), v.end());
 *  result = thrust::is_sorted(thrust::device, v.begin(), v.end());
 *
 *  // result == true
 *  \endcode
 *
 *  \see http://www.sgi.com/tech/stl/is_sorted.html
 *  \see is_sorted_until
 *  \see \c sort
 *  \see \c stable_sort
 *  \see \c less<T>
 */
template<typename DerivedPolicy, typename ForwardIterator>
__host__ __device__
  bool is_sorted(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
                 ForwardIterator first,
                 ForwardIterator last);

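The wording above pins is_sorted down to a scan over adjacent pairs. A sequential reference version (a sketch of the documented semantics, not Thrust's parallel implementation) would be:

```cpp
// Returns true iff no adjacent pair is out of order, i.e. there is no
// iterator i in [first, last - 1) with *(i + 1) < *i.
template <typename ForwardIterator>
bool is_sorted_reference(ForwardIterator first, ForwardIterator last)
{
  if (first == last) return true;  // ranges of fewer than two elements are sorted
  ForwardIterator next = first;
  for (++next; next != last; ++first, ++next)
    if (*next < *first)
      return false;
  return true;
}
```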

/*! \p is_sorted returns \c true if the range <tt>[first, last)</tt> is
 *  sorted in ascending order, and \c false otherwise.
 *
 *  Specifically, this version of \p is_sorted returns \c false if for
 *  some iterator \c i in the range <tt>[first, last - 1)</tt> the
 *  expression <tt>*(i + 1) < *i</tt> is \c true.
 *
 *  \param first The beginning of the sequence.
 *  \param last The end of the sequence.
 *  \return \c true, if the sequence is sorted; \c false, otherwise.
 *
 *  \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a>,
 *          \p ForwardIterator's \c value_type is a model of <a href="http://www.sgi.com/tech/stl/LessThanComparable.html">LessThan Comparable</a>,
 *          and the ordering on objects of \p ForwardIterator's \c value_type is a <em>strict weak ordering</em>, as defined
 *          in the <a href="http://www.sgi.com/tech/stl/LessThanComparable.html">LessThan Comparable</a> requirements.
 *
 *  The following code demonstrates how to use \p is_sorted to test whether the
 *  contents of a \c device_vector are stored in ascending order.
 *
 *  \code
 *  #include <thrust/sort.h>
 *  #include <thrust/device_vector.h>
 *  ...
 *  thrust::device_vector<int> v(6);
 *  v[0] = 1;
 *  v[1] = 4;
 *  v[2] = 2;
 *  v[3] = 8;
 *  v[4] = 5;
 *  v[5] = 7;
 *
 *  bool result = thrust::is_sorted(v.begin(), v.end());
 *
 *  // result == false
 *
 *  thrust::sort(v.begin(), v.end());
 *  result = thrust::is_sorted(v.begin(), v.end());
 *
 *  // result == true
 *  \endcode
 *
 *  \see http://www.sgi.com/tech/stl/is_sorted.html
 *  \see is_sorted_until
 *  \see \c sort
 *  \see \c stable_sort
 *  \see \c less<T>
 */
template<typename ForwardIterator>
  bool is_sorted(ForwardIterator first,
                 ForwardIterator last);


/*! \p is_sorted returns \c true if the range <tt>[first, last)</tt> is sorted in ascending
 *  order according to a user-defined comparison operation, and \c false otherwise.
 *
 *  Specifically, this version of \p is_sorted returns \c false if for some iterator \c i in
 *  the range <tt>[first, last - 1)</tt> the expression <tt>comp(*(i + 1), *i)</tt> is \c true.
 *
 *  The algorithm's execution is parallelized as determined by \p exec.
 *
 *  \param exec The execution policy to use for parallelization.
 *  \param first The beginning of the sequence.
 *  \param last The end of the sequence.
 *  \param comp Comparison operator.
 *  \return \c true, if the sequence is sorted according to comp; \c false, otherwise.
 *
 *  \tparam DerivedPolicy The name of the derived execution policy.
 *  \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a>,
 *          and \p ForwardIterator's \c value_type is convertible to both \c StrictWeakOrdering's \c first_argument_type
 *          and \c second_argument_type.
 *  \tparam Compare is a model of <a href="http://www.sgi.com/tech/stl/StrictWeakOrdering.html">Strict Weak Ordering</a>.
 *
 *  The following code snippet demonstrates how to use \p is_sorted to test whether the
 *  contents of a \c device_vector are stored in descending order using the \p thrust::device execution
 *  policy for parallelization:
 *
 *  \code
 *  #include <thrust/sort.h>
 *  #include <thrust/functional.h>
 *  #include <thrust/device_vector.h>
 *  #include <thrust/execution_policy.h>
 *  ...
 *  thrust::device_vector<int> v(6);
 *  v[0] = 1;
 *  v[1] = 4;
 *  v[2] = 2;
 *  v[3] = 8;
 *  v[4] = 5;
 *  v[5] = 7;
 *
 *  thrust::greater<int> comp;
 *  bool result = thrust::is_sorted(thrust::device, v.begin(), v.end(), comp);
 *
 *  // result == false
 *
 *  thrust::sort(v.begin(), v.end(), comp);
 *  result = thrust::is_sorted(thrust::device, v.begin(), v.end(), comp);
 *
 *  // result == true
 *  \endcode
 *
 *  \see http://www.sgi.com/tech/stl/is_sorted.html
 *  \see \c sort
 *  \see \c stable_sort
 *  \see \c less<T>
 */
template<typename DerivedPolicy, typename ForwardIterator, typename Compare>
__host__ __device__
  bool is_sorted(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
                 ForwardIterator first,
                 ForwardIterator last,
                 Compare comp);


/*! \p is_sorted returns \c true if the range <tt>[first, last)</tt> is sorted in ascending
 *  order according to a user-defined comparison operation, and \c false otherwise.
 *
 *  Specifically, this version of \p is_sorted returns \c false if for some iterator \c i in
 *  the range <tt>[first, last - 1)</tt> the expression <tt>comp(*(i + 1), *i)</tt> is \c true.
 *
 *  \param first The beginning of the sequence.
 *  \param last The end of the sequence.
 *  \param comp Comparison operator.
 *  \return \c true, if the sequence is sorted according to comp; \c false, otherwise.
 *
 *  \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a>,
 *          and \p ForwardIterator's \c value_type is convertible to both \c StrictWeakOrdering's \c first_argument_type
 *          and \c second_argument_type.
 *  \tparam Compare is a model of <a href="http://www.sgi.com/tech/stl/StrictWeakOrdering.html">Strict Weak Ordering</a>.
 *
 *  The following code snippet demonstrates how to use \p is_sorted to test whether the
 *  contents of a \c device_vector are stored in descending order.
 *
 *  \code
 *  #include <thrust/sort.h>
 *  #include <thrust/functional.h>
 *  #include <thrust/device_vector.h>
 *  ...
 *  thrust::device_vector<int> v(6);
 *  v[0] = 1;
 *  v[1] = 4;
 *  v[2] = 2;
 *  v[3] = 8;
 *  v[4] = 5;
 *  v[5] = 7;
 *
 *  thrust::greater<int> comp;
 *  bool result = thrust::is_sorted(v.begin(), v.end(), comp);
 *
 *  // result == false
 *
 *  thrust::sort(v.begin(), v.end(), comp);
 *  result = thrust::is_sorted(v.begin(), v.end(), comp);
 *
 *  // result == true
 *  \endcode
 *
 *  \see http://www.sgi.com/tech/stl/is_sorted.html
 *  \see \c sort
 *  \see \c stable_sort
 *  \see \c less<T>
 */
template<typename ForwardIterator, typename Compare>
  bool is_sorted(ForwardIterator first,
                 ForwardIterator last,
                 Compare comp);


/*! This version of \p is_sorted_until returns the last iterator \c i in <tt>[first,last]</tt> for
 *  which the range <tt>[first,i)</tt> is sorted using \c operator<. If <tt>distance(first,last) < 2</tt>,
 *  \p is_sorted_until simply returns \p last.
 *
 *  The algorithm's execution is parallelized as determined by \p exec.
 *
 *  \param exec The execution policy to use for parallelization.
 *  \param first The beginning of the range of interest.
 *  \param last The end of the range of interest.
 *  \return The last iterator in the input range for which it is sorted.
 *
 *  \tparam DerivedPolicy The name of the derived execution policy.
 *  \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a> and
 *          \p ForwardIterator's \c value_type is a model of <a href="http://www.sgi.com/tech/stl/LessThanComparable.html">LessThan Comparable</a>.
 *
 *  The following code snippet demonstrates how to use \p is_sorted_until to find the first position
 *  in an array where the data becomes unsorted using the \p thrust::host execution policy for
 *  parallelization:
 *
 *  \code
 *  #include <thrust/sort.h>
 *  #include <thrust/execution_policy.h>
 *
 *  ...
 *
 *  int A[8] = {0, 1, 2, 3, 0, 1, 2, 3};
 *
 *  int * B = thrust::is_sorted_until(thrust::host, A, A + 8);
 *
 *  // B - A is 4
 *  // [A, B) is sorted
 *  \endcode
 *
 *  \see \p is_sorted
 *  \see \p sort
 *  \see \p sort_by_key
 *  \see \p stable_sort
 *  \see \p stable_sort_by_key
 */
template<typename DerivedPolicy, typename ForwardIterator>
__host__ __device__
  ForwardIterator is_sorted_until(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
                                  ForwardIterator first,
                                  ForwardIterator last);

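The contract is easiest to see in a sequential sketch (an illustration of the documented behavior, not Thrust's parallel implementation): the result is the iterator one past the longest sorted prefix.

```cpp
// [first, result) is sorted, and result == last when the whole range is
// sorted (including ranges of fewer than two elements).
template <typename ForwardIterator>
ForwardIterator is_sorted_until_reference(ForwardIterator first, ForwardIterator last)
{
  if (first == last) return last;
  ForwardIterator next = first;
  for (++next; next != last; ++first, ++next)
    if (*next < *first)
      return next;  // the first out-of-order element
  return last;
}

// For int A[8] = {0, 1, 2, 3, 0, 1, 2, 3}; this sketch returns A + 4,
// matching the documented example above.
```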

/*! This version of \p is_sorted_until returns the last iterator \c i in <tt>[first,last]</tt> for
 *  which the range <tt>[first,i)</tt> is sorted using \c operator<. If <tt>distance(first,last) < 2</tt>,
 *  \p is_sorted_until simply returns \p last.
 *
 *  \param first The beginning of the range of interest.
 *  \param last The end of the range of interest.
 *  \return The last iterator in the input range for which it is sorted.
 *
 *  \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a> and
 *          \p ForwardIterator's \c value_type is a model of <a href="http://www.sgi.com/tech/stl/LessThanComparable.html">LessThan Comparable</a>.
 *
 *  The following code snippet demonstrates how to use \p is_sorted_until to find the first position
 *  in an array where the data becomes unsorted:
 *
 *  \code
 *  #include <thrust/sort.h>
 *
 *  ...
 *
 *  int A[8] = {0, 1, 2, 3, 0, 1, 2, 3};
 *
 *  int * B = thrust::is_sorted_until(A, A + 8);
 *
 *  // B - A is 4
 *  // [A, B) is sorted
 *  \endcode
 *
 *  \see \p is_sorted
 *  \see \p sort
 *  \see \p sort_by_key
 *  \see \p stable_sort
 *  \see \p stable_sort_by_key
 */
template<typename ForwardIterator>
  ForwardIterator is_sorted_until(ForwardIterator first,
                                  ForwardIterator last);


/*! This version of \p is_sorted_until returns the last iterator \c i in <tt>[first,last]</tt> for
 *  which the range <tt>[first,i)</tt> is sorted using the function object \c comp. If <tt>distance(first,last) < 2</tt>,
 *  \p is_sorted_until simply returns \p last.
 *
 *  The algorithm's execution is parallelized as determined by \p exec.
 *
 *  \param exec The execution policy to use for parallelization.
 *  \param first The beginning of the range of interest.
 *  \param last The end of the range of interest.
 *  \param comp The function object to use for comparison.
 *  \return The last iterator in the input range for which it is sorted.
 *
 *  \tparam DerivedPolicy The name of the derived execution policy.
 *  \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a> and
 *          \p ForwardIterator's \c value_type is convertible to \p Compare's \c argument_type.
 *  \tparam Compare is a model of <a href="http://www.sgi.com/tech/stl/StrictWeakOrdering.html">Strict Weak Ordering</a>.
 *
 *  The following code snippet demonstrates how to use \p is_sorted_until to find the first position
 *  in an array where the data becomes unsorted in descending order using the \p thrust::host execution
 *  policy for parallelization:
 *
 *  \code
 *  #include <thrust/sort.h>
 *  #include <thrust/functional.h>
 *  #include <thrust/execution_policy.h>
 *
 *  ...
 *
 *  int A[8] = {3, 2, 1, 0, 3, 2, 1, 0};
 *
 *  thrust::greater<int> comp;
 *  int * B = thrust::is_sorted_until(thrust::host, A, A + 8, comp);
 *
 *  // B - A is 4
 *  // [A, B) is sorted in descending order
 *  \endcode
 *
 *  \see \p is_sorted
 *  \see \p sort
 *  \see \p sort_by_key
 *  \see \p stable_sort
 *  \see \p stable_sort_by_key
 */
template<typename DerivedPolicy, typename ForwardIterator, typename Compare>
__host__ __device__
  ForwardIterator is_sorted_until(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
                                  ForwardIterator first,
                                  ForwardIterator last,
                                  Compare comp);


/*! This version of \p is_sorted_until returns the last iterator \c i in <tt>[first,last]</tt> for
 *  which the range <tt>[first,i)</tt> is sorted using the function object \c comp. If <tt>distance(first,last) < 2</tt>,
 *  \p is_sorted_until simply returns \p last.
 *
 *  \param first The beginning of the range of interest.
 *  \param last The end of the range of interest.
 *  \param comp The function object to use for comparison.
 *  \return The last iterator in the input range for which it is sorted.
 *
 *  \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a> and
 *          \p ForwardIterator's \c value_type is convertible to \p Compare's \c argument_type.
 *  \tparam Compare is a model of <a href="http://www.sgi.com/tech/stl/StrictWeakOrdering.html">Strict Weak Ordering</a>.
 *
 *  The following code snippet demonstrates how to use \p is_sorted_until to find the first position
 *  in an array where the data becomes unsorted in descending order:
 *
 *  \code
 *  #include <thrust/sort.h>
 *  #include <thrust/functional.h>
 *
 *  ...
 *
 *  int A[8] = {3, 2, 1, 0, 3, 2, 1, 0};
 *
 *  thrust::greater<int> comp;
 *  int * B = thrust::is_sorted_until(A, A + 8, comp);
 *
 *  // B - A is 4
 *  // [A, B) is sorted in descending order
 *  \endcode
 *
 *  \see \p is_sorted
 *  \see \p sort
 *  \see \p sort_by_key
 *  \see \p stable_sort
 *  \see \p stable_sort_by_key
 */
template<typename ForwardIterator, typename Compare>
  ForwardIterator is_sorted_until(ForwardIterator first,
                                  ForwardIterator last,
                                  Compare comp);


/*! \} // end predicates
 *  \} // end reductions
 */


} // end namespace thrust

#include <thrust/detail/sort.inl>
spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/gather.h
DELETED
@@ -1,23 +0,0 @@
/*
 *  Copyright 2008-2013 NVIDIA Corporation
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

#pragma once

#include <thrust/detail/config.h>

// this system inherits gather
#include <thrust/system/cpp/detail/gather.h>
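The header is a pure forwarding shim: the OpenMP backend declares no gather overload of its own, so tag dispatch falls through to the sequential cpp implementation it includes. A hypothetical caller-side sketch (everything outside this header, including the vectors and main, is an assumption; thrust::omp::par comes from thrust/system/omp/execution_policy.h):

```cpp
#include <thrust/gather.h>
#include <thrust/host_vector.h>
#include <thrust/system/omp/execution_policy.h>

int main()
{
  thrust::host_vector<int> values(4);
  values[0] = 10; values[1] = 20; values[2] = 30; values[3] = 40;
  thrust::host_vector<int> map(4);
  map[0] = 3; map[1] = 2; map[2] = 1; map[3] = 0;
  thrust::host_vector<int> out(4);

  // Dispatching on thrust::omp::par ends up in the inherited cpp gather:
  // out[i] = values[map[i]]
  thrust::gather(thrust::omp::par, map.begin(), map.end(),
                 values.begin(), out.begin());
  // out == {40, 30, 20, 10}
  return 0;
}
```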
spaces/CVPR/LIVE/thrust/thrust/type_traits/integer_sequence.h
DELETED
@@ -1,262 +0,0 @@
///////////////////////////////////////////////////////////////////////////////
//  Copyright (c) 2018 NVIDIA Corporation
//  Copyright (c) 2015-2018 Bryce Adelstein Lelbach aka wash
//
//  Distributed under the Boost Software License, Version 1.0. (See accompanying
//  file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
///////////////////////////////////////////////////////////////////////////////

/*! \file integer_sequence.h
 *  \brief C++14's \c integer_sequence and associated helper aliases plus some
 *  extensions.
 */

#pragma once

#include <thrust/detail/config.h>
#include <thrust/detail/cpp11_required.h>

#if THRUST_CPP_DIALECT >= 2011

#include <type_traits>
#include <utility>
#include <cstdint>

namespace thrust
{

#if THRUST_CPP_DIALECT >= 2014

// A compile-time sequence of integral constants of type T.
template <typename T, T... Is>
using integer_sequence = std::integer_sequence<T, Is...>;

// A compile-time sequence of std::size_t constants.
template <std::size_t... Is>
using index_sequence = std::index_sequence<Is...>;

// Create a new integer_sequence with elements 0, 1, 2, ..., N - 1.
template <typename T, std::size_t N>
using make_integer_sequence = std::make_integer_sequence<T, N>;

// Create a new index_sequence with elements 0, 1, 2, ..., N - 1.
template <std::size_t N>
using make_index_sequence = std::make_index_sequence<N>;

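On C++14 toolchains these are straight aliases for the standard traits; the main consumer pattern is pack expansion over an index pack. A small usage sketch (hypothetical names, not from this header):

```cpp
#include <thrust/type_traits/integer_sequence.h>
#include <tuple>
#include <iostream>

// Sum a tuple of ints by expanding an index pack over std::get.
template <typename Tuple, std::size_t... Is>
int sum_impl(const Tuple& t, thrust::index_sequence<Is...>)
{
  int total = 0;
  // Expands to: total += std::get<0>(t), total += std::get<1>(t), ...
  int unused[] = { (total += std::get<Is>(t), 0)... };
  (void) unused;
  return total;
}

template <typename... Ts>
int sum(const std::tuple<Ts...>& t)
{
  return sum_impl(t, thrust::make_index_sequence<sizeof...(Ts)>{});
}

int main()
{
  std::cout << sum(std::make_tuple(1, 2, 3)) << "\n";  // prints 6
}
```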
///////////////////////////////////////////////////////////////////////////////

#else // Older than C++14.

// A compile-time sequence of integral constants of type T.
template <typename T, T... Is>
struct integer_sequence;

// A compile-time sequence of std::size_t constants.
template <std::size_t... Is>
using index_sequence = integer_sequence<std::size_t, Is...>;

///////////////////////////////////////////////////////////////////////////////

namespace detail
{

// Create a new integer_sequence containing the elements of Sequence0 followed
// by the elements of Sequence1. Sequence0::size() is added to each element from
// Sequence1 in the new sequence.
template <typename Sequence0, typename Sequence1>
struct merge_and_renumber_integer_sequences_impl;
template <typename Sequence0, typename Sequence1>
using merge_and_renumber_integer_sequences =
  typename merge_and_renumber_integer_sequences_impl<
    Sequence0, Sequence1
  >::type;

// Create a new integer_sequence with elements 0, 1, 2, ..., N - 1.
template <typename T, std::size_t N>
struct make_integer_sequence_impl;

} // namespace detail

///////////////////////////////////////////////////////////////////////////////

// Create a new integer_sequence with elements 0, 1, 2, ..., N - 1.
template <typename T, std::size_t N>
using make_integer_sequence =
  typename detail::make_integer_sequence_impl<T, N>::type;

// Create a new index_sequence with elements 0, 1, 2, ..., N - 1.
template <std::size_t N>
using make_index_sequence =
  make_integer_sequence<std::size_t, N>;

///////////////////////////////////////////////////////////////////////////////

template <typename T, T... Is>
struct integer_sequence
{
  using type = integer_sequence;
  using value_type = T;
  using size_type = std::size_t;

  __host__ __device__
  static constexpr size_type size() noexcept
  {
    return sizeof...(Is);
  }
};
///////////////////////////////////////////////////////////////////////////////

namespace detail
{

template <typename T, T... Is0, T... Is1>
struct merge_and_renumber_integer_sequences_impl<
  integer_sequence<T, Is0...>, integer_sequence<T, Is1...>
>
{
  using type = integer_sequence<T, Is0..., (sizeof...(Is0) + Is1)...>;
};

///////////////////////////////////////////////////////////////////////////////

template <typename T, std::size_t N>
struct make_integer_sequence_impl
{
  using type = merge_and_renumber_integer_sequences<
    make_integer_sequence<T, N / 2>
  , make_integer_sequence<T, N - N / 2>
  >;
};

template <typename T>
struct make_integer_sequence_impl<T, 0>
{
  using type = integer_sequence<T>;
};

template <typename T>
struct make_integer_sequence_impl<T, 1>
{
  using type = integer_sequence<T, 0>;
};

} // namespace detail

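The half-and-half split keeps template instantiation depth logarithmic in N rather than linear; for example, make_integer_sequence<int, 5> merges sequences of length 2 and 3 and renumbers the second half. An illustrative compile-time check (not from this header; it holds under either branch, since the C++14 branch aliases the standard traits):

```cpp
#include <thrust/type_traits/integer_sequence.h>
#include <type_traits>

// make_integer_sequence<int, 5> = merge(make<int, 2>, make<int, 3>)
//                               = merge({0, 1}, {0, 1, 2})
//                               = {0, 1, 2 + 0, 2 + 1, 2 + 2} = {0, 1, 2, 3, 4}
static_assert(
  std::is_same<
    thrust::make_integer_sequence<int, 5>,
    thrust::integer_sequence<int, 0, 1, 2, 3, 4>
  >::value,
  "log-depth construction yields 0..N-1");
```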
#endif // THRUST_CPP_DIALECT >= 2014

///////////////////////////////////////////////////////////////////////////////

namespace detail
{

// Create a new integer_sequence containing the elements of Sequence0 followed
// by the elements of Sequence1. Sequence1::size() is added to each element from
// Sequence0 in the new sequence.
template <typename Sequence0, typename Sequence1>
struct merge_and_renumber_reversed_integer_sequences_impl;
template <typename Sequence0, typename Sequence1>
using merge_and_renumber_reversed_integer_sequences =
  typename merge_and_renumber_reversed_integer_sequences_impl<
    Sequence0, Sequence1
  >::type;

// Create a new integer_sequence with elements N - 1, N - 2, N - 3, ..., 0.
template <typename T, std::size_t N>
struct make_reversed_integer_sequence_impl;

// Add a new element to the front of an integer_sequence<>.
template <typename T, T I, typename Sequence>
struct integer_sequence_push_front_impl;

// Add a new element to the back of an integer_sequence<>.
template <typename T, T I, typename Sequence>
struct integer_sequence_push_back_impl;

}

///////////////////////////////////////////////////////////////////////////////

// Create a new integer_sequence with elements N - 1, N - 2, N - 3, ..., 0.
template <typename T, std::size_t N>
using make_reversed_integer_sequence =
  typename detail::make_reversed_integer_sequence_impl<T, N>::type;

// Create a new index_sequence with elements N - 1, N - 2, N - 3, ..., 0.
template <std::size_t N>
using make_reversed_index_sequence =
  make_reversed_integer_sequence<std::size_t, N>;

// Add a new element to the front of an integer_sequence<>.
template <typename T, T I, typename Sequence>
using integer_sequence_push_front =
  typename detail::integer_sequence_push_front_impl<T, I, Sequence>::type;

// Add a new element to the back of an integer_sequence<>.
template <typename T, T I, typename Sequence>
using integer_sequence_push_back =
  typename detail::integer_sequence_push_back_impl<T, I, Sequence>::type;

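The reversed and push_front/push_back helpers are Thrust extensions beyond what C++14 provides; their behavior is easiest to pin down with compile-time checks (illustrative only, derived from the implementations below):

```cpp
#include <thrust/type_traits/integer_sequence.h>
#include <type_traits>

static_assert(
  std::is_same<
    thrust::make_reversed_integer_sequence<int, 4>,
    thrust::integer_sequence<int, 3, 2, 1, 0>
  >::value, "reversed sequence counts down from N - 1");

static_assert(
  std::is_same<
    thrust::integer_sequence_push_front<int, 7, thrust::integer_sequence<int, 0, 1> >,
    thrust::integer_sequence<int, 7, 0, 1>
  >::value, "push_front prepends the new element");

static_assert(
  std::is_same<
    thrust::integer_sequence_push_back<int, 7, thrust::integer_sequence<int, 0, 1> >,
    thrust::integer_sequence<int, 0, 1, 7>
  >::value, "push_back appends the new element");
```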
///////////////////////////////////////////////////////////////////////////////

namespace detail
{

template <typename T, T... Is0, T... Is1>
struct merge_and_renumber_reversed_integer_sequences_impl<
  integer_sequence<T, Is0...>, integer_sequence<T, Is1...>
>
{
  using type = integer_sequence<T, (sizeof...(Is1) + Is0)..., Is1...>;
};

///////////////////////////////////////////////////////////////////////////////

template <typename T, std::size_t N>
struct make_reversed_integer_sequence_impl
{
  using type = merge_and_renumber_reversed_integer_sequences<
    make_reversed_integer_sequence<T, N / 2>
  , make_reversed_integer_sequence<T, N - N / 2>
  >;
};

///////////////////////////////////////////////////////////////////////////////

template <typename T>
struct make_reversed_integer_sequence_impl<T, 0>
{
  using type = integer_sequence<T>;
};

template <typename T>
struct make_reversed_integer_sequence_impl<T, 1>
{
  using type = integer_sequence<T, 0>;
};

///////////////////////////////////////////////////////////////////////////////

template <typename T, T I0, T... Is>
struct integer_sequence_push_front_impl<T, I0, integer_sequence<T, Is...> >
{
  using type = integer_sequence<T, I0, Is...>;
};

///////////////////////////////////////////////////////////////////////////////

template <typename T, T I0, T... Is>
struct integer_sequence_push_back_impl<T, I0, integer_sequence<T, Is...> >
{
  using type = integer_sequence<T, Is..., I0>;
};

///////////////////////////////////////////////////////////////////////////////

} // namespace detail

} // end namespace thrust

#endif // THRUST_CPP_DIALECT >= 2011
spaces/CVPR/lama-example/models/ade20k/segm_lib/nn/modules/comm.py
DELETED
@@ -1,131 +0,0 @@
# -*- coding: utf-8 -*-
# File   : comm.py
# Author : Jiayuan Mao
# Email  : [email protected]
# Date   : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.

import queue
import collections
import threading

__all__ = ['FutureResult', 'SlavePipe', 'SyncMaster']


class FutureResult(object):
    """A thread-safe future implementation. Used only as a one-to-one pipe."""

    def __init__(self):
        self._result = None
        self._lock = threading.Lock()
        self._cond = threading.Condition(self._lock)

    def put(self, result):
        with self._lock:
            assert self._result is None, 'Previous result hasn\'t been fetched.'
            self._result = result
            self._cond.notify()

    def get(self):
        with self._lock:
            if self._result is None:
                self._cond.wait()

            res = self._result
            self._result = None
            return res


_MasterRegistry = collections.namedtuple('MasterRegistry', ['result'])
_SlavePipeBase = collections.namedtuple('_SlavePipeBase', ['identifier', 'queue', 'result'])


class SlavePipe(_SlavePipeBase):
    """Pipe for master-slave communication."""

    def run_slave(self, msg):
        self.queue.put((self.identifier, msg))
        ret = self.result.get()
        self.queue.put(True)
        return ret


class SyncMaster(object):
    """An abstract `SyncMaster` object.

    - During the replication, as the data parallel will trigger a callback on each module, all slave devices should
    call `register(id)` and obtain a `SlavePipe` to communicate with the master.
    - During the forward pass, the master device invokes `run_master`, all messages from slave devices will be collected,
    and passed to a registered callback.
    - After receiving the messages, the master device should gather the information and determine the message to be
    passed back to each slave device.
    """

    def __init__(self, master_callback):
        """

        Args:
            master_callback: a callback to be invoked after having collected messages from slave devices.
        """
        self._master_callback = master_callback
        self._queue = queue.Queue()
        self._registry = collections.OrderedDict()
        self._activated = False

    def register_slave(self, identifier):
        """
        Register a slave device.

        Args:
            identifier: an identifier, usually is the device id.

        Returns: a `SlavePipe` object which can be used to communicate with the master device.

        """
        if self._activated:
            assert self._queue.empty(), 'Queue is not clean before next initialization.'
            self._activated = False
            self._registry.clear()
        future = FutureResult()
        self._registry[identifier] = _MasterRegistry(future)
        return SlavePipe(identifier, self._queue, future)

    def run_master(self, master_msg):
        """
        Main entry for the master device in each forward pass.
        The messages were first collected from each device (including the master device), and then
        a callback will be invoked to compute the message to be sent back to each device
        (including the master device).

        Args:
            master_msg: the message that the master wants to send to itself. This will be placed as the first
            message when calling `master_callback`. For detailed usage, see `_SynchronizedBatchNorm` for an example.

        Returns: the message to be sent back to the master device.

        """
        self._activated = True

        intermediates = [(0, master_msg)]
        for i in range(self.nr_slaves):
            intermediates.append(self._queue.get())

        results = self._master_callback(intermediates)
        assert results[0][0] == 0, 'The first result should belong to the master.'

        for i, res in results:
            if i == 0:
                continue
            self._registry[i].result.put(res)

        for i in range(self.nr_slaves):
            assert self._queue.get() is True

        return results[0][1]

    @property
    def nr_slaves(self):
        return len(self._registry)
spaces/Chirag1994/Melanoma_Skin_Cancer_Detection_App/README.md
DELETED
@@ -1,13 +0,0 @@
---
title: Melanoma Skin Cancer Detection App
emoji: 💩
colorFrom: green
colorTo: blue
sdk: gradio
sdk_version: 3.27.0
app_file: app.py
pinned: false
license: mit
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Chukwuka/FoodVision-Model/README.md
DELETED
@@ -1,275 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: FoodVision Model
|
3 |
-
emoji: 🏢
|
4 |
-
colorFrom: blue
|
5 |
-
colorTo: indigo
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.16.2
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: mit
|
11 |
-
---
|
12 |
-
|
13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
14 |
-
|
15 |
-
# FoodVision-App
|
16 |
-
FoodVision App is an App that can classify three different kinds of food; pizza, steak, sushi respectively.
|
17 |
-
|
18 |
-
<p style='text-align: center'><a href='https://github.com/Sylvesterchuks/foodvision-app'>Click on this link to visit Github Repo</a></p>
|
19 |
-
|
20 |
-
## 09. PyTorch Model Deployment
|
21 |
-
|
22 |
-
Welcome to Milestone Project 3: PyTorch Model Deployment!
|
23 |
-
|
24 |
-
We've come a long way with our FoodVision Mini project.
|
25 |
-
|
26 |
-
But so far our PyTorch models have only been accessible to us.
|
27 |
-
|
28 |
-
How about we bring FoodVision Mini to life and make it publically accessible?
|
29 |
-
|
30 |
-
In other words, **we're going to deploy our FoodVision Mini model to the internet as a usable app!**
|
31 |
-
|
32 |
-
<img src="https://github.com/mrdbourke/pytorch-deep-learning/raw/main/images/09-model-deployment-what-were-doing-demo-trimmed-cropped-small.gif" alt="demo of foodvision mini computer vision model being used on a mobile device to predict on an image of sushi and getting it right" width=900/>
|
33 |
-
|
34 |
-
*Trying out the [deployed version of FoodVision Mini](https://huggingface.co/spaces/mrdbourke/foodvision_mini) (what we're going to build) on my lunch. The model got it right too 🍣!*
|
35 |
-
|
36 |
-
### What is machine learning model deployment?
|
37 |
-
|
38 |
-
**Machine learning model deployment** is the process of making your machine learning model accessible to someone or something else.
|
39 |
-
|
40 |
-
Someone else being a person who can interact with your model in some way.
|
41 |
-
|
42 |
-
For example, someone taking a photo on their smartphone of food and then having our FoodVision Mini model classify it into pizza, steak or sushi.
|
43 |
-
|
44 |
-
Something else might be another program, app or even another model that interacts with your machine learning model(s).
|
45 |
-
|
46 |
-
For example, a banking database might rely on a machine learning model making predictions as to whether a transaction is fraudulent or not before transferring funds.
|
47 |
-
|
48 |
-
Or an operating system may lower its resource consumption based on a machine learning model making predictions on how much power someone generally uses at specific times of day.
|
49 |
-
|
50 |
-
These use cases can be mixed and matched as well.
|
51 |
-
|
52 |
-
For example, a Tesla car's computer vision system will interact with the car's route planning program (something else) and then the route planning program will get inputs and feedback from the driver (someone else).
|
53 |
-
|
54 |
-
<img src="https://raw.githubusercontent.com/mrdbourke/pytorch-deep-learning/main/images/09-what-is-model-deployment-someone-or-something-else.png" width=900 alt="two use cases for model deployment, making your model available to someone else, for example, someone using it in an app, or making it available to something else such as another program or model"/>
|
55 |
-
|
56 |
-
*Machine learning model deployment involves making your model available to someone or something else. For example, someone might use your model as part of a food recognition app (such as FoodVision Mini or [Nutrify](https://nutrify.app)). And something else might be another model or program using your model such as a banking system using a machine learning model to detect if a transaction is fraud or not.*
|
57 |
-
|
58 |
-
|
59 |
-
|
60 |
-
### Why deploy a machine learning model?
|
61 |
-
|
62 |
-
One of the most important philosophical questions in machine learning is:
|
63 |
-
|
64 |
-
<div align="center">
|
65 |
-
<img src="https://raw.githubusercontent.com/mrdbourke/pytorch-deep-learning/main/images/09-does-it-exist.jpeg" alt="curious dinosaur often referred to as philosoraptor asking the question if a machine learning model never leaves a notebook, does it exist?" width=300/>
|
66 |
-
</div>
|
67 |
-
|
68 |
-
Deploying a model is as important as training one.
|
69 |
-
|
70 |
-
Because although you can get a pretty good idea of how your model's going to function by evaluting it on a well crafted test set or visualizing its results, you never really know how it's going to perform until you release it to the wild.
|
71 |
-
|
72 |
-
Having people who've never used your model interact with it will often reveal edge cases you never thought of during training.
|
73 |
-
|
74 |
-
For example, what happens if someone was to upload a photo that *wasn't* of food to our FoodVision Mini model?
|
75 |
-
|
76 |
-
One solution would be to create another model that first classifies images as "food" or "not food" and passing the target image through that model first (this is what [Nutrify](https://nutrify.app) does).
|
77 |
-
|
78 |
-
Then if the image is of "food" it goes to our FoodVision Mini model and gets classified into pizza, steak or sushi.
|
79 |
-
|
80 |
-
And if it's "not food", a message is displayed.
|
81 |
-
|
82 |
-
But what if these predictions were wrong?
|
83 |
-
|
84 |
-
What happens then?
|
85 |
-
|
86 |
-
You can see how these questions could keep going.
|
87 |
-
|
88 |
-
Thus this highlights the importance of model deployment: it helps you figure out errors in your model that aren't obvious during training/testing.
|
89 |
-
|
90 |
-
<img src="https://raw.githubusercontent.com/mrdbourke/pytorch-deep-learning/main/images/09-pytorch-workflow-with-deployment.png" alt="A PyTorch workflow with added model deployment and monitoring step" width=900/>
|
91 |
-
|
92 |
-
*We covered a PyTorch workflow back in [01. PyTorch Workflow](https://www.learnpytorch.io/01_pytorch_workflow/). But once you've got a good model, deployment is a good next step. Monitoring involves seeing how your model goes on the most important data split: data from the real world. For more resources on deployment and monitoring see [PyTorch Extra Resources](https://www.learnpytorch.io/pytorch_extra_resources/#resources-for-machine-learning-and-deep-learning-engineering).*
|
93 |
-
|
94 |
-
|
95 |
-
### Different types of machine learning model deployment
|
96 |
-
|
97 |
-
Whole books could be written on the different types of machine learning model deployment (and many good ones are listed in [PyTorch Extra Resources](https://www.learnpytorch.io/pytorch_extra_resources/#resources-for-machine-learning-and-deep-learning-engineering)).
|
98 |
-
|
99 |
-
And the field is still developing in terms of best practices.
|
100 |
-
|
101 |
-
But I like to start with the question:
|
102 |
-
|
103 |
-
> "What is the most ideal scenario for my machine learning model to be used?"
|
104 |
-
|
105 |
-
And then work backwards from there.
|
106 |
-
|
107 |
-
Of course, you may not know this ahead of time. But you're smart enough to imagine such things.
|
108 |
-
|
109 |
-
In the case of FoodVision Mini, our ideal scenario might be:
|
110 |
-
|
111 |
-
* Someone takes a photo on a mobile device (through an app or web broswer).
|
112 |
-
* The prediction comes back fast.
|
113 |
-
|
114 |
-
Easy.
|
115 |
-
|
116 |
-
So we've got two main criteria:
|
117 |
-
|
118 |
-
1. The model should work on a mobile device (this means there will be some compute constraints).
|
119 |
-
2. The model should make predictions *fast* (because a slow app is a boring app).
|
120 |
-
|
121 |
-
And of course, depending on your use case, your requirements may vary.
|
122 |
-
|
123 |
-
You may notice the above two points break down into another two questions:
|
124 |
-
|
125 |
-
1. **Where's it going to go?** - As in, where is it going to be stored?
|
126 |
-
2. **How's it going to function?** - As in, does it return predictions immediately? Or do they come later?
|
127 |
-
|
128 |
-
<img src="https://raw.githubusercontent.com/mrdbourke/pytorch-deep-learning/main/images/09-deployment-questions-to-ask.png" alt="some questions to ask when starting to deploy machine learning models, what's the model ideal use case, then work backwards and ask where's my model going to go and how's my model going to function" width=900/>
|
129 |
-
|
130 |
-
*When starting to deploy machine learning models, it's helpful to start by asking what's the most ideal use case and then work backwards from there, asking where the model's going to go and then how it's going to function.*
|
131 |
-
|
132 |
-
|
133 |
-
#### Where's it going to go?
|
134 |
-
|
135 |
-
When you deploy your machine learning model, where does it live?
|
136 |
-
|
137 |
-
The main debate here is usually on-device (also called edge/in the browser) or on the cloud (a computer/server that isn't the *actual* device someone/something calls the model from).
|
138 |
-
|
139 |
-
Both have their pros and cons.
|
140 |
-
|
141 |
-
| **Deployment location** | **Pros** | **Cons** |
|
142 |
-
| :----- | :----- | :----- |
|
143 |
-
| **On-device (edge/in the browser)** | Can be very fast (since no data leaves the device) | Limited compute power (larger models take longer to run) |
|
144 |
-
| | Privacy preserving (again no data has to leave the device) | Limited storage space (smaller model size required) |
|
145 |
-
| | No internet connection required (sometimes) | Device-specific skills often required |
|
146 |
-
| | | |
|
147 |
-
| **On cloud** | Near unlimited compute power (can scale up when needed) | Costs can get out of hand (if proper scaling limits aren't enforced) |
|
148 |
-
| | Can deploy one model and use everywhere (via API) | Predictions can be slower due to data having to leave device and predictions having to come back (network latency) |
|
149 |
-
| | Links into existing cloud ecosystem | Data has to leave device (this may cause privacy concerns) |
|
150 |
-
|
151 |
-
There are more details to these but I've left resources in the [extra-curriculum](https://www.learnpytorch.io/09_pytorch_model_deployment/#extra-curriculum) to learn more.

Let's give an example.

If we're deploying FoodVision Mini as an app, we want it to perform well and fast.

So which model would we prefer?

1. A model on-device that performs at 95% accuracy with an inference time (latency) of one second per prediction.
2. A model on the cloud that performs at 98% accuracy with an inference time of 10 seconds per prediction (a bigger, better model, but it takes longer to compute).

I've made these numbers up, but they showcase a potential difference between on-device and on-the-cloud deployment.

Option 1 could potentially be a smaller, less performant model that runs fast because it's able to fit on a mobile device.

Option 2 could potentially be a larger, more performant model that requires more compute and storage, but takes a bit longer to run because we have to send data off the device and get it back (so even though the actual prediction might be fast, the network time and data transfer have to be factored in).

For FoodVision Mini, we'd likely prefer option 1, because the small hit in performance is far outweighed by the faster inference speed.

<img src="https://raw.githubusercontent.com/mrdbourke/pytorch-deep-learning/main/images/09-model-deployment-on-device-vs-cloud.png" width=900 alt="tesla computer vision system on device vs on the cloud"/>

*In the case of a Tesla car's computer vision system, which would be better? A smaller model that performs well on device (the model is on the car) or a larger model that performs better but lives on the cloud? In this case, you'd much prefer the model being on the car. The extra network time it would take for data to go from the car to the cloud and then back to the car just wouldn't be worth it (or potentially even possible in poor signal areas).*

> **Note:** For a full example of what it's like to deploy a PyTorch model to an edge device, see the [PyTorch tutorial on achieving real-time inference (30fps+)](https://pytorch.org/tutorials/intermediate/realtime_rpi.html) with a computer vision model on a Raspberry Pi.

#### Ways to deploy a machine learning model

We've discussed a couple of options for deploying machine learning models (on-device and cloud).

And each of these will have its own specific requirements:

| **Tool/resource** | **Deployment type** |
| :----- | :----- |
| [Google's ML Kit](https://developers.google.com/ml-kit) | On-device (Android and iOS) |
| [Apple's Core ML](https://developer.apple.com/documentation/coreml) and [`coremltools` Python package](https://coremltools.readme.io/docs) | On-device (all Apple devices) |
| [Amazon Web Service's (AWS) Sagemaker](https://aws.amazon.com/sagemaker/) | Cloud |
| [Google Cloud's Vertex AI](https://cloud.google.com/vertex-ai) | Cloud |
| [Microsoft's Azure Machine Learning](https://azure.microsoft.com/en-au/services/machine-learning/) | Cloud |
| [Hugging Face Spaces](https://huggingface.co/spaces) | Cloud |
| API with [FastAPI](https://fastapi.tiangolo.com) | Cloud/self-hosted server |
| API with [TorchServe](https://pytorch.org/serve/) | Cloud/self-hosted server |
| [ONNX (Open Neural Network Exchange)](https://onnx.ai/index.html) | Many/general |
| Many more... | |

> **Note:** An [application programming interface (API)](https://en.wikipedia.org/wiki/API) is a way for two (or more) computer programs to interact with each other. For example, if your model was deployed as an API, you would be able to write a program that could send data to it and then receive predictions back.
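
To make the note above concrete, here's a minimal sketch of what a client of a model API might look like. The endpoint URL and the response format are hypothetical placeholders (whatever service hosts your model defines the real ones); the point is just that a deployed model boils down to "send data, get predictions back":

```python
import requests

# Hypothetical endpoint - a real deployment would define its own URL and schema.
API_URL = "https://example.com/foodvision_mini/predict"

# Send an image file to the model's API...
with open("04-pizza-dad.jpeg", "rb") as f:
    response = requests.post(API_URL, files={"image": f})

# ...and receive predictions back (the response format here is also hypothetical).
print(response.json())  # e.g. {"pizza": 0.98, "steak": 0.01, "sushi": 0.01}
```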

Which option you choose will be highly dependent on what you're building and who you're working with.

But with so many options, it can be very intimidating.

So it's best to start small and keep it simple.

And one of the best ways to do so is by turning your machine learning model into a demo app with [Gradio](https://gradio.app) and then deploying it on Hugging Face Spaces.

We'll be doing just that with FoodVision Mini later on.
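
As a taste of how little code a Gradio demo needs, here's a minimal sketch. The `predict` function below is a stand-in (ours will run a trained FoodVision Mini model over the input image and return class probabilities); the rest is standard Gradio:

```python
import gradio as gr

def predict(img):
    # Stand-in for a real prediction function: run the image through a trained
    # model and return a {class_name: probability} dictionary.
    return {"pizza": 0.98, "steak": 0.01, "sushi": 0.01}

demo = gr.Interface(fn=predict,                           # maps inputs to outputs
                    inputs=gr.Image(type="pil"),          # what goes in
                    outputs=gr.Label(num_top_classes=3),  # what comes out
                    title="FoodVision Mini")

demo.launch()  # serve the demo locally
```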

<img src="https://raw.githubusercontent.com/mrdbourke/pytorch-deep-learning/main/images/09-tools-and-places-to-deploy-ml-models.png" alt="tools and places to deploy machine learning models" width=900/>

*A handful of places and tools to host and deploy machine learning models. There are plenty I've missed, so if you'd like to add more, please leave a [discussion on GitHub](https://github.com/mrdbourke/pytorch-deep-learning/discussions).*

### What we're going to learn

Enough talking about deploying a machine learning model.

Let's become machine learning engineers and actually deploy one.

Our goal is to deploy our FoodVision Mini model via a demo Gradio app with the following metrics:
1. **Performance:** 95%+ accuracy.
2. **Speed:** real-time inference of 30FPS+ (each prediction has a latency lower than ~0.03s, since 1/30 ≈ 0.033s) — see the timing sketch below.
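
Here's a minimal sketch of how we might check a model against that speed goal, assuming `model` is a trained PyTorch model and `transformed_img` is a single preprocessed image tensor (both names are placeholders here):

```python
import time
import torch

def time_prediction(model: torch.nn.Module, transformed_img: torch.Tensor) -> float:
    """Return the time taken (in seconds) for a single forward pass."""
    model.eval()
    with torch.inference_mode():
        start = time.perf_counter()
        model(transformed_img.unsqueeze(0))  # add a batch dimension
        end = time.perf_counter()
    return end - start

# For the 30FPS+ goal we'd want: time_prediction(model, transformed_img) < 0.03
```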

Then we'll deploy the one which performs closest to our goal metrics.

Finally, we'll finish with a (BIG) surprise bonus.

| **Topic** | **Contents** |
| :----- | :----- |
| **0. Getting setup** | We've written a fair bit of useful code over the past few sections, let's download it and make sure we can use it again. |
| **1. Get data** | Let's download the [`pizza_steak_sushi_20_percent.zip`](https://github.com/mrdbourke/pytorch-deep-learning/blob/main/data/pizza_steak_sushi_20_percent.zip) dataset so we can train our previously best performing models on the same dataset. |
| **2. Creating an EfficientNetB2 feature extractor** | An EfficientNetB2 feature extractor performed the best on our pizza, steak, sushi dataset in [07. PyTorch Experiment Tracking](https://www.learnpytorch.io/07_pytorch_experiment_tracking/), let's recreate it as a candidate for deployment. |
| **3. Making predictions with our trained models and timing them** | We've built two of the best performing models yet, let's make predictions with them and track their results. |
| **4. Bringing FoodVision Mini to life by creating a Gradio demo** | One of our models performs better than the other (in terms of our goals), so let's turn it into a working app demo! |
| **5. Turning our FoodVision Mini Gradio demo into a deployable app** | Our Gradio app demo works locally, let's prepare it for deployment! |
| **6. Deploying our Gradio demo to HuggingFace Spaces** | Let's take FoodVision Mini to the web and make it publicly accessible for all! |

#### Uploading to Hugging Face

We've verified our FoodVision Mini app works locally; however, the fun of creating a machine learning demo is to show it to other people and allow them to use it.

To do so, we're going to upload our FoodVision Mini demo to Hugging Face.

> **Note:** The following series of steps uses a Git (a file tracking system) workflow. For more on how Git works, I'd recommend going through the [Git and GitHub for Beginners tutorial](https://youtu.be/RGOj5yH7evk) on freeCodeCamp.

1. [Sign up](https://huggingface.co/join) for a Hugging Face account.
2. Start a new Hugging Face Space by going to your profile and then [clicking "New Space"](https://huggingface.co/new-space).
    * **Note:** A Space in Hugging Face is also known as a "code repository" (a place to store your code/files) or "repo" for short.
3. Give the Space a name, for example, mine is called `mrdbourke/foodvision_mini`, you can see it here: https://huggingface.co/spaces/mrdbourke/foodvision_mini
4. Select a license (I used [MIT](https://opensource.org/licenses/MIT)).
5. Select Gradio as the Space SDK (software development kit).
    * **Note:** You can use other options such as Streamlit, but since our app is built with Gradio, we'll stick with that.
6. Choose whether your Space is public or private (I selected public since I'd like my Space to be available to others).
7. Click "Create Space".
8. Clone the repo locally by running something like: `git clone https://huggingface.co/spaces/[YOUR_USERNAME]/[YOUR_SPACE_NAME]` in terminal or command prompt.
    * **Note:** You can also add files via uploading them under the "Files and versions" tab.
9. Copy/move the contents of the downloaded `foodvision_mini` folder to the cloned repo folder.
10. To upload and track larger files (e.g. files over 10MB or, in our case, our PyTorch model file) you'll need to [install Git LFS](https://git-lfs.github.com/) (which stands for "git large file storage").
11. After you've installed Git LFS, you can activate it by running `git lfs install`.
12. In the `foodvision_mini` directory, track the files over 10MB with Git LFS with `git lfs track "*.file_extension"`.
    * Track the EffNetB2 PyTorch model file with `git lfs track "09_pretrained_effnetb2_feature_extractor_pizza_steak_sushi_20_percent.pth"`.
13. Track `.gitattributes` (automatically created when cloning from Hugging Face, this file will help ensure our larger files are tracked with Git LFS). You can see an example `.gitattributes` file on the [FoodVision Mini Hugging Face Space](https://huggingface.co/spaces/mrdbourke/foodvision_mini/blob/main/.gitattributes).
    * `git add .gitattributes`
14. Add the rest of the `foodvision_mini` app files and commit them with:
    * `git add *`
    * `git commit -m "first commit"`
15. Push (upload) the files to Hugging Face:
    * `git push`
16. Wait 3-5 minutes for the build to happen (future builds are faster) and your app to become live!

If everything worked, you should see a live running example of our FoodVision Mini Gradio demo like the one here: https://huggingface.co/spaces/mrdbourke/foodvision_mini

And we can even embed our FoodVision Mini Gradio demo into our notebook as an [iframe](https://gradio.app/sharing_your_app/#embedding-with-iframes) with [`IPython.display.IFrame`](https://ipython.readthedocs.io/en/stable/api/generated/IPython.display.html#IPython.display.IFrame) and a link to our space in the format `https://hf.space/embed/[YOUR_USERNAME]/[YOUR_SPACE_NAME]/+`.
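
For example, embedding my FoodVision Mini Space (swap in your own username and Space name as needed):

```python
from IPython.display import IFrame

# Embed the live Gradio demo from Hugging Face Spaces inside the notebook
IFrame(src="https://hf.space/embed/mrdbourke/foodvision_mini/+", width=900, height=750)
```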

<a href="https://huggingface.co/spaces/Chukwuka/FoodVision-Model">Click on this link to try out the FoodVision app</a>
spaces/CikeyQI/Yunzai/Yunzai/plugins/ws-plugin/config/system/cfg_system.js
DELETED
@@ -1,201 +0,0 @@
```js
// Config schema for the ws-plugin: websocket connection, message, notice,
// and request settings (titles/descriptions are user-facing Chinese strings).
export const cfgSchema = {
  ws: {
    title: 'ws连接设置,改动此设置会将所有已连接强制断开重连',
    cfg: {
      heartbeatInterval: {
        title: '心跳频率',
        key: '心跳',
        type: 'num',
        def: 5,
        input: (n) => {
          if (n >= 0) {
            return n * 1
          } else {
            return 5
          }
        },
        desc: '单位:秒,0为关闭心跳',
        fileName: 'ws-config'
      },
      messagePostFormat: {
        title: '上报数据类型',
        key: '上报',
        type: 'num',
        def: 2,
        input: (n) => Math.min(2, Math.max(n * 1 || 0, 1)),
        desc: '上报数据类型: 1:string 2:array',
        fileName: 'ws-config'
      }
    }
  },
  msg: {
    title: '发送消息相关设置',
    cfg: {
      disconnectToMaster: {
        title: '断连通知',
        key: '断连通知',
        def: false,
        desc: '断开连接时是否通知主人',
        fileName: 'msg-config'
      },
      reconnectToMaster: {
        title: '重连通知',
        key: '重连通知',
        def: false,
        desc: '重新连接成功时是否通知主人',
        fileName: 'msg-config'
      },
      firstconnectToMaster: {
        title: '首连通知',
        key: '首连通知',
        def: false,
        desc: '首次连接时是否通知主人成功还是失败',
        fileName: 'msg-config'
      },
      howToMaster: {
        title: '通知哪个主人',
        key: '主人',
        type: 'num',
        input: (n) => {
          if (n >= 0) {
            return n * 1
          } else {
            return 1
          }
        },
        def: 1,
        desc: `通知主人列表的第几个主人,为0时通知全部主人`,
        fileName: 'msg-config'
      },
      muteStop: {
        title: '禁言拦截',
        key: '禁言拦截',
        def: false,
        desc: '被禁言或者全体禁言时是否拦截消息不上报',
        fileName: 'msg-config'
      },
      redSendForwardMsgType: {
        title: 'red转发方式',
        key: 'red转发',
        type: 'num',
        def: 1,
        desc: 'red 发送伪造转发消息方式 1:伪造转发 2:陆续发送 3:合并发送',
        input: (n) => Math.min(3, Math.max(n * 1 || 0, 1)),
        fileName: 'msg-config'
      },
      msgStoreTime: {
        title: '消息存储时间',
        key: '存储',
        type: 'num',
        input: (n) => {
          if (n >= 0) {
            return n * 1
          } else {
            return 600
          }
        },
        def: 600,
        desc: '用于撤回和回复消息,如果超过时间去获取就会获取不到,单位秒,0不存储',
        fileName: 'msg-config'
      }
    }
  },
  notice: {
    title: '通知相关设置',
    cfg: {
      groupAdmin: {
        title: '管理员变动',
        key: '管理',
        def: false,
        desc: '群管理员变动是否上报',
        fileName: 'notice-config'
      },
      groupDecrease: {
        title: '群成员减少',
        key: '群员减少',
        def: false,
        desc: '群成员减少是否上报',
        fileName: 'notice-config'
      },
      groupIncrease: {
        title: '群成员增加',
        key: '群员增加',
        def: false,
        desc: '群成员增加是否上报',
        fileName: 'notice-config'
      },
      groupBan: {
        title: '群禁言',
        key: '禁言',
        def: false,
        desc: '群禁言是否上报',
        fileName: 'notice-config'
      },
      friendIncrease: {
        title: '好友添加',
        key: '好友添加',
        def: false,
        desc: '好友添加是否上报',
        fileName: 'notice-config'
      },
      groupRecall: {
        title: '群消息撤回',
        key: '群撤回',
        def: false,
        desc: '群消息撤回是否上报',
        fileName: 'notice-config'
      },
      friendRecall: {
        title: '好友消息撤回',
        key: '好友撤回',
        def: false,
        desc: '好友消息撤回是否上报',
        fileName: 'notice-config'
      },
      groupPoke: {
        title: '群内戳一戳',
        key: '戳一戳',
        def: false,
        desc: '群内戳一戳是否上报',
        fileName: 'notice-config'
      },
    }
  },
  request: {
    title: '请求相关设置',
    cfg: {
      friendAdd: {
        title: '好友申请',
        key: '好友申请',
        def: false,
        desc: '好友申请是否上报',
        fileName: 'request-config'
      },
      groupInvite: {
        title: '群聊邀请',
        key: '群邀请',
        def: false,
        desc: '群聊邀请是否上报 (邀请机器人入群)',
        fileName: 'request-config'
      },
      groupAdd: {
        title: '群聊申请',
        key: '群申请',
        def: false,
        desc: '群聊申请是否上报 (申请加入群聊)', // repaired mojibake in this string
        fileName: 'request-config'
      },
    }
  },
  setAll: {
    title: '一键操作',
    cfg: {
      setAll: {
        title: '全部设置',
        key: '全部',
        def: false,
        desc: '一键 开启/关闭 全部设置项'
      }
    }
  }
}
```
spaces/CikeyQI/meme-api/meme_generator/memes/karyl_point/__init__.py
DELETED
@@ -1,18 +0,0 @@
```python
from pathlib import Path
from typing import List

from pil_utils import BuildImage

from meme_generator import add_meme

img_dir = Path(__file__).parent / "images"


def karyl_point(images: List[BuildImage], texts, args):
    img = images[0].convert("RGBA").rotate(7.5, expand=True).resize((225, 225))
    frame = BuildImage.open(img_dir / "0.png")
    frame.paste(img, (87, 790), alpha=True)
    return frame.save_png()


add_meme("karyl_point", karyl_point, min_images=1, max_images=1, keywords=["凯露指"])
```
spaces/CofAI/chat.v1/README.md
DELETED
@@ -1,11 +0,0 @@
```markdown
---
title: Chat.CofAI
emoji: 🗨☕💬
colorFrom: indigo
colorTo: green
sdk: docker
pinned: false
duplicated_from: null
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
```
spaces/CofAI/chat/g4f/Provider/Providers/Aichat.py
DELETED
@@ -1,35 +0,0 @@
```python
import requests
import os
import json
from ...typing import sha256, Dict, get_type_hints

url = 'https://hteyun.com'
model = ['gpt-3.5-turbo', 'gpt-3.5-turbo-16k', 'gpt-3.5-turbo-16k-0613', 'gpt-3.5-turbo-0613']
supports_stream = True
needs_auth = False


def _create_completion(model: str, messages: list, stream: bool, temperature: float = 0.7, **kwargs):
    headers = {
        'Content-Type': 'application/json',
    }
    data = {
        'model': model,
        'temperature': temperature,  # was hardcoded to 0.7, ignoring the parameter
        'presence_penalty': 0,
        'messages': messages,
    }
    # `headers` was built but never sent in the original; pass it along here.
    response = requests.post(url + '/api/chat-stream',
                             headers=headers, json=data, stream=True)

    if stream:
        for chunk in response.iter_content(chunk_size=None):
            chunk = chunk.decode('utf-8')
            if chunk.strip():
                message = json.loads(chunk)['choices'][0]['message']['content']
                yield message
    else:
        message = response.json()['choices'][0]['message']['content']
        yield message


params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join(
        [f"{name}: {get_type_hints(_create_completion)[name].__name__}"
         for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
```
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fsspec/implementations/dbfs.py
DELETED
@@ -1,457 +0,0 @@
```python
import base64
import urllib

import requests

from fsspec import AbstractFileSystem
from fsspec.spec import AbstractBufferedFile


class DatabricksException(Exception):
    """
    Helper class for exceptions raised in this module.
    """

    def __init__(self, error_code, message):
        """Create a new DatabricksException"""
        super().__init__(message)

        self.error_code = error_code
        self.message = message


class DatabricksFileSystem(AbstractFileSystem):
    """
    Get access to the Databricks filesystem implementation over HTTP.
    Can be used inside and outside of a databricks cluster.
    """

    def __init__(self, instance, token, **kwargs):
        """
        Create a new DatabricksFileSystem.

        Parameters
        ----------
        instance: str
            The instance URL of the databricks cluster.
            For example for an Azure databricks cluster, this
            has the form adb-<some-number>.<two digits>.azuredatabricks.net.
        token: str
            Your personal token. Find out more
            here: https://docs.databricks.com/dev-tools/api/latest/authentication.html
        """
        self.instance = instance
        self.token = token

        self.session = requests.Session()
        self.session.headers.update({"Authorization": f"Bearer {self.token}"})

        super().__init__(**kwargs)

    def ls(self, path, detail=True):
        """
        List the contents of the given path.

        Parameters
        ----------
        path: str
            Absolute path
        detail: bool
            Return not only the list of filenames,
            but also additional information on file sizes
            and types.
        """
        out = self._ls_from_cache(path)
        if not out:
            try:
                r = self._send_to_api(
                    method="get", endpoint="list", json={"path": path}
                )
            except DatabricksException as e:
                if e.error_code == "RESOURCE_DOES_NOT_EXIST":
                    raise FileNotFoundError(e.message)

                raise e
            files = r["files"]
            out = [
                {
                    "name": o["path"],
                    "type": "directory" if o["is_dir"] else "file",
                    "size": o["file_size"],
                }
                for o in files
            ]
            self.dircache[path] = out

        if detail:
            return out
        return [o["name"] for o in out]

    def makedirs(self, path, exist_ok=True):
        """
        Create a given absolute path and all of its parents.

        Parameters
        ----------
        path: str
            Absolute path to create
        exist_ok: bool
            If false, checks if the folder
            exists before creating it (and raises an
            Exception if this is the case)
        """
        if not exist_ok:
            try:
                # If the following succeeds, the path is already present
                self._send_to_api(
                    method="get", endpoint="get-status", json={"path": path}
                )
                raise FileExistsError(f"Path {path} already exists")
            except DatabricksException as e:
                if e.error_code == "RESOURCE_DOES_NOT_EXIST":
                    pass

        try:
            self._send_to_api(method="post", endpoint="mkdirs", json={"path": path})
        except DatabricksException as e:
            if e.error_code == "RESOURCE_ALREADY_EXISTS":
                raise FileExistsError(e.message)

            raise e
        self.invalidate_cache(self._parent(path))

    def mkdir(self, path, create_parents=True, **kwargs):
        """
        Create a given absolute path and all of its parents.

        Parameters
        ----------
        path: str
            Absolute path to create
        create_parents: bool
            Whether to create all parents or not.
            "False" is not implemented so far.
        """
        if not create_parents:
            raise NotImplementedError

        self.mkdirs(path, **kwargs)

    def rm(self, path, recursive=False):
        """
        Remove the file or folder at the given absolute path.

        Parameters
        ----------
        path: str
            Absolute path what to remove
        recursive: bool
            Recursively delete all files in a folder.
        """
        try:
            self._send_to_api(
                method="post",
                endpoint="delete",
                json={"path": path, "recursive": recursive},
            )
        except DatabricksException as e:
            # This is not really an exception, it just means
            # not everything was deleted so far
            if e.error_code == "PARTIAL_DELETE":
                self.rm(path=path, recursive=recursive)
            elif e.error_code == "IO_ERROR":
                # Using the same exception as the os module would use here
                raise OSError(e.message)

            raise e
        self.invalidate_cache(self._parent(path))

    def mv(self, source_path, destination_path, recursive=False, maxdepth=None):
        """
        Move a source to a destination path.

        A note from the original [databricks API manual]
        (https://docs.databricks.com/dev-tools/api/latest/dbfs.html#move).

        When moving a large number of files the API call will time out after
        approximately 60s, potentially resulting in partially moved data.
        Therefore, for operations that move more than 10k files, we strongly
        discourage using the DBFS REST API.

        Parameters
        ----------
        source_path: str
            From where to move (absolute path)
        destination_path: str
            To where to move (absolute path)
        recursive: bool
            Not implemented so far.
        maxdepth:
            Not implemented so far.
        """
        if recursive:
            raise NotImplementedError
        if maxdepth:
            raise NotImplementedError

        try:
            self._send_to_api(
                method="post",
                endpoint="move",
                json={"source_path": source_path, "destination_path": destination_path},
            )
        except DatabricksException as e:
            if e.error_code == "RESOURCE_DOES_NOT_EXIST":
                raise FileNotFoundError(e.message)
            elif e.error_code == "RESOURCE_ALREADY_EXISTS":
                raise FileExistsError(e.message)

            raise e
        self.invalidate_cache(self._parent(source_path))
        self.invalidate_cache(self._parent(destination_path))

    def _open(self, path, mode="rb", block_size="default", **kwargs):
        """
        Overwrite the base class method to make sure to create a DBFile.
        All arguments are copied from the base method.

        Only the default blocksize is allowed.
        """
        return DatabricksFile(self, path, mode=mode, block_size=block_size, **kwargs)

    def _send_to_api(self, method, endpoint, json):
        """
        Send the given json to the DBFS API
        using a get or post request (specified by the argument `method`).

        Parameters
        ----------
        method: str
            Which http method to use for communication; "get" or "post".
        endpoint: str
            Where to send the request to (last part of the API URL)
        json: dict
            Dictionary of information to send
        """
        if method == "post":
            session_call = self.session.post
        elif method == "get":
            session_call = self.session.get
        else:
            raise ValueError(f"Do not understand method {method}")

        url = urllib.parse.urljoin(f"https://{self.instance}/api/2.0/dbfs/", endpoint)

        r = session_call(url, json=json)

        # The DBFS API will return a json, also in case of an exception.
        # We want to preserve this information as well as possible.
        try:
            r.raise_for_status()
        except requests.HTTPError as e:
            # try to extract json error message
            # if that fails, fall back to the original exception
            try:
                exception_json = e.response.json()
            except Exception:
                raise e

            raise DatabricksException(**exception_json)

        return r.json()

    def _create_handle(self, path, overwrite=True):
        """
        Internal function to create a handle, which can be used to
        write blocks of a file to DBFS.
        A handle has a unique identifier which needs to be passed
        whenever written during this transaction.
        The handle is active for 10 minutes - after that a new
        write transaction needs to be created.
        Make sure to close the handle after you are finished.

        Parameters
        ----------
        path: str
            Absolute path for this file.
        overwrite: bool
            If a file already exist at this location, either overwrite
            it or raise an exception.
        """
        try:
            r = self._send_to_api(
                method="post",
                endpoint="create",
                json={"path": path, "overwrite": overwrite},
            )
            return r["handle"]
        except DatabricksException as e:
            if e.error_code == "RESOURCE_ALREADY_EXISTS":
                raise FileExistsError(e.message)

            raise e

    def _close_handle(self, handle):
        """
        Close a handle, which was opened by :func:`_create_handle`.

        Parameters
        ----------
        handle: str
            Which handle to close.
        """
        try:
            self._send_to_api(method="post", endpoint="close", json={"handle": handle})
        except DatabricksException as e:
            if e.error_code == "RESOURCE_DOES_NOT_EXIST":
                raise FileNotFoundError(e.message)

            raise e

    def _add_data(self, handle, data):
        """
        Upload data to an already opened file handle
        (opened by :func:`_create_handle`).
        The maximal allowed data size is 1MB after
        conversion to base64.
        Remember to close the handle when you are finished.

        Parameters
        ----------
        handle: str
            Which handle to upload data to.
        data: bytes
            Block of data to add to the handle.
        """
        data = base64.b64encode(data).decode()
        try:
            self._send_to_api(
                method="post",
                endpoint="add-block",
                json={"handle": handle, "data": data},
            )
        except DatabricksException as e:
            if e.error_code == "RESOURCE_DOES_NOT_EXIST":
                raise FileNotFoundError(e.message)
            elif e.error_code == "MAX_BLOCK_SIZE_EXCEEDED":
                raise ValueError(e.message)

            raise e

    def _get_data(self, path, start, end):
        """
        Download data in bytes from a given absolute path in a block
        from [start, start+length].
        The maximum number of allowed bytes to read is 1MB.

        Parameters
        ----------
        path: str
            Absolute path to download data from
        start: int
            Start position of the block
        end: int
            End position of the block
        """
        try:
            r = self._send_to_api(
                method="get",
                endpoint="read",
                json={"path": path, "offset": start, "length": end - start},
            )
            return base64.b64decode(r["data"])
        except DatabricksException as e:
            if e.error_code == "RESOURCE_DOES_NOT_EXIST":
                raise FileNotFoundError(e.message)
            elif e.error_code in ["INVALID_PARAMETER_VALUE", "MAX_READ_SIZE_EXCEEDED"]:
                raise ValueError(e.message)

            raise e

    def invalidate_cache(self, path=None):
        if path is None:
            self.dircache.clear()
        else:
            self.dircache.pop(path, None)
        super().invalidate_cache(path)


class DatabricksFile(AbstractBufferedFile):
    """
    Helper class for files referenced in the DatabricksFileSystem.
    """

    DEFAULT_BLOCK_SIZE = 1 * 2**20  # only allowed block size

    def __init__(
        self,
        fs,
        path,
        mode="rb",
        block_size="default",
        autocommit=True,
        cache_type="readahead",
        cache_options=None,
        **kwargs,
    ):
        """
        Create a new instance of the DatabricksFile.

        The blocksize needs to be the default one.
        """
        if block_size is None or block_size == "default":
            block_size = self.DEFAULT_BLOCK_SIZE

        assert (
            block_size == self.DEFAULT_BLOCK_SIZE
        ), f"Only the default block size is allowed, not {block_size}"

        super().__init__(
            fs,
            path,
            mode=mode,
            block_size=block_size,
            autocommit=autocommit,
            cache_type=cache_type,
            cache_options=cache_options or {},
            **kwargs,
        )

    def _initiate_upload(self):
        """Internal function to start a file upload"""
        self.handle = self.fs._create_handle(self.path)

    def _upload_chunk(self, final=False):
        """Internal function to add a chunk of data to a started upload"""
        self.buffer.seek(0)
        data = self.buffer.getvalue()

        data_chunks = [
            data[start:end] for start, end in self._to_sized_blocks(len(data))
        ]

        for data_chunk in data_chunks:
            self.fs._add_data(handle=self.handle, data=data_chunk)

        if final:
            self.fs._close_handle(handle=self.handle)
            return True

    def _fetch_range(self, start, end):
        """Internal function to download a block of data"""
        return_buffer = b""
        length = end - start
        for chunk_start, chunk_end in self._to_sized_blocks(length, start):
            return_buffer += self.fs._get_data(
                path=self.path, start=chunk_start, end=chunk_end
            )

        return return_buffer

    def _to_sized_blocks(self, length, start=0):
        """Helper function to split a range from 0 to total_length into block sizes"""
        end = start + length
        for data_chunk in range(start, end, self.blocksize):
            data_start = data_chunk
            data_end = min(end, data_chunk + self.blocksize)
            yield data_start, data_end
```
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/components/slider.py
DELETED
@@ -1,210 +0,0 @@
```python
"""gr.Slider() component."""

from __future__ import annotations

import math
import random
from typing import Any, Callable, Literal

import numpy as np
from gradio_client.documentation import document, set_documentation_group
from gradio_client.serializing import NumberSerializable

from gradio.components.base import FormComponent, IOComponent, _Keywords
from gradio.deprecation import warn_style_method_deprecation
from gradio.events import Changeable, Inputable, Releaseable
from gradio.interpretation import NeighborInterpretable

set_documentation_group("component")


@document()
class Slider(
    FormComponent,
    Changeable,
    Inputable,
    Releaseable,
    IOComponent,
    NumberSerializable,
    NeighborInterpretable,
):
    """
    Creates a slider that ranges from `minimum` to `maximum` with a step size of `step`.
    Preprocessing: passes slider value as a {float} into the function.
    Postprocessing: expects an {int} or {float} returned from function and sets slider value to it as long as it is within range.
    Examples-format: A {float} or {int} representing the slider's value.

    Demos: sentence_builder, slider_release, generate_tone, titanic_survival, interface_random_slider, blocks_random_slider
    Guides: create-your-own-friends-with-a-gan
    """

    def __init__(
        self,
        minimum: float = 0,
        maximum: float = 100,
        value: float | Callable | None = None,
        *,
        step: float | None = None,
        label: str | None = None,
        info: str | None = None,
        every: float | None = None,
        show_label: bool | None = None,
        container: bool = True,
        scale: int | None = None,
        min_width: int = 160,
        interactive: bool | None = None,
        visible: bool = True,
        elem_id: str | None = None,
        elem_classes: list[str] | str | None = None,
        randomize: bool = False,
        **kwargs,
    ):
        """
        Parameters:
            minimum: minimum value for slider.
            maximum: maximum value for slider.
            value: default value. If callable, the function will be called whenever the app loads to set the initial value of the component. Ignored if randomized=True.
            step: increment between slider values.
            label: component name in interface.
            info: additional component description.
            every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.
            show_label: if True, will display label.
            container: If True, will place the component in a container - providing some extra padding around the border.
            scale: relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.
            min_width: minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.
            interactive: if True, slider will be adjustable; if False, adjusting will be disabled. If not provided, this is inferred based on whether the component is used as an input or output.
            visible: If False, component will be hidden.
            elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
            elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.
            randomize: If True, the value of the slider when the app loads is taken uniformly at random from the range given by the minimum and maximum.
        """
        self.minimum = minimum
        self.maximum = maximum
        if step is None:
            difference = maximum - minimum
            power = math.floor(math.log10(difference) - 2)
            self.step = 10**power
        else:
            self.step = step
        if randomize:
            value = self.get_random_value
        IOComponent.__init__(
            self,
            label=label,
            info=info,
            every=every,
            show_label=show_label,
            container=container,
            scale=scale,
            min_width=min_width,
            interactive=interactive,
            visible=visible,
            elem_id=elem_id,
            elem_classes=elem_classes,
            value=value,
            **kwargs,
        )
        NeighborInterpretable.__init__(self)

    def api_info(self) -> dict[str, dict | bool]:
        return {
            "info": {
                "type": "number",
                "description": f"numeric value between {self.minimum} and {self.maximum}",
            },
            "serialized_info": False,
        }

    def example_inputs(self) -> dict[str, Any]:
        return {
            "raw": self.minimum,
            "serialized": self.minimum,
        }

    def get_config(self):
        return {
            "minimum": self.minimum,
            "maximum": self.maximum,
            "step": self.step,
            "value": self.value,
            **IOComponent.get_config(self),
        }

    def get_random_value(self):
        n_steps = int((self.maximum - self.minimum) / self.step)
        step = random.randint(0, n_steps)
        value = self.minimum + step * self.step
        # Round to number of decimals in step so that UI doesn't display long decimals
        n_decimals = max(str(self.step)[::-1].find("."), 0)
        if n_decimals:
            value = round(value, n_decimals)
        return value

    @staticmethod
    def update(
        value: float | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE,
        minimum: float | None = None,
        maximum: float | None = None,
        step: float | None = None,
        label: str | None = None,
        info: str | None = None,
        show_label: bool | None = None,
        container: bool | None = None,
        scale: int | None = None,
        min_width: int | None = None,
        interactive: bool | None = None,
        visible: bool | None = None,
    ):
        return {
            "minimum": minimum,
            "maximum": maximum,
            "step": step,
            "label": label,
            "info": info,
            "show_label": show_label,
            "container": container,
            "scale": scale,
            "min_width": min_width,
            "interactive": interactive,
            "visible": visible,
            "value": value,
            "__type__": "update",
        }

    def postprocess(self, y: float | None) -> float | None:
        """
        Any postprocessing needed to be performed on function output.
        Parameters:
            y: numeric output
        Returns:
            numeric output or minimum number if None
        """
        return self.minimum if y is None else y

    def set_interpret_parameters(self, steps: int = 8) -> Slider:
        """
        Calculates interpretation scores of numeric values ranging between the minimum and maximum values of the slider.
        Parameters:
            steps: Number of neighboring values to measure between the minimum and maximum values of the slider range.
        """
        self.interpretation_steps = steps
        return self

    def get_interpretation_neighbors(self, x) -> tuple[object, dict]:
        return (
            np.linspace(self.minimum, self.maximum, self.interpretation_steps).tolist(),
            {},
        )

    def style(
        self,
        *,
        container: bool | None = None,
    ):
        """
        This method is deprecated. Please set these arguments in the constructor instead.
        """
        warn_style_method_deprecation()
        if container is not None:
            self.container = container
        return self
```
spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/utils/writer.py
DELETED
@@ -1,56 +0,0 @@
```python
"""
@Date: 2021/11/06
@description:
"""
import cv2
import numpy as np


def xyz2json(xyz, ratio, camera_height=1.6):
    xyz = xyz * camera_height
    ceiling_height = camera_height * ratio
    layout_height = camera_height + ceiling_height
    data = {
        'cameraHeight': camera_height,
        'layoutHeight': layout_height,
        'cameraCeilingHeight': ceiling_height,
        'layoutObj2ds': {
            'num': 0,
            'obj2ds': []
        },
        'layoutPoints': {
            'num': xyz.shape[0],
            'points': []
        },
        'layoutWalls': {
            'num': xyz.shape[0],
            'walls': []
        }
    }

    xyz = np.concatenate([xyz, xyz[0:1, :]], axis=0)
    R_180 = cv2.Rodrigues(np.array([0, -1 * np.pi, 0], np.float32))[0]
    for i in range(xyz.shape[0] - 1):
        a = np.dot(R_180, xyz[i, :])
        a[0] *= -1
        b = np.dot(R_180, xyz[i + 1, :])
        b[0] *= -1
        c = a.copy()
        c[1] = 0
        normal = np.cross(a - b, a - c)
        normal /= np.linalg.norm(normal)
        d = -np.sum(normal * a)
        plane = np.asarray([normal[0], normal[1], normal[2], d])

        data['layoutPoints']['points'].append({'xyz': a.tolist(), 'id': i})

        next_i = 0 if i + 1 >= (xyz.shape[0] - 1) else i + 1
        tmp = {
            'normal': normal.tolist(),
            'planeEquation': plane.tolist(),
            'pointsIdx': [i, next_i]
        }
        data['layoutWalls']['walls'].append(tmp)

    return data
```
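
A small usage sketch, assuming `xyz` holds the room's corner coordinates as an (N, 3) array normalized to a camera height of 1 (the square room below is made up for illustration):

```python
import numpy as np

# Made-up square room: 4 floor corners, y = -1 (one camera height below the camera).
xyz = np.array([[ 1.0, -1.0,  1.0],
                [ 1.0, -1.0, -1.0],
                [-1.0, -1.0, -1.0],
                [-1.0, -1.0,  1.0]], dtype=np.float32)

layout = xyz2json(xyz, ratio=1.2)    # ceiling height = 1.2 x camera height
print(layout['layoutHeight'])        # 1.6 + 1.6 * 1.2 = 3.52
print(layout['layoutWalls']['num'])  # 4 walls, one per pair of adjacent corners
```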
spaces/DeclK/pose/tools/inferencer.py
DELETED
@@ -1,160 +0,0 @@
```python
import numpy as np
import mmcv
from pathlib import Path
from collections import namedtuple
import cv2 as cv
from tqdm import tqdm
from mmengine.registry import init_default_scope
from mmengine.visualization import Visualizer
from mmpose.apis import inference_topdown, init_model
from mmdet.apis import inference_detector, init_detector
from .utils import filter_by_catgory, filter_by_score, Timer
from .apis import build_onnx_model_and_task_processor, inference_onnx_model


class PoseInferencer:
    def __init__(self,
                 det_cfg,
                 pose_cfg,
                 device='cpu') -> None:
        # init
        self.det_model_cfg = det_cfg.model_cfg
        self.det_model_ckpt = det_cfg.model_ckpt
        self.pose_model_cfg = pose_cfg.model_cfg
        self.pose_model_ckpt = pose_cfg.model_ckpt

        self.detector = init_detector(self.det_model_cfg,
                                      self.det_model_ckpt,
                                      device=device)
        self.pose_model = init_model(self.pose_model_cfg,
                                     self.pose_model_ckpt,
                                     device=device)
        # use this count to tell the progress
        self.video_count = 0

    def process_one_image(self, img):
        init_default_scope('mmdet')
        det_result = inference_detector(self.detector, img)
        det_inst = det_result.pred_instances.cpu().numpy()
        bboxes, scores, labels = (det_inst.bboxes,
                                  det_inst.scores,
                                  det_inst.labels)
        bboxes, scores, labels = filter_by_score(bboxes, scores,
                                                 labels, 0.5)
        bboxes, scores, labels = filter_by_catgory(bboxes, scores, labels,
                                                   ['person'])
        # inference with pose model
        init_default_scope('mmpose')
        pose_result = inference_topdown(self.pose_model, img, bboxes)
        if len(pose_result) == 0:
            # no detection place holder
            keypoints = np.zeros((1, 17, 2))
            pts_scores = np.zeros((1, 17))
            bboxes = np.zeros((1, 4))
            scores = np.zeros((1, ))
            labels = np.zeros((1, ))
        else:
            keypoints = np.concatenate([r.pred_instances.keypoints
                                        for r in pose_result])
            pts_scores = np.concatenate([r.pred_instances.keypoint_scores
                                         for r in pose_result])

        DetInst = namedtuple('DetInst', ['bboxes', 'scores', 'labels'])
        PoseInst = namedtuple('PoseInst', ['keypoints', 'pts_scores'])
        return DetInst(bboxes, scores, labels), PoseInst(keypoints, pts_scores)

    def inference_video(self, video_path):
        """ Inference a video with detector and pose model
        Return:
            all_pose: a list of PoseInst, check the namedtuple definition
            all_det: a list of DetInst
        """
        video_reader = mmcv.VideoReader(video_path)
        all_pose, all_det = [], []

        for frame in tqdm(video_reader):
            # inference with detector
            det, pose = self.process_one_image(frame)
            all_pose.append(pose)
            all_det.append(det)

        return all_det, all_pose


class PoseInferencerV2:
    """ V2 Use onnx for detection model, still use pytorch for pose model.
    """
    def __init__(self,
                 det_cfg,
                 pose_cfg,
                 device='cpu') -> None:
        # init
        self.det_deploy_cfg = det_cfg.deploy_cfg
        self.det_model_cfg = det_cfg.model_cfg
        self.det_backend_files = det_cfg.backend_files

        self.pose_model_cfg = pose_cfg.model_cfg
        self.pose_model_ckpt = pose_cfg.model_ckpt

        self.detector, self.task_processor = \
            build_onnx_model_and_task_processor(self.det_model_cfg,
                                                self.det_deploy_cfg,
                                                self.det_backend_files,
                                                device)
        self.pose_model = init_model(self.pose_model_cfg,
                                     self.pose_model_ckpt,
                                     device)
        # use this count to tell the progress
        self.video_count = 0

    def process_one_image(self, img):
        init_default_scope('mmdet')
        det_result = inference_onnx_model(self.detector,
                                          self.task_processor,
                                          self.det_deploy_cfg,
                                          img)
        det_inst = det_result[0].pred_instances.cpu().numpy()
        bboxes, scores, labels = (det_inst.bboxes,
                                  det_inst.scores,
                                  det_inst.labels)
        bboxes, scores, labels = filter_by_score(bboxes, scores,
                                                 labels, 0.5)
        bboxes, scores, labels = filter_by_catgory(bboxes, scores, labels,
                                                   ['person'])
        # inference with pose model
        init_default_scope('mmpose')
        pose_result = inference_topdown(self.pose_model, img, bboxes)
        if len(pose_result) == 0:
            # no detection place holder
            keypoints = np.zeros((1, 17, 2))
            pts_scores = np.zeros((1, 17))
            bboxes = np.zeros((1, 4))
            scores = np.zeros((1, ))
            labels = np.zeros((1, ))
        else:
            keypoints = np.concatenate([r.pred_instances.keypoints
                                        for r in pose_result])
            pts_scores = np.concatenate([r.pred_instances.keypoint_scores
                                         for r in pose_result])

        DetInst = namedtuple('DetInst', ['bboxes', 'scores', 'labels'])
        PoseInst = namedtuple('PoseInst', ['keypoints', 'pts_scores'])
        return DetInst(bboxes, scores, labels), PoseInst(keypoints, pts_scores)

    def inference_video(self, video_path):
        """ Inference a video with detector and pose model
        Return:
            all_pose: a list of PoseInst, check the namedtuple definition
            all_det: a list of DetInst
        """
        video_reader = mmcv.VideoReader(video_path)
        all_pose, all_det = [], []

        count = self.video_count + 1
        for frame in tqdm(video_reader, desc=f'Inference video {count}'):
            # inference with detector
            det, pose = self.process_one_image(frame)
            all_pose.append(pose)
            all_det.append(det)
        self.video_count += 1

        return all_det, all_pose
```
spaces/DeepLabCut/MegaDetector_DeepLabCut/fonts/read.md
DELETED
File without changes
spaces/Demosthene-OR/avr23-cds-translation/app.py
DELETED
@@ -1,80 +0,0 @@
```python
import streamlit as st
import os.path
from collections import OrderedDict
from streamlit_option_menu import option_menu
# Define TITLE, TEAM_MEMBERS and PROMOTION values, in config.py.
import config
from tabs.custom_vectorizer import custom_tokenizer, custom_preprocessor

# Initialize a session state variable that tracks the sidebar state (either 'expanded' or 'collapsed').
if 'sidebar_state' not in st.session_state:
    st.session_state.sidebar_state = 'expanded'
else:
    st.session_state.sidebar_state = 'auto'

st.set_page_config(
    page_title=config.TITLE,
    page_icon="assets/faviconV2.png",
    initial_sidebar_state=st.session_state.sidebar_state
)

# Define the root folders depending on local/cloud run
thisfile = os.path.abspath(__file__)
if ('/' in thisfile):
    os.chdir(os.path.dirname(thisfile))

# Tabs in the ./tabs folder, imported here.
from tabs import intro, exploration_tab, data_viz_tab, id_lang_tab, modelisation_dict_tab, modelisation_seq2seq_tab, game_tab


with open("style.css", "r") as f:
    style = f.read()

st.markdown(f"<style>{style}</style>", unsafe_allow_html=True)


# Add tab in this ordered dict by
# passing the name in the sidebar as key and the imported tab
# as value as follow :
TABS = OrderedDict(
    [
        (intro.sidebar_name, intro),
        (exploration_tab.sidebar_name, exploration_tab),
        (data_viz_tab.sidebar_name, data_viz_tab),
        (id_lang_tab.sidebar_name, id_lang_tab),
        (modelisation_dict_tab.sidebar_name, modelisation_dict_tab),
        (modelisation_seq2seq_tab.sidebar_name, modelisation_seq2seq_tab),
        (game_tab.sidebar_name, game_tab),
    ]
)


def run():

    st.sidebar.image(
        "assets/logo-datascientest.png",
        width=200,
    )
    with st.sidebar:
        tab_name = option_menu(None, list(TABS.keys()),
            # icons=['house', 'bi-binoculars', 'bi bi-graph-up', 'bi-chat-right-text','bi-book', 'bi-body-text'], menu_icon="cast", default_index=0,
            icons=['house', 'binoculars', 'graph-up', 'search', 'book', 'chat-right-text', 'controller'], menu_icon="cast", default_index=0,
            styles={"container": {"padding": "0!important", "background-color": "#10b8dd", "border-radius": "0!important"},
                    "nav-link": {"font-size": "1rem", "text-align": "left", "margin": "0em", "padding": "0em",
                                 "padding-left": "0.2em", "--hover-color": "#eee", "font-weight": "400",
                                 "font-family": "Source Sans Pro, sans-serif"}
                    })
    # tab_name = st.sidebar.radio("", list(TABS.keys()), 0)
    st.sidebar.markdown("---")
    st.sidebar.markdown(f"## {config.PROMOTION}")

    st.sidebar.markdown("### Team members:")
    for member in config.TEAM_MEMBERS:
        st.sidebar.markdown(member.sidebar_markdown(), unsafe_allow_html=True)

    tab = TABS[tab_name]
    tab.run()


if __name__ == "__main__":
    run()
```
spaces/Detomo/ai-avatar-backend/app.js
DELETED
@@ -1,45 +0,0 @@
```js
var createError = require('http-errors');
var express = require('express');
var path = require('path');
var cookieParser = require('cookie-parser');
var logger = require('morgan');
var cors = require('cors');

var indexRouter = require('./routes/index');

var app = express();


// view engine setup
app.set('views', path.join(__dirname, 'views'));
app.set('view engine', 'pug');

var corsOptions = {
  origin: '*'
};
app.use(cors(corsOptions));
app.use(logger('dev'));
app.use(express.json());
app.use(express.urlencoded({ extended: false }));
app.use(cookieParser());
app.use(express.static(path.join(__dirname, 'public')));

app.use('/', indexRouter);

// catch 404 and forward to error handler
app.use(function(req, res, next) {
  next(createError(404));
});

// error handler
app.use(function(err, req, res, next) {
  // set locals, only providing error in development
  res.locals.message = err.message;
  res.locals.error = req.app.get('env') === 'development' ? err : {};

  // render the error page
  res.status(err.status || 500);
  res.render('error');
});

module.exports = app;
```
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|