Commit
·
ef5ebd1
1
Parent(s):
1c4653a
Update parquet files (step 52 of 397)
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- spaces/01zhangclare/bingai/Dockerfile +0 -34
- spaces/101-5/gpt4free/g4f/.v1/gpt4free/usesless/README.md +0 -33
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Chemistry Matters Book Free Download A 10-Volume Encyclopedia of Chemistry Topics and Concepts.md +0 -107
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Find Facebook Password With Facebook Id !!EXCLUSIVE!!.md +0 -29
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/For Those Looking for a Key rpowersaves - Reddit[1].md +0 -115
- spaces/1gistliPinn/ChatGPT4/Examples/AnyMusic 7.2.0 Crack 2020 With UPDATED Keygen.md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/Download Gladiatus Hack 26 !!LINK!!.md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/Download Special Software Huawei P9 Huawei [BEST].md +0 -16
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bloons TD 6 33.1 APK Enjoy the New Features and Fixes.md +0 -220
- spaces/1phancelerku/anime-remove-background/Download D-Mod and Unlock New Abilities for Foxes in Minecraft.md +0 -133
- spaces/1toTree/lora_test/ppdiffusers/utils/logging.py +0 -339
- spaces/7hao/bingo/src/pages/api/kblob.ts +0 -56
- spaces/AI-Hobbyist/Hoyo-RVC/vc_infer_pipeline.py +0 -431
- spaces/ALSv/FSW/roop/globals.py +0 -22
- spaces/AP123/dreamgaussian/README.md +0 -11
- spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/custom_dataset/__init__.py +0 -0
- spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb32-60e_deepfashion2_short_sleeved_shirt_256x192/td_hm_res50_4xb32-60e_deepfashion2_short_sleeved_shirt_256x192.py +0 -2861
- spaces/AchyuthGamer/OpenGPT/client/js/sidebar-toggler.js +0 -34
- spaces/Adapter/CoAdapter/ldm/modules/image_degradation/utils_image.py +0 -916
- spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/visibility/all.py +0 -17
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/PointToChild.js +0 -41
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/holygrail/HolyGrail.d.ts +0 -69
- spaces/AlawnCN/webui-docker/Dockerfile +0 -47
- spaces/AlekseyKorshuk/model-evaluation/README.md +0 -12
- spaces/Alichuan/VITS-Umamusume-voice-synthesizer/text/thai.py +0 -44
- spaces/Amrrs/DragGan-Inversion/stylegan_human/pti/pti_models/e4e/encoders/helpers.py +0 -145
- spaces/Andy1621/uniformer_image_detection/mmdet/models/backbones/darknet.py +0 -199
- spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r50-d8_512x1024_40k_cityscapes.py +0 -4
- spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_r50-d8_512x512_80k_ade20k.py +0 -6
- spaces/Andy1621/uniformer_image_segmentation/configs/hrnet/fcn_hr18s_512x1024_160k_cityscapes.py +0 -9
- spaces/Andy1621/uniformer_image_segmentation/configs/ocrnet/ocrnet_r101-d8_512x1024_40k_b16_cityscapes.py +0 -7
- spaces/Anmol12385/chat123/README.md +0 -13
- spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/guided_diffusion/scripts/classifier_sample.py +0 -131
- spaces/AntNikYab/NaturalLanguageProcessing/app.py +0 -22
- spaces/Arnx/MusicGenXvAKN/tests/modules/test_codebooks_patterns.py +0 -246
- spaces/Asahi402/White-box-Cartoonization/wbc/cartoonize.py +0 -112
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/certifi/__init__.py +0 -4
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/text.py +0 -1307
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/config/_validate_pyproject/error_reporting.py +0 -318
- spaces/AutoLLM/ArxivDigest/action.py +0 -142
- spaces/Aveygo/AstroSleuth/utils/convert_to_onnx.py +0 -15
- spaces/Aziizzz/ChestXrayClassification/README.md +0 -13
- spaces/Benson/text-generation/Examples/Descargar Cama Guerras En Minecraft Educacin Edicin.md +0 -52
- spaces/BetterAPI/BetterChat/src/lib/server/modelEndpoint.ts +0 -21
- spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/contrib/_securetransport/bindings.py +0 -519
- spaces/CAPTY222/runwayml-stable-diffusion-v1-5/README.md +0 -12
- spaces/CVPR/LIVE/pybind11/tests/test_copy_move.cpp +0 -213
- spaces/CVPR/LIVE/thrust/thrust/detail/config/cpp_dialect.h +0 -124
- spaces/CVPR/LIVE/thrust/thrust/detail/seq.h +0 -53
- spaces/CVPR/LIVE/thrust/thrust/device_allocator.h +0 -146
spaces/01zhangclare/bingai/Dockerfile
DELETED
@@ -1,34 +0,0 @@
|
|
1 |
-
# Build Stage
|
2 |
-
# 使用 golang:alpine 作为构建阶段的基础镜像
|
3 |
-
FROM golang:alpine AS builder
|
4 |
-
|
5 |
-
# 添加 git,以便之后能从GitHub克隆项目
|
6 |
-
RUN apk --no-cache add git
|
7 |
-
|
8 |
-
# 从 GitHub 克隆 go-proxy-bingai 项目到 /workspace/app 目录下
|
9 |
-
RUN git clone https://github.com/Harry-zklcdc/go-proxy-bingai.git /workspace/app
|
10 |
-
|
11 |
-
# 设置工作目录为之前克隆的项目目录
|
12 |
-
WORKDIR /workspace/app
|
13 |
-
|
14 |
-
# 编译 go 项目。-ldflags="-s -w" 是为了减少编译后的二进制大小
|
15 |
-
RUN go build -ldflags="-s -w" -tags netgo -trimpath -o go-proxy-bingai main.go
|
16 |
-
|
17 |
-
# Runtime Stage
|
18 |
-
# 使用轻量级的 alpine 镜像作为运行时的基础镜像
|
19 |
-
FROM alpine
|
20 |
-
|
21 |
-
# 设置工作目录
|
22 |
-
WORKDIR /workspace/app
|
23 |
-
|
24 |
-
# 从构建阶段复制编译后的二进制文件到运行时镜像中
|
25 |
-
COPY --from=builder /workspace/app/go-proxy-bingai .
|
26 |
-
|
27 |
-
# 设置环境变量,此处为随机字符
|
28 |
-
ENV Go_Proxy_BingAI_USER_TOKEN_1="kJs8hD92ncMzLaoQWYtX5rG6bE3fZ4iO"
|
29 |
-
|
30 |
-
# 暴露8080端口
|
31 |
-
EXPOSE 8080
|
32 |
-
|
33 |
-
# 容器启动时运行的命令
|
34 |
-
CMD ["/workspace/app/go-proxy-bingai"]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/101-5/gpt4free/g4f/.v1/gpt4free/usesless/README.md
DELETED
@@ -1,33 +0,0 @@
|
|
1 |
-
ai.usesless.com
|
2 |
-
|
3 |
-
### Example: `usesless` <a name="example-usesless"></a>
|
4 |
-
|
5 |
-
### Token generation
|
6 |
-
<p>This will create account.json that contains email and token in json</p>
|
7 |
-
|
8 |
-
```python
|
9 |
-
from gpt4free import usesless
|
10 |
-
|
11 |
-
|
12 |
-
token = usesless.Account.create(logging=True)
|
13 |
-
print(token)
|
14 |
-
```
|
15 |
-
|
16 |
-
### Completion
|
17 |
-
<p>Insert token from account.json</p>
|
18 |
-
|
19 |
-
```python
|
20 |
-
import usesless
|
21 |
-
|
22 |
-
message_id = ""
|
23 |
-
token = <TOKENHERE> # usesless.Account.create(logging=True)
|
24 |
-
while True:
|
25 |
-
prompt = input("Question: ")
|
26 |
-
if prompt == "!stop":
|
27 |
-
break
|
28 |
-
|
29 |
-
req = usesless.Completion.create(prompt=prompt, parentMessageId=message_id, token=token)
|
30 |
-
|
31 |
-
print(f"Answer: {req['text']}")
|
32 |
-
message_id = req["id"]
|
33 |
-
```
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Chemistry Matters Book Free Download A 10-Volume Encyclopedia of Chemistry Topics and Concepts.md
DELETED
@@ -1,107 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Chemistry Matters Book Free Download</h1>
|
3 |
-
<p>Are you looking for a free and easy way to learn chemistry? Do you want to master the concepts and skills of this fascinating subject? If yes, then you should download Chemistry Matters, a comprehensive and engaging textbook for high school students. In this article, we will tell you what Chemistry Matters is, why you should download it for free, and how to do it. Let's get started!</p>
|
4 |
-
<h2>What is Chemistry Matters?</h2>
|
5 |
-
<p>Chemistry Matters is a textbook that covers the syllabus of chemistry for high school students. It is written by a team of experienced and qualified authors who have a passion for teaching and learning chemistry. The book aims to help students develop a deep understanding of the principles and applications of chemistry, as well as to foster their interest and curiosity in the subject.</p>
|
6 |
-
<h2>chemistry matters book free download</h2><br /><p><b><b>DOWNLOAD</b> 🆗 <a href="https://byltly.com/2uKvUA">https://byltly.com/2uKvUA</a></b></p><br /><br />
|
7 |
-
<h3>A comprehensive textbook for high school students</h3>
|
8 |
-
<p>Chemistry Matters covers all the topics that you need to know for your chemistry exams, such as atomic structure, chemical bonding, chemical reactions, stoichiometry, gases, solutions, acids and bases, equilibrium, electrochemistry, organic chemistry, and more. The book also includes chapters on environmental chemistry, biochemistry, nanotechnology, and green chemistry, which are relevant and interesting topics for today's world.</p>
|
9 |
-
<h3>The features and benefits of Chemistry Matters</h3>
|
10 |
-
<p>Chemistry Matters is not just a textbook, but also a learning companion that offers many features and benefits for students. Some of them are:</p>
|
11 |
-
<ul>
|
12 |
-
<li><b>Clear and concise explanations:</b> The book uses simple and precise language to explain the concepts and theories of chemistry. It also provides examples, diagrams, tables, and graphs to illustrate the points and make them easier to understand.</li>
|
13 |
-
<li><b>Practice questions and exercises:</b> The book contains a variety of questions and exercises at the end of each chapter to help students review and reinforce their learning. The questions range from multiple-choice, short-answer, structured, to essay-type questions. The book also provides answers and solutions to selected questions and exercises.</li>
|
14 |
-
<li><b>Summary and key points:</b> The book provides a summary and key points at the end of each chapter to help students recall and revise the main ideas and facts of the chapter.</li>
|
15 |
-
<li><b>Learning objectives and outcomes:</b> The book states the learning objectives and outcomes at the beginning of each chapter to help students focus on what they need to learn and achieve by the end of the chapter.</li>
|
16 |
-
<li><b>Experiments and investigations:</b> The book includes experiments and investigations that students can perform in the laboratory or at home to demonstrate and explore the phenomena and principles of chemistry. The book also provides safety tips, procedures, observations, results, discussions, and conclusions for each experiment and investigation.</li>
|
17 |
-
</ul>
|
18 |
-
<h3>How to use Chemistry Matters effectively</h3>
|
19 |
-
<p>To get the most out of Chemistry Matters, you should use it in conjunction with other learning resources and strategies. Here are some tips on how to use Chemistry Matters effectively:</p>
|
20 |
-
<ul>
|
21 |
-
<li><b>Read the book before and after class:</b> Reading the book before class will help you prepare for the lesson and have some background knowledge of the topic. Reading the book after class will help you consolidate your learning and fill in any gaps or doubts that you may have.</li>
|
22 |
-
<li><b>Do the practice questions and exercises:</b> Doing the practice questions and exercises will help you check your understanding and apply your knowledge of the topic. It will also help you practice your problem-solving skills and prepare for your exams.</li>
|
23 |
-
<li><b>Review the summary and key points:</b> Reviewing the summary and key points will help you refresh your memory and revise the important facts and concepts of the topic. It will also help you identify your strengths and weaknesses in the topic.</li>
|
24 |
-
<li><b>Do the experiments and investigations:</b> Doing the experiments and investigations will help you learn by doing and discover by yourself how chemistry works in real life. It will also help you develop your scientific skills such as observation, measurement, analysis, evaluation, communication, etc.</li>
|
25 |
-
</ul>
|
26 |
-
<h2>Why should you download Chemistry Matters for free?</h2>
|
27 |
-
<p>You may be wondering why you should download Chemistry Matters for free instead of buying a physical copy or renting one from a library. Well, there are many reasons why downloading Chemistry Matters for free is a smart choice. Here are some of them:</p>
|
28 |
-
<h3>Save money and time</h3>
|
29 |
-
<p>Downloading Chemistry Matters for free will save you money that you would otherwise spend on buying or renting a physical copy of the book. You can use that money for other purposes such as buying other books or materials that you need for your studies or hobbies. Downloading Chemistry Matters for free will also save you time that you would otherwise spend on going to a bookstore or a library to get a physical copy of the book. You can use that time for other activities such as studying more or having fun with your friends or family.</p>
|
30 |
-
<h3>Access the book anytime and anywhere</h3>
|
31 |
-
<p>Downloading Chemistry Matters for free will give you access to the book anytime and anywhere that you have an internet connection or a device that can read PDF files. You can read the book on your computer, laptop, tablet, smartphone, or e-reader at your convenience. You don't have to worry about losing or damaging your physical copy of the book or returning it on time to avoid fines or penalties. You can also share the book with your classmates or friends easily by sending them a link or a file.</p>
|
32 |
-
<h3>Enhance your learning experience with interactive features</h3>
|
33 |
-
<p>Downloading Chemistry Matters for free will enhance your learning experience with interactive features that are not available in a physical copy of the book. For example, you can zoom in or out on images or graphs to see them more clearly; you can highlight or annotate important parts of the text; you can search for keywords or phrases within the book; you can click on links or references to access more information or resources; you can watch videos or animations that explain or demonstrate some concepts or phenomena; etc.</p>
|
34 |
-
<p>chemistry matters textbook pdf download<br />
|
35 |
-
chemistry matters book online free<br />
|
36 |
-
chemistry matters ebook free download<br />
|
37 |
-
chemistry matters second edition pdf download<br />
|
38 |
-
chemistry matters book solutions free download<br />
|
39 |
-
chemistry matters gce o level textbook free download<br />
|
40 |
-
chemistry matters workbook pdf download<br />
|
41 |
-
chemistry matters book answers free download<br />
|
42 |
-
chemistry matters for the 21st century pdf download<br />
|
43 |
-
chemistry matters book review free download<br />
|
44 |
-
chemistry matters a molecular approach pdf download<br />
|
45 |
-
chemistry matters book summary free download<br />
|
46 |
-
chemistry matters an inquiry-based approach pdf download<br />
|
47 |
-
chemistry matters book notes free download<br />
|
48 |
-
chemistry matters by tan yin toon pdf download<br />
|
49 |
-
chemistry matters book quiz free download<br />
|
50 |
-
chemistry matters concepts and applications pdf download<br />
|
51 |
-
chemistry matters book test free download<br />
|
52 |
-
chemistry matters for cambridge igcse pdf download<br />
|
53 |
-
chemistry matters book questions free download<br />
|
54 |
-
chemistry matters fundamentals of chemistry pdf download<br />
|
55 |
-
chemistry matters book exercises free download<br />
|
56 |
-
chemistry matters gce n level textbook free download<br />
|
57 |
-
chemistry matters book worksheets free download<br />
|
58 |
-
chemistry matters in life and health pdf download<br />
|
59 |
-
chemistry matters book projects free download<br />
|
60 |
-
chemistry matters in the service of man pdf download<br />
|
61 |
-
chemistry matters book activities free download<br />
|
62 |
-
chemistry matters marshall cavendish pdf download<br />
|
63 |
-
chemistry matters book experiments free download<br />
|
64 |
-
chemistry matters practical book pdf download<br />
|
65 |
-
chemistry matters book videos free download<br />
|
66 |
-
chemistry matters student's book pdf download<br />
|
67 |
-
chemistry matters book slides free download<br />
|
68 |
-
chemistry matters teacher's edition pdf download<br />
|
69 |
-
chemistry matters book resources free download<br />
|
70 |
-
chemistry matters textbook answers pdf download<br />
|
71 |
-
chemistry matters book glossary free download<br />
|
72 |
-
chemistry matters textbook solutions pdf download<br />
|
73 |
-
chemistry matters book index free download<br />
|
74 |
-
how to get chemistry matters book for free<br />
|
75 |
-
where to find chemistry matters book free download<br />
|
76 |
-
best sites for chemistry matters book free download<br />
|
77 |
-
tips for downloading chemistry matters book for free<br />
|
78 |
-
alternatives to chemistry matters book free download<br />
|
79 |
-
benefits of reading chemistry matters book for free<br />
|
80 |
-
challenges of downloading chemistry matters book for free<br />
|
81 |
-
reviews of chemistry matters book free download<br />
|
82 |
-
feedback on chemistry matters book free download<br />
|
83 |
-
recommendations for chemistry matters book free download</p>
|
84 |
-
<h2>How to download Chemistry Matters for free?</h2>
|
85 |
-
<p>If you are convinced that downloading Chemistry Matters for free is a good idea, then you may be wondering how to do it. Well, it's very easy! Just follow these simple steps:</p>
|
86 |
-
<h3>Step 1: Visit the official website of Chemistry Matters</h3>
|
87 |
-
<p>The first step is to visit <a href="https://www.chemistrymatters.com">www.chemistrymatters.com</a>, which is the official website of Chemistry Matters. There you will find all the information about the book such as its authors, editions, contents, reviews, etc. You will also find links to download the book for free in different formats such as PDF, EPUB, MOBI, etc.</p>
|
88 |
-
<h3>Step 2: Register for a free account or log in with your existing one</h3>
|
89 |
-
<p>The second step is to register for a free account or log in with your existing one on the website. To register, you just need to provide your name, email address, and password. You will also need to agree to the terms and conditions and privacy policy of the website. To log in, you just need to enter your email address and password. You will also have the option to log in with your social media accounts such as Facebook, Twitter, Google, etc.</p>
|
90 |
-
<h3>Step 3: Choose the edition and format of the book you want to download</h3>
|
91 |
-
<p>The third step is to choose the edition and format of the book you want to download. There are two editions of Chemistry Matters: the first edition, which was published in 2015, and the second edition, which was published in 2019. The second edition has been updated and revised to reflect the latest changes and developments in chemistry. You can choose either edition depending on your preference or requirement. You can also choose between different formats such as PDF, EPUB, MOBI, etc. depending on your device or reader.</p>
|
92 |
-
<h3>Step 4: Click on the download button and enjoy your book</h3>
|
93 |
-
<p>The final step is to click on the download button and enjoy your book. You will see a pop-up window that will ask you to confirm your download and show you the progress of the download. Once the download is complete, you will be able to open and read your book on your device or reader. You can also transfer your book to other devices or readers if you want. Congratulations! You have successfully downloaded Chemistry Matters for free!</p>
|
94 |
-
<h2>Conclusion</h2>
|
95 |
-
<p>Chemistry Matters is a great textbook for high school students who want to learn chemistry in a fun and easy way. It covers all the topics that you need to know for your exams, and it also offers many features and benefits that will enhance your learning experience. You can download Chemistry Matters for free from its official website in a few simple steps. By doing so, you will save money and time, access the book anytime and anywhere, and enjoy interactive features that are not available in a physical copy of the book. So what are you waiting for? Download Chemistry Matters for free today and start learning chemistry like never before!</p>
|
96 |
-
<h2>FAQs</h2>
|
97 |
-
<p>Here are some frequently asked questions about Chemistry Matters and its free download:</p>
|
98 |
-
<table>
|
99 |
-
<tr><td><b>Q:</b> Is Chemistry Matters suitable for all levels of high school students?</td><td><b>A:</b> Yes, Chemistry Matters is suitable for all levels of high school students, from beginners to advanced. The book explains the concepts and theories of chemistry in a clear and concise way, and it also provides different levels of questions and exercises to cater to different abilities and needs of students.</td></tr>
|
100 |
-
<tr><td><b>Q:</b> Is Chemistry Matters compatible with all devices and readers?</td><td><b>A:</b> Yes, Chemistry Matters is compatible with all devices and readers that can read PDF, EPUB, or MOBI files. You can download the book in any of these formats depending on your preference or requirement.</td></tr>
|
101 |
-
<tr><td><b>Q:</b> Is Chemistry Matters safe to download?</td><td><b>A:</b> Yes, Chemistry Matters is safe to download from its official website. The website uses SSL encryption to protect your personal information and data. The book is also virus-free and malware-free.</td></tr>
|
102 |
-
<tr><td><b>Q:</b> Is Chemistry Matters updated regularly?</td><td><b>A:</b> Yes, Chemistry Matters is updated regularly to reflect the latest changes and developments in chemistry. The second edition of the book was published in 2019, which has been revised and improved from the first edition published in 2015.</td></tr>
|
103 |
-
<tr><td><b>Q:</b> Is Chemistry Matters available in other languages?</td><td><b>A:</b> No, Chemistry Matters is currently only available in English. However, the authors are working on translating the book into other languages such as Spanish, French, German, etc.</td></tr>
|
104 |
-
</table>
|
105 |
-
</p> 0a6ba089eb<br />
|
106 |
-
<br />
|
107 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Find Facebook Password With Facebook Id !!EXCLUSIVE!!.md
DELETED
@@ -1,29 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>How to Find Your Facebook Password with Your Facebook ID</h1>
|
3 |
-
<p>If you have forgotten your Facebook password and you only remember your Facebook ID, which is the email address or phone number you used to sign up for Facebook, you may be able to recover your account using the Find Your Account page or from a friend's or family memberâs account. Here are some steps you can follow to find your Facebook password with your Facebook ID.</p>
|
4 |
-
<ol>
|
5 |
-
<li>Go to the <a href="https://www.facebook.com/login/identify/">Find Your Account page</a> at facebook.com/login/identify and enter your Facebook ID in the search box. Click Search.</li>
|
6 |
-
<li>You will see a list of accounts that match your Facebook ID. Choose the one that belongs to you and click This Is My Account.</li>
|
7 |
-
<li>You will be asked how you want to reset your password. You can choose to receive a code via email, SMS, or a phone call. Select the option that works best for you and click Continue.</li>
|
8 |
-
<li>Enter the code you received and click Continue.</li>
|
9 |
-
<li>You will be able to create a new password for your Facebook account. Make sure to choose a strong and secure password that you can remember. Click Continue.</li>
|
10 |
-
<li>You will be logged into your Facebook account with your new password. You can also review and update your security settings at this point.</li>
|
11 |
-
</ol>
|
12 |
-
<p>If you don't have access to the email address or phone number associated with your Facebook ID, you may still be able to recover your account from a friend's or family memberâs account. Here are some steps you can follow:</p>
|
13 |
-
<h2>find facebook password with facebook id</h2><br /><p><b><b>Download File</b> ––– <a href="https://byltly.com/2uKvhc">https://byltly.com/2uKvhc</a></b></p><br /><br />
|
14 |
-
<ol>
|
15 |
-
<li>From a computer, go to the profile of the account you'd like to recover.</li>
|
16 |
-
<li>Click on the three dots icon below the cover photo and select Find support or report profile.</li>
|
17 |
-
<li>Choose Something Else, then click Next.</li>
|
18 |
-
<li>Click Recover this account and follow the steps.</li>
|
19 |
-
</ol>
|
20 |
-
<p>If none of these methods work for you, you may have to create a new Facebook account with a different Facebook ID. However, before you do that, you can try contacting Facebook support and explain your situation. They may be able to help you restore your account if you can prove your identity.</p>
|
21 |
-
<p>Alternatively, if you have saved your Facebook password on your browser or device, you may be able to view it without resetting it. Here are some ways you can do that:</p>
|
22 |
-
<ul>
|
23 |
-
<li>If you use Google Chrome, go to Settings > Passwords > Saved Passwords and look for facebook.com. Click on the eye icon beside the password and enter your device password or use Touch ID to view it[^4^].</li>
|
24 |
-
<li>If you use Safari on iOS, go to Settings > Passwords > Website & App Passwords and look for facebook.com. Tap on it and use Touch ID to view your login details (username and password)[^4^].</li>
|
25 |
-
<li>If you use Firefox, go to Options > Privacy & Security > Logins and Passwords > Saved Logins and look for facebook.com. Click on the eye icon beside the password and enter your device password or use Touch ID to view it[^3^].</li>
|
26 |
-
</ul>
|
27 |
-
<p>We hope this article helped you find your Facebook password with your Facebook ID. Remember to always keep your password safe and secure, and change it regularly to prevent unauthorized access to your account.</p> cec2833e83<br />
|
28 |
-
<br />
|
29 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1acneusushi/gradio-2dmoleculeeditor/data/For Those Looking for a Key rpowersaves - Reddit[1].md
DELETED
@@ -1,115 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Powersaves License Key Generator Crack: How to Get Unlimited Access to Your Favorite Games</h1>
|
3 |
-
<p>Do you love playing video games on your Nintendo 3DS, Switch, or Wii U? Do you wish you could unlock more cheats, codes, and enhancements for your favorite games? Do you want to backup and transfer your game saves between different consoles and regions? If you answered yes to any of these questions, then you might be interested in Powersaves.</p>
|
4 |
-
<h2>powersaves license key generator crack</h2><br /><p><b><b>Download Zip</b> ✯ <a href="https://byltly.com/2uKw7F">https://byltly.com/2uKw7F</a></b></p><br /><br />
|
5 |
-
<h2>What is Powersaves and Why Do You Need It?</h2>
|
6 |
-
<p>Powersaves is a device that allows you to backup and enhance your game saves. It works with hundreds of games across various platforms, such as Pokemon, Animal Crossing, Zelda, Mario, Fire Emblem, and more. With Powersaves, you can:</p>
|
7 |
-
<ul>
|
8 |
-
<li>Unlock cheats, codes, and enhancements that let you modify your game experience. For example, you can get unlimited items, money, health, lives, etc.</li>
|
9 |
-
<li>Backup your game saves to your PC or cloud storage. This way, you can restore them in case of corruption or deletion.</li>
|
10 |
-
<li>Transfer your game saves between different consoles and regions. This way, you can play your games on any device or location.</li>
|
11 |
-
</ul>
|
12 |
-
<p>To use Powersaves, you need a compatible device (such as a 3DS PowerSaves or a Switch PowerSaves Plus), a USB cable, and a PC with internet connection. You also need to download and install the Powersaves software on your PC.</p>
|
13 |
-
<h2>What is a License Key and Why Do You Need It?</h2>
|
14 |
-
<p>A license key is a code that activates your Powersaves device. It is usually printed on a sticker or card that comes with your device. You need a license key to access the online features of Powersaves, such as downloading cheats, codes, and enhancements from the official website.</p>
|
15 |
-
<p>You can get a license key by purchasing a Powersaves device or a subscription. A subscription gives you access to all the features of Powersaves for a certain period of time (such as 6 months or 12 months). You can buy a subscription from the official website or from other online retailers.</p>
|
16 |
-
<h2>What is a License Key Generator Crack and Why Do You Need It?</h2>
|
17 |
-
<p>A license key generator crack is a software that creates fake license keys for Powersaves. It is usually made by hackers or modders who want to use Powersaves without paying for it. You need a license key generator crack if you want to use Powersaves without purchasing a device or a subscription.</p>
|
18 |
-
<p>powersaves 3ds license key generator free download<br />
|
19 |
-
powersaves pro license key generator online<br />
|
20 |
-
powersaves license key generator reddit<br />
|
21 |
-
powersaves license key generator no survey<br />
|
22 |
-
powersaves license key generator 2022<br />
|
23 |
-
powersaves license key generator mac<br />
|
24 |
-
powersaves license key generator windows 10<br />
|
25 |
-
powersaves license key generator software<br />
|
26 |
-
powersaves license key generator apk<br />
|
27 |
-
powersaves license key generator android<br />
|
28 |
-
powersaves license key generator ios<br />
|
29 |
-
powersaves license key generator exe<br />
|
30 |
-
powersaves license key generator zip<br />
|
31 |
-
powersaves license key generator rar<br />
|
32 |
-
powersaves license key generator xml<br />
|
33 |
-
powersaves license key generator crack download<br />
|
34 |
-
powersaves license key generator crack reddit<br />
|
35 |
-
powersaves license key generator crack online<br />
|
36 |
-
powersaves license key generator crack no survey<br />
|
37 |
-
powersaves license key generator crack 2022<br />
|
38 |
-
powersaves license key generator crack mac<br />
|
39 |
-
powersaves license key generator crack windows 10<br />
|
40 |
-
powersaves license key generator crack software<br />
|
41 |
-
powersaves license key generator crack apk<br />
|
42 |
-
powersaves license key generator crack android<br />
|
43 |
-
powersaves license key generator crack ios<br />
|
44 |
-
powersaves license key generator crack exe<br />
|
45 |
-
powersaves license key generator crack zip<br />
|
46 |
-
powersaves license key generator crack rar<br />
|
47 |
-
powersaves license key generator crack xml<br />
|
48 |
-
how to get a free powersaves license key generator<br />
|
49 |
-
how to use a powersaves license key generator<br />
|
50 |
-
how to activate a powersaves license key generator<br />
|
51 |
-
how to install a powersaves license key generator<br />
|
52 |
-
how to download a powersaves license key generator<br />
|
53 |
-
how to update a powersaves license key generator<br />
|
54 |
-
how to fix a powersaves license key generator<br />
|
55 |
-
how to hack a powersaves license key generator<br />
|
56 |
-
how to bypass a powersaves license key generator<br />
|
57 |
-
how to remove a powersaves license key generator<br />
|
58 |
-
where to find a powersaves license key generator<br />
|
59 |
-
where to buy a powersaves license key generator<br />
|
60 |
-
where to download a powersaves license key generator<br />
|
61 |
-
where to get a free powersaves license key generator<br />
|
62 |
-
where to get a working powersaves license key generator<br />
|
63 |
-
where to get a legit powersaves license key generator<br />
|
64 |
-
where to get a cracked powersaves license key generator<br />
|
65 |
-
where to get a safe powersaves license key generator<br />
|
66 |
-
where to get a reliable powersaves license key generator</p>
|
67 |
-
<p>You can find license key generator cracks online or create your own. Some websites offer free downloads of license key generator cracks for various versions of Powersaves. Some users also share their own license key generator cracks on forums or social media. Alternatively, you can make your own license key generator crack by using programming tools and reverse engineering techniques.</p>
|
68 |
-
<h2>How to Use a License Key Generator Crack to Get Unlimited Access to Powersaves</h2>
|
69 |
-
<p>To use a license key generator crack to get unlimited access to Powersaves, you need to follow these steps:</p>
|
70 |
-
<ol>
|
71 |
-
<li>Download a license key generator crack from a reliable source or make your own. Make sure it is compatible with your version of Powersaves and your operating system.</li>
|
72 |
-
<li>Run the license key generator crack and copy the generated code. The code should look like a series of letters and numbers.</li>
|
73 |
-
<li>Enter the code in the Powersaves software and enjoy unlimited access to your favorite games. You should be able to download and apply cheats, codes, and enhancements from the official website or from other sources.</li>
|
74 |
-
</ol>
|
75 |
-
<h2>What are the Risks and Benefits of Using a License Key Generator Crack for Powersaves</h2>
|
76 |
-
<p>Using a license key generator crack for Powersaves has its advantages and disadvantages. Here are some of them:</p>
|
77 |
-
<table>
|
78 |
-
<tr>
|
79 |
-
<th>Benefits</th>
|
80 |
-
<th>Risks</th>
|
81 |
-
</tr>
|
82 |
-
<tr>
|
83 |
-
<td>You can save money by not buying a device or a subscription.</td>
|
84 |
-
<td>You can get banned from using Powersaves if the official website detects that you are using a fake license key.</td>
|
85 |
-
</tr>
|
86 |
-
<tr>
|
87 |
-
<td>You can access more features than the official version. For example, you can use cheats, codes, and enhancements that are not available on the official website.</td>
|
88 |
-
<td>You can get infected with malware if you download a license key generator crack from an untrusted source. Malware can harm your PC or steal your personal information.</td>
|
89 |
-
</tr>
|
90 |
-
<tr>
|
91 |
-
<td>You can customize your game experience according to your preferences. For example, you can make your games easier or harder by modifying various parameters.</td>
|
92 |
-
<td>You can lose your game saves if you use incompatible or corrupted cheats, codes, or enhancements. This can ruin your progress or damage your console.</td>
|
93 |
-
</tr>
|
94 |
-
</table>
|
95 |
-
<p>You should weigh the pros and cons before using a license key generator crack for Powersaves. You should also be aware of the legal and ethical implications of using such software. Using a license key generator crack for Powersaves may violate the terms of service of the official website or the copyright laws of your country.</p>
|
96 |
-
<h4>Conclusion</h4>
|
97 |
-
<p>Powersaves is a device that allows you to backup and enhance your game saves. It works with hundreds of games across various platforms. To use it, you need a license key that activates your device. You can get one by buying a device or a subscription from the official website or other online retailers.</p>
|
98 |
-
<p>A license key generator crack is a software that creates fake license keys for Powersaves. It allows you to use Powersaves without paying for it. You can find one online or make one yourself. However, using one has its risks and benefits. You may get banned, infected with malware, or lose your game saves. You may also violate some laws or ethics by using one.</p>
|
99 |
-
<p>You should decide whether using a license key generator crack for Powersaves is worth it for you. You should also respect the rights of the creators and owners of Powersaves and the games that you play with it.</p>
|
100 |
-
<h4>Frequently Asked Questions</h4>
|
101 |
-
<ul>
|
102 |
-
<li><b>Q: How do I know if my license key is valid?</b></li>
|
103 |
-
<li>A: You can check if your license key is valid by entering it in the Powersaves software. If it is valid, you should be able to access all the online features of Powersaves without any problems.</li>
|
104 |
-
<li><b>Q: How do I get more cheats, codes, and enhancements for my games?</b></li>
|
105 |
-
<li>A: You can get more cheats, codes, and enhancements for your games by visiting the official website of Powersaves or other websites that offer them. You can also create your own cheats, codes, and enhancements by using programming tools and hacking techniques.</li>
|
106 |
-
<li><b>Q: How do I backup and restore my game saves?</b></li>
|
107 |
-
<li>A: You can backup and restore your game saves by using the backup and restore functions in the Powersaves software. You can also backup your game saves to your PC or cloud storage by copying them manually.</li>
|
108 |
-
<li><b>Q: How do I transfer my game saves between different consoles and regions?</b></li>
|
109 |
-
using the transfer function in the Powersaves software. You can also transfer your game saves manually by copying them to and from your PC.</li>
|
110 |
-
<li><b>Q: How do I update my Powersaves device and software?</b></li>
|
111 |
-
<li>A: You can update your Powersaves device and software by connecting them to your PC and internet. The Powersaves software will automatically check for updates and prompt you to install them.</li>
|
112 |
-
</ul>
|
113 |
-
</p> 0a6ba089eb<br />
|
114 |
-
<br />
|
115 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/AnyMusic 7.2.0 Crack 2020 With UPDATED Keygen.md
DELETED
@@ -1,6 +0,0 @@
|
|
1 |
-
<h2>AnyMusic 7.2.0 Crack 2020 With Keygen</h2><br /><p><b><b>DOWNLOAD</b> >>>>> <a href="https://imgfil.com/2uxYy9">https://imgfil.com/2uxYy9</a></b></p><br /><br />
|
2 |
-
<br />
|
3 |
-
August 24, 2020. Download KineMaster ... Altium Designer 20.0.11256 Crack Torrent Download 2019 Free Latest ... AnyMusic 7.2.0 Crack 2020 With Keygen 4d29de3e1b<br />
|
4 |
-
<br />
|
5 |
-
<br />
|
6 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Download Gladiatus Hack 26 !!LINK!!.md
DELETED
@@ -1,6 +0,0 @@
|
|
1 |
-
<h2>download gladiatus hack 26</h2><br /><p><b><b>Download Zip</b> ✦ <a href="https://imgfil.com/2uy05k">https://imgfil.com/2uy05k</a></b></p><br /><br />
|
2 |
-
<br />
|
3 |
-
21.062.542.902.76.284.903.92.971.282.563.921.522.942.672.851.923.953.381.49.124.771.053.493.993.68.853.991.056.153.282.333.991.21.077.250.93.571.132.822.351.22.299.362.963.982.231.832.491.05.935.902.27.170.933.952.093.803.752.82.866.092.83.581.842.941.962.921.741.971.091.902.662.951.911.901.102.752.921.971.912.752.802.851.99.929.762.282.932.851.662.612.551.782.492.721.911.921.771.741.871.931.842.531.71.156.890.95.184.684.382.722.492.772.252.331.091.343.791.853.291.651.52.434.781.962.493.891.852.332.81.292.972.381.171.551.891.492.891.851.372.662.231.651.852.012.661.672.591.882.602.221.71.234.371.863.282.931.892.872.942.661.632.312.251.721.792.572.551.772.552.232.391.332.671.32.083.211.873.211.352.372.292.181.582.812.461.611.881.922.251.681.721.741.711.741.862.921.741.991.791.632.151.701.781.92.232.72.541.431.711.741.921.752 4fefd39f24<br />
|
4 |
-
<br />
|
5 |
-
<br />
|
6 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Download Special Software Huawei P9 Huawei [BEST].md
DELETED
@@ -1,16 +0,0 @@
|
|
1 |
-
<h2>Download special software huawei p9 huawei</h2><br /><p><b><b>Download File</b> ❤ <a href="https://imgfil.com/2uy0YM">https://imgfil.com/2uy0YM</a></b></p><br /><br />
|
2 |
-
|
3 |
-
Full how-to guide: | Huawei P9 review: Huawei P9 has . How to install Android on Huawei P9 Lite.
|
4 |
-
Step-by-step instructions for flashing a Huawei P9 smartphone.
|
5 |
-
Lte, P9 Plus, P9 Lite, P9, P9 Lite using the Multi Tool.
|
6 |
-
Huawei P9 and P9 Plus.
|
7 |
-
On this page, you will find information about "Huawei P9 Firmware" and also learn how to replace it.
|
8 |
-
Firmware for Huawei P9 Lite.
|
9 |
-
Huawei P9 Lite firmware.
|
10 |
-
Instructions for firmware smartphone Huawei P9 Lite.
|
11 |
-
Firmware - FlashTools.
|
12 |
-
Firmware Huawei P9 Lite VNS-AL00 on Android 7.0 Nougat.
|
13 |
-
Huawei P9 Lite - Firmware - w3bsit3-dns.com. 8a78ff9644<br />
|
14 |
-
<br />
|
15 |
-
<br />
|
16 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bloons TD 6 33.1 APK Enjoy the New Features and Fixes.md
DELETED
@@ -1,220 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Bloons TD 6 33.1 APK: Everything You Need to Know</h1>
|
3 |
-
<p>If you are a fan of tower defense games, you have probably heard of Bloons TD 6. This is one of the most popular and successful games in the genre, with millions of players around the world. In this article, we will tell you everything you need to know about Bloons TD 6 33.1 APK, the latest version of the game that you can download and install on your Android device.</p>
|
4 |
-
<h2>bloons td 6 33.1 apk</h2><br /><p><b><b>Download Zip</b> ➡ <a href="https://urlin.us/2uT0z5">https://urlin.us/2uT0z5</a></b></p><br /><br />
|
5 |
-
<h2>What is Bloons TD 6?</h2>
|
6 |
-
<p>Bloons TD 6 is a tower defense game developed by Ninja Kiwi, a New Zealand-based company that has been making games since 2006. The game is part of the Bloons series, which started as a simple flash game where you had to pop balloons with darts.</p>
|
7 |
-
<p>In Bloons TD 6, you have to defend your base from waves of colorful balloons (called bloons) that are trying to reach the end of a path. To do this, you have to place various monkey towers along the path that can shoot darts, boomerangs, bombs, lasers, and other projectiles at the bloons.</p>
|
8 |
-
<p>The game features over a dozen types of monkey towers with three upgrade paths each and unique activated abilities. You can also use heroes, which are powerful monkeys with special skills that level up automatically during a match.</p>
|
9 |
-
<p>The game has a lot of content and variety to offer. You can play on over 60 maps with different themes and layouts. You can choose from several game modes with different rules and challenges. You can also customize your monkeys and bloons with cosmetic items from the trophy store.</p>
|
10 |
-
<h3>What <h3>What's New in Bloons TD 6 33.1 APK?</h3>
|
11 |
-
<p>Bloons TD 6 is a game that is constantly updated with new content and improvements. The latest version of the game, 33.1, was released on June 16, 2023, and it brings a lot of new features and fixes to the game. Here are some of the highlights of the update:</p>
|
12 |
-
<p>bloons tower defense 6 33.1 apk download<br />
|
13 |
-
btd6 33.1 apk mod free<br />
|
14 |
-
bloons td 6 version 33.1 apk update<br />
|
15 |
-
btd6 v33.1 apk latest<br />
|
16 |
-
bloons tower defense 6 33.1 apk no mod<br />
|
17 |
-
btd6 33.1 apk cracked<br />
|
18 |
-
bloons td 6 33.1 apk obb<br />
|
19 |
-
btd6 v33.1 apk reddit<br />
|
20 |
-
bloons tower defense 6 33.1 apk android<br />
|
21 |
-
btd6 33.1 apk hack<br />
|
22 |
-
bloons td 6 33.1 apk full<br />
|
23 |
-
btd6 v33.1 apk mirror<br />
|
24 |
-
bloons tower defense 6 33.1 apk ios<br />
|
25 |
-
btd6 33.1 apk premium<br />
|
26 |
-
bloons td 6 33.1 apk unlocked<br />
|
27 |
-
btd6 v33.1 apk mega<br />
|
28 |
-
bloons tower defense 6 33.1 apk pc<br />
|
29 |
-
btd6 33.1 apk original<br />
|
30 |
-
bloons td 6 33.1 apk offline<br />
|
31 |
-
btd6 v33.1 apk mediafire<br />
|
32 |
-
bloons tower defense 6 33.1 apk online<br />
|
33 |
-
btd6 33.1 apk patched<br />
|
34 |
-
bloons td 6 33.1 apk unlimited money<br />
|
35 |
-
btd6 v33.1 apk apkpure<br />
|
36 |
-
bloons tower defense 6 33.1 apk cheats<br />
|
37 |
-
btd6 33.1 apk file<br />
|
38 |
-
bloons td 6 33.1 apk data<br />
|
39 |
-
btd6 v33.1 apk google drive<br />
|
40 |
-
bloons tower defense 6 33.1 apk review<br />
|
41 |
-
btd6 33.1 apk install<br />
|
42 |
-
bloons td 6 33.1 apk gameplay<br />
|
43 |
-
btd6 v33.1 apk youtube<br />
|
44 |
-
bloons tower defense 6 33.1 apk features<br />
|
45 |
-
btd6 33.1 apk size<br />
|
46 |
-
bloons td 6 33.1 apk requirements<br />
|
47 |
-
btd6 v33.1 apk changelog<br />
|
48 |
-
bloons tower defense 6 33.1 apk tips<br />
|
49 |
-
btd6 33.1 apk guide<br />
|
50 |
-
bloons td 6 33.1 apk wiki<br />
|
51 |
-
btd6 v33.1 apk forum<br />
|
52 |
-
bloons tower defense 6 33.1 apk news<br />
|
53 |
-
btd6 33.1 apk blog<br />
|
54 |
-
bloons td 6 33.1 apk support<br />
|
55 |
-
btd6 v33.1 apk feedback<br />
|
56 |
-
bloons tower defense 6 33.1 apk issues<br />
|
57 |
-
btd6 33.1 apk fix<br />
|
58 |
-
bloons td 6 33.1 apk error<br />
|
59 |
-
btd6 v33.1 apk solution<br />
|
60 |
-
bloons tower defense 6 33.1 apk troubleshooting</p>
|
61 |
-
<ul>
|
62 |
-
<li>A new map called The Bazaar, which is a desert-themed market with multiple paths and obstacles.</li>
|
63 |
-
<li>A new hero called Etienne, who is a drone operator that can deploy drones to attack bloons and support other monkeys.</li>
|
64 |
-
<li>A new boss event called The Dreadbloon, which is a massive metal bloon that can spawn other bloons and has multiple phases.</li>
|
65 |
-
<li>A new odyssey mode called Extreme Odyssey, which is a harder version of the regular odyssey mode with limited tower choices and lives.</li>
|
66 |
-
<li>A new trophy store item called Monkey Money Magnet, which increases the amount of monkey money you earn from playing the game.</li>
|
67 |
-
<li>Several balance changes, bug fixes, and performance improvements.</li>
|
68 |
-
</ul>
|
69 |
-
<p>If you want to see the full patch notes of the update, you can check them out on the official website or on the game's subreddit.</p>
|
70 |
-
<h2>How to Download and Install Bloons TD 6 33.1 APK?</h2>
|
71 |
-
<p>If you are interested in playing Bloons TD 6 on your Android device, you have two options. You can either buy the game from the Google Play Store for $4.99, or you can download the APK file for free from various sources online.</p>
|
72 |
-
<p>An APK file is an Android application package that contains all the files and data needed to run an app on your device. By downloading and installing an APK file, you can bypass the official app store and get access to apps that are not available or restricted in your region.</p>
|
73 |
-
<p>However, there are some risks and drawbacks associated with downloading and installing APK files. For one thing, you may not get the latest updates and features of the app. For another thing, you may expose your device to malware or viruses that can harm your data or system. Therefore, you should always be careful when downloading and installing APK files from unknown sources.</p>
|
74 |
-
<p>Here are the steps you need to follow if you want to download and install Bloons TD 6 33.1 APK on your device:</p> <h3>Requirements for Bloons TD 6 33.1 APK</h3>
|
75 |
-
<p>Before you download and install Bloons TD 6 33.1 APK, you should make sure that your device meets the minimum and recommended requirements for running the game smoothly. Here are the specifications you need to check:</p>
|
76 |
-
<table>
|
77 |
-
<tr>
|
78 |
-
<th>Minimum Requirements</th>
|
79 |
-
<th>Recommended Requirements</th>
|
80 |
-
</tr>
|
81 |
-
<tr>
|
82 |
-
<td>Android 5.0 or higher</td>
|
83 |
-
<td>Android 8.0 or higher</td>
|
84 |
-
</tr>
|
85 |
-
<tr>
|
86 |
-
<td>2 GB of RAM</td>
|
87 |
-
<td>4 GB of RAM or more</td>
|
88 |
-
</tr>
|
89 |
-
<tr>
|
90 |
-
<td>1 GB of free storage space</td>
|
91 |
-
<td>2 GB of free storage space or more</td>
|
92 |
-
</tr>
|
93 |
-
<tr>
|
94 |
-
<td>A stable internet connection</td>
|
95 |
-
<td>A fast and reliable internet connection</td>
|
96 |
-
</tr>
|
97 |
-
</table>
|
98 |
-
<p>If your device does not meet the minimum requirements, you may experience lag, crashes, or errors while playing the game. If your device meets the recommended requirements, you will enjoy a smooth and optimal gaming experience.</p>
|
99 |
-
<h3>Download Links for Bloons TD 6 33.1 APK</h3>
|
100 |
-
<p>Once you have checked your device's specifications, you can proceed to download the Bloons TD 6 33.1 APK file from one of the sources below. We have provided links to different websites that offer the APK file for free. However, we cannot guarantee the safety or quality of these files, so download them at your own risk.</p>
|
101 |
-
<table>
|
102 |
-
<tr>
|
103 |
-
<th>Website Name</th>
|
104 |
-
<th>Download Link</th>
|
105 |
-
<th>Description</th>
|
106 |
-
</tr>
|
107 |
-
<tr>
|
108 |
-
<td>APKPure</td>
|
109 |
-
<td><a href="(^1^)">Bloons TD 6 33.1 APK Download by Ninja Kiwi - APKPure.com</a></td>
|
110 |
-
<td>A popular website that provides APK files for various apps and games.</td>
|
111 |
-
</tr>
|
112 |
-
<tr>
|
113 |
-
<td>APKMirror</td>
|
114 |
-
<td><a href="(^2^)">Bloons TD 6 APKs - APKMirror</a></td>
|
115 |
-
<td>A reputable website that offers APK files for different versions of apps and games.</td>
|
116 |
-
</tr>
|
117 |
-
<tr>
|
118 |
-
<td>APKCombo</td>
|
119 |
-
<td><a href="(^3^)">Bloons TD 6 APK + OBB 33.1 (MOD, Unlimited Money) Download for Android - APKCombo.com</a></td>
|
120 |
-
<td>A website that provides APK and OBB files for apps and games, as well as modded versions with unlimited money.</td>
|
121 |
-
</tr> <h3>Installation Instructions for Bloons TD 6 33.1 APK</h3>
|
122 |
-
<p>After you have downloaded the Bloons TD 6 33.1 APK file from one of the sources above, you can install it on your device by following these steps:</p>
|
123 |
-
<ol>
|
124 |
-
<li>Go to your device's settings and enable the option to install apps from unknown sources. This will allow you to install APK files that are not from the Google Play Store.</li>
|
125 |
-
<li>Locate the APK file that you have downloaded on your device's storage. You can use a file manager app to help you find it.</li>
|
126 |
-
<li>Tap on the APK file and follow the on-screen instructions to install it. You may need to grant some permissions to the app, such as access to your storage, network, and device information.</li>
|
127 |
-
<li>Wait for the installation process to finish. You may see a confirmation message when it is done.</li>
|
128 |
-
<li>Launch the game from your app drawer or home screen and enjoy playing Bloons TD 6.</li>
|
129 |
-
</ol>
|
130 |
-
<p>Note: If you have downloaded an OBB file along with the APK file, you will need to copy the OBB file to the Android/obb folder on your device's storage before installing the APK file. The OBB file contains additional data for the game, such as graphics and sounds.</p>
|
131 |
-
<h2>How to Play Bloons TD 6?</h2>
|
132 |
-
<p>Bloons TD 6 is a fun and addictive game that will keep you entertained for hours. The game has a simple and intuitive interface that makes it easy to play. However, if you are new to the game or want to improve your skills, here are some basic tips on how to play Bloons TD 6:</p>
|
133 |
-
<h3>Game Modes in Bloons TD 6</h3>
|
134 |
-
<p>Bloons TD 6 has several game modes that you can choose from, depending on your preference and mood. Here are some of the game modes available:</p>
|
135 |
-
<ul>
|
136 |
-
<li>Standard Mode: This is the default mode where you can play any map with any difficulty level. You can also choose between different sub-modes, such as easy, medium, hard, impoppable, etc.</li>
|
137 |
-
<li>Co-op Mode: This is a multiplayer mode where you can team up with up to three other players online and work together to defend against the bloons. You can chat with your teammates and share money and lives.</li>
|
138 |
-
<li>Odyssey Mode: This is a special mode where you have to complete a series of maps with limited tower choices and lives. You can earn rewards for completing each map and the whole odyssey.</li>
|
139 |
-
<li>Boss Event Mode: This is a limited-time mode where you have to face a powerful boss bloon that has unique abilities and attacks. You can earn trophies and rewards for defeating the boss.</li>
|
140 |
-
</ul> <h3>Monkey Towers and Heroes in Bloons TD 6</h3>
|
141 |
-
<p>Bloons TD 6 has a wide range of monkey towers and heroes that you can use to pop the bloons. Each tower and hero has its own strengths, weaknesses, and abilities that you need to consider when placing them on the map. Here are some of the monkey towers and heroes available:</p>
|
142 |
-
<ul>
|
143 |
-
<li>Dart Monkey: This is the basic tower that shoots a single dart at a single bloon. It is cheap and versatile, but not very powerful. It can be upgraded to shoot faster, farther, or more darts at once.</li>
|
144 |
-
<li>Boomerang Monkey: This is a tower that throws a boomerang that can hit multiple bloons in a curved path. It is good for dealing with grouped bloons, but not very accurate. It can be upgraded to throw faster, more, or bigger boomerangs.</li>
|
145 |
-
<li>Bomb Shooter: This is a tower that launches a bomb that explodes and pops bloons in a radius. It is good for dealing with dense clusters of bloons, but not very fast. It can be upgraded to shoot bigger, faster, or more bombs.</li>
|
146 |
-
<li>Tack Shooter: This is a tower that shoots tacks in eight directions that can pop multiple bloons. It is good for covering a large area, but not very precise. It can be upgraded to shoot more, faster, or hotter tacks.</li>
|
147 |
-
<li>Ice Monkey: This is a tower that freezes bloons in its range, slowing them down and making them vulnerable to other attacks. It is good for controlling the bloon speed, but not very damaging. It can be upgraded to freeze more, longer, or stronger bloons.</li>
|
148 |
-
<li>Glue Gunner: This is a tower that shoots glue at bloons, slowing them down and making them take more damage from other attacks. It is good for weakening the bloon defense, but not very popping. It can be upgraded to shoot more, faster, or stronger glue.</li>
|
149 |
-
<li>Sniper Monkey: This is a tower that shoots a powerful bullet that can pop any bloon type and pierce through multiple layers. It is good for dealing with high-health bloons, but not very fast. It can be upgraded to shoot faster, harder, or farther.</li>
|
150 |
-
<li>Monkey Sub: This is a tower that can only be placed on water and shoots darts or torpedoes at bloons. It is good for covering water areas, but not very flexible. It can be upgraded to shoot faster, more, or underwater.</li>
|
151 |
-
<li>Monkey Buccaneer: This is a tower that can only be placed on water and shoots cannonballs or grapeshot at bloons. It is good for covering water areas, but not very precise. It can be upgraded to shoot bigger, faster, or more projectiles.</li>
|
152 |
-
<li>Monkey Ace: This is a tower that flies in the air and drops bombs or darts at bloons. It is good for covering large areas, but not very consistent. It can be upgraded to fly faster, more accurately, or more frequently.</li>
|
153 |
-
<li>Heli Pilot: This is a tower that flies in the air and shoots darts or missiles at bloons. It is good for targeting specific bloons, but not very cheap. It can be upgraded to fly faster, more powerfully, or more autonomously.</li>
|
154 |
-
<li>Mortar Monkey: This is a tower that launches explosive shells at a target area on the map. It is good for hitting hidden or hard-to-reach bloons, but not very accurate. It can be upgraded to launch bigger, faster, or more shells.</li>
|
155 |
-
<li>Wizard Monkey: This is a tower that casts magic spells that can pop different types of bloons. It is good for dealing with various bloon properties, but not very durable. It can be upgraded to cast stronger, faster, or more spells.</li>
|
156 |
-
<li>Super Monkey: This is a tower that shoots powerful beams of energy that can pop multiple bloons at once. It is good for dealing with massive amounts of bloons, but not very affordable. It can be upgraded to shoot stronger, wider, or more beams.</li>
|
157 |
-
<li>Ninja Monkey: This is a tower that throws shurikens or caltrops at bloons. It is good for popping camo bloons and dealing critical hits, but not very fast. It can be upgraded to throw faster, more accurately, or more stealthily.</li>
|
158 |
-
<li>Alchemist: This is a tower that throws potions at monkeys or bloons. It is good for buffing other monkeys or debuffing bloons, but not very popping. It can be upgraded to throw stronger, longer-lasting, or more potions.</li>
|
159 |
-
<li>Druid: This is popping camo bloons and summoning totems. His abilities are Brambles, which creates a patch of thorns that pops bloons, and Wall of Trees, which creates a wall of trees that blocks and eats bloons.</li>
|
160 |
-
<li>Captain Churchill: This is a tank hero that shoots powerful shells and missiles at bloons. He is good for popping armored and fortified bloons and dealing massive damage. His abilities are Shell Shock, which fires a shell that pops and stuns bloons in a large radius, and MOAB Barrage, which fires a volley of missiles that target MOABs.</li>
|
161 |
-
<li>Benjamin: This is a hacker hero that uses cybernetics and hacking to manipulate the bloons and the economy. He is good for earning extra money and reducing the bloon threat. His abilities are Biohack, which boosts the attack speed of nearby monkeys for a short time, and Syphon Funding, which steals money from the bloons and reduces their health.</li>
|
162 |
-
<li>Ezili: This is a voodoo hero that uses curses and hexes to pop bloons. She is good for popping regrow and purple bloons and dealing damage based on the bloon health. Her abilities are Heartstopper, which prevents the bloons from regrowing or healing for a short time, and MOAB Hex, which damages and weakens MOABs over time.</li>
|
163 |
-
<li>Pat Fusty: This is a giant monkey hero that uses his fists and roar to pop bloons. He is good for popping large groups of bloons and buffing other monkeys with his presence. His abilities are Rallying Roar, which increases the damage and range of nearby monkeys for a short time, and Big Squeeze, which grabs and crushes a MOAB or BFB.</li>
|
164 |
-
<li>Adora: This is a divine hero that uses holy energy to pop bloons. She is good for popping all types of bloons and leveling up faster than other heroes. Her abilities are Long Range Judgement, which fires a beam of light that pops bloons in a line, and Blood Sacrifice, which sacrifices some of your monkeys to increase her level and power.</li>
|
165 |
-
<li>Brickell: This is a naval hero that can only be placed on water and uses mines and submarines to pop bloons. She is good for buffing water-based monkeys and popping submerged bloons. Her abilities are Naval Tactics, which increases the attack speed and pierce of nearby water-based monkeys for a short time, and Mega Mine, which deploys a huge mine that explodes and pops bloons in a large radius.</li>
|
166 |
-
<li>Etienne: This is a drone hero that can deploy drones to attack bloons and support other monkeys. He is good for covering multiple areas and granting camo detection to nearby monkeys. His abilities are Drone Swarm, which summons more drones to attack the bloons, and UCAV, which launches a powerful drone that fires missiles at the bloons.</li>
|
167 |
-
</ul>
|
168 |
-
<h3>Tips and Tricks for Bloons TD 6</h3>
|
169 |
-
<p>Bloons TD 6 is a game that requires strategy, skill, and creativity to master. The game can be challenging at times, especially on higher difficulties or special modes. Here are some tips and tricks that can help you improve your performance and have more fun:</p>
|
170 |
-
<ul>
|
171 |
-
<li>Experiment with different towers, heroes, and upgrades: The game has a lot of options for you to customize your defense. Try out different combinations of towers, heroes, and upgrades to see what works best for each map, mode, and situation.</li>
|
172 |
-
<li>Use monkey knowledge wisely: Monkey knowledge is a system that allows you to unlock permanent buffs and benefits for your monkeys. You can earn monkey knowledge points by leveling up or completing achievements. You can spend them on different branches of the monkey knowledge tree, such as primary, military, magic, support, powers, or heroes. Choose the ones that suit your playstyle and strategy.</li>
|
173 |
-
<li>Use powers sparingly: Powers are special items that can give you an edge in the game. You can use them to boost your monkeys, pop more bloons, or get more money. However, powers are limited in quantity and can be expensive to buy or earn. Use them only when you really need them or when you want to have some fun.</li>
|
174 |
-
<li>Watch out for special bloon properties: Bloons can have different properties that make them harder to pop or more dangerous. For example, camo bloons can only be seen by monkeys with camo detection, lead bloons can only be popped by explosive or energy attacks, regrow bloons can regenerate their layers if not popped quickly enough, etc. Learn the different types of bloon properties and how to counter them with the right towers and upgrades.</li>
|
175 |
-
<li>Plan ahead and save up for late game: The game gets harder as you progress, with more and stronger bloons appearing on the screen. You need to be prepared for the late game, where you will face MOABs, BFBs, ZOMGs, DDTs, and BADs. These are huge bloons that can take a lot of damage and spawn more bloons when popped. You need to save up money and space for powerful towers and upgrades that can deal with these threats.</li>
|
176 |
-
<li>Have fun and try new things: The game has a lot of replay value and variety, with different maps, modes, challenges, achievements, and trophies to explore. You can also create your own custom challenges and share them with other players. Don't be afraid to try new things and experiment with different strategies. You may discover something new and exciting.</li>
|
177 |
-
</ul>
|
178 |
-
<h2>Why You Should Play Bloons TD 6?</h2>
|
179 |
-
<p>Bloons TD 6 is a game that has something for everyone. Whether you are a casual player who likes to relax and pop some bloons, or a hardcore player who likes to challenge yourself and test your skills, you will find something to enjoy in this game. Here are some of the reasons why you should play Bloons TD 6:</p>
|
180 |
-
<h3>Pros of Bloons TD 6</h3>
|
181 |
-
<ul>
|
182 |
-
<li>It has amazing graphics and animations: The game has a colorful and vibrant art style that makes it pleasing to the eye. The game also has smooth and fluid animations that make it satisfying to watch. The game runs well on most devices and has options to adjust the graphics quality and performance.</li>
|
183 |
-
<li>It has tons of content and variety: The game has over 60 maps, over a dozen towers, over 10 heroes, over 100 upgrades, over 20 game modes, over 100 achievements, over 50 trophies, and more. The game also has regular updates that add new content and improvements to the game. You will never run out of things to do or see in this game.</li>
|
184 |
-
<li>It has a great gameplay and balance: The game has a simple but addictive gameplay that makes it easy to pick up and hard to put down. The game also has a good balance between difficulty and fun, with different options to suit your preference and skill level. The game also has a lot of strategy and depth, with different combinations of towers, heroes, upgrades, powers, and monkey knowledge.</li>
|
185 |
-
<li>It has a friendly and active community: The game has a large and loyal fan base that loves the game and supports the developers. The game also has a friendly and active community that shares tips, tricks, challenges, feedback, fan art, memes, and more. You can join the official website, subreddit, discord server, or other platforms to interact with other players and have more fun.</li>
|
186 |
-
</ul>
|
187 |
-
<h3>Cons of Bloons TD 6</h3>
|
188 |
-
<ul>
|
189 |
-
<li>It can be expensive and grindy: The game costs $4.99 to buy from the Google Play Store, which may be too much for some people. The game also has some in-app purchases that can help you progress faster or unlock more content, but they can be pricey as well. The game also requires you to grind a lot of money and experience to afford the more expensive towers and upgrades or level up your heroes and monkey knowledge.</li>
|
190 |
-
<li>It can be frustrating and repetitive: The game can be very challenging at times, especially on higher difficulties or special modes. You may encounter bloons that are too hard to pop or levels that are too long or complex. You may also lose your progress or lives due to mistakes or bad luck. The game can also get repetitive after a while, with the same bloons, towers, heroes, upgrades, powers, etc.</li>
|
191 |
-
<li>It can have some bugs and glitches: The game is not perfect and can have some bugs and glitches that can affect your gameplay or experience. For example, you may encounter crashes , freezes, lags, or errors while playing the game. You may also encounter some visual or audio glitches that can ruin the immersion or quality of the game. The developers are working hard to fix these issues, but they may still occur from time to time.</li>
|
192 |
-
</ul>
|
193 |
-
<h3>User Reviews of Bloons TD 6</h3>
|
194 |
-
<p>To give you a better idea of what other players think of Bloons TD 6, here are some user reviews from different platforms, such as Steam, Google Play Store, etc. These reviews are taken verbatim from the sources and may contain some spelling or grammar errors.</p>
|
195 |
-
<ul>
|
196 |
-
<li>Steam: "Bloons TD 6 is a great game. It has a lot of content and replay value. The graphics are amazing and the gameplay is addictive. The game is challenging but fair, and there are many ways to play it. The game also has a nice community and regular updates. I highly recommend this game to anyone who likes tower defense games or just wants to have some fun."</li>
|
197 |
-
<li>Google Play Store: "Bloons TD 6 is a good game. It has a lot of variety and options. The game is fun and relaxing. The game is also easy to play and learn. The game has some problems, though. The game is expensive and sometimes crashes. The game also has some ads and microtransactions. I like this game, but it could be better."</li>
|
198 |
-
<li>App Store: "Bloons TD 6 is an awesome game. It has a lot of maps and modes. The game is exciting and challenging. The game is also beautiful and smooth. The game has some flaws, however. The game is hard and sometimes frustrating. The game also has some bugs and glitches. I love this game, but it needs some improvement."</li>
|
199 |
-
</ul>
|
200 |
-
<h2>Conclusion</h2>
|
201 |
-
<p>Bloons TD 6 is a tower defense game that will keep you entertained for hours with its colorful graphics, engaging gameplay, and varied content. Whether you are a casual or hardcore player, you will find something to enjoy in this game.</p>
|
202 |
-
<p>If you want to play Bloons TD 6 on your Android device, you can either buy it from the Google Play Store or download the APK file for free from various sources online. However, you should be careful when downloading and installing APK files from unknown sources, as they may pose some risks to your device or data.</p>
|
203 |
-
<p>If you want to learn more about Bloons TD 6, you can visit the official website, subreddit, discord server, or other platforms to get more information, tips, tricks, challenges, feedback, fan art, memes, and more.</p>
|
204 |
-
<p>We hope this article has helped you understand everything you need to know about Bloons TD 6 33.1 APK. Now go ahead and pop some bloons!</p>
|
205 |
-
<h2>FAQs</h2>
|
206 |
-
<p>Here are some frequently asked questions about Bloons TD 6 33.1 APK:</p>
|
207 |
-
<ul>
|
208 |
-
<li>Q: Is Bloons TD 6 free?</li>
|
209 |
-
<li>A: No, Bloons TD 6 is not free. You have to pay $4.99 to buy it from the Google Play Store. However, you can download the APK file for free from various sources online.</li>
|
210 |
-
<li>Q: Is Bloons TD 6 offline?</li>
|
211 |
-
<li>A: Yes, Bloons TD 6 can be played offline. You don't need an internet connection to play the game, except for some features such as co-op mode, boss events, daily challenges, etc.</li>
|
212 |
-
<li>Q: Is Bloons TD 6 multiplayer?</li>
|
213 |
-
<li>A: Yes, Bloons TD 6 has a multiplayer mode called co-op mode. You can team up with up to three other players online and work together to defend against the bloons.</li>
|
214 |
-
<li>Q: Is Bloons TD 6 cross-platform?</li>
|
215 |
-
<li>A: Yes, Bloons TD 6 is cross-platform. You can play with other players who have the game on different devices or platforms, such as Android, iOS, Windows, Mac, etc.</li>
|
216 |
-
<li>Q: Is Bloons TD 6 modded?</li>
|
217 |
-
<li>A: No, Bloons TD 6 is not modded. The APK file that we have provided in this article is the original version of the game that has not been modified or hacked in any way.</li>
|
218 |
-
</ul></p> 197e85843d<br />
|
219 |
-
<br />
|
220 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Download D-Mod and Unlock New Abilities for Foxes in Minecraft.md
DELETED
@@ -1,133 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>How to Download Mod Dmod for Your Favorite Games</h1>
|
3 |
-
<p>Do you love playing games on your Android device? Do you wish you could change or add something to make them more fun, challenging, or immersive? If so, you might be interested in mod dmod.</p>
|
4 |
-
<p>Mod dmod is a term that refers to modifying or adding new features to existing games, especially on Android devices. Modding can enhance the gameplay, graphics, sound, or content of a game, making it more enjoyable and satisfying. Some mods can even create entirely new games based on the original ones.</p>
|
5 |
-
<h2>download mod dmod</h2><br /><p><b><b>Download</b> ☑ <a href="https://jinyurl.com/2uNL9Q">https://jinyurl.com/2uNL9Q</a></b></p><br /><br />
|
6 |
-
<p>For example, you can download mods for Minecraft that add new blocks, items, creatures, biomes, dimensions, quests, and more. You can also download mods for GTA San Andreas that improve the graphics, physics, vehicles, weapons, missions, characters, and more. Or you can download mods for Dmod that let you play custom maps created by other users.</p>
|
7 |
-
<p>In this article, we will show you how to download mod dmod for your favorite games. We will also explain the benefits and risks of modding games, and provide some tips and precautions to ensure a safe and smooth modding experience.</p>
|
8 |
-
<h2>Benefits of Modding Games</h2>
|
9 |
-
<p>Modding games can improve your gaming experience in various ways. Here are some of the benefits of modding games:</p>
|
10 |
-
<ul>
|
11 |
-
<li>You can customize your gaming experience according to your preferences and tastes. You can choose the mods that suit your style, mood, or interest. You can also mix and match different mods to create your own unique combination.</li>
|
12 |
-
<li>You can explore new possibilities and scenarios that are not available in the original game. You can discover new worlds, stories, characters, mechanics, and challenges that expand your gaming horizon. You can also create your own content and share it with other players.</li>
|
13 |
-
<li>You can improve the performance and compatibility of your game on different devices and platforms. You can optimize the graphics, sound, or controls of your game to match your device's specifications and capabilities. You can also fix bugs, errors, or glitches that may affect your game.</li>
|
14 |
-
<li>You can support the creative work of modders and developers who share their mods for free or for a small fee. You can appreciate their efforts and skills, and give them feedback or suggestions to improve their mods. You can also contribute to the modding community by donating, rating, reviewing, or recommending mods.</li>
|
15 |
-
<li>You can learn new skills and knowledge about game design, programming, art, and more. You can study how mods are made and how they work, and apply what you learn to your own projects. You can also collaborate with other modders and learn from their experiences.</li>
|
16 |
-
</ul>
|
17 |
-
<p>As you can see, modding games can offer you many benefits that can make your gaming experience more enjoyable and satisfying. However, modding games also has some risks and challenges that you should be aware of.</p>
|
18 |
-
<h2>Risks and Challenges of Modding Games</h2>
|
19 |
-
<p>Modding games is not without its drawbacks and dangers. Here are some of the risks and challenges of modding games:</p>
|
20 |
-
<ul>
|
21 |
-
<li>You may encounter bugs, glitches, crashes, or compatibility issues that affect your game or device. Some mods may not work properly or conflict with each other or with the original game. Some mods may also require additional resources or permissions that may slow down or harm your device.</li>
|
22 |
-
<li>You may violate the terms of service or intellectual property rights of the original game developers or publishers. Some mods may use unauthorized or illegal content or features that may infringe on the rights of the original game creators. Some mods may also be banned or removed by the game developers or publishers for violating their policies.</li>
|
23 |
-
<li>You may expose your device or data to malware, viruses, or hackers that may harm your security or privacy. Some mods may contain malicious code or software that may infect your device or steal your data. Some mods may also require you to access unsafe or untrusted websites or sources that may compromise your security or privacy.</li>
|
24 |
-
<li>You may lose your progress or achievements in the original game if you overwrite or delete any files or data. Some mods may require you to modify or replace some files or data in the original game folder. Some mods may also prevent you from saving or loading your game normally.</li>
|
25 |
-
</ul>
|
26 |
-
<p>Therefore, you should always be careful and responsible when downloading and installing mods for your games. You should also respect the rights and wishes of the original game creators and modders, and give them proper credit and feedback for their work.</p>
|
27 |
-
<h2>How to Download and Install Mods for Your Games</h2>
|
28 |
-
<p>Now that you know the benefits and risks of modding games, let's see how to download and install mods for your games. The general steps and methods are as follows:</p>
|
29 |
-
<p>download mod dmod minecraft<br />
|
30 |
-
download mod dmod curseforge<br />
|
31 |
-
download mod dmod files<br />
|
32 |
-
download mod dmod foxes<br />
|
33 |
-
download mod dmod bundles<br />
|
34 |
-
download mod dmod 1.7.10<br />
|
35 |
-
download mod dmod latest version<br />
|
36 |
-
download mod dmod installer<br />
|
37 |
-
download mod dmod patches<br />
|
38 |
-
download mod dmod demos<br />
|
39 |
-
download mod dmod media<br />
|
40 |
-
download mod dmod wiki<br />
|
41 |
-
download mod dmod license<br />
|
42 |
-
download mod dmod unlicense<br />
|
43 |
-
download mod dmod mojang<br />
|
44 |
-
download mod dmod et futurum requiem<br />
|
45 |
-
download mod dmod sweet berries<br />
|
46 |
-
download mod dmod rabbits<br />
|
47 |
-
download mod dmod nei<br />
|
48 |
-
download mod dmod gtnh fork<br />
|
49 |
-
download mod dmod hodgepodge<br />
|
50 |
-
download mod dmod mixin<br />
|
51 |
-
download mod dmod backlytra<br />
|
52 |
-
download mod dmod mixingasm<br />
|
53 |
-
download mod dmod looting fox fix<br />
|
54 |
-
download mod dmod windows<br />
|
55 |
-
download mod dmod macos<br />
|
56 |
-
download mod dmod linux<br />
|
57 |
-
download mod dmod android<br />
|
58 |
-
download mod dmod ios<br />
|
59 |
-
download mod dmod apk<br />
|
60 |
-
download mod dmod zip<br />
|
61 |
-
download mod dmod jar<br />
|
62 |
-
download mod dmod exe<br />
|
63 |
-
download mod dmod source code<br />
|
64 |
-
download mod dmod github<br />
|
65 |
-
download mod dmod reviews<br />
|
66 |
-
download mod dmod ratings<br />
|
67 |
-
download mod dmod comments<br />
|
68 |
-
download mod dmod feedbacks<br />
|
69 |
-
download mod dmod support<br />
|
70 |
-
download mod dmod issues<br />
|
71 |
-
download mod dmod bugs<br />
|
72 |
-
download mod dmod fixes<br />
|
73 |
-
download mod dmod updates<br />
|
74 |
-
download mod dmod changelog<br />
|
75 |
-
download mod dmod features<br />
|
76 |
-
download mod dmod screenshots<br />
|
77 |
-
download mod dmod videos</p>
|
78 |
-
<ol>
|
79 |
-
<li>Find a mod that you like and want to try. You can search online for mod websites, forums, blogs, videos, reviews, or recommendations. Some popular mod websites are <a href="">Mod DB</a>, <a href="">Nexus Mods</a>, <a href="">APKPure</a>, <a href="">HappyMod</a>, <a href="">Android-1</a>, etc.</li>
|
80 |
-
<li>Download the mod file to your device. Make sure the mod file is compatible with your device's specifications and capabilities. Make sure the mod file is safe and secure from malware, viruses, or hackers. Make sure the mod file is legal and authorized by the original game developers or publishers.</li>
|
81 |
-
<li>Install the mod file on your device. Depending on the type and format of the mod file, you may need to use different methods to install it. Some common methods are: <ul>
|
82 |
-
<li>Using a mod installer app: Some mods come with a mod installer app that can automatically install the mod for you. For example, <a href="">Dmod Installer</a> is a mod installer app that can install Dmod maps for you.</li>
|
83 |
-
<li>Using a file manager app: Some mods require you to manually copy or move the mod file to a specific folder on your device using a file manager app. For example, some Minecraft mods require you to copy or move the mod file to the "games/com.mojang/minecraftWorlds" folder on your device using a file manager app.</li>
|
84 |
-
<li>Using an APK file: Some mods are packaged as APK files that can be installed as standalone apps on your device. For example, some GTA San Andreas mods are APK files that can be installed as separate games on your device.</li>
|
85 |
-
</ul>
|
86 |
-
</li>
|
87 |
-
<li>Launch the modded game on your device. Depending on the type and format of the mod file, you may need to use different methods to launch it. Some common methods are: <ul>
|
88 |
-
<li>Using a mod launcher app: Some mods require you to use a mod launcher app to launch the modded game. For example, <a href="">BlockLauncher</a> is a mod launcher app that can launch Minecraft with mods.</li>
|
89 |
-
<li>Using the original game app: Some mods can be launched directly from the original game app. For example, some Dmod maps can be launched from the Dmod app.</li>
|
90 |
-
<li>Using the modded game app: Some mods are installed as separate apps that can be launched independently from the original game app. For example, some GTA San Andreas mods are installed as separate games that can be launched from their own icons.</li>
|
91 |
-
</ul>
|
92 |
-
</li>
|
93 |
-
</ol>
|
94 |
-
<p>These are the general steps and methods to download and install mods for your games. However, different games and mods may have different requirements and instructions, so you should always follow the specific guidelines and instructions provided by the modders or developers. You should also backup your original game files and data before installing any mods, in case something goes wrong or you want to revert to the original game.</p>
|
95 |
-
<h2>Conclusion</h2>
|
96 |
-
<p>In this article, we have shown you how to download mod dmod for your favorite games. We have also explained the benefits and risks of modding games, and provided some tips and precautions to ensure a safe and smooth modding experience.</p>
|
97 |
-
<p>Modding games can offer you many advantages that can make your gaming experience more enjoyable and satisfying. However, modding games also has some disadvantages and dangers that you should be aware of and avoid. Therefore, you should always be careful and responsible when downloading and installing mods for your games. You should also respect the rights and wishes of the original game creators and modders, and give them proper credit and feedback for their work.</p>
|
98 |
-
<p>If you are interested in modding games, you can explore more online resources and communities that can help you find, download, install, create, or share mods for your games. You can also learn more skills and knowledge about game design, programming, art, and more by studying how mods are made and how they work.</p>
|
99 |
-
<p>We hope this article has been helpful and informative for you. Happy modding!</p>
|
100 |
-
<h2>FAQs</h2>
|
101 |
-
<p>Here are some common or relevant questions that readers may have about mod dmod:</p>
|
102 |
-
<ol>
|
103 |
-
<li><b>What is the difference between mod dmod and hack?</b></li>
|
104 |
-
<p>A mod dmod is a modification or addition of new features to an existing game, while a hack is a manipulation or alteration of the game code or data to gain an unfair advantage or bypass restrictions. Mods are usually made for fun or creativity, while hacks are usually made for cheating or exploiting. Mods are usually legal and authorized by the original game developers or publishers, while hacks are usually illegal and unauthorized by them.</p>
|
105 |
-
<li><b>Where can I find mods for my games?</b></li>
|
106 |
-
<p>You can find mods for your games online on various websites, forums, blogs, videos, reviews, or recommendations. Some popular mod websites are <a href="">Mod DB</a>, <a href="">Nexus Mods</a>, <a href="">APKPure</a>, <a href="">HappyMod</a>, <a href="">Android-1</a>, etc. You can also find mods on social media platforms such as Facebook, Twitter, Instagram, YouTube, Reddit, Discord, etc.</p>
|
107 |
-
<li><b>How do I know if a mod is safe and secure?</b></li>
|
108 |
-
<p>You can check if a mod is safe and secure by following these tips: <ul>
|
109 |
-
<li>Download mods from reputable and trusted sources that have positive ratings, reviews, or feedback from other users.</li>
|
110 |
-
<li>Scan the mod file with an antivirus or anti-malware software before installing it on your device.</li>
|
111 |
-
<li>Read the description, instructions, permissions, requirements, changelog, updates, comments, or FAQs of the mod carefully before installing it on your device.</li>
|
112 |
-
<li>Avoid mods that ask for too many or unnecessary permissions or resources that may harm your device or data.</li>
|
113 |
-
<li>Avoid mods that use unauthorized or illegal content or features that may infringe on the rights of the original game creators or publishers.</li>
|
114 |
-
</ul>
|
115 |
-
</p>
|
116 |
-
<li><b>How do I uninstall or remove mods from my games?</b></li>
|
117 |
-
<p>You can uninstall or remove mods from your games by following these steps: <ul>
|
118 |
-
<li>Find the mod file or folder that you want to uninstall or remove on your device using a file manager app.</li>
|
119 |
-
<li>Delete the mod file or folder from your device, or move it to another location if you want to keep it for later use.</li>
|
120 |
-
<li>Launch the original game app on your device and check if the mod is gone or disabled.</li>
|
121 |
-
<li>Restore your original game files and data from a backup if you have one, or reinstall the original game app from the official source if you don't have one.</li>
|
122 |
-
</ul>
|
123 |
-
</p>
|
124 |
-
<li><b>What are some of the best mods for my games?</b></li>
|
125 |
-
<p>The answer to this question depends on your personal preferences and tastes, as well as the type and genre of your games. However, here are some of the most popular and recommended mods for some of the most popular and played games on Android devices: <ul>
|
126 |
-
<li>Minecraft: <a href="">Optifine</a>, <a href="">Shaders</a>, <a href="">Biomes O' Plenty</a>, <a href="">Twilight Forest</a>, <a href="">Pixelmon</a>, etc.</li>
|
127 |
-
<li>GTA San Andreas: <a href="">Cleo Mods</a>, <a href="">GTA V Graphics Mod</a>, <a href="">Superman Mod</a>, <a href="">Zombie Apocalypse Mod</a>, <a href="">Iron Man Mod</a>, etc.</li>
|
128 |
-
<li>Dmod: <a href="">The Community Project 2</a>, <a href="">The Last Stand 2</a>, <a href="">The Final Quest 2</a>, <a href="">The Scourger 2</a>, <a href="">The Blacksmith's Apprentice 2</a>, etc.</li>
|
129 |
-
</ul>
|
130 |
-
</p>
|
131 |
-
</ol></p> 401be4b1e0<br />
|
132 |
-
<br />
|
133 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1toTree/lora_test/ppdiffusers/utils/logging.py
DELETED
@@ -1,339 +0,0 @@
|
|
1 |
-
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
|
2 |
-
# Copyright 2020 Optuna, Hugging Face
|
3 |
-
#
|
4 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
# you may not use this file except in compliance with the License.
|
6 |
-
# You may obtain a copy of the License at
|
7 |
-
#
|
8 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
#
|
10 |
-
# Unless required by applicable law or agreed to in writing, software
|
11 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
# See the License for the specific language governing permissions and
|
14 |
-
# limitations under the License.
|
15 |
-
""" Logging utilities."""
|
16 |
-
|
17 |
-
import logging
|
18 |
-
import os
|
19 |
-
import sys
|
20 |
-
import threading
|
21 |
-
from logging import CRITICAL # NOQA
|
22 |
-
from logging import DEBUG # NOQA
|
23 |
-
from logging import ERROR # NOQA
|
24 |
-
from logging import FATAL # NOQA
|
25 |
-
from logging import INFO # NOQA
|
26 |
-
from logging import NOTSET # NOQA
|
27 |
-
from logging import WARN # NOQA
|
28 |
-
from logging import WARNING # NOQA
|
29 |
-
from typing import Optional
|
30 |
-
|
31 |
-
from tqdm import auto as tqdm_lib
|
32 |
-
|
33 |
-
_lock = threading.Lock()
|
34 |
-
_default_handler: Optional[logging.Handler] = None
|
35 |
-
|
36 |
-
log_levels = {
|
37 |
-
"debug": logging.DEBUG,
|
38 |
-
"info": logging.INFO,
|
39 |
-
"warning": logging.WARNING,
|
40 |
-
"error": logging.ERROR,
|
41 |
-
"critical": logging.CRITICAL,
|
42 |
-
}
|
43 |
-
|
44 |
-
_default_log_level = logging.WARNING
|
45 |
-
|
46 |
-
_tqdm_active = True
|
47 |
-
|
48 |
-
|
49 |
-
def _get_default_logging_level():
|
50 |
-
"""
|
51 |
-
If PPDIFFUSERS_VERBOSITY env var is set to one of the valid choices return that as the new default level. If it is
|
52 |
-
not - fall back to `_default_log_level`
|
53 |
-
"""
|
54 |
-
env_level_str = os.getenv("PPDIFFUSERS_VERBOSITY", None)
|
55 |
-
if env_level_str:
|
56 |
-
if env_level_str in log_levels:
|
57 |
-
return log_levels[env_level_str]
|
58 |
-
else:
|
59 |
-
logging.getLogger().warning(
|
60 |
-
f"Unknown option PPDIFFUSERS_VERBOSITY={env_level_str}, "
|
61 |
-
f"has to be one of: { ', '.join(log_levels.keys()) }"
|
62 |
-
)
|
63 |
-
return _default_log_level
|
64 |
-
|
65 |
-
|
66 |
-
def _get_library_name() -> str:
|
67 |
-
return __name__.split(".")[0]
|
68 |
-
|
69 |
-
|
70 |
-
def _get_library_root_logger() -> logging.Logger:
|
71 |
-
return logging.getLogger(_get_library_name())
|
72 |
-
|
73 |
-
|
74 |
-
def _configure_library_root_logger() -> None:
|
75 |
-
global _default_handler
|
76 |
-
|
77 |
-
with _lock:
|
78 |
-
if _default_handler:
|
79 |
-
# This library has already configured the library root logger.
|
80 |
-
return
|
81 |
-
_default_handler = logging.StreamHandler() # Set sys.stderr as stream.
|
82 |
-
_default_handler.flush = sys.stderr.flush
|
83 |
-
|
84 |
-
# Apply our default configuration to the library root logger.
|
85 |
-
library_root_logger = _get_library_root_logger()
|
86 |
-
library_root_logger.addHandler(_default_handler)
|
87 |
-
library_root_logger.setLevel(_get_default_logging_level())
|
88 |
-
library_root_logger.propagate = False
|
89 |
-
|
90 |
-
|
91 |
-
def _reset_library_root_logger() -> None:
|
92 |
-
global _default_handler
|
93 |
-
|
94 |
-
with _lock:
|
95 |
-
if not _default_handler:
|
96 |
-
return
|
97 |
-
|
98 |
-
library_root_logger = _get_library_root_logger()
|
99 |
-
library_root_logger.removeHandler(_default_handler)
|
100 |
-
library_root_logger.setLevel(logging.NOTSET)
|
101 |
-
_default_handler = None
|
102 |
-
|
103 |
-
|
104 |
-
def get_log_levels_dict():
|
105 |
-
return log_levels
|
106 |
-
|
107 |
-
|
108 |
-
def get_logger(name: Optional[str] = None) -> logging.Logger:
|
109 |
-
"""
|
110 |
-
Return a logger with the specified name.
|
111 |
-
|
112 |
-
This function is not supposed to be directly accessed unless you are writing a custom ppdiffusers module.
|
113 |
-
"""
|
114 |
-
|
115 |
-
if name is None:
|
116 |
-
name = _get_library_name()
|
117 |
-
|
118 |
-
_configure_library_root_logger()
|
119 |
-
return logging.getLogger(name)
|
120 |
-
|
121 |
-
|
122 |
-
def get_verbosity() -> int:
|
123 |
-
"""
|
124 |
-
Return the current level for the PaddleNLP PPDiffusers' root logger as an int.
|
125 |
-
|
126 |
-
Returns:
|
127 |
-
`int`: The logging level.
|
128 |
-
|
129 |
-
<Tip>
|
130 |
-
|
131 |
-
PaddleNLP PPDiffusers has following logging levels:
|
132 |
-
|
133 |
-
- 50: `ppdiffusers.logging.CRITICAL` or `ppdiffusers.logging.FATAL`
|
134 |
-
- 40: `ppdiffusers.logging.ERROR`
|
135 |
-
- 30: `ppdiffusers.logging.WARNING` or `ppdiffusers.logging.WARN`
|
136 |
-
- 20: `ppdiffusers.logging.INFO`
|
137 |
-
- 10: `ppdiffusers.logging.DEBUG`
|
138 |
-
|
139 |
-
</Tip>"""
|
140 |
-
|
141 |
-
_configure_library_root_logger()
|
142 |
-
return _get_library_root_logger().getEffectiveLevel()
|
143 |
-
|
144 |
-
|
145 |
-
def set_verbosity(verbosity: int) -> None:
|
146 |
-
"""
|
147 |
-
Set the verbosity level for the PaddleNLP PPDiffusers' root logger.
|
148 |
-
|
149 |
-
Args:
|
150 |
-
verbosity (`int`):
|
151 |
-
Logging level, e.g., one of:
|
152 |
-
|
153 |
-
- `ppdiffusers.logging.CRITICAL` or `ppdiffusers.logging.FATAL`
|
154 |
-
- `ppdiffusers.logging.ERROR`
|
155 |
-
- `ppdiffusers.logging.WARNING` or `ppdiffusers.logging.WARN`
|
156 |
-
- `ppdiffusers.logging.INFO`
|
157 |
-
- `ppdiffusers.logging.DEBUG`
|
158 |
-
"""
|
159 |
-
|
160 |
-
_configure_library_root_logger()
|
161 |
-
_get_library_root_logger().setLevel(verbosity)
|
162 |
-
|
163 |
-
|
164 |
-
def set_verbosity_info():
|
165 |
-
"""Set the verbosity to the `INFO` level."""
|
166 |
-
return set_verbosity(INFO)
|
167 |
-
|
168 |
-
|
169 |
-
def set_verbosity_warning():
|
170 |
-
"""Set the verbosity to the `WARNING` level."""
|
171 |
-
return set_verbosity(WARNING)
|
172 |
-
|
173 |
-
|
174 |
-
def set_verbosity_debug():
|
175 |
-
"""Set the verbosity to the `DEBUG` level."""
|
176 |
-
return set_verbosity(DEBUG)
|
177 |
-
|
178 |
-
|
179 |
-
def set_verbosity_error():
|
180 |
-
"""Set the verbosity to the `ERROR` level."""
|
181 |
-
return set_verbosity(ERROR)
|
182 |
-
|
183 |
-
|
184 |
-
def disable_default_handler() -> None:
|
185 |
-
"""Disable the default handler of the PaddleNLP PPDiffusers' root logger."""
|
186 |
-
|
187 |
-
_configure_library_root_logger()
|
188 |
-
|
189 |
-
assert _default_handler is not None
|
190 |
-
_get_library_root_logger().removeHandler(_default_handler)
|
191 |
-
|
192 |
-
|
193 |
-
def enable_default_handler() -> None:
|
194 |
-
"""Enable the default handler of the PaddleNLP PPDiffusers' root logger."""
|
195 |
-
|
196 |
-
_configure_library_root_logger()
|
197 |
-
|
198 |
-
assert _default_handler is not None
|
199 |
-
_get_library_root_logger().addHandler(_default_handler)
|
200 |
-
|
201 |
-
|
202 |
-
def add_handler(handler: logging.Handler) -> None:
|
203 |
-
"""adds a handler to the PaddleNLP PPDiffusers' root logger."""
|
204 |
-
|
205 |
-
_configure_library_root_logger()
|
206 |
-
|
207 |
-
assert handler is not None
|
208 |
-
_get_library_root_logger().addHandler(handler)
|
209 |
-
|
210 |
-
|
211 |
-
def remove_handler(handler: logging.Handler) -> None:
|
212 |
-
"""removes given handler from the PaddleNLP PPDiffusers' root logger."""
|
213 |
-
|
214 |
-
_configure_library_root_logger()
|
215 |
-
|
216 |
-
assert handler is not None and handler not in _get_library_root_logger().handlers
|
217 |
-
_get_library_root_logger().removeHandler(handler)
|
218 |
-
|
219 |
-
|
220 |
-
def disable_propagation() -> None:
|
221 |
-
"""
|
222 |
-
Disable propagation of the library log outputs. Note that log propagation is disabled by default.
|
223 |
-
"""
|
224 |
-
|
225 |
-
_configure_library_root_logger()
|
226 |
-
_get_library_root_logger().propagate = False
|
227 |
-
|
228 |
-
|
229 |
-
def enable_propagation() -> None:
|
230 |
-
"""
|
231 |
-
Enable propagation of the library log outputs. Please disable the PaddleNLP PPDiffusers' default handler to prevent
|
232 |
-
double logging if the root logger has been configured.
|
233 |
-
"""
|
234 |
-
|
235 |
-
_configure_library_root_logger()
|
236 |
-
_get_library_root_logger().propagate = True
|
237 |
-
|
238 |
-
|
239 |
-
def enable_explicit_format() -> None:
|
240 |
-
"""
|
241 |
-
Enable explicit formatting for every PaddleNLP PPDiffusers' logger. The explicit formatter is as follows:
|
242 |
-
```
|
243 |
-
[LEVELNAME|FILENAME|LINE NUMBER] TIME >> MESSAGE
|
244 |
-
```
|
245 |
-
All handlers currently bound to the root logger are affected by this method.
|
246 |
-
"""
|
247 |
-
handlers = _get_library_root_logger().handlers
|
248 |
-
|
249 |
-
for handler in handlers:
|
250 |
-
formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
|
251 |
-
handler.setFormatter(formatter)
|
252 |
-
|
253 |
-
|
254 |
-
def reset_format() -> None:
|
255 |
-
"""
|
256 |
-
Resets the formatting for PaddleNLP PPDiffusers' loggers.
|
257 |
-
|
258 |
-
All handlers currently bound to the root logger are affected by this method.
|
259 |
-
"""
|
260 |
-
handlers = _get_library_root_logger().handlers
|
261 |
-
|
262 |
-
for handler in handlers:
|
263 |
-
handler.setFormatter(None)
|
264 |
-
|
265 |
-
|
266 |
-
def warning_advice(self, *args, **kwargs):
|
267 |
-
"""
|
268 |
-
This method is identical to `logger.warning()`, but if env var PPDIFFUSERS_NO_ADVISORY_WARNINGS=1 is set, this
|
269 |
-
warning will not be printed
|
270 |
-
"""
|
271 |
-
no_advisory_warnings = os.getenv("PPDIFFUSERS_NO_ADVISORY_WARNINGS", False)
|
272 |
-
if no_advisory_warnings:
|
273 |
-
return
|
274 |
-
self.warning(*args, **kwargs)
|
275 |
-
|
276 |
-
|
277 |
-
logging.Logger.warning_advice = warning_advice
|
278 |
-
|
279 |
-
|
280 |
-
class EmptyTqdm:
|
281 |
-
"""Dummy tqdm which doesn't do anything."""
|
282 |
-
|
283 |
-
def __init__(self, *args, **kwargs): # pylint: disable=unused-argument
|
284 |
-
self._iterator = args[0] if args else None
|
285 |
-
|
286 |
-
def __iter__(self):
|
287 |
-
return iter(self._iterator)
|
288 |
-
|
289 |
-
def __getattr__(self, _):
|
290 |
-
"""Return empty function."""
|
291 |
-
|
292 |
-
def empty_fn(*args, **kwargs): # pylint: disable=unused-argument
|
293 |
-
return
|
294 |
-
|
295 |
-
return empty_fn
|
296 |
-
|
297 |
-
def __enter__(self):
|
298 |
-
return self
|
299 |
-
|
300 |
-
def __exit__(self, type_, value, traceback):
|
301 |
-
return
|
302 |
-
|
303 |
-
|
304 |
-
class _tqdm_cls:
|
305 |
-
def __call__(self, *args, **kwargs):
|
306 |
-
if _tqdm_active:
|
307 |
-
return tqdm_lib.tqdm(*args, **kwargs)
|
308 |
-
else:
|
309 |
-
return EmptyTqdm(*args, **kwargs)
|
310 |
-
|
311 |
-
def set_lock(self, *args, **kwargs):
|
312 |
-
self._lock = None
|
313 |
-
if _tqdm_active:
|
314 |
-
return tqdm_lib.tqdm.set_lock(*args, **kwargs)
|
315 |
-
|
316 |
-
def get_lock(self):
|
317 |
-
if _tqdm_active:
|
318 |
-
return tqdm_lib.tqdm.get_lock()
|
319 |
-
|
320 |
-
|
321 |
-
tqdm = _tqdm_cls()
|
322 |
-
|
323 |
-
|
324 |
-
def is_progress_bar_enabled() -> bool:
|
325 |
-
"""Return a boolean indicating whether tqdm progress bars are enabled."""
|
326 |
-
global _tqdm_active
|
327 |
-
return bool(_tqdm_active)
|
328 |
-
|
329 |
-
|
330 |
-
def enable_progress_bar():
|
331 |
-
"""Enable tqdm progress bar."""
|
332 |
-
global _tqdm_active
|
333 |
-
_tqdm_active = True
|
334 |
-
|
335 |
-
|
336 |
-
def disable_progress_bar():
|
337 |
-
"""Disable tqdm progress bar."""
|
338 |
-
global _tqdm_active
|
339 |
-
_tqdm_active = False
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/7hao/bingo/src/pages/api/kblob.ts
DELETED
@@ -1,56 +0,0 @@
|
|
1 |
-
'use server'
|
2 |
-
|
3 |
-
import { NextApiRequest, NextApiResponse } from 'next'
|
4 |
-
import FormData from 'form-data'
|
5 |
-
import { fetch } from '@/lib/isomorphic'
|
6 |
-
import { KBlobRequest } from '@/lib/bots/bing/types'
|
7 |
-
|
8 |
-
const API_DOMAIN = 'https://bing.vcanbb.top'
|
9 |
-
|
10 |
-
export const config = {
|
11 |
-
api: {
|
12 |
-
bodyParser: {
|
13 |
-
sizeLimit: '10mb' // Set desired value here
|
14 |
-
}
|
15 |
-
}
|
16 |
-
}
|
17 |
-
|
18 |
-
export default async function handler(req: NextApiRequest, res: NextApiResponse) {
|
19 |
-
try {
|
20 |
-
const { knowledgeRequest, imageBase64 } = req.body as KBlobRequest
|
21 |
-
|
22 |
-
const formData = new FormData()
|
23 |
-
formData.append('knowledgeRequest', JSON.stringify(knowledgeRequest))
|
24 |
-
if (imageBase64) {
|
25 |
-
formData.append('imageBase64', imageBase64)
|
26 |
-
}
|
27 |
-
|
28 |
-
const response = await fetch(`${API_DOMAIN}/images/kblob`,
|
29 |
-
{
|
30 |
-
method: 'POST',
|
31 |
-
body: formData.getBuffer(),
|
32 |
-
headers: {
|
33 |
-
"sec-ch-ua": "\"Not/A)Brand\";v=\"99\", \"Google Chrome\";v=\"115\", \"Chromium\";v=\"115\"",
|
34 |
-
"sec-ch-ua-mobile": "?0",
|
35 |
-
"sec-ch-ua-platform": "\"Windows\"",
|
36 |
-
"Referer": `${API_DOMAIN}/web/index.html`,
|
37 |
-
"Referrer-Policy": "origin-when-cross-origin",
|
38 |
-
'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32',
|
39 |
-
...formData.getHeaders()
|
40 |
-
}
|
41 |
-
}
|
42 |
-
).then(res => res.text())
|
43 |
-
|
44 |
-
res.writeHead(200, {
|
45 |
-
'Content-Type': 'application/json',
|
46 |
-
})
|
47 |
-
res.end(response || JSON.stringify({ result: { value: 'UploadFailed', message: '请更换 IP 或代理后重试' } }))
|
48 |
-
} catch (e) {
|
49 |
-
return res.json({
|
50 |
-
result: {
|
51 |
-
value: 'UploadFailed',
|
52 |
-
message: `${e}`
|
53 |
-
}
|
54 |
-
})
|
55 |
-
}
|
56 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AI-Hobbyist/Hoyo-RVC/vc_infer_pipeline.py
DELETED
@@ -1,431 +0,0 @@
|
|
1 |
-
import numpy as np, parselmouth, torch, pdb
|
2 |
-
from time import time as ttime
|
3 |
-
import torch.nn.functional as F
|
4 |
-
import scipy.signal as signal
|
5 |
-
import pyworld, os, traceback, faiss, librosa, torchcrepe
|
6 |
-
from scipy import signal
|
7 |
-
from functools import lru_cache
|
8 |
-
|
9 |
-
bh, ah = signal.butter(N=5, Wn=48, btype="high", fs=16000)
|
10 |
-
|
11 |
-
input_audio_path2wav = {}
|
12 |
-
|
13 |
-
|
14 |
-
@lru_cache
|
15 |
-
def cache_harvest_f0(input_audio_path, fs, f0max, f0min, frame_period):
|
16 |
-
audio = input_audio_path2wav[input_audio_path]
|
17 |
-
f0, t = pyworld.harvest(
|
18 |
-
audio,
|
19 |
-
fs=fs,
|
20 |
-
f0_ceil=f0max,
|
21 |
-
f0_floor=f0min,
|
22 |
-
frame_period=frame_period,
|
23 |
-
)
|
24 |
-
f0 = pyworld.stonemask(audio, f0, t, fs)
|
25 |
-
return f0
|
26 |
-
|
27 |
-
|
28 |
-
def change_rms(data1, sr1, data2, sr2, rate): # 1是输入音频,2是输出音频,rate是2的占比
|
29 |
-
# print(data1.max(),data2.max())
|
30 |
-
rms1 = librosa.feature.rms(
|
31 |
-
y=data1, frame_length=sr1 // 2 * 2, hop_length=sr1 // 2
|
32 |
-
) # 每半秒一个点
|
33 |
-
rms2 = librosa.feature.rms(y=data2, frame_length=sr2 // 2 * 2, hop_length=sr2 // 2)
|
34 |
-
rms1 = torch.from_numpy(rms1)
|
35 |
-
rms1 = F.interpolate(
|
36 |
-
rms1.unsqueeze(0), size=data2.shape[0], mode="linear"
|
37 |
-
).squeeze()
|
38 |
-
rms2 = torch.from_numpy(rms2)
|
39 |
-
rms2 = F.interpolate(
|
40 |
-
rms2.unsqueeze(0), size=data2.shape[0], mode="linear"
|
41 |
-
).squeeze()
|
42 |
-
rms2 = torch.max(rms2, torch.zeros_like(rms2) + 1e-6)
|
43 |
-
data2 *= (
|
44 |
-
torch.pow(rms1, torch.tensor(1 - rate))
|
45 |
-
* torch.pow(rms2, torch.tensor(rate - 1))
|
46 |
-
).numpy()
|
47 |
-
return data2
|
48 |
-
|
49 |
-
|
50 |
-
class VC(object):
|
51 |
-
def __init__(self, tgt_sr, config):
|
52 |
-
self.x_pad, self.x_query, self.x_center, self.x_max, self.is_half = (
|
53 |
-
config.x_pad,
|
54 |
-
config.x_query,
|
55 |
-
config.x_center,
|
56 |
-
config.x_max,
|
57 |
-
config.is_half,
|
58 |
-
)
|
59 |
-
self.sr = 16000 # hubert输入采样率
|
60 |
-
self.window = 160 # 每帧点数
|
61 |
-
self.t_pad = self.sr * self.x_pad # 每条前后pad时间
|
62 |
-
self.t_pad_tgt = tgt_sr * self.x_pad
|
63 |
-
self.t_pad2 = self.t_pad * 2
|
64 |
-
self.t_query = self.sr * self.x_query # 查询切点前后查询时间
|
65 |
-
self.t_center = self.sr * self.x_center # 查询切点位置
|
66 |
-
self.t_max = self.sr * self.x_max # 免查询时长阈值
|
67 |
-
self.device = config.device
|
68 |
-
|
69 |
-
def get_f0(
|
70 |
-
self,
|
71 |
-
input_audio_path,
|
72 |
-
x,
|
73 |
-
p_len,
|
74 |
-
f0_up_key,
|
75 |
-
f0_method,
|
76 |
-
filter_radius,
|
77 |
-
inp_f0=None,
|
78 |
-
):
|
79 |
-
global input_audio_path2wav
|
80 |
-
time_step = self.window / self.sr * 1000
|
81 |
-
f0_min = 50
|
82 |
-
f0_max = 1100
|
83 |
-
f0_mel_min = 1127 * np.log(1 + f0_min / 700)
|
84 |
-
f0_mel_max = 1127 * np.log(1 + f0_max / 700)
|
85 |
-
if f0_method == "pm":
|
86 |
-
f0 = (
|
87 |
-
parselmouth.Sound(x, self.sr)
|
88 |
-
.to_pitch_ac(
|
89 |
-
time_step=time_step / 1000,
|
90 |
-
voicing_threshold=0.6,
|
91 |
-
pitch_floor=f0_min,
|
92 |
-
pitch_ceiling=f0_max,
|
93 |
-
)
|
94 |
-
.selected_array["frequency"]
|
95 |
-
)
|
96 |
-
pad_size = (p_len - len(f0) + 1) // 2
|
97 |
-
if pad_size > 0 or p_len - len(f0) - pad_size > 0:
|
98 |
-
f0 = np.pad(
|
99 |
-
f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant"
|
100 |
-
)
|
101 |
-
elif f0_method == "harvest":
|
102 |
-
input_audio_path2wav[input_audio_path] = x.astype(np.double)
|
103 |
-
f0 = cache_harvest_f0(input_audio_path, self.sr, f0_max, f0_min, 10)
|
104 |
-
if filter_radius > 2:
|
105 |
-
f0 = signal.medfilt(f0, 3)
|
106 |
-
elif f0_method == "crepe":
|
107 |
-
model = "full"
|
108 |
-
# Pick a batch size that doesn't cause memory errors on your gpu
|
109 |
-
batch_size = 512
|
110 |
-
# Compute pitch using first gpu
|
111 |
-
audio = torch.tensor(np.copy(x))[None].float()
|
112 |
-
f0, pd = torchcrepe.predict(
|
113 |
-
audio,
|
114 |
-
self.sr,
|
115 |
-
self.window,
|
116 |
-
f0_min,
|
117 |
-
f0_max,
|
118 |
-
model,
|
119 |
-
batch_size=batch_size,
|
120 |
-
device=self.device,
|
121 |
-
return_periodicity=True,
|
122 |
-
)
|
123 |
-
pd = torchcrepe.filter.median(pd, 3)
|
124 |
-
f0 = torchcrepe.filter.mean(f0, 3)
|
125 |
-
f0[pd < 0.1] = 0
|
126 |
-
f0 = f0[0].cpu().numpy()
|
127 |
-
f0 *= pow(2, f0_up_key / 12)
|
128 |
-
# with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
|
129 |
-
tf0 = self.sr // self.window # 每秒f0点数
|
130 |
-
if inp_f0 is not None:
|
131 |
-
delta_t = np.round(
|
132 |
-
(inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1
|
133 |
-
).astype("int16")
|
134 |
-
replace_f0 = np.interp(
|
135 |
-
list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1]
|
136 |
-
)
|
137 |
-
shape = f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)].shape[0]
|
138 |
-
f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)] = replace_f0[
|
139 |
-
:shape
|
140 |
-
]
|
141 |
-
# with open("test_opt.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
|
142 |
-
f0bak = f0.copy()
|
143 |
-
f0_mel = 1127 * np.log(1 + f0 / 700)
|
144 |
-
f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
|
145 |
-
f0_mel_max - f0_mel_min
|
146 |
-
) + 1
|
147 |
-
f0_mel[f0_mel <= 1] = 1
|
148 |
-
f0_mel[f0_mel > 255] = 255
|
149 |
-
f0_coarse = np.rint(f0_mel).astype(np.int)
|
150 |
-
return f0_coarse, f0bak # 1-0
|
151 |
-
|
152 |
-
def vc(
|
153 |
-
self,
|
154 |
-
model,
|
155 |
-
net_g,
|
156 |
-
sid,
|
157 |
-
audio0,
|
158 |
-
pitch,
|
159 |
-
pitchf,
|
160 |
-
times,
|
161 |
-
index,
|
162 |
-
big_npy,
|
163 |
-
index_rate,
|
164 |
-
version,
|
165 |
-
protect,
|
166 |
-
): # ,file_index,file_big_npy
|
167 |
-
feats = torch.from_numpy(audio0)
|
168 |
-
if self.is_half:
|
169 |
-
feats = feats.half()
|
170 |
-
else:
|
171 |
-
feats = feats.float()
|
172 |
-
if feats.dim() == 2: # double channels
|
173 |
-
feats = feats.mean(-1)
|
174 |
-
assert feats.dim() == 1, feats.dim()
|
175 |
-
feats = feats.view(1, -1)
|
176 |
-
padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False)
|
177 |
-
|
178 |
-
inputs = {
|
179 |
-
"source": feats.to(self.device),
|
180 |
-
"padding_mask": padding_mask,
|
181 |
-
"output_layer": 9 if version == "v1" else 12,
|
182 |
-
}
|
183 |
-
t0 = ttime()
|
184 |
-
with torch.no_grad():
|
185 |
-
logits = model.extract_features(**inputs)
|
186 |
-
feats = model.final_proj(logits[0]) if version == "v1" else logits[0]
|
187 |
-
if protect < 0.5 and pitch != None and pitchf != None:
|
188 |
-
feats0 = feats.clone()
|
189 |
-
if (
|
190 |
-
isinstance(index, type(None)) == False
|
191 |
-
and isinstance(big_npy, type(None)) == False
|
192 |
-
and index_rate != 0
|
193 |
-
):
|
194 |
-
npy = feats[0].cpu().numpy()
|
195 |
-
if self.is_half:
|
196 |
-
npy = npy.astype("float32")
|
197 |
-
|
198 |
-
# _, I = index.search(npy, 1)
|
199 |
-
# npy = big_npy[I.squeeze()]
|
200 |
-
|
201 |
-
score, ix = index.search(npy, k=8)
|
202 |
-
weight = np.square(1 / score)
|
203 |
-
weight /= weight.sum(axis=1, keepdims=True)
|
204 |
-
npy = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)
|
205 |
-
|
206 |
-
if self.is_half:
|
207 |
-
npy = npy.astype("float16")
|
208 |
-
feats = (
|
209 |
-
torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate
|
210 |
-
+ (1 - index_rate) * feats
|
211 |
-
)
|
212 |
-
|
213 |
-
feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)
|
214 |
-
if protect < 0.5 and pitch != None and pitchf != None:
|
215 |
-
feats0 = F.interpolate(feats0.permute(0, 2, 1), scale_factor=2).permute(
|
216 |
-
0, 2, 1
|
217 |
-
)
|
218 |
-
t1 = ttime()
|
219 |
-
p_len = audio0.shape[0] // self.window
|
220 |
-
if feats.shape[1] < p_len:
|
221 |
-
p_len = feats.shape[1]
|
222 |
-
if pitch != None and pitchf != None:
|
223 |
-
pitch = pitch[:, :p_len]
|
224 |
-
pitchf = pitchf[:, :p_len]
|
225 |
-
|
226 |
-
if protect < 0.5 and pitch != None and pitchf != None:
|
227 |
-
pitchff = pitchf.clone()
|
228 |
-
pitchff[pitchf > 0] = 1
|
229 |
-
pitchff[pitchf < 1] = protect
|
230 |
-
pitchff = pitchff.unsqueeze(-1)
|
231 |
-
feats = feats * pitchff + feats0 * (1 - pitchff)
|
232 |
-
feats = feats.to(feats0.dtype)
|
233 |
-
p_len = torch.tensor([p_len], device=self.device).long()
|
234 |
-
with torch.no_grad():
|
235 |
-
if pitch != None and pitchf != None:
|
236 |
-
audio1 = (
|
237 |
-
(net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0])
|
238 |
-
.data.cpu()
|
239 |
-
.float()
|
240 |
-
.numpy()
|
241 |
-
)
|
242 |
-
else:
|
243 |
-
audio1 = (
|
244 |
-
(net_g.infer(feats, p_len, sid)[0][0, 0]).data.cpu().float().numpy()
|
245 |
-
)
|
246 |
-
del feats, p_len, padding_mask
|
247 |
-
if torch.cuda.is_available():
|
248 |
-
torch.cuda.empty_cache()
|
249 |
-
t2 = ttime()
|
250 |
-
times[0] += t1 - t0
|
251 |
-
times[2] += t2 - t1
|
252 |
-
return audio1
|
253 |
-
|
254 |
-
def pipeline(
|
255 |
-
self,
|
256 |
-
model,
|
257 |
-
net_g,
|
258 |
-
sid,
|
259 |
-
audio,
|
260 |
-
input_audio_path,
|
261 |
-
times,
|
262 |
-
f0_up_key,
|
263 |
-
f0_method,
|
264 |
-
file_index,
|
265 |
-
# file_big_npy,
|
266 |
-
index_rate,
|
267 |
-
if_f0,
|
268 |
-
filter_radius,
|
269 |
-
tgt_sr,
|
270 |
-
resample_sr,
|
271 |
-
rms_mix_rate,
|
272 |
-
version,
|
273 |
-
protect,
|
274 |
-
f0_file=None,
|
275 |
-
):
|
276 |
-
if (
|
277 |
-
file_index != ""
|
278 |
-
# and file_big_npy != ""
|
279 |
-
# and os.path.exists(file_big_npy) == True
|
280 |
-
and os.path.exists(file_index) == True
|
281 |
-
and index_rate != 0
|
282 |
-
):
|
283 |
-
try:
|
284 |
-
index = faiss.read_index(file_index)
|
285 |
-
# big_npy = np.load(file_big_npy)
|
286 |
-
big_npy = index.reconstruct_n(0, index.ntotal)
|
287 |
-
except:
|
288 |
-
traceback.print_exc()
|
289 |
-
index = big_npy = None
|
290 |
-
else:
|
291 |
-
index = big_npy = None
|
292 |
-
audio = signal.filtfilt(bh, ah, audio)
|
293 |
-
audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode="reflect")
|
294 |
-
opt_ts = []
|
295 |
-
if audio_pad.shape[0] > self.t_max:
|
296 |
-
audio_sum = np.zeros_like(audio)
|
297 |
-
for i in range(self.window):
|
298 |
-
audio_sum += audio_pad[i : i - self.window]
|
299 |
-
for t in range(self.t_center, audio.shape[0], self.t_center):
|
300 |
-
opt_ts.append(
|
301 |
-
t
|
302 |
-
- self.t_query
|
303 |
-
+ np.where(
|
304 |
-
np.abs(audio_sum[t - self.t_query : t + self.t_query])
|
305 |
-
== np.abs(audio_sum[t - self.t_query : t + self.t_query]).min()
|
306 |
-
)[0][0]
|
307 |
-
)
|
308 |
-
s = 0
|
309 |
-
audio_opt = []
|
310 |
-
t = None
|
311 |
-
t1 = ttime()
|
312 |
-
audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode="reflect")
|
313 |
-
p_len = audio_pad.shape[0] // self.window
|
314 |
-
inp_f0 = None
|
315 |
-
if hasattr(f0_file, "name") == True:
|
316 |
-
try:
|
317 |
-
with open(f0_file.name, "r") as f:
|
318 |
-
lines = f.read().strip("\n").split("\n")
|
319 |
-
inp_f0 = []
|
320 |
-
for line in lines:
|
321 |
-
inp_f0.append([float(i) for i in line.split(",")])
|
322 |
-
inp_f0 = np.array(inp_f0, dtype="float32")
|
323 |
-
except:
|
324 |
-
traceback.print_exc()
|
325 |
-
sid = torch.tensor(sid, device=self.device).unsqueeze(0).long()
|
326 |
-
pitch, pitchf = None, None
|
327 |
-
if if_f0 == 1:
|
328 |
-
pitch, pitchf = self.get_f0(
|
329 |
-
input_audio_path,
|
330 |
-
audio_pad,
|
331 |
-
p_len,
|
332 |
-
f0_up_key,
|
333 |
-
f0_method,
|
334 |
-
filter_radius,
|
335 |
-
inp_f0,
|
336 |
-
)
|
337 |
-
pitch = pitch[:p_len]
|
338 |
-
pitchf = pitchf[:p_len]
|
339 |
-
if self.device == "mps":
|
340 |
-
pitchf = pitchf.astype(np.float32)
|
341 |
-
pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long()
|
342 |
-
pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float()
|
343 |
-
t2 = ttime()
|
344 |
-
times[1] += t2 - t1
|
345 |
-
for t in opt_ts:
|
346 |
-
t = t // self.window * self.window
|
347 |
-
if if_f0 == 1:
|
348 |
-
audio_opt.append(
|
349 |
-
self.vc(
|
350 |
-
model,
|
351 |
-
net_g,
|
352 |
-
sid,
|
353 |
-
audio_pad[s : t + self.t_pad2 + self.window],
|
354 |
-
pitch[:, s // self.window : (t + self.t_pad2) // self.window],
|
355 |
-
pitchf[:, s // self.window : (t + self.t_pad2) // self.window],
|
356 |
-
times,
|
357 |
-
index,
|
358 |
-
big_npy,
|
359 |
-
index_rate,
|
360 |
-
version,
|
361 |
-
protect,
|
362 |
-
)[self.t_pad_tgt : -self.t_pad_tgt]
|
363 |
-
)
|
364 |
-
else:
|
365 |
-
audio_opt.append(
|
366 |
-
self.vc(
|
367 |
-
model,
|
368 |
-
net_g,
|
369 |
-
sid,
|
370 |
-
audio_pad[s : t + self.t_pad2 + self.window],
|
371 |
-
None,
|
372 |
-
None,
|
373 |
-
times,
|
374 |
-
index,
|
375 |
-
big_npy,
|
376 |
-
index_rate,
|
377 |
-
version,
|
378 |
-
protect,
|
379 |
-
)[self.t_pad_tgt : -self.t_pad_tgt]
|
380 |
-
)
|
381 |
-
s = t
|
382 |
-
if if_f0 == 1:
|
383 |
-
audio_opt.append(
|
384 |
-
self.vc(
|
385 |
-
model,
|
386 |
-
net_g,
|
387 |
-
sid,
|
388 |
-
audio_pad[t:],
|
389 |
-
pitch[:, t // self.window :] if t is not None else pitch,
|
390 |
-
pitchf[:, t // self.window :] if t is not None else pitchf,
|
391 |
-
times,
|
392 |
-
index,
|
393 |
-
big_npy,
|
394 |
-
index_rate,
|
395 |
-
version,
|
396 |
-
protect,
|
397 |
-
)[self.t_pad_tgt : -self.t_pad_tgt]
|
398 |
-
)
|
399 |
-
else:
|
400 |
-
audio_opt.append(
|
401 |
-
self.vc(
|
402 |
-
model,
|
403 |
-
net_g,
|
404 |
-
sid,
|
405 |
-
audio_pad[t:],
|
406 |
-
None,
|
407 |
-
None,
|
408 |
-
times,
|
409 |
-
index,
|
410 |
-
big_npy,
|
411 |
-
index_rate,
|
412 |
-
version,
|
413 |
-
protect,
|
414 |
-
)[self.t_pad_tgt : -self.t_pad_tgt]
|
415 |
-
)
|
416 |
-
audio_opt = np.concatenate(audio_opt)
|
417 |
-
if rms_mix_rate != 1:
|
418 |
-
audio_opt = change_rms(audio, 16000, audio_opt, tgt_sr, rms_mix_rate)
|
419 |
-
if resample_sr >= 16000 and tgt_sr != resample_sr:
|
420 |
-
audio_opt = librosa.resample(
|
421 |
-
audio_opt, orig_sr=tgt_sr, target_sr=resample_sr
|
422 |
-
)
|
423 |
-
audio_max = np.abs(audio_opt).max() / 0.99
|
424 |
-
max_int16 = 32768
|
425 |
-
if audio_max > 1:
|
426 |
-
max_int16 /= audio_max
|
427 |
-
audio_opt = (audio_opt * max_int16).astype(np.int16)
|
428 |
-
del pitch, pitchf, sid
|
429 |
-
if torch.cuda.is_available():
|
430 |
-
torch.cuda.empty_cache()
|
431 |
-
return audio_opt
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/ALSv/FSW/roop/globals.py
DELETED
@@ -1,22 +0,0 @@
|
|
1 |
-
from typing import List, Optional
|
2 |
-
|
3 |
-
source_path: Optional[str] = None
|
4 |
-
target_path: Optional[str] = None
|
5 |
-
output_path: Optional[str] = None
|
6 |
-
headless: Optional[bool] = None
|
7 |
-
frame_processors: List[str] = []
|
8 |
-
keep_fps: Optional[bool] = None
|
9 |
-
keep_frames: Optional[bool] = None
|
10 |
-
skip_audio: Optional[bool] = None
|
11 |
-
many_faces: Optional[bool] = None
|
12 |
-
reference_face_position: Optional[int] = None
|
13 |
-
reference_frame_number: Optional[int] = None
|
14 |
-
similar_face_distance: Optional[float] = None
|
15 |
-
temp_frame_format: Optional[str] = None
|
16 |
-
temp_frame_quality: Optional[int] = None
|
17 |
-
output_video_encoder: Optional[str] = None
|
18 |
-
output_video_quality: Optional[int] = None
|
19 |
-
max_memory: Optional[int] = None
|
20 |
-
execution_providers: List[str] = []
|
21 |
-
execution_threads: Optional[int] = None
|
22 |
-
log_level: str = 'error'
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AP123/dreamgaussian/README.md
DELETED
@@ -1,11 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Dreamgaussian
|
3 |
-
emoji: 🏃
|
4 |
-
colorFrom: green
|
5 |
-
colorTo: green
|
6 |
-
sdk: static
|
7 |
-
pinned: false
|
8 |
-
license: mit
|
9 |
-
---
|
10 |
-
|
11 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/custom_dataset/__init__.py
DELETED
File without changes
|
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb32-60e_deepfashion2_short_sleeved_shirt_256x192/td_hm_res50_4xb32-60e_deepfashion2_short_sleeved_shirt_256x192.py
DELETED
@@ -1,2861 +0,0 @@
|
|
1 |
-
default_scope = 'mmpose'
|
2 |
-
default_hooks = dict(
|
3 |
-
timer=dict(type='IterTimerHook'),
|
4 |
-
logger=dict(type='LoggerHook', interval=50),
|
5 |
-
param_scheduler=dict(type='ParamSchedulerHook'),
|
6 |
-
checkpoint=dict(
|
7 |
-
type='CheckpointHook', interval=10, save_best='PCK', rule='greater'),
|
8 |
-
sampler_seed=dict(type='DistSamplerSeedHook'),
|
9 |
-
visualization=dict(type='PoseVisualizationHook', enable=False))
|
10 |
-
custom_hooks = [dict(type='SyncBuffersHook')]
|
11 |
-
env_cfg = dict(
|
12 |
-
cudnn_benchmark=False,
|
13 |
-
mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
|
14 |
-
dist_cfg=dict(backend='nccl'))
|
15 |
-
vis_backends = [dict(type='LocalVisBackend')]
|
16 |
-
visualizer = dict(
|
17 |
-
type='PoseLocalVisualizer',
|
18 |
-
vis_backends=[dict(type='LocalVisBackend'),
|
19 |
-
dict(type='WandbVisBackend')],
|
20 |
-
name='visualizer')
|
21 |
-
log_processor = dict(
|
22 |
-
type='LogProcessor', window_size=50, by_epoch=True, num_digits=6)
|
23 |
-
log_level = 'INFO'
|
24 |
-
load_from = None
|
25 |
-
resume = False
|
26 |
-
backend_args = dict(backend='local')
|
27 |
-
train_cfg = dict(by_epoch=True, max_epochs=60, val_interval=10)
|
28 |
-
val_cfg = dict()
|
29 |
-
test_cfg = dict()
|
30 |
-
colors = dict(
|
31 |
-
sss=[255, 128, 0],
|
32 |
-
lss=[255, 0, 128],
|
33 |
-
sso=[128, 0, 255],
|
34 |
-
lso=[0, 128, 255],
|
35 |
-
vest=[0, 128, 128],
|
36 |
-
sling=[0, 0, 128],
|
37 |
-
shorts=[128, 128, 128],
|
38 |
-
trousers=[128, 0, 128],
|
39 |
-
skirt=[64, 128, 128],
|
40 |
-
ssd=[64, 64, 128],
|
41 |
-
lsd=[128, 64, 0],
|
42 |
-
vd=[128, 64, 255],
|
43 |
-
sd=[128, 64, 0])
|
44 |
-
dataset_info = dict(
|
45 |
-
dataset_name='deepfashion2',
|
46 |
-
paper_info=dict(
|
47 |
-
author=
|
48 |
-
'Yuying Ge and Ruimao Zhang and Lingyun Wu and Xiaogang Wang and Xiaoou Tang and Ping Luo',
|
49 |
-
title=
|
50 |
-
'DeepFashion2: A Versatile Benchmark for Detection, Pose Estimation, Segmentation and Re-Identification of Clothing Images',
|
51 |
-
container=
|
52 |
-
'Proceedings of IEEE Conference on Computer Vision and Pattern Recognition (CVPR)',
|
53 |
-
year='2019',
|
54 |
-
homepage='https://github.com/switchablenorms/DeepFashion2'),
|
55 |
-
keypoint_info=dict({
|
56 |
-
0:
|
57 |
-
dict(name='sss_kpt1', id=0, color=[255, 128, 0], type='', swap=''),
|
58 |
-
1:
|
59 |
-
dict(
|
60 |
-
name='sss_kpt2',
|
61 |
-
id=1,
|
62 |
-
color=[255, 128, 0],
|
63 |
-
type='',
|
64 |
-
swap='sss_kpt6'),
|
65 |
-
2:
|
66 |
-
dict(
|
67 |
-
name='sss_kpt3',
|
68 |
-
id=2,
|
69 |
-
color=[255, 128, 0],
|
70 |
-
type='',
|
71 |
-
swap='sss_kpt5'),
|
72 |
-
3:
|
73 |
-
dict(name='sss_kpt4', id=3, color=[255, 128, 0], type='', swap=''),
|
74 |
-
4:
|
75 |
-
dict(
|
76 |
-
name='sss_kpt5',
|
77 |
-
id=4,
|
78 |
-
color=[255, 128, 0],
|
79 |
-
type='',
|
80 |
-
swap='sss_kpt3'),
|
81 |
-
5:
|
82 |
-
dict(
|
83 |
-
name='sss_kpt6',
|
84 |
-
id=5,
|
85 |
-
color=[255, 128, 0],
|
86 |
-
type='',
|
87 |
-
swap='sss_kpt2'),
|
88 |
-
6:
|
89 |
-
dict(
|
90 |
-
name='sss_kpt7',
|
91 |
-
id=6,
|
92 |
-
color=[255, 128, 0],
|
93 |
-
type='',
|
94 |
-
swap='sss_kpt25'),
|
95 |
-
7:
|
96 |
-
dict(
|
97 |
-
name='sss_kpt8',
|
98 |
-
id=7,
|
99 |
-
color=[255, 128, 0],
|
100 |
-
type='',
|
101 |
-
swap='sss_kpt24'),
|
102 |
-
8:
|
103 |
-
dict(
|
104 |
-
name='sss_kpt9',
|
105 |
-
id=8,
|
106 |
-
color=[255, 128, 0],
|
107 |
-
type='',
|
108 |
-
swap='sss_kpt23'),
|
109 |
-
9:
|
110 |
-
dict(
|
111 |
-
name='sss_kpt10',
|
112 |
-
id=9,
|
113 |
-
color=[255, 128, 0],
|
114 |
-
type='',
|
115 |
-
swap='sss_kpt22'),
|
116 |
-
10:
|
117 |
-
dict(
|
118 |
-
name='sss_kpt11',
|
119 |
-
id=10,
|
120 |
-
color=[255, 128, 0],
|
121 |
-
type='',
|
122 |
-
swap='sss_kpt21'),
|
123 |
-
11:
|
124 |
-
dict(
|
125 |
-
name='sss_kpt12',
|
126 |
-
id=11,
|
127 |
-
color=[255, 128, 0],
|
128 |
-
type='',
|
129 |
-
swap='sss_kpt20'),
|
130 |
-
12:
|
131 |
-
dict(
|
132 |
-
name='sss_kpt13',
|
133 |
-
id=12,
|
134 |
-
color=[255, 128, 0],
|
135 |
-
type='',
|
136 |
-
swap='sss_kpt19'),
|
137 |
-
13:
|
138 |
-
dict(
|
139 |
-
name='sss_kpt14',
|
140 |
-
id=13,
|
141 |
-
color=[255, 128, 0],
|
142 |
-
type='',
|
143 |
-
swap='sss_kpt18'),
|
144 |
-
14:
|
145 |
-
dict(
|
146 |
-
name='sss_kpt15',
|
147 |
-
id=14,
|
148 |
-
color=[255, 128, 0],
|
149 |
-
type='',
|
150 |
-
swap='sss_kpt17'),
|
151 |
-
15:
|
152 |
-
dict(name='sss_kpt16', id=15, color=[255, 128, 0], type='', swap=''),
|
153 |
-
16:
|
154 |
-
dict(
|
155 |
-
name='sss_kpt17',
|
156 |
-
id=16,
|
157 |
-
color=[255, 128, 0],
|
158 |
-
type='',
|
159 |
-
swap='sss_kpt15'),
|
160 |
-
17:
|
161 |
-
dict(
|
162 |
-
name='sss_kpt18',
|
163 |
-
id=17,
|
164 |
-
color=[255, 128, 0],
|
165 |
-
type='',
|
166 |
-
swap='sss_kpt14'),
|
167 |
-
18:
|
168 |
-
dict(
|
169 |
-
name='sss_kpt19',
|
170 |
-
id=18,
|
171 |
-
color=[255, 128, 0],
|
172 |
-
type='',
|
173 |
-
swap='sss_kpt13'),
|
174 |
-
19:
|
175 |
-
dict(
|
176 |
-
name='sss_kpt20',
|
177 |
-
id=19,
|
178 |
-
color=[255, 128, 0],
|
179 |
-
type='',
|
180 |
-
swap='sss_kpt12'),
|
181 |
-
20:
|
182 |
-
dict(
|
183 |
-
name='sss_kpt21',
|
184 |
-
id=20,
|
185 |
-
color=[255, 128, 0],
|
186 |
-
type='',
|
187 |
-
swap='sss_kpt11'),
|
188 |
-
21:
|
189 |
-
dict(
|
190 |
-
name='sss_kpt22',
|
191 |
-
id=21,
|
192 |
-
color=[255, 128, 0],
|
193 |
-
type='',
|
194 |
-
swap='sss_kpt10'),
|
195 |
-
22:
|
196 |
-
dict(
|
197 |
-
name='sss_kpt23',
|
198 |
-
id=22,
|
199 |
-
color=[255, 128, 0],
|
200 |
-
type='',
|
201 |
-
swap='sss_kpt9'),
|
202 |
-
23:
|
203 |
-
dict(
|
204 |
-
name='sss_kpt24',
|
205 |
-
id=23,
|
206 |
-
color=[255, 128, 0],
|
207 |
-
type='',
|
208 |
-
swap='sss_kpt8'),
|
209 |
-
24:
|
210 |
-
dict(
|
211 |
-
name='sss_kpt25',
|
212 |
-
id=24,
|
213 |
-
color=[255, 128, 0],
|
214 |
-
type='',
|
215 |
-
swap='sss_kpt7'),
|
216 |
-
25:
|
217 |
-
dict(name='lss_kpt1', id=25, color=[255, 0, 128], type='', swap=''),
|
218 |
-
26:
|
219 |
-
dict(
|
220 |
-
name='lss_kpt2',
|
221 |
-
id=26,
|
222 |
-
color=[255, 0, 128],
|
223 |
-
type='',
|
224 |
-
swap='lss_kpt6'),
|
225 |
-
27:
|
226 |
-
dict(
|
227 |
-
name='lss_kpt3',
|
228 |
-
id=27,
|
229 |
-
color=[255, 0, 128],
|
230 |
-
type='',
|
231 |
-
swap='lss_kpt5'),
|
232 |
-
28:
|
233 |
-
dict(name='lss_kpt4', id=28, color=[255, 0, 128], type='', swap=''),
|
234 |
-
29:
|
235 |
-
dict(
|
236 |
-
name='lss_kpt5',
|
237 |
-
id=29,
|
238 |
-
color=[255, 0, 128],
|
239 |
-
type='',
|
240 |
-
swap='lss_kpt3'),
|
241 |
-
30:
|
242 |
-
dict(
|
243 |
-
name='lss_kpt6',
|
244 |
-
id=30,
|
245 |
-
color=[255, 0, 128],
|
246 |
-
type='',
|
247 |
-
swap='lss_kpt2'),
|
248 |
-
31:
|
249 |
-
dict(
|
250 |
-
name='lss_kpt7',
|
251 |
-
id=31,
|
252 |
-
color=[255, 0, 128],
|
253 |
-
type='',
|
254 |
-
swap='lss_kpt33'),
|
255 |
-
32:
|
256 |
-
dict(
|
257 |
-
name='lss_kpt8',
|
258 |
-
id=32,
|
259 |
-
color=[255, 0, 128],
|
260 |
-
type='',
|
261 |
-
swap='lss_kpt32'),
|
262 |
-
33:
|
263 |
-
dict(
|
264 |
-
name='lss_kpt9',
|
265 |
-
id=33,
|
266 |
-
color=[255, 0, 128],
|
267 |
-
type='',
|
268 |
-
swap='lss_kpt31'),
|
269 |
-
34:
|
270 |
-
dict(
|
271 |
-
name='lss_kpt10',
|
272 |
-
id=34,
|
273 |
-
color=[255, 0, 128],
|
274 |
-
type='',
|
275 |
-
swap='lss_kpt30'),
|
276 |
-
35:
|
277 |
-
dict(
|
278 |
-
name='lss_kpt11',
|
279 |
-
id=35,
|
280 |
-
color=[255, 0, 128],
|
281 |
-
type='',
|
282 |
-
swap='lss_kpt29'),
|
283 |
-
36:
|
284 |
-
dict(
|
285 |
-
name='lss_kpt12',
|
286 |
-
id=36,
|
287 |
-
color=[255, 0, 128],
|
288 |
-
type='',
|
289 |
-
swap='lss_kpt28'),
|
290 |
-
37:
|
291 |
-
dict(
|
292 |
-
name='lss_kpt13',
|
293 |
-
id=37,
|
294 |
-
color=[255, 0, 128],
|
295 |
-
type='',
|
296 |
-
swap='lss_kpt27'),
|
297 |
-
38:
|
298 |
-
dict(
|
299 |
-
name='lss_kpt14',
|
300 |
-
id=38,
|
301 |
-
color=[255, 0, 128],
|
302 |
-
type='',
|
303 |
-
swap='lss_kpt26'),
|
304 |
-
39:
|
305 |
-
dict(
|
306 |
-
name='lss_kpt15',
|
307 |
-
id=39,
|
308 |
-
color=[255, 0, 128],
|
309 |
-
type='',
|
310 |
-
swap='lss_kpt25'),
|
311 |
-
40:
|
312 |
-
dict(
|
313 |
-
name='lss_kpt16',
|
314 |
-
id=40,
|
315 |
-
color=[255, 0, 128],
|
316 |
-
type='',
|
317 |
-
swap='lss_kpt24'),
|
318 |
-
41:
|
319 |
-
dict(
|
320 |
-
name='lss_kpt17',
|
321 |
-
id=41,
|
322 |
-
color=[255, 0, 128],
|
323 |
-
type='',
|
324 |
-
swap='lss_kpt23'),
|
325 |
-
42:
|
326 |
-
dict(
|
327 |
-
name='lss_kpt18',
|
328 |
-
id=42,
|
329 |
-
color=[255, 0, 128],
|
330 |
-
type='',
|
331 |
-
swap='lss_kpt22'),
|
332 |
-
43:
|
333 |
-
dict(
|
334 |
-
name='lss_kpt19',
|
335 |
-
id=43,
|
336 |
-
color=[255, 0, 128],
|
337 |
-
type='',
|
338 |
-
swap='lss_kpt21'),
|
339 |
-
44:
|
340 |
-
dict(name='lss_kpt20', id=44, color=[255, 0, 128], type='', swap=''),
|
341 |
-
45:
|
342 |
-
dict(
|
343 |
-
name='lss_kpt21',
|
344 |
-
id=45,
|
345 |
-
color=[255, 0, 128],
|
346 |
-
type='',
|
347 |
-
swap='lss_kpt19'),
|
348 |
-
46:
|
349 |
-
dict(
|
350 |
-
name='lss_kpt22',
|
351 |
-
id=46,
|
352 |
-
color=[255, 0, 128],
|
353 |
-
type='',
|
354 |
-
swap='lss_kpt18'),
|
355 |
-
47:
|
356 |
-
dict(
|
357 |
-
name='lss_kpt23',
|
358 |
-
id=47,
|
359 |
-
color=[255, 0, 128],
|
360 |
-
type='',
|
361 |
-
swap='lss_kpt17'),
|
362 |
-
48:
|
363 |
-
dict(
|
364 |
-
name='lss_kpt24',
|
365 |
-
id=48,
|
366 |
-
color=[255, 0, 128],
|
367 |
-
type='',
|
368 |
-
swap='lss_kpt16'),
|
369 |
-
49:
|
370 |
-
dict(
|
371 |
-
name='lss_kpt25',
|
372 |
-
id=49,
|
373 |
-
color=[255, 0, 128],
|
374 |
-
type='',
|
375 |
-
swap='lss_kpt15'),
|
376 |
-
50:
|
377 |
-
dict(
|
378 |
-
name='lss_kpt26',
|
379 |
-
id=50,
|
380 |
-
color=[255, 0, 128],
|
381 |
-
type='',
|
382 |
-
swap='lss_kpt14'),
|
383 |
-
51:
|
384 |
-
dict(
|
385 |
-
name='lss_kpt27',
|
386 |
-
id=51,
|
387 |
-
color=[255, 0, 128],
|
388 |
-
type='',
|
389 |
-
swap='lss_kpt13'),
|
390 |
-
52:
|
391 |
-
dict(
|
392 |
-
name='lss_kpt28',
|
393 |
-
id=52,
|
394 |
-
color=[255, 0, 128],
|
395 |
-
type='',
|
396 |
-
swap='lss_kpt12'),
|
397 |
-
53:
|
398 |
-
dict(
|
399 |
-
name='lss_kpt29',
|
400 |
-
id=53,
|
401 |
-
color=[255, 0, 128],
|
402 |
-
type='',
|
403 |
-
swap='lss_kpt11'),
|
404 |
-
54:
|
405 |
-
dict(
|
406 |
-
name='lss_kpt30',
|
407 |
-
id=54,
|
408 |
-
color=[255, 0, 128],
|
409 |
-
type='',
|
410 |
-
swap='lss_kpt10'),
|
411 |
-
55:
|
412 |
-
dict(
|
413 |
-
name='lss_kpt31',
|
414 |
-
id=55,
|
415 |
-
color=[255, 0, 128],
|
416 |
-
type='',
|
417 |
-
swap='lss_kpt9'),
|
418 |
-
56:
|
419 |
-
dict(
|
420 |
-
name='lss_kpt32',
|
421 |
-
id=56,
|
422 |
-
color=[255, 0, 128],
|
423 |
-
type='',
|
424 |
-
swap='lss_kpt8'),
|
425 |
-
57:
|
426 |
-
dict(
|
427 |
-
name='lss_kpt33',
|
428 |
-
id=57,
|
429 |
-
color=[255, 0, 128],
|
430 |
-
type='',
|
431 |
-
swap='lss_kpt7'),
|
432 |
-
58:
|
433 |
-
dict(name='sso_kpt1', id=58, color=[128, 0, 255], type='', swap=''),
|
434 |
-
59:
|
435 |
-
dict(
|
436 |
-
name='sso_kpt2',
|
437 |
-
id=59,
|
438 |
-
color=[128, 0, 255],
|
439 |
-
type='',
|
440 |
-
swap='sso_kpt26'),
|
441 |
-
60:
|
442 |
-
dict(
|
443 |
-
name='sso_kpt3',
|
444 |
-
id=60,
|
445 |
-
color=[128, 0, 255],
|
446 |
-
type='',
|
447 |
-
swap='sso_kpt5'),
|
448 |
-
61:
|
449 |
-
dict(
|
450 |
-
name='sso_kpt4',
|
451 |
-
id=61,
|
452 |
-
color=[128, 0, 255],
|
453 |
-
type='',
|
454 |
-
swap='sso_kpt6'),
|
455 |
-
62:
|
456 |
-
dict(
|
457 |
-
name='sso_kpt5',
|
458 |
-
id=62,
|
459 |
-
color=[128, 0, 255],
|
460 |
-
type='',
|
461 |
-
swap='sso_kpt3'),
|
462 |
-
63:
|
463 |
-
dict(
|
464 |
-
name='sso_kpt6',
|
465 |
-
id=63,
|
466 |
-
color=[128, 0, 255],
|
467 |
-
type='',
|
468 |
-
swap='sso_kpt4'),
|
469 |
-
64:
|
470 |
-
dict(
|
471 |
-
name='sso_kpt7',
|
472 |
-
id=64,
|
473 |
-
color=[128, 0, 255],
|
474 |
-
type='',
|
475 |
-
swap='sso_kpt25'),
|
476 |
-
65:
|
477 |
-
dict(
|
478 |
-
name='sso_kpt8',
|
479 |
-
id=65,
|
480 |
-
color=[128, 0, 255],
|
481 |
-
type='',
|
482 |
-
swap='sso_kpt24'),
|
483 |
-
66:
|
484 |
-
dict(
|
485 |
-
name='sso_kpt9',
|
486 |
-
id=66,
|
487 |
-
color=[128, 0, 255],
|
488 |
-
type='',
|
489 |
-
swap='sso_kpt23'),
|
490 |
-
67:
|
491 |
-
dict(
|
492 |
-
name='sso_kpt10',
|
493 |
-
id=67,
|
494 |
-
color=[128, 0, 255],
|
495 |
-
type='',
|
496 |
-
swap='sso_kpt22'),
|
497 |
-
68:
|
498 |
-
dict(
|
499 |
-
name='sso_kpt11',
|
500 |
-
id=68,
|
501 |
-
color=[128, 0, 255],
|
502 |
-
type='',
|
503 |
-
swap='sso_kpt21'),
|
504 |
-
69:
|
505 |
-
dict(
|
506 |
-
name='sso_kpt12',
|
507 |
-
id=69,
|
508 |
-
color=[128, 0, 255],
|
509 |
-
type='',
|
510 |
-
swap='sso_kpt20'),
|
511 |
-
70:
|
512 |
-
dict(
|
513 |
-
name='sso_kpt13',
|
514 |
-
id=70,
|
515 |
-
color=[128, 0, 255],
|
516 |
-
type='',
|
517 |
-
swap='sso_kpt19'),
|
518 |
-
71:
|
519 |
-
dict(
|
520 |
-
name='sso_kpt14',
|
521 |
-
id=71,
|
522 |
-
color=[128, 0, 255],
|
523 |
-
type='',
|
524 |
-
swap='sso_kpt18'),
|
525 |
-
72:
|
526 |
-
dict(
|
527 |
-
name='sso_kpt15',
|
528 |
-
id=72,
|
529 |
-
color=[128, 0, 255],
|
530 |
-
type='',
|
531 |
-
swap='sso_kpt17'),
|
532 |
-
73:
|
533 |
-
dict(
|
534 |
-
name='sso_kpt16',
|
535 |
-
id=73,
|
536 |
-
color=[128, 0, 255],
|
537 |
-
type='',
|
538 |
-
swap='sso_kpt29'),
|
539 |
-
74:
|
540 |
-
dict(
|
541 |
-
name='sso_kpt17',
|
542 |
-
id=74,
|
543 |
-
color=[128, 0, 255],
|
544 |
-
type='',
|
545 |
-
swap='sso_kpt15'),
|
546 |
-
75:
|
547 |
-
dict(
|
548 |
-
name='sso_kpt18',
|
549 |
-
id=75,
|
550 |
-
color=[128, 0, 255],
|
551 |
-
type='',
|
552 |
-
swap='sso_kpt14'),
|
553 |
-
76:
|
554 |
-
dict(
|
555 |
-
name='sso_kpt19',
|
556 |
-
id=76,
|
557 |
-
color=[128, 0, 255],
|
558 |
-
type='',
|
559 |
-
swap='sso_kpt13'),
|
560 |
-
77:
|
561 |
-
dict(
|
562 |
-
name='sso_kpt20',
|
563 |
-
id=77,
|
564 |
-
color=[128, 0, 255],
|
565 |
-
type='',
|
566 |
-
swap='sso_kpt12'),
|
567 |
-
78:
|
568 |
-
dict(
|
569 |
-
name='sso_kpt21',
|
570 |
-
id=78,
|
571 |
-
color=[128, 0, 255],
|
572 |
-
type='',
|
573 |
-
swap='sso_kpt11'),
|
574 |
-
79:
|
575 |
-
dict(
|
576 |
-
name='sso_kpt22',
|
577 |
-
id=79,
|
578 |
-
color=[128, 0, 255],
|
579 |
-
type='',
|
580 |
-
swap='sso_kpt10'),
|
581 |
-
80:
|
582 |
-
dict(
|
583 |
-
name='sso_kpt23',
|
584 |
-
id=80,
|
585 |
-
color=[128, 0, 255],
|
586 |
-
type='',
|
587 |
-
swap='sso_kpt9'),
|
588 |
-
81:
|
589 |
-
dict(
|
590 |
-
name='sso_kpt24',
|
591 |
-
id=81,
|
592 |
-
color=[128, 0, 255],
|
593 |
-
type='',
|
594 |
-
swap='sso_kpt8'),
|
595 |
-
82:
|
596 |
-
dict(
|
597 |
-
name='sso_kpt25',
|
598 |
-
id=82,
|
599 |
-
color=[128, 0, 255],
|
600 |
-
type='',
|
601 |
-
swap='sso_kpt7'),
|
602 |
-
83:
|
603 |
-
dict(
|
604 |
-
name='sso_kpt26',
|
605 |
-
id=83,
|
606 |
-
color=[128, 0, 255],
|
607 |
-
type='',
|
608 |
-
swap='sso_kpt2'),
|
609 |
-
84:
|
610 |
-
dict(
|
611 |
-
name='sso_kpt27',
|
612 |
-
id=84,
|
613 |
-
color=[128, 0, 255],
|
614 |
-
type='',
|
615 |
-
swap='sso_kpt30'),
|
616 |
-
85:
|
617 |
-
dict(
|
618 |
-
name='sso_kpt28',
|
619 |
-
id=85,
|
620 |
-
color=[128, 0, 255],
|
621 |
-
type='',
|
622 |
-
swap='sso_kpt31'),
|
623 |
-
86:
|
624 |
-
dict(
|
625 |
-
name='sso_kpt29',
|
626 |
-
id=86,
|
627 |
-
color=[128, 0, 255],
|
628 |
-
type='',
|
629 |
-
swap='sso_kpt16'),
|
630 |
-
87:
|
631 |
-
dict(
|
632 |
-
name='sso_kpt30',
|
633 |
-
id=87,
|
634 |
-
color=[128, 0, 255],
|
635 |
-
type='',
|
636 |
-
swap='sso_kpt27'),
|
637 |
-
88:
|
638 |
-
dict(
|
639 |
-
name='sso_kpt31',
|
640 |
-
id=88,
|
641 |
-
color=[128, 0, 255],
|
642 |
-
type='',
|
643 |
-
swap='sso_kpt28'),
|
644 |
-
89:
|
645 |
-
dict(name='lso_kpt1', id=89, color=[0, 128, 255], type='', swap=''),
|
646 |
-
90:
|
647 |
-
dict(
|
648 |
-
name='lso_kpt2',
|
649 |
-
id=90,
|
650 |
-
color=[0, 128, 255],
|
651 |
-
type='',
|
652 |
-
swap='lso_kpt6'),
|
653 |
-
91:
|
654 |
-
dict(
|
655 |
-
name='lso_kpt3',
|
656 |
-
id=91,
|
657 |
-
color=[0, 128, 255],
|
658 |
-
type='',
|
659 |
-
swap='lso_kpt5'),
|
660 |
-
92:
|
661 |
-
dict(
|
662 |
-
name='lso_kpt4',
|
663 |
-
id=92,
|
664 |
-
color=[0, 128, 255],
|
665 |
-
type='',
|
666 |
-
swap='lso_kpt34'),
|
667 |
-
93:
|
668 |
-
dict(
|
669 |
-
name='lso_kpt5',
|
670 |
-
id=93,
|
671 |
-
color=[0, 128, 255],
|
672 |
-
type='',
|
673 |
-
swap='lso_kpt3'),
|
674 |
-
94:
|
675 |
-
dict(
|
676 |
-
name='lso_kpt6',
|
677 |
-
id=94,
|
678 |
-
color=[0, 128, 255],
|
679 |
-
type='',
|
680 |
-
swap='lso_kpt2'),
|
681 |
-
95:
|
682 |
-
dict(
|
683 |
-
name='lso_kpt7',
|
684 |
-
id=95,
|
685 |
-
color=[0, 128, 255],
|
686 |
-
type='',
|
687 |
-
swap='lso_kpt33'),
|
688 |
-
96:
|
689 |
-
dict(
|
690 |
-
name='lso_kpt8',
|
691 |
-
id=96,
|
692 |
-
color=[0, 128, 255],
|
693 |
-
type='',
|
694 |
-
swap='lso_kpt32'),
|
695 |
-
97:
|
696 |
-
dict(
|
697 |
-
name='lso_kpt9',
|
698 |
-
id=97,
|
699 |
-
color=[0, 128, 255],
|
700 |
-
type='',
|
701 |
-
swap='lso_kpt31'),
|
702 |
-
98:
|
703 |
-
dict(
|
704 |
-
name='lso_kpt10',
|
705 |
-
id=98,
|
706 |
-
color=[0, 128, 255],
|
707 |
-
type='',
|
708 |
-
swap='lso_kpt30'),
|
709 |
-
99:
|
710 |
-
dict(
|
711 |
-
name='lso_kpt11',
|
712 |
-
id=99,
|
713 |
-
color=[0, 128, 255],
|
714 |
-
type='',
|
715 |
-
swap='lso_kpt29'),
|
716 |
-
100:
|
717 |
-
dict(
|
718 |
-
name='lso_kpt12',
|
719 |
-
id=100,
|
720 |
-
color=[0, 128, 255],
|
721 |
-
type='',
|
722 |
-
swap='lso_kpt28'),
|
723 |
-
101:
|
724 |
-
dict(
|
725 |
-
name='lso_kpt13',
|
726 |
-
id=101,
|
727 |
-
color=[0, 128, 255],
|
728 |
-
type='',
|
729 |
-
swap='lso_kpt27'),
|
730 |
-
102:
|
731 |
-
dict(
|
732 |
-
name='lso_kpt14',
|
733 |
-
id=102,
|
734 |
-
color=[0, 128, 255],
|
735 |
-
type='',
|
736 |
-
swap='lso_kpt26'),
|
737 |
-
103:
|
738 |
-
dict(
|
739 |
-
name='lso_kpt15',
|
740 |
-
id=103,
|
741 |
-
color=[0, 128, 255],
|
742 |
-
type='',
|
743 |
-
swap='lso_kpt25'),
|
744 |
-
104:
|
745 |
-
dict(
|
746 |
-
name='lso_kpt16',
|
747 |
-
id=104,
|
748 |
-
color=[0, 128, 255],
|
749 |
-
type='',
|
750 |
-
swap='lso_kpt24'),
|
751 |
-
105:
|
752 |
-
dict(
|
753 |
-
name='lso_kpt17',
|
754 |
-
id=105,
|
755 |
-
color=[0, 128, 255],
|
756 |
-
type='',
|
757 |
-
swap='lso_kpt23'),
|
758 |
-
106:
|
759 |
-
dict(
|
760 |
-
name='lso_kpt18',
|
761 |
-
id=106,
|
762 |
-
color=[0, 128, 255],
|
763 |
-
type='',
|
764 |
-
swap='lso_kpt22'),
|
765 |
-
107:
|
766 |
-
dict(
|
767 |
-
name='lso_kpt19',
|
768 |
-
id=107,
|
769 |
-
color=[0, 128, 255],
|
770 |
-
type='',
|
771 |
-
swap='lso_kpt21'),
|
772 |
-
108:
|
773 |
-
dict(
|
774 |
-
name='lso_kpt20',
|
775 |
-
id=108,
|
776 |
-
color=[0, 128, 255],
|
777 |
-
type='',
|
778 |
-
swap='lso_kpt37'),
|
779 |
-
109:
|
780 |
-
dict(
|
781 |
-
name='lso_kpt21',
|
782 |
-
id=109,
|
783 |
-
color=[0, 128, 255],
|
784 |
-
type='',
|
785 |
-
swap='lso_kpt19'),
|
786 |
-
110:
|
787 |
-
dict(
|
788 |
-
name='lso_kpt22',
|
789 |
-
id=110,
|
790 |
-
color=[0, 128, 255],
|
791 |
-
type='',
|
792 |
-
swap='lso_kpt18'),
|
793 |
-
111:
|
794 |
-
dict(
|
795 |
-
name='lso_kpt23',
|
796 |
-
id=111,
|
797 |
-
color=[0, 128, 255],
|
798 |
-
type='',
|
799 |
-
swap='lso_kpt17'),
|
800 |
-
112:
|
801 |
-
dict(
|
802 |
-
name='lso_kpt24',
|
803 |
-
id=112,
|
804 |
-
color=[0, 128, 255],
|
805 |
-
type='',
|
806 |
-
swap='lso_kpt16'),
|
807 |
-
113:
|
808 |
-
dict(
|
809 |
-
name='lso_kpt25',
|
810 |
-
id=113,
|
811 |
-
color=[0, 128, 255],
|
812 |
-
type='',
|
813 |
-
swap='lso_kpt15'),
|
814 |
-
114:
|
815 |
-
dict(
|
816 |
-
name='lso_kpt26',
|
817 |
-
id=114,
|
818 |
-
color=[0, 128, 255],
|
819 |
-
type='',
|
820 |
-
swap='lso_kpt14'),
|
821 |
-
115:
|
822 |
-
dict(
|
823 |
-
name='lso_kpt27',
|
824 |
-
id=115,
|
825 |
-
color=[0, 128, 255],
|
826 |
-
type='',
|
827 |
-
swap='lso_kpt13'),
|
828 |
-
116:
|
829 |
-
dict(
|
830 |
-
name='lso_kpt28',
|
831 |
-
id=116,
|
832 |
-
color=[0, 128, 255],
|
833 |
-
type='',
|
834 |
-
swap='lso_kpt12'),
|
835 |
-
117:
|
836 |
-
dict(
|
837 |
-
name='lso_kpt29',
|
838 |
-
id=117,
|
839 |
-
color=[0, 128, 255],
|
840 |
-
type='',
|
841 |
-
swap='lso_kpt11'),
|
842 |
-
118:
|
843 |
-
dict(
|
844 |
-
name='lso_kpt30',
|
845 |
-
id=118,
|
846 |
-
color=[0, 128, 255],
|
847 |
-
type='',
|
848 |
-
swap='lso_kpt10'),
|
849 |
-
119:
|
850 |
-
dict(
|
851 |
-
name='lso_kpt31',
|
852 |
-
id=119,
|
853 |
-
color=[0, 128, 255],
|
854 |
-
type='',
|
855 |
-
swap='lso_kpt9'),
|
856 |
-
120:
|
857 |
-
dict(
|
858 |
-
name='lso_kpt32',
|
859 |
-
id=120,
|
860 |
-
color=[0, 128, 255],
|
861 |
-
type='',
|
862 |
-
swap='lso_kpt8'),
|
863 |
-
121:
|
864 |
-
dict(
|
865 |
-
name='lso_kpt33',
|
866 |
-
id=121,
|
867 |
-
color=[0, 128, 255],
|
868 |
-
type='',
|
869 |
-
swap='lso_kpt7'),
|
870 |
-
122:
|
871 |
-
dict(
|
872 |
-
name='lso_kpt34',
|
873 |
-
id=122,
|
874 |
-
color=[0, 128, 255],
|
875 |
-
type='',
|
876 |
-
swap='lso_kpt4'),
|
877 |
-
123:
|
878 |
-
dict(
|
879 |
-
name='lso_kpt35',
|
880 |
-
id=123,
|
881 |
-
color=[0, 128, 255],
|
882 |
-
type='',
|
883 |
-
swap='lso_kpt38'),
|
884 |
-
124:
|
885 |
-
dict(
|
886 |
-
name='lso_kpt36',
|
887 |
-
id=124,
|
888 |
-
color=[0, 128, 255],
|
889 |
-
type='',
|
890 |
-
swap='lso_kpt39'),
|
891 |
-
125:
|
892 |
-
dict(
|
893 |
-
name='lso_kpt37',
|
894 |
-
id=125,
|
895 |
-
color=[0, 128, 255],
|
896 |
-
type='',
|
897 |
-
swap='lso_kpt20'),
|
898 |
-
126:
|
899 |
-
dict(
|
900 |
-
name='lso_kpt38',
|
901 |
-
id=126,
|
902 |
-
color=[0, 128, 255],
|
903 |
-
type='',
|
904 |
-
swap='lso_kpt35'),
|
905 |
-
127:
|
906 |
-
dict(
|
907 |
-
name='lso_kpt39',
|
908 |
-
id=127,
|
909 |
-
color=[0, 128, 255],
|
910 |
-
type='',
|
911 |
-
swap='lso_kpt36'),
|
912 |
-
128:
|
913 |
-
dict(name='vest_kpt1', id=128, color=[0, 128, 128], type='', swap=''),
|
914 |
-
129:
|
915 |
-
dict(
|
916 |
-
name='vest_kpt2',
|
917 |
-
id=129,
|
918 |
-
color=[0, 128, 128],
|
919 |
-
type='',
|
920 |
-
swap='vest_kpt6'),
|
921 |
-
130:
|
922 |
-
dict(
|
923 |
-
name='vest_kpt3',
|
924 |
-
id=130,
|
925 |
-
color=[0, 128, 128],
|
926 |
-
type='',
|
927 |
-
swap='vest_kpt5'),
|
928 |
-
131:
|
929 |
-
dict(name='vest_kpt4', id=131, color=[0, 128, 128], type='', swap=''),
|
930 |
-
132:
|
931 |
-
dict(
|
932 |
-
name='vest_kpt5',
|
933 |
-
id=132,
|
934 |
-
color=[0, 128, 128],
|
935 |
-
type='',
|
936 |
-
swap='vest_kpt3'),
|
937 |
-
133:
|
938 |
-
dict(
|
939 |
-
name='vest_kpt6',
|
940 |
-
id=133,
|
941 |
-
color=[0, 128, 128],
|
942 |
-
type='',
|
943 |
-
swap='vest_kpt2'),
|
944 |
-
134:
|
945 |
-
dict(
|
946 |
-
name='vest_kpt7',
|
947 |
-
id=134,
|
948 |
-
color=[0, 128, 128],
|
949 |
-
type='',
|
950 |
-
swap='vest_kpt15'),
|
951 |
-
135:
|
952 |
-
dict(
|
953 |
-
name='vest_kpt8',
|
954 |
-
id=135,
|
955 |
-
color=[0, 128, 128],
|
956 |
-
type='',
|
957 |
-
swap='vest_kpt14'),
|
958 |
-
136:
|
959 |
-
dict(
|
960 |
-
name='vest_kpt9',
|
961 |
-
id=136,
|
962 |
-
color=[0, 128, 128],
|
963 |
-
type='',
|
964 |
-
swap='vest_kpt13'),
|
965 |
-
137:
|
966 |
-
dict(
|
967 |
-
name='vest_kpt10',
|
968 |
-
id=137,
|
969 |
-
color=[0, 128, 128],
|
970 |
-
type='',
|
971 |
-
swap='vest_kpt12'),
|
972 |
-
138:
|
973 |
-
dict(name='vest_kpt11', id=138, color=[0, 128, 128], type='', swap=''),
|
974 |
-
139:
|
975 |
-
dict(
|
976 |
-
name='vest_kpt12',
|
977 |
-
id=139,
|
978 |
-
color=[0, 128, 128],
|
979 |
-
type='',
|
980 |
-
swap='vest_kpt10'),
|
981 |
-
140:
|
982 |
-
dict(name='vest_kpt13', id=140, color=[0, 128, 128], type='', swap=''),
|
983 |
-
141:
|
984 |
-
dict(
|
985 |
-
name='vest_kpt14',
|
986 |
-
id=141,
|
987 |
-
color=[0, 128, 128],
|
988 |
-
type='',
|
989 |
-
swap='vest_kpt8'),
|
990 |
-
142:
|
991 |
-
dict(
|
992 |
-
name='vest_kpt15',
|
993 |
-
id=142,
|
994 |
-
color=[0, 128, 128],
|
995 |
-
type='',
|
996 |
-
swap='vest_kpt7'),
|
997 |
-
143:
|
998 |
-
dict(name='sling_kpt1', id=143, color=[0, 0, 128], type='', swap=''),
|
999 |
-
144:
|
1000 |
-
dict(
|
1001 |
-
name='sling_kpt2',
|
1002 |
-
id=144,
|
1003 |
-
color=[0, 0, 128],
|
1004 |
-
type='',
|
1005 |
-
swap='sling_kpt6'),
|
1006 |
-
145:
|
1007 |
-
dict(
|
1008 |
-
name='sling_kpt3',
|
1009 |
-
id=145,
|
1010 |
-
color=[0, 0, 128],
|
1011 |
-
type='',
|
1012 |
-
swap='sling_kpt5'),
|
1013 |
-
146:
|
1014 |
-
dict(name='sling_kpt4', id=146, color=[0, 0, 128], type='', swap=''),
|
1015 |
-
147:
|
1016 |
-
dict(
|
1017 |
-
name='sling_kpt5',
|
1018 |
-
id=147,
|
1019 |
-
color=[0, 0, 128],
|
1020 |
-
type='',
|
1021 |
-
swap='sling_kpt3'),
|
1022 |
-
148:
|
1023 |
-
dict(
|
1024 |
-
name='sling_kpt6',
|
1025 |
-
id=148,
|
1026 |
-
color=[0, 0, 128],
|
1027 |
-
type='',
|
1028 |
-
swap='sling_kpt2'),
|
1029 |
-
149:
|
1030 |
-
dict(
|
1031 |
-
name='sling_kpt7',
|
1032 |
-
id=149,
|
1033 |
-
color=[0, 0, 128],
|
1034 |
-
type='',
|
1035 |
-
swap='sling_kpt15'),
|
1036 |
-
150:
|
1037 |
-
dict(
|
1038 |
-
name='sling_kpt8',
|
1039 |
-
id=150,
|
1040 |
-
color=[0, 0, 128],
|
1041 |
-
type='',
|
1042 |
-
swap='sling_kpt14'),
|
1043 |
-
151:
|
1044 |
-
dict(
|
1045 |
-
name='sling_kpt9',
|
1046 |
-
id=151,
|
1047 |
-
color=[0, 0, 128],
|
1048 |
-
type='',
|
1049 |
-
swap='sling_kpt13'),
|
1050 |
-
152:
|
1051 |
-
dict(
|
1052 |
-
name='sling_kpt10',
|
1053 |
-
id=152,
|
1054 |
-
color=[0, 0, 128],
|
1055 |
-
type='',
|
1056 |
-
swap='sling_kpt12'),
|
1057 |
-
153:
|
1058 |
-
dict(name='sling_kpt11', id=153, color=[0, 0, 128], type='', swap=''),
|
1059 |
-
154:
|
1060 |
-
dict(
|
1061 |
-
name='sling_kpt12',
|
1062 |
-
id=154,
|
1063 |
-
color=[0, 0, 128],
|
1064 |
-
type='',
|
1065 |
-
swap='sling_kpt10'),
|
1066 |
-
155:
|
1067 |
-
dict(
|
1068 |
-
name='sling_kpt13',
|
1069 |
-
id=155,
|
1070 |
-
color=[0, 0, 128],
|
1071 |
-
type='',
|
1072 |
-
swap='sling_kpt9'),
|
1073 |
-
156:
|
1074 |
-
dict(
|
1075 |
-
name='sling_kpt14',
|
1076 |
-
id=156,
|
1077 |
-
color=[0, 0, 128],
|
1078 |
-
type='',
|
1079 |
-
swap='sling_kpt8'),
|
1080 |
-
157:
|
1081 |
-
dict(
|
1082 |
-
name='sling_kpt15',
|
1083 |
-
id=157,
|
1084 |
-
color=[0, 0, 128],
|
1085 |
-
type='',
|
1086 |
-
swap='sling_kpt7'),
|
1087 |
-
158:
|
1088 |
-
dict(
|
1089 |
-
name='shorts_kpt1',
|
1090 |
-
id=158,
|
1091 |
-
color=[128, 128, 128],
|
1092 |
-
type='',
|
1093 |
-
swap='shorts_kpt3'),
|
1094 |
-
159:
|
1095 |
-
dict(
|
1096 |
-
name='shorts_kpt2',
|
1097 |
-
id=159,
|
1098 |
-
color=[128, 128, 128],
|
1099 |
-
type='',
|
1100 |
-
swap=''),
|
1101 |
-
160:
|
1102 |
-
dict(
|
1103 |
-
name='shorts_kpt3',
|
1104 |
-
id=160,
|
1105 |
-
color=[128, 128, 128],
|
1106 |
-
type='',
|
1107 |
-
swap='shorts_kpt1'),
|
1108 |
-
161:
|
1109 |
-
dict(
|
1110 |
-
name='shorts_kpt4',
|
1111 |
-
id=161,
|
1112 |
-
color=[128, 128, 128],
|
1113 |
-
type='',
|
1114 |
-
swap='shorts_kpt10'),
|
1115 |
-
162:
|
1116 |
-
dict(
|
1117 |
-
name='shorts_kpt5',
|
1118 |
-
id=162,
|
1119 |
-
color=[128, 128, 128],
|
1120 |
-
type='',
|
1121 |
-
swap='shorts_kpt9'),
|
1122 |
-
163:
|
1123 |
-
dict(
|
1124 |
-
name='shorts_kpt6',
|
1125 |
-
id=163,
|
1126 |
-
color=[128, 128, 128],
|
1127 |
-
type='',
|
1128 |
-
swap='shorts_kpt8'),
|
1129 |
-
164:
|
1130 |
-
dict(
|
1131 |
-
name='shorts_kpt7',
|
1132 |
-
id=164,
|
1133 |
-
color=[128, 128, 128],
|
1134 |
-
type='',
|
1135 |
-
swap=''),
|
1136 |
-
165:
|
1137 |
-
dict(
|
1138 |
-
name='shorts_kpt8',
|
1139 |
-
id=165,
|
1140 |
-
color=[128, 128, 128],
|
1141 |
-
type='',
|
1142 |
-
swap='shorts_kpt6'),
|
1143 |
-
166:
|
1144 |
-
dict(
|
1145 |
-
name='shorts_kpt9',
|
1146 |
-
id=166,
|
1147 |
-
color=[128, 128, 128],
|
1148 |
-
type='',
|
1149 |
-
swap='shorts_kpt5'),
|
1150 |
-
167:
|
1151 |
-
dict(
|
1152 |
-
name='shorts_kpt10',
|
1153 |
-
id=167,
|
1154 |
-
color=[128, 128, 128],
|
1155 |
-
type='',
|
1156 |
-
swap='shorts_kpt4'),
|
1157 |
-
168:
|
1158 |
-
dict(
|
1159 |
-
name='trousers_kpt1',
|
1160 |
-
id=168,
|
1161 |
-
color=[128, 0, 128],
|
1162 |
-
type='',
|
1163 |
-
swap='trousers_kpt3'),
|
1164 |
-
169:
|
1165 |
-
dict(
|
1166 |
-
name='trousers_kpt2',
|
1167 |
-
id=169,
|
1168 |
-
color=[128, 0, 128],
|
1169 |
-
type='',
|
1170 |
-
swap=''),
|
1171 |
-
170:
|
1172 |
-
dict(
|
1173 |
-
name='trousers_kpt3',
|
1174 |
-
id=170,
|
1175 |
-
color=[128, 0, 128],
|
1176 |
-
type='',
|
1177 |
-
swap='trousers_kpt1'),
|
1178 |
-
171:
|
1179 |
-
dict(
|
1180 |
-
name='trousers_kpt4',
|
1181 |
-
id=171,
|
1182 |
-
color=[128, 0, 128],
|
1183 |
-
type='',
|
1184 |
-
swap='trousers_kpt14'),
|
1185 |
-
172:
|
1186 |
-
dict(
|
1187 |
-
name='trousers_kpt5',
|
1188 |
-
id=172,
|
1189 |
-
color=[128, 0, 128],
|
1190 |
-
type='',
|
1191 |
-
swap='trousers_kpt13'),
|
1192 |
-
173:
|
1193 |
-
dict(
|
1194 |
-
name='trousers_kpt6',
|
1195 |
-
id=173,
|
1196 |
-
color=[128, 0, 128],
|
1197 |
-
type='',
|
1198 |
-
swap='trousers_kpt12'),
|
1199 |
-
174:
|
1200 |
-
dict(
|
1201 |
-
name='trousers_kpt7',
|
1202 |
-
id=174,
|
1203 |
-
color=[128, 0, 128],
|
1204 |
-
type='',
|
1205 |
-
swap='trousers_kpt11'),
|
1206 |
-
175:
|
1207 |
-
dict(
|
1208 |
-
name='trousers_kpt8',
|
1209 |
-
id=175,
|
1210 |
-
color=[128, 0, 128],
|
1211 |
-
type='',
|
1212 |
-
swap='trousers_kpt10'),
|
1213 |
-
176:
|
1214 |
-
dict(
|
1215 |
-
name='trousers_kpt9',
|
1216 |
-
id=176,
|
1217 |
-
color=[128, 0, 128],
|
1218 |
-
type='',
|
1219 |
-
swap=''),
|
1220 |
-
177:
|
1221 |
-
dict(
|
1222 |
-
name='trousers_kpt10',
|
1223 |
-
id=177,
|
1224 |
-
color=[128, 0, 128],
|
1225 |
-
type='',
|
1226 |
-
swap='trousers_kpt8'),
|
1227 |
-
178:
|
1228 |
-
dict(
|
1229 |
-
name='trousers_kpt11',
|
1230 |
-
id=178,
|
1231 |
-
color=[128, 0, 128],
|
1232 |
-
type='',
|
1233 |
-
swap='trousers_kpt7'),
|
1234 |
-
179:
|
1235 |
-
dict(
|
1236 |
-
name='trousers_kpt12',
|
1237 |
-
id=179,
|
1238 |
-
color=[128, 0, 128],
|
1239 |
-
type='',
|
1240 |
-
swap='trousers_kpt6'),
|
1241 |
-
180:
|
1242 |
-
dict(
|
1243 |
-
name='trousers_kpt13',
|
1244 |
-
id=180,
|
1245 |
-
color=[128, 0, 128],
|
1246 |
-
type='',
|
1247 |
-
swap='trousers_kpt5'),
|
1248 |
-
181:
|
1249 |
-
dict(
|
1250 |
-
name='trousers_kpt14',
|
1251 |
-
id=181,
|
1252 |
-
color=[128, 0, 128],
|
1253 |
-
type='',
|
1254 |
-
swap='trousers_kpt4'),
|
1255 |
-
182:
|
1256 |
-
dict(
|
1257 |
-
name='skirt_kpt1',
|
1258 |
-
id=182,
|
1259 |
-
color=[64, 128, 128],
|
1260 |
-
type='',
|
1261 |
-
swap='skirt_kpt3'),
|
1262 |
-
183:
|
1263 |
-
dict(
|
1264 |
-
name='skirt_kpt2', id=183, color=[64, 128, 128], type='', swap=''),
|
1265 |
-
184:
|
1266 |
-
dict(
|
1267 |
-
name='skirt_kpt3',
|
1268 |
-
id=184,
|
1269 |
-
color=[64, 128, 128],
|
1270 |
-
type='',
|
1271 |
-
swap='skirt_kpt1'),
|
1272 |
-
185:
|
1273 |
-
dict(
|
1274 |
-
name='skirt_kpt4',
|
1275 |
-
id=185,
|
1276 |
-
color=[64, 128, 128],
|
1277 |
-
type='',
|
1278 |
-
swap='skirt_kpt8'),
|
1279 |
-
186:
|
1280 |
-
dict(
|
1281 |
-
name='skirt_kpt5',
|
1282 |
-
id=186,
|
1283 |
-
color=[64, 128, 128],
|
1284 |
-
type='',
|
1285 |
-
swap='skirt_kpt7'),
|
1286 |
-
187:
|
1287 |
-
dict(
|
1288 |
-
name='skirt_kpt6', id=187, color=[64, 128, 128], type='', swap=''),
|
1289 |
-
188:
|
1290 |
-
dict(
|
1291 |
-
name='skirt_kpt7',
|
1292 |
-
id=188,
|
1293 |
-
color=[64, 128, 128],
|
1294 |
-
type='',
|
1295 |
-
swap='skirt_kpt5'),
|
1296 |
-
189:
|
1297 |
-
dict(
|
1298 |
-
name='skirt_kpt8',
|
1299 |
-
id=189,
|
1300 |
-
color=[64, 128, 128],
|
1301 |
-
type='',
|
1302 |
-
swap='skirt_kpt4'),
|
1303 |
-
190:
|
1304 |
-
dict(name='ssd_kpt1', id=190, color=[64, 64, 128], type='', swap=''),
|
1305 |
-
191:
|
1306 |
-
dict(
|
1307 |
-
name='ssd_kpt2',
|
1308 |
-
id=191,
|
1309 |
-
color=[64, 64, 128],
|
1310 |
-
type='',
|
1311 |
-
swap='ssd_kpt6'),
|
1312 |
-
192:
|
1313 |
-
dict(
|
1314 |
-
name='ssd_kpt3',
|
1315 |
-
id=192,
|
1316 |
-
color=[64, 64, 128],
|
1317 |
-
type='',
|
1318 |
-
swap='ssd_kpt5'),
|
1319 |
-
193:
|
1320 |
-
dict(name='ssd_kpt4', id=193, color=[64, 64, 128], type='', swap=''),
|
1321 |
-
194:
|
1322 |
-
dict(
|
1323 |
-
name='ssd_kpt5',
|
1324 |
-
id=194,
|
1325 |
-
color=[64, 64, 128],
|
1326 |
-
type='',
|
1327 |
-
swap='ssd_kpt3'),
|
1328 |
-
195:
|
1329 |
-
dict(
|
1330 |
-
name='ssd_kpt6',
|
1331 |
-
id=195,
|
1332 |
-
color=[64, 64, 128],
|
1333 |
-
type='',
|
1334 |
-
swap='ssd_kpt2'),
|
1335 |
-
196:
|
1336 |
-
dict(
|
1337 |
-
name='ssd_kpt7',
|
1338 |
-
id=196,
|
1339 |
-
color=[64, 64, 128],
|
1340 |
-
type='',
|
1341 |
-
swap='ssd_kpt29'),
|
1342 |
-
197:
|
1343 |
-
dict(
|
1344 |
-
name='ssd_kpt8',
|
1345 |
-
id=197,
|
1346 |
-
color=[64, 64, 128],
|
1347 |
-
type='',
|
1348 |
-
swap='ssd_kpt28'),
|
1349 |
-
198:
|
1350 |
-
dict(
|
1351 |
-
name='ssd_kpt9',
|
1352 |
-
id=198,
|
1353 |
-
color=[64, 64, 128],
|
1354 |
-
type='',
|
1355 |
-
swap='ssd_kpt27'),
|
1356 |
-
199:
|
1357 |
-
dict(
|
1358 |
-
name='ssd_kpt10',
|
1359 |
-
id=199,
|
1360 |
-
color=[64, 64, 128],
|
1361 |
-
type='',
|
1362 |
-
swap='ssd_kpt26'),
|
1363 |
-
200:
|
1364 |
-
dict(
|
1365 |
-
name='ssd_kpt11',
|
1366 |
-
id=200,
|
1367 |
-
color=[64, 64, 128],
|
1368 |
-
type='',
|
1369 |
-
swap='ssd_kpt25'),
|
1370 |
-
201:
|
1371 |
-
dict(
|
1372 |
-
name='ssd_kpt12',
|
1373 |
-
id=201,
|
1374 |
-
color=[64, 64, 128],
|
1375 |
-
type='',
|
1376 |
-
swap='ssd_kpt24'),
|
1377 |
-
202:
|
1378 |
-
dict(
|
1379 |
-
name='ssd_kpt13',
|
1380 |
-
id=202,
|
1381 |
-
color=[64, 64, 128],
|
1382 |
-
type='',
|
1383 |
-
swap='ssd_kpt23'),
|
1384 |
-
203:
|
1385 |
-
dict(
|
1386 |
-
name='ssd_kpt14',
|
1387 |
-
id=203,
|
1388 |
-
color=[64, 64, 128],
|
1389 |
-
type='',
|
1390 |
-
swap='ssd_kpt22'),
|
1391 |
-
204:
|
1392 |
-
dict(
|
1393 |
-
name='ssd_kpt15',
|
1394 |
-
id=204,
|
1395 |
-
color=[64, 64, 128],
|
1396 |
-
type='',
|
1397 |
-
swap='ssd_kpt21'),
|
1398 |
-
205:
|
1399 |
-
dict(
|
1400 |
-
name='ssd_kpt16',
|
1401 |
-
id=205,
|
1402 |
-
color=[64, 64, 128],
|
1403 |
-
type='',
|
1404 |
-
swap='ssd_kpt20'),
|
1405 |
-
206:
|
1406 |
-
dict(
|
1407 |
-
name='ssd_kpt17',
|
1408 |
-
id=206,
|
1409 |
-
color=[64, 64, 128],
|
1410 |
-
type='',
|
1411 |
-
swap='ssd_kpt19'),
|
1412 |
-
207:
|
1413 |
-
dict(name='ssd_kpt18', id=207, color=[64, 64, 128], type='', swap=''),
|
1414 |
-
208:
|
1415 |
-
dict(
|
1416 |
-
name='ssd_kpt19',
|
1417 |
-
id=208,
|
1418 |
-
color=[64, 64, 128],
|
1419 |
-
type='',
|
1420 |
-
swap='ssd_kpt17'),
|
1421 |
-
209:
|
1422 |
-
dict(
|
1423 |
-
name='ssd_kpt20',
|
1424 |
-
id=209,
|
1425 |
-
color=[64, 64, 128],
|
1426 |
-
type='',
|
1427 |
-
swap='ssd_kpt16'),
|
1428 |
-
210:
|
1429 |
-
dict(
|
1430 |
-
name='ssd_kpt21',
|
1431 |
-
id=210,
|
1432 |
-
color=[64, 64, 128],
|
1433 |
-
type='',
|
1434 |
-
swap='ssd_kpt15'),
|
1435 |
-
211:
|
1436 |
-
dict(
|
1437 |
-
name='ssd_kpt22',
|
1438 |
-
id=211,
|
1439 |
-
color=[64, 64, 128],
|
1440 |
-
type='',
|
1441 |
-
swap='ssd_kpt14'),
|
1442 |
-
212:
|
1443 |
-
dict(
|
1444 |
-
name='ssd_kpt23',
|
1445 |
-
id=212,
|
1446 |
-
color=[64, 64, 128],
|
1447 |
-
type='',
|
1448 |
-
swap='ssd_kpt13'),
|
1449 |
-
213:
|
1450 |
-
dict(
|
1451 |
-
name='ssd_kpt24',
|
1452 |
-
id=213,
|
1453 |
-
color=[64, 64, 128],
|
1454 |
-
type='',
|
1455 |
-
swap='ssd_kpt12'),
|
1456 |
-
214:
|
1457 |
-
dict(
|
1458 |
-
name='ssd_kpt25',
|
1459 |
-
id=214,
|
1460 |
-
color=[64, 64, 128],
|
1461 |
-
type='',
|
1462 |
-
swap='ssd_kpt11'),
|
1463 |
-
215:
|
1464 |
-
dict(
|
1465 |
-
name='ssd_kpt26',
|
1466 |
-
id=215,
|
1467 |
-
color=[64, 64, 128],
|
1468 |
-
type='',
|
1469 |
-
swap='ssd_kpt10'),
|
1470 |
-
216:
|
1471 |
-
dict(
|
1472 |
-
name='ssd_kpt27',
|
1473 |
-
id=216,
|
1474 |
-
color=[64, 64, 128],
|
1475 |
-
type='',
|
1476 |
-
swap='ssd_kpt9'),
|
1477 |
-
217:
|
1478 |
-
dict(
|
1479 |
-
name='ssd_kpt28',
|
1480 |
-
id=217,
|
1481 |
-
color=[64, 64, 128],
|
1482 |
-
type='',
|
1483 |
-
swap='ssd_kpt8'),
|
1484 |
-
218:
|
1485 |
-
dict(
|
1486 |
-
name='ssd_kpt29',
|
1487 |
-
id=218,
|
1488 |
-
color=[64, 64, 128],
|
1489 |
-
type='',
|
1490 |
-
swap='ssd_kpt7'),
|
1491 |
-
219:
|
1492 |
-
dict(name='lsd_kpt1', id=219, color=[128, 64, 0], type='', swap=''),
|
1493 |
-
220:
|
1494 |
-
dict(
|
1495 |
-
name='lsd_kpt2',
|
1496 |
-
id=220,
|
1497 |
-
color=[128, 64, 0],
|
1498 |
-
type='',
|
1499 |
-
swap='lsd_kpt6'),
|
1500 |
-
221:
|
1501 |
-
dict(
|
1502 |
-
name='lsd_kpt3',
|
1503 |
-
id=221,
|
1504 |
-
color=[128, 64, 0],
|
1505 |
-
type='',
|
1506 |
-
swap='lsd_kpt5'),
|
1507 |
-
222:
|
1508 |
-
dict(name='lsd_kpt4', id=222, color=[128, 64, 0], type='', swap=''),
|
1509 |
-
223:
|
1510 |
-
dict(
|
1511 |
-
name='lsd_kpt5',
|
1512 |
-
id=223,
|
1513 |
-
color=[128, 64, 0],
|
1514 |
-
type='',
|
1515 |
-
swap='lsd_kpt3'),
|
1516 |
-
224:
|
1517 |
-
dict(
|
1518 |
-
name='lsd_kpt6',
|
1519 |
-
id=224,
|
1520 |
-
color=[128, 64, 0],
|
1521 |
-
type='',
|
1522 |
-
swap='lsd_kpt2'),
|
1523 |
-
225:
|
1524 |
-
dict(
|
1525 |
-
name='lsd_kpt7',
|
1526 |
-
id=225,
|
1527 |
-
color=[128, 64, 0],
|
1528 |
-
type='',
|
1529 |
-
swap='lsd_kpt37'),
|
1530 |
-
226:
|
1531 |
-
dict(
|
1532 |
-
name='lsd_kpt8',
|
1533 |
-
id=226,
|
1534 |
-
color=[128, 64, 0],
|
1535 |
-
type='',
|
1536 |
-
swap='lsd_kpt36'),
|
1537 |
-
227:
|
1538 |
-
dict(
|
1539 |
-
name='lsd_kpt9',
|
1540 |
-
id=227,
|
1541 |
-
color=[128, 64, 0],
|
1542 |
-
type='',
|
1543 |
-
swap='lsd_kpt35'),
|
1544 |
-
228:
|
1545 |
-
dict(
|
1546 |
-
name='lsd_kpt10',
|
1547 |
-
id=228,
|
1548 |
-
color=[128, 64, 0],
|
1549 |
-
type='',
|
1550 |
-
swap='lsd_kpt34'),
|
1551 |
-
229:
|
1552 |
-
dict(
|
1553 |
-
name='lsd_kpt11',
|
1554 |
-
id=229,
|
1555 |
-
color=[128, 64, 0],
|
1556 |
-
type='',
|
1557 |
-
swap='lsd_kpt33'),
|
1558 |
-
230:
|
1559 |
-
dict(
|
1560 |
-
name='lsd_kpt12',
|
1561 |
-
id=230,
|
1562 |
-
color=[128, 64, 0],
|
1563 |
-
type='',
|
1564 |
-
swap='lsd_kpt32'),
|
1565 |
-
231:
|
1566 |
-
dict(
|
1567 |
-
name='lsd_kpt13',
|
1568 |
-
id=231,
|
1569 |
-
color=[128, 64, 0],
|
1570 |
-
type='',
|
1571 |
-
swap='lsd_kpt31'),
|
1572 |
-
232:
|
1573 |
-
dict(
|
1574 |
-
name='lsd_kpt14',
|
1575 |
-
id=232,
|
1576 |
-
color=[128, 64, 0],
|
1577 |
-
type='',
|
1578 |
-
swap='lsd_kpt30'),
|
1579 |
-
233:
|
1580 |
-
dict(
|
1581 |
-
name='lsd_kpt15',
|
1582 |
-
id=233,
|
1583 |
-
color=[128, 64, 0],
|
1584 |
-
type='',
|
1585 |
-
swap='lsd_kpt29'),
|
1586 |
-
234:
|
1587 |
-
dict(
|
1588 |
-
name='lsd_kpt16',
|
1589 |
-
id=234,
|
1590 |
-
color=[128, 64, 0],
|
1591 |
-
type='',
|
1592 |
-
swap='lsd_kpt28'),
|
1593 |
-
235:
|
1594 |
-
dict(
|
1595 |
-
name='lsd_kpt17',
|
1596 |
-
id=235,
|
1597 |
-
color=[128, 64, 0],
|
1598 |
-
type='',
|
1599 |
-
swap='lsd_kpt27'),
|
1600 |
-
236:
|
1601 |
-
dict(
|
1602 |
-
name='lsd_kpt18',
|
1603 |
-
id=236,
|
1604 |
-
color=[128, 64, 0],
|
1605 |
-
type='',
|
1606 |
-
swap='lsd_kpt26'),
|
1607 |
-
237:
|
1608 |
-
dict(
|
1609 |
-
name='lsd_kpt19',
|
1610 |
-
id=237,
|
1611 |
-
color=[128, 64, 0],
|
1612 |
-
type='',
|
1613 |
-
swap='lsd_kpt25'),
|
1614 |
-
238:
|
1615 |
-
dict(
|
1616 |
-
name='lsd_kpt20',
|
1617 |
-
id=238,
|
1618 |
-
color=[128, 64, 0],
|
1619 |
-
type='',
|
1620 |
-
swap='lsd_kpt24'),
|
1621 |
-
239:
|
1622 |
-
dict(
|
1623 |
-
name='lsd_kpt21',
|
1624 |
-
id=239,
|
1625 |
-
color=[128, 64, 0],
|
1626 |
-
type='',
|
1627 |
-
swap='lsd_kpt23'),
|
1628 |
-
240:
|
1629 |
-
dict(name='lsd_kpt22', id=240, color=[128, 64, 0], type='', swap=''),
|
1630 |
-
241:
|
1631 |
-
dict(
|
1632 |
-
name='lsd_kpt23',
|
1633 |
-
id=241,
|
1634 |
-
color=[128, 64, 0],
|
1635 |
-
type='',
|
1636 |
-
swap='lsd_kpt21'),
|
1637 |
-
242:
|
1638 |
-
dict(
|
1639 |
-
name='lsd_kpt24',
|
1640 |
-
id=242,
|
1641 |
-
color=[128, 64, 0],
|
1642 |
-
type='',
|
1643 |
-
swap='lsd_kpt20'),
|
1644 |
-
243:
|
1645 |
-
dict(
|
1646 |
-
name='lsd_kpt25',
|
1647 |
-
id=243,
|
1648 |
-
color=[128, 64, 0],
|
1649 |
-
type='',
|
1650 |
-
swap='lsd_kpt19'),
|
1651 |
-
244:
|
1652 |
-
dict(
|
1653 |
-
name='lsd_kpt26',
|
1654 |
-
id=244,
|
1655 |
-
color=[128, 64, 0],
|
1656 |
-
type='',
|
1657 |
-
swap='lsd_kpt18'),
|
1658 |
-
245:
|
1659 |
-
dict(
|
1660 |
-
name='lsd_kpt27',
|
1661 |
-
id=245,
|
1662 |
-
color=[128, 64, 0],
|
1663 |
-
type='',
|
1664 |
-
swap='lsd_kpt17'),
|
1665 |
-
246:
|
1666 |
-
dict(
|
1667 |
-
name='lsd_kpt28',
|
1668 |
-
id=246,
|
1669 |
-
color=[128, 64, 0],
|
1670 |
-
type='',
|
1671 |
-
swap='lsd_kpt16'),
|
1672 |
-
247:
|
1673 |
-
dict(
|
1674 |
-
name='lsd_kpt29',
|
1675 |
-
id=247,
|
1676 |
-
color=[128, 64, 0],
|
1677 |
-
type='',
|
1678 |
-
swap='lsd_kpt15'),
|
1679 |
-
248:
|
1680 |
-
dict(
|
1681 |
-
name='lsd_kpt30',
|
1682 |
-
id=248,
|
1683 |
-
color=[128, 64, 0],
|
1684 |
-
type='',
|
1685 |
-
swap='lsd_kpt14'),
|
1686 |
-
249:
|
1687 |
-
dict(
|
1688 |
-
name='lsd_kpt31',
|
1689 |
-
id=249,
|
1690 |
-
color=[128, 64, 0],
|
1691 |
-
type='',
|
1692 |
-
swap='lsd_kpt13'),
|
1693 |
-
250:
|
1694 |
-
dict(
|
1695 |
-
name='lsd_kpt32',
|
1696 |
-
id=250,
|
1697 |
-
color=[128, 64, 0],
|
1698 |
-
type='',
|
1699 |
-
swap='lsd_kpt12'),
|
1700 |
-
251:
|
1701 |
-
dict(
|
1702 |
-
name='lsd_kpt33',
|
1703 |
-
id=251,
|
1704 |
-
color=[128, 64, 0],
|
1705 |
-
type='',
|
1706 |
-
swap='lsd_kpt11'),
|
1707 |
-
252:
|
1708 |
-
dict(
|
1709 |
-
name='lsd_kpt34',
|
1710 |
-
id=252,
|
1711 |
-
color=[128, 64, 0],
|
1712 |
-
type='',
|
1713 |
-
swap='lsd_kpt10'),
|
1714 |
-
253:
|
1715 |
-
dict(
|
1716 |
-
name='lsd_kpt35',
|
1717 |
-
id=253,
|
1718 |
-
color=[128, 64, 0],
|
1719 |
-
type='',
|
1720 |
-
swap='lsd_kpt9'),
|
1721 |
-
254:
|
1722 |
-
dict(
|
1723 |
-
name='lsd_kpt36',
|
1724 |
-
id=254,
|
1725 |
-
color=[128, 64, 0],
|
1726 |
-
type='',
|
1727 |
-
swap='lsd_kpt8'),
|
1728 |
-
255:
|
1729 |
-
dict(
|
1730 |
-
name='lsd_kpt37',
|
1731 |
-
id=255,
|
1732 |
-
color=[128, 64, 0],
|
1733 |
-
type='',
|
1734 |
-
swap='lsd_kpt7'),
|
1735 |
-
256:
|
1736 |
-
dict(name='vd_kpt1', id=256, color=[128, 64, 255], type='', swap=''),
|
1737 |
-
257:
|
1738 |
-
dict(
|
1739 |
-
name='vd_kpt2',
|
1740 |
-
id=257,
|
1741 |
-
color=[128, 64, 255],
|
1742 |
-
type='',
|
1743 |
-
swap='vd_kpt6'),
|
1744 |
-
258:
|
1745 |
-
dict(
|
1746 |
-
name='vd_kpt3',
|
1747 |
-
id=258,
|
1748 |
-
color=[128, 64, 255],
|
1749 |
-
type='',
|
1750 |
-
swap='vd_kpt5'),
|
1751 |
-
259:
|
1752 |
-
dict(name='vd_kpt4', id=259, color=[128, 64, 255], type='', swap=''),
|
1753 |
-
260:
|
1754 |
-
dict(
|
1755 |
-
name='vd_kpt5',
|
1756 |
-
id=260,
|
1757 |
-
color=[128, 64, 255],
|
1758 |
-
type='',
|
1759 |
-
swap='vd_kpt3'),
|
1760 |
-
261:
|
1761 |
-
dict(
|
1762 |
-
name='vd_kpt6',
|
1763 |
-
id=261,
|
1764 |
-
color=[128, 64, 255],
|
1765 |
-
type='',
|
1766 |
-
swap='vd_kpt2'),
|
1767 |
-
262:
|
1768 |
-
dict(
|
1769 |
-
name='vd_kpt7',
|
1770 |
-
id=262,
|
1771 |
-
color=[128, 64, 255],
|
1772 |
-
type='',
|
1773 |
-
swap='vd_kpt19'),
|
1774 |
-
263:
|
1775 |
-
dict(
|
1776 |
-
name='vd_kpt8',
|
1777 |
-
id=263,
|
1778 |
-
color=[128, 64, 255],
|
1779 |
-
type='',
|
1780 |
-
swap='vd_kpt18'),
|
1781 |
-
264:
|
1782 |
-
dict(
|
1783 |
-
name='vd_kpt9',
|
1784 |
-
id=264,
|
1785 |
-
color=[128, 64, 255],
|
1786 |
-
type='',
|
1787 |
-
swap='vd_kpt17'),
|
1788 |
-
265:
|
1789 |
-
dict(
|
1790 |
-
name='vd_kpt10',
|
1791 |
-
id=265,
|
1792 |
-
color=[128, 64, 255],
|
1793 |
-
type='',
|
1794 |
-
swap='vd_kpt16'),
|
1795 |
-
266:
|
1796 |
-
dict(
|
1797 |
-
name='vd_kpt11',
|
1798 |
-
id=266,
|
1799 |
-
color=[128, 64, 255],
|
1800 |
-
type='',
|
1801 |
-
swap='vd_kpt15'),
|
1802 |
-
267:
|
1803 |
-
dict(
|
1804 |
-
name='vd_kpt12',
|
1805 |
-
id=267,
|
1806 |
-
color=[128, 64, 255],
|
1807 |
-
type='',
|
1808 |
-
swap='vd_kpt14'),
|
1809 |
-
268:
|
1810 |
-
dict(name='vd_kpt13', id=268, color=[128, 64, 255], type='', swap=''),
|
1811 |
-
269:
|
1812 |
-
dict(
|
1813 |
-
name='vd_kpt14',
|
1814 |
-
id=269,
|
1815 |
-
color=[128, 64, 255],
|
1816 |
-
type='',
|
1817 |
-
swap='vd_kpt12'),
|
1818 |
-
270:
|
1819 |
-
dict(
|
1820 |
-
name='vd_kpt15',
|
1821 |
-
id=270,
|
1822 |
-
color=[128, 64, 255],
|
1823 |
-
type='',
|
1824 |
-
swap='vd_kpt11'),
|
1825 |
-
271:
|
1826 |
-
dict(
|
1827 |
-
name='vd_kpt16',
|
1828 |
-
id=271,
|
1829 |
-
color=[128, 64, 255],
|
1830 |
-
type='',
|
1831 |
-
swap='vd_kpt10'),
|
1832 |
-
272:
|
1833 |
-
dict(
|
1834 |
-
name='vd_kpt17',
|
1835 |
-
id=272,
|
1836 |
-
color=[128, 64, 255],
|
1837 |
-
type='',
|
1838 |
-
swap='vd_kpt9'),
|
1839 |
-
273:
|
1840 |
-
dict(
|
1841 |
-
name='vd_kpt18',
|
1842 |
-
id=273,
|
1843 |
-
color=[128, 64, 255],
|
1844 |
-
type='',
|
1845 |
-
swap='vd_kpt8'),
|
1846 |
-
274:
|
1847 |
-
dict(
|
1848 |
-
name='vd_kpt19',
|
1849 |
-
id=274,
|
1850 |
-
color=[128, 64, 255],
|
1851 |
-
type='',
|
1852 |
-
swap='vd_kpt7'),
|
1853 |
-
275:
|
1854 |
-
dict(name='sd_kpt1', id=275, color=[128, 64, 0], type='', swap=''),
|
1855 |
-
276:
|
1856 |
-
dict(
|
1857 |
-
name='sd_kpt2',
|
1858 |
-
id=276,
|
1859 |
-
color=[128, 64, 0],
|
1860 |
-
type='',
|
1861 |
-
swap='sd_kpt6'),
|
1862 |
-
277:
|
1863 |
-
dict(
|
1864 |
-
name='sd_kpt3',
|
1865 |
-
id=277,
|
1866 |
-
color=[128, 64, 0],
|
1867 |
-
type='',
|
1868 |
-
swap='sd_kpt5'),
|
1869 |
-
278:
|
1870 |
-
dict(name='sd_kpt4', id=278, color=[128, 64, 0], type='', swap=''),
|
1871 |
-
279:
|
1872 |
-
dict(
|
1873 |
-
name='sd_kpt5',
|
1874 |
-
id=279,
|
1875 |
-
color=[128, 64, 0],
|
1876 |
-
type='',
|
1877 |
-
swap='sd_kpt3'),
|
1878 |
-
280:
|
1879 |
-
dict(
|
1880 |
-
name='sd_kpt6',
|
1881 |
-
id=280,
|
1882 |
-
color=[128, 64, 0],
|
1883 |
-
type='',
|
1884 |
-
swap='sd_kpt2'),
|
1885 |
-
281:
|
1886 |
-
dict(
|
1887 |
-
name='sd_kpt7',
|
1888 |
-
id=281,
|
1889 |
-
color=[128, 64, 0],
|
1890 |
-
type='',
|
1891 |
-
swap='sd_kpt19'),
|
1892 |
-
282:
|
1893 |
-
dict(
|
1894 |
-
name='sd_kpt8',
|
1895 |
-
id=282,
|
1896 |
-
color=[128, 64, 0],
|
1897 |
-
type='',
|
1898 |
-
swap='sd_kpt18'),
|
1899 |
-
283:
|
1900 |
-
dict(
|
1901 |
-
name='sd_kpt9',
|
1902 |
-
id=283,
|
1903 |
-
color=[128, 64, 0],
|
1904 |
-
type='',
|
1905 |
-
swap='sd_kpt17'),
|
1906 |
-
284:
|
1907 |
-
dict(
|
1908 |
-
name='sd_kpt10',
|
1909 |
-
id=284,
|
1910 |
-
color=[128, 64, 0],
|
1911 |
-
type='',
|
1912 |
-
swap='sd_kpt16'),
|
1913 |
-
285:
|
1914 |
-
dict(
|
1915 |
-
name='sd_kpt11',
|
1916 |
-
id=285,
|
1917 |
-
color=[128, 64, 0],
|
1918 |
-
type='',
|
1919 |
-
swap='sd_kpt15'),
|
1920 |
-
286:
|
1921 |
-
dict(
|
1922 |
-
name='sd_kpt12',
|
1923 |
-
id=286,
|
1924 |
-
color=[128, 64, 0],
|
1925 |
-
type='',
|
1926 |
-
swap='sd_kpt14'),
|
1927 |
-
287:
|
1928 |
-
dict(name='sd_kpt13', id=287, color=[128, 64, 0], type='', swap=''),
|
1929 |
-
288:
|
1930 |
-
dict(
|
1931 |
-
name='sd_kpt14',
|
1932 |
-
id=288,
|
1933 |
-
color=[128, 64, 0],
|
1934 |
-
type='',
|
1935 |
-
swap='sd_kpt12'),
|
1936 |
-
289:
|
1937 |
-
dict(
|
1938 |
-
name='sd_kpt15',
|
1939 |
-
id=289,
|
1940 |
-
color=[128, 64, 0],
|
1941 |
-
type='',
|
1942 |
-
swap='sd_kpt11'),
|
1943 |
-
290:
|
1944 |
-
dict(
|
1945 |
-
name='sd_kpt16',
|
1946 |
-
id=290,
|
1947 |
-
color=[128, 64, 0],
|
1948 |
-
type='',
|
1949 |
-
swap='sd_kpt10'),
|
1950 |
-
291:
|
1951 |
-
dict(
|
1952 |
-
name='sd_kpt17',
|
1953 |
-
id=291,
|
1954 |
-
color=[128, 64, 0],
|
1955 |
-
type='',
|
1956 |
-
swap='sd_kpt9'),
|
1957 |
-
292:
|
1958 |
-
dict(
|
1959 |
-
name='sd_kpt18',
|
1960 |
-
id=292,
|
1961 |
-
color=[128, 64, 0],
|
1962 |
-
type='',
|
1963 |
-
swap='sd_kpt8'),
|
1964 |
-
293:
|
1965 |
-
dict(
|
1966 |
-
name='sd_kpt19',
|
1967 |
-
id=293,
|
1968 |
-
color=[128, 64, 0],
|
1969 |
-
type='',
|
1970 |
-
swap='sd_kpt7')
|
1971 |
-
}),
|
1972 |
-
skeleton_info=dict({
|
1973 |
-
0:
|
1974 |
-
dict(link=('sss_kpt1', 'sss_kpt2'), id=0, color=[255, 128, 0]),
|
1975 |
-
1:
|
1976 |
-
dict(link=('sss_kpt2', 'sss_kpt7'), id=1, color=[255, 128, 0]),
|
1977 |
-
2:
|
1978 |
-
dict(link=('sss_kpt7', 'sss_kpt8'), id=2, color=[255, 128, 0]),
|
1979 |
-
3:
|
1980 |
-
dict(link=('sss_kpt8', 'sss_kpt9'), id=3, color=[255, 128, 0]),
|
1981 |
-
4:
|
1982 |
-
dict(link=('sss_kpt9', 'sss_kpt10'), id=4, color=[255, 128, 0]),
|
1983 |
-
5:
|
1984 |
-
dict(link=('sss_kpt10', 'sss_kpt11'), id=5, color=[255, 128, 0]),
|
1985 |
-
6:
|
1986 |
-
dict(link=('sss_kpt11', 'sss_kpt12'), id=6, color=[255, 128, 0]),
|
1987 |
-
7:
|
1988 |
-
dict(link=('sss_kpt12', 'sss_kpt13'), id=7, color=[255, 128, 0]),
|
1989 |
-
8:
|
1990 |
-
dict(link=('sss_kpt13', 'sss_kpt14'), id=8, color=[255, 128, 0]),
|
1991 |
-
9:
|
1992 |
-
dict(link=('sss_kpt14', 'sss_kpt15'), id=9, color=[255, 128, 0]),
|
1993 |
-
10:
|
1994 |
-
dict(link=('sss_kpt15', 'sss_kpt16'), id=10, color=[255, 128, 0]),
|
1995 |
-
11:
|
1996 |
-
dict(link=('sss_kpt16', 'sss_kpt17'), id=11, color=[255, 128, 0]),
|
1997 |
-
12:
|
1998 |
-
dict(link=('sss_kpt17', 'sss_kpt18'), id=12, color=[255, 128, 0]),
|
1999 |
-
13:
|
2000 |
-
dict(link=('sss_kpt18', 'sss_kpt19'), id=13, color=[255, 128, 0]),
|
2001 |
-
14:
|
2002 |
-
dict(link=('sss_kpt19', 'sss_kpt20'), id=14, color=[255, 128, 0]),
|
2003 |
-
15:
|
2004 |
-
dict(link=('sss_kpt20', 'sss_kpt21'), id=15, color=[255, 128, 0]),
|
2005 |
-
16:
|
2006 |
-
dict(link=('sss_kpt21', 'sss_kpt22'), id=16, color=[255, 128, 0]),
|
2007 |
-
17:
|
2008 |
-
dict(link=('sss_kpt22', 'sss_kpt23'), id=17, color=[255, 128, 0]),
|
2009 |
-
18:
|
2010 |
-
dict(link=('sss_kpt23', 'sss_kpt24'), id=18, color=[255, 128, 0]),
|
2011 |
-
19:
|
2012 |
-
dict(link=('sss_kpt24', 'sss_kpt25'), id=19, color=[255, 128, 0]),
|
2013 |
-
20:
|
2014 |
-
dict(link=('sss_kpt25', 'sss_kpt6'), id=20, color=[255, 128, 0]),
|
2015 |
-
21:
|
2016 |
-
dict(link=('sss_kpt6', 'sss_kpt1'), id=21, color=[255, 128, 0]),
|
2017 |
-
22:
|
2018 |
-
dict(link=('sss_kpt2', 'sss_kpt3'), id=22, color=[255, 128, 0]),
|
2019 |
-
23:
|
2020 |
-
dict(link=('sss_kpt3', 'sss_kpt4'), id=23, color=[255, 128, 0]),
|
2021 |
-
24:
|
2022 |
-
dict(link=('sss_kpt4', 'sss_kpt5'), id=24, color=[255, 128, 0]),
|
2023 |
-
25:
|
2024 |
-
dict(link=('sss_kpt5', 'sss_kpt6'), id=25, color=[255, 128, 0]),
|
2025 |
-
26:
|
2026 |
-
dict(link=('lss_kpt1', 'lss_kpt2'), id=26, color=[255, 0, 128]),
|
2027 |
-
27:
|
2028 |
-
dict(link=('lss_kpt2', 'lss_kpt7'), id=27, color=[255, 0, 128]),
|
2029 |
-
28:
|
2030 |
-
dict(link=('lss_kpt7', 'lss_kpt8'), id=28, color=[255, 0, 128]),
|
2031 |
-
29:
|
2032 |
-
dict(link=('lss_kpt8', 'lss_kpt9'), id=29, color=[255, 0, 128]),
|
2033 |
-
30:
|
2034 |
-
dict(link=('lss_kpt9', 'lss_kpt10'), id=30, color=[255, 0, 128]),
|
2035 |
-
31:
|
2036 |
-
dict(link=('lss_kpt10', 'lss_kpt11'), id=31, color=[255, 0, 128]),
|
2037 |
-
32:
|
2038 |
-
dict(link=('lss_kpt11', 'lss_kpt12'), id=32, color=[255, 0, 128]),
|
2039 |
-
33:
|
2040 |
-
dict(link=('lss_kpt12', 'lss_kpt13'), id=33, color=[255, 0, 128]),
|
2041 |
-
34:
|
2042 |
-
dict(link=('lss_kpt13', 'lss_kpt14'), id=34, color=[255, 0, 128]),
|
2043 |
-
35:
|
2044 |
-
dict(link=('lss_kpt14', 'lss_kpt15'), id=35, color=[255, 0, 128]),
|
2045 |
-
36:
|
2046 |
-
dict(link=('lss_kpt15', 'lss_kpt16'), id=36, color=[255, 0, 128]),
|
2047 |
-
37:
|
2048 |
-
dict(link=('lss_kpt16', 'lss_kpt17'), id=37, color=[255, 0, 128]),
|
2049 |
-
38:
|
2050 |
-
dict(link=('lss_kpt17', 'lss_kpt18'), id=38, color=[255, 0, 128]),
|
2051 |
-
39:
|
2052 |
-
dict(link=('lss_kpt18', 'lss_kpt19'), id=39, color=[255, 0, 128]),
|
2053 |
-
40:
|
2054 |
-
dict(link=('lss_kpt19', 'lss_kpt20'), id=40, color=[255, 0, 128]),
|
2055 |
-
41:
|
2056 |
-
dict(link=('lss_kpt20', 'lss_kpt21'), id=41, color=[255, 0, 128]),
|
2057 |
-
42:
|
2058 |
-
dict(link=('lss_kpt21', 'lss_kpt22'), id=42, color=[255, 0, 128]),
|
2059 |
-
43:
|
2060 |
-
dict(link=('lss_kpt22', 'lss_kpt23'), id=43, color=[255, 0, 128]),
|
2061 |
-
44:
|
2062 |
-
dict(link=('lss_kpt23', 'lss_kpt24'), id=44, color=[255, 0, 128]),
|
2063 |
-
45:
|
2064 |
-
dict(link=('lss_kpt24', 'lss_kpt25'), id=45, color=[255, 0, 128]),
|
2065 |
-
46:
|
2066 |
-
dict(link=('lss_kpt25', 'lss_kpt26'), id=46, color=[255, 0, 128]),
|
2067 |
-
47:
|
2068 |
-
dict(link=('lss_kpt26', 'lss_kpt27'), id=47, color=[255, 0, 128]),
|
2069 |
-
48:
|
2070 |
-
dict(link=('lss_kpt27', 'lss_kpt28'), id=48, color=[255, 0, 128]),
|
2071 |
-
49:
|
2072 |
-
dict(link=('lss_kpt28', 'lss_kpt29'), id=49, color=[255, 0, 128]),
|
2073 |
-
50:
|
2074 |
-
dict(link=('lss_kpt29', 'lss_kpt30'), id=50, color=[255, 0, 128]),
|
2075 |
-
51:
|
2076 |
-
dict(link=('lss_kpt30', 'lss_kpt31'), id=51, color=[255, 0, 128]),
|
2077 |
-
52:
|
2078 |
-
dict(link=('lss_kpt31', 'lss_kpt32'), id=52, color=[255, 0, 128]),
|
2079 |
-
53:
|
2080 |
-
dict(link=('lss_kpt32', 'lss_kpt33'), id=53, color=[255, 0, 128]),
|
2081 |
-
54:
|
2082 |
-
dict(link=('lss_kpt33', 'lss_kpt6'), id=54, color=[255, 0, 128]),
|
2083 |
-
55:
|
2084 |
-
dict(link=('lss_kpt6', 'lss_kpt5'), id=55, color=[255, 0, 128]),
|
2085 |
-
56:
|
2086 |
-
dict(link=('lss_kpt5', 'lss_kpt4'), id=56, color=[255, 0, 128]),
|
2087 |
-
57:
|
2088 |
-
dict(link=('lss_kpt4', 'lss_kpt3'), id=57, color=[255, 0, 128]),
|
2089 |
-
58:
|
2090 |
-
dict(link=('lss_kpt3', 'lss_kpt2'), id=58, color=[255, 0, 128]),
|
2091 |
-
59:
|
2092 |
-
dict(link=('lss_kpt6', 'lss_kpt1'), id=59, color=[255, 0, 128]),
|
2093 |
-
60:
|
2094 |
-
dict(link=('sso_kpt1', 'sso_kpt4'), id=60, color=[128, 0, 255]),
|
2095 |
-
61:
|
2096 |
-
dict(link=('sso_kpt4', 'sso_kpt7'), id=61, color=[128, 0, 255]),
|
2097 |
-
62:
|
2098 |
-
dict(link=('sso_kpt7', 'sso_kpt8'), id=62, color=[128, 0, 255]),
|
2099 |
-
63:
|
2100 |
-
dict(link=('sso_kpt8', 'sso_kpt9'), id=63, color=[128, 0, 255]),
|
2101 |
-
64:
|
2102 |
-
dict(link=('sso_kpt9', 'sso_kpt10'), id=64, color=[128, 0, 255]),
|
2103 |
-
65:
|
2104 |
-
dict(link=('sso_kpt10', 'sso_kpt11'), id=65, color=[128, 0, 255]),
|
2105 |
-
66:
|
2106 |
-
dict(link=('sso_kpt11', 'sso_kpt12'), id=66, color=[128, 0, 255]),
|
2107 |
-
67:
|
2108 |
-
dict(link=('sso_kpt12', 'sso_kpt13'), id=67, color=[128, 0, 255]),
|
2109 |
-
68:
|
2110 |
-
dict(link=('sso_kpt13', 'sso_kpt14'), id=68, color=[128, 0, 255]),
|
2111 |
-
69:
|
2112 |
-
dict(link=('sso_kpt14', 'sso_kpt15'), id=69, color=[128, 0, 255]),
|
2113 |
-
70:
|
2114 |
-
dict(link=('sso_kpt15', 'sso_kpt16'), id=70, color=[128, 0, 255]),
|
2115 |
-
71:
|
2116 |
-
dict(link=('sso_kpt16', 'sso_kpt31'), id=71, color=[128, 0, 255]),
|
2117 |
-
72:
|
2118 |
-
dict(link=('sso_kpt31', 'sso_kpt30'), id=72, color=[128, 0, 255]),
|
2119 |
-
73:
|
2120 |
-
dict(link=('sso_kpt30', 'sso_kpt2'), id=73, color=[128, 0, 255]),
|
2121 |
-
74:
|
2122 |
-
dict(link=('sso_kpt2', 'sso_kpt3'), id=74, color=[128, 0, 255]),
|
2123 |
-
75:
|
2124 |
-
dict(link=('sso_kpt3', 'sso_kpt4'), id=75, color=[128, 0, 255]),
|
2125 |
-
76:
|
2126 |
-
dict(link=('sso_kpt1', 'sso_kpt6'), id=76, color=[128, 0, 255]),
|
2127 |
-
77:
|
2128 |
-
dict(link=('sso_kpt6', 'sso_kpt25'), id=77, color=[128, 0, 255]),
|
2129 |
-
78:
|
2130 |
-
dict(link=('sso_kpt25', 'sso_kpt24'), id=78, color=[128, 0, 255]),
|
2131 |
-
79:
|
2132 |
-
dict(link=('sso_kpt24', 'sso_kpt23'), id=79, color=[128, 0, 255]),
|
2133 |
-
80:
|
2134 |
-
dict(link=('sso_kpt23', 'sso_kpt22'), id=80, color=[128, 0, 255]),
|
2135 |
-
81:
|
2136 |
-
dict(link=('sso_kpt22', 'sso_kpt21'), id=81, color=[128, 0, 255]),
|
2137 |
-
82:
|
2138 |
-
dict(link=('sso_kpt21', 'sso_kpt20'), id=82, color=[128, 0, 255]),
|
2139 |
-
83:
|
2140 |
-
dict(link=('sso_kpt20', 'sso_kpt19'), id=83, color=[128, 0, 255]),
|
2141 |
-
84:
|
2142 |
-
dict(link=('sso_kpt19', 'sso_kpt18'), id=84, color=[128, 0, 255]),
|
2143 |
-
85:
|
2144 |
-
dict(link=('sso_kpt18', 'sso_kpt17'), id=85, color=[128, 0, 255]),
|
2145 |
-
86:
|
2146 |
-
dict(link=('sso_kpt17', 'sso_kpt29'), id=86, color=[128, 0, 255]),
|
2147 |
-
87:
|
2148 |
-
dict(link=('sso_kpt29', 'sso_kpt28'), id=87, color=[128, 0, 255]),
|
2149 |
-
88:
|
2150 |
-
dict(link=('sso_kpt28', 'sso_kpt27'), id=88, color=[128, 0, 255]),
|
2151 |
-
89:
|
2152 |
-
dict(link=('sso_kpt27', 'sso_kpt26'), id=89, color=[128, 0, 255]),
|
2153 |
-
90:
|
2154 |
-
dict(link=('sso_kpt26', 'sso_kpt5'), id=90, color=[128, 0, 255]),
|
2155 |
-
91:
|
2156 |
-
dict(link=('sso_kpt5', 'sso_kpt6'), id=91, color=[128, 0, 255]),
|
2157 |
-
92:
|
2158 |
-
dict(link=('lso_kpt1', 'lso_kpt2'), id=92, color=[0, 128, 255]),
|
2159 |
-
93:
|
2160 |
-
dict(link=('lso_kpt2', 'lso_kpt7'), id=93, color=[0, 128, 255]),
|
2161 |
-
94:
|
2162 |
-
dict(link=('lso_kpt7', 'lso_kpt8'), id=94, color=[0, 128, 255]),
|
2163 |
-
95:
|
2164 |
-
dict(link=('lso_kpt8', 'lso_kpt9'), id=95, color=[0, 128, 255]),
|
2165 |
-
96:
|
2166 |
-
dict(link=('lso_kpt9', 'lso_kpt10'), id=96, color=[0, 128, 255]),
|
2167 |
-
97:
|
2168 |
-
dict(link=('lso_kpt10', 'lso_kpt11'), id=97, color=[0, 128, 255]),
|
2169 |
-
98:
|
2170 |
-
dict(link=('lso_kpt11', 'lso_kpt12'), id=98, color=[0, 128, 255]),
|
2171 |
-
99:
|
2172 |
-
dict(link=('lso_kpt12', 'lso_kpt13'), id=99, color=[0, 128, 255]),
|
2173 |
-
100:
|
2174 |
-
dict(link=('lso_kpt13', 'lso_kpt14'), id=100, color=[0, 128, 255]),
|
2175 |
-
101:
|
2176 |
-
dict(link=('lso_kpt14', 'lso_kpt15'), id=101, color=[0, 128, 255]),
|
2177 |
-
102:
|
2178 |
-
dict(link=('lso_kpt15', 'lso_kpt16'), id=102, color=[0, 128, 255]),
|
2179 |
-
103:
|
2180 |
-
dict(link=('lso_kpt16', 'lso_kpt17'), id=103, color=[0, 128, 255]),
|
2181 |
-
104:
|
2182 |
-
dict(link=('lso_kpt17', 'lso_kpt18'), id=104, color=[0, 128, 255]),
|
2183 |
-
105:
|
2184 |
-
dict(link=('lso_kpt18', 'lso_kpt19'), id=105, color=[0, 128, 255]),
|
2185 |
-
106:
|
2186 |
-
dict(link=('lso_kpt19', 'lso_kpt20'), id=106, color=[0, 128, 255]),
|
2187 |
-
107:
|
2188 |
-
dict(link=('lso_kpt20', 'lso_kpt39'), id=107, color=[0, 128, 255]),
|
2189 |
-
108:
|
2190 |
-
dict(link=('lso_kpt39', 'lso_kpt38'), id=108, color=[0, 128, 255]),
|
2191 |
-
109:
|
2192 |
-
dict(link=('lso_kpt38', 'lso_kpt4'), id=109, color=[0, 128, 255]),
|
2193 |
-
110:
|
2194 |
-
dict(link=('lso_kpt4', 'lso_kpt3'), id=110, color=[0, 128, 255]),
|
2195 |
-
111:
|
2196 |
-
dict(link=('lso_kpt3', 'lso_kpt2'), id=111, color=[0, 128, 255]),
|
2197 |
-
112:
|
2198 |
-
dict(link=('lso_kpt1', 'lso_kpt6'), id=112, color=[0, 128, 255]),
|
2199 |
-
113:
|
2200 |
-
dict(link=('lso_kpt6', 'lso_kpt33'), id=113, color=[0, 128, 255]),
|
2201 |
-
114:
|
2202 |
-
dict(link=('lso_kpt33', 'lso_kpt32'), id=114, color=[0, 128, 255]),
|
2203 |
-
115:
|
2204 |
-
dict(link=('lso_kpt32', 'lso_kpt31'), id=115, color=[0, 128, 255]),
|
2205 |
-
116:
|
2206 |
-
dict(link=('lso_kpt31', 'lso_kpt30'), id=116, color=[0, 128, 255]),
|
2207 |
-
117:
|
2208 |
-
dict(link=('lso_kpt30', 'lso_kpt29'), id=117, color=[0, 128, 255]),
|
2209 |
-
118:
|
2210 |
-
dict(link=('lso_kpt29', 'lso_kpt28'), id=118, color=[0, 128, 255]),
|
2211 |
-
119:
|
2212 |
-
dict(link=('lso_kpt28', 'lso_kpt27'), id=119, color=[0, 128, 255]),
|
2213 |
-
120:
|
2214 |
-
dict(link=('lso_kpt27', 'lso_kpt26'), id=120, color=[0, 128, 255]),
|
2215 |
-
121:
|
2216 |
-
dict(link=('lso_kpt26', 'lso_kpt25'), id=121, color=[0, 128, 255]),
|
2217 |
-
122:
|
2218 |
-
dict(link=('lso_kpt25', 'lso_kpt24'), id=122, color=[0, 128, 255]),
|
2219 |
-
123:
|
2220 |
-
dict(link=('lso_kpt24', 'lso_kpt23'), id=123, color=[0, 128, 255]),
|
2221 |
-
124:
|
2222 |
-
dict(link=('lso_kpt23', 'lso_kpt22'), id=124, color=[0, 128, 255]),
|
2223 |
-
125:
|
2224 |
-
dict(link=('lso_kpt22', 'lso_kpt21'), id=125, color=[0, 128, 255]),
|
2225 |
-
126:
|
2226 |
-
dict(link=('lso_kpt21', 'lso_kpt37'), id=126, color=[0, 128, 255]),
|
2227 |
-
127:
|
2228 |
-
dict(link=('lso_kpt37', 'lso_kpt36'), id=127, color=[0, 128, 255]),
|
2229 |
-
128:
|
2230 |
-
dict(link=('lso_kpt36', 'lso_kpt35'), id=128, color=[0, 128, 255]),
|
2231 |
-
129:
|
2232 |
-
dict(link=('lso_kpt35', 'lso_kpt34'), id=129, color=[0, 128, 255]),
|
2233 |
-
130:
|
2234 |
-
dict(link=('lso_kpt34', 'lso_kpt5'), id=130, color=[0, 128, 255]),
|
2235 |
-
131:
|
2236 |
-
dict(link=('lso_kpt5', 'lso_kpt6'), id=131, color=[0, 128, 255]),
|
2237 |
-
132:
|
2238 |
-
dict(link=('vest_kpt1', 'vest_kpt2'), id=132, color=[0, 128, 128]),
|
2239 |
-
133:
|
2240 |
-
dict(link=('vest_kpt2', 'vest_kpt7'), id=133, color=[0, 128, 128]),
|
2241 |
-
134:
|
2242 |
-
dict(link=('vest_kpt7', 'vest_kpt8'), id=134, color=[0, 128, 128]),
|
2243 |
-
135:
|
2244 |
-
dict(link=('vest_kpt8', 'vest_kpt9'), id=135, color=[0, 128, 128]),
|
2245 |
-
136:
|
2246 |
-
dict(link=('vest_kpt9', 'vest_kpt10'), id=136, color=[0, 128, 128]),
|
2247 |
-
137:
|
2248 |
-
dict(link=('vest_kpt10', 'vest_kpt11'), id=137, color=[0, 128, 128]),
|
2249 |
-
138:
|
2250 |
-
dict(link=('vest_kpt11', 'vest_kpt12'), id=138, color=[0, 128, 128]),
|
2251 |
-
139:
|
2252 |
-
dict(link=('vest_kpt12', 'vest_kpt13'), id=139, color=[0, 128, 128]),
|
2253 |
-
140:
|
2254 |
-
dict(link=('vest_kpt13', 'vest_kpt14'), id=140, color=[0, 128, 128]),
|
2255 |
-
141:
|
2256 |
-
dict(link=('vest_kpt14', 'vest_kpt15'), id=141, color=[0, 128, 128]),
|
2257 |
-
142:
|
2258 |
-
dict(link=('vest_kpt15', 'vest_kpt6'), id=142, color=[0, 128, 128]),
|
2259 |
-
143:
|
2260 |
-
dict(link=('vest_kpt6', 'vest_kpt1'), id=143, color=[0, 128, 128]),
|
2261 |
-
144:
|
2262 |
-
dict(link=('vest_kpt2', 'vest_kpt3'), id=144, color=[0, 128, 128]),
|
2263 |
-
145:
|
2264 |
-
dict(link=('vest_kpt3', 'vest_kpt4'), id=145, color=[0, 128, 128]),
|
2265 |
-
146:
|
2266 |
-
dict(link=('vest_kpt4', 'vest_kpt5'), id=146, color=[0, 128, 128]),
|
2267 |
-
147:
|
2268 |
-
dict(link=('vest_kpt5', 'vest_kpt6'), id=147, color=[0, 128, 128]),
|
2269 |
-
148:
|
2270 |
-
dict(link=('sling_kpt1', 'sling_kpt2'), id=148, color=[0, 0, 128]),
|
2271 |
-
149:
|
2272 |
-
dict(link=('sling_kpt2', 'sling_kpt8'), id=149, color=[0, 0, 128]),
|
2273 |
-
150:
|
2274 |
-
dict(link=('sling_kpt8', 'sling_kpt9'), id=150, color=[0, 0, 128]),
|
2275 |
-
151:
|
2276 |
-
dict(link=('sling_kpt9', 'sling_kpt10'), id=151, color=[0, 0, 128]),
|
2277 |
-
152:
|
2278 |
-
dict(link=('sling_kpt10', 'sling_kpt11'), id=152, color=[0, 0, 128]),
|
2279 |
-
153:
|
2280 |
-
dict(link=('sling_kpt11', 'sling_kpt12'), id=153, color=[0, 0, 128]),
|
2281 |
-
154:
|
2282 |
-
dict(link=('sling_kpt12', 'sling_kpt13'), id=154, color=[0, 0, 128]),
|
2283 |
-
155:
|
2284 |
-
dict(link=('sling_kpt13', 'sling_kpt14'), id=155, color=[0, 0, 128]),
|
2285 |
-
156:
|
2286 |
-
dict(link=('sling_kpt14', 'sling_kpt6'), id=156, color=[0, 0, 128]),
|
2287 |
-
157:
|
2288 |
-
dict(link=('sling_kpt2', 'sling_kpt7'), id=157, color=[0, 0, 128]),
|
2289 |
-
158:
|
2290 |
-
dict(link=('sling_kpt6', 'sling_kpt15'), id=158, color=[0, 0, 128]),
|
2291 |
-
159:
|
2292 |
-
dict(link=('sling_kpt2', 'sling_kpt3'), id=159, color=[0, 0, 128]),
|
2293 |
-
160:
|
2294 |
-
dict(link=('sling_kpt3', 'sling_kpt4'), id=160, color=[0, 0, 128]),
|
2295 |
-
161:
|
2296 |
-
dict(link=('sling_kpt4', 'sling_kpt5'), id=161, color=[0, 0, 128]),
|
2297 |
-
162:
|
2298 |
-
dict(link=('sling_kpt5', 'sling_kpt6'), id=162, color=[0, 0, 128]),
|
2299 |
-
163:
|
2300 |
-
dict(link=('sling_kpt1', 'sling_kpt6'), id=163, color=[0, 0, 128]),
|
2301 |
-
164:
|
2302 |
-
dict(
|
2303 |
-
link=('shorts_kpt1', 'shorts_kpt4'), id=164, color=[128, 128,
|
2304 |
-
128]),
|
2305 |
-
165:
|
2306 |
-
dict(
|
2307 |
-
link=('shorts_kpt4', 'shorts_kpt5'), id=165, color=[128, 128,
|
2308 |
-
128]),
|
2309 |
-
166:
|
2310 |
-
dict(
|
2311 |
-
link=('shorts_kpt5', 'shorts_kpt6'), id=166, color=[128, 128,
|
2312 |
-
128]),
|
2313 |
-
167:
|
2314 |
-
dict(
|
2315 |
-
link=('shorts_kpt6', 'shorts_kpt7'), id=167, color=[128, 128,
|
2316 |
-
128]),
|
2317 |
-
168:
|
2318 |
-
dict(
|
2319 |
-
link=('shorts_kpt7', 'shorts_kpt8'), id=168, color=[128, 128,
|
2320 |
-
128]),
|
2321 |
-
169:
|
2322 |
-
dict(
|
2323 |
-
link=('shorts_kpt8', 'shorts_kpt9'), id=169, color=[128, 128,
|
2324 |
-
128]),
|
2325 |
-
170:
|
2326 |
-
dict(
|
2327 |
-
link=('shorts_kpt9', 'shorts_kpt10'),
|
2328 |
-
id=170,
|
2329 |
-
color=[128, 128, 128]),
|
2330 |
-
171:
|
2331 |
-
dict(
|
2332 |
-
link=('shorts_kpt10', 'shorts_kpt3'),
|
2333 |
-
id=171,
|
2334 |
-
color=[128, 128, 128]),
|
2335 |
-
172:
|
2336 |
-
dict(
|
2337 |
-
link=('shorts_kpt3', 'shorts_kpt2'), id=172, color=[128, 128,
|
2338 |
-
128]),
|
2339 |
-
173:
|
2340 |
-
dict(
|
2341 |
-
link=('shorts_kpt2', 'shorts_kpt1'), id=173, color=[128, 128,
|
2342 |
-
128]),
|
2343 |
-
174:
|
2344 |
-
dict(
|
2345 |
-
link=('trousers_kpt1', 'trousers_kpt4'),
|
2346 |
-
id=174,
|
2347 |
-
color=[128, 0, 128]),
|
2348 |
-
175:
|
2349 |
-
dict(
|
2350 |
-
link=('trousers_kpt4', 'trousers_kpt5'),
|
2351 |
-
id=175,
|
2352 |
-
color=[128, 0, 128]),
|
2353 |
-
176:
|
2354 |
-
dict(
|
2355 |
-
link=('trousers_kpt5', 'trousers_kpt6'),
|
2356 |
-
id=176,
|
2357 |
-
color=[128, 0, 128]),
|
2358 |
-
177:
|
2359 |
-
dict(
|
2360 |
-
link=('trousers_kpt6', 'trousers_kpt7'),
|
2361 |
-
id=177,
|
2362 |
-
color=[128, 0, 128]),
|
2363 |
-
178:
|
2364 |
-
dict(
|
2365 |
-
link=('trousers_kpt7', 'trousers_kpt8'),
|
2366 |
-
id=178,
|
2367 |
-
color=[128, 0, 128]),
|
2368 |
-
179:
|
2369 |
-
dict(
|
2370 |
-
link=('trousers_kpt8', 'trousers_kpt9'),
|
2371 |
-
id=179,
|
2372 |
-
color=[128, 0, 128]),
|
2373 |
-
180:
|
2374 |
-
dict(
|
2375 |
-
link=('trousers_kpt9', 'trousers_kpt10'),
|
2376 |
-
id=180,
|
2377 |
-
color=[128, 0, 128]),
|
2378 |
-
181:
|
2379 |
-
dict(
|
2380 |
-
link=('trousers_kpt10', 'trousers_kpt11'),
|
2381 |
-
id=181,
|
2382 |
-
color=[128, 0, 128]),
|
2383 |
-
182:
|
2384 |
-
dict(
|
2385 |
-
link=('trousers_kpt11', 'trousers_kpt12'),
|
2386 |
-
id=182,
|
2387 |
-
color=[128, 0, 128]),
|
2388 |
-
183:
|
2389 |
-
dict(
|
2390 |
-
link=('trousers_kpt12', 'trousers_kpt13'),
|
2391 |
-
id=183,
|
2392 |
-
color=[128, 0, 128]),
|
2393 |
-
184:
|
2394 |
-
dict(
|
2395 |
-
link=('trousers_kpt13', 'trousers_kpt14'),
|
2396 |
-
id=184,
|
2397 |
-
color=[128, 0, 128]),
|
2398 |
-
185:
|
2399 |
-
dict(
|
2400 |
-
link=('trousers_kpt14', 'trousers_kpt3'),
|
2401 |
-
id=185,
|
2402 |
-
color=[128, 0, 128]),
|
2403 |
-
186:
|
2404 |
-
dict(
|
2405 |
-
link=('trousers_kpt3', 'trousers_kpt2'),
|
2406 |
-
id=186,
|
2407 |
-
color=[128, 0, 128]),
|
2408 |
-
187:
|
2409 |
-
dict(
|
2410 |
-
link=('trousers_kpt2', 'trousers_kpt1'),
|
2411 |
-
id=187,
|
2412 |
-
color=[128, 0, 128]),
|
2413 |
-
188:
|
2414 |
-
dict(link=('skirt_kpt1', 'skirt_kpt4'), id=188, color=[64, 128, 128]),
|
2415 |
-
189:
|
2416 |
-
dict(link=('skirt_kpt4', 'skirt_kpt5'), id=189, color=[64, 128, 128]),
|
2417 |
-
190:
|
2418 |
-
dict(link=('skirt_kpt5', 'skirt_kpt6'), id=190, color=[64, 128, 128]),
|
2419 |
-
191:
|
2420 |
-
dict(link=('skirt_kpt6', 'skirt_kpt7'), id=191, color=[64, 128, 128]),
|
2421 |
-
192:
|
2422 |
-
dict(link=('skirt_kpt7', 'skirt_kpt8'), id=192, color=[64, 128, 128]),
|
2423 |
-
193:
|
2424 |
-
dict(link=('skirt_kpt8', 'skirt_kpt3'), id=193, color=[64, 128, 128]),
|
2425 |
-
194:
|
2426 |
-
dict(link=('skirt_kpt3', 'skirt_kpt2'), id=194, color=[64, 128, 128]),
|
2427 |
-
195:
|
2428 |
-
dict(link=('skirt_kpt2', 'skirt_kpt1'), id=195, color=[64, 128, 128]),
|
2429 |
-
196:
|
2430 |
-
dict(link=('ssd_kpt1', 'ssd_kpt2'), id=196, color=[64, 64, 128]),
|
2431 |
-
197:
|
2432 |
-
dict(link=('ssd_kpt2', 'ssd_kpt7'), id=197, color=[64, 64, 128]),
|
2433 |
-
198:
|
2434 |
-
dict(link=('ssd_kpt7', 'ssd_kpt8'), id=198, color=[64, 64, 128]),
|
2435 |
-
199:
|
2436 |
-
dict(link=('ssd_kpt8', 'ssd_kpt9'), id=199, color=[64, 64, 128]),
|
2437 |
-
200:
|
2438 |
-
dict(link=('ssd_kpt9', 'ssd_kpt10'), id=200, color=[64, 64, 128]),
|
2439 |
-
201:
|
2440 |
-
dict(link=('ssd_kpt10', 'ssd_kpt11'), id=201, color=[64, 64, 128]),
|
2441 |
-
202:
|
2442 |
-
dict(link=('ssd_kpt11', 'ssd_kpt12'), id=202, color=[64, 64, 128]),
|
2443 |
-
203:
|
2444 |
-
dict(link=('ssd_kpt12', 'ssd_kpt13'), id=203, color=[64, 64, 128]),
|
2445 |
-
204:
|
2446 |
-
dict(link=('ssd_kpt13', 'ssd_kpt14'), id=204, color=[64, 64, 128]),
|
2447 |
-
205:
|
2448 |
-
dict(link=('ssd_kpt14', 'ssd_kpt15'), id=205, color=[64, 64, 128]),
|
2449 |
-
206:
|
2450 |
-
dict(link=('ssd_kpt15', 'ssd_kpt16'), id=206, color=[64, 64, 128]),
|
2451 |
-
207:
|
2452 |
-
dict(link=('ssd_kpt16', 'ssd_kpt17'), id=207, color=[64, 64, 128]),
|
2453 |
-
208:
|
2454 |
-
dict(link=('ssd_kpt17', 'ssd_kpt18'), id=208, color=[64, 64, 128]),
|
2455 |
-
209:
|
2456 |
-
dict(link=('ssd_kpt18', 'ssd_kpt19'), id=209, color=[64, 64, 128]),
|
2457 |
-
210:
|
2458 |
-
dict(link=('ssd_kpt19', 'ssd_kpt20'), id=210, color=[64, 64, 128]),
|
2459 |
-
211:
|
2460 |
-
dict(link=('ssd_kpt20', 'ssd_kpt21'), id=211, color=[64, 64, 128]),
|
2461 |
-
212:
|
2462 |
-
dict(link=('ssd_kpt21', 'ssd_kpt22'), id=212, color=[64, 64, 128]),
|
2463 |
-
213:
|
2464 |
-
dict(link=('ssd_kpt22', 'ssd_kpt23'), id=213, color=[64, 64, 128]),
|
2465 |
-
214:
|
2466 |
-
dict(link=('ssd_kpt23', 'ssd_kpt24'), id=214, color=[64, 64, 128]),
|
2467 |
-
215:
|
2468 |
-
dict(link=('ssd_kpt24', 'ssd_kpt25'), id=215, color=[64, 64, 128]),
|
2469 |
-
216:
|
2470 |
-
dict(link=('ssd_kpt25', 'ssd_kpt26'), id=216, color=[64, 64, 128]),
|
2471 |
-
217:
|
2472 |
-
dict(link=('ssd_kpt26', 'ssd_kpt27'), id=217, color=[64, 64, 128]),
|
2473 |
-
218:
|
2474 |
-
dict(link=('ssd_kpt27', 'ssd_kpt28'), id=218, color=[64, 64, 128]),
|
2475 |
-
219:
|
2476 |
-
dict(link=('ssd_kpt28', 'ssd_kpt29'), id=219, color=[64, 64, 128]),
|
2477 |
-
220:
|
2478 |
-
dict(link=('ssd_kpt29', 'ssd_kpt6'), id=220, color=[64, 64, 128]),
|
2479 |
-
221:
|
2480 |
-
dict(link=('ssd_kpt6', 'ssd_kpt5'), id=221, color=[64, 64, 128]),
|
2481 |
-
222:
|
2482 |
-
dict(link=('ssd_kpt5', 'ssd_kpt4'), id=222, color=[64, 64, 128]),
|
2483 |
-
223:
|
2484 |
-
dict(link=('ssd_kpt4', 'ssd_kpt3'), id=223, color=[64, 64, 128]),
|
2485 |
-
224:
|
2486 |
-
dict(link=('ssd_kpt3', 'ssd_kpt2'), id=224, color=[64, 64, 128]),
|
2487 |
-
225:
|
2488 |
-
dict(link=('ssd_kpt6', 'ssd_kpt1'), id=225, color=[64, 64, 128]),
|
2489 |
-
226:
|
2490 |
-
dict(link=('lsd_kpt1', 'lsd_kpt2'), id=226, color=[128, 64, 0]),
|
2491 |
-
227:
|
2492 |
-
dict(link=('lsd_kpt2', 'lsd_kpt7'), id=228, color=[128, 64, 0]),
|
2493 |
-
228:
|
2494 |
-
dict(link=('lsd_kpt7', 'lsd_kpt8'), id=228, color=[128, 64, 0]),
|
2495 |
-
229:
|
2496 |
-
dict(link=('lsd_kpt8', 'lsd_kpt9'), id=229, color=[128, 64, 0]),
|
2497 |
-
230:
|
2498 |
-
dict(link=('lsd_kpt9', 'lsd_kpt10'), id=230, color=[128, 64, 0]),
|
2499 |
-
231:
|
2500 |
-
dict(link=('lsd_kpt10', 'lsd_kpt11'), id=231, color=[128, 64, 0]),
|
2501 |
-
232:
|
2502 |
-
dict(link=('lsd_kpt11', 'lsd_kpt12'), id=232, color=[128, 64, 0]),
|
2503 |
-
233:
|
2504 |
-
dict(link=('lsd_kpt12', 'lsd_kpt13'), id=233, color=[128, 64, 0]),
|
2505 |
-
234:
|
2506 |
-
dict(link=('lsd_kpt13', 'lsd_kpt14'), id=234, color=[128, 64, 0]),
|
2507 |
-
235:
|
2508 |
-
dict(link=('lsd_kpt14', 'lsd_kpt15'), id=235, color=[128, 64, 0]),
|
2509 |
-
236:
|
2510 |
-
dict(link=('lsd_kpt15', 'lsd_kpt16'), id=236, color=[128, 64, 0]),
|
2511 |
-
237:
|
2512 |
-
dict(link=('lsd_kpt16', 'lsd_kpt17'), id=237, color=[128, 64, 0]),
|
2513 |
-
238:
|
2514 |
-
dict(link=('lsd_kpt17', 'lsd_kpt18'), id=238, color=[128, 64, 0]),
|
2515 |
-
239:
|
2516 |
-
dict(link=('lsd_kpt18', 'lsd_kpt19'), id=239, color=[128, 64, 0]),
|
2517 |
-
240:
|
2518 |
-
dict(link=('lsd_kpt19', 'lsd_kpt20'), id=240, color=[128, 64, 0]),
|
2519 |
-
241:
|
2520 |
-
dict(link=('lsd_kpt20', 'lsd_kpt21'), id=241, color=[128, 64, 0]),
|
2521 |
-
242:
|
2522 |
-
dict(link=('lsd_kpt21', 'lsd_kpt22'), id=242, color=[128, 64, 0]),
|
2523 |
-
243:
|
2524 |
-
dict(link=('lsd_kpt22', 'lsd_kpt23'), id=243, color=[128, 64, 0]),
|
2525 |
-
244:
|
2526 |
-
dict(link=('lsd_kpt23', 'lsd_kpt24'), id=244, color=[128, 64, 0]),
|
2527 |
-
245:
|
2528 |
-
dict(link=('lsd_kpt24', 'lsd_kpt25'), id=245, color=[128, 64, 0]),
|
2529 |
-
246:
|
2530 |
-
dict(link=('lsd_kpt25', 'lsd_kpt26'), id=246, color=[128, 64, 0]),
|
2531 |
-
247:
|
2532 |
-
dict(link=('lsd_kpt26', 'lsd_kpt27'), id=247, color=[128, 64, 0]),
|
2533 |
-
248:
|
2534 |
-
dict(link=('lsd_kpt27', 'lsd_kpt28'), id=248, color=[128, 64, 0]),
|
2535 |
-
249:
|
2536 |
-
dict(link=('lsd_kpt28', 'lsd_kpt29'), id=249, color=[128, 64, 0]),
|
2537 |
-
250:
|
2538 |
-
dict(link=('lsd_kpt29', 'lsd_kpt30'), id=250, color=[128, 64, 0]),
|
2539 |
-
251:
|
2540 |
-
dict(link=('lsd_kpt30', 'lsd_kpt31'), id=251, color=[128, 64, 0]),
|
2541 |
-
252:
|
2542 |
-
dict(link=('lsd_kpt31', 'lsd_kpt32'), id=252, color=[128, 64, 0]),
|
2543 |
-
253:
|
2544 |
-
dict(link=('lsd_kpt32', 'lsd_kpt33'), id=253, color=[128, 64, 0]),
|
2545 |
-
254:
|
2546 |
-
dict(link=('lsd_kpt33', 'lsd_kpt34'), id=254, color=[128, 64, 0]),
|
2547 |
-
255:
|
2548 |
-
dict(link=('lsd_kpt34', 'lsd_kpt35'), id=255, color=[128, 64, 0]),
|
2549 |
-
256:
|
2550 |
-
dict(link=('lsd_kpt35', 'lsd_kpt36'), id=256, color=[128, 64, 0]),
|
2551 |
-
257:
|
2552 |
-
dict(link=('lsd_kpt36', 'lsd_kpt37'), id=257, color=[128, 64, 0]),
|
2553 |
-
258:
|
2554 |
-
dict(link=('lsd_kpt37', 'lsd_kpt6'), id=258, color=[128, 64, 0]),
|
2555 |
-
259:
|
2556 |
-
dict(link=('lsd_kpt6', 'lsd_kpt5'), id=259, color=[128, 64, 0]),
|
2557 |
-
260:
|
2558 |
-
dict(link=('lsd_kpt5', 'lsd_kpt4'), id=260, color=[128, 64, 0]),
|
2559 |
-
261:
|
2560 |
-
dict(link=('lsd_kpt4', 'lsd_kpt3'), id=261, color=[128, 64, 0]),
|
2561 |
-
262:
|
2562 |
-
dict(link=('lsd_kpt3', 'lsd_kpt2'), id=262, color=[128, 64, 0]),
|
2563 |
-
263:
|
2564 |
-
dict(link=('lsd_kpt6', 'lsd_kpt1'), id=263, color=[128, 64, 0]),
|
2565 |
-
264:
|
2566 |
-
dict(link=('vd_kpt1', 'vd_kpt2'), id=264, color=[128, 64, 255]),
|
2567 |
-
265:
|
2568 |
-
dict(link=('vd_kpt2', 'vd_kpt7'), id=265, color=[128, 64, 255]),
|
2569 |
-
266:
|
2570 |
-
dict(link=('vd_kpt7', 'vd_kpt8'), id=266, color=[128, 64, 255]),
|
2571 |
-
267:
|
2572 |
-
dict(link=('vd_kpt8', 'vd_kpt9'), id=267, color=[128, 64, 255]),
|
2573 |
-
268:
|
2574 |
-
dict(link=('vd_kpt9', 'vd_kpt10'), id=268, color=[128, 64, 255]),
|
2575 |
-
269:
|
2576 |
-
dict(link=('vd_kpt10', 'vd_kpt11'), id=269, color=[128, 64, 255]),
|
2577 |
-
270:
|
2578 |
-
dict(link=('vd_kpt11', 'vd_kpt12'), id=270, color=[128, 64, 255]),
|
2579 |
-
271:
|
2580 |
-
dict(link=('vd_kpt12', 'vd_kpt13'), id=271, color=[128, 64, 255]),
|
2581 |
-
272:
|
2582 |
-
dict(link=('vd_kpt13', 'vd_kpt14'), id=272, color=[128, 64, 255]),
|
2583 |
-
273:
|
2584 |
-
dict(link=('vd_kpt14', 'vd_kpt15'), id=273, color=[128, 64, 255]),
|
2585 |
-
274:
|
2586 |
-
dict(link=('vd_kpt15', 'vd_kpt16'), id=274, color=[128, 64, 255]),
|
2587 |
-
275:
|
2588 |
-
dict(link=('vd_kpt16', 'vd_kpt17'), id=275, color=[128, 64, 255]),
|
2589 |
-
276:
|
2590 |
-
dict(link=('vd_kpt17', 'vd_kpt18'), id=276, color=[128, 64, 255]),
|
2591 |
-
277:
|
2592 |
-
dict(link=('vd_kpt18', 'vd_kpt19'), id=277, color=[128, 64, 255]),
|
2593 |
-
278:
|
2594 |
-
dict(link=('vd_kpt19', 'vd_kpt6'), id=278, color=[128, 64, 255]),
|
2595 |
-
279:
|
2596 |
-
dict(link=('vd_kpt6', 'vd_kpt5'), id=279, color=[128, 64, 255]),
|
2597 |
-
280:
|
2598 |
-
dict(link=('vd_kpt5', 'vd_kpt4'), id=280, color=[128, 64, 255]),
|
2599 |
-
281:
|
2600 |
-
dict(link=('vd_kpt4', 'vd_kpt3'), id=281, color=[128, 64, 255]),
|
2601 |
-
282:
|
2602 |
-
dict(link=('vd_kpt3', 'vd_kpt2'), id=282, color=[128, 64, 255]),
|
2603 |
-
283:
|
2604 |
-
dict(link=('vd_kpt6', 'vd_kpt1'), id=283, color=[128, 64, 255]),
|
2605 |
-
284:
|
2606 |
-
dict(link=('sd_kpt1', 'sd_kpt2'), id=284, color=[128, 64, 0]),
|
2607 |
-
285:
|
2608 |
-
dict(link=('sd_kpt2', 'sd_kpt8'), id=285, color=[128, 64, 0]),
|
2609 |
-
286:
|
2610 |
-
dict(link=('sd_kpt8', 'sd_kpt9'), id=286, color=[128, 64, 0]),
|
2611 |
-
287:
|
2612 |
-
dict(link=('sd_kpt9', 'sd_kpt10'), id=287, color=[128, 64, 0]),
|
2613 |
-
288:
|
2614 |
-
dict(link=('sd_kpt10', 'sd_kpt11'), id=288, color=[128, 64, 0]),
|
2615 |
-
289:
|
2616 |
-
dict(link=('sd_kpt11', 'sd_kpt12'), id=289, color=[128, 64, 0]),
|
2617 |
-
290:
|
2618 |
-
dict(link=('sd_kpt12', 'sd_kpt13'), id=290, color=[128, 64, 0]),
|
2619 |
-
291:
|
2620 |
-
dict(link=('sd_kpt13', 'sd_kpt14'), id=291, color=[128, 64, 0]),
|
2621 |
-
292:
|
2622 |
-
dict(link=('sd_kpt14', 'sd_kpt15'), id=292, color=[128, 64, 0]),
|
2623 |
-
293:
|
2624 |
-
dict(link=('sd_kpt15', 'sd_kpt16'), id=293, color=[128, 64, 0]),
|
2625 |
-
294:
|
2626 |
-
dict(link=('sd_kpt16', 'sd_kpt17'), id=294, color=[128, 64, 0]),
|
2627 |
-
295:
|
2628 |
-
dict(link=('sd_kpt17', 'sd_kpt18'), id=295, color=[128, 64, 0]),
|
2629 |
-
296:
|
2630 |
-
dict(link=('sd_kpt18', 'sd_kpt6'), id=296, color=[128, 64, 0]),
|
2631 |
-
297:
|
2632 |
-
dict(link=('sd_kpt6', 'sd_kpt5'), id=297, color=[128, 64, 0]),
|
2633 |
-
298:
|
2634 |
-
dict(link=('sd_kpt5', 'sd_kpt4'), id=298, color=[128, 64, 0]),
|
2635 |
-
299:
|
2636 |
-
dict(link=('sd_kpt4', 'sd_kpt3'), id=299, color=[128, 64, 0]),
|
2637 |
-
300:
|
2638 |
-
dict(link=('sd_kpt3', 'sd_kpt2'), id=300, color=[128, 64, 0]),
|
2639 |
-
301:
|
2640 |
-
dict(link=('sd_kpt2', 'sd_kpt7'), id=301, color=[128, 64, 0]),
|
2641 |
-
302:
|
2642 |
-
dict(link=('sd_kpt6', 'sd_kpt19'), id=302, color=[128, 64, 0]),
|
2643 |
-
303:
|
2644 |
-
dict(link=('sd_kpt6', 'sd_kpt1'), id=303, color=[128, 64, 0])
|
2645 |
-
}),
|
2646 |
-
joint_weights=[
|
2647 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2648 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2649 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2650 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2651 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2652 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2653 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2654 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2655 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2656 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2657 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2658 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2659 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2660 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2661 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2662 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2663 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2664 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2665 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2666 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2667 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
|
2668 |
-
],
|
2669 |
-
sigmas=[])
|
2670 |
-
param_scheduler = [
|
2671 |
-
dict(
|
2672 |
-
type='LinearLR', begin=0, end=500, start_factor=0.001, by_epoch=False),
|
2673 |
-
dict(
|
2674 |
-
type='MultiStepLR',
|
2675 |
-
begin=0,
|
2676 |
-
end=60,
|
2677 |
-
milestones=[20, 40],
|
2678 |
-
gamma=0.1,
|
2679 |
-
by_epoch=True)
|
2680 |
-
]
|
2681 |
-
optim_wrapper = dict(optimizer=dict(type='Adam', lr=0.0005))
|
2682 |
-
auto_scale_lr = dict(base_batch_size=512)
|
2683 |
-
dataset_type = 'DeepFashion2Dataset'
|
2684 |
-
data_mode = 'topdown'
|
2685 |
-
data_root = 'data/deepfashion2/'
|
2686 |
-
codec = dict(
|
2687 |
-
type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2)
|
2688 |
-
train_pipeline = [
|
2689 |
-
dict(type='LoadImage'),
|
2690 |
-
dict(type='GetBBoxCenterScale'),
|
2691 |
-
dict(type='RandomFlip', direction='horizontal'),
|
2692 |
-
dict(
|
2693 |
-
type='RandomBBoxTransform',
|
2694 |
-
shift_prob=0,
|
2695 |
-
rotate_factor=60,
|
2696 |
-
scale_factor=(0.75, 1.25)),
|
2697 |
-
dict(type='TopdownAffine', input_size=(192, 256)),
|
2698 |
-
dict(
|
2699 |
-
type='GenerateTarget',
|
2700 |
-
encoder=dict(
|
2701 |
-
type='MSRAHeatmap',
|
2702 |
-
input_size=(192, 256),
|
2703 |
-
heatmap_size=(48, 64),
|
2704 |
-
sigma=2)),
|
2705 |
-
dict(type='PackPoseInputs')
|
2706 |
-
]
|
2707 |
-
val_pipeline = [
|
2708 |
-
dict(type='LoadImage', backend_args=dict(backend='local')),
|
2709 |
-
dict(type='GetBBoxCenterScale'),
|
2710 |
-
dict(type='TopdownAffine', input_size=(192, 256)),
|
2711 |
-
dict(type='PackPoseInputs')
|
2712 |
-
]
|
2713 |
-
train_dataloader = dict(
|
2714 |
-
batch_size=32,
|
2715 |
-
num_workers=8,
|
2716 |
-
persistent_workers=True,
|
2717 |
-
sampler=dict(type='DefaultSampler', shuffle=True),
|
2718 |
-
dataset=dict(
|
2719 |
-
type='DeepFashion2Dataset',
|
2720 |
-
data_root='data/deepfashion2/',
|
2721 |
-
data_mode='topdown',
|
2722 |
-
ann_file='train/deepfashion2_short_sleeved_shirt_train.json',
|
2723 |
-
data_prefix=dict(img='train/image/'),
|
2724 |
-
pipeline=[
|
2725 |
-
dict(type='LoadImage'),
|
2726 |
-
dict(type='GetBBoxCenterScale'),
|
2727 |
-
dict(type='RandomFlip', direction='horizontal'),
|
2728 |
-
dict(
|
2729 |
-
type='RandomBBoxTransform',
|
2730 |
-
shift_prob=0,
|
2731 |
-
rotate_factor=60,
|
2732 |
-
scale_factor=(0.75, 1.25)),
|
2733 |
-
dict(type='TopdownAffine', input_size=(192, 256)),
|
2734 |
-
dict(
|
2735 |
-
type='GenerateTarget',
|
2736 |
-
encoder=dict(
|
2737 |
-
type='MSRAHeatmap',
|
2738 |
-
input_size=(192, 256),
|
2739 |
-
heatmap_size=(48, 64),
|
2740 |
-
sigma=2)),
|
2741 |
-
dict(type='PackPoseInputs')
|
2742 |
-
]))
|
2743 |
-
val_dataloader = dict(
|
2744 |
-
batch_size=32,
|
2745 |
-
num_workers=4,
|
2746 |
-
persistent_workers=True,
|
2747 |
-
drop_last=False,
|
2748 |
-
sampler=dict(type='DefaultSampler', shuffle=False),
|
2749 |
-
dataset=dict(
|
2750 |
-
type='DeepFashion2Dataset',
|
2751 |
-
data_root='data/deepfashion2/',
|
2752 |
-
data_mode='topdown',
|
2753 |
-
ann_file='validation/deepfashion2_short_sleeved_shirt_validation.json',
|
2754 |
-
data_prefix=dict(img='validation/image/'),
|
2755 |
-
test_mode=True,
|
2756 |
-
pipeline=[
|
2757 |
-
dict(type='LoadImage', backend_args=dict(backend='local')),
|
2758 |
-
dict(type='GetBBoxCenterScale'),
|
2759 |
-
dict(type='TopdownAffine', input_size=(192, 256)),
|
2760 |
-
dict(type='PackPoseInputs')
|
2761 |
-
]))
|
2762 |
-
test_dataloader = dict(
|
2763 |
-
batch_size=32,
|
2764 |
-
num_workers=4,
|
2765 |
-
persistent_workers=True,
|
2766 |
-
drop_last=False,
|
2767 |
-
sampler=dict(type='DefaultSampler', shuffle=False),
|
2768 |
-
dataset=dict(
|
2769 |
-
type='DeepFashion2Dataset',
|
2770 |
-
data_root='data/deepfashion2/',
|
2771 |
-
data_mode='topdown',
|
2772 |
-
ann_file='validation/deepfashion2_short_sleeved_shirt_validation.json',
|
2773 |
-
data_prefix=dict(img='validation/image/'),
|
2774 |
-
test_mode=True,
|
2775 |
-
pipeline=[
|
2776 |
-
dict(type='LoadImage', backend_args=dict(backend='local')),
|
2777 |
-
dict(type='GetBBoxCenterScale'),
|
2778 |
-
dict(type='TopdownAffine', input_size=(192, 256)),
|
2779 |
-
dict(type='PackPoseInputs')
|
2780 |
-
]))
|
2781 |
-
channel_cfg = dict(
|
2782 |
-
num_output_channels=294,
|
2783 |
-
dataset_joints=294,
|
2784 |
-
dataset_channel=[[
|
2785 |
-
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
|
2786 |
-
20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
|
2787 |
-
38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
|
2788 |
-
56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
|
2789 |
-
74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
|
2790 |
-
92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
|
2791 |
-
108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
|
2792 |
-
122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135,
|
2793 |
-
136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
|
2794 |
-
150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163,
|
2795 |
-
164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
|
2796 |
-
178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
|
2797 |
-
192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205,
|
2798 |
-
206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
|
2799 |
-
220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233,
|
2800 |
-
234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247,
|
2801 |
-
248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261,
|
2802 |
-
262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275,
|
2803 |
-
276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289,
|
2804 |
-
290, 291, 292, 293
|
2805 |
-
]],
|
2806 |
-
inference_channel=[
|
2807 |
-
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
|
2808 |
-
20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
|
2809 |
-
38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
|
2810 |
-
56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
|
2811 |
-
74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
|
2812 |
-
92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
|
2813 |
-
108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
|
2814 |
-
122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135,
|
2815 |
-
136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
|
2816 |
-
150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163,
|
2817 |
-
164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
|
2818 |
-
178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
|
2819 |
-
192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205,
|
2820 |
-
206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
|
2821 |
-
220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233,
|
2822 |
-
234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247,
|
2823 |
-
248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261,
|
2824 |
-
262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275,
|
2825 |
-
276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289,
|
2826 |
-
290, 291, 292, 293
|
2827 |
-
])
|
2828 |
-
model = dict(
|
2829 |
-
type='TopdownPoseEstimator',
|
2830 |
-
data_preprocessor=dict(
|
2831 |
-
type='PoseDataPreprocessor',
|
2832 |
-
mean=[123.675, 116.28, 103.53],
|
2833 |
-
std=[58.395, 57.12, 57.375],
|
2834 |
-
bgr_to_rgb=True),
|
2835 |
-
backbone=dict(
|
2836 |
-
type='ResNet',
|
2837 |
-
depth=50,
|
2838 |
-
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
|
2839 |
-
head=dict(
|
2840 |
-
type='HeatmapHead',
|
2841 |
-
in_channels=2048,
|
2842 |
-
out_channels=294,
|
2843 |
-
loss=dict(type='KeypointMSELoss', use_target_weight=True),
|
2844 |
-
decoder=dict(
|
2845 |
-
type='MSRAHeatmap',
|
2846 |
-
input_size=(192, 256),
|
2847 |
-
heatmap_size=(48, 64),
|
2848 |
-
sigma=2)),
|
2849 |
-
test_cfg=dict(flip_test=True, flip_mode='heatmap', shift_heatmap=True))
|
2850 |
-
val_evaluator = [
|
2851 |
-
dict(type='PCKAccuracy', thr=0.2),
|
2852 |
-
dict(type='AUC'),
|
2853 |
-
dict(type='EPE')
|
2854 |
-
]
|
2855 |
-
test_evaluator = [
|
2856 |
-
dict(type='PCKAccuracy', thr=0.2),
|
2857 |
-
dict(type='AUC'),
|
2858 |
-
dict(type='EPE')
|
2859 |
-
]
|
2860 |
-
launcher = 'none'
|
2861 |
-
work_dir = './work_dirs/td_hm_res50_4xb32-60e_deepfashion2_short_sleeved_shirt_256x192'
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AchyuthGamer/OpenGPT/client/js/sidebar-toggler.js
DELETED
@@ -1,34 +0,0 @@
|
|
1 |
-
const sidebar = document.querySelector(".sidebar");
|
2 |
-
const menuButton = document.querySelector(".menu-button");
|
3 |
-
|
4 |
-
function toggleSidebar(event) {
|
5 |
-
if (sidebar.classList.contains("shown")) {
|
6 |
-
hideSidebar(event.target);
|
7 |
-
} else {
|
8 |
-
showSidebar(event.target);
|
9 |
-
}
|
10 |
-
window.scrollTo(0, 0);
|
11 |
-
}
|
12 |
-
|
13 |
-
function showSidebar(target) {
|
14 |
-
sidebar.classList.add("shown");
|
15 |
-
target.classList.add("rotated");
|
16 |
-
document.body.style.overflow = "hidden";
|
17 |
-
}
|
18 |
-
|
19 |
-
function hideSidebar(target) {
|
20 |
-
sidebar.classList.remove("shown");
|
21 |
-
target.classList.remove("rotated");
|
22 |
-
document.body.style.overflow = "auto";
|
23 |
-
}
|
24 |
-
|
25 |
-
menuButton.addEventListener("click", toggleSidebar);
|
26 |
-
|
27 |
-
document.body.addEventListener('click', function(event) {
|
28 |
-
if (event.target.matches('.conversation-title')) {
|
29 |
-
const menuButtonStyle = window.getComputedStyle(menuButton);
|
30 |
-
if (menuButtonStyle.display !== 'none') {
|
31 |
-
hideSidebar(menuButton);
|
32 |
-
}
|
33 |
-
}
|
34 |
-
});
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Adapter/CoAdapter/ldm/modules/image_degradation/utils_image.py
DELETED
@@ -1,916 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
import math
|
3 |
-
import random
|
4 |
-
import numpy as np
|
5 |
-
import torch
|
6 |
-
import cv2
|
7 |
-
from torchvision.utils import make_grid
|
8 |
-
from datetime import datetime
|
9 |
-
#import matplotlib.pyplot as plt # TODO: check with Dominik, also bsrgan.py vs bsrgan_light.py
|
10 |
-
|
11 |
-
|
12 |
-
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
|
13 |
-
|
14 |
-
|
15 |
-
'''
|
16 |
-
# --------------------------------------------
|
17 |
-
# Kai Zhang (github: https://github.com/cszn)
|
18 |
-
# 03/Mar/2019
|
19 |
-
# --------------------------------------------
|
20 |
-
# https://github.com/twhui/SRGAN-pyTorch
|
21 |
-
# https://github.com/xinntao/BasicSR
|
22 |
-
# --------------------------------------------
|
23 |
-
'''
|
24 |
-
|
25 |
-
|
26 |
-
IMG_EXTENSIONS = ['.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.tif']
|
27 |
-
|
28 |
-
|
29 |
-
def is_image_file(filename):
|
30 |
-
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
|
31 |
-
|
32 |
-
|
33 |
-
def get_timestamp():
|
34 |
-
return datetime.now().strftime('%y%m%d-%H%M%S')
|
35 |
-
|
36 |
-
|
37 |
-
def imshow(x, title=None, cbar=False, figsize=None):
|
38 |
-
plt.figure(figsize=figsize)
|
39 |
-
plt.imshow(np.squeeze(x), interpolation='nearest', cmap='gray')
|
40 |
-
if title:
|
41 |
-
plt.title(title)
|
42 |
-
if cbar:
|
43 |
-
plt.colorbar()
|
44 |
-
plt.show()
|
45 |
-
|
46 |
-
|
47 |
-
def surf(Z, cmap='rainbow', figsize=None):
|
48 |
-
plt.figure(figsize=figsize)
|
49 |
-
ax3 = plt.axes(projection='3d')
|
50 |
-
|
51 |
-
w, h = Z.shape[:2]
|
52 |
-
xx = np.arange(0,w,1)
|
53 |
-
yy = np.arange(0,h,1)
|
54 |
-
X, Y = np.meshgrid(xx, yy)
|
55 |
-
ax3.plot_surface(X,Y,Z,cmap=cmap)
|
56 |
-
#ax3.contour(X,Y,Z, zdim='z',offset=-2,cmap=cmap)
|
57 |
-
plt.show()
|
58 |
-
|
59 |
-
|
60 |
-
'''
|
61 |
-
# --------------------------------------------
|
62 |
-
# get image pathes
|
63 |
-
# --------------------------------------------
|
64 |
-
'''
|
65 |
-
|
66 |
-
|
67 |
-
def get_image_paths(dataroot):
|
68 |
-
paths = None # return None if dataroot is None
|
69 |
-
if dataroot is not None:
|
70 |
-
paths = sorted(_get_paths_from_images(dataroot))
|
71 |
-
return paths
|
72 |
-
|
73 |
-
|
74 |
-
def _get_paths_from_images(path):
|
75 |
-
assert os.path.isdir(path), '{:s} is not a valid directory'.format(path)
|
76 |
-
images = []
|
77 |
-
for dirpath, _, fnames in sorted(os.walk(path)):
|
78 |
-
for fname in sorted(fnames):
|
79 |
-
if is_image_file(fname):
|
80 |
-
img_path = os.path.join(dirpath, fname)
|
81 |
-
images.append(img_path)
|
82 |
-
assert images, '{:s} has no valid image file'.format(path)
|
83 |
-
return images
|
84 |
-
|
85 |
-
|
86 |
-
'''
|
87 |
-
# --------------------------------------------
|
88 |
-
# split large images into small images
|
89 |
-
# --------------------------------------------
|
90 |
-
'''
|
91 |
-
|
92 |
-
|
93 |
-
def patches_from_image(img, p_size=512, p_overlap=64, p_max=800):
|
94 |
-
w, h = img.shape[:2]
|
95 |
-
patches = []
|
96 |
-
if w > p_max and h > p_max:
|
97 |
-
w1 = list(np.arange(0, w-p_size, p_size-p_overlap, dtype=np.int))
|
98 |
-
h1 = list(np.arange(0, h-p_size, p_size-p_overlap, dtype=np.int))
|
99 |
-
w1.append(w-p_size)
|
100 |
-
h1.append(h-p_size)
|
101 |
-
# print(w1)
|
102 |
-
# print(h1)
|
103 |
-
for i in w1:
|
104 |
-
for j in h1:
|
105 |
-
patches.append(img[i:i+p_size, j:j+p_size,:])
|
106 |
-
else:
|
107 |
-
patches.append(img)
|
108 |
-
|
109 |
-
return patches
|
110 |
-
|
111 |
-
|
112 |
-
def imssave(imgs, img_path):
|
113 |
-
"""
|
114 |
-
imgs: list, N images of size WxHxC
|
115 |
-
"""
|
116 |
-
img_name, ext = os.path.splitext(os.path.basename(img_path))
|
117 |
-
|
118 |
-
for i, img in enumerate(imgs):
|
119 |
-
if img.ndim == 3:
|
120 |
-
img = img[:, :, [2, 1, 0]]
|
121 |
-
new_path = os.path.join(os.path.dirname(img_path), img_name+str('_s{:04d}'.format(i))+'.png')
|
122 |
-
cv2.imwrite(new_path, img)
|
123 |
-
|
124 |
-
|
125 |
-
def split_imageset(original_dataroot, taget_dataroot, n_channels=3, p_size=800, p_overlap=96, p_max=1000):
    """
    split the large images from original_dataroot into small overlapped images with size (p_size)x(p_size),
    and save them into taget_dataroot; only the images with larger size than (p_max)x(p_max)
    will be splitted.
    Args:
        original_dataroot:
        taget_dataroot:
        p_size: size of small images
        p_overlap: patch size in training is a good choice
        p_max: images with smaller size than (p_max)x(p_max) keep unchanged.
    """
    for img_path in get_image_paths(original_dataroot):
        image = imread_uint(img_path, n_channels=n_channels)
        patch_list = patches_from_image(image, p_size, p_overlap, p_max)
        imssave(patch_list, os.path.join(taget_dataroot, os.path.basename(img_path)))
|
145 |
-
|
146 |
-
'''
|
147 |
-
# --------------------------------------------
|
148 |
-
# makedir
|
149 |
-
# --------------------------------------------
|
150 |
-
'''
|
151 |
-
|
152 |
-
|
153 |
-
def mkdir(path):
    """Create directory `path` (including parents) if it does not already exist.

    Uses exist_ok=True, which removes the check-then-create race of the
    original `os.path.exists` + `os.makedirs` pair when several processes
    create the same directory concurrently.
    """
    os.makedirs(path, exist_ok=True)
|
156 |
-
|
157 |
-
|
158 |
-
def mkdirs(paths):
    """Create every directory in `paths`; accepts a single str or an iterable of paths."""
    for p in ([paths] if isinstance(paths, str) else paths):
        mkdir(p)
|
164 |
-
|
165 |
-
|
166 |
-
def mkdir_and_rename(path):
    """Create `path`; if it already exists, archive the old directory by renaming it first."""
    if os.path.exists(path):
        archived = path + '_archived_' + get_timestamp()
        print('Path already exists. Rename it to [{:s}]'.format(archived))
        os.rename(path, archived)
    os.makedirs(path)
|
172 |
-
|
173 |
-
|
174 |
-
'''
|
175 |
-
# --------------------------------------------
|
176 |
-
# read image from path
|
177 |
-
# opencv is fast, but read BGR numpy image
|
178 |
-
# --------------------------------------------
|
179 |
-
'''
|
180 |
-
|
181 |
-
|
182 |
-
# --------------------------------------------
|
183 |
-
# get uint8 image of size HxWxn_channles (RGB)
|
184 |
-
# --------------------------------------------
|
185 |
-
def imread_uint(path, n_channels=3):
    """Read an image as uint8.

    input: path
    output: HxWx3 (RGB, or gray replicated to GGG) for n_channels=3, HxWx1 for n_channels=1.
    """
    if n_channels == 1:
        img = cv2.imread(path, 0)  # cv2.IMREAD_GRAYSCALE
        img = np.expand_dims(img, axis=2)  # HxWx1
    elif n_channels == 3:
        img = cv2.imread(path, cv2.IMREAD_UNCHANGED)  # BGR or single-channel
        if img.ndim == 2:
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)  # replicate gray to 3 channels
        else:
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    return img
|
198 |
-
|
199 |
-
|
200 |
-
# --------------------------------------------
|
201 |
-
# matlab's imwrite
|
202 |
-
# --------------------------------------------
|
203 |
-
def imsave(img, img_path):
    """Write `img` to `img_path` (matlab-style imwrite); squeezes singleton dims, RGB -> BGR."""
    out = np.squeeze(img)
    if out.ndim == 3:
        out = out[:, :, [2, 1, 0]]
    cv2.imwrite(img_path, out)
|
208 |
-
|
209 |
-
def imwrite(img, img_path):
    """Write `img` to `img_path`; squeezes singleton dims and converts RGB -> BGR for cv2."""
    out = np.squeeze(img)
    if out.ndim == 3:
        out = out[:, :, [2, 1, 0]]
    cv2.imwrite(img_path, out)
|
214 |
-
|
215 |
-
|
216 |
-
|
217 |
-
# --------------------------------------------
|
218 |
-
# get single image of size HxWxn_channles (BGR)
|
219 |
-
# --------------------------------------------
|
220 |
-
def read_img(path):
    """Read an image with cv2.

    return: Numpy float32, HWC, BGR, [0,1]
    """
    img = cv2.imread(path, cv2.IMREAD_UNCHANGED)  # cv2.IMREAD_GRAYSCALE
    img = img.astype(np.float32) / 255.
    if img.ndim == 2:
        img = np.expand_dims(img, axis=2)
    # some images have 4 channels — presumably alpha; keep first 3 only
    if img.shape[2] > 3:
        img = img[:, :, :3]
    return img
|
231 |
-
|
232 |
-
|
233 |
-
'''
|
234 |
-
# --------------------------------------------
|
235 |
-
# image format conversion
|
236 |
-
# --------------------------------------------
|
237 |
-
# numpy(single) <---> numpy(unit)
|
238 |
-
# numpy(single) <---> tensor
|
239 |
-
# numpy(unit) <---> tensor
|
240 |
-
# --------------------------------------------
|
241 |
-
'''
|
242 |
-
|
243 |
-
|
244 |
-
# --------------------------------------------
|
245 |
-
# numpy(single) [0, 1] <---> numpy(unit)
|
246 |
-
# --------------------------------------------
|
247 |
-
|
248 |
-
|
249 |
-
def uint2single(img):
    """Map a uint8 image in [0, 255] to float32 in [0, 1]."""
    scaled = img / 255.
    return np.float32(scaled)
|
252 |
-
|
253 |
-
|
254 |
-
def single2uint(img):
    """Map a float image in [0, 1] to uint8 in [0, 255], clipping then rounding."""
    scaled = img.clip(0, 1) * 255.
    return np.uint8(scaled.round())
|
257 |
-
|
258 |
-
|
259 |
-
def uint162single(img):
    """Map a uint16 image in [0, 65535] to float32 in [0, 1]."""
    scaled = img / 65535.
    return np.float32(scaled)
|
262 |
-
|
263 |
-
|
264 |
-
def single2uint16(img):
    """Map a float image in [0, 1] to uint16 in [0, 65535], clipping then rounding."""
    scaled = img.clip(0, 1) * 65535.
    return np.uint16(scaled.round())
|
267 |
-
|
268 |
-
|
269 |
-
# --------------------------------------------
|
270 |
-
# numpy(unit) (HxWxC or HxW) <---> tensor
|
271 |
-
# --------------------------------------------
|
272 |
-
|
273 |
-
|
274 |
-
# convert uint to 4-dimensional torch tensor
|
275 |
-
def uint2tensor4(img):
    """uint8 HxWxC (or HxW) ndarray -> float tensor 1xCxHxW scaled to [0, 1]."""
    if img.ndim == 2:
        img = np.expand_dims(img, axis=2)
    tensor = torch.from_numpy(np.ascontiguousarray(img))
    return tensor.permute(2, 0, 1).float().div(255.).unsqueeze(0)
|
279 |
-
|
280 |
-
|
281 |
-
# convert uint to 3-dimensional torch tensor
|
282 |
-
def uint2tensor3(img):
    """uint8 HxWxC (or HxW) ndarray -> float tensor CxHxW scaled to [0, 1]."""
    if img.ndim == 2:
        img = np.expand_dims(img, axis=2)
    tensor = torch.from_numpy(np.ascontiguousarray(img))
    return tensor.permute(2, 0, 1).float().div(255.)
|
286 |
-
|
287 |
-
|
288 |
-
# convert 2/3/4-dimensional torch tensor to uint
|
289 |
-
def tensor2uint(img):
    """2/3/4-D tensor -> uint8 ndarray; clamps to [0, 1], CxHxW becomes HxWxC."""
    arr = img.data.squeeze().float().clamp_(0, 1).cpu().numpy()
    if arr.ndim == 3:
        arr = np.transpose(arr, (1, 2, 0))
    return np.uint8((arr * 255.0).round())
|
294 |
-
|
295 |
-
|
296 |
-
# --------------------------------------------
|
297 |
-
# numpy(single) (HxWxC) <---> tensor
|
298 |
-
# --------------------------------------------
|
299 |
-
|
300 |
-
|
301 |
-
# convert single (HxWxC) to 3-dimensional torch tensor
|
302 |
-
def single2tensor3(img):
    """float HxWxC ndarray -> CxHxW float tensor."""
    tensor = torch.from_numpy(np.ascontiguousarray(img))
    return tensor.permute(2, 0, 1).float()
|
304 |
-
|
305 |
-
|
306 |
-
# convert single (HxWxC) to 4-dimensional torch tensor
|
307 |
-
def single2tensor4(img):
    """float HxWxC ndarray -> 1xCxHxW float tensor."""
    tensor = torch.from_numpy(np.ascontiguousarray(img))
    return tensor.permute(2, 0, 1).float().unsqueeze(0)
|
309 |
-
|
310 |
-
|
311 |
-
# convert torch tensor to single
|
312 |
-
def tensor2single(img):
    """torch tensor -> float32 ndarray; CxHxW becomes HxWxC."""
    arr = img.data.squeeze().float().cpu().numpy()
    if arr.ndim == 3:
        arr = np.transpose(arr, (1, 2, 0))
    return arr
|
318 |
-
|
319 |
-
# convert torch tensor to single
|
320 |
-
def tensor2single3(img):
    """torch tensor -> float32 ndarray with an explicit channel axis (HxWxC or HxWx1)."""
    arr = img.data.squeeze().float().cpu().numpy()
    if arr.ndim == 3:
        arr = np.transpose(arr, (1, 2, 0))
    elif arr.ndim == 2:
        arr = np.expand_dims(arr, axis=2)
    return arr
|
327 |
-
|
328 |
-
|
329 |
-
def single2tensor5(img):
    """4-D float ndarray -> 5-D float tensor via permute(2, 0, 1, 3) plus a leading batch dim."""
    tensor = torch.from_numpy(np.ascontiguousarray(img))
    return tensor.permute(2, 0, 1, 3).float().unsqueeze(0)
|
331 |
-
|
332 |
-
|
333 |
-
def single32tensor5(img):
    """float ndarray -> float tensor with two leading singleton dims added."""
    tensor = torch.from_numpy(np.ascontiguousarray(img)).float()
    return tensor.unsqueeze(0).unsqueeze(0)
|
335 |
-
|
336 |
-
|
337 |
-
def single42tensor4(img):
    """4-D float ndarray -> 4-D float tensor via permute(2, 0, 1, 3)."""
    tensor = torch.from_numpy(np.ascontiguousarray(img))
    return tensor.permute(2, 0, 1, 3).float()
|
339 |
-
|
340 |
-
|
341 |
-
# from skimage.io import imread, imsave
|
342 |
-
def tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)):
    '''
    Converts a torch Tensor into an image Numpy array of BGR channel order
    Input: 4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order
    Output: 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default)
    '''
    tensor = tensor.squeeze().float().cpu().clamp_(*min_max)  # squeeze first, then clamp
    tensor = (tensor - min_max[0]) / (min_max[1] - min_max[0])  # to range [0,1]
    n_dim = tensor.dim()
    if n_dim == 4:
        n_img = len(tensor)
        img_np = make_grid(tensor, nrow=int(math.sqrt(n_img)), normalize=False).numpy()
        img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0))  # HWC, BGR
    elif n_dim == 3:
        img_np = np.transpose(tensor.numpy()[[2, 1, 0], :, :], (1, 2, 0))  # HWC, BGR
    elif n_dim == 2:
        img_np = tensor.numpy()
    else:
        raise TypeError(
            'Only support 4D, 3D and 2D tensor. But received with dimension: {:d}'.format(n_dim))
    if out_type == np.uint8:
        # Important. Unlike matlab, numpy.unit8() WILL NOT round by default.
        img_np = (img_np * 255.0).round()
    return img_np.astype(out_type)
|
367 |
-
|
368 |
-
|
369 |
-
'''
|
370 |
-
# --------------------------------------------
|
371 |
-
# Augmentation, flipe and/or rotate
|
372 |
-
# --------------------------------------------
|
373 |
-
# The following two are enough.
|
374 |
-
# (1) augmet_img: numpy image of WxHxC or WxH
|
375 |
-
# (2) augment_img_tensor4: tensor image 1xCxWxH
|
376 |
-
# --------------------------------------------
|
377 |
-
'''
|
378 |
-
|
379 |
-
|
380 |
-
def augment_img(img, mode=0):
    '''Kai Zhang (github: https://github.com/cszn)
    Flip/rotate augmentation of a numpy image; 8 modes (0 = identity).
    '''
    ops = {
        0: lambda x: x,
        1: lambda x: np.flipud(np.rot90(x)),
        2: lambda x: np.flipud(x),
        3: lambda x: np.rot90(x, k=3),
        4: lambda x: np.flipud(np.rot90(x, k=2)),
        5: lambda x: np.rot90(x),
        6: lambda x: np.rot90(x, k=2),
        7: lambda x: np.flipud(np.rot90(x, k=3)),
    }
    op = ops.get(mode)
    return op(img) if op is not None else None
|
399 |
-
|
400 |
-
|
401 |
-
def augment_img_tensor4(img, mode=0):
    '''Kai Zhang (github: https://github.com/cszn)
    Same 8 augmentation modes as augment_img, for a BxCxHxW tensor.
    '''
    if mode == 0:
        return img
    # (number of 90-degree rotations over dims [2,3], whether to flip dim 2)
    table = {1: (1, True), 2: (0, True), 3: (3, False), 4: (2, True),
             5: (1, False), 6: (2, False), 7: (3, True)}
    if mode not in table:
        return None
    k, do_flip = table[mode]
    out = img.rot90(k, [2, 3]) if k else img
    return out.flip([2]) if do_flip else out
|
420 |
-
|
421 |
-
|
422 |
-
def augment_img_tensor(img, mode=0):
    '''Kai Zhang (github: https://github.com/cszn)
    Augment a 3-D or 4-D tensor by round-tripping through numpy and augment_img.
    '''
    ndims = len(img.size())
    arr = img.data.cpu().numpy()
    if ndims == 3:
        arr = np.transpose(arr, (1, 2, 0))
    elif ndims == 4:
        arr = np.transpose(arr, (2, 3, 1, 0))
    arr = augment_img(arr, mode=mode)
    out = torch.from_numpy(np.ascontiguousarray(arr))
    if ndims == 3:
        out = out.permute(2, 0, 1)
    elif ndims == 4:
        out = out.permute(3, 2, 0, 1)
    return out.type_as(img)
|
439 |
-
|
440 |
-
|
441 |
-
def augment_img_np3(img, mode=0):
    """Flip/transpose augmentation for HxWxC numpy arrays (8 modes, 0 = identity)."""
    if mode == 0:
        return img
    elif mode == 1:
        return img.transpose(1, 0, 2)
    elif mode == 2:
        return img[::-1, :, :]
    elif mode == 3:
        return img[::-1, :, :].transpose(1, 0, 2)
    elif mode == 4:
        return img[:, ::-1, :]
    elif mode == 5:
        return img[:, ::-1, :].transpose(1, 0, 2)
    elif mode == 6:
        return img[:, ::-1, :][::-1, :, :]
    elif mode == 7:
        return img[:, ::-1, :][::-1, :, :].transpose(1, 0, 2)
|
467 |
-
|
468 |
-
|
469 |
-
def augment_imgs(img_list, hflip=True, rot=True):
    """Randomly apply the same horizontal flip / vertical flip / transpose to every image."""
    do_hflip = hflip and random.random() < 0.5
    do_vflip = rot and random.random() < 0.5
    do_rot90 = rot and random.random() < 0.5

    def _augment(img):
        # apply the shared random choices to one HxWxC image
        if do_hflip:
            img = img[:, ::-1, :]
        if do_vflip:
            img = img[::-1, :, :]
        if do_rot90:
            img = img.transpose(1, 0, 2)
        return img

    return [_augment(img) for img in img_list]
|
485 |
-
|
486 |
-
|
487 |
-
'''
|
488 |
-
# --------------------------------------------
|
489 |
-
# modcrop and shave
|
490 |
-
# --------------------------------------------
|
491 |
-
'''
|
492 |
-
|
493 |
-
|
494 |
-
def modcrop(img_in, scale):
    """Crop an HWC or HW image so both spatial dims are divisible by `scale`."""
    # img_in: Numpy, HWC or HW
    img = np.copy(img_in)
    if img.ndim not in (2, 3):
        raise ValueError('Wrong img ndim: [{:d}].'.format(img.ndim))
    H, W = img.shape[:2]
    return img[:H - H % scale, :W - W % scale, ...]
|
508 |
-
|
509 |
-
|
510 |
-
def shave(img_in, border=0):
    """Remove `border` pixels from every spatial edge of an HWC or HW image."""
    # img_in: Numpy, HWC or HW
    img = np.copy(img_in)
    h, w = img.shape[:2]
    return img[border:h - border, border:w - border]
|
516 |
-
|
517 |
-
|
518 |
-
'''
|
519 |
-
# --------------------------------------------
|
520 |
-
# image processing process on numpy image
|
521 |
-
# channel_convert(in_c, tar_type, img_list):
|
522 |
-
# rgb2ycbcr(img, only_y=True):
|
523 |
-
# bgr2ycbcr(img, only_y=True):
|
524 |
-
# ycbcr2rgb(img):
|
525 |
-
# --------------------------------------------
|
526 |
-
'''
|
527 |
-
|
528 |
-
|
529 |
-
def rgb2ycbcr(img, only_y=True):
    '''same as matlab rgb2ycbcr
    only_y: only return Y channel
    Input:
        uint8, [0, 255]
        float, [0, 1]
    '''
    in_img_type = img.dtype
    # Bug fix: the original discarded the result of img.astype(np.float32) and
    # then executed `img *= 255.` on the caller's array, mutating float inputs
    # in place. Work on a float32 copy instead.
    img = img.astype(np.float32)
    if in_img_type != np.uint8:
        img *= 255.
    # convert with the ITU-R BT.601 coefficients used by MATLAB
    if only_y:
        rlt = np.dot(img, [65.481, 128.553, 24.966]) / 255.0 + 16.0
    else:
        rlt = np.matmul(img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786],
                              [24.966, 112.0, -18.214]]) / 255.0 + [16, 128, 128]
    if in_img_type == np.uint8:
        rlt = rlt.round()
    else:
        rlt /= 255.
    return rlt.astype(in_img_type)
|
551 |
-
|
552 |
-
|
553 |
-
def ycbcr2rgb(img):
    '''same as matlab ycbcr2rgb
    Input:
        uint8, [0, 255]
        float, [0, 1]
    '''
    in_img_type = img.dtype
    # Bug fix: the original discarded the result of img.astype(np.float32) and
    # then executed `img *= 255.` on the caller's array, mutating float inputs
    # in place. Work on a float32 copy instead.
    img = img.astype(np.float32)
    if in_img_type != np.uint8:
        img *= 255.
    # convert (inverse of the MATLAB rgb2ycbcr transform)
    rlt = np.matmul(img, [[0.00456621, 0.00456621, 0.00456621], [0, -0.00153632, 0.00791071],
                          [0.00625893, -0.00318811, 0]]) * 255.0 + [-222.921, 135.576, -276.836]
    if in_img_type == np.uint8:
        rlt = rlt.round()
    else:
        rlt /= 255.
    return rlt.astype(in_img_type)
|
571 |
-
|
572 |
-
|
573 |
-
def bgr2ycbcr(img, only_y=True):
    '''bgr version of rgb2ycbcr
    only_y: only return Y channel
    Input:
        uint8, [0, 255]
        float, [0, 1]
    '''
    in_img_type = img.dtype
    # Bug fix: the original discarded the result of img.astype(np.float32) and
    # then executed `img *= 255.` on the caller's array, mutating float inputs
    # in place. Work on a float32 copy instead.
    img = img.astype(np.float32)
    if in_img_type != np.uint8:
        img *= 255.
    # convert (same BT.601 coefficients as rgb2ycbcr, channel order reversed)
    if only_y:
        rlt = np.dot(img, [24.966, 128.553, 65.481]) / 255.0 + 16.0
    else:
        rlt = np.matmul(img, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786],
                              [65.481, -37.797, 112.0]]) / 255.0 + [16, 128, 128]
    if in_img_type == np.uint8:
        rlt = rlt.round()
    else:
        rlt /= 255.
    return rlt.astype(in_img_type)
|
595 |
-
|
596 |
-
|
597 |
-
def channel_convert(in_c, tar_type, img_list):
    """Convert a list of images among BGR, gray and Y-channel; pass through otherwise."""
    if in_c == 3 and tar_type == 'gray':  # BGR to gray
        return [np.expand_dims(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY), axis=2) for img in img_list]
    if in_c == 3 and tar_type == 'y':  # BGR to y
        return [np.expand_dims(bgr2ycbcr(img, only_y=True), axis=2) for img in img_list]
    if in_c == 1 and tar_type == 'RGB':  # gray/y to BGR
        return [cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) for img in img_list]
    return img_list
|
609 |
-
|
610 |
-
|
611 |
-
'''
|
612 |
-
# --------------------------------------------
|
613 |
-
# metric, PSNR and SSIM
|
614 |
-
# --------------------------------------------
|
615 |
-
'''
|
616 |
-
|
617 |
-
|
618 |
-
# --------------------------------------------
|
619 |
-
# PSNR
|
620 |
-
# --------------------------------------------
|
621 |
-
def calculate_psnr(img1, img2, border=0):
    """PSNR in dB between two [0, 255] images, optionally ignoring `border` pixels per edge."""
    if not img1.shape == img2.shape:
        raise ValueError('Input images must have the same dimensions.')
    h, w = img1.shape[:2]
    a = img1[border:h - border, border:w - border].astype(np.float64)
    b = img2[border:h - border, border:w - border].astype(np.float64)
    mse = np.mean((a - b) ** 2)
    if mse == 0:
        return float('inf')
    return 20 * math.log10(255.0 / math.sqrt(mse))
|
637 |
-
|
638 |
-
|
639 |
-
# --------------------------------------------
|
640 |
-
# SSIM
|
641 |
-
# --------------------------------------------
|
642 |
-
def calculate_ssim(img1, img2, border=0):
    '''calculate SSIM
    the same outputs as MATLAB's
    img1, img2: [0, 255]
    '''
    if not img1.shape == img2.shape:
        raise ValueError('Input images must have the same dimensions.')
    h, w = img1.shape[:2]
    a = img1[border:h - border, border:w - border]
    b = img2[border:h - border, border:w - border]

    if a.ndim == 2:
        return ssim(a, b)
    elif a.ndim == 3:
        if a.shape[2] == 3:
            # average SSIM over the three channels
            channel_scores = [ssim(a[:, :, i], b[:, :, i]) for i in range(3)]
            return np.array(channel_scores).mean()
        elif a.shape[2] == 1:
            return ssim(np.squeeze(a), np.squeeze(b))
    else:
        raise ValueError('Wrong input image dimensions.')
|
667 |
-
|
668 |
-
|
669 |
-
def ssim(img1, img2):
    """Single-channel SSIM (MATLAB-compatible), averaged over the valid region of an 11x11 Gaussian window."""
    C1 = (0.01 * 255) ** 2
    C2 = (0.03 * 255) ** 2

    a = img1.astype(np.float64)
    b = img2.astype(np.float64)
    kernel = cv2.getGaussianKernel(11, 1.5)
    window = np.outer(kernel, kernel.transpose())

    mu1 = cv2.filter2D(a, -1, window)[5:-5, 5:-5]  # 'valid' region only
    mu2 = cv2.filter2D(b, -1, window)[5:-5, 5:-5]
    mu1_sq = mu1 ** 2
    mu2_sq = mu2 ** 2
    mu1_mu2 = mu1 * mu2
    sigma1_sq = cv2.filter2D(a ** 2, -1, window)[5:-5, 5:-5] - mu1_sq
    sigma2_sq = cv2.filter2D(b ** 2, -1, window)[5:-5, 5:-5] - mu2_sq
    sigma12 = cv2.filter2D(a * b, -1, window)[5:-5, 5:-5] - mu1_mu2

    ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / (
        (mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))
    return ssim_map.mean()
|
690 |
-
|
691 |
-
|
692 |
-
'''
|
693 |
-
# --------------------------------------------
|
694 |
-
# matlab's bicubic imresize (numpy and torch) [0, 1]
|
695 |
-
# --------------------------------------------
|
696 |
-
'''
|
697 |
-
|
698 |
-
|
699 |
-
# matlab 'imresize' function, now only support 'bicubic'
|
700 |
-
def cubic(x):
    """Bicubic interpolation kernel (a = -0.5), as used by MATLAB's imresize."""
    absx = torch.abs(x)
    absx2 = absx ** 2
    absx3 = absx ** 3
    near = (1.5 * absx3 - 2.5 * absx2 + 1) * ((absx <= 1).type_as(absx))
    far = (-0.5 * absx3 + 2.5 * absx2 - 4 * absx + 2) * (((absx > 1) * (absx <= 2)).type_as(absx))
    return near + far
|
706 |
-
|
707 |
-
|
708 |
-
def calculate_weights_indices(in_length, out_length, scale, kernel, kernel_width, antialiasing):
    """Compute, per output pixel, the contributing input indices and bicubic weights.

    Mirrors MATLAB imresize's contribution computation. Returns
    (weights, indices, sym_len_s, sym_len_e) where sym_len_s/sym_len_e are the
    symmetric-padding lengths needed at the start/end of the input axis so that
    every index in `indices` is valid after padding.
    NOTE(review): `kernel` is accepted but unused here — only the cubic kernel
    is supported; confirm before extending.
    """
    if (scale < 1) and (antialiasing):
        # Use a modified kernel to simultaneously interpolate and antialias- larger kernel width
        kernel_width = kernel_width / scale

    # Output-space coordinates
    x = torch.linspace(1, out_length, out_length)

    # Input-space coordinates. Calculate the inverse mapping such that 0.5
    # in output space maps to 0.5 in input space, and 0.5+scale in output
    # space maps to 1.5 in input space.
    u = x / scale + 0.5 * (1 - 1 / scale)

    # What is the left-most pixel that can be involved in the computation?
    left = torch.floor(u - kernel_width / 2)

    # What is the maximum number of pixels that can be involved in the
    # computation? Note: it's OK to use an extra pixel here; if the
    # corresponding weights are all zero, it will be eliminated at the end
    # of this function.
    P = math.ceil(kernel_width) + 2

    # The indices of the input pixels involved in computing the k-th output
    # pixel are in row k of the indices matrix.
    indices = left.view(out_length, 1).expand(out_length, P) + torch.linspace(0, P - 1, P).view(
        1, P).expand(out_length, P)

    # The weights used to compute the k-th output pixel are in row k of the
    # weights matrix.
    distance_to_center = u.view(out_length, 1).expand(out_length, P) - indices
    # apply cubic kernel
    if (scale < 1) and (antialiasing):
        weights = scale * cubic(distance_to_center * scale)
    else:
        weights = cubic(distance_to_center)
    # Normalize the weights matrix so that each row sums to 1.
    weights_sum = torch.sum(weights, 1).view(out_length, 1)
    weights = weights / weights_sum.expand(out_length, P)

    # If a column in weights is all zero, get rid of it. only consider the first and last column.
    weights_zero_tmp = torch.sum((weights == 0), 0)
    if not math.isclose(weights_zero_tmp[0], 0, rel_tol=1e-6):
        indices = indices.narrow(1, 1, P - 2)
        weights = weights.narrow(1, 1, P - 2)
    if not math.isclose(weights_zero_tmp[-1], 0, rel_tol=1e-6):
        indices = indices.narrow(1, 0, P - 2)
        weights = weights.narrow(1, 0, P - 2)
    weights = weights.contiguous()
    indices = indices.contiguous()
    # symmetric padding lengths so every (1-based) index falls inside the padded input
    sym_len_s = -indices.min() + 1
    sym_len_e = indices.max() - in_length
    # shift to 0-based indices into the padded input
    indices = indices + sym_len_s - 1
    return weights, indices, int(sym_len_s), int(sym_len_e)
|
761 |
-
|
762 |
-
|
763 |
-
# --------------------------------------------
|
764 |
-
# imresize for tensor image [0, 1]
|
765 |
-
# --------------------------------------------
|
766 |
-
def imresize(img, scale, antialiasing=True):
    """MATLAB-style bicubic resize of a torch tensor in [0, 1].

    First resizes along H, then along W, using symmetric edge padding and the
    weights/indices from calculate_weights_indices. 2-D input is temporarily
    promoted to CHW (the in-place unsqueeze_ mutates the caller's tensor).
    """
    # Now the scale should be the same for H and W
    # input: img: pytorch tensor, CHW or HW [0,1]
    # output: CHW or HW [0,1] w/o round
    need_squeeze = True if img.dim() == 2 else False
    if need_squeeze:
        img.unsqueeze_(0)
    in_C, in_H, in_W = img.size()
    out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
    kernel_width = 4
    kernel = 'cubic'

    # Return the desired dimension order for performing the resize. The
    # strategy is to perform the resize first along the dimension with the
    # smallest scale factor.
    # Now we do not support this.

    # get weights and indices
    weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
        in_H, out_H, scale, kernel, kernel_width, antialiasing)
    weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
        in_W, out_W, scale, kernel, kernel_width, antialiasing)
    # process H dimension
    # symmetric copying
    img_aug = torch.FloatTensor(in_C, in_H + sym_len_Hs + sym_len_He, in_W)
    img_aug.narrow(1, sym_len_Hs, in_H).copy_(img)

    # mirror the top sym_len_Hs rows into the leading pad
    sym_patch = img[:, :sym_len_Hs, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(1, inv_idx)
    img_aug.narrow(1, 0, sym_len_Hs).copy_(sym_patch_inv)

    # mirror the bottom sym_len_He rows into the trailing pad
    sym_patch = img[:, -sym_len_He:, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(1, inv_idx)
    img_aug.narrow(1, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)

    out_1 = torch.FloatTensor(in_C, out_H, in_W)
    kernel_width = weights_H.size(1)
    for i in range(out_H):
        idx = int(indices_H[i][0])
        for j in range(out_C):
            # weighted sum of kernel_width input rows -> one output row
            out_1[j, i, :] = img_aug[j, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i])

    # process W dimension
    # symmetric copying
    out_1_aug = torch.FloatTensor(in_C, out_H, in_W + sym_len_Ws + sym_len_We)
    out_1_aug.narrow(2, sym_len_Ws, in_W).copy_(out_1)

    sym_patch = out_1[:, :, :sym_len_Ws]
    inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(2, inv_idx)
    out_1_aug.narrow(2, 0, sym_len_Ws).copy_(sym_patch_inv)

    sym_patch = out_1[:, :, -sym_len_We:]
    inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(2, inv_idx)
    out_1_aug.narrow(2, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)

    out_2 = torch.FloatTensor(in_C, out_H, out_W)
    kernel_width = weights_W.size(1)
    for i in range(out_W):
        idx = int(indices_W[i][0])
        for j in range(out_C):
            # weighted sum of kernel_width columns -> one output column
            out_2[j, :, i] = out_1_aug[j, :, idx:idx + kernel_width].mv(weights_W[i])
    if need_squeeze:
        out_2.squeeze_()
    return out_2
|
834 |
-
|
835 |
-
|
836 |
-
# --------------------------------------------
|
837 |
-
# imresize for numpy image [0, 1]
|
838 |
-
# --------------------------------------------
|
839 |
-
def imresize_np(img, scale, antialiasing=True):
    """MATLAB-style bicubic resize of a numpy image in [0, 1] (HWC or HW).

    Same algorithm as imresize, but the channel axis is last: resizes H, then W,
    via torch tensors and symmetric edge padding, returning a numpy array.
    """
    # Now the scale should be the same for H and W
    # input: img: Numpy, HWC or HW [0,1]
    # output: HWC or HW [0,1] w/o round
    img = torch.from_numpy(img)
    need_squeeze = True if img.dim() == 2 else False
    if need_squeeze:
        img.unsqueeze_(2)

    in_H, in_W, in_C = img.size()
    out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
    kernel_width = 4
    kernel = 'cubic'

    # Return the desired dimension order for performing the resize. The
    # strategy is to perform the resize first along the dimension with the
    # smallest scale factor.
    # Now we do not support this.

    # get weights and indices
    weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
        in_H, out_H, scale, kernel, kernel_width, antialiasing)
    weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
        in_W, out_W, scale, kernel, kernel_width, antialiasing)
    # process H dimension
    # symmetric copying
    img_aug = torch.FloatTensor(in_H + sym_len_Hs + sym_len_He, in_W, in_C)
    img_aug.narrow(0, sym_len_Hs, in_H).copy_(img)

    # mirror the top rows into the leading pad
    sym_patch = img[:sym_len_Hs, :, :]
    inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(0, inv_idx)
    img_aug.narrow(0, 0, sym_len_Hs).copy_(sym_patch_inv)

    # mirror the bottom rows into the trailing pad
    sym_patch = img[-sym_len_He:, :, :]
    inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(0, inv_idx)
    img_aug.narrow(0, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)

    out_1 = torch.FloatTensor(out_H, in_W, in_C)
    kernel_width = weights_H.size(1)
    for i in range(out_H):
        idx = int(indices_H[i][0])
        for j in range(out_C):
            # weighted sum of kernel_width input rows -> one output row
            out_1[i, :, j] = img_aug[idx:idx + kernel_width, :, j].transpose(0, 1).mv(weights_H[i])

    # process W dimension
    # symmetric copying
    out_1_aug = torch.FloatTensor(out_H, in_W + sym_len_Ws + sym_len_We, in_C)
    out_1_aug.narrow(1, sym_len_Ws, in_W).copy_(out_1)

    sym_patch = out_1[:, :sym_len_Ws, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(1, inv_idx)
    out_1_aug.narrow(1, 0, sym_len_Ws).copy_(sym_patch_inv)

    sym_patch = out_1[:, -sym_len_We:, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(1, inv_idx)
    out_1_aug.narrow(1, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)

    out_2 = torch.FloatTensor(out_H, out_W, in_C)
    kernel_width = weights_W.size(1)
    for i in range(out_W):
        idx = int(indices_W[i][0])
        for j in range(out_C):
            # weighted sum of kernel_width columns -> one output column
            out_2[:, i, j] = out_1_aug[:, idx:idx + kernel_width, j].mv(weights_W[i])
    if need_squeeze:
        out_2.squeeze_()

    return out_2.numpy()
|
910 |
-
|
911 |
-
|
912 |
-
# Quick manual smoke entry point; the commented lines show a typical
# read -> normalize -> bicubic-downscale pipeline using helpers above.
if __name__ == '__main__':
    print('---')
    # img = imread_uint('test.bmp', 3)
    # img = uint2single(img)
    # img_bicubic = imresize_np(img, 1/4)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/visibility/all.py
DELETED
@@ -1,17 +0,0 @@
|
|
1 |
-
from __future__ import annotations
|
2 |
-
|
3 |
-
from typing import TYPE_CHECKING, Any
|
4 |
-
|
5 |
-
from . import visibility_registry as VisibilityRegistry
|
6 |
-
from .base import BaseVisibility
|
7 |
-
|
8 |
-
if TYPE_CHECKING:
|
9 |
-
from agentverse.environments import BaseEnvironment
|
10 |
-
|
11 |
-
|
12 |
-
@VisibilityRegistry.register("all")
|
13 |
-
class AllVisibility(BaseVisibility):
|
14 |
-
"""All the messages can be seen by all the agents"""
|
15 |
-
|
16 |
-
def update_visible_agents(self, environment: BaseEnvironment):
|
17 |
-
pass
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/PointToChild.js
DELETED
@@ -1,41 +0,0 @@
|
|
1 |
-
import IsFunction from '../../../plugins/utils/object/IsFunction.js';
|
2 |
-
import IsArray from '../../../plugins/utils/object/IsArray.js';
|
3 |
-
import ContainsPoint from '../utils/ContainsPoint.js';
|
4 |
-
|
5 |
-
var PointToChild = function (x, y, preTest, postTest, children) {
|
6 |
-
if (!IsFunction(preTest)) {
|
7 |
-
children = preTest;
|
8 |
-
preTest = undefined;
|
9 |
-
postTest = undefined;
|
10 |
-
}
|
11 |
-
|
12 |
-
if (children === undefined) {
|
13 |
-
if (this.sizerChildren) {
|
14 |
-
children = this.sizerChildren;
|
15 |
-
} else {
|
16 |
-
children = this.children;
|
17 |
-
}
|
18 |
-
}
|
19 |
-
|
20 |
-
if (IsArray(children)) {
|
21 |
-
var child;
|
22 |
-
for (var i = 0, cnt = children.length; i < cnt; i++) {
|
23 |
-
child = children[i];
|
24 |
-
if (ContainsPoint(child, x, y, preTest, postTest)) {
|
25 |
-
return child;
|
26 |
-
}
|
27 |
-
}
|
28 |
-
} else {
|
29 |
-
var child;
|
30 |
-
for (var key in children) {
|
31 |
-
child = children[key];
|
32 |
-
if (ContainsPoint(child, x, y, preTest, postTest)) {
|
33 |
-
return child;
|
34 |
-
}
|
35 |
-
}
|
36 |
-
}
|
37 |
-
|
38 |
-
return null;
|
39 |
-
}
|
40 |
-
|
41 |
-
export default PointToChild;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/holygrail/HolyGrail.d.ts
DELETED
@@ -1,69 +0,0 @@
|
|
1 |
-
// import * as Phaser from 'phaser';
|
2 |
-
import Sizer from '../sizer/Sizer';
|
3 |
-
|
4 |
-
export default HolyGrail;
|
5 |
-
|
6 |
-
declare namespace HolyGrail {
|
7 |
-
|
8 |
-
type HAlignTypes = number | 'left' | 'center' | 'right';
|
9 |
-
type VAlignTypes = number | 'top' | 'center' | 'bottom';
|
10 |
-
|
11 |
-
interface IConfig extends Sizer.IConfig {
|
12 |
-
space?: {
|
13 |
-
left?: number, right?: number, top?: number, bottom?: number,
|
14 |
-
|
15 |
-
header?: number | { left?: number, right?: number, top?: number, bottom?: number },
|
16 |
-
leftSide?: number | { left?: number, right?: number, top?: number, bottom?: number },
|
17 |
-
content?: { left?: number, right?: number, top?: number, bottom?: number },
|
18 |
-
rightSide?: number | { left?: number, right?: number, top?: number, bottom?: number },
|
19 |
-
footer?: number | { left?: number, right?: number, top?: number, bottom?: number },
|
20 |
-
};
|
21 |
-
|
22 |
-
background?: Phaser.GameObjects.GameObject,
|
23 |
-
|
24 |
-
header?: Phaser.GameObjects.GameObject,
|
25 |
-
|
26 |
-
leftSide?: Phaser.GameObjects.GameObject,
|
27 |
-
|
28 |
-
content?: Phaser.GameObjects.GameObject,
|
29 |
-
|
30 |
-
rightSide?: Phaser.GameObjects.GameObject,
|
31 |
-
|
32 |
-
footer?: Phaser.GameObjects.GameObject,
|
33 |
-
|
34 |
-
layoutMode?: 0 | 1 | 2 | 3 | 'FFF' | 'LFF' | 'FFR' | 'LFR',
|
35 |
-
|
36 |
-
proportion?: {
|
37 |
-
header?: number,
|
38 |
-
leftSide?: number,
|
39 |
-
content?: number,
|
40 |
-
rightSide?: number,
|
41 |
-
footer?: number,
|
42 |
-
},
|
43 |
-
|
44 |
-
expand?: {
|
45 |
-
header?: boolean,
|
46 |
-
leftSide?: boolean,
|
47 |
-
content?: boolean,
|
48 |
-
rightSide?: boolean,
|
49 |
-
footer?: boolean,
|
50 |
-
},
|
51 |
-
|
52 |
-
align?: {
|
53 |
-
header?: HAlignTypes,
|
54 |
-
leftSide?: VAlignTypes,
|
55 |
-
content?: HAlignTypes | VAlignTypes,
|
56 |
-
rightSide?: VAlignTypes,
|
57 |
-
footer?: HAlignTypes,
|
58 |
-
},
|
59 |
-
|
60 |
-
}
|
61 |
-
}
|
62 |
-
|
63 |
-
declare class HolyGrail extends Sizer {
|
64 |
-
constructor(
|
65 |
-
scene: Phaser.Scene,
|
66 |
-
config?: HolyGrail.IConfig
|
67 |
-
);
|
68 |
-
|
69 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AlawnCN/webui-docker/Dockerfile
DELETED
@@ -1,47 +0,0 @@
|
|
1 |
-
# Dockerfile Private Nightly CPU
|
2 |
-
|
3 |
-
# https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/11.7.1/ubuntu2204/devel/cudnn8/Dockerfile
|
4 |
-
# FROM nvidia/cuda:11.7.1-cudnn8-devel-ubuntu22.04
|
5 |
-
# https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/11.7.1/ubuntu2204/base/Dockerfile
|
6 |
-
FROM nvidia/cuda:11.7.1-base-ubuntu22.04
|
7 |
-
ENV DEBIAN_FRONTEND noninteractive
|
8 |
-
|
9 |
-
RUN apt-get update -y && apt-get upgrade -y && apt-get install -y libgl1 libglib2.0-0 wget git git-lfs python3-pip python-is-python3 && rm -rf /var/lib/apt/lists/*
|
10 |
-
|
11 |
-
RUN adduser --disabled-password --gecos '' user
|
12 |
-
RUN mkdir /content && chown -R user:user /content
|
13 |
-
WORKDIR /content
|
14 |
-
USER user
|
15 |
-
|
16 |
-
RUN pip3 install --upgrade pip
|
17 |
-
RUN pip install https://github.com/camenduru/stable-diffusion-webui-colab/releases/download/0.0.16/xformers-0.0.16+814314d.d20230118-cp310-cp310-linux_x86_64.whl
|
18 |
-
RUN pip install --pre triton
|
19 |
-
RUN pip install numexpr
|
20 |
-
|
21 |
-
RUN git clone -b v2.0 https://github.com/camenduru/stable-diffusion-webui
|
22 |
-
RUN sed -i -e 's/ start()/ #start()/g' /content/stable-diffusion-webui/launch.py
|
23 |
-
RUN cd stable-diffusion-webui && python launch.py --skip-torch-cuda-test
|
24 |
-
|
25 |
-
# ----------------------------Delete this block if you don't want to see the extra header----------------------------
|
26 |
-
ADD --chown=user https://github.com/camenduru/webui-docker/raw/main/env_patch.py /content/env_patch.py
|
27 |
-
RUN sed -i -e '/import image_from_url_text/r /content/env_patch.py' /content/stable-diffusion-webui/modules/ui.py
|
28 |
-
ADD --chown=user https://github.com/camenduru/webui-docker/raw/main/header_patch.py /content/header_patch.py
|
29 |
-
RUN sed -i -e '/demo:/r /content/header_patch.py' /content/stable-diffusion-webui/modules/ui.py
|
30 |
-
# -------------------------------------------------------------------------------------------------------------------
|
31 |
-
|
32 |
-
ADD --chown=user https://raw.githubusercontent.com/camenduru/stable-diffusion-webui-scripts/main/run_n_times.py /content/stable-diffusion-webui/scripts/run_n_times.py
|
33 |
-
RUN git clone https://github.com/deforum-art/deforum-for-automatic1111-webui /content/stable-diffusion-webui/extensions/deforum-for-automatic1111-webui
|
34 |
-
RUN git clone https://github.com/AlUlkesh/stable-diffusion-webui-images-browser /content/stable-diffusion-webui/extensions/stable-diffusion-webui-images-browser
|
35 |
-
RUN git clone https://github.com/camenduru/stable-diffusion-webui-huggingface /content/stable-diffusion-webui/extensions/stable-diffusion-webui-huggingface
|
36 |
-
RUN git clone -b v2.0 https://github.com/camenduru/sd-civitai-browser /content/stable-diffusion-webui/extensions/sd-civitai-browser
|
37 |
-
RUN git clone https://github.com/kohya-ss/sd-webui-additional-networks /content/stable-diffusion-webui/extensions/sd-webui-additional-networks
|
38 |
-
|
39 |
-
COPY --chown=user config.json /content/config.json
|
40 |
-
COPY --chown=user ui-config.json /content/ui-config.json
|
41 |
-
|
42 |
-
ADD --chown=user https://huggingface.co/andite/anything-v4.0/resolve/main/anything-v4.5-pruned.ckpt /content/stable-diffusion-webui/models/Stable-diffusion/anything-v4.5-pruned.ckpt
|
43 |
-
ADD --chown=user https://huggingface.co/andite/anything-v4.0/resolve/main/anything-v4.0.vae.pt /content/stable-diffusion-webui/models/Stable-diffusion/anything-v4.5-pruned.vae.pt
|
44 |
-
|
45 |
-
EXPOSE 7860
|
46 |
-
|
47 |
-
CMD cd /content/stable-diffusion-webui && python webui.py --use-cpu all --no-half --listen --disable-console-progressbars --ui-config-file /content/ui-config.json --ui-settings-file /content/config.json
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AlekseyKorshuk/model-evaluation/README.md
DELETED
@@ -1,12 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Model Evaluation
|
3 |
-
emoji: 🔥
|
4 |
-
colorFrom: indigo
|
5 |
-
colorTo: red
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.28.1
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
---
|
11 |
-
|
12 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Alichuan/VITS-Umamusume-voice-synthesizer/text/thai.py
DELETED
@@ -1,44 +0,0 @@
|
|
1 |
-
import re
|
2 |
-
from num_thai.thainumbers import NumThai
|
3 |
-
|
4 |
-
|
5 |
-
num = NumThai()
|
6 |
-
|
7 |
-
# List of (Latin alphabet, Thai) pairs:
|
8 |
-
_latin_to_thai = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
|
9 |
-
('a', 'เอ'),
|
10 |
-
('b','บี'),
|
11 |
-
('c','ซี'),
|
12 |
-
('d','ดี'),
|
13 |
-
('e','อี'),
|
14 |
-
('f','เอฟ'),
|
15 |
-
('g','จี'),
|
16 |
-
('h','เอช'),
|
17 |
-
('i','ไอ'),
|
18 |
-
('j','เจ'),
|
19 |
-
('k','เค'),
|
20 |
-
('l','แอล'),
|
21 |
-
('m','เอ็ม'),
|
22 |
-
('n','เอ็น'),
|
23 |
-
('o','โอ'),
|
24 |
-
('p','พี'),
|
25 |
-
('q','คิว'),
|
26 |
-
('r','แอร์'),
|
27 |
-
('s','เอส'),
|
28 |
-
('t','ที'),
|
29 |
-
('u','ยู'),
|
30 |
-
('v','วี'),
|
31 |
-
('w','ดับเบิลยู'),
|
32 |
-
('x','เอ็กซ์'),
|
33 |
-
('y','วาย'),
|
34 |
-
('z','ซี')
|
35 |
-
]]
|
36 |
-
|
37 |
-
|
38 |
-
def num_to_thai(text):
|
39 |
-
return re.sub(r'(?:\d+(?:,?\d+)?)+(?:\.\d+(?:,?\d+)?)?', lambda x: ''.join(num.NumberToTextThai(float(x.group(0).replace(',', '')))), text)
|
40 |
-
|
41 |
-
def latin_to_thai(text):
|
42 |
-
for regex, replacement in _latin_to_thai:
|
43 |
-
text = re.sub(regex, replacement, text)
|
44 |
-
return text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Amrrs/DragGan-Inversion/stylegan_human/pti/pti_models/e4e/encoders/helpers.py
DELETED
@@ -1,145 +0,0 @@
|
|
1 |
-
from collections import namedtuple
|
2 |
-
import torch
|
3 |
-
import torch.nn.functional as F
|
4 |
-
from torch.nn import Conv2d, BatchNorm2d, PReLU, ReLU, Sigmoid, MaxPool2d, AdaptiveAvgPool2d, Sequential, Module
|
5 |
-
|
6 |
-
"""
|
7 |
-
ArcFace implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch)
|
8 |
-
"""
|
9 |
-
|
10 |
-
|
11 |
-
class Flatten(Module):
|
12 |
-
def forward(self, input):
|
13 |
-
return input.view(input.size(0), -1)
|
14 |
-
|
15 |
-
|
16 |
-
def l2_norm(input, axis=1):
|
17 |
-
norm = torch.norm(input, 2, axis, True)
|
18 |
-
output = torch.div(input, norm)
|
19 |
-
return output
|
20 |
-
|
21 |
-
|
22 |
-
class Bottleneck(namedtuple('Block', ['in_channel', 'depth', 'stride'])):
|
23 |
-
""" A named tuple describing a ResNet block. """
|
24 |
-
|
25 |
-
|
26 |
-
def get_block(in_channel, depth, num_units, stride=2):
|
27 |
-
return [Bottleneck(in_channel, depth, stride)] + [Bottleneck(depth, depth, 1) for i in range(num_units - 1)]
|
28 |
-
|
29 |
-
|
30 |
-
def get_blocks(num_layers):
|
31 |
-
if num_layers == 50:
|
32 |
-
blocks = [
|
33 |
-
get_block(in_channel=64, depth=64, num_units=3),
|
34 |
-
get_block(in_channel=64, depth=128, num_units=4),
|
35 |
-
get_block(in_channel=128, depth=256, num_units=14),
|
36 |
-
get_block(in_channel=256, depth=512, num_units=3)
|
37 |
-
]
|
38 |
-
elif num_layers == 100:
|
39 |
-
blocks = [
|
40 |
-
get_block(in_channel=64, depth=64, num_units=3),
|
41 |
-
get_block(in_channel=64, depth=128, num_units=13),
|
42 |
-
get_block(in_channel=128, depth=256, num_units=30),
|
43 |
-
get_block(in_channel=256, depth=512, num_units=3)
|
44 |
-
]
|
45 |
-
elif num_layers == 152:
|
46 |
-
blocks = [
|
47 |
-
get_block(in_channel=64, depth=64, num_units=3),
|
48 |
-
get_block(in_channel=64, depth=128, num_units=8),
|
49 |
-
get_block(in_channel=128, depth=256, num_units=36),
|
50 |
-
get_block(in_channel=256, depth=512, num_units=3)
|
51 |
-
]
|
52 |
-
else:
|
53 |
-
raise ValueError(
|
54 |
-
"Invalid number of layers: {}. Must be one of [50, 100, 152]".format(num_layers))
|
55 |
-
return blocks
|
56 |
-
|
57 |
-
|
58 |
-
class SEModule(Module):
|
59 |
-
def __init__(self, channels, reduction):
|
60 |
-
super(SEModule, self).__init__()
|
61 |
-
self.avg_pool = AdaptiveAvgPool2d(1)
|
62 |
-
self.fc1 = Conv2d(channels, channels // reduction,
|
63 |
-
kernel_size=1, padding=0, bias=False)
|
64 |
-
self.relu = ReLU(inplace=True)
|
65 |
-
self.fc2 = Conv2d(channels // reduction, channels,
|
66 |
-
kernel_size=1, padding=0, bias=False)
|
67 |
-
self.sigmoid = Sigmoid()
|
68 |
-
|
69 |
-
def forward(self, x):
|
70 |
-
module_input = x
|
71 |
-
x = self.avg_pool(x)
|
72 |
-
x = self.fc1(x)
|
73 |
-
x = self.relu(x)
|
74 |
-
x = self.fc2(x)
|
75 |
-
x = self.sigmoid(x)
|
76 |
-
return module_input * x
|
77 |
-
|
78 |
-
|
79 |
-
class bottleneck_IR(Module):
|
80 |
-
def __init__(self, in_channel, depth, stride):
|
81 |
-
super(bottleneck_IR, self).__init__()
|
82 |
-
if in_channel == depth:
|
83 |
-
self.shortcut_layer = MaxPool2d(1, stride)
|
84 |
-
else:
|
85 |
-
self.shortcut_layer = Sequential(
|
86 |
-
Conv2d(in_channel, depth, (1, 1), stride, bias=False),
|
87 |
-
BatchNorm2d(depth)
|
88 |
-
)
|
89 |
-
self.res_layer = Sequential(
|
90 |
-
BatchNorm2d(in_channel),
|
91 |
-
Conv2d(in_channel, depth, (3, 3), (1, 1),
|
92 |
-
1, bias=False), PReLU(depth),
|
93 |
-
Conv2d(depth, depth, (3, 3), stride, 1,
|
94 |
-
bias=False), BatchNorm2d(depth)
|
95 |
-
)
|
96 |
-
|
97 |
-
def forward(self, x):
|
98 |
-
shortcut = self.shortcut_layer(x)
|
99 |
-
res = self.res_layer(x)
|
100 |
-
return res + shortcut
|
101 |
-
|
102 |
-
|
103 |
-
class bottleneck_IR_SE(Module):
|
104 |
-
def __init__(self, in_channel, depth, stride):
|
105 |
-
super(bottleneck_IR_SE, self).__init__()
|
106 |
-
if in_channel == depth:
|
107 |
-
self.shortcut_layer = MaxPool2d(1, stride)
|
108 |
-
else:
|
109 |
-
self.shortcut_layer = Sequential(
|
110 |
-
Conv2d(in_channel, depth, (1, 1), stride, bias=False),
|
111 |
-
BatchNorm2d(depth)
|
112 |
-
)
|
113 |
-
self.res_layer = Sequential(
|
114 |
-
BatchNorm2d(in_channel),
|
115 |
-
Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
|
116 |
-
PReLU(depth),
|
117 |
-
Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
|
118 |
-
BatchNorm2d(depth),
|
119 |
-
SEModule(depth, 16)
|
120 |
-
)
|
121 |
-
|
122 |
-
def forward(self, x):
|
123 |
-
shortcut = self.shortcut_layer(x)
|
124 |
-
res = self.res_layer(x)
|
125 |
-
return res + shortcut
|
126 |
-
|
127 |
-
|
128 |
-
def _upsample_add(x, y):
|
129 |
-
"""Upsample and add two feature maps.
|
130 |
-
Args:
|
131 |
-
x: (Variable) top feature map to be upsampled.
|
132 |
-
y: (Variable) lateral feature map.
|
133 |
-
Returns:
|
134 |
-
(Variable) added feature map.
|
135 |
-
Note in PyTorch, when input size is odd, the upsampled feature map
|
136 |
-
with `F.upsample(..., scale_factor=2, mode='nearest')`
|
137 |
-
maybe not equal to the lateral feature map size.
|
138 |
-
e.g.
|
139 |
-
original input size: [N,_,15,15] ->
|
140 |
-
conv2d feature map size: [N,_,8,8] ->
|
141 |
-
upsampled feature map size: [N,_,16,16]
|
142 |
-
So we choose bilinear upsample which supports arbitrary output sizes.
|
143 |
-
"""
|
144 |
-
_, _, H, W = y.size()
|
145 |
-
return F.interpolate(x, size=(H, W), mode='bilinear', align_corners=True) + y
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/mmdet/models/backbones/darknet.py
DELETED
@@ -1,199 +0,0 @@
|
|
1 |
-
# Copyright (c) 2019 Western Digital Corporation or its affiliates.
|
2 |
-
|
3 |
-
import logging
|
4 |
-
|
5 |
-
import torch.nn as nn
|
6 |
-
from mmcv.cnn import ConvModule, constant_init, kaiming_init
|
7 |
-
from mmcv.runner import load_checkpoint
|
8 |
-
from torch.nn.modules.batchnorm import _BatchNorm
|
9 |
-
|
10 |
-
from ..builder import BACKBONES
|
11 |
-
|
12 |
-
|
13 |
-
class ResBlock(nn.Module):
|
14 |
-
"""The basic residual block used in Darknet. Each ResBlock consists of two
|
15 |
-
ConvModules and the input is added to the final output. Each ConvModule is
|
16 |
-
composed of Conv, BN, and LeakyReLU. In YoloV3 paper, the first convLayer
|
17 |
-
has half of the number of the filters as much as the second convLayer. The
|
18 |
-
first convLayer has filter size of 1x1 and the second one has the filter
|
19 |
-
size of 3x3.
|
20 |
-
|
21 |
-
Args:
|
22 |
-
in_channels (int): The input channels. Must be even.
|
23 |
-
conv_cfg (dict): Config dict for convolution layer. Default: None.
|
24 |
-
norm_cfg (dict): Dictionary to construct and config norm layer.
|
25 |
-
Default: dict(type='BN', requires_grad=True)
|
26 |
-
act_cfg (dict): Config dict for activation layer.
|
27 |
-
Default: dict(type='LeakyReLU', negative_slope=0.1).
|
28 |
-
"""
|
29 |
-
|
30 |
-
def __init__(self,
|
31 |
-
in_channels,
|
32 |
-
conv_cfg=None,
|
33 |
-
norm_cfg=dict(type='BN', requires_grad=True),
|
34 |
-
act_cfg=dict(type='LeakyReLU', negative_slope=0.1)):
|
35 |
-
super(ResBlock, self).__init__()
|
36 |
-
assert in_channels % 2 == 0 # ensure the in_channels is even
|
37 |
-
half_in_channels = in_channels // 2
|
38 |
-
|
39 |
-
# shortcut
|
40 |
-
cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
|
41 |
-
|
42 |
-
self.conv1 = ConvModule(in_channels, half_in_channels, 1, **cfg)
|
43 |
-
self.conv2 = ConvModule(
|
44 |
-
half_in_channels, in_channels, 3, padding=1, **cfg)
|
45 |
-
|
46 |
-
def forward(self, x):
|
47 |
-
residual = x
|
48 |
-
out = self.conv1(x)
|
49 |
-
out = self.conv2(out)
|
50 |
-
out = out + residual
|
51 |
-
|
52 |
-
return out
|
53 |
-
|
54 |
-
|
55 |
-
@BACKBONES.register_module()
|
56 |
-
class Darknet(nn.Module):
|
57 |
-
"""Darknet backbone.
|
58 |
-
|
59 |
-
Args:
|
60 |
-
depth (int): Depth of Darknet. Currently only support 53.
|
61 |
-
out_indices (Sequence[int]): Output from which stages.
|
62 |
-
frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
|
63 |
-
-1 means not freezing any parameters. Default: -1.
|
64 |
-
conv_cfg (dict): Config dict for convolution layer. Default: None.
|
65 |
-
norm_cfg (dict): Dictionary to construct and config norm layer.
|
66 |
-
Default: dict(type='BN', requires_grad=True)
|
67 |
-
act_cfg (dict): Config dict for activation layer.
|
68 |
-
Default: dict(type='LeakyReLU', negative_slope=0.1).
|
69 |
-
norm_eval (bool): Whether to set norm layers to eval mode, namely,
|
70 |
-
freeze running stats (mean and var). Note: Effect on Batch Norm
|
71 |
-
and its variants only.
|
72 |
-
|
73 |
-
Example:
|
74 |
-
>>> from mmdet.models import Darknet
|
75 |
-
>>> import torch
|
76 |
-
>>> self = Darknet(depth=53)
|
77 |
-
>>> self.eval()
|
78 |
-
>>> inputs = torch.rand(1, 3, 416, 416)
|
79 |
-
>>> level_outputs = self.forward(inputs)
|
80 |
-
>>> for level_out in level_outputs:
|
81 |
-
... print(tuple(level_out.shape))
|
82 |
-
...
|
83 |
-
(1, 256, 52, 52)
|
84 |
-
(1, 512, 26, 26)
|
85 |
-
(1, 1024, 13, 13)
|
86 |
-
"""
|
87 |
-
|
88 |
-
# Dict(depth: (layers, channels))
|
89 |
-
arch_settings = {
|
90 |
-
53: ((1, 2, 8, 8, 4), ((32, 64), (64, 128), (128, 256), (256, 512),
|
91 |
-
(512, 1024)))
|
92 |
-
}
|
93 |
-
|
94 |
-
def __init__(self,
|
95 |
-
depth=53,
|
96 |
-
out_indices=(3, 4, 5),
|
97 |
-
frozen_stages=-1,
|
98 |
-
conv_cfg=None,
|
99 |
-
norm_cfg=dict(type='BN', requires_grad=True),
|
100 |
-
act_cfg=dict(type='LeakyReLU', negative_slope=0.1),
|
101 |
-
norm_eval=True):
|
102 |
-
super(Darknet, self).__init__()
|
103 |
-
if depth not in self.arch_settings:
|
104 |
-
raise KeyError(f'invalid depth {depth} for darknet')
|
105 |
-
self.depth = depth
|
106 |
-
self.out_indices = out_indices
|
107 |
-
self.frozen_stages = frozen_stages
|
108 |
-
self.layers, self.channels = self.arch_settings[depth]
|
109 |
-
|
110 |
-
cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
|
111 |
-
|
112 |
-
self.conv1 = ConvModule(3, 32, 3, padding=1, **cfg)
|
113 |
-
|
114 |
-
self.cr_blocks = ['conv1']
|
115 |
-
for i, n_layers in enumerate(self.layers):
|
116 |
-
layer_name = f'conv_res_block{i + 1}'
|
117 |
-
in_c, out_c = self.channels[i]
|
118 |
-
self.add_module(
|
119 |
-
layer_name,
|
120 |
-
self.make_conv_res_block(in_c, out_c, n_layers, **cfg))
|
121 |
-
self.cr_blocks.append(layer_name)
|
122 |
-
|
123 |
-
self.norm_eval = norm_eval
|
124 |
-
|
125 |
-
def forward(self, x):
|
126 |
-
outs = []
|
127 |
-
for i, layer_name in enumerate(self.cr_blocks):
|
128 |
-
cr_block = getattr(self, layer_name)
|
129 |
-
x = cr_block(x)
|
130 |
-
if i in self.out_indices:
|
131 |
-
outs.append(x)
|
132 |
-
|
133 |
-
return tuple(outs)
|
134 |
-
|
135 |
-
def init_weights(self, pretrained=None):
|
136 |
-
if isinstance(pretrained, str):
|
137 |
-
logger = logging.getLogger()
|
138 |
-
load_checkpoint(self, pretrained, strict=False, logger=logger)
|
139 |
-
elif pretrained is None:
|
140 |
-
for m in self.modules():
|
141 |
-
if isinstance(m, nn.Conv2d):
|
142 |
-
kaiming_init(m)
|
143 |
-
elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
|
144 |
-
constant_init(m, 1)
|
145 |
-
|
146 |
-
else:
|
147 |
-
raise TypeError('pretrained must be a str or None')
|
148 |
-
|
149 |
-
def _freeze_stages(self):
|
150 |
-
if self.frozen_stages >= 0:
|
151 |
-
for i in range(self.frozen_stages):
|
152 |
-
m = getattr(self, self.cr_blocks[i])
|
153 |
-
m.eval()
|
154 |
-
for param in m.parameters():
|
155 |
-
param.requires_grad = False
|
156 |
-
|
157 |
-
def train(self, mode=True):
|
158 |
-
super(Darknet, self).train(mode)
|
159 |
-
self._freeze_stages()
|
160 |
-
if mode and self.norm_eval:
|
161 |
-
for m in self.modules():
|
162 |
-
if isinstance(m, _BatchNorm):
|
163 |
-
m.eval()
|
164 |
-
|
165 |
-
@staticmethod
|
166 |
-
def make_conv_res_block(in_channels,
|
167 |
-
out_channels,
|
168 |
-
res_repeat,
|
169 |
-
conv_cfg=None,
|
170 |
-
norm_cfg=dict(type='BN', requires_grad=True),
|
171 |
-
act_cfg=dict(type='LeakyReLU',
|
172 |
-
negative_slope=0.1)):
|
173 |
-
"""In Darknet backbone, ConvLayer is usually followed by ResBlock. This
|
174 |
-
function will make that. The Conv layers always have 3x3 filters with
|
175 |
-
stride=2. The number of the filters in Conv layer is the same as the
|
176 |
-
out channels of the ResBlock.
|
177 |
-
|
178 |
-
Args:
|
179 |
-
in_channels (int): The number of input channels.
|
180 |
-
out_channels (int): The number of output channels.
|
181 |
-
res_repeat (int): The number of ResBlocks.
|
182 |
-
conv_cfg (dict): Config dict for convolution layer. Default: None.
|
183 |
-
norm_cfg (dict): Dictionary to construct and config norm layer.
|
184 |
-
Default: dict(type='BN', requires_grad=True)
|
185 |
-
act_cfg (dict): Config dict for activation layer.
|
186 |
-
Default: dict(type='LeakyReLU', negative_slope=0.1).
|
187 |
-
"""
|
188 |
-
|
189 |
-
cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
|
190 |
-
|
191 |
-
model = nn.Sequential()
|
192 |
-
model.add_module(
|
193 |
-
'conv',
|
194 |
-
ConvModule(
|
195 |
-
in_channels, out_channels, 3, stride=2, padding=1, **cfg))
|
196 |
-
for idx in range(res_repeat):
|
197 |
-
model.add_module('res{}'.format(idx),
|
198 |
-
ResBlock(out_channels, **cfg))
|
199 |
-
return model
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r50-d8_512x1024_40k_cityscapes.py
DELETED
@@ -1,4 +0,0 @@
|
|
1 |
-
_base_ = [
|
2 |
-
'../_base_/models/deeplabv3_r50-d8.py', '../_base_/datasets/cityscapes.py',
|
3 |
-
'../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
|
4 |
-
]
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_r50-d8_512x512_80k_ade20k.py
DELETED
@@ -1,6 +0,0 @@
|
|
1 |
-
_base_ = [
|
2 |
-
'../_base_/models/fcn_r50-d8.py', '../_base_/datasets/ade20k.py',
|
3 |
-
'../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
|
4 |
-
]
|
5 |
-
model = dict(
|
6 |
-
decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_segmentation/configs/hrnet/fcn_hr18s_512x1024_160k_cityscapes.py
DELETED
@@ -1,9 +0,0 @@
|
|
1 |
-
_base_ = './fcn_hr18_512x1024_160k_cityscapes.py'
|
2 |
-
model = dict(
|
3 |
-
pretrained='open-mmlab://msra/hrnetv2_w18_small',
|
4 |
-
backbone=dict(
|
5 |
-
extra=dict(
|
6 |
-
stage1=dict(num_blocks=(2, )),
|
7 |
-
stage2=dict(num_blocks=(2, 2)),
|
8 |
-
stage3=dict(num_modules=3, num_blocks=(2, 2, 2)),
|
9 |
-
stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2)))))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_segmentation/configs/ocrnet/ocrnet_r101-d8_512x1024_40k_b16_cityscapes.py
DELETED
@@ -1,7 +0,0 @@
|
|
1 |
-
_base_ = [
|
2 |
-
'../_base_/models/ocrnet_r50-d8.py', '../_base_/datasets/cityscapes.py',
|
3 |
-
'../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
|
4 |
-
]
|
5 |
-
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
|
6 |
-
optimizer = dict(lr=0.02)
|
7 |
-
lr_config = dict(min_lr=2e-4)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Anmol12385/chat123/README.md
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Chat123
|
3 |
-
emoji: 🚀
|
4 |
-
colorFrom: green
|
5 |
-
colorTo: yellow
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.15.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: odc-by
|
11 |
-
---
|
12 |
-
|
13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/guided_diffusion/scripts/classifier_sample.py
DELETED
@@ -1,131 +0,0 @@
|
|
1 |
-
"""
|
2 |
-
Like image_sample.py, but use a noisy image classifier to guide the sampling
|
3 |
-
process towards more realistic images.
|
4 |
-
"""
|
5 |
-
|
6 |
-
import argparse
|
7 |
-
import os
|
8 |
-
|
9 |
-
import numpy as np
|
10 |
-
import torch as th
|
11 |
-
import torch.distributed as dist
|
12 |
-
import torch.nn.functional as F
|
13 |
-
|
14 |
-
from guided_diffusion import dist_util, logger
|
15 |
-
from guided_diffusion.script_util import (
|
16 |
-
NUM_CLASSES,
|
17 |
-
model_and_diffusion_defaults,
|
18 |
-
classifier_defaults,
|
19 |
-
create_model_and_diffusion,
|
20 |
-
create_classifier,
|
21 |
-
add_dict_to_argparser,
|
22 |
-
args_to_dict,
|
23 |
-
)
|
24 |
-
|
25 |
-
|
26 |
-
def main():
|
27 |
-
args = create_argparser().parse_args()
|
28 |
-
|
29 |
-
dist_util.setup_dist()
|
30 |
-
logger.configure()
|
31 |
-
|
32 |
-
logger.log("creating model and diffusion...")
|
33 |
-
model, diffusion = create_model_and_diffusion(
|
34 |
-
**args_to_dict(args, model_and_diffusion_defaults().keys())
|
35 |
-
)
|
36 |
-
model.load_state_dict(
|
37 |
-
dist_util.load_state_dict(args.model_path, map_location="cpu")
|
38 |
-
)
|
39 |
-
model.to(dist_util.dev())
|
40 |
-
if args.use_fp16:
|
41 |
-
model.convert_to_fp16()
|
42 |
-
model.eval()
|
43 |
-
|
44 |
-
logger.log("loading classifier...")
|
45 |
-
classifier = create_classifier(**args_to_dict(args, classifier_defaults().keys()))
|
46 |
-
classifier.load_state_dict(
|
47 |
-
dist_util.load_state_dict(args.classifier_path, map_location="cpu")
|
48 |
-
)
|
49 |
-
classifier.to(dist_util.dev())
|
50 |
-
if args.classifier_use_fp16:
|
51 |
-
classifier.convert_to_fp16()
|
52 |
-
classifier.eval()
|
53 |
-
|
54 |
-
def cond_fn(x, t, y=None):
|
55 |
-
assert y is not None
|
56 |
-
with th.enable_grad():
|
57 |
-
x_in = x.detach().requires_grad_(True)
|
58 |
-
logits = classifier(x_in, t)
|
59 |
-
log_probs = F.log_softmax(logits, dim=-1)
|
60 |
-
selected = log_probs[range(len(logits)), y.view(-1)]
|
61 |
-
return th.autograd.grad(selected.sum(), x_in)[0] * args.classifier_scale
|
62 |
-
|
63 |
-
def model_fn(x, t, y=None):
|
64 |
-
assert y is not None
|
65 |
-
return model(x, t, y if args.class_cond else None)
|
66 |
-
|
67 |
-
logger.log("sampling...")
|
68 |
-
all_images = []
|
69 |
-
all_labels = []
|
70 |
-
while len(all_images) * args.batch_size < args.num_samples:
|
71 |
-
model_kwargs = {}
|
72 |
-
classes = th.randint(
|
73 |
-
low=0, high=NUM_CLASSES, size=(args.batch_size,), device=dist_util.dev()
|
74 |
-
)
|
75 |
-
model_kwargs["y"] = classes
|
76 |
-
sample_fn = (
|
77 |
-
diffusion.p_sample_loop if not args.use_ddim else diffusion.ddim_sample_loop
|
78 |
-
)
|
79 |
-
sample = sample_fn(
|
80 |
-
model_fn,
|
81 |
-
(args.batch_size, 3, args.image_size, args.image_size),
|
82 |
-
clip_denoised=args.clip_denoised,
|
83 |
-
model_kwargs=model_kwargs,
|
84 |
-
cond_fn=cond_fn,
|
85 |
-
device=dist_util.dev(),
|
86 |
-
)
|
87 |
-
sample = ((sample + 1) * 127.5).clamp(0, 255).to(th.uint8)
|
88 |
-
sample = sample.permute(0, 2, 3, 1)
|
89 |
-
sample = sample.contiguous()
|
90 |
-
|
91 |
-
gathered_samples = [th.zeros_like(sample) for _ in range(dist.get_world_size())]
|
92 |
-
dist.all_gather(gathered_samples, sample) # gather not supported with NCCL
|
93 |
-
all_images.extend([sample.cpu().numpy() for sample in gathered_samples])
|
94 |
-
gathered_labels = [th.zeros_like(classes) for _ in range(dist.get_world_size())]
|
95 |
-
dist.all_gather(gathered_labels, classes)
|
96 |
-
all_labels.extend([labels.cpu().numpy() for labels in gathered_labels])
|
97 |
-
logger.log(f"created {len(all_images) * args.batch_size} samples")
|
98 |
-
|
99 |
-
arr = np.concatenate(all_images, axis=0)
|
100 |
-
arr = arr[: args.num_samples]
|
101 |
-
label_arr = np.concatenate(all_labels, axis=0)
|
102 |
-
label_arr = label_arr[: args.num_samples]
|
103 |
-
if dist.get_rank() == 0:
|
104 |
-
shape_str = "x".join([str(x) for x in arr.shape])
|
105 |
-
out_path = os.path.join(logger.get_dir(), f"samples_{shape_str}.npz")
|
106 |
-
logger.log(f"saving to {out_path}")
|
107 |
-
np.savez(out_path, arr, label_arr)
|
108 |
-
|
109 |
-
dist.barrier()
|
110 |
-
logger.log("sampling complete")
|
111 |
-
|
112 |
-
|
113 |
-
def create_argparser():
|
114 |
-
defaults = dict(
|
115 |
-
clip_denoised=True,
|
116 |
-
num_samples=10000,
|
117 |
-
batch_size=16,
|
118 |
-
use_ddim=False,
|
119 |
-
model_path="",
|
120 |
-
classifier_path="",
|
121 |
-
classifier_scale=1.0,
|
122 |
-
)
|
123 |
-
defaults.update(model_and_diffusion_defaults())
|
124 |
-
defaults.update(classifier_defaults())
|
125 |
-
parser = argparse.ArgumentParser()
|
126 |
-
add_dict_to_argparser(parser, defaults)
|
127 |
-
return parser
|
128 |
-
|
129 |
-
|
130 |
-
if __name__ == "__main__":
|
131 |
-
main()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AntNikYab/NaturalLanguageProcessing/app.py
DELETED
@@ -1,22 +0,0 @@
|
|
1 |
-
import streamlit as st
|
2 |
-
import ssl
|
3 |
-
|
4 |
-
# Отключение проверки SSL-сертификата
|
5 |
-
ssl._create_default_https_context = ssl._create_unverified_context
|
6 |
-
|
7 |
-
st.set_page_config(
|
8 |
-
page_title='Проект. Обработка естественного языка',
|
9 |
-
layout='wide'
|
10 |
-
)
|
11 |
-
|
12 |
-
st.sidebar.header("Home page")
|
13 |
-
c1, c2 = st.columns(2)
|
14 |
-
c2.image('images/image.jpeg')
|
15 |
-
c1.markdown("""
|
16 |
-
# Проект. Обработка естественного языка
|
17 |
-
Cостоит из 3 частей:
|
18 |
-
### 1. Классификация отзыва на поликлиники
|
19 |
-
### 2. Генерация текста GPT-моделью в стиле А.С. Пушкина, В.В. Маяковского.
|
20 |
-
### Бонус - Кодекс Братана
|
21 |
-
### 3. Оценка степени токсичности пользовательского сообщения
|
22 |
-
""")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Arnx/MusicGenXvAKN/tests/modules/test_codebooks_patterns.py
DELETED
@@ -1,246 +0,0 @@
|
|
1 |
-
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
2 |
-
# All rights reserved.
|
3 |
-
#
|
4 |
-
# This source code is licensed under the license found in the
|
5 |
-
# LICENSE file in the root directory of this source tree.
|
6 |
-
|
7 |
-
import pytest
|
8 |
-
import torch
|
9 |
-
|
10 |
-
from audiocraft.modules.codebooks_patterns import (
|
11 |
-
DelayedPatternProvider,
|
12 |
-
ParallelPatternProvider,
|
13 |
-
Pattern,
|
14 |
-
UnrolledPatternProvider,
|
15 |
-
)
|
16 |
-
|
17 |
-
|
18 |
-
class TestParallelPatternProvider:
|
19 |
-
|
20 |
-
@pytest.mark.parametrize("n_q", [1, 4, 32])
|
21 |
-
@pytest.mark.parametrize("timesteps", [0, 1, 16, 100])
|
22 |
-
def test_get_pattern(self, n_q: int, timesteps: int):
|
23 |
-
provider = ParallelPatternProvider(n_q)
|
24 |
-
pattern = provider.get_pattern(timesteps)
|
25 |
-
# + 1 to account for 1st step
|
26 |
-
assert len(pattern.layout) == timesteps + 1
|
27 |
-
|
28 |
-
@pytest.mark.parametrize("n_q", [1, 4, 32])
|
29 |
-
@pytest.mark.parametrize("timesteps", [8, 16, 100])
|
30 |
-
def test_pattern_content(self, n_q: int, timesteps: int):
|
31 |
-
provider = ParallelPatternProvider(n_q)
|
32 |
-
pattern = provider.get_pattern(timesteps)
|
33 |
-
for s, v in enumerate(pattern.layout):
|
34 |
-
for i, code in enumerate(v):
|
35 |
-
assert i == code.q
|
36 |
-
assert code.t == s - 1 # account for the 1st empty step
|
37 |
-
|
38 |
-
@pytest.mark.parametrize("n_q", [1, 4, 32])
|
39 |
-
@pytest.mark.parametrize("timesteps", [8, 16, 100])
|
40 |
-
def test_pattern_max_delay(self, n_q: int, timesteps: int):
|
41 |
-
provider = ParallelPatternProvider(n_q)
|
42 |
-
pattern = provider.get_pattern(timesteps)
|
43 |
-
assert pattern.max_delay == 0
|
44 |
-
assert len(pattern.valid_layout) == len(pattern.layout) - pattern.max_delay
|
45 |
-
|
46 |
-
|
47 |
-
class TestDelayedPatternProvider:
|
48 |
-
|
49 |
-
@pytest.mark.parametrize("n_q", [1, 4, 32])
|
50 |
-
@pytest.mark.parametrize("timesteps", [0, 1, 16, 100])
|
51 |
-
def test_get_pattern(self, n_q: int, timesteps: int):
|
52 |
-
delays = [
|
53 |
-
list(range(n_q)),
|
54 |
-
[0] + [1] * (n_q - 1),
|
55 |
-
[0] + [4] * (n_q - 1),
|
56 |
-
]
|
57 |
-
for delay in delays:
|
58 |
-
provider = DelayedPatternProvider(n_q, delay)
|
59 |
-
pattern = provider.get_pattern(timesteps)
|
60 |
-
# + 1 to account for 1st step
|
61 |
-
assert len(pattern.layout) == timesteps + max(delay) + 1
|
62 |
-
|
63 |
-
@pytest.mark.parametrize("n_q", [1, 4, 32])
|
64 |
-
@pytest.mark.parametrize("timesteps", [8, 16, 100])
|
65 |
-
def test_pattern_content(self, n_q: int, timesteps: int):
|
66 |
-
provider = DelayedPatternProvider(n_q)
|
67 |
-
pattern = provider.get_pattern(timesteps)
|
68 |
-
for s, v in enumerate(pattern.layout):
|
69 |
-
for i, code in enumerate(v):
|
70 |
-
assert i == code.q
|
71 |
-
assert code.t == max(0, s - code.q - 1)
|
72 |
-
|
73 |
-
@pytest.mark.parametrize("timesteps", [8, 16, 100])
|
74 |
-
@pytest.mark.parametrize("delay", [[0, 1, 2, 3], [0, 1, 1, 1], [0, 3, 3, 3], [0, 3]])
|
75 |
-
def test_pattern_max_delay(self, timesteps: int, delay: list):
|
76 |
-
provider = DelayedPatternProvider(len(delay), delay)
|
77 |
-
pattern = provider.get_pattern(timesteps)
|
78 |
-
assert pattern.max_delay == max(delay)
|
79 |
-
assert len(pattern.valid_layout) == len(pattern.layout) - pattern.max_delay
|
80 |
-
|
81 |
-
|
82 |
-
class TestUnrolledPatternProvider:
|
83 |
-
|
84 |
-
@pytest.mark.parametrize("timesteps", [0, 1, 16])
|
85 |
-
@pytest.mark.parametrize("flattening", [[0, 1, 2], [0, 1, 1]])
|
86 |
-
@pytest.mark.parametrize("delays", [[0, 0, 0], [0, 5, 5]])
|
87 |
-
def test_get_pattern(self, timesteps: int, flattening: list, delays: list):
|
88 |
-
n_q = len(flattening)
|
89 |
-
max_delay = max(delays)
|
90 |
-
provider = UnrolledPatternProvider(n_q, flattening, delays)
|
91 |
-
pattern = provider.get_pattern(timesteps)
|
92 |
-
assert len(pattern.layout) == provider.num_virtual_steps(timesteps) + max_delay
|
93 |
-
|
94 |
-
@pytest.mark.parametrize("timesteps", [0, 1, 16])
|
95 |
-
@pytest.mark.parametrize("flattening", [[0, 1, 2], [0, 1, 1]])
|
96 |
-
@pytest.mark.parametrize("delays", [[0, 0, 0], [0, 5, 5]])
|
97 |
-
def test_pattern_max_delay(self, timesteps: int, flattening: list, delays: list):
|
98 |
-
n_q = len(flattening)
|
99 |
-
max_delay = max(delays)
|
100 |
-
provider = UnrolledPatternProvider(n_q, flattening, delays)
|
101 |
-
pattern = provider.get_pattern(timesteps)
|
102 |
-
assert pattern.max_delay == max_delay
|
103 |
-
|
104 |
-
|
105 |
-
class TestPattern:
|
106 |
-
|
107 |
-
def ref_build_pattern_sequence(self, z: torch.Tensor, pattern: Pattern, special_token: int):
|
108 |
-
"""Reference method to build the sequence from the pattern without using fancy scatter."""
|
109 |
-
bs, n_q, T = z.shape
|
110 |
-
z = z.cpu().numpy()
|
111 |
-
assert n_q == pattern.n_q
|
112 |
-
assert T <= pattern.timesteps
|
113 |
-
inp = torch.full((bs, n_q, len(pattern.layout)), special_token, dtype=torch.long).numpy()
|
114 |
-
inp[:] = special_token
|
115 |
-
for s, v in enumerate(pattern.layout):
|
116 |
-
for (t, q) in v:
|
117 |
-
if t < T:
|
118 |
-
inp[:, q, s] = z[:, q, t]
|
119 |
-
return torch.from_numpy(inp)
|
120 |
-
|
121 |
-
def ref_revert_pattern_sequence(self, z: torch.Tensor, pattern: Pattern, special_token: int):
|
122 |
-
"""Reference method to revert the sequence from the pattern without using fancy scatter."""
|
123 |
-
z = z.cpu().numpy()
|
124 |
-
bs, n_q, S = z.shape
|
125 |
-
assert pattern.n_q == n_q
|
126 |
-
inp = torch.full((bs, pattern.n_q, pattern.timesteps), special_token, dtype=torch.long).numpy()
|
127 |
-
inp[:] = special_token
|
128 |
-
for s, v in enumerate(pattern.layout):
|
129 |
-
for (t, q) in v:
|
130 |
-
if t < pattern.timesteps:
|
131 |
-
inp[:, q, t] = z[:, q, s]
|
132 |
-
return torch.from_numpy(inp)
|
133 |
-
|
134 |
-
def ref_revert_pattern_logits(self, z: torch.Tensor, pattern: Pattern, special_token: float):
|
135 |
-
"""Reference method to revert the logits from the pattern without using fancy scatter."""
|
136 |
-
z = z.cpu().numpy()
|
137 |
-
bs, card, n_q, S = z.shape
|
138 |
-
assert pattern.n_q == n_q
|
139 |
-
ref_layout = pattern.layout
|
140 |
-
inp = torch.full((bs, card, pattern.n_q, pattern.timesteps), special_token, dtype=torch.float).numpy()
|
141 |
-
inp[:] = special_token
|
142 |
-
for s, v in enumerate(ref_layout[1:]):
|
143 |
-
if s < S:
|
144 |
-
for (t, q) in v:
|
145 |
-
if t < pattern.timesteps:
|
146 |
-
inp[:, :, q, t] = z[:, :, q, s]
|
147 |
-
return torch.from_numpy(inp)
|
148 |
-
|
149 |
-
def _get_pattern_providers(self, n_q: int):
|
150 |
-
pattern_provider_1 = ParallelPatternProvider(n_q)
|
151 |
-
pattern_provider_2 = DelayedPatternProvider(n_q, list(range(n_q)))
|
152 |
-
pattern_provider_3 = DelayedPatternProvider(n_q, [0] + [1] * (n_q - 1))
|
153 |
-
pattern_provider_4 = UnrolledPatternProvider(
|
154 |
-
n_q, flattening=list(range(n_q)), delays=[0] * n_q
|
155 |
-
)
|
156 |
-
pattern_provider_5 = UnrolledPatternProvider(
|
157 |
-
n_q, flattening=[0] + [1] * (n_q - 1), delays=[0] * n_q
|
158 |
-
)
|
159 |
-
pattern_provider_6 = UnrolledPatternProvider(
|
160 |
-
n_q, flattening=[0] + [1] * (n_q - 1), delays=[0] + [5] * (n_q - 1)
|
161 |
-
)
|
162 |
-
return [
|
163 |
-
pattern_provider_1,
|
164 |
-
pattern_provider_2,
|
165 |
-
pattern_provider_3,
|
166 |
-
pattern_provider_4,
|
167 |
-
pattern_provider_5,
|
168 |
-
pattern_provider_6,
|
169 |
-
]
|
170 |
-
|
171 |
-
@pytest.mark.parametrize("n_q", [1, 4, 32])
|
172 |
-
@pytest.mark.parametrize("timesteps", [16, 72])
|
173 |
-
def test_build_pattern_sequence(self, n_q: int, timesteps: int):
|
174 |
-
bs = 2
|
175 |
-
card = 256
|
176 |
-
special_token = card
|
177 |
-
|
178 |
-
pattern_providers = self._get_pattern_providers(n_q)
|
179 |
-
for pattern_provider in pattern_providers:
|
180 |
-
pattern = pattern_provider.get_pattern(timesteps)
|
181 |
-
# we can correctly build the sequence from the pattern
|
182 |
-
z = torch.randint(0, card, (bs, n_q, timesteps))
|
183 |
-
ref_res = self.ref_build_pattern_sequence(z, pattern, special_token)
|
184 |
-
res, indexes, mask = pattern.build_pattern_sequence(z, special_token)
|
185 |
-
assert (res == ref_res).float().mean() == 1.0
|
186 |
-
|
187 |
-
# expected assertion fails on the number of timesteps
|
188 |
-
invalid_timesteps = [timesteps + 1]
|
189 |
-
if pattern.num_sequence_steps != pattern.timesteps:
|
190 |
-
invalid_timesteps.append(pattern.num_sequence_steps)
|
191 |
-
for i_timesteps in invalid_timesteps:
|
192 |
-
z2 = torch.randint(0, card, (bs, n_q, i_timesteps))
|
193 |
-
with pytest.raises(AssertionError):
|
194 |
-
pattern.build_pattern_sequence(z2, special_token)
|
195 |
-
|
196 |
-
# expected assertion fails on the number of codebooks
|
197 |
-
invalid_qs = [0, n_q - 1, n_q + 1]
|
198 |
-
for i_q in invalid_qs:
|
199 |
-
z3 = torch.randint(0, card, (bs, i_q, timesteps))
|
200 |
-
with pytest.raises(AssertionError):
|
201 |
-
pattern.build_pattern_sequence(z3, special_token)
|
202 |
-
|
203 |
-
@pytest.mark.parametrize("n_q", [1, 4, 32])
|
204 |
-
@pytest.mark.parametrize("timesteps", [16, 72])
|
205 |
-
def test_revert_pattern_sequence(self, n_q: int, timesteps: int):
|
206 |
-
bs = 2
|
207 |
-
card = 256
|
208 |
-
special_token = card
|
209 |
-
|
210 |
-
pattern_providers = self._get_pattern_providers(n_q)
|
211 |
-
for pattern_provider in pattern_providers:
|
212 |
-
pattern = pattern_provider.get_pattern(timesteps)
|
213 |
-
# this works assuming previous tests are successful
|
214 |
-
z = torch.randint(0, card, (bs, n_q, timesteps))
|
215 |
-
s = self.ref_build_pattern_sequence(z, pattern, special_token)
|
216 |
-
ref_out = self.ref_revert_pattern_sequence(s, pattern, special_token)
|
217 |
-
# ensure our reference script retrieve the original sequence
|
218 |
-
assert z.shape == ref_out.shape
|
219 |
-
assert (z == ref_out).float().mean() == 1.0
|
220 |
-
# now we can test the scatter version
|
221 |
-
out, indexes, mask = pattern.revert_pattern_sequence(s, special_token)
|
222 |
-
assert out.shape == ref_out.shape
|
223 |
-
assert (out == ref_out).float().mean() == 1.0
|
224 |
-
|
225 |
-
@pytest.mark.parametrize("n_q", [1, 4, 32])
|
226 |
-
@pytest.mark.parametrize("timesteps", [16, 72])
|
227 |
-
@pytest.mark.parametrize("card", [1, 2, 256, 1024])
|
228 |
-
def test_revert_pattern_logits(self, n_q: int, timesteps: int, card: int):
|
229 |
-
bs = 2
|
230 |
-
special_token = card
|
231 |
-
logits_special_token = float('nan')
|
232 |
-
|
233 |
-
pattern_providers = self._get_pattern_providers(n_q)
|
234 |
-
for pattern_provider in pattern_providers:
|
235 |
-
pattern = pattern_provider.get_pattern(timesteps)
|
236 |
-
# this works assuming previous tests are successful
|
237 |
-
z = torch.randint(0, card, (bs, n_q, timesteps))
|
238 |
-
s = self.ref_build_pattern_sequence(z, pattern, special_token)
|
239 |
-
logits = torch.randn((bs, card, n_q, s.shape[-1]))
|
240 |
-
ref_out = self.ref_revert_pattern_logits(logits, pattern, logits_special_token)
|
241 |
-
# ensure our reference script retrieve the original sequence
|
242 |
-
assert ref_out.shape == torch.Size([bs, card, n_q, timesteps])
|
243 |
-
# now we can test the scatter version
|
244 |
-
out, indexes, mask = pattern.revert_pattern_logits(logits, logits_special_token)
|
245 |
-
assert out.shape == ref_out.shape
|
246 |
-
assert (out == ref_out).float().mean() == 1.0
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Asahi402/White-box-Cartoonization/wbc/cartoonize.py
DELETED
@@ -1,112 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
import cv2
|
3 |
-
import numpy as np
|
4 |
-
import tensorflow as tf
|
5 |
-
import wbc.network as network
|
6 |
-
import wbc.guided_filter as guided_filter
|
7 |
-
from tqdm import tqdm
|
8 |
-
|
9 |
-
|
10 |
-
def resize_crop(image):
|
11 |
-
h, w, c = np.shape(image)
|
12 |
-
if min(h, w) > 720:
|
13 |
-
if h > w:
|
14 |
-
h, w = int(720 * h / w), 720
|
15 |
-
else:
|
16 |
-
h, w = 720, int(720 * w / h)
|
17 |
-
image = cv2.resize(image, (w, h),
|
18 |
-
interpolation=cv2.INTER_AREA)
|
19 |
-
h, w = (h // 8) * 8, (w // 8) * 8
|
20 |
-
image = image[:h, :w, :]
|
21 |
-
return image
|
22 |
-
|
23 |
-
|
24 |
-
def cartoonize(load_folder, save_folder, model_path):
|
25 |
-
print(model_path)
|
26 |
-
input_photo = tf.placeholder(tf.float32, [1, None, None, 3])
|
27 |
-
network_out = network.unet_generator(input_photo)
|
28 |
-
final_out = guided_filter.guided_filter(input_photo, network_out, r=1, eps=5e-3)
|
29 |
-
|
30 |
-
all_vars = tf.trainable_variables()
|
31 |
-
gene_vars = [var for var in all_vars if 'generator' in var.name]
|
32 |
-
saver = tf.train.Saver(var_list=gene_vars)
|
33 |
-
|
34 |
-
config = tf.ConfigProto()
|
35 |
-
config.gpu_options.allow_growth = True
|
36 |
-
sess = tf.Session(config=config)
|
37 |
-
|
38 |
-
sess.run(tf.global_variables_initializer())
|
39 |
-
saver.restore(sess, tf.train.latest_checkpoint(model_path))
|
40 |
-
name_list = os.listdir(load_folder)
|
41 |
-
for name in tqdm(name_list):
|
42 |
-
try:
|
43 |
-
load_path = os.path.join(load_folder, name)
|
44 |
-
save_path = os.path.join(save_folder, name)
|
45 |
-
image = cv2.imread(load_path)
|
46 |
-
image = resize_crop(image)
|
47 |
-
batch_image = image.astype(np.float32) / 127.5 - 1
|
48 |
-
batch_image = np.expand_dims(batch_image, axis=0)
|
49 |
-
output = sess.run(final_out, feed_dict={input_photo: batch_image})
|
50 |
-
output = (np.squeeze(output) + 1) * 127.5
|
51 |
-
output = np.clip(output, 0, 255).astype(np.uint8)
|
52 |
-
cv2.imwrite(save_path, output)
|
53 |
-
except:
|
54 |
-
print('cartoonize {} failed'.format(load_path))
|
55 |
-
|
56 |
-
|
57 |
-
class Cartoonize:
|
58 |
-
def __init__(self, model_path):
|
59 |
-
print(model_path)
|
60 |
-
self.input_photo = tf.placeholder(tf.float32, [1, None, None, 3])
|
61 |
-
network_out = network.unet_generator(self.input_photo)
|
62 |
-
self.final_out = guided_filter.guided_filter(self.input_photo, network_out, r=1, eps=5e-3)
|
63 |
-
|
64 |
-
all_vars = tf.trainable_variables()
|
65 |
-
gene_vars = [var for var in all_vars if 'generator' in var.name]
|
66 |
-
saver = tf.train.Saver(var_list=gene_vars)
|
67 |
-
|
68 |
-
config = tf.ConfigProto()
|
69 |
-
config.gpu_options.allow_growth = True
|
70 |
-
self.sess = tf.Session(config=config)
|
71 |
-
|
72 |
-
self.sess.run(tf.global_variables_initializer())
|
73 |
-
saver.restore(self.sess, tf.train.latest_checkpoint(model_path))
|
74 |
-
|
75 |
-
def run(self, load_folder, save_folder):
|
76 |
-
name_list = os.listdir(load_folder)
|
77 |
-
for name in tqdm(name_list):
|
78 |
-
try:
|
79 |
-
load_path = os.path.join(load_folder, name)
|
80 |
-
save_path = os.path.join(save_folder, name)
|
81 |
-
image = cv2.imread(load_path)
|
82 |
-
image = resize_crop(image)
|
83 |
-
batch_image = image.astype(np.float32) / 127.5 - 1
|
84 |
-
batch_image = np.expand_dims(batch_image, axis=0)
|
85 |
-
output = self.sess.run(self.final_out, feed_dict={self.input_photo: batch_image})
|
86 |
-
output = (np.squeeze(output) + 1) * 127.5
|
87 |
-
output = np.clip(output, 0, 255).astype(np.uint8)
|
88 |
-
cv2.imwrite(save_path, output)
|
89 |
-
except:
|
90 |
-
print('cartoonize {} failed'.format(load_path))
|
91 |
-
|
92 |
-
def run_sigle(self, load_path, save_path):
|
93 |
-
try:
|
94 |
-
image = cv2.imread(load_path)
|
95 |
-
image = resize_crop(image)
|
96 |
-
batch_image = image.astype(np.float32) / 127.5 - 1
|
97 |
-
batch_image = np.expand_dims(batch_image, axis=0)
|
98 |
-
output = self.sess.run(self.final_out, feed_dict={self.input_photo: batch_image})
|
99 |
-
output = (np.squeeze(output) + 1) * 127.5
|
100 |
-
output = np.clip(output, 0, 255).astype(np.uint8)
|
101 |
-
cv2.imwrite(save_path, output)
|
102 |
-
except:
|
103 |
-
print('cartoonize {} failed'.format(load_path))
|
104 |
-
|
105 |
-
|
106 |
-
if __name__ == '__main__':
|
107 |
-
model_path = 'saved_models'
|
108 |
-
load_folder = 'test_images'
|
109 |
-
save_folder = 'cartoonized_images'
|
110 |
-
if not os.path.exists(save_folder):
|
111 |
-
os.mkdir(save_folder)
|
112 |
-
cartoonize(load_folder, save_folder, model_path)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/certifi/__init__.py
DELETED
@@ -1,4 +0,0 @@
|
|
1 |
-
from .core import contents, where
|
2 |
-
|
3 |
-
__all__ = ["contents", "where"]
|
4 |
-
__version__ = "2022.12.07"
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/text.py
DELETED
@@ -1,1307 +0,0 @@
|
|
1 |
-
import re
|
2 |
-
from functools import partial, reduce
|
3 |
-
from math import gcd
|
4 |
-
from operator import itemgetter
|
5 |
-
from typing import (
|
6 |
-
TYPE_CHECKING,
|
7 |
-
Any,
|
8 |
-
Callable,
|
9 |
-
Dict,
|
10 |
-
Iterable,
|
11 |
-
List,
|
12 |
-
NamedTuple,
|
13 |
-
Optional,
|
14 |
-
Tuple,
|
15 |
-
Union,
|
16 |
-
)
|
17 |
-
|
18 |
-
from ._loop import loop_last
|
19 |
-
from ._pick import pick_bool
|
20 |
-
from ._wrap import divide_line
|
21 |
-
from .align import AlignMethod
|
22 |
-
from .cells import cell_len, set_cell_size
|
23 |
-
from .containers import Lines
|
24 |
-
from .control import strip_control_codes
|
25 |
-
from .emoji import EmojiVariant
|
26 |
-
from .jupyter import JupyterMixin
|
27 |
-
from .measure import Measurement
|
28 |
-
from .segment import Segment
|
29 |
-
from .style import Style, StyleType
|
30 |
-
|
31 |
-
if TYPE_CHECKING: # pragma: no cover
|
32 |
-
from .console import Console, ConsoleOptions, JustifyMethod, OverflowMethod
|
33 |
-
|
34 |
-
DEFAULT_JUSTIFY: "JustifyMethod" = "default"
|
35 |
-
DEFAULT_OVERFLOW: "OverflowMethod" = "fold"
|
36 |
-
|
37 |
-
|
38 |
-
_re_whitespace = re.compile(r"\s+$")
|
39 |
-
|
40 |
-
TextType = Union[str, "Text"]
|
41 |
-
|
42 |
-
GetStyleCallable = Callable[[str], Optional[StyleType]]
|
43 |
-
|
44 |
-
|
45 |
-
class Span(NamedTuple):
|
46 |
-
"""A marked up region in some text."""
|
47 |
-
|
48 |
-
start: int
|
49 |
-
"""Span start index."""
|
50 |
-
end: int
|
51 |
-
"""Span end index."""
|
52 |
-
style: Union[str, Style]
|
53 |
-
"""Style associated with the span."""
|
54 |
-
|
55 |
-
def __repr__(self) -> str:
|
56 |
-
return f"Span({self.start}, {self.end}, {self.style!r})"
|
57 |
-
|
58 |
-
def __bool__(self) -> bool:
|
59 |
-
return self.end > self.start
|
60 |
-
|
61 |
-
def split(self, offset: int) -> Tuple["Span", Optional["Span"]]:
|
62 |
-
"""Split a span in to 2 from a given offset."""
|
63 |
-
|
64 |
-
if offset < self.start:
|
65 |
-
return self, None
|
66 |
-
if offset >= self.end:
|
67 |
-
return self, None
|
68 |
-
|
69 |
-
start, end, style = self
|
70 |
-
span1 = Span(start, min(end, offset), style)
|
71 |
-
span2 = Span(span1.end, end, style)
|
72 |
-
return span1, span2
|
73 |
-
|
74 |
-
def move(self, offset: int) -> "Span":
|
75 |
-
"""Move start and end by a given offset.
|
76 |
-
|
77 |
-
Args:
|
78 |
-
offset (int): Number of characters to add to start and end.
|
79 |
-
|
80 |
-
Returns:
|
81 |
-
TextSpan: A new TextSpan with adjusted position.
|
82 |
-
"""
|
83 |
-
start, end, style = self
|
84 |
-
return Span(start + offset, end + offset, style)
|
85 |
-
|
86 |
-
def right_crop(self, offset: int) -> "Span":
|
87 |
-
"""Crop the span at the given offset.
|
88 |
-
|
89 |
-
Args:
|
90 |
-
offset (int): A value between start and end.
|
91 |
-
|
92 |
-
Returns:
|
93 |
-
Span: A new (possibly smaller) span.
|
94 |
-
"""
|
95 |
-
start, end, style = self
|
96 |
-
if offset >= end:
|
97 |
-
return self
|
98 |
-
return Span(start, min(offset, end), style)
|
99 |
-
|
100 |
-
|
101 |
-
class Text(JupyterMixin):
|
102 |
-
"""Text with color / style.
|
103 |
-
|
104 |
-
Args:
|
105 |
-
text (str, optional): Default unstyled text. Defaults to "".
|
106 |
-
style (Union[str, Style], optional): Base style for text. Defaults to "".
|
107 |
-
justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None.
|
108 |
-
overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None.
|
109 |
-
no_wrap (bool, optional): Disable text wrapping, or None for default. Defaults to None.
|
110 |
-
end (str, optional): Character to end text with. Defaults to "\\\\n".
|
111 |
-
tab_size (int): Number of spaces per tab, or ``None`` to use ``console.tab_size``. Defaults to 8.
|
112 |
-
spans (List[Span], optional). A list of predefined style spans. Defaults to None.
|
113 |
-
"""
|
114 |
-
|
115 |
-
__slots__ = [
|
116 |
-
"_text",
|
117 |
-
"style",
|
118 |
-
"justify",
|
119 |
-
"overflow",
|
120 |
-
"no_wrap",
|
121 |
-
"end",
|
122 |
-
"tab_size",
|
123 |
-
"_spans",
|
124 |
-
"_length",
|
125 |
-
]
|
126 |
-
|
127 |
-
def __init__(
|
128 |
-
self,
|
129 |
-
text: str = "",
|
130 |
-
style: Union[str, Style] = "",
|
131 |
-
*,
|
132 |
-
justify: Optional["JustifyMethod"] = None,
|
133 |
-
overflow: Optional["OverflowMethod"] = None,
|
134 |
-
no_wrap: Optional[bool] = None,
|
135 |
-
end: str = "\n",
|
136 |
-
tab_size: Optional[int] = 8,
|
137 |
-
spans: Optional[List[Span]] = None,
|
138 |
-
) -> None:
|
139 |
-
sanitized_text = strip_control_codes(text)
|
140 |
-
self._text = [sanitized_text]
|
141 |
-
self.style = style
|
142 |
-
self.justify: Optional["JustifyMethod"] = justify
|
143 |
-
self.overflow: Optional["OverflowMethod"] = overflow
|
144 |
-
self.no_wrap = no_wrap
|
145 |
-
self.end = end
|
146 |
-
self.tab_size = tab_size
|
147 |
-
self._spans: List[Span] = spans or []
|
148 |
-
self._length: int = len(sanitized_text)
|
149 |
-
|
150 |
-
def __len__(self) -> int:
|
151 |
-
return self._length
|
152 |
-
|
153 |
-
def __bool__(self) -> bool:
|
154 |
-
return bool(self._length)
|
155 |
-
|
156 |
-
def __str__(self) -> str:
|
157 |
-
return self.plain
|
158 |
-
|
159 |
-
def __repr__(self) -> str:
|
160 |
-
return f"<text {self.plain!r} {self._spans!r}>"
|
161 |
-
|
162 |
-
def __add__(self, other: Any) -> "Text":
|
163 |
-
if isinstance(other, (str, Text)):
|
164 |
-
result = self.copy()
|
165 |
-
result.append(other)
|
166 |
-
return result
|
167 |
-
return NotImplemented
|
168 |
-
|
169 |
-
def __eq__(self, other: object) -> bool:
|
170 |
-
if not isinstance(other, Text):
|
171 |
-
return NotImplemented
|
172 |
-
return self.plain == other.plain and self._spans == other._spans
|
173 |
-
|
174 |
-
def __contains__(self, other: object) -> bool:
|
175 |
-
if isinstance(other, str):
|
176 |
-
return other in self.plain
|
177 |
-
elif isinstance(other, Text):
|
178 |
-
return other.plain in self.plain
|
179 |
-
return False
|
180 |
-
|
181 |
-
def __getitem__(self, slice: Union[int, slice]) -> "Text":
|
182 |
-
def get_text_at(offset: int) -> "Text":
|
183 |
-
_Span = Span
|
184 |
-
text = Text(
|
185 |
-
self.plain[offset],
|
186 |
-
spans=[
|
187 |
-
_Span(0, 1, style)
|
188 |
-
for start, end, style in self._spans
|
189 |
-
if end > offset >= start
|
190 |
-
],
|
191 |
-
end="",
|
192 |
-
)
|
193 |
-
return text
|
194 |
-
|
195 |
-
if isinstance(slice, int):
|
196 |
-
return get_text_at(slice)
|
197 |
-
else:
|
198 |
-
start, stop, step = slice.indices(len(self.plain))
|
199 |
-
if step == 1:
|
200 |
-
lines = self.divide([start, stop])
|
201 |
-
return lines[1]
|
202 |
-
else:
|
203 |
-
# This would be a bit of work to implement efficiently
|
204 |
-
# For now, its not required
|
205 |
-
raise TypeError("slices with step!=1 are not supported")
|
206 |
-
|
207 |
-
@property
|
208 |
-
def cell_len(self) -> int:
|
209 |
-
"""Get the number of cells required to render this text."""
|
210 |
-
return cell_len(self.plain)
|
211 |
-
|
212 |
-
@property
|
213 |
-
def markup(self) -> str:
|
214 |
-
"""Get console markup to render this Text.
|
215 |
-
|
216 |
-
Returns:
|
217 |
-
str: A string potentially creating markup tags.
|
218 |
-
"""
|
219 |
-
from .markup import escape
|
220 |
-
|
221 |
-
output: List[str] = []
|
222 |
-
|
223 |
-
plain = self.plain
|
224 |
-
markup_spans = [
|
225 |
-
(0, False, self.style),
|
226 |
-
*((span.start, False, span.style) for span in self._spans),
|
227 |
-
*((span.end, True, span.style) for span in self._spans),
|
228 |
-
(len(plain), True, self.style),
|
229 |
-
]
|
230 |
-
markup_spans.sort(key=itemgetter(0, 1))
|
231 |
-
position = 0
|
232 |
-
append = output.append
|
233 |
-
for offset, closing, style in markup_spans:
|
234 |
-
if offset > position:
|
235 |
-
append(escape(plain[position:offset]))
|
236 |
-
position = offset
|
237 |
-
if style:
|
238 |
-
append(f"[/{style}]" if closing else f"[{style}]")
|
239 |
-
markup = "".join(output)
|
240 |
-
return markup
|
241 |
-
|
242 |
-
@classmethod
|
243 |
-
def from_markup(
|
244 |
-
cls,
|
245 |
-
text: str,
|
246 |
-
*,
|
247 |
-
style: Union[str, Style] = "",
|
248 |
-
emoji: bool = True,
|
249 |
-
emoji_variant: Optional[EmojiVariant] = None,
|
250 |
-
justify: Optional["JustifyMethod"] = None,
|
251 |
-
overflow: Optional["OverflowMethod"] = None,
|
252 |
-
end: str = "\n",
|
253 |
-
) -> "Text":
|
254 |
-
"""Create Text instance from markup.
|
255 |
-
|
256 |
-
Args:
|
257 |
-
text (str): A string containing console markup.
|
258 |
-
emoji (bool, optional): Also render emoji code. Defaults to True.
|
259 |
-
justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None.
|
260 |
-
overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None.
|
261 |
-
end (str, optional): Character to end text with. Defaults to "\\\\n".
|
262 |
-
|
263 |
-
Returns:
|
264 |
-
Text: A Text instance with markup rendered.
|
265 |
-
"""
|
266 |
-
from .markup import render
|
267 |
-
|
268 |
-
rendered_text = render(text, style, emoji=emoji, emoji_variant=emoji_variant)
|
269 |
-
rendered_text.justify = justify
|
270 |
-
rendered_text.overflow = overflow
|
271 |
-
rendered_text.end = end
|
272 |
-
return rendered_text
|
273 |
-
|
274 |
-
@classmethod
|
275 |
-
def from_ansi(
|
276 |
-
cls,
|
277 |
-
text: str,
|
278 |
-
*,
|
279 |
-
style: Union[str, Style] = "",
|
280 |
-
justify: Optional["JustifyMethod"] = None,
|
281 |
-
overflow: Optional["OverflowMethod"] = None,
|
282 |
-
no_wrap: Optional[bool] = None,
|
283 |
-
end: str = "\n",
|
284 |
-
tab_size: Optional[int] = 8,
|
285 |
-
) -> "Text":
|
286 |
-
"""Create a Text object from a string containing ANSI escape codes.
|
287 |
-
|
288 |
-
Args:
|
289 |
-
text (str): A string containing escape codes.
|
290 |
-
style (Union[str, Style], optional): Base style for text. Defaults to "".
|
291 |
-
justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None.
|
292 |
-
overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None.
|
293 |
-
no_wrap (bool, optional): Disable text wrapping, or None for default. Defaults to None.
|
294 |
-
end (str, optional): Character to end text with. Defaults to "\\\\n".
|
295 |
-
tab_size (int): Number of spaces per tab, or ``None`` to use ``console.tab_size``. Defaults to 8.
|
296 |
-
"""
|
297 |
-
from .ansi import AnsiDecoder
|
298 |
-
|
299 |
-
joiner = Text(
|
300 |
-
"\n",
|
301 |
-
justify=justify,
|
302 |
-
overflow=overflow,
|
303 |
-
no_wrap=no_wrap,
|
304 |
-
end=end,
|
305 |
-
tab_size=tab_size,
|
306 |
-
style=style,
|
307 |
-
)
|
308 |
-
decoder = AnsiDecoder()
|
309 |
-
result = joiner.join(line for line in decoder.decode(text))
|
310 |
-
return result
|
311 |
-
|
312 |
-
@classmethod
|
313 |
-
def styled(
|
314 |
-
cls,
|
315 |
-
text: str,
|
316 |
-
style: StyleType = "",
|
317 |
-
*,
|
318 |
-
justify: Optional["JustifyMethod"] = None,
|
319 |
-
overflow: Optional["OverflowMethod"] = None,
|
320 |
-
) -> "Text":
|
321 |
-
"""Construct a Text instance with a pre-applied styled. A style applied in this way won't be used
|
322 |
-
to pad the text when it is justified.
|
323 |
-
|
324 |
-
Args:
|
325 |
-
text (str): A string containing console markup.
|
326 |
-
style (Union[str, Style]): Style to apply to the text. Defaults to "".
|
327 |
-
justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None.
|
328 |
-
overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None.
|
329 |
-
|
330 |
-
Returns:
|
331 |
-
Text: A text instance with a style applied to the entire string.
|
332 |
-
"""
|
333 |
-
styled_text = cls(text, justify=justify, overflow=overflow)
|
334 |
-
styled_text.stylize(style)
|
335 |
-
return styled_text
|
336 |
-
|
337 |
-
@classmethod
|
338 |
-
def assemble(
|
339 |
-
cls,
|
340 |
-
*parts: Union[str, "Text", Tuple[str, StyleType]],
|
341 |
-
style: Union[str, Style] = "",
|
342 |
-
justify: Optional["JustifyMethod"] = None,
|
343 |
-
overflow: Optional["OverflowMethod"] = None,
|
344 |
-
no_wrap: Optional[bool] = None,
|
345 |
-
end: str = "\n",
|
346 |
-
tab_size: int = 8,
|
347 |
-
meta: Optional[Dict[str, Any]] = None,
|
348 |
-
) -> "Text":
|
349 |
-
"""Construct a text instance by combining a sequence of strings with optional styles.
|
350 |
-
The positional arguments should be either strings, or a tuple of string + style.
|
351 |
-
|
352 |
-
Args:
|
353 |
-
style (Union[str, Style], optional): Base style for text. Defaults to "".
|
354 |
-
justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None.
|
355 |
-
overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None.
|
356 |
-
end (str, optional): Character to end text with. Defaults to "\\\\n".
|
357 |
-
tab_size (int): Number of spaces per tab, or ``None`` to use ``console.tab_size``. Defaults to 8.
|
358 |
-
meta (Dict[str, Any], optional). Meta data to apply to text, or None for no meta data. Default to None
|
359 |
-
|
360 |
-
Returns:
|
361 |
-
Text: A new text instance.
|
362 |
-
"""
|
363 |
-
text = cls(
|
364 |
-
style=style,
|
365 |
-
justify=justify,
|
366 |
-
overflow=overflow,
|
367 |
-
no_wrap=no_wrap,
|
368 |
-
end=end,
|
369 |
-
tab_size=tab_size,
|
370 |
-
)
|
371 |
-
append = text.append
|
372 |
-
_Text = Text
|
373 |
-
for part in parts:
|
374 |
-
if isinstance(part, (_Text, str)):
|
375 |
-
append(part)
|
376 |
-
else:
|
377 |
-
append(*part)
|
378 |
-
if meta:
|
379 |
-
text.apply_meta(meta)
|
380 |
-
return text
|
381 |
-
|
382 |
-
@property
|
383 |
-
def plain(self) -> str:
|
384 |
-
"""Get the text as a single string."""
|
385 |
-
if len(self._text) != 1:
|
386 |
-
self._text[:] = ["".join(self._text)]
|
387 |
-
return self._text[0]
|
388 |
-
|
389 |
-
@plain.setter
|
390 |
-
def plain(self, new_text: str) -> None:
|
391 |
-
"""Set the text to a new value."""
|
392 |
-
if new_text != self.plain:
|
393 |
-
sanitized_text = strip_control_codes(new_text)
|
394 |
-
self._text[:] = [sanitized_text]
|
395 |
-
old_length = self._length
|
396 |
-
self._length = len(sanitized_text)
|
397 |
-
if old_length > self._length:
|
398 |
-
self._trim_spans()
|
399 |
-
|
400 |
-
@property
|
401 |
-
def spans(self) -> List[Span]:
|
402 |
-
"""Get a reference to the internal list of spans."""
|
403 |
-
return self._spans
|
404 |
-
|
405 |
-
@spans.setter
|
406 |
-
def spans(self, spans: List[Span]) -> None:
|
407 |
-
"""Set spans."""
|
408 |
-
self._spans = spans[:]
|
409 |
-
|
410 |
-
def blank_copy(self, plain: str = "") -> "Text":
|
411 |
-
"""Return a new Text instance with copied meta data (but not the string or spans)."""
|
412 |
-
copy_self = Text(
|
413 |
-
plain,
|
414 |
-
style=self.style,
|
415 |
-
justify=self.justify,
|
416 |
-
overflow=self.overflow,
|
417 |
-
no_wrap=self.no_wrap,
|
418 |
-
end=self.end,
|
419 |
-
tab_size=self.tab_size,
|
420 |
-
)
|
421 |
-
return copy_self
|
422 |
-
|
423 |
-
def copy(self) -> "Text":
|
424 |
-
"""Return a copy of this instance."""
|
425 |
-
copy_self = Text(
|
426 |
-
self.plain,
|
427 |
-
style=self.style,
|
428 |
-
justify=self.justify,
|
429 |
-
overflow=self.overflow,
|
430 |
-
no_wrap=self.no_wrap,
|
431 |
-
end=self.end,
|
432 |
-
tab_size=self.tab_size,
|
433 |
-
)
|
434 |
-
copy_self._spans[:] = self._spans
|
435 |
-
return copy_self
|
436 |
-
|
437 |
-
def stylize(
|
438 |
-
self,
|
439 |
-
style: Union[str, Style],
|
440 |
-
start: int = 0,
|
441 |
-
end: Optional[int] = None,
|
442 |
-
) -> None:
|
443 |
-
"""Apply a style to the text, or a portion of the text.
|
444 |
-
|
445 |
-
Args:
|
446 |
-
style (Union[str, Style]): Style instance or style definition to apply.
|
447 |
-
start (int): Start offset (negative indexing is supported). Defaults to 0.
|
448 |
-
end (Optional[int], optional): End offset (negative indexing is supported), or None for end of text. Defaults to None.
|
449 |
-
"""
|
450 |
-
if style:
|
451 |
-
length = len(self)
|
452 |
-
if start < 0:
|
453 |
-
start = length + start
|
454 |
-
if end is None:
|
455 |
-
end = length
|
456 |
-
if end < 0:
|
457 |
-
end = length + end
|
458 |
-
if start >= length or end <= start:
|
459 |
-
# Span not in text or not valid
|
460 |
-
return
|
461 |
-
self._spans.append(Span(start, min(length, end), style))
|
462 |
-
|
463 |
-
def stylize_before(
|
464 |
-
self,
|
465 |
-
style: Union[str, Style],
|
466 |
-
start: int = 0,
|
467 |
-
end: Optional[int] = None,
|
468 |
-
) -> None:
|
469 |
-
"""Apply a style to the text, or a portion of the text. Styles will be applied before other styles already present.
|
470 |
-
|
471 |
-
Args:
|
472 |
-
style (Union[str, Style]): Style instance or style definition to apply.
|
473 |
-
start (int): Start offset (negative indexing is supported). Defaults to 0.
|
474 |
-
end (Optional[int], optional): End offset (negative indexing is supported), or None for end of text. Defaults to None.
|
475 |
-
"""
|
476 |
-
if style:
|
477 |
-
length = len(self)
|
478 |
-
if start < 0:
|
479 |
-
start = length + start
|
480 |
-
if end is None:
|
481 |
-
end = length
|
482 |
-
if end < 0:
|
483 |
-
end = length + end
|
484 |
-
if start >= length or end <= start:
|
485 |
-
# Span not in text or not valid
|
486 |
-
return
|
487 |
-
self._spans.insert(0, Span(start, min(length, end), style))
|
488 |
-
|
489 |
-
def apply_meta(
|
490 |
-
self, meta: Dict[str, Any], start: int = 0, end: Optional[int] = None
|
491 |
-
) -> None:
|
492 |
-
"""Apply meta data to the text, or a portion of the text.
|
493 |
-
|
494 |
-
Args:
|
495 |
-
meta (Dict[str, Any]): A dict of meta information.
|
496 |
-
start (int): Start offset (negative indexing is supported). Defaults to 0.
|
497 |
-
end (Optional[int], optional): End offset (negative indexing is supported), or None for end of text. Defaults to None.
|
498 |
-
|
499 |
-
"""
|
500 |
-
style = Style.from_meta(meta)
|
501 |
-
self.stylize(style, start=start, end=end)
|
502 |
-
|
503 |
-
def on(self, meta: Optional[Dict[str, Any]] = None, **handlers: Any) -> "Text":
|
504 |
-
"""Apply event handlers (used by Textual project).
|
505 |
-
|
506 |
-
Example:
|
507 |
-
>>> from rich.text import Text
|
508 |
-
>>> text = Text("hello world")
|
509 |
-
>>> text.on(click="view.toggle('world')")
|
510 |
-
|
511 |
-
Args:
|
512 |
-
meta (Dict[str, Any]): Mapping of meta information.
|
513 |
-
**handlers: Keyword args are prefixed with "@" to defined handlers.
|
514 |
-
|
515 |
-
Returns:
|
516 |
-
Text: Self is returned to method may be chained.
|
517 |
-
"""
|
518 |
-
meta = {} if meta is None else meta
|
519 |
-
meta.update({f"@{key}": value for key, value in handlers.items()})
|
520 |
-
self.stylize(Style.from_meta(meta))
|
521 |
-
return self
|
522 |
-
|
523 |
-
def remove_suffix(self, suffix: str) -> None:
|
524 |
-
"""Remove a suffix if it exists.
|
525 |
-
|
526 |
-
Args:
|
527 |
-
suffix (str): Suffix to remove.
|
528 |
-
"""
|
529 |
-
if self.plain.endswith(suffix):
|
530 |
-
self.right_crop(len(suffix))
|
531 |
-
|
532 |
-
def get_style_at_offset(self, console: "Console", offset: int) -> Style:
|
533 |
-
"""Get the style of a character at give offset.
|
534 |
-
|
535 |
-
Args:
|
536 |
-
console (~Console): Console where text will be rendered.
|
537 |
-
offset (int): Offset in to text (negative indexing supported)
|
538 |
-
|
539 |
-
Returns:
|
540 |
-
Style: A Style instance.
|
541 |
-
"""
|
542 |
-
# TODO: This is a little inefficient, it is only used by full justify
|
543 |
-
if offset < 0:
|
544 |
-
offset = len(self) + offset
|
545 |
-
get_style = console.get_style
|
546 |
-
style = get_style(self.style).copy()
|
547 |
-
for start, end, span_style in self._spans:
|
548 |
-
if end > offset >= start:
|
549 |
-
style += get_style(span_style, default="")
|
550 |
-
return style
|
551 |
-
|
552 |
-
def highlight_regex(
|
553 |
-
self,
|
554 |
-
re_highlight: str,
|
555 |
-
style: Optional[Union[GetStyleCallable, StyleType]] = None,
|
556 |
-
*,
|
557 |
-
style_prefix: str = "",
|
558 |
-
) -> int:
|
559 |
-
"""Highlight text with a regular expression, where group names are
|
560 |
-
translated to styles.
|
561 |
-
|
562 |
-
Args:
|
563 |
-
re_highlight (str): A regular expression.
|
564 |
-
style (Union[GetStyleCallable, StyleType]): Optional style to apply to whole match, or a callable
|
565 |
-
which accepts the matched text and returns a style. Defaults to None.
|
566 |
-
style_prefix (str, optional): Optional prefix to add to style group names.
|
567 |
-
|
568 |
-
Returns:
|
569 |
-
int: Number of regex matches
|
570 |
-
"""
|
571 |
-
count = 0
|
572 |
-
append_span = self._spans.append
|
573 |
-
_Span = Span
|
574 |
-
plain = self.plain
|
575 |
-
for match in re.finditer(re_highlight, plain):
|
576 |
-
get_span = match.span
|
577 |
-
if style:
|
578 |
-
start, end = get_span()
|
579 |
-
match_style = style(plain[start:end]) if callable(style) else style
|
580 |
-
if match_style is not None and end > start:
|
581 |
-
append_span(_Span(start, end, match_style))
|
582 |
-
|
583 |
-
count += 1
|
584 |
-
for name in match.groupdict().keys():
|
585 |
-
start, end = get_span(name)
|
586 |
-
if start != -1 and end > start:
|
587 |
-
append_span(_Span(start, end, f"{style_prefix}{name}"))
|
588 |
-
return count
|
589 |
-
|
590 |
-
def highlight_words(
|
591 |
-
self,
|
592 |
-
words: Iterable[str],
|
593 |
-
style: Union[str, Style],
|
594 |
-
*,
|
595 |
-
case_sensitive: bool = True,
|
596 |
-
) -> int:
|
597 |
-
"""Highlight words with a style.
|
598 |
-
|
599 |
-
Args:
|
600 |
-
words (Iterable[str]): Worlds to highlight.
|
601 |
-
style (Union[str, Style]): Style to apply.
|
602 |
-
case_sensitive (bool, optional): Enable case sensitive matchings. Defaults to True.
|
603 |
-
|
604 |
-
Returns:
|
605 |
-
int: Number of words highlighted.
|
606 |
-
"""
|
607 |
-
re_words = "|".join(re.escape(word) for word in words)
|
608 |
-
add_span = self._spans.append
|
609 |
-
count = 0
|
610 |
-
_Span = Span
|
611 |
-
for match in re.finditer(
|
612 |
-
re_words, self.plain, flags=0 if case_sensitive else re.IGNORECASE
|
613 |
-
):
|
614 |
-
start, end = match.span(0)
|
615 |
-
add_span(_Span(start, end, style))
|
616 |
-
count += 1
|
617 |
-
return count
|
618 |
-
|
619 |
-
def rstrip(self) -> None:
|
620 |
-
"""Strip whitespace from end of text."""
|
621 |
-
self.plain = self.plain.rstrip()
|
622 |
-
|
623 |
-
def rstrip_end(self, size: int) -> None:
|
624 |
-
"""Remove whitespace beyond a certain width at the end of the text.
|
625 |
-
|
626 |
-
Args:
|
627 |
-
size (int): The desired size of the text.
|
628 |
-
"""
|
629 |
-
text_length = len(self)
|
630 |
-
if text_length > size:
|
631 |
-
excess = text_length - size
|
632 |
-
whitespace_match = _re_whitespace.search(self.plain)
|
633 |
-
if whitespace_match is not None:
|
634 |
-
whitespace_count = len(whitespace_match.group(0))
|
635 |
-
self.right_crop(min(whitespace_count, excess))
|
636 |
-
|
637 |
-
def set_length(self, new_length: int) -> None:
|
638 |
-
"""Set new length of the text, clipping or padding is required."""
|
639 |
-
length = len(self)
|
640 |
-
if length != new_length:
|
641 |
-
if length < new_length:
|
642 |
-
self.pad_right(new_length - length)
|
643 |
-
else:
|
644 |
-
self.right_crop(length - new_length)
|
645 |
-
|
646 |
-
def __rich_console__(
|
647 |
-
self, console: "Console", options: "ConsoleOptions"
|
648 |
-
) -> Iterable[Segment]:
|
649 |
-
tab_size: int = console.tab_size or self.tab_size or 8
|
650 |
-
justify = self.justify or options.justify or DEFAULT_JUSTIFY
|
651 |
-
|
652 |
-
overflow = self.overflow or options.overflow or DEFAULT_OVERFLOW
|
653 |
-
|
654 |
-
lines = self.wrap(
|
655 |
-
console,
|
656 |
-
options.max_width,
|
657 |
-
justify=justify,
|
658 |
-
overflow=overflow,
|
659 |
-
tab_size=tab_size or 8,
|
660 |
-
no_wrap=pick_bool(self.no_wrap, options.no_wrap, False),
|
661 |
-
)
|
662 |
-
all_lines = Text("\n").join(lines)
|
663 |
-
yield from all_lines.render(console, end=self.end)
|
664 |
-
|
665 |
-
def __rich_measure__(
|
666 |
-
self, console: "Console", options: "ConsoleOptions"
|
667 |
-
) -> Measurement:
|
668 |
-
text = self.plain
|
669 |
-
lines = text.splitlines()
|
670 |
-
max_text_width = max(cell_len(line) for line in lines) if lines else 0
|
671 |
-
words = text.split()
|
672 |
-
min_text_width = (
|
673 |
-
max(cell_len(word) for word in words) if words else max_text_width
|
674 |
-
)
|
675 |
-
return Measurement(min_text_width, max_text_width)
|
676 |
-
|
677 |
-
def render(self, console: "Console", end: str = "") -> Iterable["Segment"]:
|
678 |
-
"""Render the text as Segments.
|
679 |
-
|
680 |
-
Args:
|
681 |
-
console (Console): Console instance.
|
682 |
-
end (Optional[str], optional): Optional end character.
|
683 |
-
|
684 |
-
Returns:
|
685 |
-
Iterable[Segment]: Result of render that may be written to the console.
|
686 |
-
"""
|
687 |
-
_Segment = Segment
|
688 |
-
text = self.plain
|
689 |
-
if not self._spans:
|
690 |
-
yield Segment(text)
|
691 |
-
if end:
|
692 |
-
yield _Segment(end)
|
693 |
-
return
|
694 |
-
get_style = partial(console.get_style, default=Style.null())
|
695 |
-
|
696 |
-
enumerated_spans = list(enumerate(self._spans, 1))
|
697 |
-
style_map = {index: get_style(span.style) for index, span in enumerated_spans}
|
698 |
-
style_map[0] = get_style(self.style)
|
699 |
-
|
700 |
-
spans = [
|
701 |
-
(0, False, 0),
|
702 |
-
*((span.start, False, index) for index, span in enumerated_spans),
|
703 |
-
*((span.end, True, index) for index, span in enumerated_spans),
|
704 |
-
(len(text), True, 0),
|
705 |
-
]
|
706 |
-
spans.sort(key=itemgetter(0, 1))
|
707 |
-
|
708 |
-
stack: List[int] = []
|
709 |
-
stack_append = stack.append
|
710 |
-
stack_pop = stack.remove
|
711 |
-
|
712 |
-
style_cache: Dict[Tuple[Style, ...], Style] = {}
|
713 |
-
style_cache_get = style_cache.get
|
714 |
-
combine = Style.combine
|
715 |
-
|
716 |
-
def get_current_style() -> Style:
|
717 |
-
"""Construct current style from stack."""
|
718 |
-
styles = tuple(style_map[_style_id] for _style_id in sorted(stack))
|
719 |
-
cached_style = style_cache_get(styles)
|
720 |
-
if cached_style is not None:
|
721 |
-
return cached_style
|
722 |
-
current_style = combine(styles)
|
723 |
-
style_cache[styles] = current_style
|
724 |
-
return current_style
|
725 |
-
|
726 |
-
for (offset, leaving, style_id), (next_offset, _, _) in zip(spans, spans[1:]):
|
727 |
-
if leaving:
|
728 |
-
stack_pop(style_id)
|
729 |
-
else:
|
730 |
-
stack_append(style_id)
|
731 |
-
if next_offset > offset:
|
732 |
-
yield _Segment(text[offset:next_offset], get_current_style())
|
733 |
-
if end:
|
734 |
-
yield _Segment(end)
|
735 |
-
|
736 |
-
def join(self, lines: Iterable["Text"]) -> "Text":
|
737 |
-
"""Join text together with this instance as the separator.
|
738 |
-
|
739 |
-
Args:
|
740 |
-
lines (Iterable[Text]): An iterable of Text instances to join.
|
741 |
-
|
742 |
-
Returns:
|
743 |
-
Text: A new text instance containing join text.
|
744 |
-
"""
|
745 |
-
|
746 |
-
new_text = self.blank_copy()
|
747 |
-
|
748 |
-
def iter_text() -> Iterable["Text"]:
|
749 |
-
if self.plain:
|
750 |
-
for last, line in loop_last(lines):
|
751 |
-
yield line
|
752 |
-
if not last:
|
753 |
-
yield self
|
754 |
-
else:
|
755 |
-
yield from lines
|
756 |
-
|
757 |
-
extend_text = new_text._text.extend
|
758 |
-
append_span = new_text._spans.append
|
759 |
-
extend_spans = new_text._spans.extend
|
760 |
-
offset = 0
|
761 |
-
_Span = Span
|
762 |
-
|
763 |
-
for text in iter_text():
|
764 |
-
extend_text(text._text)
|
765 |
-
if text.style:
|
766 |
-
append_span(_Span(offset, offset + len(text), text.style))
|
767 |
-
extend_spans(
|
768 |
-
_Span(offset + start, offset + end, style)
|
769 |
-
for start, end, style in text._spans
|
770 |
-
)
|
771 |
-
offset += len(text)
|
772 |
-
new_text._length = offset
|
773 |
-
return new_text
|
774 |
-
|
775 |
-
def expand_tabs(self, tab_size: Optional[int] = None) -> None:
|
776 |
-
"""Converts tabs to spaces.
|
777 |
-
|
778 |
-
Args:
|
779 |
-
tab_size (int, optional): Size of tabs. Defaults to 8.
|
780 |
-
|
781 |
-
"""
|
782 |
-
if "\t" not in self.plain:
|
783 |
-
return
|
784 |
-
pos = 0
|
785 |
-
if tab_size is None:
|
786 |
-
tab_size = self.tab_size
|
787 |
-
assert tab_size is not None
|
788 |
-
result = self.blank_copy()
|
789 |
-
append = result.append
|
790 |
-
|
791 |
-
_style = self.style
|
792 |
-
for line in self.split("\n", include_separator=True):
|
793 |
-
parts = line.split("\t", include_separator=True)
|
794 |
-
for part in parts:
|
795 |
-
if part.plain.endswith("\t"):
|
796 |
-
part._text = [part.plain[:-1] + " "]
|
797 |
-
append(part)
|
798 |
-
pos += len(part)
|
799 |
-
spaces = tab_size - ((pos - 1) % tab_size) - 1
|
800 |
-
if spaces:
|
801 |
-
append(" " * spaces, _style)
|
802 |
-
pos += spaces
|
803 |
-
else:
|
804 |
-
append(part)
|
805 |
-
self._text = [result.plain]
|
806 |
-
self._length = len(self.plain)
|
807 |
-
self._spans[:] = result._spans
|
808 |
-
|
809 |
-
def truncate(
|
810 |
-
self,
|
811 |
-
max_width: int,
|
812 |
-
*,
|
813 |
-
overflow: Optional["OverflowMethod"] = None,
|
814 |
-
pad: bool = False,
|
815 |
-
) -> None:
|
816 |
-
"""Truncate text if it is longer that a given width.
|
817 |
-
|
818 |
-
Args:
|
819 |
-
max_width (int): Maximum number of characters in text.
|
820 |
-
overflow (str, optional): Overflow method: "crop", "fold", or "ellipsis". Defaults to None, to use self.overflow.
|
821 |
-
pad (bool, optional): Pad with spaces if the length is less than max_width. Defaults to False.
|
822 |
-
"""
|
823 |
-
_overflow = overflow or self.overflow or DEFAULT_OVERFLOW
|
824 |
-
if _overflow != "ignore":
|
825 |
-
length = cell_len(self.plain)
|
826 |
-
if length > max_width:
|
827 |
-
if _overflow == "ellipsis":
|
828 |
-
self.plain = set_cell_size(self.plain, max_width - 1) + "…"
|
829 |
-
else:
|
830 |
-
self.plain = set_cell_size(self.plain, max_width)
|
831 |
-
if pad and length < max_width:
|
832 |
-
spaces = max_width - length
|
833 |
-
self._text = [f"{self.plain}{' ' * spaces}"]
|
834 |
-
self._length = len(self.plain)
|
835 |
-
|
836 |
-
def _trim_spans(self) -> None:
|
837 |
-
"""Remove or modify any spans that are over the end of the text."""
|
838 |
-
max_offset = len(self.plain)
|
839 |
-
_Span = Span
|
840 |
-
self._spans[:] = [
|
841 |
-
(
|
842 |
-
span
|
843 |
-
if span.end < max_offset
|
844 |
-
else _Span(span.start, min(max_offset, span.end), span.style)
|
845 |
-
)
|
846 |
-
for span in self._spans
|
847 |
-
if span.start < max_offset
|
848 |
-
]
|
849 |
-
|
850 |
-
def pad(self, count: int, character: str = " ") -> None:
|
851 |
-
"""Pad left and right with a given number of characters.
|
852 |
-
|
853 |
-
Args:
|
854 |
-
count (int): Width of padding.
|
855 |
-
"""
|
856 |
-
assert len(character) == 1, "Character must be a string of length 1"
|
857 |
-
if count:
|
858 |
-
pad_characters = character * count
|
859 |
-
self.plain = f"{pad_characters}{self.plain}{pad_characters}"
|
860 |
-
_Span = Span
|
861 |
-
self._spans[:] = [
|
862 |
-
_Span(start + count, end + count, style)
|
863 |
-
for start, end, style in self._spans
|
864 |
-
]
|
865 |
-
|
866 |
-
def pad_left(self, count: int, character: str = " ") -> None:
|
867 |
-
"""Pad the left with a given character.
|
868 |
-
|
869 |
-
Args:
|
870 |
-
count (int): Number of characters to pad.
|
871 |
-
character (str, optional): Character to pad with. Defaults to " ".
|
872 |
-
"""
|
873 |
-
assert len(character) == 1, "Character must be a string of length 1"
|
874 |
-
if count:
|
875 |
-
self.plain = f"{character * count}{self.plain}"
|
876 |
-
_Span = Span
|
877 |
-
self._spans[:] = [
|
878 |
-
_Span(start + count, end + count, style)
|
879 |
-
for start, end, style in self._spans
|
880 |
-
]
|
881 |
-
|
882 |
-
def pad_right(self, count: int, character: str = " ") -> None:
|
883 |
-
"""Pad the right with a given character.
|
884 |
-
|
885 |
-
Args:
|
886 |
-
count (int): Number of characters to pad.
|
887 |
-
character (str, optional): Character to pad with. Defaults to " ".
|
888 |
-
"""
|
889 |
-
assert len(character) == 1, "Character must be a string of length 1"
|
890 |
-
if count:
|
891 |
-
self.plain = f"{self.plain}{character * count}"
|
892 |
-
|
893 |
-
def align(self, align: AlignMethod, width: int, character: str = " ") -> None:
|
894 |
-
"""Align text to a given width.
|
895 |
-
|
896 |
-
Args:
|
897 |
-
align (AlignMethod): One of "left", "center", or "right".
|
898 |
-
width (int): Desired width.
|
899 |
-
character (str, optional): Character to pad with. Defaults to " ".
|
900 |
-
"""
|
901 |
-
self.truncate(width)
|
902 |
-
excess_space = width - cell_len(self.plain)
|
903 |
-
if excess_space:
|
904 |
-
if align == "left":
|
905 |
-
self.pad_right(excess_space, character)
|
906 |
-
elif align == "center":
|
907 |
-
left = excess_space // 2
|
908 |
-
self.pad_left(left, character)
|
909 |
-
self.pad_right(excess_space - left, character)
|
910 |
-
else:
|
911 |
-
self.pad_left(excess_space, character)
|
912 |
-
|
913 |
-
def append(
|
914 |
-
self, text: Union["Text", str], style: Optional[Union[str, "Style"]] = None
|
915 |
-
) -> "Text":
|
916 |
-
"""Add text with an optional style.
|
917 |
-
|
918 |
-
Args:
|
919 |
-
text (Union[Text, str]): A str or Text to append.
|
920 |
-
style (str, optional): A style name. Defaults to None.
|
921 |
-
|
922 |
-
Returns:
|
923 |
-
Text: Returns self for chaining.
|
924 |
-
"""
|
925 |
-
|
926 |
-
if not isinstance(text, (str, Text)):
|
927 |
-
raise TypeError("Only str or Text can be appended to Text")
|
928 |
-
|
929 |
-
if len(text):
|
930 |
-
if isinstance(text, str):
|
931 |
-
sanitized_text = strip_control_codes(text)
|
932 |
-
self._text.append(sanitized_text)
|
933 |
-
offset = len(self)
|
934 |
-
text_length = len(sanitized_text)
|
935 |
-
if style is not None:
|
936 |
-
self._spans.append(Span(offset, offset + text_length, style))
|
937 |
-
self._length += text_length
|
938 |
-
elif isinstance(text, Text):
|
939 |
-
_Span = Span
|
940 |
-
if style is not None:
|
941 |
-
raise ValueError(
|
942 |
-
"style must not be set when appending Text instance"
|
943 |
-
)
|
944 |
-
text_length = self._length
|
945 |
-
if text.style is not None:
|
946 |
-
self._spans.append(
|
947 |
-
_Span(text_length, text_length + len(text), text.style)
|
948 |
-
)
|
949 |
-
self._text.append(text.plain)
|
950 |
-
self._spans.extend(
|
951 |
-
_Span(start + text_length, end + text_length, style)
|
952 |
-
for start, end, style in text._spans
|
953 |
-
)
|
954 |
-
self._length += len(text)
|
955 |
-
return self
|
956 |
-
|
957 |
-
def append_text(self, text: "Text") -> "Text":
|
958 |
-
"""Append another Text instance. This method is more performant that Text.append, but
|
959 |
-
only works for Text.
|
960 |
-
|
961 |
-
Returns:
|
962 |
-
Text: Returns self for chaining.
|
963 |
-
"""
|
964 |
-
_Span = Span
|
965 |
-
text_length = self._length
|
966 |
-
if text.style is not None:
|
967 |
-
self._spans.append(_Span(text_length, text_length + len(text), text.style))
|
968 |
-
self._text.append(text.plain)
|
969 |
-
self._spans.extend(
|
970 |
-
_Span(start + text_length, end + text_length, style)
|
971 |
-
for start, end, style in text._spans
|
972 |
-
)
|
973 |
-
self._length += len(text)
|
974 |
-
return self
|
975 |
-
|
976 |
-
def append_tokens(
|
977 |
-
self, tokens: Iterable[Tuple[str, Optional[StyleType]]]
|
978 |
-
) -> "Text":
|
979 |
-
"""Append iterable of str and style. Style may be a Style instance or a str style definition.
|
980 |
-
|
981 |
-
Args:
|
982 |
-
pairs (Iterable[Tuple[str, Optional[StyleType]]]): An iterable of tuples containing str content and style.
|
983 |
-
|
984 |
-
Returns:
|
985 |
-
Text: Returns self for chaining.
|
986 |
-
"""
|
987 |
-
append_text = self._text.append
|
988 |
-
append_span = self._spans.append
|
989 |
-
_Span = Span
|
990 |
-
offset = len(self)
|
991 |
-
for content, style in tokens:
|
992 |
-
append_text(content)
|
993 |
-
if style is not None:
|
994 |
-
append_span(_Span(offset, offset + len(content), style))
|
995 |
-
offset += len(content)
|
996 |
-
self._length = offset
|
997 |
-
return self
|
998 |
-
|
999 |
-
def copy_styles(self, text: "Text") -> None:
|
1000 |
-
"""Copy styles from another Text instance.
|
1001 |
-
|
1002 |
-
Args:
|
1003 |
-
text (Text): A Text instance to copy styles from, must be the same length.
|
1004 |
-
"""
|
1005 |
-
self._spans.extend(text._spans)
|
1006 |
-
|
1007 |
-
def split(
|
1008 |
-
self,
|
1009 |
-
separator: str = "\n",
|
1010 |
-
*,
|
1011 |
-
include_separator: bool = False,
|
1012 |
-
allow_blank: bool = False,
|
1013 |
-
) -> Lines:
|
1014 |
-
"""Split rich text in to lines, preserving styles.
|
1015 |
-
|
1016 |
-
Args:
|
1017 |
-
separator (str, optional): String to split on. Defaults to "\\\\n".
|
1018 |
-
include_separator (bool, optional): Include the separator in the lines. Defaults to False.
|
1019 |
-
allow_blank (bool, optional): Return a blank line if the text ends with a separator. Defaults to False.
|
1020 |
-
|
1021 |
-
Returns:
|
1022 |
-
List[RichText]: A list of rich text, one per line of the original.
|
1023 |
-
"""
|
1024 |
-
assert separator, "separator must not be empty"
|
1025 |
-
|
1026 |
-
text = self.plain
|
1027 |
-
if separator not in text:
|
1028 |
-
return Lines([self.copy()])
|
1029 |
-
|
1030 |
-
if include_separator:
|
1031 |
-
lines = self.divide(
|
1032 |
-
match.end() for match in re.finditer(re.escape(separator), text)
|
1033 |
-
)
|
1034 |
-
else:
|
1035 |
-
|
1036 |
-
def flatten_spans() -> Iterable[int]:
|
1037 |
-
for match in re.finditer(re.escape(separator), text):
|
1038 |
-
start, end = match.span()
|
1039 |
-
yield start
|
1040 |
-
yield end
|
1041 |
-
|
1042 |
-
lines = Lines(
|
1043 |
-
line for line in self.divide(flatten_spans()) if line.plain != separator
|
1044 |
-
)
|
1045 |
-
|
1046 |
-
if not allow_blank and text.endswith(separator):
|
1047 |
-
lines.pop()
|
1048 |
-
|
1049 |
-
return lines
|
1050 |
-
|
1051 |
-
def divide(self, offsets: Iterable[int]) -> Lines:
|
1052 |
-
"""Divide text in to a number of lines at given offsets.
|
1053 |
-
|
1054 |
-
Args:
|
1055 |
-
offsets (Iterable[int]): Offsets used to divide text.
|
1056 |
-
|
1057 |
-
Returns:
|
1058 |
-
Lines: New RichText instances between offsets.
|
1059 |
-
"""
|
1060 |
-
_offsets = list(offsets)
|
1061 |
-
|
1062 |
-
if not _offsets:
|
1063 |
-
return Lines([self.copy()])
|
1064 |
-
|
1065 |
-
text = self.plain
|
1066 |
-
text_length = len(text)
|
1067 |
-
divide_offsets = [0, *_offsets, text_length]
|
1068 |
-
line_ranges = list(zip(divide_offsets, divide_offsets[1:]))
|
1069 |
-
|
1070 |
-
style = self.style
|
1071 |
-
justify = self.justify
|
1072 |
-
overflow = self.overflow
|
1073 |
-
_Text = Text
|
1074 |
-
new_lines = Lines(
|
1075 |
-
_Text(
|
1076 |
-
text[start:end],
|
1077 |
-
style=style,
|
1078 |
-
justify=justify,
|
1079 |
-
overflow=overflow,
|
1080 |
-
)
|
1081 |
-
for start, end in line_ranges
|
1082 |
-
)
|
1083 |
-
if not self._spans:
|
1084 |
-
return new_lines
|
1085 |
-
|
1086 |
-
_line_appends = [line._spans.append for line in new_lines._lines]
|
1087 |
-
line_count = len(line_ranges)
|
1088 |
-
_Span = Span
|
1089 |
-
|
1090 |
-
for span_start, span_end, style in self._spans:
|
1091 |
-
|
1092 |
-
lower_bound = 0
|
1093 |
-
upper_bound = line_count
|
1094 |
-
start_line_no = (lower_bound + upper_bound) // 2
|
1095 |
-
|
1096 |
-
while True:
|
1097 |
-
line_start, line_end = line_ranges[start_line_no]
|
1098 |
-
if span_start < line_start:
|
1099 |
-
upper_bound = start_line_no - 1
|
1100 |
-
elif span_start > line_end:
|
1101 |
-
lower_bound = start_line_no + 1
|
1102 |
-
else:
|
1103 |
-
break
|
1104 |
-
start_line_no = (lower_bound + upper_bound) // 2
|
1105 |
-
|
1106 |
-
if span_end < line_end:
|
1107 |
-
end_line_no = start_line_no
|
1108 |
-
else:
|
1109 |
-
end_line_no = lower_bound = start_line_no
|
1110 |
-
upper_bound = line_count
|
1111 |
-
|
1112 |
-
while True:
|
1113 |
-
line_start, line_end = line_ranges[end_line_no]
|
1114 |
-
if span_end < line_start:
|
1115 |
-
upper_bound = end_line_no - 1
|
1116 |
-
elif span_end > line_end:
|
1117 |
-
lower_bound = end_line_no + 1
|
1118 |
-
else:
|
1119 |
-
break
|
1120 |
-
end_line_no = (lower_bound + upper_bound) // 2
|
1121 |
-
|
1122 |
-
for line_no in range(start_line_no, end_line_no + 1):
|
1123 |
-
line_start, line_end = line_ranges[line_no]
|
1124 |
-
new_start = max(0, span_start - line_start)
|
1125 |
-
new_end = min(span_end - line_start, line_end - line_start)
|
1126 |
-
if new_end > new_start:
|
1127 |
-
_line_appends[line_no](_Span(new_start, new_end, style))
|
1128 |
-
|
1129 |
-
return new_lines
|
1130 |
-
|
1131 |
-
def right_crop(self, amount: int = 1) -> None:
|
1132 |
-
"""Remove a number of characters from the end of the text."""
|
1133 |
-
max_offset = len(self.plain) - amount
|
1134 |
-
_Span = Span
|
1135 |
-
self._spans[:] = [
|
1136 |
-
(
|
1137 |
-
span
|
1138 |
-
if span.end < max_offset
|
1139 |
-
else _Span(span.start, min(max_offset, span.end), span.style)
|
1140 |
-
)
|
1141 |
-
for span in self._spans
|
1142 |
-
if span.start < max_offset
|
1143 |
-
]
|
1144 |
-
self._text = [self.plain[:-amount]]
|
1145 |
-
self._length -= amount
|
1146 |
-
|
1147 |
-
def wrap(
|
1148 |
-
self,
|
1149 |
-
console: "Console",
|
1150 |
-
width: int,
|
1151 |
-
*,
|
1152 |
-
justify: Optional["JustifyMethod"] = None,
|
1153 |
-
overflow: Optional["OverflowMethod"] = None,
|
1154 |
-
tab_size: int = 8,
|
1155 |
-
no_wrap: Optional[bool] = None,
|
1156 |
-
) -> Lines:
|
1157 |
-
"""Word wrap the text.
|
1158 |
-
|
1159 |
-
Args:
|
1160 |
-
console (Console): Console instance.
|
1161 |
-
width (int): Number of characters per line.
|
1162 |
-
emoji (bool, optional): Also render emoji code. Defaults to True.
|
1163 |
-
justify (str, optional): Justify method: "default", "left", "center", "full", "right". Defaults to "default".
|
1164 |
-
overflow (str, optional): Overflow method: "crop", "fold", or "ellipsis". Defaults to None.
|
1165 |
-
tab_size (int, optional): Default tab size. Defaults to 8.
|
1166 |
-
no_wrap (bool, optional): Disable wrapping, Defaults to False.
|
1167 |
-
|
1168 |
-
Returns:
|
1169 |
-
Lines: Number of lines.
|
1170 |
-
"""
|
1171 |
-
wrap_justify = justify or self.justify or DEFAULT_JUSTIFY
|
1172 |
-
wrap_overflow = overflow or self.overflow or DEFAULT_OVERFLOW
|
1173 |
-
|
1174 |
-
no_wrap = pick_bool(no_wrap, self.no_wrap, False) or overflow == "ignore"
|
1175 |
-
|
1176 |
-
lines = Lines()
|
1177 |
-
for line in self.split(allow_blank=True):
|
1178 |
-
if "\t" in line:
|
1179 |
-
line.expand_tabs(tab_size)
|
1180 |
-
if no_wrap:
|
1181 |
-
new_lines = Lines([line])
|
1182 |
-
else:
|
1183 |
-
offsets = divide_line(str(line), width, fold=wrap_overflow == "fold")
|
1184 |
-
new_lines = line.divide(offsets)
|
1185 |
-
for line in new_lines:
|
1186 |
-
line.rstrip_end(width)
|
1187 |
-
if wrap_justify:
|
1188 |
-
new_lines.justify(
|
1189 |
-
console, width, justify=wrap_justify, overflow=wrap_overflow
|
1190 |
-
)
|
1191 |
-
for line in new_lines:
|
1192 |
-
line.truncate(width, overflow=wrap_overflow)
|
1193 |
-
lines.extend(new_lines)
|
1194 |
-
return lines
|
1195 |
-
|
1196 |
-
def fit(self, width: int) -> Lines:
|
1197 |
-
"""Fit the text in to given width by chopping in to lines.
|
1198 |
-
|
1199 |
-
Args:
|
1200 |
-
width (int): Maximum characters in a line.
|
1201 |
-
|
1202 |
-
Returns:
|
1203 |
-
Lines: Lines container.
|
1204 |
-
"""
|
1205 |
-
lines: Lines = Lines()
|
1206 |
-
append = lines.append
|
1207 |
-
for line in self.split():
|
1208 |
-
line.set_length(width)
|
1209 |
-
append(line)
|
1210 |
-
return lines
|
1211 |
-
|
1212 |
-
def detect_indentation(self) -> int:
|
1213 |
-
"""Auto-detect indentation of code.
|
1214 |
-
|
1215 |
-
Returns:
|
1216 |
-
int: Number of spaces used to indent code.
|
1217 |
-
"""
|
1218 |
-
|
1219 |
-
_indentations = {
|
1220 |
-
len(match.group(1))
|
1221 |
-
for match in re.finditer(r"^( *)(.*)$", self.plain, flags=re.MULTILINE)
|
1222 |
-
}
|
1223 |
-
|
1224 |
-
try:
|
1225 |
-
indentation = (
|
1226 |
-
reduce(gcd, [indent for indent in _indentations if not indent % 2]) or 1
|
1227 |
-
)
|
1228 |
-
except TypeError:
|
1229 |
-
indentation = 1
|
1230 |
-
|
1231 |
-
return indentation
|
1232 |
-
|
1233 |
-
def with_indent_guides(
|
1234 |
-
self,
|
1235 |
-
indent_size: Optional[int] = None,
|
1236 |
-
*,
|
1237 |
-
character: str = "│",
|
1238 |
-
style: StyleType = "dim green",
|
1239 |
-
) -> "Text":
|
1240 |
-
"""Adds indent guide lines to text.
|
1241 |
-
|
1242 |
-
Args:
|
1243 |
-
indent_size (Optional[int]): Size of indentation, or None to auto detect. Defaults to None.
|
1244 |
-
character (str, optional): Character to use for indentation. Defaults to "│".
|
1245 |
-
style (Union[Style, str], optional): Style of indent guides.
|
1246 |
-
|
1247 |
-
Returns:
|
1248 |
-
Text: New text with indentation guides.
|
1249 |
-
"""
|
1250 |
-
|
1251 |
-
_indent_size = self.detect_indentation() if indent_size is None else indent_size
|
1252 |
-
|
1253 |
-
text = self.copy()
|
1254 |
-
text.expand_tabs()
|
1255 |
-
indent_line = f"{character}{' ' * (_indent_size - 1)}"
|
1256 |
-
|
1257 |
-
re_indent = re.compile(r"^( *)(.*)$")
|
1258 |
-
new_lines: List[Text] = []
|
1259 |
-
add_line = new_lines.append
|
1260 |
-
blank_lines = 0
|
1261 |
-
for line in text.split(allow_blank=True):
|
1262 |
-
match = re_indent.match(line.plain)
|
1263 |
-
if not match or not match.group(2):
|
1264 |
-
blank_lines += 1
|
1265 |
-
continue
|
1266 |
-
indent = match.group(1)
|
1267 |
-
full_indents, remaining_space = divmod(len(indent), _indent_size)
|
1268 |
-
new_indent = f"{indent_line * full_indents}{' ' * remaining_space}"
|
1269 |
-
line.plain = new_indent + line.plain[len(new_indent) :]
|
1270 |
-
line.stylize(style, 0, len(new_indent))
|
1271 |
-
if blank_lines:
|
1272 |
-
new_lines.extend([Text(new_indent, style=style)] * blank_lines)
|
1273 |
-
blank_lines = 0
|
1274 |
-
add_line(line)
|
1275 |
-
if blank_lines:
|
1276 |
-
new_lines.extend([Text("", style=style)] * blank_lines)
|
1277 |
-
|
1278 |
-
new_text = text.blank_copy("\n").join(new_lines)
|
1279 |
-
return new_text
|
1280 |
-
|
1281 |
-
|
1282 |
-
if __name__ == "__main__": # pragma: no cover
|
1283 |
-
from pip._vendor.rich.console import Console
|
1284 |
-
|
1285 |
-
text = Text(
|
1286 |
-
"""\nLorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.\n"""
|
1287 |
-
)
|
1288 |
-
text.highlight_words(["Lorem"], "bold")
|
1289 |
-
text.highlight_words(["ipsum"], "italic")
|
1290 |
-
|
1291 |
-
console = Console()
|
1292 |
-
|
1293 |
-
console.rule("justify='left'")
|
1294 |
-
console.print(text, style="red")
|
1295 |
-
console.print()
|
1296 |
-
|
1297 |
-
console.rule("justify='center'")
|
1298 |
-
console.print(text, style="green", justify="center")
|
1299 |
-
console.print()
|
1300 |
-
|
1301 |
-
console.rule("justify='right'")
|
1302 |
-
console.print(text, style="blue", justify="right")
|
1303 |
-
console.print()
|
1304 |
-
|
1305 |
-
console.rule("justify='full'")
|
1306 |
-
console.print(text, style="magenta", justify="full")
|
1307 |
-
console.print()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/config/_validate_pyproject/error_reporting.py
DELETED
@@ -1,318 +0,0 @@
|
|
1 |
-
import io
|
2 |
-
import json
|
3 |
-
import logging
|
4 |
-
import os
|
5 |
-
import re
|
6 |
-
from contextlib import contextmanager
|
7 |
-
from textwrap import indent, wrap
|
8 |
-
from typing import Any, Dict, Iterator, List, Optional, Sequence, Union, cast
|
9 |
-
|
10 |
-
from .fastjsonschema_exceptions import JsonSchemaValueException
|
11 |
-
|
12 |
-
_logger = logging.getLogger(__name__)
|
13 |
-
|
14 |
-
_MESSAGE_REPLACEMENTS = {
|
15 |
-
"must be named by propertyName definition": "keys must be named by",
|
16 |
-
"one of contains definition": "at least one item that matches",
|
17 |
-
" same as const definition:": "",
|
18 |
-
"only specified items": "only items matching the definition",
|
19 |
-
}
|
20 |
-
|
21 |
-
_SKIP_DETAILS = (
|
22 |
-
"must not be empty",
|
23 |
-
"is always invalid",
|
24 |
-
"must not be there",
|
25 |
-
)
|
26 |
-
|
27 |
-
_NEED_DETAILS = {"anyOf", "oneOf", "anyOf", "contains", "propertyNames", "not", "items"}
|
28 |
-
|
29 |
-
_CAMEL_CASE_SPLITTER = re.compile(r"\W+|([A-Z][^A-Z\W]*)")
|
30 |
-
_IDENTIFIER = re.compile(r"^[\w_]+$", re.I)
|
31 |
-
|
32 |
-
_TOML_JARGON = {
|
33 |
-
"object": "table",
|
34 |
-
"property": "key",
|
35 |
-
"properties": "keys",
|
36 |
-
"property names": "keys",
|
37 |
-
}
|
38 |
-
|
39 |
-
|
40 |
-
class ValidationError(JsonSchemaValueException):
|
41 |
-
"""Report violations of a given JSON schema.
|
42 |
-
|
43 |
-
This class extends :exc:`~fastjsonschema.JsonSchemaValueException`
|
44 |
-
by adding the following properties:
|
45 |
-
|
46 |
-
- ``summary``: an improved version of the ``JsonSchemaValueException`` error message
|
47 |
-
with only the necessary information)
|
48 |
-
|
49 |
-
- ``details``: more contextual information about the error like the failing schema
|
50 |
-
itself and the value that violates the schema.
|
51 |
-
|
52 |
-
Depending on the level of the verbosity of the ``logging`` configuration
|
53 |
-
the exception message will be only ``summary`` (default) or a combination of
|
54 |
-
``summary`` and ``details`` (when the logging level is set to :obj:`logging.DEBUG`).
|
55 |
-
"""
|
56 |
-
|
57 |
-
summary = ""
|
58 |
-
details = ""
|
59 |
-
_original_message = ""
|
60 |
-
|
61 |
-
@classmethod
|
62 |
-
def _from_jsonschema(cls, ex: JsonSchemaValueException):
|
63 |
-
formatter = _ErrorFormatting(ex)
|
64 |
-
obj = cls(str(formatter), ex.value, formatter.name, ex.definition, ex.rule)
|
65 |
-
debug_code = os.getenv("JSONSCHEMA_DEBUG_CODE_GENERATION", "false").lower()
|
66 |
-
if debug_code != "false": # pragma: no cover
|
67 |
-
obj.__cause__, obj.__traceback__ = ex.__cause__, ex.__traceback__
|
68 |
-
obj._original_message = ex.message
|
69 |
-
obj.summary = formatter.summary
|
70 |
-
obj.details = formatter.details
|
71 |
-
return obj
|
72 |
-
|
73 |
-
|
74 |
-
@contextmanager
|
75 |
-
def detailed_errors():
|
76 |
-
try:
|
77 |
-
yield
|
78 |
-
except JsonSchemaValueException as ex:
|
79 |
-
raise ValidationError._from_jsonschema(ex) from None
|
80 |
-
|
81 |
-
|
82 |
-
class _ErrorFormatting:
|
83 |
-
def __init__(self, ex: JsonSchemaValueException):
|
84 |
-
self.ex = ex
|
85 |
-
self.name = f"`{self._simplify_name(ex.name)}`"
|
86 |
-
self._original_message = self.ex.message.replace(ex.name, self.name)
|
87 |
-
self._summary = ""
|
88 |
-
self._details = ""
|
89 |
-
|
90 |
-
def __str__(self) -> str:
|
91 |
-
if _logger.getEffectiveLevel() <= logging.DEBUG and self.details:
|
92 |
-
return f"{self.summary}\n\n{self.details}"
|
93 |
-
|
94 |
-
return self.summary
|
95 |
-
|
96 |
-
@property
|
97 |
-
def summary(self) -> str:
|
98 |
-
if not self._summary:
|
99 |
-
self._summary = self._expand_summary()
|
100 |
-
|
101 |
-
return self._summary
|
102 |
-
|
103 |
-
@property
|
104 |
-
def details(self) -> str:
|
105 |
-
if not self._details:
|
106 |
-
self._details = self._expand_details()
|
107 |
-
|
108 |
-
return self._details
|
109 |
-
|
110 |
-
def _simplify_name(self, name):
|
111 |
-
x = len("data.")
|
112 |
-
return name[x:] if name.startswith("data.") else name
|
113 |
-
|
114 |
-
def _expand_summary(self):
|
115 |
-
msg = self._original_message
|
116 |
-
|
117 |
-
for bad, repl in _MESSAGE_REPLACEMENTS.items():
|
118 |
-
msg = msg.replace(bad, repl)
|
119 |
-
|
120 |
-
if any(substring in msg for substring in _SKIP_DETAILS):
|
121 |
-
return msg
|
122 |
-
|
123 |
-
schema = self.ex.rule_definition
|
124 |
-
if self.ex.rule in _NEED_DETAILS and schema:
|
125 |
-
summary = _SummaryWriter(_TOML_JARGON)
|
126 |
-
return f"{msg}:\n\n{indent(summary(schema), ' ')}"
|
127 |
-
|
128 |
-
return msg
|
129 |
-
|
130 |
-
def _expand_details(self) -> str:
|
131 |
-
optional = []
|
132 |
-
desc_lines = self.ex.definition.pop("$$description", [])
|
133 |
-
desc = self.ex.definition.pop("description", None) or " ".join(desc_lines)
|
134 |
-
if desc:
|
135 |
-
description = "\n".join(
|
136 |
-
wrap(
|
137 |
-
desc,
|
138 |
-
width=80,
|
139 |
-
initial_indent=" ",
|
140 |
-
subsequent_indent=" ",
|
141 |
-
break_long_words=False,
|
142 |
-
)
|
143 |
-
)
|
144 |
-
optional.append(f"DESCRIPTION:\n{description}")
|
145 |
-
schema = json.dumps(self.ex.definition, indent=4)
|
146 |
-
value = json.dumps(self.ex.value, indent=4)
|
147 |
-
defaults = [
|
148 |
-
f"GIVEN VALUE:\n{indent(value, ' ')}",
|
149 |
-
f"OFFENDING RULE: {self.ex.rule!r}",
|
150 |
-
f"DEFINITION:\n{indent(schema, ' ')}",
|
151 |
-
]
|
152 |
-
return "\n\n".join(optional + defaults)
|
153 |
-
|
154 |
-
|
155 |
-
class _SummaryWriter:
|
156 |
-
_IGNORE = {"description", "default", "title", "examples"}
|
157 |
-
|
158 |
-
def __init__(self, jargon: Optional[Dict[str, str]] = None):
|
159 |
-
self.jargon: Dict[str, str] = jargon or {}
|
160 |
-
# Clarify confusing terms
|
161 |
-
self._terms = {
|
162 |
-
"anyOf": "at least one of the following",
|
163 |
-
"oneOf": "exactly one of the following",
|
164 |
-
"allOf": "all of the following",
|
165 |
-
"not": "(*NOT* the following)",
|
166 |
-
"prefixItems": f"{self._jargon('items')} (in order)",
|
167 |
-
"items": "items",
|
168 |
-
"contains": "contains at least one of",
|
169 |
-
"propertyNames": (
|
170 |
-
f"non-predefined acceptable {self._jargon('property names')}"
|
171 |
-
),
|
172 |
-
"patternProperties": f"{self._jargon('properties')} named via pattern",
|
173 |
-
"const": "predefined value",
|
174 |
-
"enum": "one of",
|
175 |
-
}
|
176 |
-
# Attributes that indicate that the definition is easy and can be done
|
177 |
-
# inline (e.g. string and number)
|
178 |
-
self._guess_inline_defs = [
|
179 |
-
"enum",
|
180 |
-
"const",
|
181 |
-
"maxLength",
|
182 |
-
"minLength",
|
183 |
-
"pattern",
|
184 |
-
"format",
|
185 |
-
"minimum",
|
186 |
-
"maximum",
|
187 |
-
"exclusiveMinimum",
|
188 |
-
"exclusiveMaximum",
|
189 |
-
"multipleOf",
|
190 |
-
]
|
191 |
-
|
192 |
-
def _jargon(self, term: Union[str, List[str]]) -> Union[str, List[str]]:
|
193 |
-
if isinstance(term, list):
|
194 |
-
return [self.jargon.get(t, t) for t in term]
|
195 |
-
return self.jargon.get(term, term)
|
196 |
-
|
197 |
-
def __call__(
|
198 |
-
self,
|
199 |
-
schema: Union[dict, List[dict]],
|
200 |
-
prefix: str = "",
|
201 |
-
*,
|
202 |
-
_path: Sequence[str] = (),
|
203 |
-
) -> str:
|
204 |
-
if isinstance(schema, list):
|
205 |
-
return self._handle_list(schema, prefix, _path)
|
206 |
-
|
207 |
-
filtered = self._filter_unecessary(schema, _path)
|
208 |
-
simple = self._handle_simple_dict(filtered, _path)
|
209 |
-
if simple:
|
210 |
-
return f"{prefix}{simple}"
|
211 |
-
|
212 |
-
child_prefix = self._child_prefix(prefix, " ")
|
213 |
-
item_prefix = self._child_prefix(prefix, "- ")
|
214 |
-
indent = len(prefix) * " "
|
215 |
-
with io.StringIO() as buffer:
|
216 |
-
for i, (key, value) in enumerate(filtered.items()):
|
217 |
-
child_path = [*_path, key]
|
218 |
-
line_prefix = prefix if i == 0 else indent
|
219 |
-
buffer.write(f"{line_prefix}{self._label(child_path)}:")
|
220 |
-
# ^ just the first item should receive the complete prefix
|
221 |
-
if isinstance(value, dict):
|
222 |
-
filtered = self._filter_unecessary(value, child_path)
|
223 |
-
simple = self._handle_simple_dict(filtered, child_path)
|
224 |
-
buffer.write(
|
225 |
-
f" {simple}"
|
226 |
-
if simple
|
227 |
-
else f"\n{self(value, child_prefix, _path=child_path)}"
|
228 |
-
)
|
229 |
-
elif isinstance(value, list) and (
|
230 |
-
key != "type" or self._is_property(child_path)
|
231 |
-
):
|
232 |
-
children = self._handle_list(value, item_prefix, child_path)
|
233 |
-
sep = " " if children.startswith("[") else "\n"
|
234 |
-
buffer.write(f"{sep}{children}")
|
235 |
-
else:
|
236 |
-
buffer.write(f" {self._value(value, child_path)}\n")
|
237 |
-
return buffer.getvalue()
|
238 |
-
|
239 |
-
def _is_unecessary(self, path: Sequence[str]) -> bool:
|
240 |
-
if self._is_property(path) or not path: # empty path => instruction @ root
|
241 |
-
return False
|
242 |
-
key = path[-1]
|
243 |
-
return any(key.startswith(k) for k in "$_") or key in self._IGNORE
|
244 |
-
|
245 |
-
def _filter_unecessary(self, schema: dict, path: Sequence[str]):
|
246 |
-
return {
|
247 |
-
key: value
|
248 |
-
for key, value in schema.items()
|
249 |
-
if not self._is_unecessary([*path, key])
|
250 |
-
}
|
251 |
-
|
252 |
-
def _handle_simple_dict(self, value: dict, path: Sequence[str]) -> Optional[str]:
|
253 |
-
inline = any(p in value for p in self._guess_inline_defs)
|
254 |
-
simple = not any(isinstance(v, (list, dict)) for v in value.values())
|
255 |
-
if inline or simple:
|
256 |
-
return f"{{{', '.join(self._inline_attrs(value, path))}}}\n"
|
257 |
-
return None
|
258 |
-
|
259 |
-
def _handle_list(
|
260 |
-
self, schemas: list, prefix: str = "", path: Sequence[str] = ()
|
261 |
-
) -> str:
|
262 |
-
if self._is_unecessary(path):
|
263 |
-
return ""
|
264 |
-
|
265 |
-
repr_ = repr(schemas)
|
266 |
-
if all(not isinstance(e, (dict, list)) for e in schemas) and len(repr_) < 60:
|
267 |
-
return f"{repr_}\n"
|
268 |
-
|
269 |
-
item_prefix = self._child_prefix(prefix, "- ")
|
270 |
-
return "".join(
|
271 |
-
self(v, item_prefix, _path=[*path, f"[{i}]"]) for i, v in enumerate(schemas)
|
272 |
-
)
|
273 |
-
|
274 |
-
def _is_property(self, path: Sequence[str]):
|
275 |
-
"""Check if the given path can correspond to an arbitrarily named property"""
|
276 |
-
counter = 0
|
277 |
-
for key in path[-2::-1]:
|
278 |
-
if key not in {"properties", "patternProperties"}:
|
279 |
-
break
|
280 |
-
counter += 1
|
281 |
-
|
282 |
-
# If the counter if even, the path correspond to a JSON Schema keyword
|
283 |
-
# otherwise it can be any arbitrary string naming a property
|
284 |
-
return counter % 2 == 1
|
285 |
-
|
286 |
-
def _label(self, path: Sequence[str]) -> str:
|
287 |
-
*parents, key = path
|
288 |
-
if not self._is_property(path):
|
289 |
-
norm_key = _separate_terms(key)
|
290 |
-
return self._terms.get(key) or " ".join(self._jargon(norm_key))
|
291 |
-
|
292 |
-
if parents[-1] == "patternProperties":
|
293 |
-
return f"(regex {key!r})"
|
294 |
-
return repr(key) # property name
|
295 |
-
|
296 |
-
def _value(self, value: Any, path: Sequence[str]) -> str:
|
297 |
-
if path[-1] == "type" and not self._is_property(path):
|
298 |
-
type_ = self._jargon(value)
|
299 |
-
return (
|
300 |
-
f"[{', '.join(type_)}]" if isinstance(value, list) else cast(str, type_)
|
301 |
-
)
|
302 |
-
return repr(value)
|
303 |
-
|
304 |
-
def _inline_attrs(self, schema: dict, path: Sequence[str]) -> Iterator[str]:
|
305 |
-
for key, value in schema.items():
|
306 |
-
child_path = [*path, key]
|
307 |
-
yield f"{self._label(child_path)}: {self._value(value, child_path)}"
|
308 |
-
|
309 |
-
def _child_prefix(self, parent_prefix: str, child_prefix: str) -> str:
|
310 |
-
return len(parent_prefix) * " " + child_prefix
|
311 |
-
|
312 |
-
|
313 |
-
def _separate_terms(word: str) -> List[str]:
|
314 |
-
"""
|
315 |
-
>>> _separate_terms("FooBar-foo")
|
316 |
-
['foo', 'bar', 'foo']
|
317 |
-
"""
|
318 |
-
return [w.lower() for w in _CAMEL_CASE_SPLITTER.split(word) if w]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AutoLLM/ArxivDigest/action.py
DELETED
@@ -1,142 +0,0 @@
|
|
1 |
-
from sendgrid import SendGridAPIClient
|
2 |
-
from sendgrid.helpers.mail import Mail, Email, To, Content
|
3 |
-
|
4 |
-
from datetime import date
|
5 |
-
|
6 |
-
import argparse
|
7 |
-
import yaml
|
8 |
-
import os
|
9 |
-
|
10 |
-
from relevancy import generate_relevance_score, process_subject_fields
|
11 |
-
from download_new_papers import get_papers
|
12 |
-
|
13 |
-
|
14 |
-
|
15 |
-
# Hackathon quality code. Don't judge too harshly.
|
16 |
-
# Feel free to submit pull requests to improve the code.
|
17 |
-
|
18 |
-
topics = {
|
19 |
-
"Physics": "",
|
20 |
-
"Mathematics": "math",
|
21 |
-
"Computer Science": "cs",
|
22 |
-
"Quantitative Biology": "q-bio",
|
23 |
-
"Quantitative Finance": "q-fin",
|
24 |
-
"Statistics": "stat",
|
25 |
-
"Electrical Engineering and Systems Science": "eess",
|
26 |
-
"Economics": "econ"
|
27 |
-
}
|
28 |
-
|
29 |
-
physics_topics = {
|
30 |
-
"Astrophysics": "astro-ph",
|
31 |
-
"Condensed Matter": "cond-mat",
|
32 |
-
"General Relativity and Quantum Cosmology": "gr-qc",
|
33 |
-
"High Energy Physics - Experiment": "hep-ex",
|
34 |
-
"High Energy Physics - Lattice": "hep-lat",
|
35 |
-
"High Energy Physics - Phenomenology": "hep-ph",
|
36 |
-
"High Energy Physics - Theory": "hep-th",
|
37 |
-
"Mathematical Physics": "math-ph",
|
38 |
-
"Nonlinear Sciences": "nlin",
|
39 |
-
"Nuclear Experiment": "nucl-ex",
|
40 |
-
"Nuclear Theory": "nucl-th",
|
41 |
-
"Physics": "physics",
|
42 |
-
"Quantum Physics": "quant-ph"
|
43 |
-
}
|
44 |
-
|
45 |
-
|
46 |
-
# TODO: surely theres a better way
|
47 |
-
category_map = {
|
48 |
-
"Astrophysics": ["Astrophysics of Galaxies", "Cosmology and Nongalactic Astrophysics", "Earth and Planetary Astrophysics", "High Energy Astrophysical Phenomena", "Instrumentation and Methods for Astrophysics", "Solar and Stellar Astrophysics"],
|
49 |
-
"Condensed Matter": ["Disordered Systems and Neural Networks", "Materials Science", "Mesoscale and Nanoscale Physics", "Other Condensed Matter", "Quantum Gases", "Soft Condensed Matter", "Statistical Mechanics", "Strongly Correlated Electrons", "Superconductivity"],
|
50 |
-
"General Relativity and Quantum Cosmology": ["None"],
|
51 |
-
"High Energy Physics - Experiment": ["None"],
|
52 |
-
"High Energy Physics - Lattice": ["None"],
|
53 |
-
"High Energy Physics - Phenomenology": ["None"],
|
54 |
-
"High Energy Physics - Theory": ["None"],
|
55 |
-
"Mathematical Physics": ["None"],
|
56 |
-
"Nonlinear Sciences": ["Adaptation and Self-Organizing Systems", "Cellular Automata and Lattice Gases", "Chaotic Dynamics", "Exactly Solvable and Integrable Systems", "Pattern Formation and Solitons"],
|
57 |
-
"Nuclear Experiment": ["None"],
|
58 |
-
"Nuclear Theory": ["None"],
|
59 |
-
"Physics": ["Accelerator Physics", "Applied Physics", "Atmospheric and Oceanic Physics", "Atomic and Molecular Clusters", "Atomic Physics", "Biological Physics", "Chemical Physics", "Classical Physics", "Computational Physics", "Data Analysis, Statistics and Probability", "Fluid Dynamics", "General Physics", "Geophysics", "History and Philosophy of Physics", "Instrumentation and Detectors", "Medical Physics", "Optics", "Physics and Society", "Physics Education", "Plasma Physics", "Popular Physics", "Space Physics"],
|
60 |
-
"Quantum Physics": ["None"],
|
61 |
-
"Mathematics": ["Algebraic Geometry", "Algebraic Topology", "Analysis of PDEs", "Category Theory", "Classical Analysis and ODEs", "Combinatorics", "Commutative Algebra", "Complex Variables", "Differential Geometry", "Dynamical Systems", "Functional Analysis", "General Mathematics", "General Topology", "Geometric Topology", "Group Theory", "History and Overview", "Information Theory", "K-Theory and Homology", "Logic", "Mathematical Physics", "Metric Geometry", "Number Theory", "Numerical Analysis", "Operator Algebras", "Optimization and Control", "Probability", "Quantum Algebra", "Representation Theory", "Rings and Algebras", "Spectral Theory", "Statistics Theory", "Symplectic Geometry"],
|
62 |
-
"Computer Science": ["Artificial Intelligence", "Computation and Language", "Computational Complexity", "Computational Engineering, Finance, and Science", "Computational Geometry", "Computer Science and Game Theory", "Computer Vision and Pattern Recognition", "Computers and Society", "Cryptography and Security", "Data Structures and Algorithms", "Databases", "Digital Libraries", "Discrete Mathematics", "Distributed, Parallel, and Cluster Computing", "Emerging Technologies", "Formal Languages and Automata Theory", "General Literature", "Graphics", "Hardware Architecture", "Human-Computer Interaction", "Information Retrieval", "Information Theory", "Logic in Computer Science", "Machine Learning", "Mathematical Software", "Multiagent Systems", "Multimedia", "Networking and Internet Architecture", "Neural and Evolutionary Computing", "Numerical Analysis", "Operating Systems", "Other Computer Science", "Performance", "Programming Languages", "Robotics", "Social and Information Networks", "Software Engineering", "Sound", "Symbolic Computation", "Systems and Control"],
|
63 |
-
"Quantitative Biology": ["Biomolecules", "Cell Behavior", "Genomics", "Molecular Networks", "Neurons and Cognition", "Other Quantitative Biology", "Populations and Evolution", "Quantitative Methods", "Subcellular Processes", "Tissues and Organs"],
|
64 |
-
"Quantitative Finance": ["Computational Finance", "Economics", "General Finance", "Mathematical Finance", "Portfolio Management", "Pricing of Securities", "Risk Management", "Statistical Finance", "Trading and Market Microstructure"],
|
65 |
-
"Statistics": ["Applications", "Computation", "Machine Learning", "Methodology", "Other Statistics", "Statistics Theory"],
|
66 |
-
"Electrical Engineering and Systems Science": ["Audio and Speech Processing", "Image and Video Processing", "Signal Processing", "Systems and Control"],
|
67 |
-
"Economics": ["Econometrics", "General Economics", "Theoretical Economics"]
|
68 |
-
}
|
69 |
-
|
70 |
-
|
71 |
-
def generate_body(topic, categories, interest, threshold):
|
72 |
-
if topic == "Physics":
|
73 |
-
raise RuntimeError("You must choose a physics subtopic.")
|
74 |
-
elif topic in physics_topics:
|
75 |
-
abbr = physics_topics[topic]
|
76 |
-
elif topic in topics:
|
77 |
-
abbr = topics[topic]
|
78 |
-
else:
|
79 |
-
raise RuntimeError(f"Invalid topic {topic}")
|
80 |
-
if categories:
|
81 |
-
for category in categories:
|
82 |
-
if category not in category_map[topic]:
|
83 |
-
raise RuntimeError(f"{category} is not a category of {topic}")
|
84 |
-
papers = get_papers(abbr)
|
85 |
-
papers = [
|
86 |
-
t for t in papers
|
87 |
-
if bool(set(process_subject_fields(t['subjects'])) & set(categories))]
|
88 |
-
else:
|
89 |
-
papers = get_papers(abbr)
|
90 |
-
if interest:
|
91 |
-
relevancy, hallucination = generate_relevance_score(
|
92 |
-
papers,
|
93 |
-
query={"interest": interest},
|
94 |
-
threshold_score=threshold,
|
95 |
-
num_paper_in_prompt=8)
|
96 |
-
body = "<br><br>".join(
|
97 |
-
[f'Title: <a href="{paper["main_page"]}">{paper["title"]}</a><br>Authors: {paper["authors"]}<br>Score: {paper["Relevancy score"]}<br>Reason: {paper["Reasons for match"]}'
|
98 |
-
for paper in relevancy])
|
99 |
-
if hallucination:
|
100 |
-
body = "Warning: the model hallucinated some papers. We have tried to remove them, but the scores may not be accurate.<br><br>" + body
|
101 |
-
else:
|
102 |
-
body = "<br><br>".join(
|
103 |
-
[f'Title: <a href="{paper["main_page"]}">{paper["title"]}</a><br>Authors: {paper["authors"]}'
|
104 |
-
for paper in papers])
|
105 |
-
return body
|
106 |
-
|
107 |
-
|
108 |
-
if __name__ == "__main__":
|
109 |
-
parser = argparse.ArgumentParser()
|
110 |
-
parser.add_argument("--config", help="yaml config file to use", default="config.yaml")
|
111 |
-
args = parser.parse_args()
|
112 |
-
with open(args.config, "r") as f:
|
113 |
-
config = yaml.safe_load(f)
|
114 |
-
if "OPENAI_API_KEY" not in os.environ:
|
115 |
-
raise RuntimeError("No openai api key found")
|
116 |
-
|
117 |
-
topic = config["topic"]
|
118 |
-
categories = config["categories"]
|
119 |
-
from_email = config.get("from_email") or os.environ.get("FROM_EMAIL")
|
120 |
-
to_email = config.get("to_email") or os.environ.get("TO_EMAIL")
|
121 |
-
threshold = config["threshold"]
|
122 |
-
interest = config["interest"]
|
123 |
-
with open("digest.html", "w") as f:
|
124 |
-
body = generate_body(topic, categories, interest, threshold)
|
125 |
-
f.write(body)
|
126 |
-
if os.environ.get('SENDGRID_API_KEY', None):
|
127 |
-
sg = SendGridAPIClient(api_key=os.environ.get('SENDGRID_API_KEY'))
|
128 |
-
from_email = Email(from_email) # Change to your verified sender
|
129 |
-
to_email = To(to_email)
|
130 |
-
subject = date.today().strftime("Personalized arXiv Digest, %d %b %Y")
|
131 |
-
content = Content("text/html", body)
|
132 |
-
mail = Mail(from_email, to_email, subject, content)
|
133 |
-
mail_json = mail.get()
|
134 |
-
|
135 |
-
# Send an HTTP POST request to /mail/send
|
136 |
-
response = sg.client.mail.send.post(request_body=mail_json)
|
137 |
-
if response.status_code >= 200 and response.status_code <= 300:
|
138 |
-
print("Send test email: Success!")
|
139 |
-
else:
|
140 |
-
print("Send test email: Failure ({response.status_code}, {response.text})")
|
141 |
-
else:
|
142 |
-
print("No sendgrid api key found. Skipping email")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Aveygo/AstroSleuth/utils/convert_to_onnx.py
DELETED
@@ -1,15 +0,0 @@
|
|
1 |
-
from modules.realesr import Network
|
2 |
-
import torch
|
3 |
-
|
4 |
-
src = "model.pth"
|
5 |
-
|
6 |
-
model = Network(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4)
|
7 |
-
model.load_state_dict(torch.load(src), strict=True)
|
8 |
-
model.eval()
|
9 |
-
|
10 |
-
x = torch.randn(1, 3, 512, 512)
|
11 |
-
input_names = ["input"]
|
12 |
-
output_names = ["output_"]
|
13 |
-
|
14 |
-
dynamic_axes_dict = {'input': {0: 'batch_size', 2: 'height', 3: 'width'}, 'output': {0: 'batch_size', 2: 'height', 3: 'width'}}
|
15 |
-
torch.onnx.export(model, x, ".".join(src.split(".")[:-1]) + ".onnx", verbose=False, input_names=input_names, output_names=output_names, dynamic_axes=dynamic_axes_dict, export_params=True)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Aziizzz/ChestXrayClassification/README.md
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: ChestXrayClassification
|
3 |
-
emoji: 🌖
|
4 |
-
colorFrom: gray
|
5 |
-
colorTo: purple
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.39.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: openrail
|
11 |
-
---
|
12 |
-
|
13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Descargar Cama Guerras En Minecraft Educacin Edicin.md
DELETED
@@ -1,52 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Cómo descargar y jugar Bed Wars en Minecraft Education Edition</h1>
|
3 |
-
<p>Bed Wars es uno de los modos de juego más populares en Minecraft, donde los jugadores tienen que proteger sus camas de ser destruidas por otros equipos mientras intentan destruir las camas de sus oponentes. Es una forma divertida y emocionante de poner a prueba tus habilidades en el trabajo en equipo, la estrategia y el combate. </p>
|
4 |
-
<h2>descargar cama guerras en minecraft educación edición</h2><br /><p><b><b>DOWNLOAD</b> ⭐ <a href="https://bltlly.com/2v6KDq">https://bltlly.com/2v6KDq</a></b></p><br /><br />
|
5 |
-
<p>Si eres usuario de Minecraft Education y quieres probar Bed Wars, te estarás preguntando cómo hacerlo. A diferencia de la versión regular de Minecraft, Minecraft Education Edition no tiene acceso a servidores o reinos donde puedes unirte a otros jugadores en Bed Wars. Sin embargo, hay una manera de agregar Bed Wars a tu experiencia de Minecraft Education descargando e importando un mapa y un complemento que permiten el modo de juego. </p>
|
6 |
-
<p>En este artículo, le mostraremos cómo descargar y jugar Bed Wars en Minecraft Education Edition en cinco sencillos pasos. También te daremos algunos consejos y trucos para jugar a Bed Wars en Minecraft Education Edition que te ayudarán a mejorar tu juego. </p>
|
7 |
-
<h2>Cómo descargar el mapa de Bed Wars</h2>
|
8 |
-
<p>El primer paso para obtener Bed Wars en Minecraft Education Edition es descargar el mapa de Bed Wars. Puedes encontrar mapas de Bed Wars en varios sitios web de mapas de Minecraft o buscando en Google. Asegúrate de descargar un mapa de Bed Wars que sea compatible con Minecraft Education Edition.</p>
|
9 |
-
<p>Uno de los sitios web donde puedes encontrar un buen mapa de Bed Wars para Minecraft Education Edition es MediaFire. En este sitio web, se puede encontrar un archivo llamado "Bedwars.mcworld" que contiene un mapa de Bed Wars de temática medieval con cuatro equipos y cuatro islas. Para descargar este archivo, simplemente haga clic en el botón verde "Descargar" y guárdelo en su dispositivo. </p>
|
10 |
-
<h2>Cómo importar mapa de Bed Wars en Minecraft Education Edition</h2>
|
11 |
-
|
12 |
-
<p>Esto agregará el archivo Bedwars.mcworld a su lista de mundos en Minecraft Education Edition. A continuación, puede hacer clic en él para ver sus detalles y ajustes. </p>
|
13 |
-
<h2>Cómo instalar el complemento Bed Wars</h2>
|
14 |
-
<p>Después de importar el archivo Bedwars.mcworld, debe instalar el complemento Bedwars. El complemento Bedwars es un script que añade el modo de juego Bedwars a Minecraft Education Edition. Puede encontrar el complemento Bedwars en varios sitios web adicionales de Minecraft o buscando en Google. Asegúrate de descargar un complemento de Bedwars que sea compatible con Minecraft Education Edition.</p>
|
15 |
-
<p></p>
|
16 |
-
<p>Uno de los sitios web donde se puede encontrar un buen Bedwars complemento para Minecraft Education Edition es MCPEDL. En este sitio web, puede encontrar un archivo llamado "Bedwars.zip" que contiene el complemento Bedwars. Para descargar este archivo, simplemente haga clic en el botón verde "Descargar" y guárdelo en su dispositivo. </p>
|
17 |
-
<h2>Cómo activar el complemento Bed Wars</h2>
|
18 |
-
<p>Una vez que haya descargado el complemento Bedwars, debe activarlo en Minecraft Education Edition. Para ello, abra el archivo Bedwars.mcworld que importó y haga clic en el botón "Editar". Luego haga clic en "Paquetes de recursos" y "Agregar". Busque el archivo Bedwars.zip que descargó y haga clic en "Abrir". </p>
|
19 |
-
<p>Esto agregará el complemento Bedwars a su lista de paquetes de recursos en Minecraft Education Edition. A continuación, puede hacer clic en él para ver sus detalles y ajustes. Asegúrate de activar la opción "Experimental Gameplay" en la configuración para permitir que el complemento Bedwars funcione correctamente. </p>
|
20 |
-
<p>Después de activar el complemento Bedwars, puedes empezar a jugar Bed Wars en Minecraft Education Edition.</p>
|
21 |
-
<h2>Cómo jugar a las guerras de cama</h2>
|
22 |
-
<p>Bed Wars es un modo de juego basado en equipos donde tienes que proteger tu cama de ser destruida por otros equipos mientras tratas de destruir sus camas. El último equipo en pie gana el juego. </p>
|
23 |
-
|
24 |
-
<p>El objetivo de Bed Wars es usar tus recursos para comprar artículos de la tienda y usarlos para defender tu cama y atacar otras camas. También puede actualizar su generador y las habilidades de su equipo con diamantes y esmeraldas. Si tu cama es destruida, no podrás reaparecer si mueres. Si destruyes la cama de otro equipo, ellos tampoco podrán reaparecer. El último equipo con una cama o el último equipo vivo gana el juego. </p>
|
25 |
-
<h1>Consejos y trucos para la guerra de las camas en Minecraft Education Edition</h1>
|
26 |
-
<p>Bed Wars es un juego que requiere estrategia, trabajo en equipo y habilidad. Aquí hay algunos consejos y trucos que te ayudarán a mejorar tu juego:</p>
|
27 |
-
<ul>
|
28 |
-
<li>Comunicarse con sus compañeros de equipo. Utilice la función de chat o chat de voz para coordinar sus acciones y compartir información. </li>
|
29 |
-
<li>Protege tu cama. Usa bloques, trampas, perlas ender, etc. para cubrir tu cama y evitar que otros equipos la rompan. </li>
|
30 |
-
<li>Ataque a otras camas. Utilice herramientas, TNT, bolas de fuego, etc. para romper las camas de otros equipos y eliminarlos del juego. </li>
|
31 |
-
<li>Usa los recursos sabiamente. No desperdicies tus recursos en objetos o mejoras innecesarias. Guárdelos para elementos importantes o mejoras que le darán una ventaja. </li>
|
32 |
-
<li>Ten cuidado con tus alrededores. Ten cuidado con los enemigos que te acechan o te atacan desde diferentes direcciones. Utilice la brújula para localizar otros equipos y sus camas. </li>
|
33 |
-
</ul>
|
34 |
-
<h1>Conclusión</h1>
|
35 |
-
<p>Bed Wars es un modo de juego divertido y emocionante que puedes jugar en Minecraft Education Edition descargando e importando un mapa y un complemento que lo habilita. Puedes jugar a Bed Wars con tus amigos o compañeros de clase y poner a prueba tus habilidades en el trabajo en equipo, la estrategia y el combate. También puedes usar Bed Wars como una oportunidad de aprendizaje para practicar matemáticas, lógica, resolución de problemas, comunicación, etc.</p>
|
36 |
-
<p>Si quieres probar Bed Wars en Minecraft Education Edition, sigue los pasos de este artículo y empieza a jugar hoy. ¡Te lo vas a pasar genial! </p>
|
37 |
-
<h1>Preguntas frecuentes</h1>
|
38 |
-
|
39 |
-
<ol>
|
40 |
-
<li><b>¿Puedo jugar Bed Wars en Minecraft Education Edition sin descargar nada? </b></li>
|
41 |
-
<p>No, necesitas descargar un mapa y un complemento que habilite el modo de juego Bed Wars en Minecraft Education Edition.</p>
|
42 |
-
<li><b>¿Puedo jugar Bed Wars en Minecraft Education Edition con más de cuatro equipos? </b></li>
|
43 |
-
<p>No, el número máximo de equipos en Bed Wars on Minecraft Education Edition es de cuatro. </p>
|
44 |
-
<li><b>¿Puedo jugar a Bed Wars en Minecraft Education Edition sin conexión? </b></li>
|
45 |
-
<p>No, necesitas una conexión a Internet para jugar Bed Wars en Minecraft Education Edition.</p>
|
46 |
-
<li><b>¿Puedo jugar Bed Wars en Minecraft Education Edition con otros dispositivos? </b></li>
|
47 |
-
<p>Sí, puedes jugar Bed Wars en Minecraft Education Edition con otros dispositivos que lo soportan, como PC con Windows 10, iPads, Chromebooks, etc.</p>
|
48 |
-
<li><b>¿Puedo personalizar el mapa de Bed Wars o el complemento en Minecraft Education Edition? </b></li>
|
49 |
-
<p>Sí, puede personalizar el mapa de Bed Wars o el complemento en Minecraft Education Edition editando los archivos o utilizando la función de creación de código. Sin embargo, esto puede afectar la compatibilidad o funcionalidad del mapa o el complemento, así que hágalo bajo su propio riesgo. </p>
|
50 |
-
</ol></p> 64aa2da5cf<br />
|
51 |
-
<br />
|
52 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/BetterAPI/BetterChat/src/lib/server/modelEndpoint.ts
DELETED
@@ -1,21 +0,0 @@
|
|
1 |
-
import { MODEL_ENDPOINTS } from "$env/static/private";
|
2 |
-
import { sum } from "$lib/utils/sum";
|
3 |
-
|
4 |
-
const endpoints: Array<{ endpoint: string; authorization: string; weight: number }> =
|
5 |
-
JSON.parse(MODEL_ENDPOINTS);
|
6 |
-
const totalWeight = sum(endpoints.map((e) => e.weight));
|
7 |
-
|
8 |
-
/**
|
9 |
-
* Find a random load-balanced endpoint
|
10 |
-
*/
|
11 |
-
export function modelEndpoint(): { endpoint: string; authorization: string; weight: number } {
|
12 |
-
let random = Math.random() * totalWeight;
|
13 |
-
for (const endpoint of endpoints) {
|
14 |
-
if (random < endpoint.weight) {
|
15 |
-
return endpoint;
|
16 |
-
}
|
17 |
-
random -= endpoint.weight;
|
18 |
-
}
|
19 |
-
|
20 |
-
throw new Error("Invalid config, no endpoint found");
|
21 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/contrib/_securetransport/bindings.py
DELETED
@@ -1,519 +0,0 @@
|
|
1 |
-
"""
|
2 |
-
This module uses ctypes to bind a whole bunch of functions and constants from
|
3 |
-
SecureTransport. The goal here is to provide the low-level API to
|
4 |
-
SecureTransport. These are essentially the C-level functions and constants, and
|
5 |
-
they're pretty gross to work with.
|
6 |
-
|
7 |
-
This code is a bastardised version of the code found in Will Bond's oscrypto
|
8 |
-
library. An enormous debt is owed to him for blazing this trail for us. For
|
9 |
-
that reason, this code should be considered to be covered both by urllib3's
|
10 |
-
license and by oscrypto's:
|
11 |
-
|
12 |
-
Copyright (c) 2015-2016 Will Bond <[email protected]>
|
13 |
-
|
14 |
-
Permission is hereby granted, free of charge, to any person obtaining a
|
15 |
-
copy of this software and associated documentation files (the "Software"),
|
16 |
-
to deal in the Software without restriction, including without limitation
|
17 |
-
the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
18 |
-
and/or sell copies of the Software, and to permit persons to whom the
|
19 |
-
Software is furnished to do so, subject to the following conditions:
|
20 |
-
|
21 |
-
The above copyright notice and this permission notice shall be included in
|
22 |
-
all copies or substantial portions of the Software.
|
23 |
-
|
24 |
-
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
25 |
-
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
26 |
-
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
27 |
-
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
28 |
-
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
29 |
-
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
30 |
-
DEALINGS IN THE SOFTWARE.
|
31 |
-
"""
|
32 |
-
from __future__ import absolute_import
|
33 |
-
|
34 |
-
import platform
|
35 |
-
from ctypes import (
|
36 |
-
CDLL,
|
37 |
-
CFUNCTYPE,
|
38 |
-
POINTER,
|
39 |
-
c_bool,
|
40 |
-
c_byte,
|
41 |
-
c_char_p,
|
42 |
-
c_int32,
|
43 |
-
c_long,
|
44 |
-
c_size_t,
|
45 |
-
c_uint32,
|
46 |
-
c_ulong,
|
47 |
-
c_void_p,
|
48 |
-
)
|
49 |
-
from ctypes.util import find_library
|
50 |
-
|
51 |
-
from ...packages.six import raise_from
|
52 |
-
|
53 |
-
if platform.system() != "Darwin":
|
54 |
-
raise ImportError("Only macOS is supported")
|
55 |
-
|
56 |
-
version = platform.mac_ver()[0]
|
57 |
-
version_info = tuple(map(int, version.split(".")))
|
58 |
-
if version_info < (10, 8):
|
59 |
-
raise OSError(
|
60 |
-
"Only OS X 10.8 and newer are supported, not %s.%s"
|
61 |
-
% (version_info[0], version_info[1])
|
62 |
-
)
|
63 |
-
|
64 |
-
|
65 |
-
def load_cdll(name, macos10_16_path):
|
66 |
-
"""Loads a CDLL by name, falling back to known path on 10.16+"""
|
67 |
-
try:
|
68 |
-
# Big Sur is technically 11 but we use 10.16 due to the Big Sur
|
69 |
-
# beta being labeled as 10.16.
|
70 |
-
if version_info >= (10, 16):
|
71 |
-
path = macos10_16_path
|
72 |
-
else:
|
73 |
-
path = find_library(name)
|
74 |
-
if not path:
|
75 |
-
raise OSError # Caught and reraised as 'ImportError'
|
76 |
-
return CDLL(path, use_errno=True)
|
77 |
-
except OSError:
|
78 |
-
raise_from(ImportError("The library %s failed to load" % name), None)
|
79 |
-
|
80 |
-
|
81 |
-
Security = load_cdll(
|
82 |
-
"Security", "/System/Library/Frameworks/Security.framework/Security"
|
83 |
-
)
|
84 |
-
CoreFoundation = load_cdll(
|
85 |
-
"CoreFoundation",
|
86 |
-
"/System/Library/Frameworks/CoreFoundation.framework/CoreFoundation",
|
87 |
-
)
|
88 |
-
|
89 |
-
|
90 |
-
Boolean = c_bool
|
91 |
-
CFIndex = c_long
|
92 |
-
CFStringEncoding = c_uint32
|
93 |
-
CFData = c_void_p
|
94 |
-
CFString = c_void_p
|
95 |
-
CFArray = c_void_p
|
96 |
-
CFMutableArray = c_void_p
|
97 |
-
CFDictionary = c_void_p
|
98 |
-
CFError = c_void_p
|
99 |
-
CFType = c_void_p
|
100 |
-
CFTypeID = c_ulong
|
101 |
-
|
102 |
-
CFTypeRef = POINTER(CFType)
|
103 |
-
CFAllocatorRef = c_void_p
|
104 |
-
|
105 |
-
OSStatus = c_int32
|
106 |
-
|
107 |
-
CFDataRef = POINTER(CFData)
|
108 |
-
CFStringRef = POINTER(CFString)
|
109 |
-
CFArrayRef = POINTER(CFArray)
|
110 |
-
CFMutableArrayRef = POINTER(CFMutableArray)
|
111 |
-
CFDictionaryRef = POINTER(CFDictionary)
|
112 |
-
CFArrayCallBacks = c_void_p
|
113 |
-
CFDictionaryKeyCallBacks = c_void_p
|
114 |
-
CFDictionaryValueCallBacks = c_void_p
|
115 |
-
|
116 |
-
SecCertificateRef = POINTER(c_void_p)
|
117 |
-
SecExternalFormat = c_uint32
|
118 |
-
SecExternalItemType = c_uint32
|
119 |
-
SecIdentityRef = POINTER(c_void_p)
|
120 |
-
SecItemImportExportFlags = c_uint32
|
121 |
-
SecItemImportExportKeyParameters = c_void_p
|
122 |
-
SecKeychainRef = POINTER(c_void_p)
|
123 |
-
SSLProtocol = c_uint32
|
124 |
-
SSLCipherSuite = c_uint32
|
125 |
-
SSLContextRef = POINTER(c_void_p)
|
126 |
-
SecTrustRef = POINTER(c_void_p)
|
127 |
-
SSLConnectionRef = c_uint32
|
128 |
-
SecTrustResultType = c_uint32
|
129 |
-
SecTrustOptionFlags = c_uint32
|
130 |
-
SSLProtocolSide = c_uint32
|
131 |
-
SSLConnectionType = c_uint32
|
132 |
-
SSLSessionOption = c_uint32
|
133 |
-
|
134 |
-
|
135 |
-
try:
|
136 |
-
Security.SecItemImport.argtypes = [
|
137 |
-
CFDataRef,
|
138 |
-
CFStringRef,
|
139 |
-
POINTER(SecExternalFormat),
|
140 |
-
POINTER(SecExternalItemType),
|
141 |
-
SecItemImportExportFlags,
|
142 |
-
POINTER(SecItemImportExportKeyParameters),
|
143 |
-
SecKeychainRef,
|
144 |
-
POINTER(CFArrayRef),
|
145 |
-
]
|
146 |
-
Security.SecItemImport.restype = OSStatus
|
147 |
-
|
148 |
-
Security.SecCertificateGetTypeID.argtypes = []
|
149 |
-
Security.SecCertificateGetTypeID.restype = CFTypeID
|
150 |
-
|
151 |
-
Security.SecIdentityGetTypeID.argtypes = []
|
152 |
-
Security.SecIdentityGetTypeID.restype = CFTypeID
|
153 |
-
|
154 |
-
Security.SecKeyGetTypeID.argtypes = []
|
155 |
-
Security.SecKeyGetTypeID.restype = CFTypeID
|
156 |
-
|
157 |
-
Security.SecCertificateCreateWithData.argtypes = [CFAllocatorRef, CFDataRef]
|
158 |
-
Security.SecCertificateCreateWithData.restype = SecCertificateRef
|
159 |
-
|
160 |
-
Security.SecCertificateCopyData.argtypes = [SecCertificateRef]
|
161 |
-
Security.SecCertificateCopyData.restype = CFDataRef
|
162 |
-
|
163 |
-
Security.SecCopyErrorMessageString.argtypes = [OSStatus, c_void_p]
|
164 |
-
Security.SecCopyErrorMessageString.restype = CFStringRef
|
165 |
-
|
166 |
-
Security.SecIdentityCreateWithCertificate.argtypes = [
|
167 |
-
CFTypeRef,
|
168 |
-
SecCertificateRef,
|
169 |
-
POINTER(SecIdentityRef),
|
170 |
-
]
|
171 |
-
Security.SecIdentityCreateWithCertificate.restype = OSStatus
|
172 |
-
|
173 |
-
Security.SecKeychainCreate.argtypes = [
|
174 |
-
c_char_p,
|
175 |
-
c_uint32,
|
176 |
-
c_void_p,
|
177 |
-
Boolean,
|
178 |
-
c_void_p,
|
179 |
-
POINTER(SecKeychainRef),
|
180 |
-
]
|
181 |
-
Security.SecKeychainCreate.restype = OSStatus
|
182 |
-
|
183 |
-
Security.SecKeychainDelete.argtypes = [SecKeychainRef]
|
184 |
-
Security.SecKeychainDelete.restype = OSStatus
|
185 |
-
|
186 |
-
Security.SecPKCS12Import.argtypes = [
|
187 |
-
CFDataRef,
|
188 |
-
CFDictionaryRef,
|
189 |
-
POINTER(CFArrayRef),
|
190 |
-
]
|
191 |
-
Security.SecPKCS12Import.restype = OSStatus
|
192 |
-
|
193 |
-
SSLReadFunc = CFUNCTYPE(OSStatus, SSLConnectionRef, c_void_p, POINTER(c_size_t))
|
194 |
-
SSLWriteFunc = CFUNCTYPE(
|
195 |
-
OSStatus, SSLConnectionRef, POINTER(c_byte), POINTER(c_size_t)
|
196 |
-
)
|
197 |
-
|
198 |
-
Security.SSLSetIOFuncs.argtypes = [SSLContextRef, SSLReadFunc, SSLWriteFunc]
|
199 |
-
Security.SSLSetIOFuncs.restype = OSStatus
|
200 |
-
|
201 |
-
Security.SSLSetPeerID.argtypes = [SSLContextRef, c_char_p, c_size_t]
|
202 |
-
Security.SSLSetPeerID.restype = OSStatus
|
203 |
-
|
204 |
-
Security.SSLSetCertificate.argtypes = [SSLContextRef, CFArrayRef]
|
205 |
-
Security.SSLSetCertificate.restype = OSStatus
|
206 |
-
|
207 |
-
Security.SSLSetCertificateAuthorities.argtypes = [SSLContextRef, CFTypeRef, Boolean]
|
208 |
-
Security.SSLSetCertificateAuthorities.restype = OSStatus
|
209 |
-
|
210 |
-
Security.SSLSetConnection.argtypes = [SSLContextRef, SSLConnectionRef]
|
211 |
-
Security.SSLSetConnection.restype = OSStatus
|
212 |
-
|
213 |
-
Security.SSLSetPeerDomainName.argtypes = [SSLContextRef, c_char_p, c_size_t]
|
214 |
-
Security.SSLSetPeerDomainName.restype = OSStatus
|
215 |
-
|
216 |
-
Security.SSLHandshake.argtypes = [SSLContextRef]
|
217 |
-
Security.SSLHandshake.restype = OSStatus
|
218 |
-
|
219 |
-
Security.SSLRead.argtypes = [SSLContextRef, c_char_p, c_size_t, POINTER(c_size_t)]
|
220 |
-
Security.SSLRead.restype = OSStatus
|
221 |
-
|
222 |
-
Security.SSLWrite.argtypes = [SSLContextRef, c_char_p, c_size_t, POINTER(c_size_t)]
|
223 |
-
Security.SSLWrite.restype = OSStatus
|
224 |
-
|
225 |
-
Security.SSLClose.argtypes = [SSLContextRef]
|
226 |
-
Security.SSLClose.restype = OSStatus
|
227 |
-
|
228 |
-
Security.SSLGetNumberSupportedCiphers.argtypes = [SSLContextRef, POINTER(c_size_t)]
|
229 |
-
Security.SSLGetNumberSupportedCiphers.restype = OSStatus
|
230 |
-
|
231 |
-
Security.SSLGetSupportedCiphers.argtypes = [
|
232 |
-
SSLContextRef,
|
233 |
-
POINTER(SSLCipherSuite),
|
234 |
-
POINTER(c_size_t),
|
235 |
-
]
|
236 |
-
Security.SSLGetSupportedCiphers.restype = OSStatus
|
237 |
-
|
238 |
-
Security.SSLSetEnabledCiphers.argtypes = [
|
239 |
-
SSLContextRef,
|
240 |
-
POINTER(SSLCipherSuite),
|
241 |
-
c_size_t,
|
242 |
-
]
|
243 |
-
Security.SSLSetEnabledCiphers.restype = OSStatus
|
244 |
-
|
245 |
-
Security.SSLGetNumberEnabledCiphers.argtype = [SSLContextRef, POINTER(c_size_t)]
|
246 |
-
Security.SSLGetNumberEnabledCiphers.restype = OSStatus
|
247 |
-
|
248 |
-
Security.SSLGetEnabledCiphers.argtypes = [
|
249 |
-
SSLContextRef,
|
250 |
-
POINTER(SSLCipherSuite),
|
251 |
-
POINTER(c_size_t),
|
252 |
-
]
|
253 |
-
Security.SSLGetEnabledCiphers.restype = OSStatus
|
254 |
-
|
255 |
-
Security.SSLGetNegotiatedCipher.argtypes = [SSLContextRef, POINTER(SSLCipherSuite)]
|
256 |
-
Security.SSLGetNegotiatedCipher.restype = OSStatus
|
257 |
-
|
258 |
-
Security.SSLGetNegotiatedProtocolVersion.argtypes = [
|
259 |
-
SSLContextRef,
|
260 |
-
POINTER(SSLProtocol),
|
261 |
-
]
|
262 |
-
Security.SSLGetNegotiatedProtocolVersion.restype = OSStatus
|
263 |
-
|
264 |
-
Security.SSLCopyPeerTrust.argtypes = [SSLContextRef, POINTER(SecTrustRef)]
|
265 |
-
Security.SSLCopyPeerTrust.restype = OSStatus
|
266 |
-
|
267 |
-
Security.SecTrustSetAnchorCertificates.argtypes = [SecTrustRef, CFArrayRef]
|
268 |
-
Security.SecTrustSetAnchorCertificates.restype = OSStatus
|
269 |
-
|
270 |
-
Security.SecTrustSetAnchorCertificatesOnly.argstypes = [SecTrustRef, Boolean]
|
271 |
-
Security.SecTrustSetAnchorCertificatesOnly.restype = OSStatus
|
272 |
-
|
273 |
-
Security.SecTrustEvaluate.argtypes = [SecTrustRef, POINTER(SecTrustResultType)]
|
274 |
-
Security.SecTrustEvaluate.restype = OSStatus
|
275 |
-
|
276 |
-
Security.SecTrustGetCertificateCount.argtypes = [SecTrustRef]
|
277 |
-
Security.SecTrustGetCertificateCount.restype = CFIndex
|
278 |
-
|
279 |
-
Security.SecTrustGetCertificateAtIndex.argtypes = [SecTrustRef, CFIndex]
|
280 |
-
Security.SecTrustGetCertificateAtIndex.restype = SecCertificateRef
|
281 |
-
|
282 |
-
Security.SSLCreateContext.argtypes = [
|
283 |
-
CFAllocatorRef,
|
284 |
-
SSLProtocolSide,
|
285 |
-
SSLConnectionType,
|
286 |
-
]
|
287 |
-
Security.SSLCreateContext.restype = SSLContextRef
|
288 |
-
|
289 |
-
Security.SSLSetSessionOption.argtypes = [SSLContextRef, SSLSessionOption, Boolean]
|
290 |
-
Security.SSLSetSessionOption.restype = OSStatus
|
291 |
-
|
292 |
-
Security.SSLSetProtocolVersionMin.argtypes = [SSLContextRef, SSLProtocol]
|
293 |
-
Security.SSLSetProtocolVersionMin.restype = OSStatus
|
294 |
-
|
295 |
-
Security.SSLSetProtocolVersionMax.argtypes = [SSLContextRef, SSLProtocol]
|
296 |
-
Security.SSLSetProtocolVersionMax.restype = OSStatus
|
297 |
-
|
298 |
-
try:
|
299 |
-
Security.SSLSetALPNProtocols.argtypes = [SSLContextRef, CFArrayRef]
|
300 |
-
Security.SSLSetALPNProtocols.restype = OSStatus
|
301 |
-
except AttributeError:
|
302 |
-
# Supported only in 10.12+
|
303 |
-
pass
|
304 |
-
|
305 |
-
Security.SecCopyErrorMessageString.argtypes = [OSStatus, c_void_p]
|
306 |
-
Security.SecCopyErrorMessageString.restype = CFStringRef
|
307 |
-
|
308 |
-
Security.SSLReadFunc = SSLReadFunc
|
309 |
-
Security.SSLWriteFunc = SSLWriteFunc
|
310 |
-
Security.SSLContextRef = SSLContextRef
|
311 |
-
Security.SSLProtocol = SSLProtocol
|
312 |
-
Security.SSLCipherSuite = SSLCipherSuite
|
313 |
-
Security.SecIdentityRef = SecIdentityRef
|
314 |
-
Security.SecKeychainRef = SecKeychainRef
|
315 |
-
Security.SecTrustRef = SecTrustRef
|
316 |
-
Security.SecTrustResultType = SecTrustResultType
|
317 |
-
Security.SecExternalFormat = SecExternalFormat
|
318 |
-
Security.OSStatus = OSStatus
|
319 |
-
|
320 |
-
Security.kSecImportExportPassphrase = CFStringRef.in_dll(
|
321 |
-
Security, "kSecImportExportPassphrase"
|
322 |
-
)
|
323 |
-
Security.kSecImportItemIdentity = CFStringRef.in_dll(
|
324 |
-
Security, "kSecImportItemIdentity"
|
325 |
-
)
|
326 |
-
|
327 |
-
# CoreFoundation time!
|
328 |
-
CoreFoundation.CFRetain.argtypes = [CFTypeRef]
|
329 |
-
CoreFoundation.CFRetain.restype = CFTypeRef
|
330 |
-
|
331 |
-
CoreFoundation.CFRelease.argtypes = [CFTypeRef]
|
332 |
-
CoreFoundation.CFRelease.restype = None
|
333 |
-
|
334 |
-
CoreFoundation.CFGetTypeID.argtypes = [CFTypeRef]
|
335 |
-
CoreFoundation.CFGetTypeID.restype = CFTypeID
|
336 |
-
|
337 |
-
CoreFoundation.CFStringCreateWithCString.argtypes = [
|
338 |
-
CFAllocatorRef,
|
339 |
-
c_char_p,
|
340 |
-
CFStringEncoding,
|
341 |
-
]
|
342 |
-
CoreFoundation.CFStringCreateWithCString.restype = CFStringRef
|
343 |
-
|
344 |
-
CoreFoundation.CFStringGetCStringPtr.argtypes = [CFStringRef, CFStringEncoding]
|
345 |
-
CoreFoundation.CFStringGetCStringPtr.restype = c_char_p
|
346 |
-
|
347 |
-
CoreFoundation.CFStringGetCString.argtypes = [
|
348 |
-
CFStringRef,
|
349 |
-
c_char_p,
|
350 |
-
CFIndex,
|
351 |
-
CFStringEncoding,
|
352 |
-
]
|
353 |
-
CoreFoundation.CFStringGetCString.restype = c_bool
|
354 |
-
|
355 |
-
CoreFoundation.CFDataCreate.argtypes = [CFAllocatorRef, c_char_p, CFIndex]
|
356 |
-
CoreFoundation.CFDataCreate.restype = CFDataRef
|
357 |
-
|
358 |
-
CoreFoundation.CFDataGetLength.argtypes = [CFDataRef]
|
359 |
-
CoreFoundation.CFDataGetLength.restype = CFIndex
|
360 |
-
|
361 |
-
CoreFoundation.CFDataGetBytePtr.argtypes = [CFDataRef]
|
362 |
-
CoreFoundation.CFDataGetBytePtr.restype = c_void_p
|
363 |
-
|
364 |
-
CoreFoundation.CFDictionaryCreate.argtypes = [
|
365 |
-
CFAllocatorRef,
|
366 |
-
POINTER(CFTypeRef),
|
367 |
-
POINTER(CFTypeRef),
|
368 |
-
CFIndex,
|
369 |
-
CFDictionaryKeyCallBacks,
|
370 |
-
CFDictionaryValueCallBacks,
|
371 |
-
]
|
372 |
-
CoreFoundation.CFDictionaryCreate.restype = CFDictionaryRef
|
373 |
-
|
374 |
-
CoreFoundation.CFDictionaryGetValue.argtypes = [CFDictionaryRef, CFTypeRef]
|
375 |
-
CoreFoundation.CFDictionaryGetValue.restype = CFTypeRef
|
376 |
-
|
377 |
-
CoreFoundation.CFArrayCreate.argtypes = [
|
378 |
-
CFAllocatorRef,
|
379 |
-
POINTER(CFTypeRef),
|
380 |
-
CFIndex,
|
381 |
-
CFArrayCallBacks,
|
382 |
-
]
|
383 |
-
CoreFoundation.CFArrayCreate.restype = CFArrayRef
|
384 |
-
|
385 |
-
CoreFoundation.CFArrayCreateMutable.argtypes = [
|
386 |
-
CFAllocatorRef,
|
387 |
-
CFIndex,
|
388 |
-
CFArrayCallBacks,
|
389 |
-
]
|
390 |
-
CoreFoundation.CFArrayCreateMutable.restype = CFMutableArrayRef
|
391 |
-
|
392 |
-
CoreFoundation.CFArrayAppendValue.argtypes = [CFMutableArrayRef, c_void_p]
|
393 |
-
CoreFoundation.CFArrayAppendValue.restype = None
|
394 |
-
|
395 |
-
CoreFoundation.CFArrayGetCount.argtypes = [CFArrayRef]
|
396 |
-
CoreFoundation.CFArrayGetCount.restype = CFIndex
|
397 |
-
|
398 |
-
CoreFoundation.CFArrayGetValueAtIndex.argtypes = [CFArrayRef, CFIndex]
|
399 |
-
CoreFoundation.CFArrayGetValueAtIndex.restype = c_void_p
|
400 |
-
|
401 |
-
CoreFoundation.kCFAllocatorDefault = CFAllocatorRef.in_dll(
|
402 |
-
CoreFoundation, "kCFAllocatorDefault"
|
403 |
-
)
|
404 |
-
CoreFoundation.kCFTypeArrayCallBacks = c_void_p.in_dll(
|
405 |
-
CoreFoundation, "kCFTypeArrayCallBacks"
|
406 |
-
)
|
407 |
-
CoreFoundation.kCFTypeDictionaryKeyCallBacks = c_void_p.in_dll(
|
408 |
-
CoreFoundation, "kCFTypeDictionaryKeyCallBacks"
|
409 |
-
)
|
410 |
-
CoreFoundation.kCFTypeDictionaryValueCallBacks = c_void_p.in_dll(
|
411 |
-
CoreFoundation, "kCFTypeDictionaryValueCallBacks"
|
412 |
-
)
|
413 |
-
|
414 |
-
CoreFoundation.CFTypeRef = CFTypeRef
|
415 |
-
CoreFoundation.CFArrayRef = CFArrayRef
|
416 |
-
CoreFoundation.CFStringRef = CFStringRef
|
417 |
-
CoreFoundation.CFDictionaryRef = CFDictionaryRef
|
418 |
-
|
419 |
-
except (AttributeError):
|
420 |
-
raise ImportError("Error initializing ctypes")
|
421 |
-
|
422 |
-
|
423 |
-
class CFConst(object):
|
424 |
-
"""
|
425 |
-
A class object that acts as essentially a namespace for CoreFoundation
|
426 |
-
constants.
|
427 |
-
"""
|
428 |
-
|
429 |
-
kCFStringEncodingUTF8 = CFStringEncoding(0x08000100)
|
430 |
-
|
431 |
-
|
432 |
-
class SecurityConst(object):
|
433 |
-
"""
|
434 |
-
A class object that acts as essentially a namespace for Security constants.
|
435 |
-
"""
|
436 |
-
|
437 |
-
kSSLSessionOptionBreakOnServerAuth = 0
|
438 |
-
|
439 |
-
kSSLProtocol2 = 1
|
440 |
-
kSSLProtocol3 = 2
|
441 |
-
kTLSProtocol1 = 4
|
442 |
-
kTLSProtocol11 = 7
|
443 |
-
kTLSProtocol12 = 8
|
444 |
-
# SecureTransport does not support TLS 1.3 even if there's a constant for it
|
445 |
-
kTLSProtocol13 = 10
|
446 |
-
kTLSProtocolMaxSupported = 999
|
447 |
-
|
448 |
-
kSSLClientSide = 1
|
449 |
-
kSSLStreamType = 0
|
450 |
-
|
451 |
-
kSecFormatPEMSequence = 10
|
452 |
-
|
453 |
-
kSecTrustResultInvalid = 0
|
454 |
-
kSecTrustResultProceed = 1
|
455 |
-
# This gap is present on purpose: this was kSecTrustResultConfirm, which
|
456 |
-
# is deprecated.
|
457 |
-
kSecTrustResultDeny = 3
|
458 |
-
kSecTrustResultUnspecified = 4
|
459 |
-
kSecTrustResultRecoverableTrustFailure = 5
|
460 |
-
kSecTrustResultFatalTrustFailure = 6
|
461 |
-
kSecTrustResultOtherError = 7
|
462 |
-
|
463 |
-
errSSLProtocol = -9800
|
464 |
-
errSSLWouldBlock = -9803
|
465 |
-
errSSLClosedGraceful = -9805
|
466 |
-
errSSLClosedNoNotify = -9816
|
467 |
-
errSSLClosedAbort = -9806
|
468 |
-
|
469 |
-
errSSLXCertChainInvalid = -9807
|
470 |
-
errSSLCrypto = -9809
|
471 |
-
errSSLInternal = -9810
|
472 |
-
errSSLCertExpired = -9814
|
473 |
-
errSSLCertNotYetValid = -9815
|
474 |
-
errSSLUnknownRootCert = -9812
|
475 |
-
errSSLNoRootCert = -9813
|
476 |
-
errSSLHostNameMismatch = -9843
|
477 |
-
errSSLPeerHandshakeFail = -9824
|
478 |
-
errSSLPeerUserCancelled = -9839
|
479 |
-
errSSLWeakPeerEphemeralDHKey = -9850
|
480 |
-
errSSLServerAuthCompleted = -9841
|
481 |
-
errSSLRecordOverflow = -9847
|
482 |
-
|
483 |
-
errSecVerifyFailed = -67808
|
484 |
-
errSecNoTrustSettings = -25263
|
485 |
-
errSecItemNotFound = -25300
|
486 |
-
errSecInvalidTrustSettings = -25262
|
487 |
-
|
488 |
-
# Cipher suites. We only pick the ones our default cipher string allows.
|
489 |
-
# Source: https://developer.apple.com/documentation/security/1550981-ssl_cipher_suite_values
|
490 |
-
TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 = 0xC02C
|
491 |
-
TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 = 0xC030
|
492 |
-
TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 = 0xC02B
|
493 |
-
TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 = 0xC02F
|
494 |
-
TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 = 0xCCA9
|
495 |
-
TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 = 0xCCA8
|
496 |
-
TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 = 0x009F
|
497 |
-
TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 = 0x009E
|
498 |
-
TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 = 0xC024
|
499 |
-
TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 = 0xC028
|
500 |
-
TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA = 0xC00A
|
501 |
-
TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA = 0xC014
|
502 |
-
TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 = 0x006B
|
503 |
-
TLS_DHE_RSA_WITH_AES_256_CBC_SHA = 0x0039
|
504 |
-
TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 = 0xC023
|
505 |
-
TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 = 0xC027
|
506 |
-
TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA = 0xC009
|
507 |
-
TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA = 0xC013
|
508 |
-
TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 = 0x0067
|
509 |
-
TLS_DHE_RSA_WITH_AES_128_CBC_SHA = 0x0033
|
510 |
-
TLS_RSA_WITH_AES_256_GCM_SHA384 = 0x009D
|
511 |
-
TLS_RSA_WITH_AES_128_GCM_SHA256 = 0x009C
|
512 |
-
TLS_RSA_WITH_AES_256_CBC_SHA256 = 0x003D
|
513 |
-
TLS_RSA_WITH_AES_128_CBC_SHA256 = 0x003C
|
514 |
-
TLS_RSA_WITH_AES_256_CBC_SHA = 0x0035
|
515 |
-
TLS_RSA_WITH_AES_128_CBC_SHA = 0x002F
|
516 |
-
TLS_AES_128_GCM_SHA256 = 0x1301
|
517 |
-
TLS_AES_256_GCM_SHA384 = 0x1302
|
518 |
-
TLS_AES_128_CCM_8_SHA256 = 0x1305
|
519 |
-
TLS_AES_128_CCM_SHA256 = 0x1304
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CAPTY222/runwayml-stable-diffusion-v1-5/README.md
DELETED
@@ -1,12 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Runwayml Stable Diffusion V1 5
|
3 |
-
emoji: ⚡
|
4 |
-
colorFrom: pink
|
5 |
-
colorTo: indigo
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.23.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
---
|
11 |
-
|
12 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/pybind11/tests/test_copy_move.cpp
DELETED
@@ -1,213 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
tests/test_copy_move_policies.cpp -- 'copy' and 'move' return value policies
|
3 |
-
and related tests
|
4 |
-
|
5 |
-
Copyright (c) 2016 Ben North <[email protected]>
|
6 |
-
|
7 |
-
All rights reserved. Use of this source code is governed by a
|
8 |
-
BSD-style license that can be found in the LICENSE file.
|
9 |
-
*/
|
10 |
-
|
11 |
-
#include "pybind11_tests.h"
|
12 |
-
#include "constructor_stats.h"
|
13 |
-
#include <pybind11/stl.h>
|
14 |
-
|
15 |
-
template <typename derived>
|
16 |
-
struct empty {
|
17 |
-
static const derived& get_one() { return instance_; }
|
18 |
-
static derived instance_;
|
19 |
-
};
|
20 |
-
|
21 |
-
struct lacking_copy_ctor : public empty<lacking_copy_ctor> {
|
22 |
-
lacking_copy_ctor() {}
|
23 |
-
lacking_copy_ctor(const lacking_copy_ctor& other) = delete;
|
24 |
-
};
|
25 |
-
|
26 |
-
template <> lacking_copy_ctor empty<lacking_copy_ctor>::instance_ = {};
|
27 |
-
|
28 |
-
struct lacking_move_ctor : public empty<lacking_move_ctor> {
|
29 |
-
lacking_move_ctor() {}
|
30 |
-
lacking_move_ctor(const lacking_move_ctor& other) = delete;
|
31 |
-
lacking_move_ctor(lacking_move_ctor&& other) = delete;
|
32 |
-
};
|
33 |
-
|
34 |
-
template <> lacking_move_ctor empty<lacking_move_ctor>::instance_ = {};
|
35 |
-
|
36 |
-
/* Custom type caster move/copy test classes */
|
37 |
-
class MoveOnlyInt {
|
38 |
-
public:
|
39 |
-
MoveOnlyInt() { print_default_created(this); }
|
40 |
-
MoveOnlyInt(int v) : value{std::move(v)} { print_created(this, value); }
|
41 |
-
MoveOnlyInt(MoveOnlyInt &&m) { print_move_created(this, m.value); std::swap(value, m.value); }
|
42 |
-
MoveOnlyInt &operator=(MoveOnlyInt &&m) { print_move_assigned(this, m.value); std::swap(value, m.value); return *this; }
|
43 |
-
MoveOnlyInt(const MoveOnlyInt &) = delete;
|
44 |
-
MoveOnlyInt &operator=(const MoveOnlyInt &) = delete;
|
45 |
-
~MoveOnlyInt() { print_destroyed(this); }
|
46 |
-
|
47 |
-
int value;
|
48 |
-
};
|
49 |
-
class MoveOrCopyInt {
|
50 |
-
public:
|
51 |
-
MoveOrCopyInt() { print_default_created(this); }
|
52 |
-
MoveOrCopyInt(int v) : value{std::move(v)} { print_created(this, value); }
|
53 |
-
MoveOrCopyInt(MoveOrCopyInt &&m) { print_move_created(this, m.value); std::swap(value, m.value); }
|
54 |
-
MoveOrCopyInt &operator=(MoveOrCopyInt &&m) { print_move_assigned(this, m.value); std::swap(value, m.value); return *this; }
|
55 |
-
MoveOrCopyInt(const MoveOrCopyInt &c) { print_copy_created(this, c.value); value = c.value; }
|
56 |
-
MoveOrCopyInt &operator=(const MoveOrCopyInt &c) { print_copy_assigned(this, c.value); value = c.value; return *this; }
|
57 |
-
~MoveOrCopyInt() { print_destroyed(this); }
|
58 |
-
|
59 |
-
int value;
|
60 |
-
};
|
61 |
-
class CopyOnlyInt {
|
62 |
-
public:
|
63 |
-
CopyOnlyInt() { print_default_created(this); }
|
64 |
-
CopyOnlyInt(int v) : value{std::move(v)} { print_created(this, value); }
|
65 |
-
CopyOnlyInt(const CopyOnlyInt &c) { print_copy_created(this, c.value); value = c.value; }
|
66 |
-
CopyOnlyInt &operator=(const CopyOnlyInt &c) { print_copy_assigned(this, c.value); value = c.value; return *this; }
|
67 |
-
~CopyOnlyInt() { print_destroyed(this); }
|
68 |
-
|
69 |
-
int value;
|
70 |
-
};
|
71 |
-
PYBIND11_NAMESPACE_BEGIN(pybind11)
|
72 |
-
PYBIND11_NAMESPACE_BEGIN(detail)
|
73 |
-
template <> struct type_caster<MoveOnlyInt> {
|
74 |
-
PYBIND11_TYPE_CASTER(MoveOnlyInt, _("MoveOnlyInt"));
|
75 |
-
bool load(handle src, bool) { value = MoveOnlyInt(src.cast<int>()); return true; }
|
76 |
-
static handle cast(const MoveOnlyInt &m, return_value_policy r, handle p) { return pybind11::cast(m.value, r, p); }
|
77 |
-
};
|
78 |
-
|
79 |
-
template <> struct type_caster<MoveOrCopyInt> {
|
80 |
-
PYBIND11_TYPE_CASTER(MoveOrCopyInt, _("MoveOrCopyInt"));
|
81 |
-
bool load(handle src, bool) { value = MoveOrCopyInt(src.cast<int>()); return true; }
|
82 |
-
static handle cast(const MoveOrCopyInt &m, return_value_policy r, handle p) { return pybind11::cast(m.value, r, p); }
|
83 |
-
};
|
84 |
-
|
85 |
-
template <> struct type_caster<CopyOnlyInt> {
|
86 |
-
protected:
|
87 |
-
CopyOnlyInt value;
|
88 |
-
public:
|
89 |
-
static constexpr auto name = _("CopyOnlyInt");
|
90 |
-
bool load(handle src, bool) { value = CopyOnlyInt(src.cast<int>()); return true; }
|
91 |
-
static handle cast(const CopyOnlyInt &m, return_value_policy r, handle p) { return pybind11::cast(m.value, r, p); }
|
92 |
-
static handle cast(const CopyOnlyInt *src, return_value_policy policy, handle parent) {
|
93 |
-
if (!src) return none().release();
|
94 |
-
return cast(*src, policy, parent);
|
95 |
-
}
|
96 |
-
operator CopyOnlyInt*() { return &value; }
|
97 |
-
operator CopyOnlyInt&() { return value; }
|
98 |
-
template <typename T> using cast_op_type = pybind11::detail::cast_op_type<T>;
|
99 |
-
};
|
100 |
-
PYBIND11_NAMESPACE_END(detail)
|
101 |
-
PYBIND11_NAMESPACE_END(pybind11)
|
102 |
-
|
103 |
-
TEST_SUBMODULE(copy_move_policies, m) {
|
104 |
-
// test_lacking_copy_ctor
|
105 |
-
py::class_<lacking_copy_ctor>(m, "lacking_copy_ctor")
|
106 |
-
.def_static("get_one", &lacking_copy_ctor::get_one,
|
107 |
-
py::return_value_policy::copy);
|
108 |
-
// test_lacking_move_ctor
|
109 |
-
py::class_<lacking_move_ctor>(m, "lacking_move_ctor")
|
110 |
-
.def_static("get_one", &lacking_move_ctor::get_one,
|
111 |
-
py::return_value_policy::move);
|
112 |
-
|
113 |
-
// test_move_and_copy_casts
|
114 |
-
m.def("move_and_copy_casts", [](py::object o) {
|
115 |
-
int r = 0;
|
116 |
-
r += py::cast<MoveOrCopyInt>(o).value; /* moves */
|
117 |
-
r += py::cast<MoveOnlyInt>(o).value; /* moves */
|
118 |
-
r += py::cast<CopyOnlyInt>(o).value; /* copies */
|
119 |
-
MoveOrCopyInt m1(py::cast<MoveOrCopyInt>(o)); /* moves */
|
120 |
-
MoveOnlyInt m2(py::cast<MoveOnlyInt>(o)); /* moves */
|
121 |
-
CopyOnlyInt m3(py::cast<CopyOnlyInt>(o)); /* copies */
|
122 |
-
r += m1.value + m2.value + m3.value;
|
123 |
-
|
124 |
-
return r;
|
125 |
-
});
|
126 |
-
|
127 |
-
// test_move_and_copy_loads
|
128 |
-
m.def("move_only", [](MoveOnlyInt m) { return m.value; });
|
129 |
-
m.def("move_or_copy", [](MoveOrCopyInt m) { return m.value; });
|
130 |
-
m.def("copy_only", [](CopyOnlyInt m) { return m.value; });
|
131 |
-
m.def("move_pair", [](std::pair<MoveOnlyInt, MoveOrCopyInt> p) {
|
132 |
-
return p.first.value + p.second.value;
|
133 |
-
});
|
134 |
-
m.def("move_tuple", [](std::tuple<MoveOnlyInt, MoveOrCopyInt, MoveOnlyInt> t) {
|
135 |
-
return std::get<0>(t).value + std::get<1>(t).value + std::get<2>(t).value;
|
136 |
-
});
|
137 |
-
m.def("copy_tuple", [](std::tuple<CopyOnlyInt, CopyOnlyInt> t) {
|
138 |
-
return std::get<0>(t).value + std::get<1>(t).value;
|
139 |
-
});
|
140 |
-
m.def("move_copy_nested", [](std::pair<MoveOnlyInt, std::pair<std::tuple<MoveOrCopyInt, CopyOnlyInt, std::tuple<MoveOnlyInt>>, MoveOrCopyInt>> x) {
|
141 |
-
return x.first.value + std::get<0>(x.second.first).value + std::get<1>(x.second.first).value +
|
142 |
-
std::get<0>(std::get<2>(x.second.first)).value + x.second.second.value;
|
143 |
-
});
|
144 |
-
m.def("move_and_copy_cstats", []() {
|
145 |
-
ConstructorStats::gc();
|
146 |
-
// Reset counts to 0 so that previous tests don't affect later ones:
|
147 |
-
auto &mc = ConstructorStats::get<MoveOrCopyInt>();
|
148 |
-
mc.move_assignments = mc.move_constructions = mc.copy_assignments = mc.copy_constructions = 0;
|
149 |
-
auto &mo = ConstructorStats::get<MoveOnlyInt>();
|
150 |
-
mo.move_assignments = mo.move_constructions = mo.copy_assignments = mo.copy_constructions = 0;
|
151 |
-
auto &co = ConstructorStats::get<CopyOnlyInt>();
|
152 |
-
co.move_assignments = co.move_constructions = co.copy_assignments = co.copy_constructions = 0;
|
153 |
-
py::dict d;
|
154 |
-
d["MoveOrCopyInt"] = py::cast(mc, py::return_value_policy::reference);
|
155 |
-
d["MoveOnlyInt"] = py::cast(mo, py::return_value_policy::reference);
|
156 |
-
d["CopyOnlyInt"] = py::cast(co, py::return_value_policy::reference);
|
157 |
-
return d;
|
158 |
-
});
|
159 |
-
#ifdef PYBIND11_HAS_OPTIONAL
|
160 |
-
// test_move_and_copy_load_optional
|
161 |
-
m.attr("has_optional") = true;
|
162 |
-
m.def("move_optional", [](std::optional<MoveOnlyInt> o) {
|
163 |
-
return o->value;
|
164 |
-
});
|
165 |
-
m.def("move_or_copy_optional", [](std::optional<MoveOrCopyInt> o) {
|
166 |
-
return o->value;
|
167 |
-
});
|
168 |
-
m.def("copy_optional", [](std::optional<CopyOnlyInt> o) {
|
169 |
-
return o->value;
|
170 |
-
});
|
171 |
-
m.def("move_optional_tuple", [](std::optional<std::tuple<MoveOrCopyInt, MoveOnlyInt, CopyOnlyInt>> x) {
|
172 |
-
return std::get<0>(*x).value + std::get<1>(*x).value + std::get<2>(*x).value;
|
173 |
-
});
|
174 |
-
#else
|
175 |
-
m.attr("has_optional") = false;
|
176 |
-
#endif
|
177 |
-
|
178 |
-
// #70 compilation issue if operator new is not public
|
179 |
-
struct PrivateOpNew {
|
180 |
-
int value = 1;
|
181 |
-
private:
|
182 |
-
#if defined(_MSC_VER)
|
183 |
-
# pragma warning(disable: 4822) // warning C4822: local class member function does not have a body
|
184 |
-
#endif
|
185 |
-
void *operator new(size_t bytes);
|
186 |
-
};
|
187 |
-
py::class_<PrivateOpNew>(m, "PrivateOpNew").def_readonly("value", &PrivateOpNew::value);
|
188 |
-
m.def("private_op_new_value", []() { return PrivateOpNew(); });
|
189 |
-
m.def("private_op_new_reference", []() -> const PrivateOpNew & {
|
190 |
-
static PrivateOpNew x{};
|
191 |
-
return x;
|
192 |
-
}, py::return_value_policy::reference);
|
193 |
-
|
194 |
-
// test_move_fallback
|
195 |
-
// #389: rvp::move should fall-through to copy on non-movable objects
|
196 |
-
struct MoveIssue1 {
|
197 |
-
int v;
|
198 |
-
MoveIssue1(int v) : v{v} {}
|
199 |
-
MoveIssue1(const MoveIssue1 &c) = default;
|
200 |
-
MoveIssue1(MoveIssue1 &&) = delete;
|
201 |
-
};
|
202 |
-
py::class_<MoveIssue1>(m, "MoveIssue1").def(py::init<int>()).def_readwrite("value", &MoveIssue1::v);
|
203 |
-
|
204 |
-
struct MoveIssue2 {
|
205 |
-
int v;
|
206 |
-
MoveIssue2(int v) : v{v} {}
|
207 |
-
MoveIssue2(MoveIssue2 &&) = default;
|
208 |
-
};
|
209 |
-
py::class_<MoveIssue2>(m, "MoveIssue2").def(py::init<int>()).def_readwrite("value", &MoveIssue2::v);
|
210 |
-
|
211 |
-
m.def("get_moveissue1", [](int i) { return new MoveIssue1(i); }, py::return_value_policy::move);
|
212 |
-
m.def("get_moveissue2", [](int i) { return MoveIssue2(i); }, py::return_value_policy::move);
|
213 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/thrust/detail/config/cpp_dialect.h
DELETED
@@ -1,124 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
* Copyright 2020 NVIDIA Corporation
|
3 |
-
*
|
4 |
-
* Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
* you may not use this file except in compliance with the License.
|
6 |
-
* You may obtain a copy of the License at
|
7 |
-
*
|
8 |
-
* http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
*
|
10 |
-
* Unless required by applicable law or agreed to in writing, software
|
11 |
-
* distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
* See the License for the specific language governing permissions and
|
14 |
-
* limitations under the License.
|
15 |
-
*/
|
16 |
-
|
17 |
-
/*! \file cpp_dialect.h
|
18 |
-
* \brief Detect the version of the C++ standard used by the compiler.
|
19 |
-
*/
|
20 |
-
|
21 |
-
#pragma once
|
22 |
-
|
23 |
-
#include <thrust/detail/config/compiler.h>
|
24 |
-
|
25 |
-
// Deprecation warnings may be silenced by defining the following macros. These
|
26 |
-
// may be combined.
|
27 |
-
// - THRUST_IGNORE_DEPRECATED_CPP_DIALECT:
|
28 |
-
// Ignore all deprecated C++ dialects and outdated compilers.
|
29 |
-
// - THRUST_IGNORE_DEPRECATED_CPP_11:
|
30 |
-
// Ignore deprecation warnings when compiling with C++11. C++03 and outdated
|
31 |
-
// compilers will still issue warnings.
|
32 |
-
// - THRUST_IGNORE_DEPRECATED_COMPILER
|
33 |
-
// Ignore deprecation warnings when using deprecated compilers. Compiling
|
34 |
-
// with C++03 and C++11 will still issue warnings.
|
35 |
-
|
36 |
-
// Check for the CUB opt-outs as well:
|
37 |
-
#if !defined(THRUST_IGNORE_DEPRECATED_CPP_DIALECT) && \
|
38 |
-
defined(CUB_IGNORE_DEPRECATED_CPP_DIALECT)
|
39 |
-
# define THRUST_IGNORE_DEPRECATED_CPP_DIALECT
|
40 |
-
#endif
|
41 |
-
#if !defined(THRUST_IGNORE_DEPRECATED_CPP_11) && \
|
42 |
-
defined(CUB_IGNORE_DEPRECATED_CPP_11)
|
43 |
-
# define THRUST_IGNORE_DEPRECATED_CPP_11
|
44 |
-
#endif
|
45 |
-
#if !defined(THRUST_IGNORE_DEPRECATED_COMPILER) && \
|
46 |
-
defined(CUB_IGNORE_DEPRECATED_COMPILER)
|
47 |
-
# define THRUST_IGNORE_DEPRECATED_COMPILER
|
48 |
-
#endif
|
49 |
-
|
50 |
-
#ifdef THRUST_IGNORE_DEPRECATED_CPP_DIALECT
|
51 |
-
# define THRUST_IGNORE_DEPRECATED_CPP_11
|
52 |
-
# define THRUST_IGNORE_DEPRECATED_COMPILER
|
53 |
-
#endif
|
54 |
-
|
55 |
-
// Define this to override the built-in detection.
|
56 |
-
#ifndef THRUST_CPP_DIALECT
|
57 |
-
|
58 |
-
// MSVC does not define __cplusplus correctly. _MSVC_LANG is used instead.
|
59 |
-
// This macro is only defined in MSVC 2015U3+.
|
60 |
-
# ifdef _MSVC_LANG // Do not replace with THRUST_HOST_COMPILER test (see above)
|
61 |
-
// MSVC2015 reports C++14 but lacks extended constexpr support. Treat as C++11.
|
62 |
-
# if THRUST_MSVC_VERSION < 1910 && _MSVC_LANG > 201103L /* MSVC < 2017 && CPP > 2011 */
|
63 |
-
# define THRUST_CPLUSPLUS 201103L /* Fix to 2011 */
|
64 |
-
# else
|
65 |
-
# define THRUST_CPLUSPLUS _MSVC_LANG /* We'll trust this for now. */
|
66 |
-
# endif // MSVC 2015 C++14 fix
|
67 |
-
# else
|
68 |
-
# define THRUST_CPLUSPLUS __cplusplus
|
69 |
-
# endif
|
70 |
-
|
71 |
-
// Detect current dialect:
|
72 |
-
# if THRUST_CPLUSPLUS < 201103L
|
73 |
-
# define THRUST_CPP_DIALECT 2003
|
74 |
-
# elif THRUST_CPLUSPLUS < 201402L
|
75 |
-
# define THRUST_CPP_DIALECT 2011
|
76 |
-
# elif THRUST_CPLUSPLUS < 201703L
|
77 |
-
# define THRUST_CPP_DIALECT 2014
|
78 |
-
# elif THRUST_CPLUSPLUS == 201703L
|
79 |
-
# define THRUST_CPP_DIALECT 2017
|
80 |
-
# elif THRUST_CPLUSPLUS > 201703L // unknown, but is higher than 2017.
|
81 |
-
# define THRUST_CPP_DIALECT 2020
|
82 |
-
# endif
|
83 |
-
|
84 |
-
# undef THRUST_CPLUSPLUS // cleanup
|
85 |
-
|
86 |
-
#endif // !THRUST_CPP_DIALECT
|
87 |
-
|
88 |
-
// Define THRUST_COMPILER_DEPRECATION macro:
|
89 |
-
#if THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_MSVC
|
90 |
-
# define THRUST_COMP_DEPR_IMPL(msg) \
|
91 |
-
__pragma(message(__FILE__ ":" THRUST_COMP_DEPR_IMPL0(__LINE__) ": warning: " #msg))
|
92 |
-
# define THRUST_COMP_DEPR_IMPL0(x) THRUST_COMP_DEPR_IMPL1(x)
|
93 |
-
# define THRUST_COMP_DEPR_IMPL1(x) #x
|
94 |
-
#else // clang / gcc:
|
95 |
-
# define THRUST_COMP_DEPR_IMPL(msg) THRUST_COMP_DEPR_IMPL0(GCC warning #msg)
|
96 |
-
# define THRUST_COMP_DEPR_IMPL0(expr) _Pragma(#expr)
|
97 |
-
# define THRUST_COMP_DEPR_IMPL1 /* intentionally blank */
|
98 |
-
#endif
|
99 |
-
|
100 |
-
#define THRUST_COMPILER_DEPRECATION(REQ, FIX) \
|
101 |
-
THRUST_COMP_DEPR_IMPL(Thrust requires REQ. Please FIX. Define THRUST_IGNORE_DEPRECATED_CPP_DIALECT to suppress this message.)
|
102 |
-
|
103 |
-
// Minimum required compiler checks:
|
104 |
-
#ifndef THRUST_IGNORE_DEPRECATED_COMPILER
|
105 |
-
# if THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_GCC && THRUST_GCC_VERSION < 50000
|
106 |
-
THRUST_COMPILER_DEPRECATION(GCC 5.0, upgrade your compiler);
|
107 |
-
# endif
|
108 |
-
# if THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_CLANG && THRUST_CLANG_VERSION < 60000
|
109 |
-
THRUST_COMPILER_DEPRECATION(Clang 6.0, upgrade your compiler);
|
110 |
-
# endif
|
111 |
-
# if THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_MSVC && THRUST_MSVC_VERSION < 1910
|
112 |
-
THRUST_COMPILER_DEPRECATION(MSVC 2017, upgrade your compiler);
|
113 |
-
# endif
|
114 |
-
#endif
|
115 |
-
|
116 |
-
#if !defined(THRUST_IGNORE_DEPRECATED_CPP_DIALECT) && THRUST_CPP_DIALECT < 2014 && \
|
117 |
-
(THRUST_CPP_DIALECT != 2011 || !defined(THRUST_IGNORE_DEPRECATED_CPP_11))
|
118 |
-
THRUST_COMPILER_DEPRECATION(C++14, pass -std=c++14 to your compiler);
|
119 |
-
#endif
|
120 |
-
|
121 |
-
#undef THRUST_COMPILER_DEPRECATION
|
122 |
-
#undef THRUST_COMP_DEPR_IMPL
|
123 |
-
#undef THRUST_COMP_DEPR_IMPL0
|
124 |
-
#undef THRUST_COMP_DEPR_IMPL1
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/thrust/detail/seq.h
DELETED
@@ -1,53 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
* Copyright 2008-2018 NVIDIA Corporation
|
3 |
-
*
|
4 |
-
* Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
* you may not use this file except in compliance with the License.
|
6 |
-
* You may obtain a copy of the License at
|
7 |
-
*
|
8 |
-
* http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
*
|
10 |
-
* Unless required by applicable law or agreed to in writing, software
|
11 |
-
* distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
* See the License for the specific language governing permissions and
|
14 |
-
* limitations under the License.
|
15 |
-
*/
|
16 |
-
|
17 |
-
#pragma once
|
18 |
-
|
19 |
-
#include <thrust/detail/config.h>
|
20 |
-
#include <thrust/detail/allocator_aware_execution_policy.h>
|
21 |
-
#include <thrust/system/detail/sequential/execution_policy.h>
|
22 |
-
|
23 |
-
namespace thrust
|
24 |
-
{
|
25 |
-
namespace detail
|
26 |
-
{
|
27 |
-
|
28 |
-
|
29 |
-
struct seq_t : thrust::system::detail::sequential::execution_policy<seq_t>,
|
30 |
-
thrust::detail::allocator_aware_execution_policy<
|
31 |
-
thrust::system::detail::sequential::execution_policy>
|
32 |
-
{
|
33 |
-
__host__ __device__
|
34 |
-
THRUST_CONSTEXPR seq_t() : thrust::system::detail::sequential::execution_policy<seq_t>() {}
|
35 |
-
|
36 |
-
// allow any execution_policy to convert to seq_t
|
37 |
-
template<typename DerivedPolicy>
|
38 |
-
__host__ __device__
|
39 |
-
seq_t(const thrust::execution_policy<DerivedPolicy> &)
|
40 |
-
: thrust::system::detail::sequential::execution_policy<seq_t>()
|
41 |
-
{}
|
42 |
-
};
|
43 |
-
|
44 |
-
|
45 |
-
} // end detail
|
46 |
-
|
47 |
-
|
48 |
-
THRUST_INLINE_CONSTANT detail::seq_t seq;
|
49 |
-
|
50 |
-
|
51 |
-
} // end thrust
|
52 |
-
|
53 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/thrust/device_allocator.h
DELETED
@@ -1,146 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
* Copyright 2008-2018 NVIDIA Corporation
|
3 |
-
*
|
4 |
-
* Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
* you may not use this file except in compliance with the License.
|
6 |
-
* You may obtain a copy of the License at
|
7 |
-
*
|
8 |
-
* http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
*
|
10 |
-
* Unless required by applicable law or agreed to in writing, software
|
11 |
-
* distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
* See the License for the specific language governing permissions and
|
14 |
-
* limitations under the License.
|
15 |
-
*/
|
16 |
-
|
17 |
-
|
18 |
-
/*! \file device_allocator.h
|
19 |
-
* \brief An allocator which creates new elements in device memory
|
20 |
-
*/
|
21 |
-
|
22 |
-
#pragma once
|
23 |
-
|
24 |
-
#include <thrust/detail/config.h>
|
25 |
-
#include <thrust/device_ptr.h>
|
26 |
-
#include <thrust/mr/allocator.h>
|
27 |
-
#include <thrust/memory/detail/device_system_resource.h>
|
28 |
-
|
29 |
-
#include <limits>
|
30 |
-
#include <stdexcept>
|
31 |
-
|
32 |
-
namespace thrust
|
33 |
-
{
|
34 |
-
|
35 |
-
/** \addtogroup memory_resources Memory Resources
|
36 |
-
* \ingroup memory_management_classes
|
37 |
-
* \{
|
38 |
-
*/
|
39 |
-
|
40 |
-
/*! Memory resource adaptor that turns any memory resource that returns a fancy
|
41 |
-
* with the same tag as \p device_ptr, and adapts it to a resource that returns
|
42 |
-
* a \p device_ptr.
|
43 |
-
*/
|
44 |
-
template<typename Upstream>
|
45 |
-
class device_ptr_memory_resource THRUST_FINAL
|
46 |
-
: public thrust::mr::memory_resource<
|
47 |
-
device_ptr<void>
|
48 |
-
>
|
49 |
-
{
|
50 |
-
typedef typename Upstream::pointer upstream_ptr;
|
51 |
-
|
52 |
-
public:
|
53 |
-
/*! Initialize the adaptor with the global instance of the upstream resource. Obtains
|
54 |
-
* the global instance by calling \p get_global_resource.
|
55 |
-
*/
|
56 |
-
__host__
|
57 |
-
device_ptr_memory_resource() : m_upstream(mr::get_global_resource<Upstream>())
|
58 |
-
{
|
59 |
-
}
|
60 |
-
|
61 |
-
/*! Initialize the adaptor with an upstream resource.
|
62 |
-
*
|
63 |
-
* \param upstream the upstream memory resource to adapt.
|
64 |
-
*/
|
65 |
-
__host__
|
66 |
-
device_ptr_memory_resource(Upstream * upstream) : m_upstream(upstream)
|
67 |
-
{
|
68 |
-
}
|
69 |
-
|
70 |
-
THRUST_NODISCARD __host__
|
71 |
-
virtual pointer do_allocate(std::size_t bytes, std::size_t alignment = THRUST_MR_DEFAULT_ALIGNMENT) THRUST_OVERRIDE
|
72 |
-
{
|
73 |
-
return pointer(m_upstream->do_allocate(bytes, alignment).get());
|
74 |
-
}
|
75 |
-
|
76 |
-
__host__
|
77 |
-
virtual void do_deallocate(pointer p, std::size_t bytes, std::size_t alignment) THRUST_OVERRIDE
|
78 |
-
{
|
79 |
-
m_upstream->do_deallocate(upstream_ptr(p.get()), bytes, alignment);
|
80 |
-
}
|
81 |
-
|
82 |
-
private:
|
83 |
-
Upstream * m_upstream;
|
84 |
-
};
|
85 |
-
|
86 |
-
/*! \}
|
87 |
-
*/
|
88 |
-
|
89 |
-
/*! \addtogroup memory_management Memory Management
|
90 |
-
* \addtogroup memory_management_classes Memory Management Classes
|
91 |
-
* \ingroup memory_management
|
92 |
-
* \{
|
93 |
-
*/
|
94 |
-
template<typename T>
|
95 |
-
class device_allocator
|
96 |
-
: public thrust::mr::stateless_resource_allocator<
|
97 |
-
T,
|
98 |
-
device_ptr_memory_resource<device_memory_resource>
|
99 |
-
>
|
100 |
-
{
|
101 |
-
typedef thrust::mr::stateless_resource_allocator<
|
102 |
-
T,
|
103 |
-
device_ptr_memory_resource<device_memory_resource>
|
104 |
-
> base;
|
105 |
-
|
106 |
-
public:
|
107 |
-
/*! The \p rebind metafunction provides the type of a \p device_allocator
|
108 |
-
* instantiated with another type.
|
109 |
-
*
|
110 |
-
* \tparam U the other type to use for instantiation.
|
111 |
-
*/
|
112 |
-
template<typename U>
|
113 |
-
struct rebind
|
114 |
-
{
|
115 |
-
/*! The typedef \p other gives the type of the rebound \p device_allocator.
|
116 |
-
*/
|
117 |
-
typedef device_allocator<U> other;
|
118 |
-
};
|
119 |
-
|
120 |
-
/*! Default constructor has no effect. */
|
121 |
-
__host__
|
122 |
-
device_allocator() {}
|
123 |
-
|
124 |
-
/*! Copy constructor has no effect. */
|
125 |
-
__host__
|
126 |
-
device_allocator(const device_allocator& other) : base(other) {}
|
127 |
-
|
128 |
-
/*! Constructor from other \p device_allocator has no effect. */
|
129 |
-
template<typename U>
|
130 |
-
__host__
|
131 |
-
device_allocator(const device_allocator<U>& other) : base(other) {}
|
132 |
-
|
133 |
-
#if THRUST_CPP_DIALECT >= 2011
|
134 |
-
device_allocator & operator=(const device_allocator &) = default;
|
135 |
-
#endif
|
136 |
-
|
137 |
-
/*! Destructor has no effect. */
|
138 |
-
__host__
|
139 |
-
~device_allocator() {}
|
140 |
-
};
|
141 |
-
|
142 |
-
/*! \}
|
143 |
-
*/
|
144 |
-
|
145 |
-
} // end thrust
|
146 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|