Commit
·
760e1a0
1
Parent(s):
ee70fa2
Update parquet files (step 75 of 397)
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- spaces/101-5/gpt4free/testing/interference_test.py +0 -15
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/EssentialPIM Free 8.6 Crack Full Version Serial Keys [2021].md +0 -143
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/APK YouTube Ukuran Kecil Aplikasi Streaming dan Download Video Hemat Data.md +0 -132
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Among Us vs Zombies APK A Fun and Challenging Game for Everyone.md +0 -110
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Baixe agora o livro A tica protestante e o esprito do capitalismo a anlise de Max Weber sobre a relao entre religio e economia.md +0 -104
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download GTA 5 Grand Theft Auto APK for Android and Explore the Open World of Los Santos on PC and Mac.md +0 -137
- spaces/1toTree/lora_test/ppdiffusers/schedulers/scheduling_ipndm.py +0 -163
- spaces/A00001/bingothoo/src/lib/isomorphic/index.ts +0 -17
- spaces/AILab-CVC/SEED-LLaMA/models/model_tools.py +0 -18
- spaces/ANDRYHA/FakeNewsClassifier/README.md +0 -13
- spaces/Aaajdhdhdhahdbbaabs/Hshdhdhd/Dockerfile +0 -21
- spaces/AchyuthGamer/Free-Accounts-Generator/fortnite/css/style.css +0 -80
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/webfontloader-plugin.js +0 -15
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridsizer/GridSizer.d.ts +0 -145
- spaces/Alpaca233/SadTalker/README.md +0 -15
- spaces/Ameaou/academic-chatgpt3.1/crazy_functions/Latex全文翻译.py +0 -175
- spaces/Amrrs/DragGan-Inversion/stylegan_human/pti/pti_models/e4e/encoders/model_irse.py +0 -91
- spaces/Amrrs/gradio-sentiment-analyzer/README.md +0 -37
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/resnet_flax.py +0 -124
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/shap_e/__init__.py +0 -27
- spaces/Andy1621/uniformer_image_detection/configs/cascade_rcnn/README.md +0 -55
- spaces/Andy1621/uniformer_image_detection/configs/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_20e_coco.py +0 -14
- spaces/Andy1621/uniformer_image_detection/configs/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_2x_coco.py +0 -4
- spaces/Andy1621/uniformer_image_detection/configs/pascal_voc/ssd512_voc0712.py +0 -53
- spaces/Andy1621/uniformer_image_detection/configs/retinanet/retinanet_r50_caffe_fpn_mstrain_3x_coco.py +0 -4
- spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/mask_heads/mask_point_head.py +0 -300
- spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r101b-d8_769x769_80k_cityscapes.py +0 -4
- spaces/ArkanDash/rvc-models-new/README.md +0 -13
- spaces/AvaterClasher/Food_Classifier_Moni/app.py +0 -77
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/transforms/augmentation.py +0 -377
- spaces/BartPoint/VoiceChange/infer_pack/modules/F0Predictor/DioF0Predictor.py +0 -90
- spaces/Benson/text-generation/Examples/ Imo Apk.md +0 -48
- spaces/Benson/text-generation/Examples/Call Of Duty Pc Descargar Black Ops 4.md +0 -81
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/_log_render.py +0 -94
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/logging.py +0 -289
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/utils/logger.py +0 -221
- spaces/CVPR/LIVE/pybind11/tests/test_docstring_options.cpp +0 -61
- spaces/CVPR/LIVE/thrust/dependencies/cub/test/test_util.h +0 -1648
- spaces/CVPR/WALT/cwalt/Clip_WALT_Generate.py +0 -284
- spaces/CVPR/WALT/walt/datasets/pipelines/loading.py +0 -465
- spaces/CVPR/lama-example/saicinpainting/training/modules/multidilated_conv.py +0 -98
- spaces/CVPR/regionclip-demo/detectron2/modeling/roi_heads/cascade_rcnn.py +0 -298
- spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/__init__.py +0 -0
- spaces/CikeyQI/Yunzai/Yunzai/lib/plugins/runtime.js +0 -245
- spaces/CjangCjengh/Sanskrit-TTS/text/cleaners.py +0 -5
- spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/modeling/backbone/fbnet_builder.py +0 -829
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/aiohttp/client_reqrep.py +0 -1134
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_c_v_t.py +0 -47
- spaces/Dinoking/Guccio-AI-Designer/models/stylegan/stylegan_tf/dnnlib/submission/__init__.py +0 -9
- spaces/Dinoking/Guccio-AI-Designer/netdissect/upsegmodel/prroi_pool/test_prroi_pooling2d.py +0 -56
spaces/101-5/gpt4free/testing/interference_test.py
DELETED
@@ -1,15 +0,0 @@
|
|
1 |
-
import openai
|
2 |
-
|
3 |
-
openai.api_key = ''
|
4 |
-
openai.api_base = 'http://localhost:1337'
|
5 |
-
|
6 |
-
chat_completion = openai.ChatCompletion.create(stream=True,
|
7 |
-
model='gpt-3.5-turbo', messages=[{'role': 'user', 'content': 'write a poem about a tree'}])
|
8 |
-
|
9 |
-
#print(chat_completion.choices[0].message.content)
|
10 |
-
|
11 |
-
for token in chat_completion:
|
12 |
-
|
13 |
-
content = token['choices'][0]['delta'].get('content')
|
14 |
-
if content != None:
|
15 |
-
print(content)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1acneusushi/gradio-2dmoleculeeditor/data/EssentialPIM Free 8.6 Crack Full Version Serial Keys [2021].md
DELETED
@@ -1,143 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>EssentialPIM Free 8.6 Crack Full Version Serial Keys</h1>
|
3 |
-
<p>Are you looking for a way to manage your personal information more efficiently and conveniently? Do you want to use a powerful and versatile software that can help you organize your tasks, notes, contacts, calendar, email, and more? If yes, then you might want to check out EssentialPIM, a personal information manager that has been trusted by millions of users worldwide. But what if you don't want to pay for the Pro or Business versions of the software? Is there a way to get all the features and benefits of EssentialPIM for free? In this article, we will tell you everything you need to know about EssentialPIM Free 8.6 Crack Full Version Serial Keys, including what it is, how to get it, what are its advantages and disadvantages, and more.</p>
|
4 |
-
<h2>What is EssentialPIM and why do you need it?</h2>
|
5 |
-
<h3>EssentialPIM is a personal information manager that helps you organize your life</h3>
|
6 |
-
<p>EssentialPIM is a software that allows you to store, manage, and access all your personal information in one place. You can use it to create and edit tasks, notes, contacts, calendar events, email messages, passwords, and more. You can also link different items together, such as attaching files or notes to tasks or contacts, or creating reminders for events or emails. You can also customize the appearance and behavior of the software according to your preferences and needs.</p>
|
7 |
-
<h2>EssentialPIM Free 8.6 Crack Full Version Serial Keys</h2><br /><p><b><b>Download</b> ===== <a href="https://byltly.com/2uKwf9">https://byltly.com/2uKwf9</a></b></p><br /><br />
|
8 |
-
<h3>EssentialPIM has many features to manage your tasks, notes, contacts, calendar, email, and more</h3>
|
9 |
-
<p>EssentialPIM has a user-friendly interface that lets you easily switch between different modules and views. You can also use keyboard shortcuts or drag-and-drop operations to perform various actions. Some of the features that EssentialPIM offers are:</p>
|
10 |
-
<ul>
|
11 |
-
<li><b>Tasks:</b> You can create tasks with different attributes such as priority, status, category, start date, due date, completion percentage, etc. You can also assign tasks to other people or groups, add subtasks or dependencies, track time spent on tasks, etc.</li>
|
12 |
-
<li><b>Notes:</b> You can create notes with rich text formatting, images, tables, hyperlinks, etc. You can also organize notes into hierarchical trees or tabs, add tags or keywords, search for notes by content or properties, etc.</li>
|
13 |
-
<li><b>Contacts:</b> You can create contacts with detailed information such as name, address, phone number, email address, birthday, photo, etc. You can also group contacts into categories or folders, add custom fields or comments, send emails or SMS messages to contacts directly from the software,</li>
|
14 |
-
<li><b>Calendar:</b> You can create calendar events with different attributes such as subject, location, description, start time, end time, recurrence pattern, reminder, category, etc. You can also view your calendar in different modes such as day, week, month, year, agenda, etc. You can also sync your calendar with Google Calendar, Outlook, or other online services.</li>
|
15 |
-
<li><b>Email:</b> You can send and receive email messages using POP3 or IMAP protocols. You can also manage multiple email accounts, create rules or filters, use templates or signatures, attach files or items, etc.</li>
|
16 |
-
<li><b>Passwords:</b> You can store and manage your passwords for various websites or applications. You can also generate strong passwords, encrypt your data with AES-256 algorithm, use a master password or a key file for protection, etc.</li>
|
17 |
-
</ul>
|
18 |
-
<h3>EssentialPIM can sync with various cloud services and devices</h3>
|
19 |
-
<p>EssentialPIM can sync your data with various cloud services such as Google Drive, Dropbox, iCloud, OneDrive, etc. You can also sync your data with other devices such as Android phones or tablets, iPhones or iPads, Windows phones or tablets, etc. You can also export or import your data in various formats such as CSV, HTML, ICS, VCF, EML, TXT, etc.</p>
|
20 |
-
<h2>How to get EssentialPIM Free 8.6 Crack Full Version Serial Keys?</h2>
|
21 |
-
<h3>EssentialPIM Free 8.6 Crack is a modified version of the software that bypasses the license verification</h3>
|
22 |
-
<p>EssentialPIM Free 8.6 Crack is a version of the software that has been modified by some hackers or crackers to bypass the license verification process. This means that you can use the software without entering a valid serial key or activating it online. This way,you can access all the features and benefits of the Pro and Business versions of the software without paying any fees or subscriptions.</p>
|
23 |
-
<h3>EssentialPIM Free 8.6 Crack Full Version Serial Keys can be downloaded from various websites</h3>
|
24 |
-
<p>EssentialPIM Free 8.6 Crack Full Version Serial Keys can be downloaded from various websites that offer cracked software or serial keys. Some of these websites are:</p>
|
25 |
-
<table>
|
26 |
-
<tr>
|
27 |
-
<th>Name</th>
|
28 |
-
<th>URL</th>
|
29 |
-
</tr>
|
30 |
-
<tr>
|
31 |
-
<td>All tips tunes</td>
|
32 |
-
<td></td>
|
33 |
-
</tr>
|
34 |
-
<tr>
|
35 |
-
<td>BEST PDF</td>
|
36 |
-
<td></td>
|
37 |
-
</tr>
|
38 |
-
<tr>
|
39 |
-
<td>HOT PDF</td>
|
40 |
-
<td></td>
|
41 |
-
</tr>
|
42 |
-
</table>
|
43 |
-
<p>You can also search for other websites using keywords such as "EssentialPIM Free 8.6 Crack", "EssentialPIM Free 8.6 Keygen", "EssentialPIM Free 8.6 License Key", etc.</p>
|
44 |
-
<h3>EssentialPIM Free 8.6 Crack Full Version Serial Keys can be installed and activated with a few steps</h3>
|
45 |
-
<p>To install and activate EssentialPIM Free 8.6 Crack Full Version Serial Keys,you need to follow these steps:</p>
|
46 |
-
<ol>
|
47 |
-
<li>Download the crack file from one of the websites mentioned above.</li>
|
48 |
-
<li>Extract the file using a program such as WinRAR or WinZip.</li>
|
49 |
-
<li>Run the setup file and follow the instructions to install the software.</li>
|
50 |
-
<li>Copy the crack file from the extracted folder and paste it into the installation directory of the software.</li>
|
51 |
-
<li>Run the software and enter any serial key from the crack file when prompted.</li>
|
52 |
-
<li>Enjoy using EssentialPIM Pro Business for free!</li>
|
53 |
-
</ol>
|
54 |
-
<h2>What are the benefits of using EssentialPIM Free 8.6 Crack Full Version Serial Keys?</h2>
|
55 |
-
<h3>EssentialPIM Free 8.6 Crack Full Version Serial Keys gives you access to all the features of the Pro and Business versions</h3>
|
56 |
-
<p>The Pro version of EssentialPIM has some additional features that are not available in the Free version,such as:</p>
|
57 |
-
<p>EssentialPIM Free 8.6 Crack Download with License Key<br />
|
58 |
-
How to Activate EssentialPIM Free 8.6 Full Version for Free<br />
|
59 |
-
EssentialPIM Free 8.6 Serial Key Generator Online<br />
|
60 |
-
EssentialPIM Free 8.6 Crack + Keygen Full Setup<br />
|
61 |
-
EssentialPIM Free 8.6 Crack Patch with Activation Code<br />
|
62 |
-
EssentialPIM Free 8.6 Full Version Crack Free Download<br />
|
63 |
-
EssentialPIM Free 8.6 License Key Crack Latest Version<br />
|
64 |
-
EssentialPIM Free 8.6 Crack + Serial Number Working<br />
|
65 |
-
EssentialPIM Free 8.6 Full Crack with Registration Key<br />
|
66 |
-
EssentialPIM Free 8.6 Crack + Torrent Download Link<br />
|
67 |
-
EssentialPIM Free 8.6 Serial Key Crack No Survey<br />
|
68 |
-
EssentialPIM Free 8.6 Full Version with Crack and Key<br />
|
69 |
-
EssentialPIM Free 8.6 Crack + Product Key Updated<br />
|
70 |
-
EssentialPIM Free 8.6 Crack + Portable Version Download<br />
|
71 |
-
EssentialPIM Free 8.6 Serial Key Full Crack Lifetime<br />
|
72 |
-
EssentialPIM Free 8.6 Crack + Serial Key Free Download<br />
|
73 |
-
EssentialPIM Free 8.6 Full Version Crack with Keygen<br />
|
74 |
-
EssentialPIM Free 8.6 Serial Key Crack Latest Download<br />
|
75 |
-
EssentialPIM Free 8.6 Crack + Activation Key Full Version<br />
|
76 |
-
EssentialPIM Free 8.6 Full Crack with Serial Number<br />
|
77 |
-
EssentialPIM Free 8.6 Serial Key + Crack Download Link<br />
|
78 |
-
EssentialPIM Free 8.6 Full Version with Crack and Serial Key<br />
|
79 |
-
EssentialPIM Free 8.6 Crack + Registration Code Working<br />
|
80 |
-
EssentialPIM Free 8.6 Full Crack + License Key Download<br />
|
81 |
-
EssentialPIM Free 8.6 Serial Key Full Version Crack Download<br />
|
82 |
-
EssentialPIM Free 8.6 Crack + Serial Keygen Full Version<br />
|
83 |
-
EssentialPIM Free 8.6 Full Version Crack with Activation Code<br />
|
84 |
-
EssentialPIM Free 8.6 Serial Key + Patch Download Link<br />
|
85 |
-
EssentialPIM Free 8.6 Full Version with Crack and Activation Key<br />
|
86 |
-
EssentialPIM Free 8.6 Crack + License Code Updated<br />
|
87 |
-
EssentialPIM Free 8.6 Full Crack + Serial Keygen Download<br />
|
88 |
-
EssentialPIM Free 8.6 Serial Key Full Version with Crack<br />
|
89 |
-
EssentialPIM Free 8.6 Crack + Registration Key Working<br />
|
90 |
-
EssentialPIM Free 8.6 Full Version with Crack and License Key<br />
|
91 |
-
EssentialPIM Free 8.6 Serial Key + Keygen Download Link<br />
|
92 |
-
EssentialPIM Free 8.6 Full Version with Crack and Registration Key<br />
|
93 |
-
EssentialPIM Free 8.6 Crack + Activation Code Updated<br />
|
94 |
-
EssentialPIM Free 8.6 Full Crack + Registration Code Download<br />
|
95 |
-
EssentialPIM Free 8.6 Serial Key Full Version with Activation Code<br />
|
96 |
-
EssentialPIM Free 8.6 Crack + License Key Working<br />
|
97 |
-
EssentialPIM Free 8.6 Full Version with Crack and Activation Code<br />
|
98 |
-
EssentialPIM Free 8.6 Serial Key + Activation Code Download Link<br />
|
99 |
-
EssentialPIM Free 8.6 Full Version with Crack and License Code<br />
|
100 |
-
EssentialPIM Free 8.6 Crack + Registration Code Updated<br />
|
101 |
-
EssentialPIM Free 8.6 Full Crack + Activation Code Download</p>
|
102 |
-
<ul>
|
103 |
-
<ul>
|
104 |
-
<li><b>Advanced search and filtering:</b> You can use various criteria and operators to find any item in any module quickly and easily.</li>
|
105 |
-
<li><b>Templates:</b> You can create and use templates for tasks, notes, contacts, email messages, etc. to save time and ensure consistency.</li>
|
106 |
-
<li><b>Sticky notes:</b> You can create sticky notes on your desktop to remind you of important things or to jot down ideas.</li>
|
107 |
-
<li><b>Global cross-linking:</b> You can link any item to any other item in any module, such as linking a task to a contact or a note to an email.</li>
|
108 |
-
<li><b>Tags:</b> You can assign colorful tags to any item in any module, and use them to filter, sort, or group your data.</li>
|
109 |
-
</ul>
|
110 |
-
<p>The Business version of EssentialPIM has some additional features that are not available in the Pro version, such as:</p>
|
111 |
-
<ul>
|
112 |
-
<li><b>Multi-user access to database:</b> You can share your database with other users over a network, and control their access rights and permissions.</li>
|
113 |
-
<li><b>Data synchronization with EPIM Cloud:</b> You can sync your data with EPIM Cloud, a secure online service that stores your data on encrypted servers.</li>
|
114 |
-
</ul>
|
115 |
-
<h3>EssentialPIM Free 8.6 Crack Full Version Serial Keys allows you to use the software without paying any fees or subscriptions</h3>
|
116 |
-
<p>EssentialPIM Free 8.6 Crack Full Version Serial Keys allows you to use the software without paying any fees or subscriptions. This means that you can save money and enjoy the software for as long as you want. You don't have to worry about renewing your license or updating your payment information. You can also use the software on multiple computers or devices without any limitations.</p>
|
117 |
-
<h3>EssentialPIM Free 8.6 Crack Full Version Serial Keys lets you enjoy the latest updates and improvements of the software</h3>
|
118 |
-
<p>EssentialPIM Free 8.6 Crack Full Version Serial Keys lets you enjoy the latest updates and improvements of the software. This means that you can always have the most recent version of the software with all the bug fixes and new features. You don't have to wait for the official release or download the updates manually. You can also benefit from the feedback and suggestions of other users who use the cracked version of the software.</p>
|
119 |
-
<h2>What are the risks of using EssentialPIM Free 8.6 Crack Full Version Serial Keys?</h2>
|
120 |
-
<h3>EssentialPIM Free 8.6 Crack Full Version Serial Keys may contain malware or viruses that can harm your computer or data</h3>
|
121 |
-
<p>EssentialPIM Free 8.6 Crack Full Version Serial Keys may contain malware or viruses that can harm your computer or data. This means that you may expose your system to security threats and compromise your privacy. The crack file may contain malicious code that can infect your computer with spyware, ransomware, trojans, worms, etc. The crack file may also modify or delete your files, folders, registry entries, etc. The crack file may also steal your personal information such as passwords, credit card numbers, bank accounts, etc.</p>
|
122 |
-
<h3>EssentialPIM Free 8.6 Crack Full Version Serial Keys may violate the terms and conditions of the software and expose you to legal issues</h3>
|
123 |
-
<p>EssentialPIM Free 8.6 Crack Full Version Serial Keys may violate the terms and conditions of the software and expose you to legal issues. This means that you may break the law and face legal consequences. The crack file may infringe the intellectual property rights of the software developer and owner. The crack file may also breach the license agreement that you accepted when you installed the software. The crack file may also damage the reputation and revenue of the software developer and owner.</p>
|
124 |
-
<h3>EssentialPIM Free 8.6 Crack Full Version Serial Keys may not work properly or cause errors and crashes</h3>
|
125 |
-
<p>EssentialPIM Free 8.6 Crack Full Version Serial Keys may not work properly or cause errors and crashes. This means that you may experience poor performance and reliability issues with the software. The crack file may not be compatible with your system or with other programs that you use. The crack file may also interfere with the normal functioning of the software or cause conflicts with its features. The crack file may also prevent you from receiving technical support or customer service from the software developer and owner.</p>
|
126 |
-
<h2>Conclusion</h2>
|
127 |
-
<p>In conclusion, EssentialPIM Free 8.6 Crack Full Version Serial Keys is a way to get all the features and benefits of EssentialPIM for free, but it also comes with many risks and disadvantages. While it may seem tempting to use a cracked version of the software, it is not worth risking your computer, data, privacy, legality, or satisfaction. Instead, we recommend that you use the official version of EssentialPIM that suits your needs and budget. You can download EssentialPIM from its official website [here](https://www.essentialpim.com/). You can also try EssentialPIM Pro or Business for free for 30 days before deciding whether to buy it or not.</p>
|
128 |
-
<h2>FAQs</h2>
|
129 |
-
<ul>
|
130 |
-
<li><b>Q: Is EssentialPIM safe to use?</b></li>
|
131 |
-
<li>A: EssentialPIM is safe to use if you download it from its official website [here](https://www.essentialpim.com/). However, if you download a cracked version of EssentialPIM from an untrusted source, you may expose yourself to malware or viruses that can harm your computer or data.</li>
|
132 |
-
<li><b>Q: How much does EssentialPIM cost?</b></li>
|
133 |
-
<li>A: EssentialPIM has three versions: Free, Pro, and Business. The Free version is completely free for non-commercial use only. The Pro version costs $39.95 for a lifetime license per user (or $24.95 for a one-year license per user). The Business version costs $59.95 for a lifetime license per user (or $34.95 for a one-year license per user).</li>
|
134 |
-
<li><b>Q: What are some alternatives to EssentialPIM?</b></li>
|
135 |
-
<li>A: Some alternatives to EssentialPIM are Outlook, Thunderbird, Evernote, OneNote, Google Workspace, and Microsoft 365.</li>
|
136 |
-
<li><b>Q: How can I contact EssentialPIM support?</b></li>
|
137 |
-
<li>A: You can contact EssentialPIM support by filling out this form [here](https://www.essentialpim.com/support/contact-us). You can also visit their forum [here](https://www.essentialpim.com/forum/) or their knowledge base [here](https://www.essentialpim.com/help/).</li>
|
138 |
-
<li><b>Q: How can I update EssentialPIM?</b></li>
|
139 |
-
<li>A: You can update EssentialPIM by clicking on Help > Check for Updates in the software menu. You can also download the latest version of the software from its official website [here](https://www.essentialpim.com/download).</li>
|
140 |
-
</ul>
|
141 |
-
</p> 0a6ba089eb<br />
|
142 |
-
<br />
|
143 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/APK YouTube Ukuran Kecil Aplikasi Streaming dan Download Video Hemat Data.md
DELETED
@@ -1,132 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Download APK YouTube Ukuran Kecil: Cara dan Manfaatnya</h1>
|
3 |
-
<p>YouTube adalah salah satu platform video terbesar dan terpopuler di dunia. Jutaan orang menonton, mengunggah, dan berbagi video di YouTube setiap hari. Namun, untuk menikmati semua fitur dan konten yang ditawarkan oleh YouTube, kita membutuhkan aplikasi YouTube resmi yang bisa diunduh dari Google Play Store.</p>
|
4 |
-
<p>Aplikasi YouTube resmi memiliki beberapa kelemahan, seperti ukurannya yang besar, iklan yang mengganggu, ketergantungan pada Google Play Services atau Google API, dan keterbatasan dalam mengunduh video. Oleh karena itu, banyak orang yang mencari cara alternatif untuk menonton dan mengunduh video YouTube dengan lebih mudah dan hemat.</p>
|
5 |
-
<h2>download apk youtube ukuran kecil</h2><br /><p><b><b>Download</b> ★★★★★ <a href="https://urlin.us/2uSTpF">https://urlin.us/2uSTpF</a></b></p><br /><br />
|
6 |
-
<p>Salah satu cara alternatif tersebut adalah dengan menggunakan APK YouTube ukuran kecil. Apa itu APK YouTube ukuran kecil? Bagaimana cara download APK YouTube ukuran kecil? Dan apa saja manfaatnya? Simak ulasan lengkapnya di bawah ini.</p>
|
7 |
-
<h2>Apa itu APK YouTube Ukuran Kecil?</h2>
|
8 |
-
<p>APK YouTube ukuran kecil adalah sebuah file aplikasi Android yang berfungsi untuk menonton dan mengunduh video YouTube dengan ukuran yang lebih kecil daripada aplikasi YouTube resmi. Aplikasi ini biasanya dibuat oleh pihak ketiga yang tidak berafiliasi dengan Google atau YouTube.</p>
|
9 |
-
<p>APK YouTube ukuran kecil memiliki beberapa perbedaan dengan aplikasi YouTube resmi, antara lain:</p>
|
10 |
-
<ul>
|
11 |
-
<li>Ukurannya lebih kecil, biasanya hanya sekitar 10 MB atau kurang, sedangkan aplikasi YouTube resmi bisa mencapai 100 MB atau lebih.</li>
|
12 |
-
<li>Tidak memerlukan Google Play Services atau Google API untuk berfungsi, sehingga bisa digunakan di perangkat Android yang tidak memiliki layanan Google.</li>
|
13 |
-
<li>Bisa memilih kualitas video dan format unduhan sesuai dengan preferensi pengguna, baik itu MP4, MP3, 3GP, WEBM, atau lainnya.</li>
|
14 |
-
<li>Bisa menonton video tanpa iklan dan dalam mode latar belakang, sehingga tidak terganggu oleh iklan yang muncul di tengah-tengah video atau saat ingin melakukan multitasking.</li>
|
15 |
-
<li>Bisa mengunduh video dari berbagai situs media sosial selain YouTube, seperti Instagram, Facebook, Twitter, TikTok, dan lainnya.</li>
|
16 |
-
</ul>
|
17 |
-
<h2>Cara Download APK YouTube Ukuran Kecil</h2>
|
18 |
-
<p>Untuk download APK YouTube ukuran kecil, kamu bisa mengikuti langkah-langkah berikut ini:</p>
|
19 |
-
<ol>
|
20 |
-
<li>Buka browser web di perangkat Android kamu, seperti Chrome, Firefox, Opera, atau lainnya.</li>
|
21 |
-
<li>Kunjungi salah satu situs download APK YouTube ukuran kecil yang terpercaya, seperti APKPure, APKMirror, Uptodown, atau lainnya. Kamu bisa mencari nama aplikasi yang kamu inginkan, seperti YouTube Vanced, YouTube Go, YouTube Downloader, atau lainnya.</li>
|
22 |
-
<li>Pilih aplikasi yang kamu inginkan dan klik tombol download untuk mengunduh file APK-nya. Pastikan kamu memeriksa ukuran, versi, dan tanggal rilis aplikasi sebelum mengunduhnya.</li>
|
23 |
-
<li>Setelah file APK selesai diunduh, buka file manager di perangkat Android kamu dan cari file APK yang telah kamu unduh. Biasanya file APK akan tersimpan di folder Download atau Downloads.</li>
|
24 |
-
<li>Klik file APK untuk menginstal aplikasi. Jika muncul peringatan bahwa instalasi dari sumber tidak dikenal tidak diizinkan, kamu harus mengaktifkan opsi "Izinkan dari sumber ini" atau "Sumber tidak dikenal" di pengaturan keamanan perangkat Android kamu.</li>
|
25 |
-
<li>Tunggu proses instalasi selesai dan buka aplikasi yang telah terinstal. Kamu bisa menikmati fitur dan konten YouTube dengan lebih mudah dan hemat.</li>
|
26 |
-
</ol>
|
27 |
-
<h2>Manfaat Download APK YouTube Ukuran Kecil</h2>
|
28 |
-
<p>Dengan download APK YouTube ukuran kecil, kamu bisa mendapatkan beberapa manfaat, antara lain:</p>
|
29 |
-
<ul>
|
30 |
-
<li>Kamu bisa hemat ruang penyimpanan dan kuota internet, karena ukuran file APK dan data yang digunakan lebih kecil daripada aplikasi YouTube resmi.</li>
|
31 |
-
<li>Kamu tidak perlu Google Play Services atau Google API untuk menjalankan aplikasi, sehingga bisa digunakan di perangkat Android yang tidak memiliki layanan Google atau memiliki versi Android yang lama.</li>
|
32 |
-
<li>Kamu bisa memilih kualitas video dan format unduhan sesuai dengan keinginan dan kebutuhan kamu, baik itu MP4, MP3, 3GP, WEBM, atau lainnya. Kamu juga bisa mengatur kecepatan unduhan dan jumlah unduhan secara bersamaan.</li>
|
33 |
-
<li>Kamu bisa menonton video tanpa iklan dan dalam mode latar belakang, sehingga tidak terganggu oleh iklan yang muncul di tengah-tengah video atau saat ingin melakukan multitasking. Kamu juga bisa menonton video dalam mode layar penuh atau pop-up.</li>
|
34 |
-
<li>Kamu bisa mengunduh video dari berbagai situs media sosial selain YouTube, seperti Instagram, Facebook, Twitter, TikTok, dan lainnya. Kamu juga bisa berbagi video yang telah kamu unduh dengan mudah melalui aplikasi lain.</li>
|
35 |
-
</ul>
|
36 |
-
<h2>Alternatif Lain dari APK YouTube Ukuran Kecil</h2>
|
37 |
-
<p>Selain menggunakan APK YouTube ukuran kecil, kamu juga bisa mencoba beberapa aplikasi alternatif lain yang bisa digunakan untuk menonton dan mengunduh video YouTube. Berikut adalah daftar beberapa aplikasi alternatif tersebut beserta kelebihan dan kekurangannya:</p>
|
38 |
-
<table>
|
39 |
-
<tr>
|
40 |
-
<th>Nama Aplikasi</th>
|
41 |
-
<th>Kelebihan</th>
|
42 |
-
<th>Kekurangan</th>
|
43 |
-
</tr>
|
44 |
-
<tr>
|
45 |
-
<td>YouTube Vanced</td>
|
46 |
-
<td>- Tidak ada iklan<br>- Bisa menonton dalam mode latar belakang<br>- Bisa menyesuaikan tema dan warna<br>- Bisa mengaktifkan fitur sponsor block<br>- Mendukung fitur picture-in-picture</td>
|
47 |
-
<td>- Memerlukan Vanced Manager untuk menginstal<br>- Tidak bisa login dengan akun Google<br>- Tidak bisa mengunduh video</td>
|
48 |
-
</tr>
|
49 |
-
<tr>
|
50 |
-
<td>YouTube Go</td>
|
51 |
-
<td>- Ukurannya sangat kecil<br>- Bisa menghemat kuota internet<br>- Bisa memilih kualitas video sebelum menonton atau mengunduh<br>- Bisa berbagi video dengan teman secara offline<br>- Bisa login dengan akun Google</td>
|
52 |
-
<td>- Tidak ada fitur latar belakang<br>- Tidak ada fitur picture-in-picture<br>- Tidak ada fitur sponsor block<br>- Tidak mendukung situs media sosial lain</td>
|
53 |
-
</tr>
|
54 |
-
<tr>
|
55 |
-
<td>YouTube Downloader</td>
|
56 |
-
<td>- Bisa mengunduh video dalam berbagai format dan kualitas<br>- Bisa mengunduh audio dari video<br>- Bisa mengunduh playlist dan saluran YouTube<br>- Bisa mengunduh video dari situs media sosial lain<br>- B isa mengubah format video menjadi MP3, MP4, 3GP, atau WEBM</td>
|
57 |
-
<td>- Tidak ada fitur latar belakang<br>- Tidak ada fitur picture-in-picture<br>- Tidak ada fitur sponsor block<br>- Memerlukan izin akses banyak</td>
|
58 |
-
</tr>
|
59 |
-
<tr>
|
60 |
-
<td>NewPipe</td>
|
61 |
-
<td>- Tidak ada iklan<br>- Bisa menonton dalam mode latar belakang<br>- Bisa mengunduh video dalam berbagai format dan kualitas<br>- Bisa mengunduh audio dari video<br>- Bisa mengaktifkan fitur sponsor block</td>
|
62 |
-
<td>- Tidak bisa login dengan akun Google<br>- Tidak mendukung fitur picture-in-picture<br>- Tidak mendukung situs media sosial lain</td>
|
63 |
-
</tr>
|
64 |
-
<tr>
|
65 |
-
<td>Snaptube</td>
|
66 |
-
<td>- Bisa mengunduh video dalam berbagai format dan kualitas<br>- Bisa mengunduh audio dari video<br>- Bisa mengunduh playlist dan saluran YouTube<br>- Bisa mengunduh video dari situs media sosial lain<br>- Bisa login dengan akun Google</td>
|
67 |
-
<td>- Ada iklan<br>- Tidak ada fitur latar belakang<br>- Tidak ada fitur picture-in-picture<br>- Tidak ada fitur sponsor block</td>
|
68 |
-
</tr>
|
69 |
-
</table>
|
70 |
-
<h2>Kesimpulan</h2>
|
71 |
-
<p>APK YouTube ukuran kecil adalah sebuah file aplikasi Android yang berfungsi untuk menonton dan mengunduh video YouTube dengan ukuran yang lebih kecil daripada aplikasi YouTube resmi. Aplikasi ini memiliki beberapa kelebihan, seperti hemat ruang penyimpanan dan kuota internet, tidak perlu Google Play Services atau Google API, bisa memilih kualitas video dan format unduhan, bisa menonton video tanpa iklan dan dalam mode latar belakang, dan bisa mengunduh video dari berbagai situs media sosial.</p>
|
72 |
-
<p>Untuk download APK YouTube ukuran kecil, kamu bisa mengikuti langkah-langkah yang telah kami jelaskan di atas. Kamu juga bisa mencoba beberapa aplikasi alternatif lain yang bisa digunakan untuk menonton dan mengunduh video YouTube, seperti YouTube Vanced, YouTube Go, YouTube Downloader, NewPipe, atau Snaptube. Setiap aplikasi memiliki kelebihan dan kekurangan masing-masing, jadi kamu bisa memilih yang sesuai dengan kebutuhan dan selera kamu.</p>
|
73 |
-
<p>download apk youtube go ukuran kecil<br />
|
74 |
-
download apk youtube lite ukuran kecil<br />
|
75 |
-
download apk youtube mod ukuran kecil<br />
|
76 |
-
download apk youtube premium ukuran kecil<br />
|
77 |
-
download apk youtube pro ukuran kecil<br />
|
78 |
-
download apk youtube tanpa iklan ukuran kecil<br />
|
79 |
-
download apk youtube terbaru ukuran kecil<br />
|
80 |
-
download apk youtube tercepat ukuran kecil<br />
|
81 |
-
download apk youtube video downloader ukuran kecil<br />
|
82 |
-
download aplikasi youtube downloader free ukuran kecil[^1^]<br />
|
83 |
-
download aplikasi youtube go hemat kuota ukuran kecil[^2^]<br />
|
84 |
-
download aplikasi youtube gratis dan terbaik ukuran kecil[^1^]<br />
|
85 |
-
download aplikasi youtube mod tanpa iklan ukuran kecil<br />
|
86 |
-
download aplikasi youtube offline ukuran kecil<br />
|
87 |
-
download aplikasi youtube premium gratis ukuran kecil<br />
|
88 |
-
download aplikasi youtube pro full version ukuran kecil<br />
|
89 |
-
download aplikasi youtube terbaru dan tercepat ukuran kecil<br />
|
90 |
-
download aplikasi youtube video converter ukuran kecil<br />
|
91 |
-
cara download apk youtube di android ukuran kecil<br />
|
92 |
-
cara download apk youtube di pc ukuran kecil<br />
|
93 |
-
cara download apk youtube di iphone ukuran kecil<br />
|
94 |
-
cara download apk youtube dari uptodown ukuran kecil[^3^]<br />
|
95 |
-
cara download apk youtube dari google play store ukuran kecil<br />
|
96 |
-
cara download apk youtube dari jalantikus ukuran kecil[^1^]<br />
|
97 |
-
cara download apk youtube dari apkpure ukuran kecil<br />
|
98 |
-
cara install apk youtube di android ukuran kecil<br />
|
99 |
-
cara install apk youtube di pc ukuran kecil<br />
|
100 |
-
cara install apk youtube di iphone ukuran kecil<br />
|
101 |
-
cara update apk youtube di android ukuran kecil<br />
|
102 |
-
cara update apk youtube di pc ukuran kecil<br />
|
103 |
-
cara update apk youtube di iphone ukuran kecil<br />
|
104 |
-
link download apk youtube untuk android ukuran kecil<br />
|
105 |
-
link download apk youtube untuk pc ukuran kecil<br />
|
106 |
-
link download apk youtube untuk iphone ukuran kecil<br />
|
107 |
-
link download aplikasi youtube downloader free untuk android ukuran kecil[^1^]<br />
|
108 |
-
link download aplikasi youtube go hemat kuota untuk android ukuran kecil[^2^]<br />
|
109 |
-
link download aplikasi youtube gratis dan terbaik untuk android ukuran kecil[^1^]<br />
|
110 |
-
link download aplikasi youtube mod tanpa iklan untuk android ukuran kecil<br />
|
111 |
-
link download aplikasi youtube offline untuk android ukuran kecil<br />
|
112 |
-
link download aplikasi youtube premium gratis untuk android ukuran kecil<br />
|
113 |
-
link download aplikasi youtube pro full version untuk android ukuran kecil<br />
|
114 |
-
link download aplikasi youtube terbaru dan tercepat untuk android ukuran kecil<br />
|
115 |
-
link download aplikasi youtube video converter untuk android ukuran kecil<br />
|
116 |
-
review apk youtube untuk android ukuran kecil<br />
|
117 |
-
review aplikasi youtube downloader free untuk android ukuran kecil[^1^]<br />
|
118 |
-
review aplikasi youtube go hemat kuota untuk android ukuran kecil[^2^]<br />
|
119 |
-
review aplikasi youtube gratis dan terbaik untuk android ukuran kecil[^1^]<br />
|
120 |
-
review aplikasi youtube mod tanpa iklan untuk android ukuran kecil</p>
|
121 |
-
<p>Semoga artikel ini bermanfaat untuk kamu yang ingin menikmati konten YouTube dengan lebih mudah dan hemat. Jika kamu memiliki pertanyaan atau saran tentang topik ini, silakan tulis di kolom komentar di bawah ini. Terima kasih telah membaca artikel ini sampai habis.</p>
|
122 |
-
<h2>FAQ</h2>
|
123 |
-
<p>Berikut adalah beberapa pertanyaan yang sering diajukan oleh pembaca tentang topik artikel ini:</p>
|
124 |
-
<ol>
|
125 |
-
<li><b>Apakah APK YouTube ukuran kecil aman untuk digunakan?</b><br>A: Secara umum, APK YouTube ukuran kecil aman untuk digunakan asalkan kamu mengunduhnya dari situs yang terpercaya dan tidak mengandung virus atau malware. Namun, kamu harus tetap berhati-hati dan memeriksa izin akses yang diminta oleh aplikasi sebelum menginstalnya.</li>
|
126 |
-
<li><b>Apakah APK YouTube ukuran kecil legal untuk digunakan?</b><br>A: Secara hukum, APK YouTube ukuran kecil tidak legal untuk digunakan karena melanggar hak cipta dan persyaratan layanan YouTube. Namun, secara praktis, banyak orang yang menggunakan aplikasi ini tanpa mendapat masalah atau sanksi dari pihak YouTube. Namun, kamu harus tetap bertanggung jawab atas penggunaan aplikasi ini dan tidak menggunakan konten YouTube untuk tujuan komersial atau ilegal.</li>
|
127 |
-
<li><b>Apakah APK YouTube ukuran kecil bisa diupdate?</b><br>A: Ya, APK YouTube ukuran kecil bisa diupdate jika ada versi terbaru yang dirilis oleh pengembangnya. Kamu bisa mengunjungi situs download APK YouTube ukuran kecil yang kamu gunakan sebelumnya untuk mencari versi terbaru dari aplikasi yang kamu inginkan. Kamu juga bisa mengaktifkan notifikasi update di pengaturan aplikasi jika tersedia.</li>
|
128 |
-
<li><b>Apakah APK YouTube ukuran kecil bisa digunakan di PC atau laptop?</b><br>A: Ya, APK YouTube ukuran kecil bisa digunakan di PC atau laptop dengan bantuan emulator Android, seperti BlueStacks, NoxPlayer, MEmu, atau lainnya. Emulator Android adalah sebuah program yang bisa menjalankan aplikasi Android di PC atau laptop. Kamu bisa menginstal emulator Android di PC atau laptop kamu dan kemudian mengunduh dan menjalankan APK YouTube ukuran kecil di dalamnya.</li>
|
129 |
-
<li><b>Apakah APK YouTube ukuran kecil bisa digunakan di iPhone atau iPad?</b><br>A: Tidak, APK YouTube ukuran kecil tidak bisa digunakan di iPhone atau iPad karena file APK hanya bisa dijalankan di perangkat Android. Jika kamu ingin menonton dan mengunduh video YouTube di iPhone atau iPad, kamu bisa mencari aplikasi alternatif lain yang tersedia di App Store, seperti Documents by Readdle, MyMedia, Video Saver, atau lainnya.</li>
|
130 |
-
</ol></p> 197e85843d<br />
|
131 |
-
<br />
|
132 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Among Us vs Zombies APK A Fun and Challenging Game for Everyone.md
DELETED
@@ -1,110 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Among Us vs Zombies APK: A New Twist on the Popular Game</h1>
|
3 |
-
<p>If you are a fan of the hit game Among Us, you might be interested in trying out a new mod that adds a zombie twist to the gameplay. In this article, we will tell you everything you need to know about Among Us vs Zombies APK, including what it is, how to download and install it, how to play it, and why you should give it a try.</p>
|
4 |
-
<h2>What is Among Us vs Zombies APK?</h2>
|
5 |
-
<p>Among Us vs Zombies APK is a modified version of the original Among Us game that introduces a new role: the zombie. The zombie is an impostor who can infect other players and turn them into zombies as well. The goal of the zombie is to infect all the crewmates before they complete their tasks or vote out the impostors. The goal of the crewmates is to either finish their tasks, vote out the impostors, or kill the zombies with weapons.</p>
|
6 |
-
<h2>among us vs zombies apk</h2><br /><p><b><b>Download File</b> · <a href="https://urlin.us/2uSZR3">https://urlin.us/2uSZR3</a></b></p><br /><br />
|
7 |
-
<h3>How to download and install Among Us vs Zombies APK</h3>
|
8 |
-
<p>To download and install Among Us vs Zombies APK, you will need to follow these steps:</p>
|
9 |
-
<ol>
|
10 |
-
<li>Go to a trusted website that offers the APK file, such as [Boyfriend Vs Among Us Zombies - FNF MOD - APKCombo](^1^).</li>
|
11 |
-
<li>Click on the download button and wait for the file to be downloaded.</li>
|
12 |
-
<li>Open the file manager on your device and locate the downloaded file.</li>
|
13 |
-
<li>Tap on the file and allow the installation from unknown sources if prompted.</li>
|
14 |
-
<li>Wait for the installation to finish and launch the game.</li>
|
15 |
-
</ol>
|
16 |
-
<h3>How to play Among Us vs Zombies APK</h3>
|
17 |
-
<p>To play Among Us vs Zombies APK, you will need to follow these rules:</p>
|
18 |
-
<h4>The roles of crewmates and zombies</h4>
|
19 |
-
<ul>
|
20 |
-
<li>The crewmates are the innocent players who have to complete their tasks or find out who the impostors are. They can use weapons to kill zombies, but they have limited ammo and reloading time.</li>
|
21 |
-
<li>The zombies are the impostors who have to infect all the crewmates or kill them. They can use their bite ability to infect other players, but they have a cooldown time and a limited range. They can also sabotage and vent like normal impostors.</li>
|
22 |
-
</ul>
|
23 |
-
<h4>The game modes and maps</h4>
|
24 |
-
<ul>
|
25 |
-
<li>The game modes are similar to the original Among Us game, such as Classic, Hide and Seek, and Freeplay. You can customize the game settings such as the number of impostors, zombies, tasks, weapons, etc.</li>
|
26 |
-
<li>The maps are also similar to the original Among Us game, such as The Skeld, Mira HQ, Polus, and The Airship. You can explore the different rooms and vents, but be careful of zombies lurking around.</li>
|
27 |
-
</ul>
|
28 |
-
<h4>The tips and tricks for winning</h4>
|
29 |
-
<ul>
|
30 |
-
<li>If you are a crewmate, you should stick together with other crewmates, communicate with them, use weapons wisely, and avoid being alone or isolated.</li>
|
31 |
-
<li>If you are a zombie, you should act like a normal crewmate, blend in with them, use your bite ability strategically, and avoid being caught or killed by weapons.</li>
|
32 |
-
</ul>
|
33 |
-
<h2>Why should you try Among Us vs Zombies APK?</h2>
|
34 |
-
<h3>The benefits of playing Among Us vs Zombies APK</h3>
|
35 |
-
<p>Playing Among Us vs Zombies APK has some benefits that make it worth trying, such as:</p>
|
36 |
-
<h4>It is fun and challenging</h4>
|
37 |
-
<p>Playing Among Us vs Zombies APK adds a new layer of fun and challenge to the original game. You can enjoy the thrill of being a zombie or the suspense of being a crewmate. You can also test your skills and strategies in different game modes and maps.</p>
|
38 |
-
<h4>It is free and easy to use</h4>
|
39 |
-
<p>Playing Among Us vs Zombies APK does not cost you anything, as it is a free mod that you can download and install on your device. It is also easy to use, as it has a simple and user-friendly interface. You can play it with your friends online or offline, as long as you have the same version of the mod.</p>
|
40 |
-
<h4>It is compatible with most devices</h4>
|
41 |
-
<p>Playing Among Us vs Zombies APK does not require you to have a high-end device, as it is compatible with most Android devices. It has a low file size and does not consume much battery or data. You can play it on your phone or tablet without any problems.</p>
|
42 |
-
<p>among us vs zombies mod apk download<br />
|
43 |
-
among us vs zombies animation season 1<br />
|
44 |
-
among us vs zombies game online free<br />
|
45 |
-
among us vs zombies apk pure<br />
|
46 |
-
among us vs zombies mod menu apk<br />
|
47 |
-
among us vs zombies season 2 episode 1<br />
|
48 |
-
among us vs zombies gameplay android<br />
|
49 |
-
among us vs zombies hack apk<br />
|
50 |
-
among us vs zombies video youtube<br />
|
51 |
-
among us vs zombies apk latest version<br />
|
52 |
-
among us vs zombies mod apk unlimited money<br />
|
53 |
-
among us vs zombies season 1 episode 6<br />
|
54 |
-
among us vs zombies game download for pc<br />
|
55 |
-
among us vs zombies apk offline<br />
|
56 |
-
among us vs zombies mod apk revdl<br />
|
57 |
-
among us vs zombies season 3 trailer<br />
|
58 |
-
among us vs zombies game play store<br />
|
59 |
-
among us vs zombies apk no ads<br />
|
60 |
-
among us vs zombies mod apk android 1<br />
|
61 |
-
among us vs zombies season 1 full movie<br />
|
62 |
-
among us vs zombies game online multiplayer<br />
|
63 |
-
among us vs zombies apk uptodown<br />
|
64 |
-
among us vs zombies mod apk rexdl<br />
|
65 |
-
among us vs zombies season 2 release date<br />
|
66 |
-
among us vs zombies game free download for android<br />
|
67 |
-
among us vs zombies apk modded<br />
|
68 |
-
among us vs zombies mod apk happymod<br />
|
69 |
-
among us vs zombies season 4 teaser<br />
|
70 |
-
among us vs zombies game online unblocked<br />
|
71 |
-
among us vs zombies apk mirror<br />
|
72 |
-
among us vs zombies mod apk unlimited everything<br />
|
73 |
-
among us vs zombies season 1 episode 1<br />
|
74 |
-
among us vs zombies game download apkpure<br />
|
75 |
-
among us vs zombies apk obb<br />
|
76 |
-
among us vs zombies mod apk no root<br />
|
77 |
-
among us vs zombies season 5 announcement<br />
|
78 |
-
among us vs zombies game online with friends<br />
|
79 |
-
among us vs zombies apk for ios<br />
|
80 |
-
among us vs zombies mod apk unlocked all skins<br />
|
81 |
-
among us vs zombies season 1 episode 5</p>
|
82 |
-
<h3>The drawbacks of playing Among Us vs Zombies APK</h3>
|
83 |
-
<p>Playing Among Us vs Zombies APK also has some drawbacks that you should be aware of, such as:</p>
|
84 |
-
<h4>It is not an official version of Among Us</h4>
|
85 |
-
<p>Playing Among Us vs Zombies APK means that you are playing a modded version of the game that is not authorized or endorsed by the developers of Among Us. This means that you may encounter some issues or conflicts with the original game, such as updates, features, or servers.</p>
|
86 |
-
<h4>It may have bugs and glitches</h4>
|
87 |
-
<p>Playing Among Us vs Zombies APK means that you are playing a modded version of the game that is not fully tested or optimized. This means that you may experience some bugs and glitches while playing, such as crashes, freezes, errors, or lags.</p>
|
88 |
-
<h4>It may not be safe or secure</h4>
|
89 |
-
<p>Playing Among Us vs Zombies APK means that you are downloading and installing a file from an unknown source that may not be safe or secure. This means that you may expose your device to viruses, malware, spyware, or hackers. You should always scan the file before installing it and use a VPN when playing online.</p>
|
90 |
-
<h2>Conclusion</h2>
|
91 |
-
<p>In conclusion, Among Us vs Zombies APK is a new twist on the popular game Among Us that adds a zombie role to the gameplay. It is a fun and challenging mod that you can download and install for free on your device. However, it also has some drawbacks that you should consider before playing, such as being unofficial, buggy, and risky. If you are interested in trying out this mod, you should follow the steps we provided above and be careful when playing online.</p>
|
92 |
-
<h3>Frequently Asked Questions</h3>
|
93 |
-
<ol>
|
94 |
-
<li>What is the difference between Among Us vs Zombies APK and Among Us Zombie Mode?</li>
|
95 |
-
<p>Among Us vs Zombies APK is a modded version of the game that introduces a new role: the zombie. The zombie can infect other players and turn them into zombies as well. Among Us Zombie Mode is an official game mode that was added in the Halloween update. The zombie mode is similar to hide and seek mode, where one player is randomly chosen as the zombie and has to chase and kill other players.</p>
|
96 |
-
<li>Can I play Among Us vs Zombies APK with other players who have the original version of Among Us?</li>
|
97 |
-
<p>No, you cannot play Among Us vs Zombies APK with other players who have the original version of Among Us. You can only play with other players who have the same version of the mod as you. You can either create or join a private lobby with your friends or join a public lobby with random players.</p>
|
98 |
-
<li>How can I update Among Us vs Zombies APK?</li>
|
99 |
-
<p>To update Among Us vs Zombies APK, you will need to download and install the latest version of the mod from a trusted website. You should also delete the old version of the mod from your device to avoid any conflicts or issues.</p>
|
100 |
-
<li>Is there a way to play Among Us vs Zombies APK on PC?</li>
|
101 |
-
<p>Yes, there is a way to play Among Us vs Zombies APK on PC. You will need to use an Android emulator such as BlueStacks or NoxPlayer to run the mod on your computer. You will also need to download and install the mod from a trusted website on your emulator.</p>
|
102 |
-
<li>What are some other mods for Among Us that I can try?</li>
|
103 |
-
<p>Some other mods for Among Us that you can try are:</p>
|
104 |
-
<ul>
|
105 |
-
<li>Among Us Naruto Mod: A mod that adds Naruto characters and abilities to the game.</li>
|
106 |
-
<li>Among Us Town of Salem Mod: A mod that adds roles and mechanics from the game Town of Salem to the game.</li>
|
107 |
-
<li>Among Us Airship Mod: A mod that adds the new Airship map and tasks to the game before the official release.</li>
|
108 |
-
</ul></p> 197e85843d<br />
|
109 |
-
<br />
|
110 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Baixe agora o livro A tica protestante e o esprito do capitalismo a anlise de Max Weber sobre a relao entre religio e economia.md
DELETED
@@ -1,104 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Download livro a ética protestante e o espírito do capitalismo pdf</h1>
|
3 |
-
<p>If you are interested in learning more about the relationship between religion and capitalism, you might want to read <em>A ética protestante e o espírito do capitalismo</em>, or <em>The Protestant Ethic and the Spirit of Capitalism</em>, by Max Weber. This is a book written by one of the most influential sociologists and economists of the 20th century, who argued that the religious ideas of groups such as the Calvinists played a role in creating the capitalistic spirit. In this article, we will explain what this book is about, why you should read it, and how you can download it in pdf format.</p>
|
4 |
-
<h2>download livro a ética protestante e o espírito do capitalismo pdf</h2><br /><p><b><b>DOWNLOAD</b> ↔ <a href="https://urlin.us/2uT20k">https://urlin.us/2uT20k</a></b></p><br /><br />
|
5 |
-
<h2>What is this book about?</h2>
|
6 |
-
<p><em>The Protestant Ethic and the Spirit of Capitalism</em> is a study of the relationship between the ethics of ascetic Protestantism and the emergence of the spirit of modern capitalism. Weber argues that the modern spirit of capitalism sees profit as an end in itself, and pursuing profit as virtuous. He believes that this spirit exists in opposition to traditionalism—a system in which people worked only hard enough to get by.</p>
|
7 |
-
<h3>The main thesis of Max Weber</h3>
|
8 |
-
<p>Weber's main thesis is that the religious ideas of groups such as the Calvinists played a role in creating the capitalistic spirit. He observes a correlation between being Protestant and being involved in business, and declares his intent to explore religion as a potential cause of the modern economic conditions. He focuses on Calvinism, a branch of Protestantism that emphasizes the doctrine of predestination—that God has already determined who is saved and damned. Weber infers that this doctrine created a psychological need for clues about one's salvation, and that Calvinists looked to their success in worldly activity as a sign of God's favor. Thus, they developed an ethic of hard work, frugality, and rationality, which Weber calls the Protestant ethic. This ethic, according to Weber, broke down the traditional economic system and paved the way for modern capitalism.</p>
|
9 |
-
<h3>The historical and cultural context of the book</h3>
|
10 |
-
<p>Weber wrote this book in the early 20th century, when Germany was undergoing rapid industrialization and urbanization. He was influenced by his personal background, as he was born into a wealthy family with a Protestant father and a Catholic mother. He was also influenced by other thinkers, such as Karl Marx, who analyzed the economic and social aspects of capitalism, but Weber disagreed with some aspects of Marx's theory. Weber wanted to provide a cultural explanation for capitalism, rather than a purely materialistic one. He also wanted to show how religion could have both positive and negative effects on society.</p>
|
11 |
-
<h3>The relevance and impact of the book</h3>
|
12 |
-
<p><em>The Protestant Ethic and the Spirit of Capitalism</em> is considered one of the most important works of sociology and economics ever written. It has inspired many debates and criticisms, as well as further research on the topics of religion, culture, and development. It has also influenced many fields and disciplines, such as history, psychology, anthropology, political science, and management. The book is still relevant today, as it helps us understand some of the values and attitudes that shape our modern world.</p>
|
13 |
-
<h2>Why should you read this book?</h2>
|
14 |
-
<p>There are many reasons why you should read this book, but here are some of the most compelling ones:</p>
|
15 |
-
<h3>It is a classic work of sociology and economics</h3> <p>This book is a masterpiece of social science, as it combines historical analysis, empirical data, theoretical arguments, and comparative perspectives. It shows how Weber applied his method of verstehen, or interpretive understanding, to explain the complex phenomena of human behavior and social change. It also demonstrates his skill in synthesizing various sources of information, such as statistics, documents, biographies, and literature. Reading this book will enrich your knowledge and appreciation of sociology and economics as disciplines that study human society and its development.</p>
|
16 |
-
<p>Baixar livro a ética protestante e o espírito do capitalismo pdf grátis<br />
|
17 |
-
Download grátis do livro de Max Weber a ética protestante e o espírito do capitalismo<br />
|
18 |
-
Como baixar o livro a ética protestante e o espírito do capitalismo em pdf<br />
|
19 |
-
Resumo do livro a ética protestante e o espírito do capitalismo pdf<br />
|
20 |
-
Download livro a ética protestante e o espírito do capitalismo pdf Google Books<br />
|
21 |
-
Livro a ética protestante e o espírito do capitalismo pdf online<br />
|
22 |
-
Download livro a ética protestante e o espírito do capitalismo pdf Academia.edu<br />
|
23 |
-
Livro a ética protestante e o espírito do capitalismo pdf completo<br />
|
24 |
-
Download livro a ética protestante e o espírito do capitalismo pdf Companhia das Letras<br />
|
25 |
-
Livro a ética protestante e o espírito do capitalismo pdf download direto<br />
|
26 |
-
Download livro a ética protestante e o espírito do capitalismo pdf original<br />
|
27 |
-
Livro a ética protestante e o espírito do capitalismo pdf versão ampliada<br />
|
28 |
-
Download livro a ética protestante e o espírito do capitalismo pdf tradução de José Marcos Mariani de Macedo<br />
|
29 |
-
Livro a ética protestante e o espírito do capitalismo pdf comentado por Antônio Flávio Pierucci<br />
|
30 |
-
Download livro a ética protestante e o espírito do capitalismo pdf sociologia da religião<br />
|
31 |
-
Livro a ética protestante e o espírito do capitalismo pdf análise da cultura capitalista<br />
|
32 |
-
Download livro a ética protestante e o espírito do capitalismo pdf relação entre religião e economia<br />
|
33 |
-
Livro a ética protestante e o espírito do capitalismo pdf conceito de desencantamento do mundo<br />
|
34 |
-
Download livro a ética protestante e o espírito do capitalismo pdf conceito de ação racional<br />
|
35 |
-
Livro a ética protestante e o espírito do capitalismo pdf conceito de vocação profissional<br />
|
36 |
-
Download livro a ética protestante e o espírito do capitalismo pdf conceito de ascese intramundana<br />
|
37 |
-
Livro a ética protestante e o espírito do capitalismo pdf influência da moral puritana<br />
|
38 |
-
Download livro a ética protestante e o espírito do capitalismo pdf influência das seitas protestantes<br />
|
39 |
-
Livro a ética protestante e o espírito do capitalismo pdf comparação entre católicos e protestantes<br />
|
40 |
-
Download livro a ética protestante e o espírito do capitalismo pdf crítica ao marxismo</p>
|
41 |
-
<h3>It offers a fascinating perspective on the origins of capitalism</h3>
|
42 |
-
<p>This book is not just a historical account of how capitalism emerged, but also a cultural analysis of how it was shaped by certain values and beliefs. Weber argues that capitalism is not a natural or inevitable outcome of human progress, but rather a contingent and historical product of specific cultural factors. He shows how the Protestant ethic, which originated in the 16th and 17th centuries, influenced the development of capitalism in the 18th and 19th centuries. He also compares the different forms of capitalism that emerged in different regions and countries, such as England, Germany, France, and the United States. Reading this book will help you understand the diversity and complexity of capitalism as a global phenomenon.</p>
|
43 |
-
<h3>It challenges some common assumptions about religion and society</h3>
|
44 |
-
<p>This book is not only a critique of capitalism, but also a critique of some aspects of modernity and rationality. Weber challenges the idea that religion is a backward or irrational force that hinders social progress. He argues that religion can have both positive and negative effects on society, depending on how it is interpreted and practiced. He also challenges the idea that rationalization is a linear or homogeneous process that leads to more efficiency and freedom. He argues that rationalization can have unintended consequences, such as disenchantment, alienation, and bureaucracy. Reading this book will make you rethink some of the assumptions and stereotypes that you may have about religion and society.</p>
|
45 |
-
<h2>How can you download this book in pdf format?</h2>
|
46 |
-
<p>If you are convinced that this book is worth reading, you may wonder how you can get a copy of it in pdf format. There are several ways to do this, but you should also be aware of some legal and ethical issues that may arise.</p>
|
47 |
-
<h3>The legal and ethical issues of downloading books online</h3>
|
48 |
-
<p>Before you download any book online, you should check if it is in the public domain or not. The public domain refers to works that are not protected by intellectual property rights, such as copyright or trademark. This means that anyone can use, copy, distribute, or modify these works without permission or payment. The public domain status of a work depends on the laws of each country and the date of publication or death of the author.</p>
|
49 |
-
<p>In general, works published before 1926 are in the public domain in the United States, while works published before 1900 are in the public domain in most European countries. However, there may be exceptions or variations depending on the type of work, the author's nationality, or the source of publication. For example, some works may have been renewed or restored by their owners or heirs, while others may have been translated or edited by different publishers or authors.</p>
|
50 |
-
<p>If a work is not in the public domain, you need to obtain permission from the owner or holder of the rights to download it legally. This may involve paying a fee or agreeing to certain terms and conditions. If you download a work without permission, you may be violating the law and risking legal consequences. You may also be harming the author or publisher by depriving them of their income or recognition.</p>
|
51 |
-
<p>Therefore, before you download any book online, you should do some research and verify its legal status and availability. You should also respect the rights and interests of the creators and owners of the works that you want to read.</p>
|
52 |
-
<h3>The best sources to find this book for free or for a low price</h3>
|
53 |
-
<p>If you are looking for <em>The Protestant Ethic and the Spirit of Capitalism</em> in pdf format, there are some sources that you can try:</p>
|
54 |
-
<ul>
|
55 |
-
<li><a href="">Project Gutenberg</a>: This is a website that offers over 60,000 free ebooks in various formats, including pdf. Most of these ebooks are in the public domain or have been donated by their authors or publishers. You can search for this book by its title or author name, and download it for free.</li>
|
56 |
-
<li><a href="">Internet Archive</a>: This is a website that provides access to millions of books, movies, music, websites, and other digital content. It also has a collection of over 20 million ebooks in various formats, including pdf. Some of these ebooks are in the public domain or have been uploaded by users or libraries. You can search for this book by its title or author name, and download it for free or borrow it for a limited time.</li>
|
57 |
-
<li><a href="">Google Books</a>: This is a website that allows you to search and preview millions of books from various sources, including libraries, publishers, and authors. Some of these books are available in full view, while others are only in snippet or limited preview. You can search for this book by its title or author name, and see if it is available in full view or not. If it is, you can download it in pdf format for free.</li>
|
58 |
-
<li><a href="">Amazon Kindle Store</a>: This is a website that sells ebooks for the Kindle device or app. You can find thousands of ebooks in various genres and languages, including <em>The Protestant Ethic and the Spirit of Capitalism</em>. You can buy this book for a low price, or get it for free if you have a Kindle Unlimited subscription or a Prime membership. You can also read a sample of the book before you buy it.</li>
|
59 |
-
</ul>
|
60 |
-
<h3>The advantages and disadvantages of reading books in pdf format</h3>
|
61 |
-
<p>Reading books in pdf format has some advantages and disadvantages that you should consider before you download them. Here are some of them:</p>
|
62 |
-
<table>
|
63 |
-
<tr>
|
64 |
-
<th>Advantages</th>
|
65 |
-
<th>Disadvantages</th>
|
66 |
-
</tr>
|
67 |
-
<tr>
|
68 |
-
<td>- You can access them on any device that supports pdf files, such as computers, tablets, smartphones, or e-readers.</td>
|
69 |
-
<td>- You may not be able to adjust the font size, style, or layout of the text to suit your preferences or needs.</td>
|
70 |
-
</tr>
|
71 |
-
<tr>
|
72 |
-
<td>- You can save them on your device or cloud storage, and read them offline or online.</td>
|
73 |
-
<td>- You may encounter compatibility or formatting issues, especially if the pdf file is scanned or converted from another format.</td>
|
74 |
-
</tr>
|
75 |
-
<tr>
|
76 |
-
<td>- You can print them out if you prefer reading on paper.</td>
|
77 |
-
<td>- You may not be able to use some features that are available in other formats, such as bookmarks, highlights, notes, or links.</td>
|
78 |
-
</tr>
|
79 |
-
<tr>
|
80 |
-
<td>- You can share them with others easily via email or social media.</td>
|
81 |
-
<td>- You may infringe the rights of the authors or publishers if you share them without permission or attribution.</td>
|
82 |
-
</tr>
|
83 |
-
</table>
|
84 |
-
<h2>Conclusion</h2>
|
85 |
-
<p>In conclusion, <em>The Protestant Ethic and the Spirit of Capitalism</em> is a book that explores the relationship between religion and capitalism. It is a classic work of sociology and economics that offers a fascinating perspective on the origins of capitalism, challenges some common assumptions about religion and society, and influences many fields and disciplines. If you want to read this book, you can download it in pdf format from various sources, but you should also be aware of the legal and ethical issues of downloading books online. Reading books in pdf format has some advantages and disadvantages that you should consider before you download them.</p>
|
86 |
-
<p>We hope that this article has helped you learn more about this book and how to download it in pdf format. If you have any questions or comments, please feel free to leave them below. Thank you for reading!</p>
|
87 |
-
<h2>FAQs</h2>
|
88 |
-
<h3>Who is Max Weber?</h3>
|
89 |
-
<p>Max Weber (1864-1920) was a German sociologist, economist, historian, philosopher, and political scientist. He is widely regarded as one of the founders of modern sociology and one of the most influential thinkers of the 20th century. He wrote many books and essays on topics such as religion, culture, politics, law, bureaucracy, rationality, and social action.</p>
|
90 |
-
<h3>What is capitalism?</h3>
|
91 |
-
<p>Capitalism is an economic system based on private ownership of the means of production and distribution of goods and services. It is characterized by free markets, competition, profit motive, individualism, and consumerism. Capitalism emerged in Europe in the 16th century and spread to other parts of the world through trade, colonization, and industrialization.</p>
|
92 |
-
<h3>What is Protestantism?</h3>
|
93 |
-
<p>Protestantism is a branch of Christianity that originated in the 16th century as a result of the Reformation. It is based on the rejection of some doctrines and practices of the Roman Catholic Church, such as papal authority, indulgences, sacraments, and clerical celibacy. It emphasizes the authority of the Bible, salvation by faith alone, and the priesthood of all believers. Some of the major denominations of Protestantism are Lutheranism, Calvinism, Anglicanism, Methodism, Baptism, Presbyterianism, and Pentecostalism.</p>
|
94 |
-
<h3>What is pdf format?</h3>
|
95 |
-
<p>Pdf (Portable Document Format) is a file format <p>Pdf (Portable Document Format) is a file format that preserves the layout, fonts, images, and graphics of a document, regardless of the application or platform that created it. It was developed by Adobe Systems in 1993 and is now an open standard. Pdf files can be viewed, printed, and edited using various software programs, such as Adobe Acrobat Reader, Microsoft Word, or Google Docs.</p>
|
96 |
-
<h3>How can I convert other formats to pdf?</h3>
|
97 |
-
<p>If you have a document in another format, such as Word, Excel, PowerPoint, or HTML, and you want to convert it to pdf, you can use one of the following methods:</p>
|
98 |
-
<ul>
|
99 |
-
<li>Use an online converter: There are many websites that offer free or paid services to convert different formats to pdf. Some examples are <a href="">PDF Converter</a>, <a href="">Smallpdf</a>, and <a href="">Zamzar</a>. You just need to upload your file, choose the output format, and download the converted file.</li>
|
100 |
-
<li>Use a desktop software: There are many software programs that can create or edit pdf files, such as Adobe Acrobat, Microsoft Office, or LibreOffice. You just need to open your file, choose the save as or export option, and select the pdf format.</li>
|
101 |
-
<li>Use a browser extension: There are some browser extensions that can convert web pages or other online content to pdf files, such as <a href="">Save as PDF</a>, <a href="">Webpage to PDF</a>, and <a href="">Print Friendly & PDF</a>. You just need to install the extension, open the web page or content that you want to convert, and click on the extension icon.</li>
|
102 |
-
</ul></p> 197e85843d<br />
|
103 |
-
<br />
|
104 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download GTA 5 Grand Theft Auto APK for Android and Explore the Open World of Los Santos on PC and Mac.md
DELETED
@@ -1,137 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Download GTA 5 APK Android: How to Play Grand Theft Auto V on Your Mobile Device</h1>
|
3 |
-
<p>If you are a fan of action-adventure games, you have probably heard of Grand Theft Auto V, or GTA 5 for short. This game is one of the most popular and successful video games of all time, with millions of players around the world. But did you know that you can also play GTA 5 on your mobile device? Yes, you read that right. You can download GTA 5 APK Android and enjoy this amazing game on your smartphone or tablet. In this article, we will show you how to do that in a few simple steps. But first, let's see what GTA 5 is and why it is so popular.</p>
|
4 |
-
<h2>Introduction</h2>
|
5 |
-
<h3>What is GTA 5 and why is it so popular?</h3>
|
6 |
-
<p>GTA 5 is an open-world action-adventure game developed by Rockstar Games and released in 2013. The game is set in the fictional city of Los Santos, which is based on Los Angeles, and follows the lives of three criminal protagonists: Michael, a retired bank robber; Trevor, a psychopathic drug dealer; and Franklin, a young street hustler. The game allows you to switch between these characters at any time and experience the story from different perspectives. You can also explore the vast and diverse world of Los Santos, which includes urban areas, mountains, deserts, beaches, and countryside. You can drive various vehicles, such as cars, bikes, planes, helicopters, boats, and even submarines. You can also engage in various activities, such as shooting, fighting, racing, robbing, gambling, golfing, tennis, yoga, hunting, scuba diving, skydiving, and more. You can also customize your characters' appearance, clothes, weapons, vehicles, and properties. You can also play online with other players in GTA Online mode, which offers even more content and features.</p>
|
7 |
-
<h2>download gta 5 apk android</h2><br /><p><b><b>Download</b> ✔ <a href="https://urlin.us/2uSZGk">https://urlin.us/2uSZGk</a></b></p><br /><br />
|
8 |
-
<p>GTA 5 is so popular because it offers an unparalleled level of freedom and fun. You can do almost anything you want in the game and create your own adventures. You can also enjoy the stunning graphics, realistic physics, immersive sound effects, witty dialogue, dark humor, and satirical commentary on modern society. The game has received critical acclaim from critics and gamers alike and has won numerous awards. It has also sold over 150 million copies worldwide and has become one of the best-selling video games of all time.</p>
|
9 |
-
<h3>What are the benefits of playing GTA 5 on your mobile device?</h3>
|
10 |
-
<p>Playing GTA 5 on your mobile device has many benefits. Here are some of them:</p>
|
11 |
-
<ul>
|
12 |
-
<li>You can play GTA 5 anytime and anywhere you want. You don't need a console or a PC to enjoy this game. You just need your smartphone or tablet and an internet connection.</li>
|
13 |
-
<li>You can save space on your device. You don't need to download the entire game file, which is over 60 GB in size. You just need to download the GTA 5 APK Android file, which is much smaller and faster to install.</li>
|
14 |
-
<li>You can enjoy the same gameplay experience as on other platforms. You can access all the features, missions, characters, vehicles, weapons, and activities that GTA 5 offers. You can also adjust the graphics settings, controls, and sound options to suit your preferences.</li>
|
15 |
-
<li>You can play GTA 5 with other mobile players. You can join GTA Online mode and interact with other players who are also using their mobile devices. You can chat, cooperate, compete, and have fun with them.</li>
|
16 |
-
</ul>
|
17 |
-
<p>As you can see, playing GTA 5 on your mobile device is a great way to enjoy this amazing game. But how do you download GTA 5 APK Android and install it on your device? Let's find out in the next section.</p>
|
18 |
-
<h3>How to download GTA 5 APK Android and install it on your device?</h3>
|
19 |
-
<p>Downloading GTA 5 APK Android and installing it on your device is not as hard as you might think. You just need to follow these three simple steps:</p>
|
20 |
-
<h2>Main Body</h2>
|
21 |
-
<h3>Step 1: Download GTA 5 APK Android from a trusted source</h3>
|
22 |
-
<p>The first step is to download the GTA 5 APK Android file from a trusted source. This is very important because there are many fake and malicious websites that claim to offer GTA 5 APK Android but actually contain malware and viruses that can harm your device and steal your personal information. You don't want that to happen, do you?</p>
|
23 |
-
<p>So how do you find a reliable and safe website to download GTA 5 APK Android? Here are some tips:</p>
|
24 |
-
<h4>How to find a reliable and safe website to download GTA 5 APK Android?</h4>
|
25 |
-
<ul>
|
26 |
-
<li>Do some research before downloading anything. Look for reviews, ratings, comments, feedback, and testimonials from other users who have downloaded GTA 5 APK Android from the website. See what they have to say about the quality, performance, security, and customer service of the website.</li>
|
27 |
-
<li>Check the domain name and the URL of the website. Make sure they are legitimate and not misspelled or suspicious. For example, avoid websites that have names like "gtavapk.com" or "gta5android.net". These are likely to be fake and dangerous.</li>
|
28 |
-
<li>Look for signs of credibility and professionalism on the website. See if the website has a clear and detailed description of GTA 5 APK Android, its features, requirements, installation process, screenshots, videos, and FAQs. See if the website has a contact page, a privacy policy, a terms of service, and a disclaimer. See if the website has a secure connection (HTTPS) and a valid certificate.</li>
|
29 |
-
<li>Avoid websites that ask for personal information or payment before downloading GTA 5 APK Android. These are likely to be scams that want to trick you into giving them your money or your data. You don't need to pay or register anything to download GTA 5 APK Android.</li>
|
30 |
-
</ul>
|
31 |
-
<p>By following these tips, you should be able to find a reliable and safe website to download GTA 5 APK Android. One such website that we recommend is [GTA5Mobile.com]. This website has been verified by many users and has a high reputation for providing quality GTA 5 APK Android files. You can download GTA 5 APK Android from this website for free and without any hassle.</p>
|
32 |
-
<h4>How to avoid malware and viruses when downloading GTA 5 APK Android?</h4>
|
33 |
-
<ul>
|
34 |
-
<li>Use a reputable antivirus software on your device. Scan the GTA 5 APK Android file before installing it on your device. Delete any file that is detected as malicious or infected.</li>
|
35 |
-
<li>Use a VPN service on your device. This will encrypt your internet traffic and protect your online privacy and security. This will also help you bypass any geo-restrictions or censorship that might prevent you from accessing some websites.</li>
|
36 |
-
<li>Use a firewall on your device. This will block any unauthorized or suspicious connections or requests from entering or leaving your device.</li>
|
37 |
-
</ul>
|
38 |
-
<p>By following these tips, you should be able to avoid malware and viruses when downloading GTA 5 APK Android.</p>
|
39 |
-
<p>How to download gta 5 apk android for free<br />
|
40 |
-
Download gta 5 mobile – grand theft auto apk for android, play on pc and mac<br />
|
41 |
-
GTA 5 apk android download – latest version, no root required<br />
|
42 |
-
Download gta 5 apk + obb data for android devices<br />
|
43 |
-
GTA 5 android apk download – best tips and tricks<br />
|
44 |
-
Download gta 5 apk android full game offline<br />
|
45 |
-
GTA 5 apk android mod menu – how to install and use<br />
|
46 |
-
Download gta 5 apk android highly compressed<br />
|
47 |
-
GTA 5 apk android online – how to play with friends<br />
|
48 |
-
Download gta 5 apk android without verification<br />
|
49 |
-
GTA 5 apk android cheats – how to activate and use<br />
|
50 |
-
Download gta 5 apk android with real cars and bikes<br />
|
51 |
-
GTA 5 apk android graphics settings – how to optimize and improve<br />
|
52 |
-
Download gta 5 apk android with unlimited money and weapons<br />
|
53 |
-
GTA 5 apk android system requirements – minimum and recommended specs<br />
|
54 |
-
Download gta 5 apk android from official website<br />
|
55 |
-
GTA 5 apk android review – pros and cons, features and gameplay<br />
|
56 |
-
Download gta 5 apk android with missions and story mode<br />
|
57 |
-
GTA 5 apk android update – latest news and patch notes<br />
|
58 |
-
Download gta 5 apk android with custom skins and outfits<br />
|
59 |
-
GTA 5 apk android controller support – how to connect and use<br />
|
60 |
-
Download gta 5 apk android with voice chat and multiplayer mode<br />
|
61 |
-
GTA 5 apk android download size – how much space do you need<br />
|
62 |
-
Download gta 5 apk android with realistic physics and ragdoll effects<br />
|
63 |
-
GTA 5 apk android bugs and glitches – how to fix and avoid<br />
|
64 |
-
Download gta 5 apk android with new maps and locations<br />
|
65 |
-
GTA 5 apk android best settings – how to increase performance and fps<br />
|
66 |
-
Download gta 5 apk android with zombies and survival mode<br />
|
67 |
-
GTA 5 apk android comparison – how does it differ from pc and console versions<br />
|
68 |
-
Download gta 5 apk android with vr support and immersive experience<br />
|
69 |
-
GTA 5 apk android alternatives – other games like gta 5 for android<br />
|
70 |
-
Download gta 5 apk android with cloud save and backup feature<br />
|
71 |
-
GTA 5 apk android problems and solutions – how to troubleshoot and solve common issues<br />
|
72 |
-
Download gta 5 apk android with soundtracks and radio stations<br />
|
73 |
-
GTA 5 apk android secrets and easter eggs – how to find and unlock them</p>
|
74 |
-
<h3>Step 2: Enable unknown sources on your device settings</h3>
|
75 |
-
<p>The second step is to enable unknown sources on your device settings. This is necessary because GTA 5 APK Android is not available on the official Google Play Store or App Store. Therefore, you need to allow your device to install apps from sources other than the official ones.</p>
|
76 |
-
<p>This step is different depending on the type of device you have. Here are some instructions:</p>
|
77 |
-
<h4>How to enable unknown sources on Android devices?</h4>
|
78 |
-
<ul>
|
79 |
-
<li>Go to Settings > Security > Unknown Sources.</li>
|
80 |
-
<li>Toggle the switch to turn it on.</li>
|
81 |
-
<li>A warning message will appear. Tap OK to confirm.</li>
|
82 |
-
</ul> <h4>Why is this step necessary and what are the risks involved?</h4>
|
83 |
-
<p>This step is necessary because by default, Android devices only allow installing apps from the official Google Play Store. This is to prevent installing apps that are not verified or authorized by Google. However, this also means that you cannot install apps that are not available on the Google Play Store, such as GTA 5 APK Android.</p>
|
84 |
-
<p>The risks involved in this step are that you might install apps that are harmful or malicious to your device or your data. Some apps might contain malware, viruses, spyware, adware, or other unwanted programs that can damage your device, steal your information, or compromise your security. Some apps might also have bugs, errors, or glitches that can cause your device to malfunction, crash, or freeze.</p>
|
85 |
-
<p>Therefore, you should be careful and cautious when enabling unknown sources on your device settings. You should only download and install apps from trusted and reputable sources. You should also scan the apps with antivirus software before installing them. You should also disable unknown sources after installing GTA 5 APK Android to prevent accidental or unauthorized installations of other apps.</p>
|
86 |
-
<h3>Step 3: Install GTA 5 APK Android on your device and launch the game</h3>
|
87 |
-
<p>The third and final step is to install GTA 5 APK Android on your device and launch the game. This is the easiest and most exciting step. You are almost ready to play GTA 5 on your mobile device.</p>
|
88 |
-
<p>Here are some instructions:</p>
|
89 |
-
<h4>How to install GTA 5 APK Android on your device?</h4>
|
90 |
-
<ul>
|
91 |
-
<li>Locate the GTA 5 APK Android file that you downloaded from the website. You can find it in your Downloads folder or in the notification bar.</li>
|
92 |
-
<li>Tap on the file to open it.</li>
|
93 |
-
<li>A pop-up window will appear. Tap Install to start the installation process.</li>
|
94 |
-
<li>Wait for a few minutes until the installation is complete.</li>
|
95 |
-
<li>A confirmation message will appear. Tap Done to finish the installation.</li>
|
96 |
-
</ul>
|
97 |
-
<h4>How to launch the game and start playing GTA 5 on your mobile device?</h4>
|
98 |
-
<ul>
|
99 |
-
<li>Go to your app drawer and look for the GTA 5 icon.</li>
|
100 |
-
<li>Tap on the icon to launch the game.</li>
|
101 |
-
<li>A loading screen will appear. Wait for a few seconds until the game loads.</li>
|
102 |
-
<li>A welcome screen will appear. Tap Start Game to begin playing GTA 5 on your mobile device.</li>
|
103 |
-
<li>A menu screen will appear. You can choose between Story Mode or Online Mode. You can also adjust the settings, options, and features of the game according to your preferences.</li>
|
104 |
-
<li>Select your preferred mode and enjoy playing GTA 5 on your mobile device.</li>
|
105 |
-
</ul>
|
106 |
-
<h2>Conclusion</h2>
|
107 |
-
<h3>Summary of the main points and tips</h3>
|
108 |
-
<p>In this article, we have shown you how to download GTA 5 APK Android and play Grand Theft Auto V on your mobile device. We have explained what GTA 5 is and why it is so popular. We have also listed the benefits of playing GTA 5 on your mobile device. We have also given you a step-by-step guide on how to download GTA 5 APK Android from a trusted source, enable unknown sources on your device settings, install GTA 5 APK Android on your device, and launch the game.</p>
|
109 |
-
<p>Here are some tips to remember when downloading and playing GTA 5 APK Android:</p>
|
110 |
-
<ul>
|
111 |
-
<li>Download GTA 5 APK Android only from a trusted and reputable website, such as [GTA5Mobile.com]. Avoid fake and malicious websites that might harm your device or data.</li>
|
112 |
-
<li>Scan the GTA 5 APK Android file with antivirus software before installing it on your device. Delete any file that is detected as malicious or infected.</li>
|
113 |
-
<li>Enable unknown sources on your device settings only when installing GTA 5 APK Android. Disable it after installing the game to prevent accidental or unauthorized installations of other apps.</li>
|
114 |
-
<li>Adjust the graphics settings, controls, and sound options of the game according to your preferences and device capabilities. You can also customize your characters' appearance, clothes, weapons, vehicles, and properties in the game.</li>
|
115 |
-
<li>Play online with other mobile players in GTA Online mode. Chat, cooperate, compete, and have fun with them.</li>
|
116 |
-
</ul>
|
117 |
-
<h3>Call to action and final thoughts</h3>
|
118 |
-
<p>If you are ready to play GTA 5 on your mobile device, what are you waiting for? Download GTA 5 APK Android now and enjoy this amazing game anytime and anywhere you want. You will not regret it.</p>
|
119 |
-
<p>GTA 5 is one of the best games ever made and playing it on your mobile device is a unique and thrilling experience. You can explore the vast and diverse world of Los Santos, which is based on Los Angeles, and follow the lives of three criminal protagonists: Michael, Trevor, and Franklin. You can also drive various vehicles, engage in various activities, and customize your characters and properties. You can also play online with other players and interact with them.</p>
|
120 |
-
<p>GTA 5 APK Android is the best way to play GTA 5 on your mobile device. It is easy to download and install, and it offers the same gameplay experience as on other platforms. It also saves space on your device and allows you to play GTA 5 anytime and anywhere you want.</p>
|
121 |
-
<p>So don't wait any longer. Download GTA 5 APK Android today and have fun playing GTA 5 on your mobile device. You will love it.</p>
|
122 |
-
<h2>FAQs</h2>
|
123 |
-
<p>Here are some frequently asked questions about GTA 5 APK Android:</p>
|
124 |
-
<ol>
|
125 |
-
<li>Is GTA 5 APK Android free?</li>
|
126 |
-
<p>Yes, GTA 5 APK Android is free to download and play. You don't need to pay or register anything to enjoy this game.</p>
|
127 |
-
<li>Is GTA 5 APK Android safe?</li>
|
128 |
-
<p>Yes, GTA 5 APK Android is safe to download and install. However, you should only download it from a trusted and reputable website, such as [GTA5Mobile.com]. You should also scan the file with antivirus software before installing it on your device.</p>
|
129 |
-
<li>Is GTA 5 APK Android compatible with my device?</li>
|
130 |
-
<p>GTA 5 APK Android is compatible with most Android devices that have at least 4 GB of RAM and a quad-core processor. However, some devices might have issues with the graphics or performance of the game. You can adjust the settings of the game to suit your device capabilities.</p>
|
131 |
-
<li>How much space does GTA 5 APK Android take on my device?</li>
|
132 |
-
<p>GTA 5 APK Android takes about 1 GB of space on your device. However, you might need more space for the additional data files that the game will download when you launch it for the first time.</p>
|
133 |
-
<li>Can I play GTA 5 APK Android offline?</li>
|
134 |
-
<p>No, you cannot play GTA 5 APK Android offline. You need an internet connection to play this game. However, you can play the Story Mode without an internet connection once you have downloaded the data files.</p>
|
135 |
-
</ol></p> 197e85843d<br />
|
136 |
-
<br />
|
137 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1toTree/lora_test/ppdiffusers/schedulers/scheduling_ipndm.py
DELETED
@@ -1,163 +0,0 @@
|
|
1 |
-
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
|
2 |
-
# Copyright 2022 Zhejiang University Team and The HuggingFace Team. All rights reserved.
|
3 |
-
#
|
4 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
# you may not use this file except in compliance with the License.
|
6 |
-
# You may obtain a copy of the License at
|
7 |
-
#
|
8 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
#
|
10 |
-
# Unless required by applicable law or agreed to in writing, software
|
11 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
# See the License for the specific language governing permissions and
|
14 |
-
# limitations under the License.
|
15 |
-
|
16 |
-
import math
|
17 |
-
from typing import List, Optional, Tuple, Union
|
18 |
-
|
19 |
-
import numpy as np
|
20 |
-
import paddle
|
21 |
-
|
22 |
-
from ..configuration_utils import ConfigMixin, register_to_config
|
23 |
-
from .scheduling_utils import SchedulerMixin, SchedulerOutput
|
24 |
-
|
25 |
-
|
26 |
-
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
|
27 |
-
"""
|
28 |
-
Improved Pseudo numerical methods for diffusion models (iPNDM) ported from @crowsonkb's amazing k-diffusion
|
29 |
-
[library](https://github.com/crowsonkb/v-diffusion-pytorch/blob/987f8985e38208345c1959b0ea767a625831cc9b/diffusion/sampling.py#L296)
|
30 |
-
|
31 |
-
[`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
|
32 |
-
function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
|
33 |
-
[`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and
|
34 |
-
[`~SchedulerMixin.from_pretrained`] functions.
|
35 |
-
|
36 |
-
For more details, see the original paper: https://arxiv.org/abs/2202.09778
|
37 |
-
|
38 |
-
Args:
|
39 |
-
num_train_timesteps (`int`): number of diffusion steps used to train the model.
|
40 |
-
trained_betas (`np.ndarray`, optional):
|
41 |
-
option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc.
|
42 |
-
"""
|
43 |
-
|
44 |
-
order = 1
|
45 |
-
|
46 |
-
@register_to_config
|
47 |
-
def __init__(
|
48 |
-
self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None
|
49 |
-
):
|
50 |
-
# set `betas`, `alphas`, `timesteps`
|
51 |
-
self.set_timesteps(num_train_timesteps)
|
52 |
-
|
53 |
-
# standard deviation of the initial noise distribution
|
54 |
-
self.init_noise_sigma = 1.0
|
55 |
-
|
56 |
-
# For now we only support F-PNDM, i.e. the runge-kutta method
|
57 |
-
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
|
58 |
-
# mainly at formula (9), (12), (13) and the Algorithm 2.
|
59 |
-
self.pndm_order = 4
|
60 |
-
|
61 |
-
# running values
|
62 |
-
self.ets = []
|
63 |
-
|
64 |
-
def set_timesteps(self, num_inference_steps: int):
|
65 |
-
"""
|
66 |
-
Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference.
|
67 |
-
|
68 |
-
Args:
|
69 |
-
num_inference_steps (`int`):
|
70 |
-
the number of diffusion steps used when generating samples with a pre-trained model.
|
71 |
-
"""
|
72 |
-
self.num_inference_steps = num_inference_steps
|
73 |
-
steps = paddle.linspace(1, 0, num_inference_steps + 1)[:-1]
|
74 |
-
steps = paddle.concat([steps, paddle.to_tensor([0.0])])
|
75 |
-
|
76 |
-
if self.config.trained_betas is not None:
|
77 |
-
self.betas = paddle.to_tensor(self.config.trained_betas, dtype="float32")
|
78 |
-
else:
|
79 |
-
self.betas = paddle.sin(steps * math.pi / 2) ** 2
|
80 |
-
|
81 |
-
self.alphas = (1.0 - self.betas**2) ** 0.5
|
82 |
-
|
83 |
-
self.timesteps = (paddle.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
|
84 |
-
|
85 |
-
self.ets = []
|
86 |
-
|
87 |
-
def step(
|
88 |
-
self,
|
89 |
-
model_output: paddle.Tensor,
|
90 |
-
timestep: int,
|
91 |
-
sample: paddle.Tensor,
|
92 |
-
return_dict: bool = True,
|
93 |
-
) -> Union[SchedulerOutput, Tuple]:
|
94 |
-
"""
|
95 |
-
Step function propagating the sample with the linear multi-step method. This has one forward pass with multiple
|
96 |
-
times to approximate the solution.
|
97 |
-
|
98 |
-
Args:
|
99 |
-
model_output (`paddle.Tensor`): direct output from learned diffusion model.
|
100 |
-
timestep (`int`): current discrete timestep in the diffusion chain.
|
101 |
-
sample (`paddle.Tensor`):
|
102 |
-
current instance of sample being created by diffusion process.
|
103 |
-
return_dict (`bool`): option for returning tuple rather than SchedulerOutput class
|
104 |
-
|
105 |
-
Returns:
|
106 |
-
[`~scheduling_utils.SchedulerOutput`] or `tuple`: [`~scheduling_utils.SchedulerOutput`] if `return_dict` is
|
107 |
-
True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor.
|
108 |
-
|
109 |
-
"""
|
110 |
-
if self.num_inference_steps is None:
|
111 |
-
raise ValueError(
|
112 |
-
"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
|
113 |
-
)
|
114 |
-
|
115 |
-
timestep_index = (self.timesteps == timestep).nonzero().item()
|
116 |
-
prev_timestep_index = timestep_index + 1
|
117 |
-
|
118 |
-
ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
|
119 |
-
self.ets.append(ets)
|
120 |
-
|
121 |
-
if len(self.ets) == 1:
|
122 |
-
ets = self.ets[-1]
|
123 |
-
elif len(self.ets) == 2:
|
124 |
-
ets = (3 * self.ets[-1] - self.ets[-2]) / 2
|
125 |
-
elif len(self.ets) == 3:
|
126 |
-
ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
|
127 |
-
else:
|
128 |
-
ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
|
129 |
-
|
130 |
-
prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)
|
131 |
-
|
132 |
-
if not return_dict:
|
133 |
-
return (prev_sample,)
|
134 |
-
|
135 |
-
return SchedulerOutput(prev_sample=prev_sample)
|
136 |
-
|
137 |
-
def scale_model_input(self, sample: paddle.Tensor, *args, **kwargs) -> paddle.Tensor:
|
138 |
-
"""
|
139 |
-
Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
|
140 |
-
current timestep.
|
141 |
-
|
142 |
-
Args:
|
143 |
-
sample (`paddle.Tensor`): input sample
|
144 |
-
|
145 |
-
Returns:
|
146 |
-
`paddle.Tensor`: scaled input sample
|
147 |
-
"""
|
148 |
-
return sample
|
149 |
-
|
150 |
-
def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
|
151 |
-
alpha = self.alphas[timestep_index]
|
152 |
-
sigma = self.betas[timestep_index]
|
153 |
-
|
154 |
-
next_alpha = self.alphas[prev_timestep_index]
|
155 |
-
next_sigma = self.betas[prev_timestep_index]
|
156 |
-
|
157 |
-
pred = (sample - sigma * ets) / max(alpha, 1e-8)
|
158 |
-
prev_sample = next_alpha * pred + ets * next_sigma
|
159 |
-
|
160 |
-
return prev_sample
|
161 |
-
|
162 |
-
def __len__(self):
|
163 |
-
return self.config.num_train_timesteps
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/A00001/bingothoo/src/lib/isomorphic/index.ts
DELETED
@@ -1,17 +0,0 @@
|
|
1 |
-
'use client'
|
2 |
-
|
3 |
-
import Default from './browser'
|
4 |
-
|
5 |
-
let exportsModel: any = {}
|
6 |
-
|
7 |
-
if (process.browser) {
|
8 |
-
Object.assign(exportsModel, require('./browser').default)
|
9 |
-
} else {
|
10 |
-
Object.assign(exportsModel, require('./node').default)
|
11 |
-
}
|
12 |
-
|
13 |
-
export default exportsModel! as typeof Default
|
14 |
-
|
15 |
-
export const fetch: typeof Default.fetch = exportsModel!.fetch
|
16 |
-
export const WebSocket: typeof Default.WebSocket = exportsModel!.WebSocket
|
17 |
-
export const debug: typeof Default.debug = exportsModel!.debug
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AILab-CVC/SEED-LLaMA/models/model_tools.py
DELETED
@@ -1,18 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
from .llama_xformer import LlamaForCausalLM
|
3 |
-
|
4 |
-
|
5 |
-
def get_pretrained_llama_causal_model(pretrained_model_name_or_path=None, torch_dtype='fp16', **kwargs):
|
6 |
-
if torch_dtype == 'fp16' or torch_dtype == 'float16':
|
7 |
-
torch_dtype = torch.float16
|
8 |
-
elif torch_dtype == 'bf16' or torch_dtype == 'bfloat16':
|
9 |
-
torch_dtype = torch.bfloat16
|
10 |
-
else:
|
11 |
-
torch_dtype == torch.float32
|
12 |
-
model = LlamaForCausalLM.from_pretrained(
|
13 |
-
pretrained_model_name_or_path=pretrained_model_name_or_path,
|
14 |
-
torch_dtype=torch_dtype,
|
15 |
-
**kwargs,
|
16 |
-
)
|
17 |
-
|
18 |
-
return model
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/ANDRYHA/FakeNewsClassifier/README.md
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: FakeNewsClassifier
|
3 |
-
emoji: 🔥
|
4 |
-
colorFrom: blue
|
5 |
-
colorTo: green
|
6 |
-
sdk: streamlit
|
7 |
-
sdk_version: 1.2.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: mit
|
11 |
-
---
|
12 |
-
|
13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Aaajdhdhdhahdbbaabs/Hshdhdhd/Dockerfile
DELETED
@@ -1,21 +0,0 @@
|
|
1 |
-
FROM node:18-bullseye-slim
|
2 |
-
|
3 |
-
RUN apt-get update && \
|
4 |
-
|
5 |
-
apt-get install -y git
|
6 |
-
|
7 |
-
RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app
|
8 |
-
|
9 |
-
WORKDIR /app
|
10 |
-
|
11 |
-
RUN npm install
|
12 |
-
|
13 |
-
COPY Dockerfile greeting.md* .env* ./
|
14 |
-
|
15 |
-
RUN npm run build
|
16 |
-
|
17 |
-
EXPOSE 7860
|
18 |
-
|
19 |
-
ENV NODE_ENV=production
|
20 |
-
|
21 |
-
CMD [ "npm", "start" ]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AchyuthGamer/Free-Accounts-Generator/fortnite/css/style.css
DELETED
@@ -1,80 +0,0 @@
|
|
1 |
-
body {
|
2 |
-
font-family: Verdana, Geneva, sans-serif;
|
3 |
-
font-size: 1.2em;
|
4 |
-
margin: 2%;
|
5 |
-
max-width: 100%;
|
6 |
-
padding: 80px 30px;
|
7 |
-
line-height: 1.65em;
|
8 |
-
background-image: url('https://huggingface.co/spaces/AchyuthGamer/Free-Accounts-Generator/resolve/main/img/fortnite.jpg');
|
9 |
-
color: #fff;
|
10 |
-
font-weight: 300;
|
11 |
-
|
12 |
-
}
|
13 |
-
|
14 |
-
h1 {
|
15 |
-
text-align: center;
|
16 |
-
margin: 19% 0 5% 0;
|
17 |
-
font-size: 60px;
|
18 |
-
text-shadow: 0 0 38px #FFFF00, 0 0 38px #0000FF;
|
19 |
-
}
|
20 |
-
|
21 |
-
h4 {
|
22 |
-
text-align: center;
|
23 |
-
margin: 50% 0 5% 0;
|
24 |
-
}
|
25 |
-
|
26 |
-
#wordbox {
|
27 |
-
/*opacity: 0;*/
|
28 |
-
margin: 30px auto 0;
|
29 |
-
display: block;
|
30 |
-
width: 80%;
|
31 |
-
height: 50px;
|
32 |
-
font-size: 25px;
|
33 |
-
text-align: center;
|
34 |
-
background: #fff;
|
35 |
-
border-radius: 6px;
|
36 |
-
color: #black;
|
37 |
-
transition: 1s linear;
|
38 |
-
}
|
39 |
-
|
40 |
-
#button {
|
41 |
-
-webkit-box-sizing: border-box;
|
42 |
-
-moz-box-sizing: border-box;
|
43 |
-
box-sizing: border-box;
|
44 |
-
background: #0b7fba;
|
45 |
-
border: 0;
|
46 |
-
color: #fff;
|
47 |
-
font-size: 20px;
|
48 |
-
padding: 1em 2em;
|
49 |
-
cursor: pointer;
|
50 |
-
margin: 0 auto 80px;
|
51 |
-
display: block;
|
52 |
-
text-align: center;
|
53 |
-
border-radius: 6px;
|
54 |
-
font-weight: bold;
|
55 |
-
transition: all 0.3s ease;
|
56 |
-
background-image: linear-gradient(to right, #25aae1, #4481eb, #04befe, #3f86ed);
|
57 |
-
box-shadow: 0 4px 15px 0 rgba(65, 132, 234, 0.75);
|
58 |
-
}
|
59 |
-
|
60 |
-
#button:hover {
|
61 |
-
background-position: 100% 0;
|
62 |
-
-moz-transition: all 0.4s ease-in-out;
|
63 |
-
-o-transition: all 0.4s ease-in-out;
|
64 |
-
-webkit-transition: all 0.4s ease-in-out;
|
65 |
-
transition: all 0.4s ease-in-out;
|
66 |
-
transform: scale(1.2);
|
67 |
-
cursor: pointer; }
|
68 |
-
|
69 |
-
#button:focus {
|
70 |
-
outline: none;
|
71 |
-
}
|
72 |
-
|
73 |
-
|
74 |
-
|
75 |
-
span {
|
76 |
-
position: bottom;
|
77 |
-
top: 0;
|
78 |
-
left: 0;
|
79 |
-
margin: 40px;
|
80 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/webfontloader-plugin.js
DELETED
@@ -1,15 +0,0 @@
|
|
1 |
-
import LoaderCallback from './loader/webfontloader/WebFontLoaderCallback.js';
|
2 |
-
|
3 |
-
class WebFontLoaderPlugin extends Phaser.Plugins.BasePlugin {
|
4 |
-
constructor(pluginManager) {
|
5 |
-
super(pluginManager);
|
6 |
-
|
7 |
-
pluginManager.registerFileType('rexWebFont', LoaderCallback);
|
8 |
-
}
|
9 |
-
|
10 |
-
addToScene(scene) {
|
11 |
-
scene.sys.load['rexWebFont'] = LoaderCallback;
|
12 |
-
}
|
13 |
-
}
|
14 |
-
|
15 |
-
export default WebFontLoaderPlugin;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridsizer/GridSizer.d.ts
DELETED
@@ -1,145 +0,0 @@
|
|
1 |
-
// import * as Phaser from 'phaser';
|
2 |
-
import BaseSizer from '../basesizer/BaseSizer.js';
|
3 |
-
|
4 |
-
export default GridSizer;
|
5 |
-
|
6 |
-
declare namespace GridSizer {
|
7 |
-
type AlignTypes = number | 'center' | 'left' | 'right' | 'top' | 'bottom' |
|
8 |
-
'left-top' | 'left-center' | 'left-bottom' |
|
9 |
-
'center-top' | 'center-center' | 'center-bottom' |
|
10 |
-
'right-top' | 'right-center' | 'right-bottom';
|
11 |
-
type PaddingTypes = number |
|
12 |
-
{
|
13 |
-
left?: number,
|
14 |
-
right?: number,
|
15 |
-
top?: number,
|
16 |
-
bottom?: number
|
17 |
-
};
|
18 |
-
|
19 |
-
type CreateCellContainerCallbackType = (
|
20 |
-
scene: Phaser.Scene,
|
21 |
-
x: number, y: number,
|
22 |
-
config: {
|
23 |
-
column?: number, row?: number,
|
24 |
-
|
25 |
-
align?: GridSizer.AlignTypes,
|
26 |
-
padding?: GridSizer.PaddingTypes,
|
27 |
-
expand?: boolean,
|
28 |
-
key?: string
|
29 |
-
}
|
30 |
-
) => Phaser.GameObjects.GameObject;
|
31 |
-
|
32 |
-
interface IConfig extends BaseSizer.IConfig {
|
33 |
-
x?: number,
|
34 |
-
y?: number,
|
35 |
-
width?: number,
|
36 |
-
height?: number,
|
37 |
-
|
38 |
-
column?: number,
|
39 |
-
row?: number,
|
40 |
-
|
41 |
-
columnProportions?: number | number[],
|
42 |
-
rowProportions?: number | number[],
|
43 |
-
|
44 |
-
space?: {
|
45 |
-
left?: number, right?: number, top?: number, bottom?: number,
|
46 |
-
|
47 |
-
column?: number | number[],
|
48 |
-
row?: number | number[],
|
49 |
-
|
50 |
-
indentLeftOdd?: number, indentLeftEven?: number,
|
51 |
-
indentTopOdd?: number, indentTopEven?: number,
|
52 |
-
},
|
53 |
-
|
54 |
-
createCellContainerCallback?: CreateCellContainerCallbackType
|
55 |
-
}
|
56 |
-
|
57 |
-
}
|
58 |
-
|
59 |
-
|
60 |
-
declare class GridSizer extends BaseSizer {
|
61 |
-
sizerChildren: (Phaser.GameObjects.GameObject | null)[];
|
62 |
-
|
63 |
-
constructor(
|
64 |
-
scene: Phaser.Scene,
|
65 |
-
config?: GridSizer.IConfig
|
66 |
-
);
|
67 |
-
|
68 |
-
constructor(
|
69 |
-
scene: Phaser.Scene,
|
70 |
-
x: number, y: number,
|
71 |
-
config?: GridSizer.IConfig
|
72 |
-
);
|
73 |
-
|
74 |
-
constructor(
|
75 |
-
scene: Phaser.Scene,
|
76 |
-
x: number, y: number,
|
77 |
-
width: number, height: number,
|
78 |
-
config?: GridSizer.IConfig
|
79 |
-
);
|
80 |
-
|
81 |
-
constructor(
|
82 |
-
scene: Phaser.Scene,
|
83 |
-
x: number, y: number,
|
84 |
-
width: number, height: number,
|
85 |
-
column: number, row: number,
|
86 |
-
config?: GridSizer.IConfig
|
87 |
-
);
|
88 |
-
|
89 |
-
setColumnProportion(columnIndex: number, proportion: number): this;
|
90 |
-
setRowProportion(rowIndex: number, proportion: number): this;
|
91 |
-
|
92 |
-
add(
|
93 |
-
gameObject: Phaser.GameObjects.GameObject,
|
94 |
-
config?: {
|
95 |
-
column?: number | undefined,
|
96 |
-
row?: number | undefined | true,
|
97 |
-
align?: GridSizer.AlignTypes,
|
98 |
-
padding?: GridSizer.PaddingTypes,
|
99 |
-
expand?: boolean,
|
100 |
-
key?: string
|
101 |
-
}
|
102 |
-
): this;
|
103 |
-
|
104 |
-
add(
|
105 |
-
gameObject: Phaser.GameObjects.GameObject,
|
106 |
-
columnIndex?: number | undefined,
|
107 |
-
rowIndex?: number | undefined | true,
|
108 |
-
align?: GridSizer.AlignTypes,
|
109 |
-
padding?: GridSizer.PaddingTypes,
|
110 |
-
expand?: boolean,
|
111 |
-
key?: string
|
112 |
-
): this;
|
113 |
-
|
114 |
-
remove(
|
115 |
-
gameObject: Phaser.GameObjects.GameObject,
|
116 |
-
destroyChild?: boolean
|
117 |
-
): this;
|
118 |
-
|
119 |
-
removeAt(
|
120 |
-
columnIndex: number,
|
121 |
-
rowIndex: number,
|
122 |
-
destroyChild?: boolean
|
123 |
-
): this;
|
124 |
-
|
125 |
-
removeAll(
|
126 |
-
destroyChild?: boolean
|
127 |
-
): this;
|
128 |
-
|
129 |
-
clear(
|
130 |
-
destroyChild?: boolean
|
131 |
-
): this;
|
132 |
-
|
133 |
-
columnCount: number;
|
134 |
-
rowCount: number;
|
135 |
-
|
136 |
-
resetGrid(
|
137 |
-
column: number, row: number,
|
138 |
-
columnProportions?: number | number[],
|
139 |
-
rowProportions?: number | number[],
|
140 |
-
space?: {
|
141 |
-
column?: number | number[],
|
142 |
-
row?: number | number[],
|
143 |
-
}
|
144 |
-
): this;
|
145 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Alpaca233/SadTalker/README.md
DELETED
@@ -1,15 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: SadTalker
|
3 |
-
emoji: 🌊
|
4 |
-
colorFrom: blue
|
5 |
-
colorTo: pink
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.37.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: mit
|
11 |
-
duplicated_from: kevinwang676/SadTalker
|
12 |
-
---
|
13 |
-
|
14 |
-
|
15 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ameaou/academic-chatgpt3.1/crazy_functions/Latex全文翻译.py
DELETED
@@ -1,175 +0,0 @@
|
|
1 |
-
from toolbox import update_ui
|
2 |
-
from toolbox import CatchException, report_execption, write_results_to_file
|
3 |
-
fast_debug = False
|
4 |
-
|
5 |
-
class PaperFileGroup():
|
6 |
-
def __init__(self):
|
7 |
-
self.file_paths = []
|
8 |
-
self.file_contents = []
|
9 |
-
self.sp_file_contents = []
|
10 |
-
self.sp_file_index = []
|
11 |
-
self.sp_file_tag = []
|
12 |
-
|
13 |
-
# count_token
|
14 |
-
from request_llm.bridge_all import model_info
|
15 |
-
enc = model_info["gpt-3.5-turbo"]['tokenizer']
|
16 |
-
def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
|
17 |
-
self.get_token_num = get_token_num
|
18 |
-
|
19 |
-
def run_file_split(self, max_token_limit=1900):
|
20 |
-
"""
|
21 |
-
将长文本分离开来
|
22 |
-
"""
|
23 |
-
for index, file_content in enumerate(self.file_contents):
|
24 |
-
if self.get_token_num(file_content) < max_token_limit:
|
25 |
-
self.sp_file_contents.append(file_content)
|
26 |
-
self.sp_file_index.append(index)
|
27 |
-
self.sp_file_tag.append(self.file_paths[index])
|
28 |
-
else:
|
29 |
-
from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
|
30 |
-
segments = breakdown_txt_to_satisfy_token_limit_for_pdf(file_content, self.get_token_num, max_token_limit)
|
31 |
-
for j, segment in enumerate(segments):
|
32 |
-
self.sp_file_contents.append(segment)
|
33 |
-
self.sp_file_index.append(index)
|
34 |
-
self.sp_file_tag.append(self.file_paths[index] + f".part-{j}.tex")
|
35 |
-
|
36 |
-
print('Segmentation: done')
|
37 |
-
|
38 |
-
def 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en'):
|
39 |
-
import time, os, re
|
40 |
-
from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
|
41 |
-
|
42 |
-
# <-------- 读取Latex文件,删除其中的所有注释 ---------->
|
43 |
-
pfg = PaperFileGroup()
|
44 |
-
|
45 |
-
for index, fp in enumerate(file_manifest):
|
46 |
-
with open(fp, 'r', encoding='utf-8', errors='replace') as f:
|
47 |
-
file_content = f.read()
|
48 |
-
# 定义注释的正则表达式
|
49 |
-
comment_pattern = r'%.*'
|
50 |
-
# 使用正则表达式查找注释,并替换为空字符串
|
51 |
-
clean_tex_content = re.sub(comment_pattern, '', file_content)
|
52 |
-
# 记录删除注释后的文本
|
53 |
-
pfg.file_paths.append(fp)
|
54 |
-
pfg.file_contents.append(clean_tex_content)
|
55 |
-
|
56 |
-
# <-------- 拆分过长的latex文件 ---------->
|
57 |
-
pfg.run_file_split(max_token_limit=1024)
|
58 |
-
n_split = len(pfg.sp_file_contents)
|
59 |
-
|
60 |
-
# <-------- 抽取摘要 ---------->
|
61 |
-
# if language == 'en':
|
62 |
-
# abs_extract_inputs = f"Please write an abstract for this paper"
|
63 |
-
|
64 |
-
# # 单线,获取文章meta信息
|
65 |
-
# paper_meta_info = yield from request_gpt_model_in_new_thread_with_ui_alive(
|
66 |
-
# inputs=abs_extract_inputs,
|
67 |
-
# inputs_show_user=f"正在抽取摘要信息。",
|
68 |
-
# llm_kwargs=llm_kwargs,
|
69 |
-
# chatbot=chatbot, history=[],
|
70 |
-
# sys_prompt="Your job is to collect information from materials。",
|
71 |
-
# )
|
72 |
-
|
73 |
-
# <-------- 多线程润色开始 ---------->
|
74 |
-
if language == 'en->zh':
|
75 |
-
inputs_array = ["Below is a section from an English academic paper, translate it into Chinese, do not modify any latex command such as \section, \cite and equations:" +
|
76 |
-
f"\n\n{frag}" for frag in pfg.sp_file_contents]
|
77 |
-
inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag]
|
78 |
-
sys_prompt_array = ["You are a professional academic paper translator." for _ in range(n_split)]
|
79 |
-
elif language == 'zh->en':
|
80 |
-
inputs_array = [f"Below is a section from a Chinese academic paper, translate it into English, do not modify any latex command such as \section, \cite and equations:" +
|
81 |
-
f"\n\n{frag}" for frag in pfg.sp_file_contents]
|
82 |
-
inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag]
|
83 |
-
sys_prompt_array = ["You are a professional academic paper translator." for _ in range(n_split)]
|
84 |
-
|
85 |
-
gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
|
86 |
-
inputs_array=inputs_array,
|
87 |
-
inputs_show_user_array=inputs_show_user_array,
|
88 |
-
llm_kwargs=llm_kwargs,
|
89 |
-
chatbot=chatbot,
|
90 |
-
history_array=[[""] for _ in range(n_split)],
|
91 |
-
sys_prompt_array=sys_prompt_array,
|
92 |
-
# max_workers=5, # OpenAI所允许的最大并行过载
|
93 |
-
scroller_max_len = 80
|
94 |
-
)
|
95 |
-
|
96 |
-
# <-------- 整理结果,退出 ---------->
|
97 |
-
create_report_file_name = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + f"-chatgpt.polish.md"
|
98 |
-
res = write_results_to_file(gpt_response_collection, file_name=create_report_file_name)
|
99 |
-
history = gpt_response_collection
|
100 |
-
chatbot.append((f"{fp}完成了吗?", res))
|
101 |
-
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
102 |
-
|
103 |
-
|
104 |
-
|
105 |
-
|
106 |
-
|
107 |
-
@CatchException
|
108 |
-
def Latex英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
|
109 |
-
# 基本信息:功能、贡献者
|
110 |
-
chatbot.append([
|
111 |
-
"函数插件功能?",
|
112 |
-
"对整个Latex项目进行翻译。函数插件贡献者: Binary-Husky"])
|
113 |
-
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
114 |
-
|
115 |
-
# 尝试导入依赖,如果缺少依赖,则给出安装建议
|
116 |
-
try:
|
117 |
-
import tiktoken
|
118 |
-
except:
|
119 |
-
report_execption(chatbot, history,
|
120 |
-
a=f"解析项目: {txt}",
|
121 |
-
b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
|
122 |
-
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
123 |
-
return
|
124 |
-
history = [] # 清空历史,以免输入溢出
|
125 |
-
import glob, os
|
126 |
-
if os.path.exists(txt):
|
127 |
-
project_folder = txt
|
128 |
-
else:
|
129 |
-
if txt == "": txt = '空空如也的输入栏'
|
130 |
-
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
|
131 |
-
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
132 |
-
return
|
133 |
-
file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
|
134 |
-
if len(file_manifest) == 0:
|
135 |
-
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
|
136 |
-
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
137 |
-
return
|
138 |
-
yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en->zh')
|
139 |
-
|
140 |
-
|
141 |
-
|
142 |
-
|
143 |
-
|
144 |
-
@CatchException
|
145 |
-
def Latex中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
|
146 |
-
# 基本信息:功能、贡献者
|
147 |
-
chatbot.append([
|
148 |
-
"函数插件功能?",
|
149 |
-
"对整个Latex项目进行翻译。函数插件贡献者: Binary-Husky"])
|
150 |
-
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
151 |
-
|
152 |
-
# 尝试导入依赖,如果缺少依赖,则给出安装建议
|
153 |
-
try:
|
154 |
-
import tiktoken
|
155 |
-
except:
|
156 |
-
report_execption(chatbot, history,
|
157 |
-
a=f"解析项目: {txt}",
|
158 |
-
b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
|
159 |
-
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
160 |
-
return
|
161 |
-
history = [] # 清空历史,以免输入溢出
|
162 |
-
import glob, os
|
163 |
-
if os.path.exists(txt):
|
164 |
-
project_folder = txt
|
165 |
-
else:
|
166 |
-
if txt == "": txt = '空空如也的输入栏'
|
167 |
-
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
|
168 |
-
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
169 |
-
return
|
170 |
-
file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
|
171 |
-
if len(file_manifest) == 0:
|
172 |
-
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
|
173 |
-
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
174 |
-
return
|
175 |
-
yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='zh->en')
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Amrrs/DragGan-Inversion/stylegan_human/pti/pti_models/e4e/encoders/model_irse.py
DELETED
@@ -1,91 +0,0 @@
|
|
1 |
-
from torch.nn import Linear, Conv2d, BatchNorm1d, BatchNorm2d, PReLU, Dropout, Sequential, Module
|
2 |
-
from encoder4editing.models.encoders.helpers import get_blocks, Flatten, bottleneck_IR, bottleneck_IR_SE, l2_norm
|
3 |
-
|
4 |
-
"""
|
5 |
-
Modified Backbone implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch)
|
6 |
-
"""
|
7 |
-
|
8 |
-
|
9 |
-
class Backbone(Module):
|
10 |
-
def __init__(self, input_size, num_layers, mode='ir', drop_ratio=0.4, affine=True):
|
11 |
-
super(Backbone, self).__init__()
|
12 |
-
assert input_size in [112, 224], "input_size should be 112 or 224"
|
13 |
-
assert num_layers in [
|
14 |
-
50, 100, 152], "num_layers should be 50, 100 or 152"
|
15 |
-
assert mode in ['ir', 'ir_se'], "mode should be ir or ir_se"
|
16 |
-
blocks = get_blocks(num_layers)
|
17 |
-
if mode == 'ir':
|
18 |
-
unit_module = bottleneck_IR
|
19 |
-
elif mode == 'ir_se':
|
20 |
-
unit_module = bottleneck_IR_SE
|
21 |
-
self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
|
22 |
-
BatchNorm2d(64),
|
23 |
-
PReLU(64))
|
24 |
-
if input_size == 112:
|
25 |
-
self.output_layer = Sequential(BatchNorm2d(512),
|
26 |
-
Dropout(drop_ratio),
|
27 |
-
Flatten(),
|
28 |
-
Linear(512 * 7 * 7, 512),
|
29 |
-
BatchNorm1d(512, affine=affine))
|
30 |
-
else:
|
31 |
-
self.output_layer = Sequential(BatchNorm2d(512),
|
32 |
-
Dropout(drop_ratio),
|
33 |
-
Flatten(),
|
34 |
-
Linear(512 * 14 * 14, 512),
|
35 |
-
BatchNorm1d(512, affine=affine))
|
36 |
-
|
37 |
-
modules = []
|
38 |
-
for block in blocks:
|
39 |
-
for bottleneck in block:
|
40 |
-
modules.append(unit_module(bottleneck.in_channel,
|
41 |
-
bottleneck.depth,
|
42 |
-
bottleneck.stride))
|
43 |
-
self.body = Sequential(*modules)
|
44 |
-
|
45 |
-
def forward(self, x):
|
46 |
-
x = self.input_layer(x)
|
47 |
-
x = self.body(x)
|
48 |
-
x = self.output_layer(x)
|
49 |
-
return l2_norm(x)
|
50 |
-
|
51 |
-
|
52 |
-
def IR_50(input_size):
|
53 |
-
"""Constructs a ir-50 model."""
|
54 |
-
model = Backbone(input_size, num_layers=50, mode='ir',
|
55 |
-
drop_ratio=0.4, affine=False)
|
56 |
-
return model
|
57 |
-
|
58 |
-
|
59 |
-
def IR_101(input_size):
|
60 |
-
"""Constructs a ir-101 model."""
|
61 |
-
model = Backbone(input_size, num_layers=100, mode='ir',
|
62 |
-
drop_ratio=0.4, affine=False)
|
63 |
-
return model
|
64 |
-
|
65 |
-
|
66 |
-
def IR_152(input_size):
|
67 |
-
"""Constructs a ir-152 model."""
|
68 |
-
model = Backbone(input_size, num_layers=152, mode='ir',
|
69 |
-
drop_ratio=0.4, affine=False)
|
70 |
-
return model
|
71 |
-
|
72 |
-
|
73 |
-
def IR_SE_50(input_size):
|
74 |
-
"""Constructs a ir_se-50 model."""
|
75 |
-
model = Backbone(input_size, num_layers=50, mode='ir_se',
|
76 |
-
drop_ratio=0.4, affine=False)
|
77 |
-
return model
|
78 |
-
|
79 |
-
|
80 |
-
def IR_SE_101(input_size):
|
81 |
-
"""Constructs a ir_se-101 model."""
|
82 |
-
model = Backbone(input_size, num_layers=100, mode='ir_se',
|
83 |
-
drop_ratio=0.4, affine=False)
|
84 |
-
return model
|
85 |
-
|
86 |
-
|
87 |
-
def IR_SE_152(input_size):
|
88 |
-
"""Constructs a ir_se-152 model."""
|
89 |
-
model = Backbone(input_size, num_layers=152, mode='ir_se',
|
90 |
-
drop_ratio=0.4, affine=False)
|
91 |
-
return model
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Amrrs/gradio-sentiment-analyzer/README.md
DELETED
@@ -1,37 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Gradio Sentiment Analyzer
|
3 |
-
emoji: 🔥
|
4 |
-
colorFrom: indigo
|
5 |
-
colorTo: blue
|
6 |
-
sdk: gradio
|
7 |
-
app_file: app.py
|
8 |
-
pinned: false
|
9 |
-
---
|
10 |
-
|
11 |
-
# Configuration
|
12 |
-
|
13 |
-
`title`: _string_
|
14 |
-
Display title for the Space
|
15 |
-
|
16 |
-
`emoji`: _string_
|
17 |
-
Space emoji (emoji-only character allowed)
|
18 |
-
|
19 |
-
`colorFrom`: _string_
|
20 |
-
Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
|
21 |
-
|
22 |
-
`colorTo`: _string_
|
23 |
-
Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
|
24 |
-
|
25 |
-
`sdk`: _string_
|
26 |
-
Can be either `gradio` or `streamlit`
|
27 |
-
|
28 |
-
`sdk_version` : _string_
|
29 |
-
Only applicable for `streamlit` SDK.
|
30 |
-
See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
|
31 |
-
|
32 |
-
`app_file`: _string_
|
33 |
-
Path to your main application file (which contains either `gradio` or `streamlit` Python code).
|
34 |
-
Path is relative to the root of the repository.
|
35 |
-
|
36 |
-
`pinned`: _boolean_
|
37 |
-
Whether the Space stays on top of your list.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/resnet_flax.py
DELETED
@@ -1,124 +0,0 @@
|
|
1 |
-
# Copyright 2023 The HuggingFace Team. All rights reserved.
|
2 |
-
#
|
3 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
-
# you may not use this file except in compliance with the License.
|
5 |
-
# You may obtain a copy of the License at
|
6 |
-
#
|
7 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
-
#
|
9 |
-
# Unless required by applicable law or agreed to in writing, software
|
10 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
-
# See the License for the specific language governing permissions and
|
13 |
-
# limitations under the License.
|
14 |
-
import flax.linen as nn
|
15 |
-
import jax
|
16 |
-
import jax.numpy as jnp
|
17 |
-
|
18 |
-
|
19 |
-
class FlaxUpsample2D(nn.Module):
|
20 |
-
out_channels: int
|
21 |
-
dtype: jnp.dtype = jnp.float32
|
22 |
-
|
23 |
-
def setup(self):
|
24 |
-
self.conv = nn.Conv(
|
25 |
-
self.out_channels,
|
26 |
-
kernel_size=(3, 3),
|
27 |
-
strides=(1, 1),
|
28 |
-
padding=((1, 1), (1, 1)),
|
29 |
-
dtype=self.dtype,
|
30 |
-
)
|
31 |
-
|
32 |
-
def __call__(self, hidden_states):
|
33 |
-
batch, height, width, channels = hidden_states.shape
|
34 |
-
hidden_states = jax.image.resize(
|
35 |
-
hidden_states,
|
36 |
-
shape=(batch, height * 2, width * 2, channels),
|
37 |
-
method="nearest",
|
38 |
-
)
|
39 |
-
hidden_states = self.conv(hidden_states)
|
40 |
-
return hidden_states
|
41 |
-
|
42 |
-
|
43 |
-
class FlaxDownsample2D(nn.Module):
|
44 |
-
out_channels: int
|
45 |
-
dtype: jnp.dtype = jnp.float32
|
46 |
-
|
47 |
-
def setup(self):
|
48 |
-
self.conv = nn.Conv(
|
49 |
-
self.out_channels,
|
50 |
-
kernel_size=(3, 3),
|
51 |
-
strides=(2, 2),
|
52 |
-
padding=((1, 1), (1, 1)), # padding="VALID",
|
53 |
-
dtype=self.dtype,
|
54 |
-
)
|
55 |
-
|
56 |
-
def __call__(self, hidden_states):
|
57 |
-
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
|
58 |
-
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
|
59 |
-
hidden_states = self.conv(hidden_states)
|
60 |
-
return hidden_states
|
61 |
-
|
62 |
-
|
63 |
-
class FlaxResnetBlock2D(nn.Module):
|
64 |
-
in_channels: int
|
65 |
-
out_channels: int = None
|
66 |
-
dropout_prob: float = 0.0
|
67 |
-
use_nin_shortcut: bool = None
|
68 |
-
dtype: jnp.dtype = jnp.float32
|
69 |
-
|
70 |
-
def setup(self):
|
71 |
-
out_channels = self.in_channels if self.out_channels is None else self.out_channels
|
72 |
-
|
73 |
-
self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
|
74 |
-
self.conv1 = nn.Conv(
|
75 |
-
out_channels,
|
76 |
-
kernel_size=(3, 3),
|
77 |
-
strides=(1, 1),
|
78 |
-
padding=((1, 1), (1, 1)),
|
79 |
-
dtype=self.dtype,
|
80 |
-
)
|
81 |
-
|
82 |
-
self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)
|
83 |
-
|
84 |
-
self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
|
85 |
-
self.dropout = nn.Dropout(self.dropout_prob)
|
86 |
-
self.conv2 = nn.Conv(
|
87 |
-
out_channels,
|
88 |
-
kernel_size=(3, 3),
|
89 |
-
strides=(1, 1),
|
90 |
-
padding=((1, 1), (1, 1)),
|
91 |
-
dtype=self.dtype,
|
92 |
-
)
|
93 |
-
|
94 |
-
use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
|
95 |
-
|
96 |
-
self.conv_shortcut = None
|
97 |
-
if use_nin_shortcut:
|
98 |
-
self.conv_shortcut = nn.Conv(
|
99 |
-
out_channels,
|
100 |
-
kernel_size=(1, 1),
|
101 |
-
strides=(1, 1),
|
102 |
-
padding="VALID",
|
103 |
-
dtype=self.dtype,
|
104 |
-
)
|
105 |
-
|
106 |
-
def __call__(self, hidden_states, temb, deterministic=True):
|
107 |
-
residual = hidden_states
|
108 |
-
hidden_states = self.norm1(hidden_states)
|
109 |
-
hidden_states = nn.swish(hidden_states)
|
110 |
-
hidden_states = self.conv1(hidden_states)
|
111 |
-
|
112 |
-
temb = self.time_emb_proj(nn.swish(temb))
|
113 |
-
temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
|
114 |
-
hidden_states = hidden_states + temb
|
115 |
-
|
116 |
-
hidden_states = self.norm2(hidden_states)
|
117 |
-
hidden_states = nn.swish(hidden_states)
|
118 |
-
hidden_states = self.dropout(hidden_states, deterministic)
|
119 |
-
hidden_states = self.conv2(hidden_states)
|
120 |
-
|
121 |
-
if self.conv_shortcut is not None:
|
122 |
-
residual = self.conv_shortcut(residual)
|
123 |
-
|
124 |
-
return hidden_states + residual
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/shap_e/__init__.py
DELETED
@@ -1,27 +0,0 @@
|
|
1 |
-
from ...utils import (
|
2 |
-
OptionalDependencyNotAvailable,
|
3 |
-
is_torch_available,
|
4 |
-
is_transformers_available,
|
5 |
-
is_transformers_version,
|
6 |
-
)
|
7 |
-
|
8 |
-
|
9 |
-
try:
|
10 |
-
if not (is_transformers_available() and is_torch_available()):
|
11 |
-
raise OptionalDependencyNotAvailable()
|
12 |
-
except OptionalDependencyNotAvailable:
|
13 |
-
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
|
14 |
-
else:
|
15 |
-
from .camera import create_pan_cameras
|
16 |
-
from .pipeline_shap_e import ShapEPipeline
|
17 |
-
from .pipeline_shap_e_img2img import ShapEImg2ImgPipeline
|
18 |
-
from .renderer import (
|
19 |
-
BoundingBoxVolume,
|
20 |
-
ImportanceRaySampler,
|
21 |
-
MLPNeRFModelOutput,
|
22 |
-
MLPNeRSTFModel,
|
23 |
-
ShapEParamsProjModel,
|
24 |
-
ShapERenderer,
|
25 |
-
StratifiedRaySampler,
|
26 |
-
VoidNeRFModel,
|
27 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/configs/cascade_rcnn/README.md
DELETED
@@ -1,55 +0,0 @@
|
|
1 |
-
# Cascade R-CNN: High Quality Object Detection and Instance Segmentation
|
2 |
-
|
3 |
-
## Introduction
|
4 |
-
|
5 |
-
[ALGORITHM]
|
6 |
-
|
7 |
-
```latex
|
8 |
-
@article{Cai_2019,
|
9 |
-
title={Cascade R-CNN: High Quality Object Detection and Instance Segmentation},
|
10 |
-
ISSN={1939-3539},
|
11 |
-
url={http://dx.doi.org/10.1109/tpami.2019.2956516},
|
12 |
-
DOI={10.1109/tpami.2019.2956516},
|
13 |
-
journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},
|
14 |
-
publisher={Institute of Electrical and Electronics Engineers (IEEE)},
|
15 |
-
author={Cai, Zhaowei and Vasconcelos, Nuno},
|
16 |
-
year={2019},
|
17 |
-
pages={1–1}
|
18 |
-
}
|
19 |
-
```
|
20 |
-
|
21 |
-
## Results and models
|
22 |
-
|
23 |
-
### Cascade R-CNN
|
24 |
-
|
25 |
-
| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download |
|
26 |
-
| :-------------: | :-----: | :-----: | :------: | :------------: | :----: |:------:|:--------:|
|
27 |
-
| R-50-FPN | caffe | 1x | 4.2 | | 40.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_rcnn_r50_caffe_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_caffe_fpn_1x_coco/cascade_rcnn_r50_caffe_fpn_1x_coco_bbox_mAP-0.404_20200504_174853-b857be87.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_caffe_fpn_1x_coco/cascade_rcnn_r50_caffe_fpn_1x_coco_20200504_174853.log.json) |
|
28 |
-
| R-50-FPN | pytorch | 1x | 4.4 | 16.1 | 40.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco/cascade_rcnn_r50_fpn_1x_coco_20200316-3dc56deb.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco/cascade_rcnn_r50_fpn_1x_coco_20200316_214748.log.json) |
|
29 |
-
| R-50-FPN | pytorch | 20e | - | - | 41.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_rcnn_r50_fpn_20e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_fpn_20e_coco/cascade_rcnn_r50_fpn_20e_coco_bbox_mAP-0.41_20200504_175131-e9872a90.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_fpn_20e_coco/cascade_rcnn_r50_fpn_20e_coco_20200504_175131.log.json) |
|
30 |
-
| R-101-FPN | caffe | 1x | 6.2 | | 42.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_rcnn_r101_caffe_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r101_caffe_fpn_1x_coco/cascade_rcnn_r101_caffe_fpn_1x_coco_bbox_mAP-0.423_20200504_175649-cab8dbd5.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r101_caffe_fpn_1x_coco/cascade_rcnn_r101_caffe_fpn_1x_coco_20200504_175649.log.json) |
|
31 |
-
| R-101-FPN | pytorch | 1x | 6.4 | 13.5 | 42.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_rcnn_r101_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r101_fpn_1x_coco/cascade_rcnn_r101_fpn_1x_coco_20200317-0b6a2fbf.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r101_fpn_1x_coco/cascade_rcnn_r101_fpn_1x_coco_20200317_101744.log.json) |
|
32 |
-
| R-101-FPN | pytorch | 20e | - | - | 42.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_rcnn_r101_fpn_20e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r101_fpn_20e_coco/cascade_rcnn_r101_fpn_20e_coco_bbox_mAP-0.425_20200504_231812-5057dcc5.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r101_fpn_20e_coco/cascade_rcnn_r101_fpn_20e_coco_20200504_231812.log.json) |
|
33 |
-
| X-101-32x4d-FPN | pytorch | 1x | 7.6 | 10.9 | 43.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_1x_coco/cascade_rcnn_x101_32x4d_fpn_1x_coco_20200316-95c2deb6.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_1x_coco/cascade_rcnn_x101_32x4d_fpn_1x_coco_20200316_055608.log.json) |
|
34 |
-
| X-101-32x4d-FPN | pytorch | 20e | 7.6 | | 43.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_20e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_20e_coco/cascade_rcnn_x101_32x4d_fpn_20e_coco_20200906_134608-9ae0a720.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_20e_coco/cascade_rcnn_x101_32x4d_fpn_20e_coco_20200906_134608.log.json) |
|
35 |
-
| X-101-64x4d-FPN | pytorch | 1x | 10.7 | | 44.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_1x_coco/cascade_rcnn_x101_64x4d_fpn_1x_coco_20200515_075702-43ce6a30.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_1x_coco/cascade_rcnn_x101_64x4d_fpn_1x_coco_20200515_075702.log.json) |
|
36 |
-
| X-101-64x4d-FPN | pytorch | 20e | 10.7 | | 44.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_20e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_20e_coco/cascade_rcnn_x101_64x4d_fpn_20e_coco_20200509_224357-051557b1.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_20e_coco/cascade_rcnn_x101_64x4d_fpn_20e_coco_20200509_224357.log.json)|
|
37 |
-
|
38 |
-
### Cascade Mask R-CNN
|
39 |
-
|
40 |
-
| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download |
|
41 |
-
| :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :------: | :--------: |
|
42 |
-
| R-50-FPN | caffe | 1x | 5.9 | | 41.2 | 36.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_1x_coco/cascade_mask_rcnn_r50_caffe_fpn_1x_coco_bbox_mAP-0.412__segm_mAP-0.36_20200504_174659-5004b251.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_1x_coco/cascade_mask_rcnn_r50_caffe_fpn_1x_coco_20200504_174659.log.json) |
|
43 |
-
| R-50-FPN | pytorch | 1x | 6.0 | 11.2 | 41.2 | 35.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco/cascade_mask_rcnn_r50_fpn_1x_coco_20200203-9d4dcb24.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco/cascade_mask_rcnn_r50_fpn_1x_coco_20200203_170449.log.json) |
|
44 |
-
| R-50-FPN | pytorch | 20e | - | - | 41.9 | 36.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco/cascade_mask_rcnn_r50_fpn_20e_coco_bbox_mAP-0.419__segm_mAP-0.365_20200504_174711-4af8e66e.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco/cascade_mask_rcnn_r50_fpn_20e_coco_20200504_174711.log.json)|
|
45 |
-
| R-101-FPN | caffe | 1x | 7.8 | | 43.2 | 37.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_1x_coco/cascade_mask_rcnn_r101_caffe_fpn_1x_coco_bbox_mAP-0.432__segm_mAP-0.376_20200504_174813-5c1e9599.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_1x_coco/cascade_mask_rcnn_r101_caffe_fpn_1x_coco_20200504_174813.log.json)|
|
46 |
-
| R-101-FPN | pytorch | 1x | 7.9 | 9.8 | 42.9 | 37.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco/cascade_mask_rcnn_r101_fpn_1x_coco_20200203-befdf6ee.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco/cascade_mask_rcnn_r101_fpn_1x_coco_20200203_092521.log.json) |
|
47 |
-
| R-101-FPN | pytorch | 20e | - | - | 43.4 | 37.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_20e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_fpn_20e_coco/cascade_mask_rcnn_r101_fpn_20e_coco_bbox_mAP-0.434__segm_mAP-0.378_20200504_174836-005947da.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_fpn_20e_coco/cascade_mask_rcnn_r101_fpn_20e_coco_20200504_174836.log.json)|
|
48 |
-
| X-101-32x4d-FPN | pytorch | 1x | 9.2 | 8.6 | 44.3 | 38.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco_20200201-0f411b1f.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco_20200201_052416.log.json) |
|
49 |
-
| X-101-32x4d-FPN | pytorch | 20e | 9.2 | - | 45.0 | 39.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco_20200528_083917-ed1f4751.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco_20200528_083917.log.json) |
|
50 |
-
| X-101-64x4d-FPN | pytorch | 1x | 12.2 | 6.7 | 45.3 | 39.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco_20200203-9a2db89d.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco_20200203_044059.log.json) |
|
51 |
-
| X-101-64x4d-FPN | pytorch | 20e | 12.2 | | 45.6 |39.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco_20200512_161033-bdb5126a.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco_20200512_161033.log.json)|
|
52 |
-
|
53 |
-
**Notes:**
|
54 |
-
|
55 |
-
- The `20e` schedule in Cascade (Mask) R-CNN indicates decreasing the lr at 16 and 19 epochs, with a total of 20 epochs.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/configs/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_20e_coco.py
DELETED
@@ -1,14 +0,0 @@
|
|
1 |
-
_base_ = './cascade_rcnn_r50_fpn_20e_coco.py'
|
2 |
-
model = dict(
|
3 |
-
type='CascadeRCNN',
|
4 |
-
pretrained='open-mmlab://resnext101_64x4d',
|
5 |
-
backbone=dict(
|
6 |
-
type='ResNeXt',
|
7 |
-
depth=101,
|
8 |
-
groups=64,
|
9 |
-
base_width=4,
|
10 |
-
num_stages=4,
|
11 |
-
out_indices=(0, 1, 2, 3),
|
12 |
-
frozen_stages=1,
|
13 |
-
norm_cfg=dict(type='BN', requires_grad=True),
|
14 |
-
style='pytorch'))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/configs/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_2x_coco.py
DELETED
@@ -1,4 +0,0 @@
|
|
1 |
-
_base_ = './fcos_hrnetv2p_w18_gn-head_4x4_1x_coco.py'
|
2 |
-
# learning policy
|
3 |
-
lr_config = dict(step=[16, 22])
|
4 |
-
runner = dict(type='EpochBasedRunner', max_epochs=24)
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/configs/pascal_voc/ssd512_voc0712.py
DELETED
@@ -1,53 +0,0 @@
|
|
1 |
-
_base_ = 'ssd300_voc0712.py'
|
2 |
-
input_size = 512
|
3 |
-
model = dict(
|
4 |
-
backbone=dict(input_size=input_size),
|
5 |
-
bbox_head=dict(
|
6 |
-
in_channels=(512, 1024, 512, 256, 256, 256, 256),
|
7 |
-
anchor_generator=dict(
|
8 |
-
input_size=input_size,
|
9 |
-
strides=[8, 16, 32, 64, 128, 256, 512],
|
10 |
-
basesize_ratio_range=(0.15, 0.9),
|
11 |
-
ratios=([2], [2, 3], [2, 3], [2, 3], [2, 3], [2], [2]))))
|
12 |
-
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
|
13 |
-
train_pipeline = [
|
14 |
-
dict(type='LoadImageFromFile', to_float32=True),
|
15 |
-
dict(type='LoadAnnotations', with_bbox=True),
|
16 |
-
dict(
|
17 |
-
type='PhotoMetricDistortion',
|
18 |
-
brightness_delta=32,
|
19 |
-
contrast_range=(0.5, 1.5),
|
20 |
-
saturation_range=(0.5, 1.5),
|
21 |
-
hue_delta=18),
|
22 |
-
dict(
|
23 |
-
type='Expand',
|
24 |
-
mean=img_norm_cfg['mean'],
|
25 |
-
to_rgb=img_norm_cfg['to_rgb'],
|
26 |
-
ratio_range=(1, 4)),
|
27 |
-
dict(
|
28 |
-
type='MinIoURandomCrop',
|
29 |
-
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
|
30 |
-
min_crop_size=0.3),
|
31 |
-
dict(type='Resize', img_scale=(512, 512), keep_ratio=False),
|
32 |
-
dict(type='Normalize', **img_norm_cfg),
|
33 |
-
dict(type='RandomFlip', flip_ratio=0.5),
|
34 |
-
dict(type='DefaultFormatBundle'),
|
35 |
-
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
|
36 |
-
]
|
37 |
-
test_pipeline = [
|
38 |
-
dict(type='LoadImageFromFile'),
|
39 |
-
dict(
|
40 |
-
type='MultiScaleFlipAug',
|
41 |
-
img_scale=(512, 512),
|
42 |
-
flip=False,
|
43 |
-
transforms=[
|
44 |
-
dict(type='Resize', keep_ratio=False),
|
45 |
-
dict(type='Normalize', **img_norm_cfg),
|
46 |
-
dict(type='ImageToTensor', keys=['img']),
|
47 |
-
dict(type='Collect', keys=['img']),
|
48 |
-
])
|
49 |
-
]
|
50 |
-
data = dict(
|
51 |
-
train=dict(dataset=dict(pipeline=train_pipeline)),
|
52 |
-
val=dict(pipeline=test_pipeline),
|
53 |
-
test=dict(pipeline=test_pipeline))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/configs/retinanet/retinanet_r50_caffe_fpn_mstrain_3x_coco.py
DELETED
@@ -1,4 +0,0 @@
|
|
1 |
-
_base_ = './retinanet_r50_caffe_fpn_mstrain_1x_coco.py'
|
2 |
-
# learning policy
|
3 |
-
lr_config = dict(step=[28, 34])
|
4 |
-
runner = dict(type='EpochBasedRunner', max_epochs=36)
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/mask_heads/mask_point_head.py
DELETED
@@ -1,300 +0,0 @@
|
|
1 |
-
# Modified from https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend/point_head/point_head.py # noqa
|
2 |
-
|
3 |
-
import torch
|
4 |
-
import torch.nn as nn
|
5 |
-
from mmcv.cnn import ConvModule, normal_init
|
6 |
-
from mmcv.ops import point_sample, rel_roi_point_to_rel_img_point
|
7 |
-
|
8 |
-
from mmdet.models.builder import HEADS, build_loss
|
9 |
-
|
10 |
-
|
11 |
-
@HEADS.register_module()
|
12 |
-
class MaskPointHead(nn.Module):
|
13 |
-
"""A mask point head use in PointRend.
|
14 |
-
|
15 |
-
``MaskPointHead`` use shared multi-layer perceptron (equivalent to
|
16 |
-
nn.Conv1d) to predict the logit of input points. The fine-grained feature
|
17 |
-
and coarse feature will be concatenate together for predication.
|
18 |
-
|
19 |
-
Args:
|
20 |
-
num_fcs (int): Number of fc layers in the head. Default: 3.
|
21 |
-
in_channels (int): Number of input channels. Default: 256.
|
22 |
-
fc_channels (int): Number of fc channels. Default: 256.
|
23 |
-
num_classes (int): Number of classes for logits. Default: 80.
|
24 |
-
class_agnostic (bool): Whether use class agnostic classification.
|
25 |
-
If so, the output channels of logits will be 1. Default: False.
|
26 |
-
coarse_pred_each_layer (bool): Whether concatenate coarse feature with
|
27 |
-
the output of each fc layer. Default: True.
|
28 |
-
conv_cfg (dict | None): Dictionary to construct and config conv layer.
|
29 |
-
Default: dict(type='Conv1d'))
|
30 |
-
norm_cfg (dict | None): Dictionary to construct and config norm layer.
|
31 |
-
Default: None.
|
32 |
-
loss_point (dict): Dictionary to construct and config loss layer of
|
33 |
-
point head. Default: dict(type='CrossEntropyLoss', use_mask=True,
|
34 |
-
loss_weight=1.0).
|
35 |
-
"""
|
36 |
-
|
37 |
-
def __init__(self,
|
38 |
-
num_classes,
|
39 |
-
num_fcs=3,
|
40 |
-
in_channels=256,
|
41 |
-
fc_channels=256,
|
42 |
-
class_agnostic=False,
|
43 |
-
coarse_pred_each_layer=True,
|
44 |
-
conv_cfg=dict(type='Conv1d'),
|
45 |
-
norm_cfg=None,
|
46 |
-
act_cfg=dict(type='ReLU'),
|
47 |
-
loss_point=dict(
|
48 |
-
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)):
|
49 |
-
super().__init__()
|
50 |
-
self.num_fcs = num_fcs
|
51 |
-
self.in_channels = in_channels
|
52 |
-
self.fc_channels = fc_channels
|
53 |
-
self.num_classes = num_classes
|
54 |
-
self.class_agnostic = class_agnostic
|
55 |
-
self.coarse_pred_each_layer = coarse_pred_each_layer
|
56 |
-
self.conv_cfg = conv_cfg
|
57 |
-
self.norm_cfg = norm_cfg
|
58 |
-
self.loss_point = build_loss(loss_point)
|
59 |
-
|
60 |
-
fc_in_channels = in_channels + num_classes
|
61 |
-
self.fcs = nn.ModuleList()
|
62 |
-
for _ in range(num_fcs):
|
63 |
-
fc = ConvModule(
|
64 |
-
fc_in_channels,
|
65 |
-
fc_channels,
|
66 |
-
kernel_size=1,
|
67 |
-
stride=1,
|
68 |
-
padding=0,
|
69 |
-
conv_cfg=conv_cfg,
|
70 |
-
norm_cfg=norm_cfg,
|
71 |
-
act_cfg=act_cfg)
|
72 |
-
self.fcs.append(fc)
|
73 |
-
fc_in_channels = fc_channels
|
74 |
-
fc_in_channels += num_classes if self.coarse_pred_each_layer else 0
|
75 |
-
|
76 |
-
out_channels = 1 if self.class_agnostic else self.num_classes
|
77 |
-
self.fc_logits = nn.Conv1d(
|
78 |
-
fc_in_channels, out_channels, kernel_size=1, stride=1, padding=0)
|
79 |
-
|
80 |
-
def init_weights(self):
|
81 |
-
"""Initialize last classification layer of MaskPointHead, conv layers
|
82 |
-
are already initialized by ConvModule."""
|
83 |
-
normal_init(self.fc_logits, std=0.001)
|
84 |
-
|
85 |
-
def forward(self, fine_grained_feats, coarse_feats):
|
86 |
-
"""Classify each point base on fine grained and coarse feats.
|
87 |
-
|
88 |
-
Args:
|
89 |
-
fine_grained_feats (Tensor): Fine grained feature sampled from FPN,
|
90 |
-
shape (num_rois, in_channels, num_points).
|
91 |
-
coarse_feats (Tensor): Coarse feature sampled from CoarseMaskHead,
|
92 |
-
shape (num_rois, num_classes, num_points).
|
93 |
-
|
94 |
-
Returns:
|
95 |
-
Tensor: Point classification results,
|
96 |
-
shape (num_rois, num_class, num_points).
|
97 |
-
"""
|
98 |
-
|
99 |
-
x = torch.cat([fine_grained_feats, coarse_feats], dim=1)
|
100 |
-
for fc in self.fcs:
|
101 |
-
x = fc(x)
|
102 |
-
if self.coarse_pred_each_layer:
|
103 |
-
x = torch.cat((x, coarse_feats), dim=1)
|
104 |
-
return self.fc_logits(x)
|
105 |
-
|
106 |
-
def get_targets(self, rois, rel_roi_points, sampling_results, gt_masks,
|
107 |
-
cfg):
|
108 |
-
"""Get training targets of MaskPointHead for all images.
|
109 |
-
|
110 |
-
Args:
|
111 |
-
rois (Tensor): Region of Interest, shape (num_rois, 5).
|
112 |
-
rel_roi_points: Points coordinates relative to RoI, shape
|
113 |
-
(num_rois, num_points, 2).
|
114 |
-
sampling_results (:obj:`SamplingResult`): Sampling result after
|
115 |
-
sampling and assignment.
|
116 |
-
gt_masks (Tensor) : Ground truth segmentation masks of
|
117 |
-
corresponding boxes, shape (num_rois, height, width).
|
118 |
-
cfg (dict): Training cfg.
|
119 |
-
|
120 |
-
Returns:
|
121 |
-
Tensor: Point target, shape (num_rois, num_points).
|
122 |
-
"""
|
123 |
-
|
124 |
-
num_imgs = len(sampling_results)
|
125 |
-
rois_list = []
|
126 |
-
rel_roi_points_list = []
|
127 |
-
for batch_ind in range(num_imgs):
|
128 |
-
inds = (rois[:, 0] == batch_ind)
|
129 |
-
rois_list.append(rois[inds])
|
130 |
-
rel_roi_points_list.append(rel_roi_points[inds])
|
131 |
-
pos_assigned_gt_inds_list = [
|
132 |
-
res.pos_assigned_gt_inds for res in sampling_results
|
133 |
-
]
|
134 |
-
cfg_list = [cfg for _ in range(num_imgs)]
|
135 |
-
|
136 |
-
point_targets = map(self._get_target_single, rois_list,
|
137 |
-
rel_roi_points_list, pos_assigned_gt_inds_list,
|
138 |
-
gt_masks, cfg_list)
|
139 |
-
point_targets = list(point_targets)
|
140 |
-
|
141 |
-
if len(point_targets) > 0:
|
142 |
-
point_targets = torch.cat(point_targets)
|
143 |
-
|
144 |
-
return point_targets
|
145 |
-
|
146 |
-
def _get_target_single(self, rois, rel_roi_points, pos_assigned_gt_inds,
|
147 |
-
gt_masks, cfg):
|
148 |
-
"""Get training target of MaskPointHead for each image."""
|
149 |
-
num_pos = rois.size(0)
|
150 |
-
num_points = cfg.num_points
|
151 |
-
if num_pos > 0:
|
152 |
-
gt_masks_th = (
|
153 |
-
gt_masks.to_tensor(rois.dtype, rois.device).index_select(
|
154 |
-
0, pos_assigned_gt_inds))
|
155 |
-
gt_masks_th = gt_masks_th.unsqueeze(1)
|
156 |
-
rel_img_points = rel_roi_point_to_rel_img_point(
|
157 |
-
rois, rel_roi_points, gt_masks_th.shape[2:])
|
158 |
-
point_targets = point_sample(gt_masks_th,
|
159 |
-
rel_img_points).squeeze(1)
|
160 |
-
else:
|
161 |
-
point_targets = rois.new_zeros((0, num_points))
|
162 |
-
return point_targets
|
163 |
-
|
164 |
-
def loss(self, point_pred, point_targets, labels):
|
165 |
-
"""Calculate loss for MaskPointHead.
|
166 |
-
|
167 |
-
Args:
|
168 |
-
point_pred (Tensor): Point predication result, shape
|
169 |
-
(num_rois, num_classes, num_points).
|
170 |
-
point_targets (Tensor): Point targets, shape (num_roi, num_points).
|
171 |
-
labels (Tensor): Class label of corresponding boxes,
|
172 |
-
shape (num_rois, )
|
173 |
-
|
174 |
-
Returns:
|
175 |
-
dict[str, Tensor]: a dictionary of point loss components
|
176 |
-
"""
|
177 |
-
|
178 |
-
loss = dict()
|
179 |
-
if self.class_agnostic:
|
180 |
-
loss_point = self.loss_point(point_pred, point_targets,
|
181 |
-
torch.zeros_like(labels))
|
182 |
-
else:
|
183 |
-
loss_point = self.loss_point(point_pred, point_targets, labels)
|
184 |
-
loss['loss_point'] = loss_point
|
185 |
-
return loss
|
186 |
-
|
187 |
-
def _get_uncertainty(self, mask_pred, labels):
|
188 |
-
"""Estimate uncertainty based on pred logits.
|
189 |
-
|
190 |
-
We estimate uncertainty as L1 distance between 0.0 and the logits
|
191 |
-
prediction in 'mask_pred' for the foreground class in `classes`.
|
192 |
-
|
193 |
-
Args:
|
194 |
-
mask_pred (Tensor): mask predication logits, shape (num_rois,
|
195 |
-
num_classes, mask_height, mask_width).
|
196 |
-
|
197 |
-
labels (list[Tensor]): Either predicted or ground truth label for
|
198 |
-
each predicted mask, of length num_rois.
|
199 |
-
|
200 |
-
Returns:
|
201 |
-
scores (Tensor): Uncertainty scores with the most uncertain
|
202 |
-
locations having the highest uncertainty score,
|
203 |
-
shape (num_rois, 1, mask_height, mask_width)
|
204 |
-
"""
|
205 |
-
if mask_pred.shape[1] == 1:
|
206 |
-
gt_class_logits = mask_pred.clone()
|
207 |
-
else:
|
208 |
-
inds = torch.arange(mask_pred.shape[0], device=mask_pred.device)
|
209 |
-
gt_class_logits = mask_pred[inds, labels].unsqueeze(1)
|
210 |
-
return -torch.abs(gt_class_logits)
|
211 |
-
|
212 |
-
def get_roi_rel_points_train(self, mask_pred, labels, cfg):
|
213 |
-
"""Get ``num_points`` most uncertain points with random points during
|
214 |
-
train.
|
215 |
-
|
216 |
-
Sample points in [0, 1] x [0, 1] coordinate space based on their
|
217 |
-
uncertainty. The uncertainties are calculated for each point using
|
218 |
-
'_get_uncertainty()' function that takes point's logit prediction as
|
219 |
-
input.
|
220 |
-
|
221 |
-
Args:
|
222 |
-
mask_pred (Tensor): A tensor of shape (num_rois, num_classes,
|
223 |
-
mask_height, mask_width) for class-specific or class-agnostic
|
224 |
-
prediction.
|
225 |
-
labels (list): The ground truth class for each instance.
|
226 |
-
cfg (dict): Training config of point head.
|
227 |
-
|
228 |
-
Returns:
|
229 |
-
point_coords (Tensor): A tensor of shape (num_rois, num_points, 2)
|
230 |
-
that contains the coordinates sampled points.
|
231 |
-
"""
|
232 |
-
num_points = cfg.num_points
|
233 |
-
oversample_ratio = cfg.oversample_ratio
|
234 |
-
importance_sample_ratio = cfg.importance_sample_ratio
|
235 |
-
assert oversample_ratio >= 1
|
236 |
-
assert 0 <= importance_sample_ratio <= 1
|
237 |
-
batch_size = mask_pred.shape[0]
|
238 |
-
num_sampled = int(num_points * oversample_ratio)
|
239 |
-
point_coords = torch.rand(
|
240 |
-
batch_size, num_sampled, 2, device=mask_pred.device)
|
241 |
-
point_logits = point_sample(mask_pred, point_coords)
|
242 |
-
# It is crucial to calculate uncertainty based on the sampled
|
243 |
-
# prediction value for the points. Calculating uncertainties of the
|
244 |
-
# coarse predictions first and sampling them for points leads to
|
245 |
-
# incorrect results. To illustrate this: assume uncertainty func(
|
246 |
-
# logits)=-abs(logits), a sampled point between two coarse
|
247 |
-
# predictions with -1 and 1 logits has 0 logits, and therefore 0
|
248 |
-
# uncertainty value. However, if we calculate uncertainties for the
|
249 |
-
# coarse predictions first, both will have -1 uncertainty,
|
250 |
-
# and sampled point will get -1 uncertainty.
|
251 |
-
point_uncertainties = self._get_uncertainty(point_logits, labels)
|
252 |
-
num_uncertain_points = int(importance_sample_ratio * num_points)
|
253 |
-
num_random_points = num_points - num_uncertain_points
|
254 |
-
idx = torch.topk(
|
255 |
-
point_uncertainties[:, 0, :], k=num_uncertain_points, dim=1)[1]
|
256 |
-
shift = num_sampled * torch.arange(
|
257 |
-
batch_size, dtype=torch.long, device=mask_pred.device)
|
258 |
-
idx += shift[:, None]
|
259 |
-
point_coords = point_coords.view(-1, 2)[idx.view(-1), :].view(
|
260 |
-
batch_size, num_uncertain_points, 2)
|
261 |
-
if num_random_points > 0:
|
262 |
-
rand_roi_coords = torch.rand(
|
263 |
-
batch_size, num_random_points, 2, device=mask_pred.device)
|
264 |
-
point_coords = torch.cat((point_coords, rand_roi_coords), dim=1)
|
265 |
-
return point_coords
|
266 |
-
|
267 |
-
def get_roi_rel_points_test(self, mask_pred, pred_label, cfg):
|
268 |
-
"""Get ``num_points`` most uncertain points during test.
|
269 |
-
|
270 |
-
Args:
|
271 |
-
mask_pred (Tensor): A tensor of shape (num_rois, num_classes,
|
272 |
-
mask_height, mask_width) for class-specific or class-agnostic
|
273 |
-
prediction.
|
274 |
-
pred_label (list): The predication class for each instance.
|
275 |
-
cfg (dict): Testing config of point head.
|
276 |
-
|
277 |
-
Returns:
|
278 |
-
point_indices (Tensor): A tensor of shape (num_rois, num_points)
|
279 |
-
that contains indices from [0, mask_height x mask_width) of the
|
280 |
-
most uncertain points.
|
281 |
-
point_coords (Tensor): A tensor of shape (num_rois, num_points, 2)
|
282 |
-
that contains [0, 1] x [0, 1] normalized coordinates of the
|
283 |
-
most uncertain points from the [mask_height, mask_width] grid .
|
284 |
-
"""
|
285 |
-
num_points = cfg.subdivision_num_points
|
286 |
-
uncertainty_map = self._get_uncertainty(mask_pred, pred_label)
|
287 |
-
num_rois, _, mask_height, mask_width = uncertainty_map.shape
|
288 |
-
h_step = 1.0 / mask_height
|
289 |
-
w_step = 1.0 / mask_width
|
290 |
-
|
291 |
-
uncertainty_map = uncertainty_map.view(num_rois,
|
292 |
-
mask_height * mask_width)
|
293 |
-
num_points = min(mask_height * mask_width, num_points)
|
294 |
-
point_indices = uncertainty_map.topk(num_points, dim=1)[1]
|
295 |
-
point_coords = uncertainty_map.new_zeros(num_rois, num_points, 2)
|
296 |
-
point_coords[:, :, 0] = w_step / 2.0 + (point_indices %
|
297 |
-
mask_width).float() * w_step
|
298 |
-
point_coords[:, :, 1] = h_step / 2.0 + (point_indices //
|
299 |
-
mask_width).float() * h_step
|
300 |
-
return point_indices, point_coords
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r101b-d8_769x769_80k_cityscapes.py
DELETED
@@ -1,4 +0,0 @@
|
|
1 |
-
_base_ = './deeplabv3_r50-d8_769x769_80k_cityscapes.py'
|
2 |
-
model = dict(
|
3 |
-
pretrained='torchvision://resnet101',
|
4 |
-
backbone=dict(type='ResNet', depth=101))
|
|
|
|
|
|
|
|
|
|
spaces/ArkanDash/rvc-models-new/README.md
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: RVC Genshin Impact
|
3 |
-
emoji: 🎤
|
4 |
-
colorFrom: red
|
5 |
-
colorTo: purple
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.40.1
|
8 |
-
app_file: app.py
|
9 |
-
pinned: true
|
10 |
-
license: mit
|
11 |
-
---
|
12 |
-
|
13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AvaterClasher/Food_Classifier_Moni/app.py
DELETED
@@ -1,77 +0,0 @@
|
|
1 |
-
### 1. Imports and class names setup ###
|
2 |
-
import gradio as gr
|
3 |
-
import os
|
4 |
-
import torch
|
5 |
-
|
6 |
-
from model import create_effnetb2_model
|
7 |
-
from timeit import default_timer as timer
|
8 |
-
from typing import Tuple, Dict
|
9 |
-
|
10 |
-
# Setup class names
|
11 |
-
class_names = ["pizza", "steak", "sushi"]
|
12 |
-
|
13 |
-
### 2. Model and transforms preparation ###
|
14 |
-
|
15 |
-
# Create EffNetB2 model
|
16 |
-
effnetb2, effnetb2_transforms = create_effnetb2_model(
|
17 |
-
num_classes=3, # len(class_names) would also work
|
18 |
-
)
|
19 |
-
|
20 |
-
# Load saved weights
|
21 |
-
effnetb2.load_state_dict(
|
22 |
-
torch.load(
|
23 |
-
f="09_pretrained_effnetb2_feature_extractor_pizza_steak_sushi_20_percent.pth",
|
24 |
-
map_location=torch.device("cpu"), # load to CPU
|
25 |
-
)
|
26 |
-
)
|
27 |
-
|
28 |
-
### 3. Predict function ###
|
29 |
-
|
30 |
-
# Create predict function
|
31 |
-
def predict(img) -> Tuple[Dict, float]:
|
32 |
-
"""Transforms and performs a prediction on img and returns prediction and time taken.
|
33 |
-
"""
|
34 |
-
# Start the timer
|
35 |
-
start_time = timer()
|
36 |
-
|
37 |
-
# Transform the target image and add a batch dimension
|
38 |
-
img = effnetb2_transforms(img).unsqueeze(0)
|
39 |
-
|
40 |
-
# Put model into evaluation mode and turn on inference mode
|
41 |
-
effnetb2.eval()
|
42 |
-
with torch.inference_mode():
|
43 |
-
# Pass the transformed image through the model and turn the prediction logits into prediction probabilities
|
44 |
-
pred_probs = torch.softmax(effnetb2(img), dim=1)
|
45 |
-
|
46 |
-
# Create a prediction label and prediction probability dictionary for each prediction class (this is the required format for Gradio's output parameter)
|
47 |
-
pred_labels_and_probs = {class_names[i]: float(pred_probs[0][i]) for i in range(len(class_names))}
|
48 |
-
|
49 |
-
# Calculate the prediction time
|
50 |
-
pred_time = round(timer() - start_time, 5)
|
51 |
-
|
52 |
-
# Return the prediction dictionary and prediction time
|
53 |
-
return pred_labels_and_probs, pred_time
|
54 |
-
|
55 |
-
### 4. Gradio app ###
|
56 |
-
|
57 |
-
# Create title, description and article strings
|
58 |
-
title = "Food Classifier Moni 🍣"
|
59 |
-
description = "An EfficientNetB2 feature extractor computer vision model to classify images of food as pizza, steak or sushi."
|
60 |
-
article = "Created by Soumyadip Moni"
|
61 |
-
|
62 |
-
# Create examples list from "examples/" directory
|
63 |
-
example_list = [["examples/" + example] for example in os.listdir("examples")]
|
64 |
-
|
65 |
-
# Create the Gradio demo
|
66 |
-
demo = gr.Interface(fn=predict, # mapping function from input to output
|
67 |
-
inputs=gr.Image(type="pil"), # what are the inputs?
|
68 |
-
outputs=[gr.Label(num_top_classes=3, label="Predictions"), # what are the outputs?
|
69 |
-
gr.Number(label="Prediction time (s)")], # our fn has two outputs, therefore we have two outputs
|
70 |
-
# Create examples list from "examples/" directory
|
71 |
-
examples=example_list,
|
72 |
-
title=title,
|
73 |
-
description=description,
|
74 |
-
article=article)
|
75 |
-
|
76 |
-
# Launch the demo!
|
77 |
-
demo.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/transforms/augmentation.py
DELETED
@@ -1,377 +0,0 @@
|
|
1 |
-
# -*- coding: utf-8 -*-
|
2 |
-
# Copyright (c) Facebook, Inc. and its affiliates.
|
3 |
-
|
4 |
-
import inspect
|
5 |
-
import numpy as np
|
6 |
-
import pprint
|
7 |
-
from typing import Any, List, Optional, Tuple, Union
|
8 |
-
from fvcore.transforms.transform import Transform, TransformList
|
9 |
-
|
10 |
-
"""
|
11 |
-
See "Data Augmentation" tutorial for an overview of the system:
|
12 |
-
https://detectron2.readthedocs.io/tutorials/augmentation.html
|
13 |
-
"""
|
14 |
-
|
15 |
-
|
16 |
-
__all__ = [
|
17 |
-
"Augmentation",
|
18 |
-
"AugmentationList",
|
19 |
-
"AugInput",
|
20 |
-
"TransformGen",
|
21 |
-
"apply_transform_gens",
|
22 |
-
"StandardAugInput",
|
23 |
-
"apply_augmentations",
|
24 |
-
]
|
25 |
-
|
26 |
-
|
27 |
-
def _check_img_dtype(img):
|
28 |
-
assert isinstance(img, np.ndarray), "[Augmentation] Needs an numpy array, but got a {}!".format(
|
29 |
-
type(img)
|
30 |
-
)
|
31 |
-
assert not isinstance(img.dtype, np.integer) or (
|
32 |
-
img.dtype == np.uint8
|
33 |
-
), "[Augmentation] Got image of type {}, use uint8 or floating points instead!".format(
|
34 |
-
img.dtype
|
35 |
-
)
|
36 |
-
assert img.ndim in [2, 3], img.ndim
|
37 |
-
|
38 |
-
|
39 |
-
def _get_aug_input_args(aug, aug_input) -> List[Any]:
|
40 |
-
"""
|
41 |
-
Get the arguments to be passed to ``aug.get_transform`` from the input ``aug_input``.
|
42 |
-
"""
|
43 |
-
if aug.input_args is None:
|
44 |
-
# Decide what attributes are needed automatically
|
45 |
-
prms = list(inspect.signature(aug.get_transform).parameters.items())
|
46 |
-
# The default behavior is: if there is one parameter, then its "image"
|
47 |
-
# (work automatically for majority of use cases, and also avoid BC breaking),
|
48 |
-
# Otherwise, use the argument names.
|
49 |
-
if len(prms) == 1:
|
50 |
-
names = ("image",)
|
51 |
-
else:
|
52 |
-
names = []
|
53 |
-
for name, prm in prms:
|
54 |
-
if prm.kind in (inspect.Parameter.VAR_POSITIONAL, inspect.Parameter.VAR_KEYWORD):
|
55 |
-
raise TypeError(
|
56 |
-
f""" \
|
57 |
-
The default implementation of `{type(aug)}.__call__` does not allow \
|
58 |
-
`{type(aug)}.get_transform` to use variable-length arguments (*args, **kwargs)! \
|
59 |
-
If arguments are unknown, reimplement `__call__` instead. \
|
60 |
-
"""
|
61 |
-
)
|
62 |
-
names.append(name)
|
63 |
-
aug.input_args = tuple(names)
|
64 |
-
|
65 |
-
args = []
|
66 |
-
for f in aug.input_args:
|
67 |
-
try:
|
68 |
-
args.append(getattr(aug_input, f))
|
69 |
-
except AttributeError as e:
|
70 |
-
raise AttributeError(
|
71 |
-
f"{type(aug)}.get_transform needs input attribute '{f}', "
|
72 |
-
f"but it is not an attribute of {type(aug_input)}!"
|
73 |
-
) from e
|
74 |
-
return args
|
75 |
-
|
76 |
-
|
77 |
-
class Augmentation:
|
78 |
-
"""
|
79 |
-
Augmentation defines (often random) policies/strategies to generate :class:`Transform`
|
80 |
-
from data. It is often used for pre-processing of input data.
|
81 |
-
|
82 |
-
A "policy" that generates a :class:`Transform` may, in the most general case,
|
83 |
-
need arbitrary information from input data in order to determine what transforms
|
84 |
-
to apply. Therefore, each :class:`Augmentation` instance defines the arguments
|
85 |
-
needed by its :meth:`get_transform` method. When called with the positional arguments,
|
86 |
-
the :meth:`get_transform` method executes the policy.
|
87 |
-
|
88 |
-
Note that :class:`Augmentation` defines the policies to create a :class:`Transform`,
|
89 |
-
but not how to execute the actual transform operations to those data.
|
90 |
-
Its :meth:`__call__` method will use :meth:`AugInput.transform` to execute the transform.
|
91 |
-
|
92 |
-
The returned `Transform` object is meant to describe deterministic transformation, which means
|
93 |
-
it can be re-applied on associated data, e.g. the geometry of an image and its segmentation
|
94 |
-
masks need to be transformed together.
|
95 |
-
(If such re-application is not needed, then determinism is not a crucial requirement.)
|
96 |
-
"""
|
97 |
-
|
98 |
-
input_args: Optional[Tuple[str]] = None
|
99 |
-
"""
|
100 |
-
Stores the attribute names needed by :meth:`get_transform`, e.g. ``("image", "sem_seg")``.
|
101 |
-
By default, it is just a tuple of argument names in :meth:`self.get_transform`, which often only
|
102 |
-
contain "image". As long as the argument name convention is followed, there is no need for
|
103 |
-
users to touch this attribute.
|
104 |
-
"""
|
105 |
-
|
106 |
-
def _init(self, params=None):
|
107 |
-
if params:
|
108 |
-
for k, v in params.items():
|
109 |
-
if k != "self" and not k.startswith("_"):
|
110 |
-
setattr(self, k, v)
|
111 |
-
|
112 |
-
def get_transform(self, *args) -> Transform:
|
113 |
-
"""
|
114 |
-
Execute the policy based on input data, and decide what transform to apply to inputs.
|
115 |
-
|
116 |
-
Args:
|
117 |
-
args: Any fixed-length positional arguments. By default, the name of the arguments
|
118 |
-
should exist in the :class:`AugInput` to be used.
|
119 |
-
|
120 |
-
Returns:
|
121 |
-
Transform: Returns the deterministic transform to apply to the input.
|
122 |
-
|
123 |
-
Examples:
|
124 |
-
::
|
125 |
-
class MyAug:
|
126 |
-
# if a policy needs to know both image and semantic segmentation
|
127 |
-
def get_transform(image, sem_seg) -> T.Transform:
|
128 |
-
pass
|
129 |
-
tfm: Transform = MyAug().get_transform(image, sem_seg)
|
130 |
-
new_image = tfm.apply_image(image)
|
131 |
-
|
132 |
-
Notes:
|
133 |
-
Users can freely use arbitrary new argument names in custom
|
134 |
-
:meth:`get_transform` method, as long as they are available in the
|
135 |
-
input data. In detectron2 we use the following convention:
|
136 |
-
|
137 |
-
* image: (H,W) or (H,W,C) ndarray of type uint8 in range [0, 255], or
|
138 |
-
floating point in range [0, 1] or [0, 255].
|
139 |
-
* boxes: (N,4) ndarray of float32. It represents the instance bounding boxes
|
140 |
-
of N instances. Each is in XYXY format in unit of absolute coordinates.
|
141 |
-
* sem_seg: (H,W) ndarray of type uint8. Each element is an integer label of pixel.
|
142 |
-
|
143 |
-
We do not specify convention for other types and do not include builtin
|
144 |
-
:class:`Augmentation` that uses other types in detectron2.
|
145 |
-
"""
|
146 |
-
raise NotImplementedError
|
147 |
-
|
148 |
-
def __call__(self, aug_input) -> Transform:
|
149 |
-
"""
|
150 |
-
Augment the given `aug_input` **in-place**, and return the transform that's used.
|
151 |
-
|
152 |
-
This method will be called to apply the augmentation. In most augmentation, it
|
153 |
-
is enough to use the default implementation, which calls :meth:`get_transform`
|
154 |
-
using the inputs. But a subclass can overwrite it to have more complicated logic.
|
155 |
-
|
156 |
-
Args:
|
157 |
-
aug_input (AugInput): an object that has attributes needed by this augmentation
|
158 |
-
(defined by ``self.get_transform``). Its ``transform`` method will be called
|
159 |
-
to in-place transform it.
|
160 |
-
|
161 |
-
Returns:
|
162 |
-
Transform: the transform that is applied on the input.
|
163 |
-
"""
|
164 |
-
args = _get_aug_input_args(self, aug_input)
|
165 |
-
tfm = self.get_transform(*args)
|
166 |
-
assert isinstance(tfm, (Transform, TransformList)), (
|
167 |
-
f"{type(self)}.get_transform must return an instance of Transform! "
|
168 |
-
f"Got {type(tfm)} instead."
|
169 |
-
)
|
170 |
-
aug_input.transform(tfm)
|
171 |
-
return tfm
|
172 |
-
|
173 |
-
def _rand_range(self, low=1.0, high=None, size=None):
|
174 |
-
"""
|
175 |
-
Uniform float random number between low and high.
|
176 |
-
"""
|
177 |
-
if high is None:
|
178 |
-
low, high = 0, low
|
179 |
-
if size is None:
|
180 |
-
size = []
|
181 |
-
return np.random.uniform(low, high, size)
|
182 |
-
|
183 |
-
def __repr__(self):
|
184 |
-
"""
|
185 |
-
Produce something like:
|
186 |
-
"MyAugmentation(field1={self.field1}, field2={self.field2})"
|
187 |
-
"""
|
188 |
-
try:
|
189 |
-
sig = inspect.signature(self.__init__)
|
190 |
-
classname = type(self).__name__
|
191 |
-
argstr = []
|
192 |
-
for name, param in sig.parameters.items():
|
193 |
-
assert (
|
194 |
-
param.kind != param.VAR_POSITIONAL and param.kind != param.VAR_KEYWORD
|
195 |
-
), "The default __repr__ doesn't support *args or **kwargs"
|
196 |
-
assert hasattr(self, name), (
|
197 |
-
"Attribute {} not found! "
|
198 |
-
"Default __repr__ only works if attributes match the constructor.".format(name)
|
199 |
-
)
|
200 |
-
attr = getattr(self, name)
|
201 |
-
default = param.default
|
202 |
-
if default is attr:
|
203 |
-
continue
|
204 |
-
attr_str = pprint.pformat(attr)
|
205 |
-
if "\n" in attr_str:
|
206 |
-
# don't show it if pformat decides to use >1 lines
|
207 |
-
attr_str = "..."
|
208 |
-
argstr.append("{}={}".format(name, attr_str))
|
209 |
-
return "{}({})".format(classname, ", ".join(argstr))
|
210 |
-
except AssertionError:
|
211 |
-
return super().__repr__()
|
212 |
-
|
213 |
-
__str__ = __repr__
|
214 |
-
|
215 |
-
|
216 |
-
def _transform_to_aug(tfm_or_aug):
|
217 |
-
"""
|
218 |
-
Wrap Transform into Augmentation.
|
219 |
-
Private, used internally to implement augmentations.
|
220 |
-
"""
|
221 |
-
assert isinstance(tfm_or_aug, (Transform, Augmentation)), tfm_or_aug
|
222 |
-
if isinstance(tfm_or_aug, Augmentation):
|
223 |
-
return tfm_or_aug
|
224 |
-
else:
|
225 |
-
|
226 |
-
class _TransformToAug(Augmentation):
|
227 |
-
def __init__(self, tfm: Transform):
|
228 |
-
self.tfm = tfm
|
229 |
-
|
230 |
-
def get_transform(self, *args):
|
231 |
-
return self.tfm
|
232 |
-
|
233 |
-
def __repr__(self):
|
234 |
-
return repr(self.tfm)
|
235 |
-
|
236 |
-
__str__ = __repr__
|
237 |
-
|
238 |
-
return _TransformToAug(tfm_or_aug)
|
239 |
-
|
240 |
-
|
241 |
-
class AugmentationList(Augmentation):
|
242 |
-
"""
|
243 |
-
Apply a sequence of augmentations.
|
244 |
-
|
245 |
-
It has ``__call__`` method to apply the augmentations.
|
246 |
-
|
247 |
-
Note that :meth:`get_transform` method is impossible (will throw error if called)
|
248 |
-
for :class:`AugmentationList`, because in order to apply a sequence of augmentations,
|
249 |
-
the kth augmentation must be applied first, to provide inputs needed by the (k+1)th
|
250 |
-
augmentation.
|
251 |
-
"""
|
252 |
-
|
253 |
-
def __init__(self, augs):
|
254 |
-
"""
|
255 |
-
Args:
|
256 |
-
augs (list[Augmentation or Transform]):
|
257 |
-
"""
|
258 |
-
super().__init__()
|
259 |
-
self.augs = [_transform_to_aug(x) for x in augs]
|
260 |
-
|
261 |
-
def __call__(self, aug_input) -> Transform:
|
262 |
-
tfms = []
|
263 |
-
for x in self.augs:
|
264 |
-
tfm = x(aug_input)
|
265 |
-
tfms.append(tfm)
|
266 |
-
return TransformList(tfms)
|
267 |
-
|
268 |
-
def __repr__(self):
|
269 |
-
msgs = [str(x) for x in self.augs]
|
270 |
-
return "AugmentationList[{}]".format(", ".join(msgs))
|
271 |
-
|
272 |
-
__str__ = __repr__
|
273 |
-
|
274 |
-
|
275 |
-
class AugInput:
|
276 |
-
"""
|
277 |
-
Input that can be used with :meth:`Augmentation.__call__`.
|
278 |
-
This is a standard implementation for the majority of use cases.
|
279 |
-
This class provides the standard attributes **"image", "boxes", "sem_seg"**
|
280 |
-
defined in :meth:`__init__` and they may be needed by different augmentations.
|
281 |
-
Most augmentation policies do not need attributes beyond these three.
|
282 |
-
|
283 |
-
After applying augmentations to these attributes (using :meth:`AugInput.transform`),
|
284 |
-
the returned transforms can then be used to transform other data structures that users have.
|
285 |
-
|
286 |
-
Examples:
|
287 |
-
::
|
288 |
-
input = AugInput(image, boxes=boxes)
|
289 |
-
tfms = augmentation(input)
|
290 |
-
transformed_image = input.image
|
291 |
-
transformed_boxes = input.boxes
|
292 |
-
transformed_other_data = tfms.apply_other(other_data)
|
293 |
-
|
294 |
-
An extended project that works with new data types may implement augmentation policies
|
295 |
-
that need other inputs. An algorithm may need to transform inputs in a way different
|
296 |
-
from the standard approach defined in this class. In those rare situations, users can
|
297 |
-
implement a class similar to this class, that satify the following condition:
|
298 |
-
|
299 |
-
* The input must provide access to these data in the form of attribute access
|
300 |
-
(``getattr``). For example, if an :class:`Augmentation` to be applied needs "image"
|
301 |
-
and "sem_seg" arguments, its input must have the attribute "image" and "sem_seg".
|
302 |
-
* The input must have a ``transform(tfm: Transform) -> None`` method which
|
303 |
-
in-place transforms all its attributes.
|
304 |
-
"""
|
305 |
-
|
306 |
-
# TODO maybe should support more builtin data types here
|
307 |
-
def __init__(
|
308 |
-
self,
|
309 |
-
image: np.ndarray,
|
310 |
-
*,
|
311 |
-
boxes: Optional[np.ndarray] = None,
|
312 |
-
sem_seg: Optional[np.ndarray] = None,
|
313 |
-
):
|
314 |
-
"""
|
315 |
-
Args:
|
316 |
-
image (ndarray): (H,W) or (H,W,C) ndarray of type uint8 in range [0, 255], or
|
317 |
-
floating point in range [0, 1] or [0, 255]. The meaning of C is up
|
318 |
-
to users.
|
319 |
-
boxes (ndarray or None): Nx4 float32 boxes in XYXY_ABS mode
|
320 |
-
sem_seg (ndarray or None): HxW uint8 semantic segmentation mask. Each element
|
321 |
-
is an integer label of pixel.
|
322 |
-
"""
|
323 |
-
_check_img_dtype(image)
|
324 |
-
self.image = image
|
325 |
-
self.boxes = boxes
|
326 |
-
self.sem_seg = sem_seg
|
327 |
-
|
328 |
-
def transform(self, tfm: Transform) -> None:
|
329 |
-
"""
|
330 |
-
In-place transform all attributes of this class.
|
331 |
-
|
332 |
-
By "in-place", it means after calling this method, accessing an attribute such
|
333 |
-
as ``self.image`` will return transformed data.
|
334 |
-
"""
|
335 |
-
self.image = tfm.apply_image(self.image)
|
336 |
-
if self.boxes is not None:
|
337 |
-
self.boxes = tfm.apply_box(self.boxes)
|
338 |
-
if self.sem_seg is not None:
|
339 |
-
self.sem_seg = tfm.apply_segmentation(self.sem_seg)
|
340 |
-
|
341 |
-
def apply_augmentations(
|
342 |
-
self, augmentations: List[Union[Augmentation, Transform]]
|
343 |
-
) -> TransformList:
|
344 |
-
"""
|
345 |
-
Equivalent of ``AugmentationList(augmentations)(self)``
|
346 |
-
"""
|
347 |
-
return AugmentationList(augmentations)(self)
|
348 |
-
|
349 |
-
|
350 |
-
def apply_augmentations(augmentations: List[Union[Transform, Augmentation]], inputs):
|
351 |
-
"""
|
352 |
-
Use ``T.AugmentationList(augmentations)(inputs)`` instead.
|
353 |
-
"""
|
354 |
-
if isinstance(inputs, np.ndarray):
|
355 |
-
# handle the common case of image-only Augmentation, also for backward compatibility
|
356 |
-
image_only = True
|
357 |
-
inputs = AugInput(inputs)
|
358 |
-
else:
|
359 |
-
image_only = False
|
360 |
-
tfms = inputs.apply_augmentations(augmentations)
|
361 |
-
return inputs.image if image_only else inputs, tfms
|
362 |
-
|
363 |
-
|
364 |
-
apply_transform_gens = apply_augmentations
|
365 |
-
"""
|
366 |
-
Alias for backward-compatibility.
|
367 |
-
"""
|
368 |
-
|
369 |
-
TransformGen = Augmentation
|
370 |
-
"""
|
371 |
-
Alias for Augmentation, since it is something that generates :class:`Transform`s
|
372 |
-
"""
|
373 |
-
|
374 |
-
StandardAugInput = AugInput
|
375 |
-
"""
|
376 |
-
Alias for compatibility. It's not worth the complexity to have two classes.
|
377 |
-
"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/BartPoint/VoiceChange/infer_pack/modules/F0Predictor/DioF0Predictor.py
DELETED
@@ -1,90 +0,0 @@
|
|
1 |
-
from infer_pack.modules.F0Predictor.F0Predictor import F0Predictor
|
2 |
-
import pyworld
|
3 |
-
import numpy as np
|
4 |
-
|
5 |
-
|
6 |
-
class DioF0Predictor(F0Predictor):
|
7 |
-
def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100):
|
8 |
-
self.hop_length = hop_length
|
9 |
-
self.f0_min = f0_min
|
10 |
-
self.f0_max = f0_max
|
11 |
-
self.sampling_rate = sampling_rate
|
12 |
-
|
13 |
-
def interpolate_f0(self, f0):
|
14 |
-
"""
|
15 |
-
对F0进行插值处理
|
16 |
-
"""
|
17 |
-
|
18 |
-
data = np.reshape(f0, (f0.size, 1))
|
19 |
-
|
20 |
-
vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
|
21 |
-
vuv_vector[data > 0.0] = 1.0
|
22 |
-
vuv_vector[data <= 0.0] = 0.0
|
23 |
-
|
24 |
-
ip_data = data
|
25 |
-
|
26 |
-
frame_number = data.size
|
27 |
-
last_value = 0.0
|
28 |
-
for i in range(frame_number):
|
29 |
-
if data[i] <= 0.0:
|
30 |
-
j = i + 1
|
31 |
-
for j in range(i + 1, frame_number):
|
32 |
-
if data[j] > 0.0:
|
33 |
-
break
|
34 |
-
if j < frame_number - 1:
|
35 |
-
if last_value > 0.0:
|
36 |
-
step = (data[j] - data[i - 1]) / float(j - i)
|
37 |
-
for k in range(i, j):
|
38 |
-
ip_data[k] = data[i - 1] + step * (k - i + 1)
|
39 |
-
else:
|
40 |
-
for k in range(i, j):
|
41 |
-
ip_data[k] = data[j]
|
42 |
-
else:
|
43 |
-
for k in range(i, frame_number):
|
44 |
-
ip_data[k] = last_value
|
45 |
-
else:
|
46 |
-
ip_data[i] = data[i] # 这里可能存在一个没有必要的拷贝
|
47 |
-
last_value = data[i]
|
48 |
-
|
49 |
-
return ip_data[:, 0], vuv_vector[:, 0]
|
50 |
-
|
51 |
-
def resize_f0(self, x, target_len):
|
52 |
-
source = np.array(x)
|
53 |
-
source[source < 0.001] = np.nan
|
54 |
-
target = np.interp(
|
55 |
-
np.arange(0, len(source) * target_len, len(source)) / target_len,
|
56 |
-
np.arange(0, len(source)),
|
57 |
-
source,
|
58 |
-
)
|
59 |
-
res = np.nan_to_num(target)
|
60 |
-
return res
|
61 |
-
|
62 |
-
def compute_f0(self, wav, p_len=None):
|
63 |
-
if p_len is None:
|
64 |
-
p_len = wav.shape[0] // self.hop_length
|
65 |
-
f0, t = pyworld.dio(
|
66 |
-
wav.astype(np.double),
|
67 |
-
fs=self.sampling_rate,
|
68 |
-
f0_floor=self.f0_min,
|
69 |
-
f0_ceil=self.f0_max,
|
70 |
-
frame_period=1000 * self.hop_length / self.sampling_rate,
|
71 |
-
)
|
72 |
-
f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
|
73 |
-
for index, pitch in enumerate(f0):
|
74 |
-
f0[index] = round(pitch, 1)
|
75 |
-
return self.interpolate_f0(self.resize_f0(f0, p_len))[0]
|
76 |
-
|
77 |
-
def compute_f0_uv(self, wav, p_len=None):
|
78 |
-
if p_len is None:
|
79 |
-
p_len = wav.shape[0] // self.hop_length
|
80 |
-
f0, t = pyworld.dio(
|
81 |
-
wav.astype(np.double),
|
82 |
-
fs=self.sampling_rate,
|
83 |
-
f0_floor=self.f0_min,
|
84 |
-
f0_ceil=self.f0_max,
|
85 |
-
frame_period=1000 * self.hop_length / self.sampling_rate,
|
86 |
-
)
|
87 |
-
f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
|
88 |
-
for index, pitch in enumerate(f0):
|
89 |
-
f0[index] = round(pitch, 1)
|
90 |
-
return self.interpolate_f0(self.resize_f0(f0, p_len))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/ Imo Apk.md
DELETED
@@ -1,48 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Salón de uñas juego Apk Descargar: Una guía para los amantes del arte de uñas</h1>
|
3 |
-
<p>¿Te encanta hacer las uñas y crear bellas uñas? ¿Quieres divertirte y expresar tu creatividad sin gastar dinero o tiempo en un salón de uñas real? Si usted respondió que sí, entonces es posible que desee probar un juego de salón de uñas apk descargar. Un juego de salón de uñas es un juego móvil que le permite ejecutar su propio salón de uñas virtual y diseñar uñas increíbles para usted o sus clientes. Puede elegir entre diferentes modos de juego, formas de uñas, colores, patrones, efectos y accesorios para crear manicuras impresionantes. También puedes interactuar con clientes virtuales, completar desafíos, desbloquear nuevas funciones y compartir tus creaciones con otros jugadores. En este artículo, le mostraremos cómo descargar e instalar un juego de salón de uñas en su dispositivo, cómo jugarlo y crear diseños de uñas impresionantes, y cómo mejorar sus habilidades y experiencia. ¡Vamos a empezar! </p>
|
4 |
-
<h2>скачать imo apk</h2><br /><p><b><b>Download Zip</b> ✑ ✑ ✑ <a href="https://bltlly.com/2v6JBu">https://bltlly.com/2v6JBu</a></b></p><br /><br />
|
5 |
-
<h2>Cómo descargar e instalar juegos de salón de uñas en su dispositivo</h2>
|
6 |
-
<p>Si desea jugar un juego de salón de uñas en su dispositivo, tendrá que descargar e instalar un archivo apk. Un archivo apk es un paquete de aplicaciones Android que contiene todos los archivos y datos necesarios para ejecutar una aplicación en su dispositivo. Usted puede encontrar muchas fuentes de archivos apk en línea, pero hay que tener cuidado con su fiabilidad y seguridad. Estos son los pasos que debe seguir para descargar e instalar un archivo apk juego de salón de uñas en su dispositivo:</p>
|
7 |
-
<ol>
|
8 |
-
<li>Encontrar una fuente confiable para el archivo apk. Usted puede buscar "descarga apk juego de salón de uñas" en Google o Bing, o utilizar un sitio web de confianza como APKCombo o APKPure. Asegúrate de leer los comentarios, calificaciones, descripciones y permisos de la aplicación antes de descargarla. </li>
|
9 |
-
|
10 |
-
<li>Descargar e instalar el archivo apk. Una vez que haya encontrado el archivo apk que desea, toque en él para comenzar a descargarlo. Puede ver un mensaje de advertencia pidiéndole que confirme la descarga. Pulse Aceptar o Sí para continuar. Una vez completada la descarga, toque nuevamente el archivo para comenzar a instalarlo. Puede ver otro mensaje de advertencia pidiéndole que confirme la instalación. Pulse Instalar o Sí para continuar. </li>
|
11 |
-
<li>Iniciar el juego y disfrutar. Después de la instalación se ha completado, se puede encontrar el icono de la aplicación en la pantalla de inicio o cajón de aplicaciones. Toque en él para iniciar el juego y empezar a jugar. </li>
|
12 |
-
</ol>
|
13 |
-
<h2>Cómo jugar juegos de salón de uñas y crear diseños de uñas impresionantes</h2>
|
14 |
-
<p>Ahora que ha descargado e instalado un juego de salón de uñas en su dispositivo, puede comenzar a jugar y crear diseños de uñas impresionantes. Estos son los pasos básicos que debe seguir para jugar un juego de salón de uñas y crear un arte de uñas increíble:</p>
|
15 |
-
<ol>
|
16 |
-
<li>Elige un modo de juego y un cliente. La mayoría de los juegos de salón de uñas tienen diferentes modos de juego, como el modo libre, el modo desafío o el modo historia. Puede elegir el que se adapte a su preferencia y nivel de habilidad. También puede elegir un cliente para servir, ya sea virtual o usted mismo. Cada cliente puede tener diferentes preferencias, solicitudes o calificaciones para su arte de uñas. </li>
|
17 |
-
<li>Siga las instrucciones y utilice las herramientas para preparar las uñas. Antes de que pueda aplicar cualquier esmalte de uñas o diseño, es necesario preparar las uñas mediante la limpieza, corte, limado y pulido. Puede usar varias herramientas, como tijeras, cortaúñas, archivos, tampones, empujadores de cutículas y cepillos. Es necesario seguir las instrucciones en la pantalla y utilizar las herramientas correctamente para evitar dañar las uñas. </li>
|
18 |
-
|
19 |
-
<li>Añadir efectos especiales, pegatinas, gemas y accesorios. Para hacer su arte de uñas más llamativo y creativo, puede agregar efectos especiales, pegatinas, gemas y accesorios a las uñas. Puedes elegir entre diferentes efectos, como destellos, estrellas, corazones, flores o estampados de animales. También puede agregar pegatinas de varias formas y temas, como letras, emojis, frutas o dibujos animados. También puedes añadir gemas de diferentes tamaños y colores para que tus uñas brillen. También puedes añadir accesorios a tus dedos o muñecas, como anillos, pulseras o relojes. </li>
|
20 |
-
<li>Mostrar su arte de uñas y ganar monedas y calificaciones. Después de terminar su diseño de uñas, usted puede mostrar a su cliente y ver su reacción. También puede tomar una foto de su arte de uñas y guardarlo en su galería o compartirlo con sus amigos y otros jugadores. También puede ganar monedas y calificaciones para su arte de uñas basado en lo bien que siguió las instrucciones y lo satisfecho que estaba su cliente. Puede utilizar las monedas para comprar más herramientas y características para su salón de uñas. </li>
|
21 |
-
</ol>
|
22 |
-
<h2> Cómo mejorar su salón de uñas Habilidades de juego y experiencia</h2>
|
23 |
-
<p>Si quieres mejorar tus habilidades y experiencia de juego de salón de uñas, puedes probar estos consejos:</p>
|
24 |
-
<ul>
|
25 |
-
<li>Completa varios desafíos y misiones para desbloquear nuevos diseños y características. La mayoría de los juegos de salón de uñas tienen desafíos y misiones que ponen a prueba sus habilidades y creatividad. Puede completarlos para desbloquear nuevos diseños y características para su juego de salón de uñas. Por ejemplo, puede que tenga que crear un diseño de uñas específico para un cliente o utilizar una determinada herramienta o color. </li>
|
26 |
-
<li>Interactuar con los clientes virtuales y cumplir con sus peticiones de uñas. La mayoría de los juegos de salón de uñas tienen clientes virtuales que visitan su salón de uñas y pedir su servicio. Puede interactuar con ellos y cumplir con sus solicitudes de arte de uñas para ganar monedas y calificaciones. También puedes aprender más sobre sus personalidades y preferencias hablando con ellos. </li>
|
27 |
-
|
28 |
-
<li>Descubre nuevas tendencias y estilos en arte de uñas y moda. La mayoría de los juegos de salón de uñas tienen actualizaciones que introducen nuevas tendencias y estilos en el arte de uñas y la moda. Puedes descubrirlos jugando el juego regularmente o siguiendo las cuentas de redes sociales del juego. También puede inspirarse en las tendencias y técnicas del arte de uñas reales navegando por revistas o blogs en línea. </li>
|
29 |
-
<li>Comparte tus creaciones de uñas con tus amigos y otros jugadores. La mayoría de los juegos de salón de uñas tienen características sociales que le permiten compartir sus creaciones de uñas con sus amigos y otros jugadores. Puede enviarles fotos de su arte de uñas o invitarlos a visitar su salón de uñas virtual. También puedes ver sus creaciones y darles comentarios o cumplidos. </li>
|
30 |
-
</ul>
|
31 |
-
<h2>Conclusión</h2>
|
32 |
-
<p>Juegos de salón de uñas son divertidos y creativos juegos móviles que le permiten ejecutar su propio salón de uñas virtual y diseñar uñas increíbles para usted o sus clientes. Puede descargar un archivo apk de una fuente confiable en línea e instalarlo en su dispositivo siguiendo los pasos que hemos explicado. A continuación, puede jugar el juego y crear diseños de uñas impresionantes mediante la elección de varios modos de juego, formas de uñas, colores, patrones, efectos y accesorios. También puedes mejorar tus habilidades y experiencia completando retos, interactuando con clientes, mejorando tus herramientas y habilidades, descubriendo nuevas tendencias y estilos, y compartiendo tus creaciones con otros. Juegos de salón de uñas son una gran manera de divertirse y expresar su creatividad sin gastar dinero o tiempo en un salón de uñas real. ¿Por qué no darles una oportunidad y ver por ti mismo? <h2>FAQs</h2>
|
33 |
-
<p>Aquí están algunas de las preguntas más frecuentes sobre juegos de salón de uñas:</p>
|
34 |
-
<p></p>
|
35 |
-
<ol>
|
36 |
-
<li> ¿Cuáles son algunos de los mejores juegos de salón de uñas para descargar? </li>
|
37 |
-
|
38 |
-
<li>¿Cómo puedo evitar anuncios y compras en la aplicación en juegos de salón de uñas? </li>
|
39 |
-
<p>Anuncios y compras en la aplicación son comunes en la mayoría de los juegos de salón de uñas gratis, pero pueden ser molestos y distracción. Puedes evitarlos apagando tu conexión a Internet mientras juegas, o usando una aplicación de bloqueo de anuncios. También puedes buscar versiones modificadas o hackeadas del juego que eliminen anuncios y compras en la aplicación, pero ten cuidado con su fiabilidad y seguridad. </p>
|
40 |
-
<li>¿Cómo puedo inspirarme en las tendencias y técnicas del arte del clavo real? </li>
|
41 |
-
<p>Si quieres inspirarte en las tendencias y técnicas del arte del clavo real, puedes navegar por revistas en línea o blogs que presentan arte del clavo, como Nail It! Magazine, Nails Magazine, o El Nailasaurus. También puedes seguir a artistas de uñas en plataformas de redes sociales como Instagram o Pinterest, como @nail_unistella, @nailsbymei o @simplynailogical. También puedes ver tutoriales de uñas en YouTube o TikTok, como CutePolish, Nail Career Education o Nails By Jema. </p>
|
42 |
-
<li>¿Cómo puedo hacer mis propios diseños de uñas en juegos de salón de uñas? </li>
|
43 |
-
<p>Si desea hacer sus propios diseños de uñas en los juegos de salón de uñas, puede utilizar el modo libre o el modo personalizado que algunos juegos ofrecen. Estos modos le permiten crear sus propios diseños sin seguir instrucciones o solicitudes. Puede utilizar su imaginación y creatividad para mezclar y combinar diferentes colores, patrones, efectos y accesorios. También puede utilizar el modo de foto o el modo de cámara que algunos juegos ofrecen. Estos modos le permiten tomar una foto de sus uñas reales o utilizar la cámara de su dispositivo para escanear las uñas y aplicar arte de uñas virtuales a ellos. </p>
|
44 |
-
<li> ¿Cómo puedo aprender más sobre el cuidado de las uñas y la salud? </li>
|
45 |
-
|
46 |
-
</ol></p> 64aa2da5cf<br />
|
47 |
-
<br />
|
48 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Call Of Duty Pc Descargar Black Ops 4.md
DELETED
@@ -1,81 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Gidigidi Mp3 Descargar Black Sherif: Cómo disfrutar del último éxito del rapero ghanés</h1>
|
3 |
-
<p>Si eres un fan de la música africana, especialmente el rap ghanés, probablemente hayas oído hablar de <strong>gidigidi mp3 download Black Sherif</strong>. Esta es una de las canciones más calientes del continente en este momento, y ha estado haciendo olas en varias cartas y plataformas. Pero, ¿qué es gidigidi y quién es Black Sherif? ¿Y cómo se puede descargar y disfrutar de esta increíble canción? En este artículo, responderemos estas preguntas y más, así que sigue leyendo. </p>
|
4 |
-
<h2>¿Qué es gidigidi y quién es Black Sherif? </h2>
|
5 |
-
<p>Gidigidi es una palabra yoruba que significa <em>muchísimo</em> o <em>grandemente</em>. También es el título de una canción de <strong>Black Sherif</strong>, un cantante y rapero ghanés que saltó a la fama en 2021 con sus canciones <em>Primer Sermón</em> y <em>Segundo Sermón</em>. Siguió con su sencillo <em>Kwaku the Traveller</em>, que alcanzó el número uno en las listas de Apple Music de Ghana y Nigeria. Luego lanzó su álbum debut, <em>The Villain I Never Was</em>, el 5 de octubre de 2022. </p>
|
6 |
-
<h2>call of duty pc descargar black ops 4</h2><br /><p><b><b>Download Zip</b> ❤❤❤ <a href="https://bltlly.com/2v6MM5">https://bltlly.com/2v6MM5</a></b></p><br /><br />
|
7 |
-
<p>Black Sherif, cuyo verdadero nombre es Mohammed Ismail Sharrif, nació el 9 de enero de 2002, en Konongo-Zongo, en la Región Ashanti de Ghana. Comenzó su carrera musical en 2019 con su canción <em>Cry for Me</em>, y desde entonces ha estado haciendo olas con su mezcla única de highlife, reggae, hip-hop, drill y afrofusión. También es conocido por sus letras pegadizas, que a menudo reflejan sus experiencias de vida y problemas sociales. </p>
|
8 |
-
<h2>¿Por qué es gidigidi mp3 descargar Black Sherif popular y tendencia? </h2>
|
9 |
-
<p>Gidigidi mp3 download Black Sherif es popular y trending porque es una gran canción que muestra el talento y versatilidad de Black Sherif. La canción cuenta con otros dos artistas, Smallgod y Tory Lanez, que añaden su propio sabor y estilo a la pista. La canción tiene un gancho pegadizo, un ritmo genial y un flujo suave que te hará querer bailar y cantar. </p>
|
10 |
-
|
11 |
-
<h2>¿Cuáles son los beneficios de descargar gidigidi mp3 por Black Sherif? </h2>
|
12 |
-
<p>Descargar gidigidi mp3 por Black Sherif tiene muchos beneficios, como:</p>
|
13 |
-
<ul>
|
14 |
-
<li>Puede escuchar la canción sin conexión, sin preocuparse por la conexión a Internet o los cargos de datos. </li>
|
15 |
-
<li> Puede transferir la canción a cualquier dispositivo, como su teléfono, portátil, tableta o reproductor de mp3. </li>
|
16 |
-
<li> Puede crear su propia lista de reproducción y mezclar la canción con otras canciones de su elección. </li>
|
17 |
-
<li>Puedes apoyar al artista y mostrar tu aprecio por su trabajo. </li>
|
18 |
-
<li> Puedes disfrutar de la canción en cualquier momento, en cualquier lugar y en cualquier estado de ánimo. </li>
|
19 |
-
</ul>
|
20 |
-
<h2>Cómo descargar Gidigidi Mp3 por Black Sherif</h2>
|
21 |
-
<p>Descargar gidigidi mp3 por Black Sherif es fácil y simple, si sigue estos pasos:</p>
|
22 |
-
<h3>Paso 1: Encuentre un sitio confiable y legal para descargar mp3</h3>
|
23 |
-
<p>El primer paso es encontrar un sitio de descarga de mp3 confiable y legal que ofrece gidigidi mp3 por Black Sherif. Hay muchos sitios que dicen ofrecer descargas de mp3 gratis, pero algunos de ellos pueden ser inseguros, ilegales o de baja calidad. Por lo tanto, usted debe hacer alguna investigación y comprobar las revisiones y calificaciones del sitio antes de usarlo. También debe asegurarse de que el sitio tiene una licencia válida y permiso para distribuir la canción. </p>
|
24 |
-
<p>Algunos de los sitios de descarga mp3 fiables y legales que ofrecen gidigidi mp3 por Black Sherif son:</p>
|
25 |
-
<tabla>
|
26 |
-
<tr><th>Nombre del sitio</th><th>URL</th><th>Características</th></tr>
|
27 |
-
<tr><td>Audiomack</td><td></td><td>- Transmisión y descarga gratuitas e ilimitadas<br>- Archivos de audio de alta calidad<br>- Interfaz y aplicación fácil de usar<br>- Soporta varios géneros y artistas</td></tr>
|
28 |
-
<tr><td>Boomplay</td><td></td><td>- Transmisión y descarga gratuitas e ilimitadas<br>- Archivos de audio de alta calidad<br>- Interfaz y aplicación fácil de usar br>- Soporta varios géneros y artistas<br>- Ofrece recompensas y descuentos</td></tr>
|
29 |
-
|
30 |
-
<tr><td>GhanaSongs</td><td></td><td>- Transmisión y descarga gratuitas e ilimitadas<br>- Archivos de audio de alta calidad<br>- Interfaz y aplicación fácil de usar<br>- Soporta varios géneros y artistas<br>- Ofrece noticias y actualizaciones sobre música ghanesa</td></tr>
|
31 |
-
<tr><td>Mp3Juices</td><td></td><td>>- Transmisión y descarga gratuita e ilimitada<br>- Archivos de audio de alta calidad<br>- Interfaz y aplicación fácil de usar<br>- Soporta varios géneros y artistas<br>- Ofrece un motor de búsqueda que encuentra archivos mp3 de múltiples fuentes</td></tr>
|
32 |
-
</tabla>
|
33 |
-
<h3>Paso 2: Buscar gidigidi mp3 descargar Sherif negro en el sitio</h3>
|
34 |
-
<p>El segundo paso es buscar gidigidi mp3 descargar Black Sherif en el sitio que ha elegido. Puede utilizar la barra de búsqueda o la función de búsqueda para encontrar la canción. También puede filtrar los resultados por género, artista, álbum o popularidad. Deberías ver el título de la canción, nombre del artista, duración, tamaño y calidad del archivo mp3. </p>
|
35 |
-
<p></p>
|
36 |
-
<h3>Paso 3: Elija la calidad y el formato del archivo mp3</h3>
|
37 |
-
<p>El tercer paso es elegir la calidad y el formato del archivo mp3 que desea descargar. La calidad del archivo mp3 depende de la tasa de bits, que se mide en kilobits por segundo (kbps). Cuanto mayor sea la tasa de bits, mejor será la calidad del sonido, pero también mayor será el tamaño del archivo. El formato del archivo mp3 depende de la extensión, que suele ser . mp3 o . m4a. La extensión determina cómo el archivo es codificado y decodificado por diferentes dispositivos. El formato más común es . mp3, que es compatible con la mayoría de los dispositivos. </p>
|
38 |
-
<p>Puede elegir la calidad y el formato del archivo mp3 de acuerdo con su preferencia y la capacidad del dispositivo. Algunos sitios pueden ofrecer diferentes opciones de calidad y formato, mientras que otros pueden tener una opción fija. Debería ver la calidad y el formato del archivo mp3 junto al botón de descarga. </p>
|
39 |
-
<h3>Paso 4: Haga clic en el botón de descarga y guarde el archivo en su dispositivo</h3>
|
40 |
-
|
41 |
-
<h2>Cómo disfrutar de Gidigidi Mp3 por Black Sherif</h2>
|
42 |
-
<p>Ahora que has descargado gidigidi mp3 por Black Sherif, puedes disfrutarlo de muchas maneras, como:</p>
|
43 |
-
<h3>Escuchar la canción con auriculares o altavoces</h3>
|
44 |
-
<p>La mejor manera de disfrutar de gidigidi mp3 por Black Sherif es escucharlo con auriculares o altavoces. Esto le permitirá escuchar la canción claramente y apreciar su calidad de sonido. También puede ajustar el volumen y la configuración del ecualizador para adaptarse a sus preferencias. Puede escuchar la canción en su dispositivo o en cualquier otro dispositivo que admita la reproducción de mp3, como un estéreo de automóvil, un sistema de cine en casa o un altavoz inteligente. </p>
|
45 |
-
<h3>Canta junto a las letras y aprende algunas palabras yorubas</h3>
|
46 |
-
<p>Otra manera de disfrutar de gidigidi mp3 por Black Sherif es cantar junto a las letras y aprender algunas palabras yorubas. La canción tiene un gancho pegadizo que va así:</p>
|
47 |
-
<blockquote>
|
48 |
-
<p>Gidigidi gidigidi gidigidi gidigidi<br>
|
49 |
-
Gidigidi gidigidi gidigidi gidigidi<br>
|
50 |
-
Gidigidi gidigidi gidigidi gidigidi<br>
|
51 |
-
Gidigidi gidigidi gidigidi gidigidi</p>
|
52 |
-
</blockquote>
|
53 |
-
<p>Puedes memorizar y repetir fácilmente este gancho, y divertirte con él. También puedes aprender algunas palabras yorubas de la canción, como:</p>
|
54 |
-
<ul>
|
55 |
-
<li>Omo: niño o hijo</li>
|
56 |
-
<li>Oluwa: Dios o señor</li>
|
57 |
-
<li>Owo: dinero o mano</li>
|
58 |
-
<li>Alubarika: bendición o gracia</li>
|
59 |
-
<li>Amin: amén o así sea</li>
|
60 |
-
</ul>
|
61 |
-
<h3>Ver el video musical oficial en YouTube u otras plataformas</h3>
|
62 |
-
|
63 |
-
<h3>Comparte la canción con tus amigos y familiares en las redes sociales</h3>
|
64 |
-
<p>Una cuarta manera de disfrutar de gidigidi mp3 por Black Sherif es compartir la canción con tus amigos y familiares en las redes sociales. Puedes publicar la canción en tu Facebook, Twitter, Instagram, TikTok, WhatsApp o cualquier otra plataforma que utilices. También puedes etiquetar a Black Sherif y usar el hashtag #gidigidibyblacksherif para mostrar tu apoyo y aprecio por su trabajo. También puede unirse a la conversación y ver lo que otras personas están diciendo sobre la canción. Incluso puede tener la oportunidad de interactuar con el propio Black Sherif, ya que es muy activo y receptivo en las redes sociales. </p>
|
65 |
-
<h2>Conclusión</h2>
|
66 |
-
<p>Gidigidi mp3 download Black Sherif es una de las mejores canciones de 2022, y no deberías perdértela. Es una canción que te hará sentir bien, lleno de energía e inspirado. También es una canción que te presentará algo de rap y cultura ghanesa. Es fácil y sencillo descargar y disfrutar de esta canción, si sigues los pasos que te hemos dado en este artículo. </p>
|
67 |
-
<p>Entonces, ¿qué estás esperando? Sigue adelante y descarga gidigidi mp3 por Black Sherif hoy, y disfrútalo de la manera que quieras. No te arrepentirás de ello. Y si quieres más canciones de Black Sherif, puedes echar un vistazo a su álbum <em>The Villain I Never Was</em>, que está disponible en todas las plataformas de streaming. </p>
|
68 |
-
<p>Gracias por leer este artículo. Esperamos que le haya resultado útil e informativo. Si tiene alguna pregunta o comentario, no dude en dejarlos a continuación. Y no te olvides de compartir este artículo con tus amigos y familiares que pueden estar interesados en gidigidi mp3 download Black Sherif.</p>
|
69 |
-
<h2>Preguntas frecuentes</h2>
|
70 |
-
<h4>¿Quién es Sherif Negro? </h4>
|
71 |
-
|
72 |
-
<h4>¿Cuál es el significado de gidigidi? </h4>
|
73 |
-
<p>Gidigidi es una palabra yoruba que significa <em>muchísimo</em> o <em>grandemente</em>. También es el título de una canción de Black Sherif, con Smallgod y Tory Lanez. La canción trata sobre expresar gratitud y aprecio por las bendiciones y oportunidades en la vida. </p>
|
74 |
-
<h4> ¿Qué género de música es gidigidi por Black Sherif? </h4>
|
75 |
-
<p>Gidigidi de Black Sherif es un género de música que se puede describir como afrofusión, que es una fusión de música africana con otros géneros, como hip-hop, reggae, dancehall y pop. La canción tiene elementos de highlife, que es un género ghanés que utiliza guitarras, cuernos y percusión, y taladro, que es un género británico que utiliza ritmos rápidos, bajo y argot. </p>
|
76 |
-
<h4>¿Cuándo fue liberado gidigidi por Black Sherif? </h4>
|
77 |
-
<p>Gidigidi de Black Sherif fue lanzado el 15 de octubre de 2022, como el segundo sencillo de su álbum <em>The Villain I Never Was</em>. La canción fue producida por MOG Beatz y mezclada por Samsney. La canción fue acompañada por un video musical oficial, dirigido por JWillz.</p>
|
78 |
-
<h4>¿Dónde puedo encontrar más canciones de Black Sherif? </h4>
|
79 |
-
<p>Puedes encontrar más canciones de Black Sherif en su álbum <em>The Villain I Never Was</em>, que está disponible en todas las plataformas de streaming, como Spotify, Apple Music, Audiomack, Boomplay y YouTube. También puedes seguirlo en sus cuentas de redes sociales, como Instagram, Twitter, Facebook y TikTok.</p> 64aa2da5cf<br />
|
80 |
-
<br />
|
81 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/_log_render.py
DELETED
@@ -1,94 +0,0 @@
|
|
1 |
-
from datetime import datetime
|
2 |
-
from typing import Iterable, List, Optional, TYPE_CHECKING, Union, Callable
|
3 |
-
|
4 |
-
|
5 |
-
from .text import Text, TextType
|
6 |
-
|
7 |
-
if TYPE_CHECKING:
|
8 |
-
from .console import Console, ConsoleRenderable, RenderableType
|
9 |
-
from .table import Table
|
10 |
-
|
11 |
-
FormatTimeCallable = Callable[[datetime], Text]
|
12 |
-
|
13 |
-
|
14 |
-
class LogRender:
|
15 |
-
def __init__(
|
16 |
-
self,
|
17 |
-
show_time: bool = True,
|
18 |
-
show_level: bool = False,
|
19 |
-
show_path: bool = True,
|
20 |
-
time_format: Union[str, FormatTimeCallable] = "[%x %X]",
|
21 |
-
omit_repeated_times: bool = True,
|
22 |
-
level_width: Optional[int] = 8,
|
23 |
-
) -> None:
|
24 |
-
self.show_time = show_time
|
25 |
-
self.show_level = show_level
|
26 |
-
self.show_path = show_path
|
27 |
-
self.time_format = time_format
|
28 |
-
self.omit_repeated_times = omit_repeated_times
|
29 |
-
self.level_width = level_width
|
30 |
-
self._last_time: Optional[Text] = None
|
31 |
-
|
32 |
-
def __call__(
|
33 |
-
self,
|
34 |
-
console: "Console",
|
35 |
-
renderables: Iterable["ConsoleRenderable"],
|
36 |
-
log_time: Optional[datetime] = None,
|
37 |
-
time_format: Optional[Union[str, FormatTimeCallable]] = None,
|
38 |
-
level: TextType = "",
|
39 |
-
path: Optional[str] = None,
|
40 |
-
line_no: Optional[int] = None,
|
41 |
-
link_path: Optional[str] = None,
|
42 |
-
) -> "Table":
|
43 |
-
from .containers import Renderables
|
44 |
-
from .table import Table
|
45 |
-
|
46 |
-
output = Table.grid(padding=(0, 1))
|
47 |
-
output.expand = True
|
48 |
-
if self.show_time:
|
49 |
-
output.add_column(style="log.time")
|
50 |
-
if self.show_level:
|
51 |
-
output.add_column(style="log.level", width=self.level_width)
|
52 |
-
output.add_column(ratio=1, style="log.message", overflow="fold")
|
53 |
-
if self.show_path and path:
|
54 |
-
output.add_column(style="log.path")
|
55 |
-
row: List["RenderableType"] = []
|
56 |
-
if self.show_time:
|
57 |
-
log_time = log_time or console.get_datetime()
|
58 |
-
time_format = time_format or self.time_format
|
59 |
-
if callable(time_format):
|
60 |
-
log_time_display = time_format(log_time)
|
61 |
-
else:
|
62 |
-
log_time_display = Text(log_time.strftime(time_format))
|
63 |
-
if log_time_display == self._last_time and self.omit_repeated_times:
|
64 |
-
row.append(Text(" " * len(log_time_display)))
|
65 |
-
else:
|
66 |
-
row.append(log_time_display)
|
67 |
-
self._last_time = log_time_display
|
68 |
-
if self.show_level:
|
69 |
-
row.append(level)
|
70 |
-
|
71 |
-
row.append(Renderables(renderables))
|
72 |
-
if self.show_path and path:
|
73 |
-
path_text = Text()
|
74 |
-
path_text.append(
|
75 |
-
path, style=f"link file://{link_path}" if link_path else ""
|
76 |
-
)
|
77 |
-
if line_no:
|
78 |
-
path_text.append(":")
|
79 |
-
path_text.append(
|
80 |
-
f"{line_no}",
|
81 |
-
style=f"link file://{link_path}#{line_no}" if link_path else "",
|
82 |
-
)
|
83 |
-
row.append(path_text)
|
84 |
-
|
85 |
-
output.add_row(*row)
|
86 |
-
return output
|
87 |
-
|
88 |
-
|
89 |
-
if __name__ == "__main__": # pragma: no cover
|
90 |
-
from pip._vendor.rich.console import Console
|
91 |
-
|
92 |
-
c = Console()
|
93 |
-
c.print("[on blue]Hello", justify="right")
|
94 |
-
c.log("[on blue]hello", justify="right")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/logging.py
DELETED
@@ -1,289 +0,0 @@
|
|
1 |
-
import logging
|
2 |
-
from datetime import datetime
|
3 |
-
from logging import Handler, LogRecord
|
4 |
-
from pathlib import Path
|
5 |
-
from types import ModuleType
|
6 |
-
from typing import ClassVar, Iterable, List, Optional, Type, Union
|
7 |
-
|
8 |
-
from pip._vendor.rich._null_file import NullFile
|
9 |
-
|
10 |
-
from . import get_console
|
11 |
-
from ._log_render import FormatTimeCallable, LogRender
|
12 |
-
from .console import Console, ConsoleRenderable
|
13 |
-
from .highlighter import Highlighter, ReprHighlighter
|
14 |
-
from .text import Text
|
15 |
-
from .traceback import Traceback
|
16 |
-
|
17 |
-
|
18 |
-
class RichHandler(Handler):
|
19 |
-
"""A logging handler that renders output with Rich. The time / level / message and file are displayed in columns.
|
20 |
-
The level is color coded, and the message is syntax highlighted.
|
21 |
-
|
22 |
-
Note:
|
23 |
-
Be careful when enabling console markup in log messages if you have configured logging for libraries not
|
24 |
-
under your control. If a dependency writes messages containing square brackets, it may not produce the intended output.
|
25 |
-
|
26 |
-
Args:
|
27 |
-
level (Union[int, str], optional): Log level. Defaults to logging.NOTSET.
|
28 |
-
console (:class:`~rich.console.Console`, optional): Optional console instance to write logs.
|
29 |
-
Default will use a global console instance writing to stdout.
|
30 |
-
show_time (bool, optional): Show a column for the time. Defaults to True.
|
31 |
-
omit_repeated_times (bool, optional): Omit repetition of the same time. Defaults to True.
|
32 |
-
show_level (bool, optional): Show a column for the level. Defaults to True.
|
33 |
-
show_path (bool, optional): Show the path to the original log call. Defaults to True.
|
34 |
-
enable_link_path (bool, optional): Enable terminal link of path column to file. Defaults to True.
|
35 |
-
highlighter (Highlighter, optional): Highlighter to style log messages, or None to use ReprHighlighter. Defaults to None.
|
36 |
-
markup (bool, optional): Enable console markup in log messages. Defaults to False.
|
37 |
-
rich_tracebacks (bool, optional): Enable rich tracebacks with syntax highlighting and formatting. Defaults to False.
|
38 |
-
tracebacks_width (Optional[int], optional): Number of characters used to render tracebacks, or None for full width. Defaults to None.
|
39 |
-
tracebacks_extra_lines (int, optional): Additional lines of code to render tracebacks, or None for full width. Defaults to None.
|
40 |
-
tracebacks_theme (str, optional): Override pygments theme used in traceback.
|
41 |
-
tracebacks_word_wrap (bool, optional): Enable word wrapping of long tracebacks lines. Defaults to True.
|
42 |
-
tracebacks_show_locals (bool, optional): Enable display of locals in tracebacks. Defaults to False.
|
43 |
-
tracebacks_suppress (Sequence[Union[str, ModuleType]]): Optional sequence of modules or paths to exclude from traceback.
|
44 |
-
locals_max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
|
45 |
-
Defaults to 10.
|
46 |
-
locals_max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to 80.
|
47 |
-
log_time_format (Union[str, TimeFormatterCallable], optional): If ``log_time`` is enabled, either string for strftime or callable that formats the time. Defaults to "[%x %X] ".
|
48 |
-
keywords (List[str], optional): List of words to highlight instead of ``RichHandler.KEYWORDS``.
|
49 |
-
"""
|
50 |
-
|
51 |
-
KEYWORDS: ClassVar[Optional[List[str]]] = [
|
52 |
-
"GET",
|
53 |
-
"POST",
|
54 |
-
"HEAD",
|
55 |
-
"PUT",
|
56 |
-
"DELETE",
|
57 |
-
"OPTIONS",
|
58 |
-
"TRACE",
|
59 |
-
"PATCH",
|
60 |
-
]
|
61 |
-
HIGHLIGHTER_CLASS: ClassVar[Type[Highlighter]] = ReprHighlighter
|
62 |
-
|
63 |
-
def __init__(
|
64 |
-
self,
|
65 |
-
level: Union[int, str] = logging.NOTSET,
|
66 |
-
console: Optional[Console] = None,
|
67 |
-
*,
|
68 |
-
show_time: bool = True,
|
69 |
-
omit_repeated_times: bool = True,
|
70 |
-
show_level: bool = True,
|
71 |
-
show_path: bool = True,
|
72 |
-
enable_link_path: bool = True,
|
73 |
-
highlighter: Optional[Highlighter] = None,
|
74 |
-
markup: bool = False,
|
75 |
-
rich_tracebacks: bool = False,
|
76 |
-
tracebacks_width: Optional[int] = None,
|
77 |
-
tracebacks_extra_lines: int = 3,
|
78 |
-
tracebacks_theme: Optional[str] = None,
|
79 |
-
tracebacks_word_wrap: bool = True,
|
80 |
-
tracebacks_show_locals: bool = False,
|
81 |
-
tracebacks_suppress: Iterable[Union[str, ModuleType]] = (),
|
82 |
-
locals_max_length: int = 10,
|
83 |
-
locals_max_string: int = 80,
|
84 |
-
log_time_format: Union[str, FormatTimeCallable] = "[%x %X]",
|
85 |
-
keywords: Optional[List[str]] = None,
|
86 |
-
) -> None:
|
87 |
-
super().__init__(level=level)
|
88 |
-
self.console = console or get_console()
|
89 |
-
self.highlighter = highlighter or self.HIGHLIGHTER_CLASS()
|
90 |
-
self._log_render = LogRender(
|
91 |
-
show_time=show_time,
|
92 |
-
show_level=show_level,
|
93 |
-
show_path=show_path,
|
94 |
-
time_format=log_time_format,
|
95 |
-
omit_repeated_times=omit_repeated_times,
|
96 |
-
level_width=None,
|
97 |
-
)
|
98 |
-
self.enable_link_path = enable_link_path
|
99 |
-
self.markup = markup
|
100 |
-
self.rich_tracebacks = rich_tracebacks
|
101 |
-
self.tracebacks_width = tracebacks_width
|
102 |
-
self.tracebacks_extra_lines = tracebacks_extra_lines
|
103 |
-
self.tracebacks_theme = tracebacks_theme
|
104 |
-
self.tracebacks_word_wrap = tracebacks_word_wrap
|
105 |
-
self.tracebacks_show_locals = tracebacks_show_locals
|
106 |
-
self.tracebacks_suppress = tracebacks_suppress
|
107 |
-
self.locals_max_length = locals_max_length
|
108 |
-
self.locals_max_string = locals_max_string
|
109 |
-
self.keywords = keywords
|
110 |
-
|
111 |
-
def get_level_text(self, record: LogRecord) -> Text:
|
112 |
-
"""Get the level name from the record.
|
113 |
-
|
114 |
-
Args:
|
115 |
-
record (LogRecord): LogRecord instance.
|
116 |
-
|
117 |
-
Returns:
|
118 |
-
Text: A tuple of the style and level name.
|
119 |
-
"""
|
120 |
-
level_name = record.levelname
|
121 |
-
level_text = Text.styled(
|
122 |
-
level_name.ljust(8), f"logging.level.{level_name.lower()}"
|
123 |
-
)
|
124 |
-
return level_text
|
125 |
-
|
126 |
-
def emit(self, record: LogRecord) -> None:
|
127 |
-
"""Invoked by logging."""
|
128 |
-
message = self.format(record)
|
129 |
-
traceback = None
|
130 |
-
if (
|
131 |
-
self.rich_tracebacks
|
132 |
-
and record.exc_info
|
133 |
-
and record.exc_info != (None, None, None)
|
134 |
-
):
|
135 |
-
exc_type, exc_value, exc_traceback = record.exc_info
|
136 |
-
assert exc_type is not None
|
137 |
-
assert exc_value is not None
|
138 |
-
traceback = Traceback.from_exception(
|
139 |
-
exc_type,
|
140 |
-
exc_value,
|
141 |
-
exc_traceback,
|
142 |
-
width=self.tracebacks_width,
|
143 |
-
extra_lines=self.tracebacks_extra_lines,
|
144 |
-
theme=self.tracebacks_theme,
|
145 |
-
word_wrap=self.tracebacks_word_wrap,
|
146 |
-
show_locals=self.tracebacks_show_locals,
|
147 |
-
locals_max_length=self.locals_max_length,
|
148 |
-
locals_max_string=self.locals_max_string,
|
149 |
-
suppress=self.tracebacks_suppress,
|
150 |
-
)
|
151 |
-
message = record.getMessage()
|
152 |
-
if self.formatter:
|
153 |
-
record.message = record.getMessage()
|
154 |
-
formatter = self.formatter
|
155 |
-
if hasattr(formatter, "usesTime") and formatter.usesTime():
|
156 |
-
record.asctime = formatter.formatTime(record, formatter.datefmt)
|
157 |
-
message = formatter.formatMessage(record)
|
158 |
-
|
159 |
-
message_renderable = self.render_message(record, message)
|
160 |
-
log_renderable = self.render(
|
161 |
-
record=record, traceback=traceback, message_renderable=message_renderable
|
162 |
-
)
|
163 |
-
if isinstance(self.console.file, NullFile):
|
164 |
-
# Handles pythonw, where stdout/stderr are null, and we return NullFile
|
165 |
-
# instance from Console.file. In this case, we still want to make a log record
|
166 |
-
# even though we won't be writing anything to a file.
|
167 |
-
self.handleError(record)
|
168 |
-
else:
|
169 |
-
try:
|
170 |
-
self.console.print(log_renderable)
|
171 |
-
except Exception:
|
172 |
-
self.handleError(record)
|
173 |
-
|
174 |
-
def render_message(self, record: LogRecord, message: str) -> "ConsoleRenderable":
|
175 |
-
"""Render message text in to Text.
|
176 |
-
|
177 |
-
Args:
|
178 |
-
record (LogRecord): logging Record.
|
179 |
-
message (str): String containing log message.
|
180 |
-
|
181 |
-
Returns:
|
182 |
-
ConsoleRenderable: Renderable to display log message.
|
183 |
-
"""
|
184 |
-
use_markup = getattr(record, "markup", self.markup)
|
185 |
-
message_text = Text.from_markup(message) if use_markup else Text(message)
|
186 |
-
|
187 |
-
highlighter = getattr(record, "highlighter", self.highlighter)
|
188 |
-
if highlighter:
|
189 |
-
message_text = highlighter(message_text)
|
190 |
-
|
191 |
-
if self.keywords is None:
|
192 |
-
self.keywords = self.KEYWORDS
|
193 |
-
|
194 |
-
if self.keywords:
|
195 |
-
message_text.highlight_words(self.keywords, "logging.keyword")
|
196 |
-
|
197 |
-
return message_text
|
198 |
-
|
199 |
-
def render(
|
200 |
-
self,
|
201 |
-
*,
|
202 |
-
record: LogRecord,
|
203 |
-
traceback: Optional[Traceback],
|
204 |
-
message_renderable: "ConsoleRenderable",
|
205 |
-
) -> "ConsoleRenderable":
|
206 |
-
"""Render log for display.
|
207 |
-
|
208 |
-
Args:
|
209 |
-
record (LogRecord): logging Record.
|
210 |
-
traceback (Optional[Traceback]): Traceback instance or None for no Traceback.
|
211 |
-
message_renderable (ConsoleRenderable): Renderable (typically Text) containing log message contents.
|
212 |
-
|
213 |
-
Returns:
|
214 |
-
ConsoleRenderable: Renderable to display log.
|
215 |
-
"""
|
216 |
-
path = Path(record.pathname).name
|
217 |
-
level = self.get_level_text(record)
|
218 |
-
time_format = None if self.formatter is None else self.formatter.datefmt
|
219 |
-
log_time = datetime.fromtimestamp(record.created)
|
220 |
-
|
221 |
-
log_renderable = self._log_render(
|
222 |
-
self.console,
|
223 |
-
[message_renderable] if not traceback else [message_renderable, traceback],
|
224 |
-
log_time=log_time,
|
225 |
-
time_format=time_format,
|
226 |
-
level=level,
|
227 |
-
path=path,
|
228 |
-
line_no=record.lineno,
|
229 |
-
link_path=record.pathname if self.enable_link_path else None,
|
230 |
-
)
|
231 |
-
return log_renderable
|
232 |
-
|
233 |
-
|
234 |
-
if __name__ == "__main__": # pragma: no cover
|
235 |
-
from time import sleep
|
236 |
-
|
237 |
-
FORMAT = "%(message)s"
|
238 |
-
# FORMAT = "%(asctime)-15s - %(levelname)s - %(message)s"
|
239 |
-
logging.basicConfig(
|
240 |
-
level="NOTSET",
|
241 |
-
format=FORMAT,
|
242 |
-
datefmt="[%X]",
|
243 |
-
handlers=[RichHandler(rich_tracebacks=True, tracebacks_show_locals=True)],
|
244 |
-
)
|
245 |
-
log = logging.getLogger("rich")
|
246 |
-
|
247 |
-
log.info("Server starting...")
|
248 |
-
log.info("Listening on http://127.0.0.1:8080")
|
249 |
-
sleep(1)
|
250 |
-
|
251 |
-
log.info("GET /index.html 200 1298")
|
252 |
-
log.info("GET /imgs/backgrounds/back1.jpg 200 54386")
|
253 |
-
log.info("GET /css/styles.css 200 54386")
|
254 |
-
log.warning("GET /favicon.ico 404 242")
|
255 |
-
sleep(1)
|
256 |
-
|
257 |
-
log.debug(
|
258 |
-
"JSONRPC request\n--> %r\n<-- %r",
|
259 |
-
{
|
260 |
-
"version": "1.1",
|
261 |
-
"method": "confirmFruitPurchase",
|
262 |
-
"params": [["apple", "orange", "mangoes", "pomelo"], 1.123],
|
263 |
-
"id": "194521489",
|
264 |
-
},
|
265 |
-
{"version": "1.1", "result": True, "error": None, "id": "194521489"},
|
266 |
-
)
|
267 |
-
log.debug(
|
268 |
-
"Loading configuration file /adasd/asdasd/qeqwe/qwrqwrqwr/sdgsdgsdg/werwerwer/dfgerert/ertertert/ertetert/werwerwer"
|
269 |
-
)
|
270 |
-
log.error("Unable to find 'pomelo' in database!")
|
271 |
-
log.info("POST /jsonrpc/ 200 65532")
|
272 |
-
log.info("POST /admin/ 401 42234")
|
273 |
-
log.warning("password was rejected for admin site.")
|
274 |
-
|
275 |
-
def divide() -> None:
|
276 |
-
number = 1
|
277 |
-
divisor = 0
|
278 |
-
foos = ["foo"] * 100
|
279 |
-
log.debug("in divide")
|
280 |
-
try:
|
281 |
-
number / divisor
|
282 |
-
except:
|
283 |
-
log.exception("An error of some kind occurred!")
|
284 |
-
|
285 |
-
divide()
|
286 |
-
sleep(1)
|
287 |
-
log.critical("Out of memory!")
|
288 |
-
log.info("Server exited with code=-1")
|
289 |
-
log.info("[bold]EXITING...[/bold]", extra=dict(markup=True))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/utils/logger.py
DELETED
@@ -1,221 +0,0 @@
|
|
1 |
-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
2 |
-
import functools
|
3 |
-
import logging
|
4 |
-
import os
|
5 |
-
import sys
|
6 |
-
import time
|
7 |
-
from collections import Counter
|
8 |
-
from fvcore.common.file_io import PathManager
|
9 |
-
from tabulate import tabulate
|
10 |
-
from termcolor import colored
|
11 |
-
|
12 |
-
|
13 |
-
class _ColorfulFormatter(logging.Formatter):
|
14 |
-
def __init__(self, *args, **kwargs):
|
15 |
-
self._root_name = kwargs.pop("root_name") + "."
|
16 |
-
self._abbrev_name = kwargs.pop("abbrev_name", "")
|
17 |
-
if len(self._abbrev_name):
|
18 |
-
self._abbrev_name = self._abbrev_name + "."
|
19 |
-
super(_ColorfulFormatter, self).__init__(*args, **kwargs)
|
20 |
-
|
21 |
-
def formatMessage(self, record):
|
22 |
-
record.name = record.name.replace(self._root_name, self._abbrev_name)
|
23 |
-
log = super(_ColorfulFormatter, self).formatMessage(record)
|
24 |
-
if record.levelno == logging.WARNING:
|
25 |
-
prefix = colored("WARNING", "red", attrs=["blink"])
|
26 |
-
elif record.levelno == logging.ERROR or record.levelno == logging.CRITICAL:
|
27 |
-
prefix = colored("ERROR", "red", attrs=["blink", "underline"])
|
28 |
-
else:
|
29 |
-
return log
|
30 |
-
return prefix + " " + log
|
31 |
-
|
32 |
-
|
33 |
-
@functools.lru_cache() # so that calling setup_logger multiple times won't add many handlers
|
34 |
-
def setup_logger(
|
35 |
-
output=None, distributed_rank=0, *, color=True, name="detectron2", abbrev_name=None
|
36 |
-
):
|
37 |
-
"""
|
38 |
-
Initialize the detectron2 logger and set its verbosity level to "INFO".
|
39 |
-
|
40 |
-
Args:
|
41 |
-
output (str): a file name or a directory to save log. If None, will not save log file.
|
42 |
-
If ends with ".txt" or ".log", assumed to be a file name.
|
43 |
-
Otherwise, logs will be saved to `output/log.txt`.
|
44 |
-
name (str): the root module name of this logger
|
45 |
-
abbrev_name (str): an abbreviation of the module, to avoid long names in logs.
|
46 |
-
Set to "" to not log the root module in logs.
|
47 |
-
By default, will abbreviate "detectron2" to "d2" and leave other
|
48 |
-
modules unchanged.
|
49 |
-
|
50 |
-
Returns:
|
51 |
-
logging.Logger: a logger
|
52 |
-
"""
|
53 |
-
logger = logging.getLogger(name)
|
54 |
-
logger.setLevel(logging.DEBUG)
|
55 |
-
logger.propagate = False
|
56 |
-
|
57 |
-
if abbrev_name is None:
|
58 |
-
abbrev_name = "d2" if name == "detectron2" else name
|
59 |
-
|
60 |
-
plain_formatter = logging.Formatter(
|
61 |
-
"[%(asctime)s] %(name)s %(levelname)s: %(message)s", datefmt="%m/%d %H:%M:%S"
|
62 |
-
)
|
63 |
-
# stdout logging: master only
|
64 |
-
if distributed_rank == 0:
|
65 |
-
ch = logging.StreamHandler(stream=sys.stdout)
|
66 |
-
ch.setLevel(logging.DEBUG)
|
67 |
-
if color:
|
68 |
-
formatter = _ColorfulFormatter(
|
69 |
-
colored("[%(asctime)s %(name)s]: ", "green") + "%(message)s",
|
70 |
-
datefmt="%m/%d %H:%M:%S",
|
71 |
-
root_name=name,
|
72 |
-
abbrev_name=str(abbrev_name),
|
73 |
-
)
|
74 |
-
else:
|
75 |
-
formatter = plain_formatter
|
76 |
-
ch.setFormatter(formatter)
|
77 |
-
logger.addHandler(ch)
|
78 |
-
|
79 |
-
# file logging: all workers
|
80 |
-
if output is not None:
|
81 |
-
if output.endswith(".txt") or output.endswith(".log"):
|
82 |
-
filename = output
|
83 |
-
else:
|
84 |
-
filename = os.path.join(output, "log.txt")
|
85 |
-
if distributed_rank > 0:
|
86 |
-
filename = filename + ".rank{}".format(distributed_rank)
|
87 |
-
PathManager.mkdirs(os.path.dirname(filename))
|
88 |
-
|
89 |
-
fh = logging.StreamHandler(_cached_log_stream(filename))
|
90 |
-
fh.setLevel(logging.DEBUG)
|
91 |
-
fh.setFormatter(plain_formatter)
|
92 |
-
logger.addHandler(fh)
|
93 |
-
|
94 |
-
return logger
|
95 |
-
|
96 |
-
|
97 |
-
# cache the opened file object, so that different calls to `setup_logger`
|
98 |
-
# with the same file name can safely write to the same file.
|
99 |
-
@functools.lru_cache(maxsize=None)
|
100 |
-
def _cached_log_stream(filename):
|
101 |
-
return PathManager.open(filename, "a")
|
102 |
-
|
103 |
-
|
104 |
-
"""
|
105 |
-
Below are some other convenient logging methods.
|
106 |
-
They are mainly adopted from
|
107 |
-
https://github.com/abseil/abseil-py/blob/master/absl/logging/__init__.py
|
108 |
-
"""
|
109 |
-
|
110 |
-
|
111 |
-
def _find_caller():
|
112 |
-
"""
|
113 |
-
Returns:
|
114 |
-
str: module name of the caller
|
115 |
-
tuple: a hashable key to be used to identify different callers
|
116 |
-
"""
|
117 |
-
frame = sys._getframe(2)
|
118 |
-
while frame:
|
119 |
-
code = frame.f_code
|
120 |
-
if os.path.join("utils", "logger.") not in code.co_filename:
|
121 |
-
mod_name = frame.f_globals["__name__"]
|
122 |
-
if mod_name == "__main__":
|
123 |
-
mod_name = "detectron2"
|
124 |
-
return mod_name, (code.co_filename, frame.f_lineno, code.co_name)
|
125 |
-
frame = frame.f_back
|
126 |
-
|
127 |
-
|
128 |
-
_LOG_COUNTER = Counter()
|
129 |
-
_LOG_TIMER = {}
|
130 |
-
|
131 |
-
|
132 |
-
def log_first_n(lvl, msg, n=1, *, name=None, key="caller"):
|
133 |
-
"""
|
134 |
-
Log only for the first n times.
|
135 |
-
|
136 |
-
Args:
|
137 |
-
lvl (int): the logging level
|
138 |
-
msg (str):
|
139 |
-
n (int):
|
140 |
-
name (str): name of the logger to use. Will use the caller's module by default.
|
141 |
-
key (str or tuple[str]): the string(s) can be one of "caller" or
|
142 |
-
"message", which defines how to identify duplicated logs.
|
143 |
-
For example, if called with `n=1, key="caller"`, this function
|
144 |
-
will only log the first call from the same caller, regardless of
|
145 |
-
the message content.
|
146 |
-
If called with `n=1, key="message"`, this function will log the
|
147 |
-
same content only once, even if they are called from different places.
|
148 |
-
If called with `n=1, key=("caller", "message")`, this function
|
149 |
-
will not log only if the same caller has logged the same message before.
|
150 |
-
"""
|
151 |
-
if isinstance(key, str):
|
152 |
-
key = (key,)
|
153 |
-
assert len(key) > 0
|
154 |
-
|
155 |
-
caller_module, caller_key = _find_caller()
|
156 |
-
hash_key = ()
|
157 |
-
if "caller" in key:
|
158 |
-
hash_key = hash_key + caller_key
|
159 |
-
if "message" in key:
|
160 |
-
hash_key = hash_key + (msg,)
|
161 |
-
|
162 |
-
_LOG_COUNTER[hash_key] += 1
|
163 |
-
if _LOG_COUNTER[hash_key] <= n:
|
164 |
-
logging.getLogger(name or caller_module).log(lvl, msg)
|
165 |
-
|
166 |
-
|
167 |
-
def log_every_n(lvl, msg, n=1, *, name=None):
|
168 |
-
"""
|
169 |
-
Log once per n times.
|
170 |
-
|
171 |
-
Args:
|
172 |
-
lvl (int): the logging level
|
173 |
-
msg (str):
|
174 |
-
n (int):
|
175 |
-
name (str): name of the logger to use. Will use the caller's module by default.
|
176 |
-
"""
|
177 |
-
caller_module, key = _find_caller()
|
178 |
-
_LOG_COUNTER[key] += 1
|
179 |
-
if n == 1 or _LOG_COUNTER[key] % n == 1:
|
180 |
-
logging.getLogger(name or caller_module).log(lvl, msg)
|
181 |
-
|
182 |
-
|
183 |
-
def log_every_n_seconds(lvl, msg, n=1, *, name=None):
|
184 |
-
"""
|
185 |
-
Log no more than once per n seconds.
|
186 |
-
|
187 |
-
Args:
|
188 |
-
lvl (int): the logging level
|
189 |
-
msg (str):
|
190 |
-
n (int):
|
191 |
-
name (str): name of the logger to use. Will use the caller's module by default.
|
192 |
-
"""
|
193 |
-
caller_module, key = _find_caller()
|
194 |
-
last_logged = _LOG_TIMER.get(key, None)
|
195 |
-
current_time = time.time()
|
196 |
-
if last_logged is None or current_time - last_logged >= n:
|
197 |
-
logging.getLogger(name or caller_module).log(lvl, msg)
|
198 |
-
_LOG_TIMER[key] = current_time
|
199 |
-
|
200 |
-
|
201 |
-
def create_small_table(small_dict):
|
202 |
-
"""
|
203 |
-
Create a small table using the keys of small_dict as headers. This is only
|
204 |
-
suitable for small dictionaries.
|
205 |
-
|
206 |
-
Args:
|
207 |
-
small_dict (dict): a result dictionary of only a few items.
|
208 |
-
|
209 |
-
Returns:
|
210 |
-
str: the table as a string.
|
211 |
-
"""
|
212 |
-
keys, values = tuple(zip(*small_dict.items()))
|
213 |
-
table = tabulate(
|
214 |
-
[values],
|
215 |
-
headers=keys,
|
216 |
-
tablefmt="pipe",
|
217 |
-
floatfmt=".3f",
|
218 |
-
stralign="center",
|
219 |
-
numalign="center",
|
220 |
-
)
|
221 |
-
return table
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/pybind11/tests/test_docstring_options.cpp
DELETED
@@ -1,61 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
tests/test_docstring_options.cpp -- generation of docstrings and signatures
|
3 |
-
|
4 |
-
Copyright (c) 2016 Wenzel Jakob <[email protected]>
|
5 |
-
|
6 |
-
All rights reserved. Use of this source code is governed by a
|
7 |
-
BSD-style license that can be found in the LICENSE file.
|
8 |
-
*/
|
9 |
-
|
10 |
-
#include "pybind11_tests.h"
|
11 |
-
|
12 |
-
TEST_SUBMODULE(docstring_options, m) {
|
13 |
-
// test_docstring_options
|
14 |
-
{
|
15 |
-
py::options options;
|
16 |
-
options.disable_function_signatures();
|
17 |
-
|
18 |
-
m.def("test_function1", [](int, int) {}, py::arg("a"), py::arg("b"));
|
19 |
-
m.def("test_function2", [](int, int) {}, py::arg("a"), py::arg("b"), "A custom docstring");
|
20 |
-
|
21 |
-
m.def("test_overloaded1", [](int) {}, py::arg("i"), "Overload docstring");
|
22 |
-
m.def("test_overloaded1", [](double) {}, py::arg("d"));
|
23 |
-
|
24 |
-
m.def("test_overloaded2", [](int) {}, py::arg("i"), "overload docstring 1");
|
25 |
-
m.def("test_overloaded2", [](double) {}, py::arg("d"), "overload docstring 2");
|
26 |
-
|
27 |
-
m.def("test_overloaded3", [](int) {}, py::arg("i"));
|
28 |
-
m.def("test_overloaded3", [](double) {}, py::arg("d"), "Overload docstr");
|
29 |
-
|
30 |
-
options.enable_function_signatures();
|
31 |
-
|
32 |
-
m.def("test_function3", [](int, int) {}, py::arg("a"), py::arg("b"));
|
33 |
-
m.def("test_function4", [](int, int) {}, py::arg("a"), py::arg("b"), "A custom docstring");
|
34 |
-
|
35 |
-
options.disable_function_signatures().disable_user_defined_docstrings();
|
36 |
-
|
37 |
-
m.def("test_function5", [](int, int) {}, py::arg("a"), py::arg("b"), "A custom docstring");
|
38 |
-
|
39 |
-
{
|
40 |
-
py::options nested_options;
|
41 |
-
nested_options.enable_user_defined_docstrings();
|
42 |
-
m.def("test_function6", [](int, int) {}, py::arg("a"), py::arg("b"), "A custom docstring");
|
43 |
-
}
|
44 |
-
}
|
45 |
-
|
46 |
-
m.def("test_function7", [](int, int) {}, py::arg("a"), py::arg("b"), "A custom docstring");
|
47 |
-
|
48 |
-
{
|
49 |
-
py::options options;
|
50 |
-
options.disable_user_defined_docstrings();
|
51 |
-
|
52 |
-
struct DocstringTestFoo {
|
53 |
-
int value;
|
54 |
-
void setValue(int v) { value = v; }
|
55 |
-
int getValue() const { return value; }
|
56 |
-
};
|
57 |
-
py::class_<DocstringTestFoo>(m, "DocstringTestFoo", "This is a class docstring")
|
58 |
-
.def_property("value_prop", &DocstringTestFoo::getValue, &DocstringTestFoo::setValue, "This is a property docstring")
|
59 |
-
;
|
60 |
-
}
|
61 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/dependencies/cub/test/test_util.h
DELETED
@@ -1,1648 +0,0 @@
|
|
1 |
-
/******************************************************************************
|
2 |
-
* Copyright (c) 2011, Duane Merrill. All rights reserved.
|
3 |
-
* Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
|
4 |
-
*
|
5 |
-
* Redistribution and use in source and binary forms, with or without
|
6 |
-
* modification, are permitted provided that the following conditions are met:
|
7 |
-
* * Redistributions of source code must retain the above copyright
|
8 |
-
* notice, this list of conditions and the following disclaimer.
|
9 |
-
* * Redistributions in binary form must reproduce the above copyright
|
10 |
-
* notice, this list of conditions and the following disclaimer in the
|
11 |
-
* documentation and/or other materials provided with the distribution.
|
12 |
-
* * Neither the name of the NVIDIA CORPORATION nor the
|
13 |
-
* names of its contributors may be used to endorse or promote products
|
14 |
-
* derived from this software without specific prior written permission.
|
15 |
-
*
|
16 |
-
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
17 |
-
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
18 |
-
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
19 |
-
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
|
20 |
-
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
21 |
-
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
22 |
-
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
23 |
-
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
24 |
-
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
25 |
-
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
26 |
-
*
|
27 |
-
******************************************************************************/
|
28 |
-
|
29 |
-
|
30 |
-
#pragma once
|
31 |
-
|
32 |
-
#if defined(_WIN32) || defined(_WIN64)
|
33 |
-
#include <windows.h>
|
34 |
-
#undef small // Windows is terrible for polluting macro namespace
|
35 |
-
#else
|
36 |
-
#include <sys/resource.h>
|
37 |
-
#endif
|
38 |
-
|
39 |
-
#include <cuda_runtime.h>
|
40 |
-
|
41 |
-
#include <stdio.h>
|
42 |
-
#include <float.h>
|
43 |
-
|
44 |
-
#include <cmath>
|
45 |
-
#include <string>
|
46 |
-
#include <vector>
|
47 |
-
#include <sstream>
|
48 |
-
#include <iostream>
|
49 |
-
#include <limits>
|
50 |
-
|
51 |
-
#include "mersenne.h"
|
52 |
-
#include "half.h"
|
53 |
-
|
54 |
-
#include "cub/util_debug.cuh"
|
55 |
-
#include "cub/util_device.cuh"
|
56 |
-
#include "cub/util_type.cuh"
|
57 |
-
#include "cub/util_macro.cuh"
|
58 |
-
#include "cub/iterator/discard_output_iterator.cuh"
|
59 |
-
|
60 |
-
/******************************************************************************
|
61 |
-
* Type conversion macros
|
62 |
-
******************************************************************************/
|
63 |
-
|
64 |
-
/**
|
65 |
-
* Return a value of type `T` with the same bitwise representation of `in`.
|
66 |
-
* Types `T` and `U` must be the same size.
|
67 |
-
*/
|
68 |
-
template <typename T, typename U>
|
69 |
-
T SafeBitCast(const U& in)
|
70 |
-
{
|
71 |
-
static_assert(sizeof(T) == sizeof(U), "Types must be same size.");
|
72 |
-
T out;
|
73 |
-
memcpy(&out, &in, sizeof(T));
|
74 |
-
return out;
|
75 |
-
}
|
76 |
-
|
77 |
-
/******************************************************************************
|
78 |
-
* Assertion macros
|
79 |
-
******************************************************************************/
|
80 |
-
|
81 |
-
/**
|
82 |
-
* Assert equals
|
83 |
-
*/
|
84 |
-
#define AssertEquals(a, b) if ((a) != (b)) { std::cerr << "\n(" << __FILE__ << ": " << __LINE__ << ")\n"; exit(1);}
|
85 |
-
|
86 |
-
|
87 |
-
/******************************************************************************
|
88 |
-
* Command-line parsing functionality
|
89 |
-
******************************************************************************/
|
90 |
-
|
91 |
-
/**
|
92 |
-
* Utility for parsing command line arguments
|
93 |
-
*/
|
94 |
-
struct CommandLineArgs
|
95 |
-
{
|
96 |
-
|
97 |
-
std::vector<std::string> keys;
|
98 |
-
std::vector<std::string> values;
|
99 |
-
std::vector<std::string> args;
|
100 |
-
cudaDeviceProp deviceProp;
|
101 |
-
float device_giga_bandwidth;
|
102 |
-
size_t device_free_physmem;
|
103 |
-
size_t device_total_physmem;
|
104 |
-
|
105 |
-
/**
|
106 |
-
* Constructor
|
107 |
-
*/
|
108 |
-
CommandLineArgs(int argc, char **argv) :
|
109 |
-
keys(10),
|
110 |
-
values(10)
|
111 |
-
{
|
112 |
-
using namespace std;
|
113 |
-
|
114 |
-
// Initialize mersenne generator
|
115 |
-
unsigned int mersenne_init[4]= {0x123, 0x234, 0x345, 0x456};
|
116 |
-
mersenne::init_by_array(mersenne_init, 4);
|
117 |
-
|
118 |
-
for (int i = 1; i < argc; i++)
|
119 |
-
{
|
120 |
-
string arg = argv[i];
|
121 |
-
|
122 |
-
if ((arg[0] != '-') || (arg[1] != '-'))
|
123 |
-
{
|
124 |
-
args.push_back(arg);
|
125 |
-
continue;
|
126 |
-
}
|
127 |
-
|
128 |
-
string::size_type pos;
|
129 |
-
string key, val;
|
130 |
-
if ((pos = arg.find('=')) == string::npos) {
|
131 |
-
key = string(arg, 2, arg.length() - 2);
|
132 |
-
val = "";
|
133 |
-
} else {
|
134 |
-
key = string(arg, 2, pos - 2);
|
135 |
-
val = string(arg, pos + 1, arg.length() - 1);
|
136 |
-
}
|
137 |
-
|
138 |
-
keys.push_back(key);
|
139 |
-
values.push_back(val);
|
140 |
-
}
|
141 |
-
}
|
142 |
-
|
143 |
-
|
144 |
-
/**
|
145 |
-
* Checks whether a flag "--<flag>" is present in the commandline
|
146 |
-
*/
|
147 |
-
bool CheckCmdLineFlag(const char* arg_name)
|
148 |
-
{
|
149 |
-
using namespace std;
|
150 |
-
|
151 |
-
for (int i = 0; i < int(keys.size()); ++i)
|
152 |
-
{
|
153 |
-
if (keys[i] == string(arg_name))
|
154 |
-
return true;
|
155 |
-
}
|
156 |
-
return false;
|
157 |
-
}
|
158 |
-
|
159 |
-
|
160 |
-
/**
|
161 |
-
* Returns number of naked (non-flag and non-key-value) commandline parameters
|
162 |
-
*/
|
163 |
-
template <typename T>
|
164 |
-
int NumNakedArgs()
|
165 |
-
{
|
166 |
-
return args.size();
|
167 |
-
}
|
168 |
-
|
169 |
-
|
170 |
-
/**
|
171 |
-
* Returns the commandline parameter for a given index (not including flags)
|
172 |
-
*/
|
173 |
-
template <typename T>
|
174 |
-
void GetCmdLineArgument(int index, T &val)
|
175 |
-
{
|
176 |
-
using namespace std;
|
177 |
-
if (index < args.size()) {
|
178 |
-
istringstream str_stream(args[index]);
|
179 |
-
str_stream >> val;
|
180 |
-
}
|
181 |
-
}
|
182 |
-
|
183 |
-
/**
|
184 |
-
* Returns the value specified for a given commandline parameter --<flag>=<value>
|
185 |
-
*/
|
186 |
-
template <typename T>
|
187 |
-
void GetCmdLineArgument(const char *arg_name, T &val)
|
188 |
-
{
|
189 |
-
using namespace std;
|
190 |
-
|
191 |
-
for (int i = 0; i < int(keys.size()); ++i)
|
192 |
-
{
|
193 |
-
if (keys[i] == string(arg_name))
|
194 |
-
{
|
195 |
-
istringstream str_stream(values[i]);
|
196 |
-
str_stream >> val;
|
197 |
-
}
|
198 |
-
}
|
199 |
-
}
|
200 |
-
|
201 |
-
|
202 |
-
/**
|
203 |
-
* Returns the values specified for a given commandline parameter --<flag>=<value>,<value>*
|
204 |
-
*/
|
205 |
-
template <typename T>
|
206 |
-
void GetCmdLineArguments(const char *arg_name, std::vector<T> &vals)
|
207 |
-
{
|
208 |
-
using namespace std;
|
209 |
-
|
210 |
-
if (CheckCmdLineFlag(arg_name))
|
211 |
-
{
|
212 |
-
// Clear any default values
|
213 |
-
vals.clear();
|
214 |
-
|
215 |
-
// Recover from multi-value string
|
216 |
-
for (int i = 0; i < keys.size(); ++i)
|
217 |
-
{
|
218 |
-
if (keys[i] == string(arg_name))
|
219 |
-
{
|
220 |
-
string val_string(values[i]);
|
221 |
-
istringstream str_stream(val_string);
|
222 |
-
string::size_type old_pos = 0;
|
223 |
-
string::size_type new_pos = 0;
|
224 |
-
|
225 |
-
// Iterate comma-separated values
|
226 |
-
T val;
|
227 |
-
while ((new_pos = val_string.find(',', old_pos)) != string::npos)
|
228 |
-
{
|
229 |
-
if (new_pos != old_pos)
|
230 |
-
{
|
231 |
-
str_stream.width(new_pos - old_pos);
|
232 |
-
str_stream >> val;
|
233 |
-
vals.push_back(val);
|
234 |
-
}
|
235 |
-
|
236 |
-
// skip over comma
|
237 |
-
str_stream.ignore(1);
|
238 |
-
old_pos = new_pos + 1;
|
239 |
-
}
|
240 |
-
|
241 |
-
// Read last value
|
242 |
-
str_stream >> val;
|
243 |
-
vals.push_back(val);
|
244 |
-
}
|
245 |
-
}
|
246 |
-
}
|
247 |
-
}
|
248 |
-
|
249 |
-
|
250 |
-
/**
|
251 |
-
* The number of pairs parsed
|
252 |
-
*/
|
253 |
-
int ParsedArgc()
|
254 |
-
{
|
255 |
-
return (int) keys.size();
|
256 |
-
}
|
257 |
-
|
258 |
-
/**
|
259 |
-
* Initialize device
|
260 |
-
*/
|
261 |
-
cudaError_t DeviceInit(int dev = -1)
|
262 |
-
{
|
263 |
-
cudaError_t error = cudaSuccess;
|
264 |
-
|
265 |
-
do
|
266 |
-
{
|
267 |
-
int deviceCount;
|
268 |
-
error = CubDebug(cudaGetDeviceCount(&deviceCount));
|
269 |
-
if (error) break;
|
270 |
-
|
271 |
-
if (deviceCount == 0) {
|
272 |
-
fprintf(stderr, "No devices supporting CUDA.\n");
|
273 |
-
exit(1);
|
274 |
-
}
|
275 |
-
if (dev < 0)
|
276 |
-
{
|
277 |
-
GetCmdLineArgument("device", dev);
|
278 |
-
}
|
279 |
-
if ((dev > deviceCount - 1) || (dev < 0))
|
280 |
-
{
|
281 |
-
dev = 0;
|
282 |
-
}
|
283 |
-
|
284 |
-
error = CubDebug(cudaSetDevice(dev));
|
285 |
-
if (error) break;
|
286 |
-
|
287 |
-
CubDebugExit(cudaMemGetInfo(&device_free_physmem, &device_total_physmem));
|
288 |
-
|
289 |
-
int ptx_version = 0;
|
290 |
-
error = CubDebug(cub::PtxVersion(ptx_version));
|
291 |
-
if (error) break;
|
292 |
-
|
293 |
-
error = CubDebug(cudaGetDeviceProperties(&deviceProp, dev));
|
294 |
-
if (error) break;
|
295 |
-
|
296 |
-
if (deviceProp.major < 1) {
|
297 |
-
fprintf(stderr, "Device does not support CUDA.\n");
|
298 |
-
exit(1);
|
299 |
-
}
|
300 |
-
|
301 |
-
device_giga_bandwidth = float(deviceProp.memoryBusWidth) * deviceProp.memoryClockRate * 2 / 8 / 1000 / 1000;
|
302 |
-
|
303 |
-
if (!CheckCmdLineFlag("quiet"))
|
304 |
-
{
|
305 |
-
printf(
|
306 |
-
"Using device %d: %s (PTX version %d, SM%d, %d SMs, "
|
307 |
-
"%lld free / %lld total MB physmem, "
|
308 |
-
"%.3f GB/s @ %d kHz mem clock, ECC %s)\n",
|
309 |
-
dev,
|
310 |
-
deviceProp.name,
|
311 |
-
ptx_version,
|
312 |
-
deviceProp.major * 100 + deviceProp.minor * 10,
|
313 |
-
deviceProp.multiProcessorCount,
|
314 |
-
(unsigned long long) device_free_physmem / 1024 / 1024,
|
315 |
-
(unsigned long long) device_total_physmem / 1024 / 1024,
|
316 |
-
device_giga_bandwidth,
|
317 |
-
deviceProp.memoryClockRate,
|
318 |
-
(deviceProp.ECCEnabled) ? "on" : "off");
|
319 |
-
fflush(stdout);
|
320 |
-
}
|
321 |
-
|
322 |
-
} while (0);
|
323 |
-
|
324 |
-
return error;
|
325 |
-
}
|
326 |
-
};
|
327 |
-
|
328 |
-
/******************************************************************************
|
329 |
-
* Random bits generator
|
330 |
-
******************************************************************************/
|
331 |
-
|
332 |
-
int g_num_rand_samples = 0;
|
333 |
-
|
334 |
-
|
335 |
-
template <typename T>
|
336 |
-
bool IsNaN(T /* val */) { return false; }
|
337 |
-
|
338 |
-
template<>
|
339 |
-
__noinline__ bool IsNaN<float>(float val)
|
340 |
-
{
|
341 |
-
return std::isnan(val);
|
342 |
-
}
|
343 |
-
|
344 |
-
template<>
|
345 |
-
__noinline__ bool IsNaN<float1>(float1 val)
|
346 |
-
{
|
347 |
-
return (IsNaN(val.x));
|
348 |
-
}
|
349 |
-
|
350 |
-
template<>
|
351 |
-
__noinline__ bool IsNaN<float2>(float2 val)
|
352 |
-
{
|
353 |
-
return (IsNaN(val.y) || IsNaN(val.x));
|
354 |
-
}
|
355 |
-
|
356 |
-
template<>
|
357 |
-
__noinline__ bool IsNaN<float3>(float3 val)
|
358 |
-
{
|
359 |
-
return (IsNaN(val.z) || IsNaN(val.y) || IsNaN(val.x));
|
360 |
-
}
|
361 |
-
|
362 |
-
template<>
|
363 |
-
__noinline__ bool IsNaN<float4>(float4 val)
|
364 |
-
{
|
365 |
-
return (IsNaN(val.y) || IsNaN(val.x) || IsNaN(val.w) || IsNaN(val.z));
|
366 |
-
}
|
367 |
-
|
368 |
-
template<>
|
369 |
-
__noinline__ bool IsNaN<double>(double val)
|
370 |
-
{
|
371 |
-
return std::isnan(val);
|
372 |
-
}
|
373 |
-
|
374 |
-
template<>
|
375 |
-
__noinline__ bool IsNaN<double1>(double1 val)
|
376 |
-
{
|
377 |
-
return (IsNaN(val.x));
|
378 |
-
}
|
379 |
-
|
380 |
-
template<>
|
381 |
-
__noinline__ bool IsNaN<double2>(double2 val)
|
382 |
-
{
|
383 |
-
return (IsNaN(val.y) || IsNaN(val.x));
|
384 |
-
}
|
385 |
-
|
386 |
-
template<>
|
387 |
-
__noinline__ bool IsNaN<double3>(double3 val)
|
388 |
-
{
|
389 |
-
return (IsNaN(val.z) || IsNaN(val.y) || IsNaN(val.x));
|
390 |
-
}
|
391 |
-
|
392 |
-
template<>
|
393 |
-
__noinline__ bool IsNaN<double4>(double4 val)
|
394 |
-
{
|
395 |
-
return (IsNaN(val.y) || IsNaN(val.x) || IsNaN(val.w) || IsNaN(val.z));
|
396 |
-
}
|
397 |
-
|
398 |
-
|
399 |
-
template<>
|
400 |
-
__noinline__ bool IsNaN<half_t>(half_t val)
|
401 |
-
{
|
402 |
-
const auto bits = SafeBitCast<unsigned short>(val);
|
403 |
-
|
404 |
-
// commented bit is always true, leaving for documentation:
|
405 |
-
return (((bits >= 0x7C01) && (bits <= 0x7FFF)) ||
|
406 |
-
((bits >= 0xFC01) /*&& (bits <= 0xFFFFFFFF)*/));
|
407 |
-
}
|
408 |
-
|
409 |
-
|
410 |
-
|
411 |
-
/**
|
412 |
-
* Generates random keys.
|
413 |
-
*
|
414 |
-
* We always take the second-order byte from rand() because the higher-order
|
415 |
-
* bits returned by rand() are commonly considered more uniformly distributed
|
416 |
-
* than the lower-order bits.
|
417 |
-
*
|
418 |
-
* We can decrease the entropy level of keys by adopting the technique
|
419 |
-
* of Thearling and Smith in which keys are computed from the bitwise AND of
|
420 |
-
* multiple random samples:
|
421 |
-
*
|
422 |
-
* entropy_reduction | Effectively-unique bits per key
|
423 |
-
* -----------------------------------------------------
|
424 |
-
* -1 | 0
|
425 |
-
* 0 | 32
|
426 |
-
* 1 | 25.95 (81%)
|
427 |
-
* 2 | 17.41 (54%)
|
428 |
-
* 3 | 10.78 (34%)
|
429 |
-
* 4 | 6.42 (20%)
|
430 |
-
* ... | ...
|
431 |
-
*
|
432 |
-
*/
|
433 |
-
template <typename K>
|
434 |
-
void RandomBits(
|
435 |
-
K &key,
|
436 |
-
int entropy_reduction = 0,
|
437 |
-
int begin_bit = 0,
|
438 |
-
int end_bit = sizeof(K) * 8)
|
439 |
-
{
|
440 |
-
const int NUM_BYTES = sizeof(K);
|
441 |
-
const int WORD_BYTES = sizeof(unsigned int);
|
442 |
-
const int NUM_WORDS = (NUM_BYTES + WORD_BYTES - 1) / WORD_BYTES;
|
443 |
-
|
444 |
-
unsigned int word_buff[NUM_WORDS];
|
445 |
-
|
446 |
-
if (entropy_reduction == -1)
|
447 |
-
{
|
448 |
-
memset((void *) &key, 0, sizeof(key));
|
449 |
-
return;
|
450 |
-
}
|
451 |
-
|
452 |
-
if (end_bit < 0)
|
453 |
-
end_bit = sizeof(K) * 8;
|
454 |
-
|
455 |
-
while (true)
|
456 |
-
{
|
457 |
-
// Generate random word_buff
|
458 |
-
for (int j = 0; j < NUM_WORDS; j++)
|
459 |
-
{
|
460 |
-
int current_bit = j * WORD_BYTES * 8;
|
461 |
-
|
462 |
-
unsigned int word = 0xffffffff;
|
463 |
-
word &= 0xffffffff << CUB_MAX(0, begin_bit - current_bit);
|
464 |
-
word &= 0xffffffff >> CUB_MAX(0, (current_bit + (WORD_BYTES * 8)) - end_bit);
|
465 |
-
|
466 |
-
for (int i = 0; i <= entropy_reduction; i++)
|
467 |
-
{
|
468 |
-
// Grab some of the higher bits from rand (better entropy, supposedly)
|
469 |
-
word &= mersenne::genrand_int32();
|
470 |
-
g_num_rand_samples++;
|
471 |
-
}
|
472 |
-
|
473 |
-
word_buff[j] = word;
|
474 |
-
}
|
475 |
-
|
476 |
-
memcpy(&key, word_buff, sizeof(K));
|
477 |
-
|
478 |
-
K copy = key;
|
479 |
-
if (!IsNaN(copy))
|
480 |
-
break; // avoids NaNs when generating random floating point numbers
|
481 |
-
}
|
482 |
-
}
|
483 |
-
|
484 |
-
/// Randomly select number between [0:max)
|
485 |
-
template <typename T>
|
486 |
-
T RandomValue(T max)
|
487 |
-
{
|
488 |
-
unsigned int bits;
|
489 |
-
unsigned int max_int = (unsigned int) -1;
|
490 |
-
do {
|
491 |
-
RandomBits(bits);
|
492 |
-
} while (bits == max_int);
|
493 |
-
|
494 |
-
return (T) ((double(bits) / double(max_int)) * double(max));
|
495 |
-
}
|
496 |
-
|
497 |
-
|
498 |
-
/******************************************************************************
|
499 |
-
* Console printing utilities
|
500 |
-
******************************************************************************/
|
501 |
-
|
502 |
-
/**
|
503 |
-
* Helper for casting character types to integers for cout printing
|
504 |
-
*/
|
505 |
-
template <typename T>
|
506 |
-
T CoutCast(T val) { return val; }
|
507 |
-
|
508 |
-
int CoutCast(char val) { return val; }
|
509 |
-
|
510 |
-
int CoutCast(unsigned char val) { return val; }
|
511 |
-
|
512 |
-
int CoutCast(signed char val) { return val; }
|
513 |
-
|
514 |
-
|
515 |
-
|
516 |
-
/******************************************************************************
|
517 |
-
* Test value initialization utilities
|
518 |
-
******************************************************************************/
|
519 |
-
|
520 |
-
/**
|
521 |
-
* Test problem generation options
|
522 |
-
*/
|
523 |
-
enum GenMode
|
524 |
-
{
|
525 |
-
UNIFORM, // Assign to '2', regardless of integer seed
|
526 |
-
INTEGER_SEED, // Assign to integer seed
|
527 |
-
RANDOM, // Assign to random, regardless of integer seed
|
528 |
-
RANDOM_BIT, // Assign to randomly chosen 0 or 1, regardless of integer seed
|
529 |
-
};
|
530 |
-
|
531 |
-
/**
|
532 |
-
* Initialize value
|
533 |
-
*/
|
534 |
-
template <typename T>
|
535 |
-
__host__ __device__ __forceinline__ void InitValue(GenMode gen_mode, T &value, int index = 0)
|
536 |
-
{
|
537 |
-
switch (gen_mode)
|
538 |
-
{
|
539 |
-
#if (CUB_PTX_ARCH == 0)
|
540 |
-
case RANDOM:
|
541 |
-
RandomBits(value);
|
542 |
-
break;
|
543 |
-
case RANDOM_BIT:
|
544 |
-
char c;
|
545 |
-
RandomBits(c, 0, 0, 1);
|
546 |
-
value = (c > 0) ? (T) 1 : (T) -1;
|
547 |
-
break;
|
548 |
-
#endif
|
549 |
-
case UNIFORM:
|
550 |
-
value = 2;
|
551 |
-
break;
|
552 |
-
case INTEGER_SEED:
|
553 |
-
default:
|
554 |
-
value = (T) index;
|
555 |
-
break;
|
556 |
-
}
|
557 |
-
}
|
558 |
-
|
559 |
-
|
560 |
-
/**
|
561 |
-
* Initialize value (bool)
|
562 |
-
*/
|
563 |
-
__host__ __device__ __forceinline__ void InitValue(GenMode gen_mode, bool &value, int index = 0)
|
564 |
-
{
|
565 |
-
switch (gen_mode)
|
566 |
-
{
|
567 |
-
#if (CUB_PTX_ARCH == 0)
|
568 |
-
case RANDOM:
|
569 |
-
case RANDOM_BIT:
|
570 |
-
char c;
|
571 |
-
RandomBits(c, 0, 0, 1);
|
572 |
-
value = (c > 0);
|
573 |
-
break;
|
574 |
-
#endif
|
575 |
-
case UNIFORM:
|
576 |
-
value = true;
|
577 |
-
break;
|
578 |
-
case INTEGER_SEED:
|
579 |
-
default:
|
580 |
-
value = (index > 0);
|
581 |
-
break;
|
582 |
-
}
|
583 |
-
}
|
584 |
-
|
585 |
-
|
586 |
-
/**
|
587 |
-
* cub::NullType test initialization
|
588 |
-
*/
|
589 |
-
__host__ __device__ __forceinline__ void InitValue(GenMode /* gen_mode */,
|
590 |
-
cub::NullType &/* value */,
|
591 |
-
int /* index */ = 0)
|
592 |
-
{}
|
593 |
-
|
594 |
-
|
595 |
-
/**
|
596 |
-
* cub::KeyValuePair<OffsetT, ValueT>test initialization
|
597 |
-
*/
|
598 |
-
template <typename KeyT, typename ValueT>
|
599 |
-
__host__ __device__ __forceinline__ void InitValue(
|
600 |
-
GenMode gen_mode,
|
601 |
-
cub::KeyValuePair<KeyT, ValueT>& value,
|
602 |
-
int index = 0)
|
603 |
-
{
|
604 |
-
InitValue(gen_mode, value.value, index);
|
605 |
-
|
606 |
-
// Assign corresponding flag with a likelihood of the last bit being set with entropy-reduction level 3
|
607 |
-
RandomBits(value.key, 3);
|
608 |
-
value.key = (value.key & 0x1);
|
609 |
-
}
|
610 |
-
|
611 |
-
|
612 |
-
|
613 |
-
/******************************************************************************
|
614 |
-
* Comparison and ostream operators
|
615 |
-
******************************************************************************/
|
616 |
-
|
617 |
-
/**
|
618 |
-
* KeyValuePair ostream operator
|
619 |
-
*/
|
620 |
-
template <typename Key, typename Value>
|
621 |
-
std::ostream& operator<<(std::ostream& os, const cub::KeyValuePair<Key, Value> &val)
|
622 |
-
{
|
623 |
-
os << '(' << CoutCast(val.key) << ',' << CoutCast(val.value) << ')';
|
624 |
-
return os;
|
625 |
-
}
|
626 |
-
|
627 |
-
|
628 |
-
/******************************************************************************
|
629 |
-
* Comparison and ostream operators for CUDA vector types
|
630 |
-
******************************************************************************/
|
631 |
-
|
632 |
-
/**
|
633 |
-
* Vector1 overloads
|
634 |
-
*/
|
635 |
-
#define CUB_VEC_OVERLOAD_1(T, BaseT) \
|
636 |
-
/* Ostream output */ \
|
637 |
-
std::ostream& operator<<( \
|
638 |
-
std::ostream& os, \
|
639 |
-
const T& val) \
|
640 |
-
{ \
|
641 |
-
os << '(' << CoutCast(val.x) << ')'; \
|
642 |
-
return os; \
|
643 |
-
} \
|
644 |
-
/* Inequality */ \
|
645 |
-
__host__ __device__ __forceinline__ bool operator!=( \
|
646 |
-
const T &a, \
|
647 |
-
const T &b) \
|
648 |
-
{ \
|
649 |
-
return (a.x != b.x); \
|
650 |
-
} \
|
651 |
-
/* Equality */ \
|
652 |
-
__host__ __device__ __forceinline__ bool operator==( \
|
653 |
-
const T &a, \
|
654 |
-
const T &b) \
|
655 |
-
{ \
|
656 |
-
return (a.x == b.x); \
|
657 |
-
} \
|
658 |
-
/* Test initialization */ \
|
659 |
-
__host__ __device__ __forceinline__ void InitValue(GenMode gen_mode, T &value, int index = 0) \
|
660 |
-
{ \
|
661 |
-
InitValue(gen_mode, value.x, index); \
|
662 |
-
} \
|
663 |
-
/* Max */ \
|
664 |
-
__host__ __device__ __forceinline__ bool operator>( \
|
665 |
-
const T &a, \
|
666 |
-
const T &b) \
|
667 |
-
{ \
|
668 |
-
return (a.x > b.x); \
|
669 |
-
} \
|
670 |
-
/* Min */ \
|
671 |
-
__host__ __device__ __forceinline__ bool operator<( \
|
672 |
-
const T &a, \
|
673 |
-
const T &b) \
|
674 |
-
{ \
|
675 |
-
return (a.x < b.x); \
|
676 |
-
} \
|
677 |
-
/* Summation (non-reference addends for VS2003 -O3 warpscan workaround */ \
|
678 |
-
__host__ __device__ __forceinline__ T operator+( \
|
679 |
-
T a, \
|
680 |
-
T b) \
|
681 |
-
{ \
|
682 |
-
T retval = make_##T(a.x + b.x); \
|
683 |
-
return retval; \
|
684 |
-
} \
|
685 |
-
namespace cub { \
|
686 |
-
template<> \
|
687 |
-
struct NumericTraits<T> \
|
688 |
-
{ \
|
689 |
-
static const Category CATEGORY = NOT_A_NUMBER; \
|
690 |
-
enum { \
|
691 |
-
PRIMITIVE = false, \
|
692 |
-
NULL_TYPE = false, \
|
693 |
-
}; \
|
694 |
-
static T Max() \
|
695 |
-
{ \
|
696 |
-
T retval = { \
|
697 |
-
NumericTraits<BaseT>::Max()}; \
|
698 |
-
return retval; \
|
699 |
-
} \
|
700 |
-
static T Lowest() \
|
701 |
-
{ \
|
702 |
-
T retval = { \
|
703 |
-
NumericTraits<BaseT>::Lowest()}; \
|
704 |
-
return retval; \
|
705 |
-
} \
|
706 |
-
}; \
|
707 |
-
} /* namespace std */
|
708 |
-
|
709 |
-
|
710 |
-
|
711 |
-
/**
|
712 |
-
* Vector2 overloads
|
713 |
-
*/
|
714 |
-
#define CUB_VEC_OVERLOAD_2(T, BaseT) \
|
715 |
-
/* Ostream output */ \
|
716 |
-
std::ostream& operator<<( \
|
717 |
-
std::ostream& os, \
|
718 |
-
const T& val) \
|
719 |
-
{ \
|
720 |
-
os << '(' \
|
721 |
-
<< CoutCast(val.x) << ',' \
|
722 |
-
<< CoutCast(val.y) << ')'; \
|
723 |
-
return os; \
|
724 |
-
} \
|
725 |
-
/* Inequality */ \
|
726 |
-
__host__ __device__ __forceinline__ bool operator!=( \
|
727 |
-
const T &a, \
|
728 |
-
const T &b) \
|
729 |
-
{ \
|
730 |
-
return (a.x != b.x) || \
|
731 |
-
(a.y != b.y); \
|
732 |
-
} \
|
733 |
-
/* Equality */ \
|
734 |
-
__host__ __device__ __forceinline__ bool operator==( \
|
735 |
-
const T &a, \
|
736 |
-
const T &b) \
|
737 |
-
{ \
|
738 |
-
return (a.x == b.x) && \
|
739 |
-
(a.y == b.y); \
|
740 |
-
} \
|
741 |
-
/* Test initialization */ \
|
742 |
-
__host__ __device__ __forceinline__ void InitValue(GenMode gen_mode, T &value, int index = 0) \
|
743 |
-
{ \
|
744 |
-
InitValue(gen_mode, value.x, index); \
|
745 |
-
InitValue(gen_mode, value.y, index); \
|
746 |
-
} \
|
747 |
-
/* Max */ \
|
748 |
-
__host__ __device__ __forceinline__ bool operator>( \
|
749 |
-
const T &a, \
|
750 |
-
const T &b) \
|
751 |
-
{ \
|
752 |
-
if (a.x > b.x) return true; else if (b.x > a.x) return false; \
|
753 |
-
return a.y > b.y; \
|
754 |
-
} \
|
755 |
-
/* Min */ \
|
756 |
-
__host__ __device__ __forceinline__ bool operator<( \
|
757 |
-
const T &a, \
|
758 |
-
const T &b) \
|
759 |
-
{ \
|
760 |
-
if (a.x < b.x) return true; else if (b.x < a.x) return false; \
|
761 |
-
return a.y < b.y; \
|
762 |
-
} \
|
763 |
-
/* Summation (non-reference addends for VS2003 -O3 warpscan workaround */ \
|
764 |
-
__host__ __device__ __forceinline__ T operator+( \
|
765 |
-
T a, \
|
766 |
-
T b) \
|
767 |
-
{ \
|
768 |
-
T retval = make_##T( \
|
769 |
-
a.x + b.x, \
|
770 |
-
a.y + b.y); \
|
771 |
-
return retval; \
|
772 |
-
} \
|
773 |
-
namespace cub { \
|
774 |
-
template<> \
|
775 |
-
struct NumericTraits<T> \
|
776 |
-
{ \
|
777 |
-
static const Category CATEGORY = NOT_A_NUMBER; \
|
778 |
-
enum { \
|
779 |
-
PRIMITIVE = false, \
|
780 |
-
NULL_TYPE = false, \
|
781 |
-
}; \
|
782 |
-
static T Max() \
|
783 |
-
{ \
|
784 |
-
T retval = { \
|
785 |
-
NumericTraits<BaseT>::Max(), \
|
786 |
-
NumericTraits<BaseT>::Max()}; \
|
787 |
-
return retval; \
|
788 |
-
} \
|
789 |
-
static T Lowest() \
|
790 |
-
{ \
|
791 |
-
T retval = { \
|
792 |
-
NumericTraits<BaseT>::Lowest(), \
|
793 |
-
NumericTraits<BaseT>::Lowest()}; \
|
794 |
-
return retval; \
|
795 |
-
} \
|
796 |
-
}; \
|
797 |
-
} /* namespace cub */
|
798 |
-
|
799 |
-
|
800 |
-
|
801 |
-
/**
|
802 |
-
* Vector3 overloads
|
803 |
-
*/
|
804 |
-
#define CUB_VEC_OVERLOAD_3(T, BaseT) \
|
805 |
-
/* Ostream output */ \
|
806 |
-
std::ostream& operator<<( \
|
807 |
-
std::ostream& os, \
|
808 |
-
const T& val) \
|
809 |
-
{ \
|
810 |
-
os << '(' \
|
811 |
-
<< CoutCast(val.x) << ',' \
|
812 |
-
<< CoutCast(val.y) << ',' \
|
813 |
-
<< CoutCast(val.z) << ')'; \
|
814 |
-
return os; \
|
815 |
-
} \
|
816 |
-
/* Inequality */ \
|
817 |
-
__host__ __device__ __forceinline__ bool operator!=( \
|
818 |
-
const T &a, \
|
819 |
-
const T &b) \
|
820 |
-
{ \
|
821 |
-
return (a.x != b.x) || \
|
822 |
-
(a.y != b.y) || \
|
823 |
-
(a.z != b.z); \
|
824 |
-
} \
|
825 |
-
/* Equality */ \
|
826 |
-
__host__ __device__ __forceinline__ bool operator==( \
|
827 |
-
const T &a, \
|
828 |
-
const T &b) \
|
829 |
-
{ \
|
830 |
-
return (a.x == b.x) && \
|
831 |
-
(a.y == b.y) && \
|
832 |
-
(a.z == b.z); \
|
833 |
-
} \
|
834 |
-
/* Test initialization */ \
|
835 |
-
__host__ __device__ __forceinline__ void InitValue(GenMode gen_mode, T &value, int index = 0) \
|
836 |
-
{ \
|
837 |
-
InitValue(gen_mode, value.x, index); \
|
838 |
-
InitValue(gen_mode, value.y, index); \
|
839 |
-
InitValue(gen_mode, value.z, index); \
|
840 |
-
} \
|
841 |
-
/* Max */ \
|
842 |
-
__host__ __device__ __forceinline__ bool operator>( \
|
843 |
-
const T &a, \
|
844 |
-
const T &b) \
|
845 |
-
{ \
|
846 |
-
if (a.x > b.x) return true; else if (b.x > a.x) return false; \
|
847 |
-
if (a.y > b.y) return true; else if (b.y > a.y) return false; \
|
848 |
-
return a.z > b.z; \
|
849 |
-
} \
|
850 |
-
/* Min */ \
|
851 |
-
__host__ __device__ __forceinline__ bool operator<( \
|
852 |
-
const T &a, \
|
853 |
-
const T &b) \
|
854 |
-
{ \
|
855 |
-
if (a.x < b.x) return true; else if (b.x < a.x) return false; \
|
856 |
-
if (a.y < b.y) return true; else if (b.y < a.y) return false; \
|
857 |
-
return a.z < b.z; \
|
858 |
-
} \
|
859 |
-
/* Summation (non-reference addends for VS2003 -O3 warpscan workaround */ \
|
860 |
-
__host__ __device__ __forceinline__ T operator+( \
|
861 |
-
T a, \
|
862 |
-
T b) \
|
863 |
-
{ \
|
864 |
-
T retval = make_##T( \
|
865 |
-
a.x + b.x, \
|
866 |
-
a.y + b.y, \
|
867 |
-
a.z + b.z); \
|
868 |
-
return retval; \
|
869 |
-
} \
|
870 |
-
namespace cub { \
|
871 |
-
template<> \
|
872 |
-
struct NumericTraits<T> \
|
873 |
-
{ \
|
874 |
-
static const Category CATEGORY = NOT_A_NUMBER; \
|
875 |
-
enum { \
|
876 |
-
PRIMITIVE = false, \
|
877 |
-
NULL_TYPE = false, \
|
878 |
-
}; \
|
879 |
-
static T Max() \
|
880 |
-
{ \
|
881 |
-
T retval = { \
|
882 |
-
NumericTraits<BaseT>::Max(), \
|
883 |
-
NumericTraits<BaseT>::Max(), \
|
884 |
-
NumericTraits<BaseT>::Max()}; \
|
885 |
-
return retval; \
|
886 |
-
} \
|
887 |
-
static T Lowest() \
|
888 |
-
{ \
|
889 |
-
T retval = { \
|
890 |
-
NumericTraits<BaseT>::Lowest(), \
|
891 |
-
NumericTraits<BaseT>::Lowest(), \
|
892 |
-
NumericTraits<BaseT>::Lowest()}; \
|
893 |
-
return retval; \
|
894 |
-
} \
|
895 |
-
}; \
|
896 |
-
} /* namespace cub */
|
897 |
-
|
898 |
-
|
899 |
-
/**
|
900 |
-
* Vector4 overloads
|
901 |
-
*/
|
902 |
-
#define CUB_VEC_OVERLOAD_4(T, BaseT) \
|
903 |
-
/* Ostream output */ \
|
904 |
-
std::ostream& operator<<( \
|
905 |
-
std::ostream& os, \
|
906 |
-
const T& val) \
|
907 |
-
{ \
|
908 |
-
os << '(' \
|
909 |
-
<< CoutCast(val.x) << ',' \
|
910 |
-
<< CoutCast(val.y) << ',' \
|
911 |
-
<< CoutCast(val.z) << ',' \
|
912 |
-
<< CoutCast(val.w) << ')'; \
|
913 |
-
return os; \
|
914 |
-
} \
|
915 |
-
/* Inequality */ \
|
916 |
-
__host__ __device__ __forceinline__ bool operator!=( \
|
917 |
-
const T &a, \
|
918 |
-
const T &b) \
|
919 |
-
{ \
|
920 |
-
return (a.x != b.x) || \
|
921 |
-
(a.y != b.y) || \
|
922 |
-
(a.z != b.z) || \
|
923 |
-
(a.w != b.w); \
|
924 |
-
} \
|
925 |
-
/* Equality */ \
|
926 |
-
__host__ __device__ __forceinline__ bool operator==( \
|
927 |
-
const T &a, \
|
928 |
-
const T &b) \
|
929 |
-
{ \
|
930 |
-
return (a.x == b.x) && \
|
931 |
-
(a.y == b.y) && \
|
932 |
-
(a.z == b.z) && \
|
933 |
-
(a.w == b.w); \
|
934 |
-
} \
|
935 |
-
/* Test initialization */ \
|
936 |
-
__host__ __device__ __forceinline__ void InitValue(GenMode gen_mode, T &value, int index = 0) \
|
937 |
-
{ \
|
938 |
-
InitValue(gen_mode, value.x, index); \
|
939 |
-
InitValue(gen_mode, value.y, index); \
|
940 |
-
InitValue(gen_mode, value.z, index); \
|
941 |
-
InitValue(gen_mode, value.w, index); \
|
942 |
-
} \
|
943 |
-
/* Max */ \
|
944 |
-
__host__ __device__ __forceinline__ bool operator>( \
|
945 |
-
const T &a, \
|
946 |
-
const T &b) \
|
947 |
-
{ \
|
948 |
-
if (a.x > b.x) return true; else if (b.x > a.x) return false; \
|
949 |
-
if (a.y > b.y) return true; else if (b.y > a.y) return false; \
|
950 |
-
if (a.z > b.z) return true; else if (b.z > a.z) return false; \
|
951 |
-
return a.w > b.w; \
|
952 |
-
} \
|
953 |
-
/* Min */ \
|
954 |
-
__host__ __device__ __forceinline__ bool operator<( \
|
955 |
-
const T &a, \
|
956 |
-
const T &b) \
|
957 |
-
{ \
|
958 |
-
if (a.x < b.x) return true; else if (b.x < a.x) return false; \
|
959 |
-
if (a.y < b.y) return true; else if (b.y < a.y) return false; \
|
960 |
-
if (a.z < b.z) return true; else if (b.z < a.z) return false; \
|
961 |
-
return a.w < b.w; \
|
962 |
-
} \
|
963 |
-
/* Summation (non-reference addends for VS2003 -O3 warpscan workaround */ \
|
964 |
-
__host__ __device__ __forceinline__ T operator+( \
|
965 |
-
T a, \
|
966 |
-
T b) \
|
967 |
-
{ \
|
968 |
-
T retval = make_##T( \
|
969 |
-
a.x + b.x, \
|
970 |
-
a.y + b.y, \
|
971 |
-
a.z + b.z, \
|
972 |
-
a.w + b.w); \
|
973 |
-
return retval; \
|
974 |
-
} \
|
975 |
-
namespace cub { \
|
976 |
-
template<> \
|
977 |
-
struct NumericTraits<T> \
|
978 |
-
{ \
|
979 |
-
static const Category CATEGORY = NOT_A_NUMBER; \
|
980 |
-
enum { \
|
981 |
-
PRIMITIVE = false, \
|
982 |
-
NULL_TYPE = false, \
|
983 |
-
}; \
|
984 |
-
static T Max() \
|
985 |
-
{ \
|
986 |
-
T retval = { \
|
987 |
-
NumericTraits<BaseT>::Max(), \
|
988 |
-
NumericTraits<BaseT>::Max(), \
|
989 |
-
NumericTraits<BaseT>::Max(), \
|
990 |
-
NumericTraits<BaseT>::Max()}; \
|
991 |
-
return retval; \
|
992 |
-
} \
|
993 |
-
static T Lowest() \
|
994 |
-
{ \
|
995 |
-
T retval = { \
|
996 |
-
NumericTraits<BaseT>::Lowest(), \
|
997 |
-
NumericTraits<BaseT>::Lowest(), \
|
998 |
-
NumericTraits<BaseT>::Lowest(), \
|
999 |
-
NumericTraits<BaseT>::Lowest()}; \
|
1000 |
-
return retval; \
|
1001 |
-
} \
|
1002 |
-
}; \
|
1003 |
-
} /* namespace cub */
|
1004 |
-
|
1005 |
-
/**
|
1006 |
-
* All vector overloads
|
1007 |
-
*/
|
1008 |
-
#define CUB_VEC_OVERLOAD(COMPONENT_T, BaseT) \
|
1009 |
-
CUB_VEC_OVERLOAD_1(COMPONENT_T##1, BaseT) \
|
1010 |
-
CUB_VEC_OVERLOAD_2(COMPONENT_T##2, BaseT) \
|
1011 |
-
CUB_VEC_OVERLOAD_3(COMPONENT_T##3, BaseT) \
|
1012 |
-
CUB_VEC_OVERLOAD_4(COMPONENT_T##4, BaseT)
|
1013 |
-
|
1014 |
-
/**
|
1015 |
-
* Define for types
|
1016 |
-
*/
|
1017 |
-
CUB_VEC_OVERLOAD(char, char)
|
1018 |
-
CUB_VEC_OVERLOAD(short, short)
|
1019 |
-
CUB_VEC_OVERLOAD(int, int)
|
1020 |
-
CUB_VEC_OVERLOAD(long, long)
|
1021 |
-
CUB_VEC_OVERLOAD(longlong, long long)
|
1022 |
-
CUB_VEC_OVERLOAD(uchar, unsigned char)
|
1023 |
-
CUB_VEC_OVERLOAD(ushort, unsigned short)
|
1024 |
-
CUB_VEC_OVERLOAD(uint, unsigned int)
|
1025 |
-
CUB_VEC_OVERLOAD(ulong, unsigned long)
|
1026 |
-
CUB_VEC_OVERLOAD(ulonglong, unsigned long long)
|
1027 |
-
CUB_VEC_OVERLOAD(float, float)
|
1028 |
-
CUB_VEC_OVERLOAD(double, double)
|
1029 |
-
|
1030 |
-
|
1031 |
-
//---------------------------------------------------------------------
|
1032 |
-
// Complex data type TestFoo
|
1033 |
-
//---------------------------------------------------------------------
|
1034 |
-
|
1035 |
-
/**
|
1036 |
-
* TestFoo complex data type
|
1037 |
-
*/
|
1038 |
-
struct TestFoo
|
1039 |
-
{
|
1040 |
-
long long x;
|
1041 |
-
int y;
|
1042 |
-
short z;
|
1043 |
-
char w;
|
1044 |
-
|
1045 |
-
// Factory
|
1046 |
-
static __host__ __device__ __forceinline__ TestFoo MakeTestFoo(long long x, int y, short z, char w)
|
1047 |
-
{
|
1048 |
-
TestFoo retval = {x, y, z, w};
|
1049 |
-
return retval;
|
1050 |
-
}
|
1051 |
-
|
1052 |
-
// Assignment from int operator
|
1053 |
-
__host__ __device__ __forceinline__ TestFoo& operator =(int b)
|
1054 |
-
{
|
1055 |
-
x = b;
|
1056 |
-
y = b;
|
1057 |
-
z = b;
|
1058 |
-
w = b;
|
1059 |
-
return *this;
|
1060 |
-
}
|
1061 |
-
|
1062 |
-
// Summation operator
|
1063 |
-
__host__ __device__ __forceinline__ TestFoo operator+(const TestFoo &b) const
|
1064 |
-
{
|
1065 |
-
return MakeTestFoo(x + b.x, y + b.y, z + b.z, w + b.w);
|
1066 |
-
}
|
1067 |
-
|
1068 |
-
// Inequality operator
|
1069 |
-
__host__ __device__ __forceinline__ bool operator !=(const TestFoo &b) const
|
1070 |
-
{
|
1071 |
-
return (x != b.x) || (y != b.y) || (z != b.z) || (w != b.w);
|
1072 |
-
}
|
1073 |
-
|
1074 |
-
// Equality operator
|
1075 |
-
__host__ __device__ __forceinline__ bool operator ==(const TestFoo &b) const
|
1076 |
-
{
|
1077 |
-
return (x == b.x) && (y == b.y) && (z == b.z) && (w == b.w);
|
1078 |
-
}
|
1079 |
-
|
1080 |
-
// Less than operator
|
1081 |
-
__host__ __device__ __forceinline__ bool operator <(const TestFoo &b) const
|
1082 |
-
{
|
1083 |
-
if (x < b.x) return true; else if (b.x < x) return false;
|
1084 |
-
if (y < b.y) return true; else if (b.y < y) return false;
|
1085 |
-
if (z < b.z) return true; else if (b.z < z) return false;
|
1086 |
-
return w < b.w;
|
1087 |
-
}
|
1088 |
-
|
1089 |
-
// Greater than operator
|
1090 |
-
__host__ __device__ __forceinline__ bool operator >(const TestFoo &b) const
|
1091 |
-
{
|
1092 |
-
if (x > b.x) return true; else if (b.x > x) return false;
|
1093 |
-
if (y > b.y) return true; else if (b.y > y) return false;
|
1094 |
-
if (z > b.z) return true; else if (b.z > z) return false;
|
1095 |
-
return w > b.w;
|
1096 |
-
}
|
1097 |
-
|
1098 |
-
};
|
1099 |
-
|
1100 |
-
/**
|
1101 |
-
* TestFoo ostream operator
|
1102 |
-
*/
|
1103 |
-
std::ostream& operator<<(std::ostream& os, const TestFoo& val)
|
1104 |
-
{
|
1105 |
-
os << '(' << val.x << ',' << val.y << ',' << val.z << ',' << CoutCast(val.w) << ')';
|
1106 |
-
return os;
|
1107 |
-
}
|
1108 |
-
|
1109 |
-
/**
|
1110 |
-
* TestFoo test initialization
|
1111 |
-
*/
|
1112 |
-
__host__ __device__ __forceinline__ void InitValue(GenMode gen_mode, TestFoo &value, int index = 0)
|
1113 |
-
{
|
1114 |
-
InitValue(gen_mode, value.x, index);
|
1115 |
-
InitValue(gen_mode, value.y, index);
|
1116 |
-
InitValue(gen_mode, value.z, index);
|
1117 |
-
InitValue(gen_mode, value.w, index);
|
1118 |
-
}
|
1119 |
-
|
1120 |
-
|
1121 |
-
/// numeric_limits<TestFoo> specialization
|
1122 |
-
namespace cub {
|
1123 |
-
template<>
|
1124 |
-
struct NumericTraits<TestFoo>
|
1125 |
-
{
|
1126 |
-
static const Category CATEGORY = NOT_A_NUMBER;
|
1127 |
-
enum {
|
1128 |
-
PRIMITIVE = false,
|
1129 |
-
NULL_TYPE = false,
|
1130 |
-
};
|
1131 |
-
static TestFoo Max()
|
1132 |
-
{
|
1133 |
-
return TestFoo::MakeTestFoo(
|
1134 |
-
NumericTraits<long long>::Max(),
|
1135 |
-
NumericTraits<int>::Max(),
|
1136 |
-
NumericTraits<short>::Max(),
|
1137 |
-
NumericTraits<char>::Max());
|
1138 |
-
}
|
1139 |
-
|
1140 |
-
static TestFoo Lowest()
|
1141 |
-
{
|
1142 |
-
return TestFoo::MakeTestFoo(
|
1143 |
-
NumericTraits<long long>::Lowest(),
|
1144 |
-
NumericTraits<int>::Lowest(),
|
1145 |
-
NumericTraits<short>::Lowest(),
|
1146 |
-
NumericTraits<char>::Lowest());
|
1147 |
-
}
|
1148 |
-
};
|
1149 |
-
} // namespace cub
|
1150 |
-
|
1151 |
-
|
1152 |
-
//---------------------------------------------------------------------
|
1153 |
-
// Complex data type TestBar (with optimizations for fence-free warp-synchrony)
|
1154 |
-
//---------------------------------------------------------------------
|
1155 |
-
|
1156 |
-
/**
|
1157 |
-
* TestBar complex data type
|
1158 |
-
*/
|
1159 |
-
struct TestBar
|
1160 |
-
{
|
1161 |
-
long long x;
|
1162 |
-
int y;
|
1163 |
-
|
1164 |
-
// Constructor
|
1165 |
-
__host__ __device__ __forceinline__ TestBar() : x(0), y(0)
|
1166 |
-
{}
|
1167 |
-
|
1168 |
-
// Constructor
|
1169 |
-
__host__ __device__ __forceinline__ TestBar(int b) : x(b), y(b)
|
1170 |
-
{}
|
1171 |
-
|
1172 |
-
// Constructor
|
1173 |
-
__host__ __device__ __forceinline__ TestBar(long long x, int y) : x(x), y(y)
|
1174 |
-
{}
|
1175 |
-
|
1176 |
-
// Assignment from int operator
|
1177 |
-
__host__ __device__ __forceinline__ TestBar& operator =(int b)
|
1178 |
-
{
|
1179 |
-
x = b;
|
1180 |
-
y = b;
|
1181 |
-
return *this;
|
1182 |
-
}
|
1183 |
-
|
1184 |
-
// Summation operator
|
1185 |
-
__host__ __device__ __forceinline__ TestBar operator+(const TestBar &b) const
|
1186 |
-
{
|
1187 |
-
return TestBar(x + b.x, y + b.y);
|
1188 |
-
}
|
1189 |
-
|
1190 |
-
// Inequality operator
|
1191 |
-
__host__ __device__ __forceinline__ bool operator !=(const TestBar &b) const
|
1192 |
-
{
|
1193 |
-
return (x != b.x) || (y != b.y);
|
1194 |
-
}
|
1195 |
-
|
1196 |
-
// Equality operator
|
1197 |
-
__host__ __device__ __forceinline__ bool operator ==(const TestBar &b) const
|
1198 |
-
{
|
1199 |
-
return (x == b.x) && (y == b.y);
|
1200 |
-
}
|
1201 |
-
|
1202 |
-
// Less than operator
|
1203 |
-
__host__ __device__ __forceinline__ bool operator <(const TestBar &b) const
|
1204 |
-
{
|
1205 |
-
if (x < b.x) return true; else if (b.x < x) return false;
|
1206 |
-
return y < b.y;
|
1207 |
-
}
|
1208 |
-
|
1209 |
-
// Greater than operator
|
1210 |
-
__host__ __device__ __forceinline__ bool operator >(const TestBar &b) const
|
1211 |
-
{
|
1212 |
-
if (x > b.x) return true; else if (b.x > x) return false;
|
1213 |
-
return y > b.y;
|
1214 |
-
}
|
1215 |
-
|
1216 |
-
};
|
1217 |
-
|
1218 |
-
|
1219 |
-
/**
|
1220 |
-
* TestBar ostream operator
|
1221 |
-
*/
|
1222 |
-
std::ostream& operator<<(std::ostream& os, const TestBar& val)
|
1223 |
-
{
|
1224 |
-
os << '(' << val.x << ',' << val.y << ')';
|
1225 |
-
return os;
|
1226 |
-
}
|
1227 |
-
|
1228 |
-
/**
|
1229 |
-
* TestBar test initialization
|
1230 |
-
*/
|
1231 |
-
__host__ __device__ __forceinline__ void InitValue(GenMode gen_mode, TestBar &value, int index = 0)
|
1232 |
-
{
|
1233 |
-
InitValue(gen_mode, value.x, index);
|
1234 |
-
InitValue(gen_mode, value.y, index);
|
1235 |
-
}
|
1236 |
-
|
1237 |
-
/// numeric_limits<TestBar> specialization
|
1238 |
-
namespace cub {
|
1239 |
-
template<>
|
1240 |
-
struct NumericTraits<TestBar>
|
1241 |
-
{
|
1242 |
-
static const Category CATEGORY = NOT_A_NUMBER;
|
1243 |
-
enum {
|
1244 |
-
PRIMITIVE = false,
|
1245 |
-
NULL_TYPE = false,
|
1246 |
-
};
|
1247 |
-
static TestBar Max()
|
1248 |
-
{
|
1249 |
-
return TestBar(
|
1250 |
-
NumericTraits<long long>::Max(),
|
1251 |
-
NumericTraits<int>::Max());
|
1252 |
-
}
|
1253 |
-
|
1254 |
-
static TestBar Lowest()
|
1255 |
-
{
|
1256 |
-
return TestBar(
|
1257 |
-
NumericTraits<long long>::Lowest(),
|
1258 |
-
NumericTraits<int>::Lowest());
|
1259 |
-
}
|
1260 |
-
};
|
1261 |
-
} // namespace cub
|
1262 |
-
|
1263 |
-
|
1264 |
-
/******************************************************************************
|
1265 |
-
* Helper routines for list comparison and display
|
1266 |
-
******************************************************************************/
|
1267 |
-
|
1268 |
-
|
1269 |
-
/**
|
1270 |
-
* Compares the equivalence of two arrays
|
1271 |
-
*/
|
1272 |
-
template <typename S, typename T, typename OffsetT>
|
1273 |
-
int CompareResults(T* computed, S* reference, OffsetT len, bool verbose = true)
|
1274 |
-
{
|
1275 |
-
for (OffsetT i = 0; i < len; i++)
|
1276 |
-
{
|
1277 |
-
if (computed[i] != reference[i])
|
1278 |
-
{
|
1279 |
-
if (verbose) std::cout << "INCORRECT: [" << i << "]: "
|
1280 |
-
<< CoutCast(computed[i]) << " != "
|
1281 |
-
<< CoutCast(reference[i]);
|
1282 |
-
return 1;
|
1283 |
-
}
|
1284 |
-
}
|
1285 |
-
return 0;
|
1286 |
-
}
|
1287 |
-
|
1288 |
-
|
1289 |
-
/**
|
1290 |
-
* Compares the equivalence of two arrays
|
1291 |
-
*/
|
1292 |
-
template <typename OffsetT>
|
1293 |
-
int CompareResults(float* computed, float* reference, OffsetT len, bool verbose = true)
|
1294 |
-
{
|
1295 |
-
for (OffsetT i = 0; i < len; i++)
|
1296 |
-
{
|
1297 |
-
if (computed[i] != reference[i])
|
1298 |
-
{
|
1299 |
-
float difference = std::abs(computed[i]-reference[i]);
|
1300 |
-
float fraction = difference / std::abs(reference[i]);
|
1301 |
-
|
1302 |
-
if (fraction > 0.0001)
|
1303 |
-
{
|
1304 |
-
if (verbose) std::cout << "INCORRECT: [" << i << "]: "
|
1305 |
-
<< "(computed) " << CoutCast(computed[i]) << " != "
|
1306 |
-
<< CoutCast(reference[i]) << " (difference:" << difference << ", fraction: " << fraction << ")";
|
1307 |
-
return 1;
|
1308 |
-
}
|
1309 |
-
}
|
1310 |
-
}
|
1311 |
-
return 0;
|
1312 |
-
}
|
1313 |
-
|
1314 |
-
|
1315 |
-
/**
|
1316 |
-
* Compares the equivalence of two arrays
|
1317 |
-
*/
|
1318 |
-
template <typename OffsetT>
|
1319 |
-
int CompareResults(cub::NullType* computed, cub::NullType* reference, OffsetT len, bool verbose = true)
|
1320 |
-
{
|
1321 |
-
return 0;
|
1322 |
-
}
|
1323 |
-
|
1324 |
-
/**
|
1325 |
-
* Compares the equivalence of two arrays
|
1326 |
-
*/
|
1327 |
-
template <typename OffsetT>
|
1328 |
-
int CompareResults(double* computed, double* reference, OffsetT len, bool verbose = true)
|
1329 |
-
{
|
1330 |
-
for (OffsetT i = 0; i < len; i++)
|
1331 |
-
{
|
1332 |
-
if (computed[i] != reference[i])
|
1333 |
-
{
|
1334 |
-
double difference = std::abs(computed[i]-reference[i]);
|
1335 |
-
double fraction = difference / std::abs(reference[i]);
|
1336 |
-
|
1337 |
-
if (fraction > 0.0001)
|
1338 |
-
{
|
1339 |
-
if (verbose) std::cout << "INCORRECT: [" << i << "]: "
|
1340 |
-
<< CoutCast(computed[i]) << " != "
|
1341 |
-
<< CoutCast(reference[i]) << " (difference:" << difference << ", fraction: " << fraction << ")";
|
1342 |
-
return 1;
|
1343 |
-
}
|
1344 |
-
}
|
1345 |
-
}
|
1346 |
-
return 0;
|
1347 |
-
}
|
1348 |
-
|
1349 |
-
|
1350 |
-
/**
|
1351 |
-
* Verify the contents of a device array match those
|
1352 |
-
* of a host array
|
1353 |
-
*/
|
1354 |
-
int CompareDeviceResults(
|
1355 |
-
cub::NullType */* h_reference */,
|
1356 |
-
cub::NullType */* d_data */,
|
1357 |
-
size_t /* num_items */,
|
1358 |
-
bool /* verbose */ = true,
|
1359 |
-
bool /* display_data */ = false)
|
1360 |
-
{
|
1361 |
-
return 0;
|
1362 |
-
}
|
1363 |
-
|
1364 |
-
/**
|
1365 |
-
* Verify the contents of a device array match those
|
1366 |
-
* of a host array
|
1367 |
-
*/
|
1368 |
-
template <typename S, typename OffsetT>
|
1369 |
-
int CompareDeviceResults(
|
1370 |
-
S *h_reference,
|
1371 |
-
cub::DiscardOutputIterator<OffsetT> d_data,
|
1372 |
-
size_t num_items,
|
1373 |
-
bool verbose = true,
|
1374 |
-
bool display_data = false)
|
1375 |
-
{
|
1376 |
-
return 0;
|
1377 |
-
}
|
1378 |
-
|
1379 |
-
/**
|
1380 |
-
* Verify the contents of a device array match those
|
1381 |
-
* of a host array
|
1382 |
-
*/
|
1383 |
-
template <typename S, typename T>
|
1384 |
-
int CompareDeviceResults(
|
1385 |
-
S *h_reference,
|
1386 |
-
T *d_data,
|
1387 |
-
size_t num_items,
|
1388 |
-
bool verbose = true,
|
1389 |
-
bool display_data = false)
|
1390 |
-
{
|
1391 |
-
// Allocate array on host
|
1392 |
-
T *h_data = (T*) malloc(num_items * sizeof(T));
|
1393 |
-
|
1394 |
-
// Copy data back
|
1395 |
-
cudaMemcpy(h_data, d_data, sizeof(T) * num_items, cudaMemcpyDeviceToHost);
|
1396 |
-
|
1397 |
-
// Display data
|
1398 |
-
if (display_data)
|
1399 |
-
{
|
1400 |
-
printf("Reference:\n");
|
1401 |
-
for (int i = 0; i < int(num_items); i++)
|
1402 |
-
{
|
1403 |
-
std::cout << CoutCast(h_reference[i]) << ", ";
|
1404 |
-
}
|
1405 |
-
printf("\n\nComputed:\n");
|
1406 |
-
for (int i = 0; i < int(num_items); i++)
|
1407 |
-
{
|
1408 |
-
std::cout << CoutCast(h_data[i]) << ", ";
|
1409 |
-
}
|
1410 |
-
printf("\n\n");
|
1411 |
-
}
|
1412 |
-
|
1413 |
-
// Check
|
1414 |
-
int retval = CompareResults(h_data, h_reference, num_items, verbose);
|
1415 |
-
|
1416 |
-
// Cleanup
|
1417 |
-
if (h_data) free(h_data);
|
1418 |
-
|
1419 |
-
return retval;
|
1420 |
-
}
|
1421 |
-
|
1422 |
-
|
1423 |
-
/**
|
1424 |
-
* Verify the contents of a device array match those
|
1425 |
-
* of a device array
|
1426 |
-
*/
|
1427 |
-
template <typename T>
|
1428 |
-
int CompareDeviceDeviceResults(
|
1429 |
-
T *d_reference,
|
1430 |
-
T *d_data,
|
1431 |
-
size_t num_items,
|
1432 |
-
bool verbose = true,
|
1433 |
-
bool display_data = false)
|
1434 |
-
{
|
1435 |
-
// Allocate array on host
|
1436 |
-
T *h_reference = (T*) malloc(num_items * sizeof(T));
|
1437 |
-
T *h_data = (T*) malloc(num_items * sizeof(T));
|
1438 |
-
|
1439 |
-
// Copy data back
|
1440 |
-
cudaMemcpy(h_reference, d_reference, sizeof(T) * num_items, cudaMemcpyDeviceToHost);
|
1441 |
-
cudaMemcpy(h_data, d_data, sizeof(T) * num_items, cudaMemcpyDeviceToHost);
|
1442 |
-
|
1443 |
-
// Display data
|
1444 |
-
if (display_data) {
|
1445 |
-
printf("Reference:\n");
|
1446 |
-
for (int i = 0; i < num_items; i++)
|
1447 |
-
{
|
1448 |
-
std::cout << CoutCast(h_reference[i]) << ", ";
|
1449 |
-
}
|
1450 |
-
printf("\n\nComputed:\n");
|
1451 |
-
for (int i = 0; i < num_items; i++)
|
1452 |
-
{
|
1453 |
-
std::cout << CoutCast(h_data[i]) << ", ";
|
1454 |
-
}
|
1455 |
-
printf("\n\n");
|
1456 |
-
}
|
1457 |
-
|
1458 |
-
// Check
|
1459 |
-
int retval = CompareResults(h_data, h_reference, num_items, verbose);
|
1460 |
-
|
1461 |
-
// Cleanup
|
1462 |
-
if (h_reference) free(h_reference);
|
1463 |
-
if (h_data) free(h_data);
|
1464 |
-
|
1465 |
-
return retval;
|
1466 |
-
}
|
1467 |
-
|
1468 |
-
|
1469 |
-
/**
|
1470 |
-
* Print the contents of a host array
|
1471 |
-
*/
|
1472 |
-
void DisplayResults(
|
1473 |
-
cub::NullType */* h_data */,
|
1474 |
-
size_t /* num_items */)
|
1475 |
-
{}
|
1476 |
-
|
1477 |
-
|
1478 |
-
/**
|
1479 |
-
* Print the contents of a host array
|
1480 |
-
*/
|
1481 |
-
template <typename InputIteratorT>
|
1482 |
-
void DisplayResults(
|
1483 |
-
InputIteratorT h_data,
|
1484 |
-
size_t num_items)
|
1485 |
-
{
|
1486 |
-
// Display data
|
1487 |
-
for (int i = 0; i < int(num_items); i++)
|
1488 |
-
{
|
1489 |
-
std::cout << CoutCast(h_data[i]) << ", ";
|
1490 |
-
}
|
1491 |
-
printf("\n");
|
1492 |
-
}
|
1493 |
-
|
1494 |
-
|
1495 |
-
/**
|
1496 |
-
* Print the contents of a device array
|
1497 |
-
*/
|
1498 |
-
template <typename T>
|
1499 |
-
void DisplayDeviceResults(
|
1500 |
-
T *d_data,
|
1501 |
-
size_t num_items)
|
1502 |
-
{
|
1503 |
-
// Allocate array on host
|
1504 |
-
T *h_data = (T*) malloc(num_items * sizeof(T));
|
1505 |
-
|
1506 |
-
// Copy data back
|
1507 |
-
cudaMemcpy(h_data, d_data, sizeof(T) * num_items, cudaMemcpyDeviceToHost);
|
1508 |
-
|
1509 |
-
DisplayResults(h_data, num_items);
|
1510 |
-
|
1511 |
-
// Cleanup
|
1512 |
-
if (h_data) free(h_data);
|
1513 |
-
}
|
1514 |
-
|
1515 |
-
|
1516 |
-
/******************************************************************************
|
1517 |
-
* Segment descriptor generation
|
1518 |
-
******************************************************************************/
|
1519 |
-
|
1520 |
-
/**
|
1521 |
-
* Initialize segments
|
1522 |
-
*/
|
1523 |
-
void InitializeSegments(
|
1524 |
-
int num_items,
|
1525 |
-
int num_segments,
|
1526 |
-
int *h_segment_offsets,
|
1527 |
-
bool verbose = false)
|
1528 |
-
{
|
1529 |
-
if (num_segments <= 0)
|
1530 |
-
return;
|
1531 |
-
|
1532 |
-
unsigned int expected_segment_length = (num_items + num_segments - 1) / num_segments;
|
1533 |
-
int offset = 0;
|
1534 |
-
for (int i = 0; i < num_segments; ++i)
|
1535 |
-
{
|
1536 |
-
h_segment_offsets[i] = offset;
|
1537 |
-
|
1538 |
-
unsigned int segment_length = RandomValue((expected_segment_length * 2) + 1);
|
1539 |
-
offset += segment_length;
|
1540 |
-
offset = CUB_MIN(offset, num_items);
|
1541 |
-
}
|
1542 |
-
h_segment_offsets[num_segments] = num_items;
|
1543 |
-
|
1544 |
-
if (verbose)
|
1545 |
-
{
|
1546 |
-
printf("Segment offsets: ");
|
1547 |
-
DisplayResults(h_segment_offsets, num_segments + 1);
|
1548 |
-
}
|
1549 |
-
}
|
1550 |
-
|
1551 |
-
|
1552 |
-
/******************************************************************************
|
1553 |
-
* Timing
|
1554 |
-
******************************************************************************/
|
1555 |
-
|
1556 |
-
|
1557 |
-
struct CpuTimer
|
1558 |
-
{
|
1559 |
-
#if defined(_WIN32) || defined(_WIN64)
|
1560 |
-
|
1561 |
-
LARGE_INTEGER ll_freq;
|
1562 |
-
LARGE_INTEGER ll_start;
|
1563 |
-
LARGE_INTEGER ll_stop;
|
1564 |
-
|
1565 |
-
CpuTimer()
|
1566 |
-
{
|
1567 |
-
QueryPerformanceFrequency(&ll_freq);
|
1568 |
-
}
|
1569 |
-
|
1570 |
-
void Start()
|
1571 |
-
{
|
1572 |
-
QueryPerformanceCounter(&ll_start);
|
1573 |
-
}
|
1574 |
-
|
1575 |
-
void Stop()
|
1576 |
-
{
|
1577 |
-
QueryPerformanceCounter(&ll_stop);
|
1578 |
-
}
|
1579 |
-
|
1580 |
-
float ElapsedMillis()
|
1581 |
-
{
|
1582 |
-
double start = double(ll_start.QuadPart) / double(ll_freq.QuadPart);
|
1583 |
-
double stop = double(ll_stop.QuadPart) / double(ll_freq.QuadPart);
|
1584 |
-
|
1585 |
-
return float((stop - start) * 1000);
|
1586 |
-
}
|
1587 |
-
|
1588 |
-
#else
|
1589 |
-
|
1590 |
-
rusage start;
|
1591 |
-
rusage stop;
|
1592 |
-
|
1593 |
-
void Start()
|
1594 |
-
{
|
1595 |
-
getrusage(RUSAGE_SELF, &start);
|
1596 |
-
}
|
1597 |
-
|
1598 |
-
void Stop()
|
1599 |
-
{
|
1600 |
-
getrusage(RUSAGE_SELF, &stop);
|
1601 |
-
}
|
1602 |
-
|
1603 |
-
float ElapsedMillis()
|
1604 |
-
{
|
1605 |
-
float sec = stop.ru_utime.tv_sec - start.ru_utime.tv_sec;
|
1606 |
-
float usec = stop.ru_utime.tv_usec - start.ru_utime.tv_usec;
|
1607 |
-
|
1608 |
-
return (sec * 1000) + (usec / 1000);
|
1609 |
-
}
|
1610 |
-
|
1611 |
-
#endif
|
1612 |
-
};
|
1613 |
-
|
1614 |
-
struct GpuTimer
|
1615 |
-
{
|
1616 |
-
cudaEvent_t start;
|
1617 |
-
cudaEvent_t stop;
|
1618 |
-
|
1619 |
-
GpuTimer()
|
1620 |
-
{
|
1621 |
-
cudaEventCreate(&start);
|
1622 |
-
cudaEventCreate(&stop);
|
1623 |
-
}
|
1624 |
-
|
1625 |
-
~GpuTimer()
|
1626 |
-
{
|
1627 |
-
cudaEventDestroy(start);
|
1628 |
-
cudaEventDestroy(stop);
|
1629 |
-
}
|
1630 |
-
|
1631 |
-
void Start()
|
1632 |
-
{
|
1633 |
-
cudaEventRecord(start, 0);
|
1634 |
-
}
|
1635 |
-
|
1636 |
-
void Stop()
|
1637 |
-
{
|
1638 |
-
cudaEventRecord(stop, 0);
|
1639 |
-
}
|
1640 |
-
|
1641 |
-
float ElapsedMillis()
|
1642 |
-
{
|
1643 |
-
float elapsed;
|
1644 |
-
cudaEventSynchronize(stop);
|
1645 |
-
cudaEventElapsedTime(&elapsed, start, stop);
|
1646 |
-
return elapsed;
|
1647 |
-
}
|
1648 |
-
};
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/WALT/cwalt/Clip_WALT_Generate.py
DELETED
@@ -1,284 +0,0 @@
|
|
1 |
-
#!/usr/bin/env python3
|
2 |
-
# -*- coding: utf-8 -*-
|
3 |
-
"""
|
4 |
-
Created on Fri May 20 15:15:11 2022
|
5 |
-
|
6 |
-
@author: dinesh
|
7 |
-
"""
|
8 |
-
|
9 |
-
from collections import OrderedDict
|
10 |
-
from matplotlib import pyplot as plt
|
11 |
-
from .utils import *
|
12 |
-
import scipy.interpolate
|
13 |
-
|
14 |
-
from scipy import interpolate
|
15 |
-
from .clustering_utils import *
|
16 |
-
import glob
|
17 |
-
import cv2
|
18 |
-
from PIL import Image
|
19 |
-
|
20 |
-
|
21 |
-
import json
|
22 |
-
import cv2
|
23 |
-
|
24 |
-
import numpy as np
|
25 |
-
from tqdm import tqdm
|
26 |
-
|
27 |
-
|
28 |
-
def ignore_indexes(tracks_all, labels_all):
|
29 |
-
# get repeating bounding boxes
|
30 |
-
get_indexes = lambda x, xs: [i for (y, i) in zip(xs, range(len(xs))) if x == y]
|
31 |
-
ignore_ind = []
|
32 |
-
for index, track in enumerate(tracks_all):
|
33 |
-
print('in ignore', index, len(tracks_all))
|
34 |
-
if index in ignore_ind:
|
35 |
-
continue
|
36 |
-
|
37 |
-
if labels_all[index] < 1 or labels_all[index] > 3:
|
38 |
-
ignore_ind.extend([index])
|
39 |
-
|
40 |
-
ind = get_indexes(track, tracks_all)
|
41 |
-
if len(ind) > 30:
|
42 |
-
ignore_ind.extend(ind)
|
43 |
-
|
44 |
-
return ignore_ind
|
45 |
-
|
46 |
-
def repeated_indexes_old(tracks_all,ignore_ind, unoccluded_indexes=None):
|
47 |
-
# get repeating bounding boxes
|
48 |
-
get_indexes = lambda x, xs: [i for (y, i) in zip(xs, range(len(xs))) if bb_intersection_over_union(x, y) > 0.8 and i not in ignore_ind]
|
49 |
-
repeat_ind = []
|
50 |
-
repeat_inds =[]
|
51 |
-
if unoccluded_indexes == None:
|
52 |
-
for index, track in enumerate(tracks_all):
|
53 |
-
if index in repeat_ind or index in ignore_ind:
|
54 |
-
continue
|
55 |
-
ind = get_indexes(track, tracks_all)
|
56 |
-
if len(ind) > 20:
|
57 |
-
repeat_ind.extend(ind)
|
58 |
-
repeat_inds.append([ind,track])
|
59 |
-
else:
|
60 |
-
for index in unoccluded_indexes:
|
61 |
-
if index in repeat_ind or index in ignore_ind:
|
62 |
-
continue
|
63 |
-
ind = get_indexes(tracks_all[index], tracks_all)
|
64 |
-
if len(ind) > 3:
|
65 |
-
repeat_ind.extend(ind)
|
66 |
-
repeat_inds.append([ind,tracks_all[index]])
|
67 |
-
return repeat_inds
|
68 |
-
|
69 |
-
def get_unoccluded_instances(timestamps_final, tracks_all, ignore_ind=[], threshold = 0.01):
|
70 |
-
get_indexes = lambda x, xs: [i for (y, i) in zip(xs, range(len(xs))) if x==y]
|
71 |
-
unoccluded_indexes = []
|
72 |
-
time_checked = []
|
73 |
-
stationary_obj = []
|
74 |
-
count =0
|
75 |
-
|
76 |
-
for time in tqdm(np.unique(timestamps_final), desc="Detecting Unocclued objects in Image "):
|
77 |
-
count += 1
|
78 |
-
if [time.year,time.month, time.day, time.hour, time.minute, time.second, time.microsecond] in time_checked:
|
79 |
-
analyze_bb = []
|
80 |
-
for ind in unoccluded_indexes_time:
|
81 |
-
for ind_compare in same_time_instances:
|
82 |
-
iou = bb_intersection_over_union(tracks_all[ind], tracks_all[ind_compare])
|
83 |
-
if iou < 0.5 and iou > 0:
|
84 |
-
analyze_bb.extend([ind_compare])
|
85 |
-
if iou > 0.99:
|
86 |
-
stationary_obj.extend([str(ind_compare)+'+'+str(ind)])
|
87 |
-
|
88 |
-
for ind in analyze_bb:
|
89 |
-
occ = False
|
90 |
-
for ind_compare in same_time_instances:
|
91 |
-
if bb_intersection_over_union_unoccluded(tracks_all[ind], tracks_all[ind_compare], threshold=threshold) > threshold and ind_compare != ind:
|
92 |
-
occ = True
|
93 |
-
break
|
94 |
-
if occ == False:
|
95 |
-
unoccluded_indexes.extend([ind])
|
96 |
-
continue
|
97 |
-
|
98 |
-
same_time_instances = get_indexes(time,timestamps_final)
|
99 |
-
unoccluded_indexes_time = []
|
100 |
-
|
101 |
-
for ind in same_time_instances:
|
102 |
-
if tracks_all[ind][4] < 0.9 or ind in ignore_ind:# or ind != 1859:
|
103 |
-
continue
|
104 |
-
occ = False
|
105 |
-
for ind_compare in same_time_instances:
|
106 |
-
if bb_intersection_over_union_unoccluded(tracks_all[ind], tracks_all[ind_compare], threshold=threshold) > threshold and ind_compare != ind and tracks_all[ind_compare][4] < 0.5:
|
107 |
-
occ = True
|
108 |
-
break
|
109 |
-
if occ==False:
|
110 |
-
unoccluded_indexes.extend([ind])
|
111 |
-
unoccluded_indexes_time.extend([ind])
|
112 |
-
time_checked.append([time.year,time.month, time.day, time.hour, time.minute, time.second, time.microsecond])
|
113 |
-
return unoccluded_indexes,stationary_obj
|
114 |
-
|
115 |
-
def visualize_unoccluded_detection(timestamps_final,tracks_all,segmentation_all, unoccluded_indexes, cwalt_data_path, camera_name, ignore_ind=[]):
|
116 |
-
tracks_final = []
|
117 |
-
tracks_final.append([])
|
118 |
-
try:
|
119 |
-
os.mkdir(cwalt_data_path + '/' + camera_name+'_unoccluded_car_detection/')
|
120 |
-
except:
|
121 |
-
print('Unoccluded debugging exists')
|
122 |
-
|
123 |
-
for time in tqdm(np.unique(timestamps_final), desc="Visualizing Unocclued objects in Image "):
|
124 |
-
get_indexes = lambda x, xs: [i for (y, i) in zip(xs, range(len(xs))) if x==y]
|
125 |
-
ind = get_indexes(time, timestamps_final)
|
126 |
-
image_unocc = False
|
127 |
-
for index in ind:
|
128 |
-
if index not in unoccluded_indexes:
|
129 |
-
continue
|
130 |
-
else:
|
131 |
-
image_unocc = True
|
132 |
-
break
|
133 |
-
if image_unocc == False:
|
134 |
-
continue
|
135 |
-
|
136 |
-
for week_loop in range(5):
|
137 |
-
try:
|
138 |
-
image = np.array(Image.open(cwalt_data_path+'/week' +str(week_loop)+'/'+ str(time).replace(' ','T').replace(':','-').split('+')[0] + '.jpg'))
|
139 |
-
break
|
140 |
-
except:
|
141 |
-
continue
|
142 |
-
|
143 |
-
try:
|
144 |
-
mask = image*0
|
145 |
-
except:
|
146 |
-
print('image not found for ' + str(time).replace(' ','T').replace(':','-').split('+')[0] + '.jpg' )
|
147 |
-
continue
|
148 |
-
image_original = image.copy()
|
149 |
-
|
150 |
-
for index in ind:
|
151 |
-
track = tracks_all[index]
|
152 |
-
|
153 |
-
if index in ignore_ind:
|
154 |
-
continue
|
155 |
-
if index not in unoccluded_indexes:
|
156 |
-
continue
|
157 |
-
try:
|
158 |
-
bb_left, bb_top, bb_width, bb_height, confidence, id = track
|
159 |
-
except:
|
160 |
-
bb_left, bb_top, bb_width, bb_height, confidence = track
|
161 |
-
|
162 |
-
if confidence > 0.6:
|
163 |
-
mask = poly_seg(image, segmentation_all[index])
|
164 |
-
cv2.imwrite(cwalt_data_path + '/' + camera_name+'_unoccluded_car_detection/' + str(index)+'.png', mask[:, :, ::-1])
|
165 |
-
|
166 |
-
def repeated_indexes(tracks_all,ignore_ind, repeat_count = 10, unoccluded_indexes=None):
|
167 |
-
get_indexes = lambda x, xs: [i for (y, i) in zip(xs, range(len(xs))) if bb_intersection_over_union(x, y) > 0.8 and i not in ignore_ind]
|
168 |
-
repeat_ind = []
|
169 |
-
repeat_inds =[]
|
170 |
-
if unoccluded_indexes == None:
|
171 |
-
for index, track in enumerate(tracks_all):
|
172 |
-
if index in repeat_ind or index in ignore_ind:
|
173 |
-
continue
|
174 |
-
|
175 |
-
ind = get_indexes(track, tracks_all)
|
176 |
-
if len(ind) > repeat_count:
|
177 |
-
repeat_ind.extend(ind)
|
178 |
-
repeat_inds.append([ind,track])
|
179 |
-
else:
|
180 |
-
for index in unoccluded_indexes:
|
181 |
-
if index in repeat_ind or index in ignore_ind:
|
182 |
-
continue
|
183 |
-
ind = get_indexes(tracks_all[index], tracks_all)
|
184 |
-
if len(ind) > repeat_count:
|
185 |
-
repeat_ind.extend(ind)
|
186 |
-
repeat_inds.append([ind,tracks_all[index]])
|
187 |
-
|
188 |
-
|
189 |
-
return repeat_inds
|
190 |
-
|
191 |
-
def poly_seg(image, segm):
|
192 |
-
poly = np.array(segm).reshape((int(len(segm)/2), 2))
|
193 |
-
overlay = image.copy()
|
194 |
-
alpha = 0.5
|
195 |
-
cv2.fillPoly(overlay, [poly], color=(255, 255, 0))
|
196 |
-
cv2.addWeighted(overlay, alpha, image, 1 - alpha, 0, image)
|
197 |
-
return image
|
198 |
-
|
199 |
-
def visualize_unoccuded_clusters(repeat_inds, tracks, segmentation_all, timestamps_final, cwalt_data_path):
|
200 |
-
for index_, repeat_ind in enumerate(repeat_inds):
|
201 |
-
image = np.array(Image.open(cwalt_data_path+'/'+'T18-median_image.jpg'))
|
202 |
-
try:
|
203 |
-
os.mkdir(cwalt_data_path+ '/Cwalt_database/')
|
204 |
-
except:
|
205 |
-
print('folder exists')
|
206 |
-
try:
|
207 |
-
os.mkdir(cwalt_data_path+ '/Cwalt_database/' + str(index_) +'/')
|
208 |
-
except:
|
209 |
-
print(cwalt_data_path+ '/Cwalt_database/' + str(index_) +'/')
|
210 |
-
|
211 |
-
for i in repeat_ind[0]:
|
212 |
-
try:
|
213 |
-
bb_left, bb_top, bb_width, bb_height, confidence = tracks[i]#bbox
|
214 |
-
except:
|
215 |
-
bb_left, bb_top, bb_width, bb_height, confidence, track_id = tracks[i]#bbox
|
216 |
-
|
217 |
-
cv2.rectangle(image,(int(bb_left), int(bb_top)),(int(bb_left+bb_width), int(bb_top+bb_height)),(0, 0, 255), 2)
|
218 |
-
time = timestamps_final[i]
|
219 |
-
for week_loop in range(5):
|
220 |
-
try:
|
221 |
-
image1 = np.array(Image.open(cwalt_data_path+'/week' +str(week_loop)+'/'+ str(time).replace(' ','T').replace(':','-').split('+')[0] + '.jpg'))
|
222 |
-
break
|
223 |
-
except:
|
224 |
-
continue
|
225 |
-
|
226 |
-
crop = image1[int(bb_top): int(bb_top + bb_height), int(bb_left):int(bb_left + bb_width)]
|
227 |
-
cv2.imwrite(cwalt_data_path+ '/Cwalt_database/' + str(index_) +'/o_' + str(i) +'.jpg', crop[:, :, ::-1])
|
228 |
-
image1 = poly_seg(image1,segmentation_all[i])
|
229 |
-
crop = image1[int(bb_top): int(bb_top + bb_height), int(bb_left):int(bb_left + bb_width)]
|
230 |
-
cv2.imwrite(cwalt_data_path+ '/Cwalt_database/' + str(index_) +'/' + str(i)+'.jpg', crop[:, :, ::-1])
|
231 |
-
if index_ > 100:
|
232 |
-
break
|
233 |
-
|
234 |
-
cv2.imwrite(cwalt_data_path+ '/Cwalt_database/' + str(index_) +'.jpg', image[:, :, ::-1])
|
235 |
-
|
236 |
-
def Get_unoccluded_objects(camera_name, debug = False, scale=True):
|
237 |
-
cwalt_data_path = 'data/' + camera_name
|
238 |
-
data_folder = cwalt_data_path
|
239 |
-
json_file_path = cwalt_data_path + '/' + camera_name + '.json'
|
240 |
-
|
241 |
-
with open(json_file_path, 'r') as j:
|
242 |
-
annotations = json.loads(j.read())
|
243 |
-
|
244 |
-
tracks_all = [parse_bbox(anno['bbox']) for anno in annotations]
|
245 |
-
segmentation_all = [parse_bbox(anno['segmentation']) for anno in annotations]
|
246 |
-
labels_all = [anno['label_id'] for anno in annotations]
|
247 |
-
timestamps_final = [parse(anno['time']) for anno in annotations]
|
248 |
-
|
249 |
-
if scale ==True:
|
250 |
-
scale_factor = 2
|
251 |
-
tracks_all_numpy = np.array(tracks_all)
|
252 |
-
tracks_all_numpy[:,:4] = np.array(tracks_all)[:,:4]/scale_factor
|
253 |
-
tracks_all = tracks_all_numpy.tolist()
|
254 |
-
|
255 |
-
segmentation_all_scaled = []
|
256 |
-
for list_loop in segmentation_all:
|
257 |
-
segmentation_all_scaled.append((np.floor_divide(np.array(list_loop),scale_factor)).tolist())
|
258 |
-
segmentation_all = segmentation_all_scaled
|
259 |
-
|
260 |
-
if debug == True:
|
261 |
-
timestamps_final = timestamps_final[:1000]
|
262 |
-
labels_all = labels_all[:1000]
|
263 |
-
segmentation_all = segmentation_all[:1000]
|
264 |
-
tracks_all = tracks_all[:1000]
|
265 |
-
|
266 |
-
unoccluded_indexes, stationary = get_unoccluded_instances(timestamps_final, tracks_all, threshold = 0.05)
|
267 |
-
if debug == True:
|
268 |
-
visualize_unoccluded_detection(timestamps_final, tracks_all, segmentation_all, unoccluded_indexes, cwalt_data_path, camera_name)
|
269 |
-
|
270 |
-
tracks_all_unoccluded = [tracks_all[i] for i in unoccluded_indexes]
|
271 |
-
segmentation_all_unoccluded = [segmentation_all[i] for i in unoccluded_indexes]
|
272 |
-
labels_all_unoccluded = [labels_all[i] for i in unoccluded_indexes]
|
273 |
-
timestamps_final_unoccluded = [timestamps_final[i] for i in unoccluded_indexes]
|
274 |
-
np.savez(json_file_path,tracks_all_unoccluded=tracks_all_unoccluded, segmentation_all_unoccluded=segmentation_all_unoccluded, labels_all_unoccluded=labels_all_unoccluded, timestamps_final_unoccluded=timestamps_final_unoccluded )
|
275 |
-
|
276 |
-
if debug == True:
|
277 |
-
repeat_inds_clusters = repeated_indexes(tracks_all_unoccluded,[], repeat_count=1)
|
278 |
-
visualize_unoccuded_clusters(repeat_inds_clusters, tracks_all_unoccluded, segmentation_all_unoccluded, timestamps_final_unoccluded, cwalt_data_path)
|
279 |
-
else:
|
280 |
-
repeat_inds_clusters = repeated_indexes(tracks_all_unoccluded,[], repeat_count=10)
|
281 |
-
|
282 |
-
np.savez(json_file_path + '_clubbed', repeat_inds=repeat_inds_clusters)
|
283 |
-
np.savez(json_file_path + '_stationary', stationary=stationary)
|
284 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/WALT/walt/datasets/pipelines/loading.py
DELETED
@@ -1,465 +0,0 @@
|
|
1 |
-
import os.path as osp
|
2 |
-
|
3 |
-
import mmcv
|
4 |
-
import numpy as np
|
5 |
-
import pycocotools.mask as maskUtils
|
6 |
-
|
7 |
-
from mmdet.core import BitmapMasks, PolygonMasks
|
8 |
-
from ..builder import PIPELINES
|
9 |
-
|
10 |
-
|
11 |
-
@PIPELINES.register_module()
|
12 |
-
class LoadImageFromFile(object):
|
13 |
-
"""Load an image from file.
|
14 |
-
|
15 |
-
Required keys are "img_prefix" and "img_info" (a dict that must contain the
|
16 |
-
key "filename"). Added or updated keys are "filename", "img", "img_shape",
|
17 |
-
"ori_shape" (same as `img_shape`), "pad_shape" (same as `img_shape`),
|
18 |
-
"scale_factor" (1.0) and "img_norm_cfg" (means=0 and stds=1).
|
19 |
-
|
20 |
-
Args:
|
21 |
-
to_float32 (bool): Whether to convert the loaded image to a float32
|
22 |
-
numpy array. If set to False, the loaded image is an uint8 array.
|
23 |
-
Defaults to False.
|
24 |
-
color_type (str): The flag argument for :func:`mmcv.imfrombytes`.
|
25 |
-
Defaults to 'color'.
|
26 |
-
file_client_args (dict): Arguments to instantiate a FileClient.
|
27 |
-
See :class:`mmcv.fileio.FileClient` for details.
|
28 |
-
Defaults to ``dict(backend='disk')``.
|
29 |
-
"""
|
30 |
-
|
31 |
-
def __init__(self,
|
32 |
-
to_float32=False,
|
33 |
-
color_type='color',
|
34 |
-
file_client_args=dict(backend='disk')):
|
35 |
-
self.to_float32 = to_float32
|
36 |
-
self.color_type = color_type
|
37 |
-
self.file_client_args = file_client_args.copy()
|
38 |
-
self.file_client = None
|
39 |
-
|
40 |
-
def __call__(self, results):
|
41 |
-
"""Call functions to load image and get image meta information.
|
42 |
-
|
43 |
-
Args:
|
44 |
-
results (dict): Result dict from :obj:`mmdet.CustomDataset`.
|
45 |
-
|
46 |
-
Returns:
|
47 |
-
dict: The dict contains loaded image and meta information.
|
48 |
-
"""
|
49 |
-
|
50 |
-
if self.file_client is None:
|
51 |
-
self.file_client = mmcv.FileClient(**self.file_client_args)
|
52 |
-
|
53 |
-
if results['img_prefix'] is not None:
|
54 |
-
filename = osp.join(results['img_prefix'],
|
55 |
-
results['img_info']['filename'])
|
56 |
-
else:
|
57 |
-
filename = results['img_info']['filename']
|
58 |
-
|
59 |
-
img_bytes = self.file_client.get(filename)
|
60 |
-
img = mmcv.imfrombytes(img_bytes, flag=self.color_type)
|
61 |
-
if self.to_float32:
|
62 |
-
img = img.astype(np.float32)
|
63 |
-
|
64 |
-
results['filename'] = filename
|
65 |
-
results['ori_filename'] = results['img_info']['filename']
|
66 |
-
results['img'] = img
|
67 |
-
results['img_shape'] = img.shape
|
68 |
-
results['ori_shape'] = img.shape
|
69 |
-
results['img_fields'] = ['img']
|
70 |
-
return results
|
71 |
-
|
72 |
-
def __repr__(self):
|
73 |
-
repr_str = (f'{self.__class__.__name__}('
|
74 |
-
f'to_float32={self.to_float32}, '
|
75 |
-
f"color_type='{self.color_type}', "
|
76 |
-
f'file_client_args={self.file_client_args})')
|
77 |
-
return repr_str
|
78 |
-
|
79 |
-
|
80 |
-
@PIPELINES.register_module()
|
81 |
-
class LoadImageFromWebcam(LoadImageFromFile):
|
82 |
-
"""Load an image from webcam.
|
83 |
-
|
84 |
-
Similar with :obj:`LoadImageFromFile`, but the image read from webcam is in
|
85 |
-
``results['img']``.
|
86 |
-
"""
|
87 |
-
|
88 |
-
def __call__(self, results):
|
89 |
-
"""Call functions to add image meta information.
|
90 |
-
|
91 |
-
Args:
|
92 |
-
results (dict): Result dict with Webcam read image in
|
93 |
-
``results['img']``.
|
94 |
-
|
95 |
-
Returns:
|
96 |
-
dict: The dict contains loaded image and meta information.
|
97 |
-
"""
|
98 |
-
|
99 |
-
img = results['img']
|
100 |
-
if self.to_float32:
|
101 |
-
img = img.astype(np.float32)
|
102 |
-
|
103 |
-
results['filename'] = None
|
104 |
-
results['ori_filename'] = None
|
105 |
-
results['img'] = img
|
106 |
-
results['img_shape'] = img.shape
|
107 |
-
results['ori_shape'] = img.shape
|
108 |
-
results['img_fields'] = ['img']
|
109 |
-
return results
|
110 |
-
|
111 |
-
|
112 |
-
@PIPELINES.register_module()
|
113 |
-
class LoadMultiChannelImageFromFiles(object):
|
114 |
-
"""Load multi-channel images from a list of separate channel files.
|
115 |
-
|
116 |
-
Required keys are "img_prefix" and "img_info" (a dict that must contain the
|
117 |
-
key "filename", which is expected to be a list of filenames).
|
118 |
-
Added or updated keys are "filename", "img", "img_shape",
|
119 |
-
"ori_shape" (same as `img_shape`), "pad_shape" (same as `img_shape`),
|
120 |
-
"scale_factor" (1.0) and "img_norm_cfg" (means=0 and stds=1).
|
121 |
-
|
122 |
-
Args:
|
123 |
-
to_float32 (bool): Whether to convert the loaded image to a float32
|
124 |
-
numpy array. If set to False, the loaded image is an uint8 array.
|
125 |
-
Defaults to False.
|
126 |
-
color_type (str): The flag argument for :func:`mmcv.imfrombytes`.
|
127 |
-
Defaults to 'color'.
|
128 |
-
file_client_args (dict): Arguments to instantiate a FileClient.
|
129 |
-
See :class:`mmcv.fileio.FileClient` for details.
|
130 |
-
Defaults to ``dict(backend='disk')``.
|
131 |
-
"""
|
132 |
-
|
133 |
-
def __init__(self,
|
134 |
-
to_float32=False,
|
135 |
-
color_type='unchanged',
|
136 |
-
file_client_args=dict(backend='disk')):
|
137 |
-
self.to_float32 = to_float32
|
138 |
-
self.color_type = color_type
|
139 |
-
self.file_client_args = file_client_args.copy()
|
140 |
-
self.file_client = None
|
141 |
-
|
142 |
-
def __call__(self, results):
|
143 |
-
"""Call functions to load multiple images and get images meta
|
144 |
-
information.
|
145 |
-
|
146 |
-
Args:
|
147 |
-
results (dict): Result dict from :obj:`mmdet.CustomDataset`.
|
148 |
-
|
149 |
-
Returns:
|
150 |
-
dict: The dict contains loaded images and meta information.
|
151 |
-
"""
|
152 |
-
|
153 |
-
if self.file_client is None:
|
154 |
-
self.file_client = mmcv.FileClient(**self.file_client_args)
|
155 |
-
|
156 |
-
if results['img_prefix'] is not None:
|
157 |
-
filename = [
|
158 |
-
osp.join(results['img_prefix'], fname)
|
159 |
-
for fname in results['img_info']['filename']
|
160 |
-
]
|
161 |
-
else:
|
162 |
-
filename = results['img_info']['filename']
|
163 |
-
|
164 |
-
img = []
|
165 |
-
for name in filename:
|
166 |
-
img_bytes = self.file_client.get(name)
|
167 |
-
img.append(mmcv.imfrombytes(img_bytes, flag=self.color_type))
|
168 |
-
img = np.stack(img, axis=-1)
|
169 |
-
if self.to_float32:
|
170 |
-
img = img.astype(np.float32)
|
171 |
-
|
172 |
-
results['filename'] = filename
|
173 |
-
results['ori_filename'] = results['img_info']['filename']
|
174 |
-
results['img'] = img
|
175 |
-
results['img_shape'] = img.shape
|
176 |
-
results['ori_shape'] = img.shape
|
177 |
-
# Set initial values for default meta_keys
|
178 |
-
results['pad_shape'] = img.shape
|
179 |
-
results['scale_factor'] = 1.0
|
180 |
-
num_channels = 1 if len(img.shape) < 3 else img.shape[2]
|
181 |
-
results['img_norm_cfg'] = dict(
|
182 |
-
mean=np.zeros(num_channels, dtype=np.float32),
|
183 |
-
std=np.ones(num_channels, dtype=np.float32),
|
184 |
-
to_rgb=False)
|
185 |
-
return results
|
186 |
-
|
187 |
-
def __repr__(self):
|
188 |
-
repr_str = (f'{self.__class__.__name__}('
|
189 |
-
f'to_float32={self.to_float32}, '
|
190 |
-
f"color_type='{self.color_type}', "
|
191 |
-
f'file_client_args={self.file_client_args})')
|
192 |
-
return repr_str
|
193 |
-
|
194 |
-
|
195 |
-
@PIPELINES.register_module()
|
196 |
-
class LoadAnnotations(object):
|
197 |
-
"""Load mutiple types of annotations.
|
198 |
-
|
199 |
-
Args:
|
200 |
-
with_bbox (bool): Whether to parse and load the bbox annotation.
|
201 |
-
Default: True.
|
202 |
-
with_label (bool): Whether to parse and load the label annotation.
|
203 |
-
Default: True.
|
204 |
-
with_mask (bool): Whether to parse and load the mask annotation.
|
205 |
-
Default: False.
|
206 |
-
with_seg (bool): Whether to parse and load the semantic segmentation
|
207 |
-
annotation. Default: False.
|
208 |
-
poly2mask (bool): Whether to convert the instance masks from polygons
|
209 |
-
to bitmaps. Default: True.
|
210 |
-
file_client_args (dict): Arguments to instantiate a FileClient.
|
211 |
-
See :class:`mmcv.fileio.FileClient` for details.
|
212 |
-
Defaults to ``dict(backend='disk')``.
|
213 |
-
"""
|
214 |
-
|
215 |
-
def __init__(self,
|
216 |
-
with_bbox=True,
|
217 |
-
with_label=True,
|
218 |
-
with_mask=False,
|
219 |
-
with_seg=False,
|
220 |
-
poly2mask=True,
|
221 |
-
file_client_args=dict(backend='disk')):
|
222 |
-
self.with_bbox = with_bbox
|
223 |
-
self.with_label = with_label
|
224 |
-
self.with_mask = with_mask
|
225 |
-
self.with_seg = with_seg
|
226 |
-
self.poly2mask = poly2mask
|
227 |
-
self.file_client_args = file_client_args.copy()
|
228 |
-
self.file_client = None
|
229 |
-
|
230 |
-
def _load_bboxes(self, results):
|
231 |
-
"""Private function to load bounding box annotations.
|
232 |
-
|
233 |
-
Args:
|
234 |
-
results (dict): Result dict from :obj:`mmdet.CustomDataset`.
|
235 |
-
|
236 |
-
Returns:
|
237 |
-
dict: The dict contains loaded bounding box annotations.
|
238 |
-
"""
|
239 |
-
|
240 |
-
ann_info = results['ann_info']
|
241 |
-
results['gt_bboxes'] = ann_info['bboxes'].copy()
|
242 |
-
try:
|
243 |
-
results['gt_bboxes_3d'] = ann_info['bboxes_3d'].copy()
|
244 |
-
results['gt_bboxes_3d_proj'] = ann_info['bboxes_3d_proj'].copy()
|
245 |
-
results['bbox3d_fields'].append('gt_bboxes_3d')
|
246 |
-
results['bbox3d_fields'].append('gt_bboxes_3d_proj')
|
247 |
-
except:
|
248 |
-
print('3d data not loaded')
|
249 |
-
|
250 |
-
gt_bboxes_ignore = ann_info.get('bboxes_ignore', None)
|
251 |
-
if gt_bboxes_ignore is not None:
|
252 |
-
results['gt_bboxes_ignore'] = gt_bboxes_ignore.copy()
|
253 |
-
results['bbox_fields'].append('gt_bboxes_ignore')
|
254 |
-
results['bbox_fields'].append('gt_bboxes')
|
255 |
-
return results
|
256 |
-
|
257 |
-
def _load_labels(self, results):
|
258 |
-
"""Private function to load label annotations.
|
259 |
-
|
260 |
-
Args:
|
261 |
-
results (dict): Result dict from :obj:`mmdet.CustomDataset`.
|
262 |
-
|
263 |
-
Returns:
|
264 |
-
dict: The dict contains loaded label annotations.
|
265 |
-
"""
|
266 |
-
|
267 |
-
results['gt_labels'] = results['ann_info']['labels'].copy()
|
268 |
-
return results
|
269 |
-
|
270 |
-
def _poly2mask(self, mask_ann, img_h, img_w):
|
271 |
-
"""Private function to convert masks represented with polygon to
|
272 |
-
bitmaps.
|
273 |
-
|
274 |
-
Args:
|
275 |
-
mask_ann (list | dict): Polygon mask annotation input.
|
276 |
-
img_h (int): The height of output mask.
|
277 |
-
img_w (int): The width of output mask.
|
278 |
-
|
279 |
-
Returns:
|
280 |
-
numpy.ndarray: The decode bitmap mask of shape (img_h, img_w).
|
281 |
-
"""
|
282 |
-
|
283 |
-
if isinstance(mask_ann, list):
|
284 |
-
# polygon -- a single object might consist of multiple parts
|
285 |
-
# we merge all parts into one mask rle code
|
286 |
-
rles = maskUtils.frPyObjects(mask_ann, img_h, img_w)
|
287 |
-
rle = maskUtils.merge(rles)
|
288 |
-
elif isinstance(mask_ann['counts'], list):
|
289 |
-
# uncompressed RLE
|
290 |
-
rle = maskUtils.frPyObjects(mask_ann, img_h, img_w)
|
291 |
-
else:
|
292 |
-
# rle
|
293 |
-
rle = mask_ann
|
294 |
-
mask = maskUtils.decode(rle)
|
295 |
-
return mask
|
296 |
-
|
297 |
-
def process_polygons(self, polygons):
|
298 |
-
"""Convert polygons to list of ndarray and filter invalid polygons.
|
299 |
-
|
300 |
-
Args:
|
301 |
-
polygons (list[list]): Polygons of one instance.
|
302 |
-
|
303 |
-
Returns:
|
304 |
-
list[numpy.ndarray]: Processed polygons.
|
305 |
-
"""
|
306 |
-
|
307 |
-
polygons = [np.array(p) for p in polygons]
|
308 |
-
valid_polygons = []
|
309 |
-
for polygon in polygons:
|
310 |
-
if len(polygon) % 2 == 0 and len(polygon) >= 6:
|
311 |
-
valid_polygons.append(polygon)
|
312 |
-
return valid_polygons
|
313 |
-
|
314 |
-
def _load_masks(self, results):
|
315 |
-
"""Private function to load mask annotations.
|
316 |
-
|
317 |
-
Args:
|
318 |
-
results (dict): Result dict from :obj:`mmdet.CustomDataset`.
|
319 |
-
|
320 |
-
Returns:
|
321 |
-
dict: The dict contains loaded mask annotations.
|
322 |
-
If ``self.poly2mask`` is set ``True``, `gt_mask` will contain
|
323 |
-
:obj:`PolygonMasks`. Otherwise, :obj:`BitmapMasks` is used.
|
324 |
-
"""
|
325 |
-
|
326 |
-
h, w = results['img_info']['height'], results['img_info']['width']
|
327 |
-
gt_masks = results['ann_info']['masks']
|
328 |
-
if self.poly2mask:
|
329 |
-
gt_masks = BitmapMasks(
|
330 |
-
[self._poly2mask(mask, h, w) for mask in gt_masks], h, w)
|
331 |
-
else:
|
332 |
-
gt_masks = PolygonMasks(
|
333 |
-
[self.process_polygons(polygons) for polygons in gt_masks], h,
|
334 |
-
w)
|
335 |
-
results['gt_masks'] = gt_masks
|
336 |
-
results['mask_fields'].append('gt_masks')
|
337 |
-
return results
|
338 |
-
|
339 |
-
def _load_semantic_seg(self, results):
|
340 |
-
"""Private function to load semantic segmentation annotations.
|
341 |
-
|
342 |
-
Args:
|
343 |
-
results (dict): Result dict from :obj:`dataset`.
|
344 |
-
|
345 |
-
Returns:
|
346 |
-
dict: The dict contains loaded semantic segmentation annotations.
|
347 |
-
"""
|
348 |
-
|
349 |
-
if self.file_client is None:
|
350 |
-
self.file_client = mmcv.FileClient(**self.file_client_args)
|
351 |
-
|
352 |
-
filename = osp.join(results['seg_prefix'],
|
353 |
-
results['ann_info']['seg_map'])
|
354 |
-
img_bytes = self.file_client.get(filename)
|
355 |
-
results['gt_semantic_seg'] = mmcv.imfrombytes(
|
356 |
-
img_bytes, flag='unchanged').squeeze()
|
357 |
-
results['seg_fields'].append('gt_semantic_seg')
|
358 |
-
return results
|
359 |
-
|
360 |
-
def __call__(self, results):
|
361 |
-
"""Call function to load multiple types annotations.
|
362 |
-
|
363 |
-
Args:
|
364 |
-
results (dict): Result dict from :obj:`mmdet.CustomDataset`.
|
365 |
-
|
366 |
-
Returns:
|
367 |
-
dict: The dict contains loaded bounding box, label, mask and
|
368 |
-
semantic segmentation annotations.
|
369 |
-
"""
|
370 |
-
|
371 |
-
if self.with_bbox:
|
372 |
-
results = self._load_bboxes(results)
|
373 |
-
if results is None:
|
374 |
-
return None
|
375 |
-
if self.with_label:
|
376 |
-
results = self._load_labels(results)
|
377 |
-
if self.with_mask:
|
378 |
-
results = self._load_masks(results)
|
379 |
-
if self.with_seg:
|
380 |
-
results = self._load_semantic_seg(results)
|
381 |
-
return results
|
382 |
-
|
383 |
-
def __repr__(self):
|
384 |
-
repr_str = self.__class__.__name__
|
385 |
-
repr_str += f'(with_bbox={self.with_bbox}, '
|
386 |
-
repr_str += f'with_label={self.with_label}, '
|
387 |
-
repr_str += f'with_mask={self.with_mask}, '
|
388 |
-
repr_str += f'with_seg={self.with_seg}, '
|
389 |
-
repr_str += f'poly2mask={self.poly2mask}, '
|
390 |
-
repr_str += f'poly2mask={self.file_client_args})'
|
391 |
-
return repr_str
|
392 |
-
|
393 |
-
|
394 |
-
@PIPELINES.register_module()
|
395 |
-
class LoadProposals(object):
|
396 |
-
"""Load proposal pipeline.
|
397 |
-
|
398 |
-
Required key is "proposals". Updated keys are "proposals", "bbox_fields".
|
399 |
-
|
400 |
-
Args:
|
401 |
-
num_max_proposals (int, optional): Maximum number of proposals to load.
|
402 |
-
If not specified, all proposals will be loaded.
|
403 |
-
"""
|
404 |
-
|
405 |
-
def __init__(self, num_max_proposals=None):
|
406 |
-
self.num_max_proposals = num_max_proposals
|
407 |
-
|
408 |
-
def __call__(self, results):
|
409 |
-
"""Call function to load proposals from file.
|
410 |
-
|
411 |
-
Args:
|
412 |
-
results (dict): Result dict from :obj:`mmdet.CustomDataset`.
|
413 |
-
|
414 |
-
Returns:
|
415 |
-
dict: The dict contains loaded proposal annotations.
|
416 |
-
"""
|
417 |
-
|
418 |
-
proposals = results['proposals']
|
419 |
-
if proposals.shape[1] not in (4, 5):
|
420 |
-
raise AssertionError(
|
421 |
-
'proposals should have shapes (n, 4) or (n, 5), '
|
422 |
-
f'but found {proposals.shape}')
|
423 |
-
proposals = proposals[:, :4]
|
424 |
-
|
425 |
-
if self.num_max_proposals is not None:
|
426 |
-
proposals = proposals[:self.num_max_proposals]
|
427 |
-
|
428 |
-
if len(proposals) == 0:
|
429 |
-
proposals = np.array([[0, 0, 0, 0]], dtype=np.float32)
|
430 |
-
results['proposals'] = proposals
|
431 |
-
results['bbox_fields'].append('proposals')
|
432 |
-
return results
|
433 |
-
|
434 |
-
def __repr__(self):
|
435 |
-
return self.__class__.__name__ + \
|
436 |
-
f'(num_max_proposals={self.num_max_proposals})'
|
437 |
-
|
438 |
-
|
439 |
-
@PIPELINES.register_module()
|
440 |
-
class FilterAnnotations(object):
|
441 |
-
"""Filter invalid annotations.
|
442 |
-
|
443 |
-
Args:
|
444 |
-
min_gt_bbox_wh (tuple[int]): Minimum width and height of ground truth
|
445 |
-
boxes.
|
446 |
-
"""
|
447 |
-
|
448 |
-
def __init__(self, min_gt_bbox_wh):
|
449 |
-
# TODO: add more filter options
|
450 |
-
self.min_gt_bbox_wh = min_gt_bbox_wh
|
451 |
-
|
452 |
-
def __call__(self, results):
|
453 |
-
assert 'gt_bboxes' in results
|
454 |
-
gt_bboxes = results['gt_bboxes']
|
455 |
-
w = gt_bboxes[:, 2] - gt_bboxes[:, 0]
|
456 |
-
h = gt_bboxes[:, 3] - gt_bboxes[:, 1]
|
457 |
-
keep = (w > self.min_gt_bbox_wh[0]) & (h > self.min_gt_bbox_wh[1])
|
458 |
-
if not keep.any():
|
459 |
-
return None
|
460 |
-
else:
|
461 |
-
keys = ('gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg')
|
462 |
-
for key in keys:
|
463 |
-
if key in results:
|
464 |
-
results[key] = results[key][keep]
|
465 |
-
return results
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/lama-example/saicinpainting/training/modules/multidilated_conv.py
DELETED
@@ -1,98 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
import torch.nn as nn
|
3 |
-
import random
|
4 |
-
from saicinpainting.training.modules.depthwise_sep_conv import DepthWiseSeperableConv
|
5 |
-
|
6 |
-
class MultidilatedConv(nn.Module):
|
7 |
-
def __init__(self, in_dim, out_dim, kernel_size, dilation_num=3, comb_mode='sum', equal_dim=True,
|
8 |
-
shared_weights=False, padding=1, min_dilation=1, shuffle_in_channels=False, use_depthwise=False, **kwargs):
|
9 |
-
super().__init__()
|
10 |
-
convs = []
|
11 |
-
self.equal_dim = equal_dim
|
12 |
-
assert comb_mode in ('cat_out', 'sum', 'cat_in', 'cat_both'), comb_mode
|
13 |
-
if comb_mode in ('cat_out', 'cat_both'):
|
14 |
-
self.cat_out = True
|
15 |
-
if equal_dim:
|
16 |
-
assert out_dim % dilation_num == 0
|
17 |
-
out_dims = [out_dim // dilation_num] * dilation_num
|
18 |
-
self.index = sum([[i + j * (out_dims[0]) for j in range(dilation_num)] for i in range(out_dims[0])], [])
|
19 |
-
else:
|
20 |
-
out_dims = [out_dim // 2 ** (i + 1) for i in range(dilation_num - 1)]
|
21 |
-
out_dims.append(out_dim - sum(out_dims))
|
22 |
-
index = []
|
23 |
-
starts = [0] + out_dims[:-1]
|
24 |
-
lengths = [out_dims[i] // out_dims[-1] for i in range(dilation_num)]
|
25 |
-
for i in range(out_dims[-1]):
|
26 |
-
for j in range(dilation_num):
|
27 |
-
index += list(range(starts[j], starts[j] + lengths[j]))
|
28 |
-
starts[j] += lengths[j]
|
29 |
-
self.index = index
|
30 |
-
assert(len(index) == out_dim)
|
31 |
-
self.out_dims = out_dims
|
32 |
-
else:
|
33 |
-
self.cat_out = False
|
34 |
-
self.out_dims = [out_dim] * dilation_num
|
35 |
-
|
36 |
-
if comb_mode in ('cat_in', 'cat_both'):
|
37 |
-
if equal_dim:
|
38 |
-
assert in_dim % dilation_num == 0
|
39 |
-
in_dims = [in_dim // dilation_num] * dilation_num
|
40 |
-
else:
|
41 |
-
in_dims = [in_dim // 2 ** (i + 1) for i in range(dilation_num - 1)]
|
42 |
-
in_dims.append(in_dim - sum(in_dims))
|
43 |
-
self.in_dims = in_dims
|
44 |
-
self.cat_in = True
|
45 |
-
else:
|
46 |
-
self.cat_in = False
|
47 |
-
self.in_dims = [in_dim] * dilation_num
|
48 |
-
|
49 |
-
conv_type = DepthWiseSeperableConv if use_depthwise else nn.Conv2d
|
50 |
-
dilation = min_dilation
|
51 |
-
for i in range(dilation_num):
|
52 |
-
if isinstance(padding, int):
|
53 |
-
cur_padding = padding * dilation
|
54 |
-
else:
|
55 |
-
cur_padding = padding[i]
|
56 |
-
convs.append(conv_type(
|
57 |
-
self.in_dims[i], self.out_dims[i], kernel_size, padding=cur_padding, dilation=dilation, **kwargs
|
58 |
-
))
|
59 |
-
if i > 0 and shared_weights:
|
60 |
-
convs[-1].weight = convs[0].weight
|
61 |
-
convs[-1].bias = convs[0].bias
|
62 |
-
dilation *= 2
|
63 |
-
self.convs = nn.ModuleList(convs)
|
64 |
-
|
65 |
-
self.shuffle_in_channels = shuffle_in_channels
|
66 |
-
if self.shuffle_in_channels:
|
67 |
-
# shuffle list as shuffling of tensors is nondeterministic
|
68 |
-
in_channels_permute = list(range(in_dim))
|
69 |
-
random.shuffle(in_channels_permute)
|
70 |
-
# save as buffer so it is saved and loaded with checkpoint
|
71 |
-
self.register_buffer('in_channels_permute', torch.tensor(in_channels_permute))
|
72 |
-
|
73 |
-
def forward(self, x):
|
74 |
-
if self.shuffle_in_channels:
|
75 |
-
x = x[:, self.in_channels_permute]
|
76 |
-
|
77 |
-
outs = []
|
78 |
-
if self.cat_in:
|
79 |
-
if self.equal_dim:
|
80 |
-
x = x.chunk(len(self.convs), dim=1)
|
81 |
-
else:
|
82 |
-
new_x = []
|
83 |
-
start = 0
|
84 |
-
for dim in self.in_dims:
|
85 |
-
new_x.append(x[:, start:start+dim])
|
86 |
-
start += dim
|
87 |
-
x = new_x
|
88 |
-
for i, conv in enumerate(self.convs):
|
89 |
-
if self.cat_in:
|
90 |
-
input = x[i]
|
91 |
-
else:
|
92 |
-
input = x
|
93 |
-
outs.append(conv(input))
|
94 |
-
if self.cat_out:
|
95 |
-
out = torch.cat(outs, dim=1)[:, self.index]
|
96 |
-
else:
|
97 |
-
out = sum(outs)
|
98 |
-
return out
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/regionclip-demo/detectron2/modeling/roi_heads/cascade_rcnn.py
DELETED
@@ -1,298 +0,0 @@
|
|
1 |
-
# Copyright (c) Facebook, Inc. and its affiliates.
|
2 |
-
from typing import List
|
3 |
-
import torch
|
4 |
-
from torch import nn
|
5 |
-
from torch.autograd.function import Function
|
6 |
-
|
7 |
-
from detectron2.config import configurable
|
8 |
-
from detectron2.layers import ShapeSpec
|
9 |
-
from detectron2.structures import Boxes, Instances, pairwise_iou
|
10 |
-
from detectron2.utils.events import get_event_storage
|
11 |
-
|
12 |
-
from ..box_regression import Box2BoxTransform
|
13 |
-
from ..matcher import Matcher
|
14 |
-
from ..poolers import ROIPooler
|
15 |
-
from .box_head import build_box_head
|
16 |
-
from .fast_rcnn import FastRCNNOutputLayers, fast_rcnn_inference
|
17 |
-
from .roi_heads import ROI_HEADS_REGISTRY, StandardROIHeads
|
18 |
-
|
19 |
-
|
20 |
-
class _ScaleGradient(Function):
|
21 |
-
@staticmethod
|
22 |
-
def forward(ctx, input, scale):
|
23 |
-
ctx.scale = scale
|
24 |
-
return input
|
25 |
-
|
26 |
-
@staticmethod
|
27 |
-
def backward(ctx, grad_output):
|
28 |
-
return grad_output * ctx.scale, None
|
29 |
-
|
30 |
-
|
31 |
-
@ROI_HEADS_REGISTRY.register()
|
32 |
-
class CascadeROIHeads(StandardROIHeads):
|
33 |
-
"""
|
34 |
-
The ROI heads that implement :paper:`Cascade R-CNN`.
|
35 |
-
"""
|
36 |
-
|
37 |
-
@configurable
|
38 |
-
def __init__(
|
39 |
-
self,
|
40 |
-
*,
|
41 |
-
box_in_features: List[str],
|
42 |
-
box_pooler: ROIPooler,
|
43 |
-
box_heads: List[nn.Module],
|
44 |
-
box_predictors: List[nn.Module],
|
45 |
-
proposal_matchers: List[Matcher],
|
46 |
-
**kwargs,
|
47 |
-
):
|
48 |
-
"""
|
49 |
-
NOTE: this interface is experimental.
|
50 |
-
|
51 |
-
Args:
|
52 |
-
box_pooler (ROIPooler): pooler that extracts region features from given boxes
|
53 |
-
box_heads (list[nn.Module]): box head for each cascade stage
|
54 |
-
box_predictors (list[nn.Module]): box predictor for each cascade stage
|
55 |
-
proposal_matchers (list[Matcher]): matcher with different IoU thresholds to
|
56 |
-
match boxes with ground truth for each stage. The first matcher matches
|
57 |
-
RPN proposals with ground truth, the other matchers use boxes predicted
|
58 |
-
by the previous stage as proposals and match them with ground truth.
|
59 |
-
"""
|
60 |
-
assert "proposal_matcher" not in kwargs, (
|
61 |
-
"CascadeROIHeads takes 'proposal_matchers=' for each stage instead "
|
62 |
-
"of one 'proposal_matcher='."
|
63 |
-
)
|
64 |
-
# The first matcher matches RPN proposals with ground truth, done in the base class
|
65 |
-
kwargs["proposal_matcher"] = proposal_matchers[0]
|
66 |
-
num_stages = self.num_cascade_stages = len(box_heads)
|
67 |
-
box_heads = nn.ModuleList(box_heads)
|
68 |
-
box_predictors = nn.ModuleList(box_predictors)
|
69 |
-
assert len(box_predictors) == num_stages, f"{len(box_predictors)} != {num_stages}!"
|
70 |
-
assert len(proposal_matchers) == num_stages, f"{len(proposal_matchers)} != {num_stages}!"
|
71 |
-
super().__init__(
|
72 |
-
box_in_features=box_in_features,
|
73 |
-
box_pooler=box_pooler,
|
74 |
-
box_head=box_heads,
|
75 |
-
box_predictor=box_predictors,
|
76 |
-
**kwargs,
|
77 |
-
)
|
78 |
-
self.proposal_matchers = proposal_matchers
|
79 |
-
|
80 |
-
@classmethod
|
81 |
-
def from_config(cls, cfg, input_shape):
|
82 |
-
ret = super().from_config(cfg, input_shape)
|
83 |
-
ret.pop("proposal_matcher")
|
84 |
-
return ret
|
85 |
-
|
86 |
-
@classmethod
|
87 |
-
def _init_box_head(cls, cfg, input_shape):
|
88 |
-
# fmt: off
|
89 |
-
in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES
|
90 |
-
pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
|
91 |
-
pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features)
|
92 |
-
sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
|
93 |
-
pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
|
94 |
-
cascade_bbox_reg_weights = cfg.MODEL.ROI_BOX_CASCADE_HEAD.BBOX_REG_WEIGHTS
|
95 |
-
cascade_ious = cfg.MODEL.ROI_BOX_CASCADE_HEAD.IOUS
|
96 |
-
assert len(cascade_bbox_reg_weights) == len(cascade_ious)
|
97 |
-
assert cfg.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG, \
|
98 |
-
"CascadeROIHeads only support class-agnostic regression now!"
|
99 |
-
assert cascade_ious[0] == cfg.MODEL.ROI_HEADS.IOU_THRESHOLDS[0]
|
100 |
-
# fmt: on
|
101 |
-
|
102 |
-
in_channels = [input_shape[f].channels for f in in_features]
|
103 |
-
# Check all channel counts are equal
|
104 |
-
assert len(set(in_channels)) == 1, in_channels
|
105 |
-
in_channels = in_channels[0]
|
106 |
-
|
107 |
-
box_pooler = ROIPooler(
|
108 |
-
output_size=pooler_resolution,
|
109 |
-
scales=pooler_scales,
|
110 |
-
sampling_ratio=sampling_ratio,
|
111 |
-
pooler_type=pooler_type,
|
112 |
-
)
|
113 |
-
pooled_shape = ShapeSpec(
|
114 |
-
channels=in_channels, width=pooler_resolution, height=pooler_resolution
|
115 |
-
)
|
116 |
-
|
117 |
-
box_heads, box_predictors, proposal_matchers = [], [], []
|
118 |
-
for match_iou, bbox_reg_weights in zip(cascade_ious, cascade_bbox_reg_weights):
|
119 |
-
box_head = build_box_head(cfg, pooled_shape)
|
120 |
-
box_heads.append(box_head)
|
121 |
-
box_predictors.append(
|
122 |
-
FastRCNNOutputLayers(
|
123 |
-
cfg,
|
124 |
-
box_head.output_shape,
|
125 |
-
box2box_transform=Box2BoxTransform(weights=bbox_reg_weights),
|
126 |
-
)
|
127 |
-
)
|
128 |
-
proposal_matchers.append(Matcher([match_iou], [0, 1], allow_low_quality_matches=False))
|
129 |
-
return {
|
130 |
-
"box_in_features": in_features,
|
131 |
-
"box_pooler": box_pooler,
|
132 |
-
"box_heads": box_heads,
|
133 |
-
"box_predictors": box_predictors,
|
134 |
-
"proposal_matchers": proposal_matchers,
|
135 |
-
}
|
136 |
-
|
137 |
-
def forward(self, images, features, proposals, targets=None):
|
138 |
-
del images
|
139 |
-
if self.training:
|
140 |
-
proposals = self.label_and_sample_proposals(proposals, targets)
|
141 |
-
|
142 |
-
if self.training:
|
143 |
-
# Need targets to box head
|
144 |
-
losses = self._forward_box(features, proposals, targets)
|
145 |
-
losses.update(self._forward_mask(features, proposals))
|
146 |
-
losses.update(self._forward_keypoint(features, proposals))
|
147 |
-
return proposals, losses
|
148 |
-
else:
|
149 |
-
pred_instances = self._forward_box(features, proposals)
|
150 |
-
pred_instances = self.forward_with_given_boxes(features, pred_instances)
|
151 |
-
return pred_instances, {}
|
152 |
-
|
153 |
-
def _forward_box(self, features, proposals, targets=None):
|
154 |
-
"""
|
155 |
-
Args:
|
156 |
-
features, targets: the same as in
|
157 |
-
Same as in :meth:`ROIHeads.forward`.
|
158 |
-
proposals (list[Instances]): the per-image object proposals with
|
159 |
-
their matching ground truth.
|
160 |
-
Each has fields "proposal_boxes", and "objectness_logits",
|
161 |
-
"gt_classes", "gt_boxes".
|
162 |
-
"""
|
163 |
-
features = [features[f] for f in self.box_in_features]
|
164 |
-
head_outputs = [] # (predictor, predictions, proposals)
|
165 |
-
prev_pred_boxes = None
|
166 |
-
image_sizes = [x.image_size for x in proposals]
|
167 |
-
for k in range(self.num_cascade_stages):
|
168 |
-
if k > 0:
|
169 |
-
# The output boxes of the previous stage are used to create the input
|
170 |
-
# proposals of the next stage.
|
171 |
-
proposals = self._create_proposals_from_boxes(prev_pred_boxes, image_sizes)
|
172 |
-
if self.training:
|
173 |
-
proposals = self._match_and_label_boxes(proposals, k, targets)
|
174 |
-
predictions = self._run_stage(features, proposals, k)
|
175 |
-
prev_pred_boxes = self.box_predictor[k].predict_boxes(predictions, proposals)
|
176 |
-
head_outputs.append((self.box_predictor[k], predictions, proposals))
|
177 |
-
|
178 |
-
if self.training:
|
179 |
-
losses = {}
|
180 |
-
storage = get_event_storage()
|
181 |
-
for stage, (predictor, predictions, proposals) in enumerate(head_outputs):
|
182 |
-
with storage.name_scope("stage{}".format(stage)):
|
183 |
-
stage_losses = predictor.losses(predictions, proposals)
|
184 |
-
losses.update({k + "_stage{}".format(stage): v for k, v in stage_losses.items()})
|
185 |
-
return losses
|
186 |
-
else:
|
187 |
-
# Each is a list[Tensor] of length #image. Each tensor is Ri x (K+1)
|
188 |
-
scores_per_stage = [h[0].predict_probs(h[1], h[2]) for h in head_outputs]
|
189 |
-
|
190 |
-
# Average the scores across heads
|
191 |
-
scores = [
|
192 |
-
sum(list(scores_per_image)) * (1.0 / self.num_cascade_stages)
|
193 |
-
for scores_per_image in zip(*scores_per_stage)
|
194 |
-
]
|
195 |
-
# Use the boxes of the last head
|
196 |
-
predictor, predictions, proposals = head_outputs[-1]
|
197 |
-
boxes = predictor.predict_boxes(predictions, proposals)
|
198 |
-
pred_instances, _ = fast_rcnn_inference(
|
199 |
-
boxes,
|
200 |
-
scores,
|
201 |
-
image_sizes,
|
202 |
-
predictor.test_score_thresh,
|
203 |
-
predictor.test_nms_thresh,
|
204 |
-
predictor.test_topk_per_image,
|
205 |
-
)
|
206 |
-
return pred_instances
|
207 |
-
|
208 |
-
@torch.no_grad()
|
209 |
-
def _match_and_label_boxes(self, proposals, stage, targets):
|
210 |
-
"""
|
211 |
-
Match proposals with groundtruth using the matcher at the given stage.
|
212 |
-
Label the proposals as foreground or background based on the match.
|
213 |
-
|
214 |
-
Args:
|
215 |
-
proposals (list[Instances]): One Instances for each image, with
|
216 |
-
the field "proposal_boxes".
|
217 |
-
stage (int): the current stage
|
218 |
-
targets (list[Instances]): the ground truth instances
|
219 |
-
|
220 |
-
Returns:
|
221 |
-
list[Instances]: the same proposals, but with fields "gt_classes" and "gt_boxes"
|
222 |
-
"""
|
223 |
-
num_fg_samples, num_bg_samples = [], []
|
224 |
-
for proposals_per_image, targets_per_image in zip(proposals, targets):
|
225 |
-
match_quality_matrix = pairwise_iou(
|
226 |
-
targets_per_image.gt_boxes, proposals_per_image.proposal_boxes
|
227 |
-
)
|
228 |
-
# proposal_labels are 0 or 1
|
229 |
-
matched_idxs, proposal_labels = self.proposal_matchers[stage](match_quality_matrix)
|
230 |
-
if len(targets_per_image) > 0:
|
231 |
-
gt_classes = targets_per_image.gt_classes[matched_idxs]
|
232 |
-
# Label unmatched proposals (0 label from matcher) as background (label=num_classes)
|
233 |
-
gt_classes[proposal_labels == 0] = self.num_classes
|
234 |
-
gt_boxes = targets_per_image.gt_boxes[matched_idxs]
|
235 |
-
else:
|
236 |
-
gt_classes = torch.zeros_like(matched_idxs) + self.num_classes
|
237 |
-
gt_boxes = Boxes(
|
238 |
-
targets_per_image.gt_boxes.tensor.new_zeros((len(proposals_per_image), 4))
|
239 |
-
)
|
240 |
-
proposals_per_image.gt_classes = gt_classes
|
241 |
-
proposals_per_image.gt_boxes = gt_boxes
|
242 |
-
|
243 |
-
num_fg_samples.append((proposal_labels == 1).sum().item())
|
244 |
-
num_bg_samples.append(proposal_labels.numel() - num_fg_samples[-1])
|
245 |
-
|
246 |
-
# Log the number of fg/bg samples in each stage
|
247 |
-
storage = get_event_storage()
|
248 |
-
storage.put_scalar(
|
249 |
-
"stage{}/roi_head/num_fg_samples".format(stage),
|
250 |
-
sum(num_fg_samples) / len(num_fg_samples),
|
251 |
-
)
|
252 |
-
storage.put_scalar(
|
253 |
-
"stage{}/roi_head/num_bg_samples".format(stage),
|
254 |
-
sum(num_bg_samples) / len(num_bg_samples),
|
255 |
-
)
|
256 |
-
return proposals
|
257 |
-
|
258 |
-
def _run_stage(self, features, proposals, stage):
|
259 |
-
"""
|
260 |
-
Args:
|
261 |
-
features (list[Tensor]): #lvl input features to ROIHeads
|
262 |
-
proposals (list[Instances]): #image Instances, with the field "proposal_boxes"
|
263 |
-
stage (int): the current stage
|
264 |
-
|
265 |
-
Returns:
|
266 |
-
Same output as `FastRCNNOutputLayers.forward()`.
|
267 |
-
"""
|
268 |
-
box_features = self.box_pooler(features, [x.proposal_boxes for x in proposals])
|
269 |
-
# The original implementation averages the losses among heads,
|
270 |
-
# but scale up the parameter gradients of the heads.
|
271 |
-
# This is equivalent to adding the losses among heads,
|
272 |
-
# but scale down the gradients on features.
|
273 |
-
box_features = _ScaleGradient.apply(box_features, 1.0 / self.num_cascade_stages)
|
274 |
-
box_features = self.box_head[stage](box_features)
|
275 |
-
return self.box_predictor[stage](box_features)
|
276 |
-
|
277 |
-
def _create_proposals_from_boxes(self, boxes, image_sizes):
|
278 |
-
"""
|
279 |
-
Args:
|
280 |
-
boxes (list[Tensor]): per-image predicted boxes, each of shape Ri x 4
|
281 |
-
image_sizes (list[tuple]): list of image shapes in (h, w)
|
282 |
-
|
283 |
-
Returns:
|
284 |
-
list[Instances]: per-image proposals with the given boxes.
|
285 |
-
"""
|
286 |
-
# Just like RPN, the proposals should not have gradients
|
287 |
-
boxes = [Boxes(b.detach()) for b in boxes]
|
288 |
-
proposals = []
|
289 |
-
for boxes_per_image, image_size in zip(boxes, image_sizes):
|
290 |
-
boxes_per_image.clip(image_size)
|
291 |
-
if self.training:
|
292 |
-
# do not filter empty boxes at inference time,
|
293 |
-
# because the scores from each stage need to be aligned and added later
|
294 |
-
boxes_per_image = boxes_per_image[boxes_per_image.nonempty()]
|
295 |
-
prop = Instances(image_size)
|
296 |
-
prop.proposal_boxes = boxes_per_image
|
297 |
-
proposals.append(prop)
|
298 |
-
return proposals
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/__init__.py
DELETED
File without changes
|
spaces/CikeyQI/Yunzai/Yunzai/lib/plugins/runtime.js
DELETED
@@ -1,245 +0,0 @@
|
|
1 |
-
/**
|
2 |
-
* plugin的runtime,可通过e.runtime访问
|
3 |
-
*
|
4 |
-
* 提供一些常用的运行时变量、方法及model获取
|
5 |
-
* 降低对目录结构的依赖
|
6 |
-
*/
|
7 |
-
import lodash from 'lodash'
|
8 |
-
import fs from 'node:fs'
|
9 |
-
import gsCfg from '../../plugins/genshin/model/gsCfg.js'
|
10 |
-
import common from '../common/common.js'
|
11 |
-
import cfg from '../config/config.js'
|
12 |
-
import MysApi from '../../plugins/genshin/model/mys/mysApi.js'
|
13 |
-
import MysInfo from '../../plugins/genshin/model/mys/mysInfo.js'
|
14 |
-
import puppeteer from '../puppeteer/puppeteer.js'
|
15 |
-
import { Version } from '#miao'
|
16 |
-
import NoteUser from '../../plugins/genshin/model/mys/NoteUser.js'
|
17 |
-
import MysUser from '../../plugins/genshin/model/mys/MysUser.js'
|
18 |
-
import Handler from './handler.js'
|
19 |
-
|
20 |
-
/**
|
21 |
-
* 常用的处理方法
|
22 |
-
*/
|
23 |
-
|
24 |
-
export default class Runtime {
|
25 |
-
constructor (e) {
|
26 |
-
this.e = e
|
27 |
-
this._mysInfo = {}
|
28 |
-
|
29 |
-
this.handler = {
|
30 |
-
has: Handler.has,
|
31 |
-
call: Handler.call,
|
32 |
-
callAll: Handler.callAll
|
33 |
-
}
|
34 |
-
}
|
35 |
-
|
36 |
-
get uid () {
|
37 |
-
return this.user?.uid
|
38 |
-
}
|
39 |
-
|
40 |
-
get hasCk () {
|
41 |
-
return this.user?.hasCk
|
42 |
-
}
|
43 |
-
|
44 |
-
get user () {
|
45 |
-
return this.e.user
|
46 |
-
}
|
47 |
-
|
48 |
-
get cfg () {
|
49 |
-
return cfg
|
50 |
-
}
|
51 |
-
|
52 |
-
get gsCfg () {
|
53 |
-
return gsCfg
|
54 |
-
}
|
55 |
-
|
56 |
-
get common () {
|
57 |
-
return common
|
58 |
-
}
|
59 |
-
|
60 |
-
get puppeteer () {
|
61 |
-
return puppeteer
|
62 |
-
}
|
63 |
-
|
64 |
-
get MysInfo () {
|
65 |
-
return MysInfo
|
66 |
-
}
|
67 |
-
|
68 |
-
get NoteUser () {
|
69 |
-
return NoteUser
|
70 |
-
}
|
71 |
-
|
72 |
-
get MysUser () {
|
73 |
-
return MysUser
|
74 |
-
}
|
75 |
-
|
76 |
-
static async init (e) {
|
77 |
-
await MysInfo.initCache()
|
78 |
-
let runtime = new Runtime(e)
|
79 |
-
e.runtime = runtime
|
80 |
-
e.game = e.isSr ? 'sr' : 'gs'
|
81 |
-
await runtime.initUser()
|
82 |
-
return runtime
|
83 |
-
}
|
84 |
-
|
85 |
-
async initUser () {
|
86 |
-
let e = this.e
|
87 |
-
let user = await NoteUser.create(e)
|
88 |
-
if (user) {
|
89 |
-
e.user = new Proxy(user, {
|
90 |
-
get (self, key, receiver) {
|
91 |
-
let game = e.isSr ? 'sr' : 'gs'
|
92 |
-
let fnMap = {
|
93 |
-
uid: 'getUid',
|
94 |
-
uidList: 'getUidList',
|
95 |
-
mysUser: 'getMysUser',
|
96 |
-
ckUidList: 'getCkUidList'
|
97 |
-
}
|
98 |
-
if (fnMap[key]) {
|
99 |
-
return self[fnMap[key]](game)
|
100 |
-
}
|
101 |
-
if (key === 'uidData') {
|
102 |
-
return self.getUidData('', game)
|
103 |
-
}
|
104 |
-
if (['getUid', 'getUidList', 'getMysUser', 'getCkUidList', 'getUidMapList', 'getGameDs'].includes(key)) {
|
105 |
-
return (_game, arg2) => {
|
106 |
-
return self[key](_game || game, arg2)
|
107 |
-
}
|
108 |
-
}
|
109 |
-
if (['getUidData', 'hasUid', 'addRegUid', 'delRegUid', 'setMainUid'].includes(key)) {
|
110 |
-
return (uid, _game = '') => {
|
111 |
-
return self[key](uid, _game || game)
|
112 |
-
}
|
113 |
-
}
|
114 |
-
return self[key]
|
115 |
-
}
|
116 |
-
})
|
117 |
-
}
|
118 |
-
}
|
119 |
-
|
120 |
-
/**
|
121 |
-
* 获取MysInfo实例
|
122 |
-
*
|
123 |
-
* @param targetType all: 所有用户均可, cookie:查询用户必须具备Cookie
|
124 |
-
* @returns {Promise<boolean|MysInfo>}
|
125 |
-
*/
|
126 |
-
async getMysInfo (targetType = 'all') {
|
127 |
-
if (!this._mysInfo[targetType]) {
|
128 |
-
this._mysInfo[targetType] = await MysInfo.init(this.e, targetType === 'cookie' ? 'detail' : 'roleIndex')
|
129 |
-
}
|
130 |
-
return this._mysInfo[targetType]
|
131 |
-
}
|
132 |
-
|
133 |
-
async getUid () {
|
134 |
-
return await MysInfo.getUid(this.e)
|
135 |
-
}
|
136 |
-
|
137 |
-
/**
|
138 |
-
* 获取MysApi实例
|
139 |
-
*
|
140 |
-
* @param targetType all: 所有用户均可, cookie:查询用户必须具备Cookie
|
141 |
-
* @param option MysApi option
|
142 |
-
* @returns {Promise<boolean|MysApi>}
|
143 |
-
*/
|
144 |
-
async getMysApi (targetType = 'all', option = {}) {
|
145 |
-
let mys = await this.getMysInfo(targetType)
|
146 |
-
if (mys.uid && mys?.ckInfo?.ck) {
|
147 |
-
return new MysApi(mys.uid, mys.ckInfo.ck, option)
|
148 |
-
}
|
149 |
-
return false
|
150 |
-
}
|
151 |
-
|
152 |
-
/**
|
153 |
-
* 生成MysApi实例
|
154 |
-
* @param uid
|
155 |
-
* @param ck
|
156 |
-
* @param option
|
157 |
-
* @returns {Promise<MysApi>}
|
158 |
-
*/
|
159 |
-
async createMysApi (uid, ck, option) {
|
160 |
-
return new MysApi(uid, ck, option)
|
161 |
-
}
|
162 |
-
|
163 |
-
/**
|
164 |
-
*
|
165 |
-
* @param plugin plugin key
|
166 |
-
* @param path html文件路径,相对于plugin resources目录
|
167 |
-
* @param data 渲染数据
|
168 |
-
* @param cfg 渲染配置
|
169 |
-
* @param cfg.retType 返回值类型
|
170 |
-
* * default/空:自动发送图片,返回true
|
171 |
-
* * msgId:自动发送图片,返回msg id
|
172 |
-
* * base64: 不自动发送图像,返回图像base64数据
|
173 |
-
* @param cfg.beforeRender({data}) 可改写渲染的data数据
|
174 |
-
* @returns {Promise<boolean>}
|
175 |
-
*/
|
176 |
-
async render (plugin, path, data = {}, cfg = {}) {
|
177 |
-
// 处理传入的path
|
178 |
-
path = path.replace(/.html$/, '')
|
179 |
-
let paths = lodash.filter(path.split('/'), (p) => !!p)
|
180 |
-
path = paths.join('/')
|
181 |
-
// 创建目录
|
182 |
-
const mkdir = (check) => {
|
183 |
-
let currDir = `${process.cwd()}/temp`
|
184 |
-
for (let p of check.split('/')) {
|
185 |
-
currDir = `${currDir}/${p}`
|
186 |
-
if (!fs.existsSync(currDir)) {
|
187 |
-
fs.mkdirSync(currDir)
|
188 |
-
}
|
189 |
-
}
|
190 |
-
return currDir
|
191 |
-
}
|
192 |
-
mkdir(`html/${plugin}/${path}`)
|
193 |
-
// 自动计算pluResPath
|
194 |
-
let pluResPath = `../../../${lodash.repeat('../', paths.length)}plugins/${plugin}/resources/`
|
195 |
-
let miaoResPath = `../../../${lodash.repeat('../', paths.length)}plugins/miao-plugin/resources/`
|
196 |
-
const layoutPath = process.cwd() + '/plugins/miao-plugin/resources/common/layout/'
|
197 |
-
// 渲染data
|
198 |
-
data = {
|
199 |
-
sys: {
|
200 |
-
scale: 1
|
201 |
-
},
|
202 |
-
/** miao 相关参数 **/
|
203 |
-
copyright: `Created By TRSS-Yunzai<span class="version">${Version.yunzai}</span> `,
|
204 |
-
_res_path: pluResPath,
|
205 |
-
_miao_path: miaoResPath,
|
206 |
-
_tpl_path: process.cwd() + '/plugins/miao-plugin/resources/common/tpl/',
|
207 |
-
defaultLayout: layoutPath + 'default.html',
|
208 |
-
elemLayout: layoutPath + 'elem.html',
|
209 |
-
|
210 |
-
...data,
|
211 |
-
|
212 |
-
/** 默认参数 **/
|
213 |
-
_plugin: plugin,
|
214 |
-
_htmlPath: path,
|
215 |
-
pluResPath,
|
216 |
-
tplFile: `./plugins/${plugin}/resources/${path}.html`,
|
217 |
-
saveId: data.saveId || data.save_id || paths[paths.length - 1],
|
218 |
-
pageGotoParams: {
|
219 |
-
waitUntil: 'networkidle2'
|
220 |
-
}
|
221 |
-
}
|
222 |
-
// 处理beforeRender
|
223 |
-
if (cfg.beforeRender) {
|
224 |
-
data = cfg.beforeRender({ data }) || data
|
225 |
-
}
|
226 |
-
// 保存模板数据
|
227 |
-
if (process.argv.includes('dev')) {
|
228 |
-
// debug下保存当前页面的渲染数据,方便模板编写与调试
|
229 |
-
// 由于只用于调试,开发者只关注自己当时开发的文件即可,暂不考虑app及plugin的命名冲突
|
230 |
-
let saveDir = mkdir(`ViewData/${plugin}`)
|
231 |
-
let file = `${saveDir}/${data._htmlPath.split('/').join('_')}.json`
|
232 |
-
fs.writeFileSync(file, JSON.stringify(data))
|
233 |
-
}
|
234 |
-
// 截图
|
235 |
-
let base64 = await puppeteer.screenshot(`${plugin}/${path}`, data)
|
236 |
-
if (cfg.retType === 'base64') {
|
237 |
-
return base64
|
238 |
-
}
|
239 |
-
let ret = true
|
240 |
-
if (base64) {
|
241 |
-
ret = await this.e.reply(base64)
|
242 |
-
}
|
243 |
-
return cfg.retType === 'msgId' ? ret : true
|
244 |
-
}
|
245 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CjangCjengh/Sanskrit-TTS/text/cleaners.py
DELETED
@@ -1,5 +0,0 @@
|
|
1 |
-
def sanskrit_cleaners(text):
|
2 |
-
text = text.replace('॥', '।').replace('ॐ', 'ओम्')
|
3 |
-
if len(text)==0 or text[-1] != '।':
|
4 |
-
text += ' ।'
|
5 |
-
return text
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/modeling/backbone/fbnet_builder.py
DELETED
@@ -1,829 +0,0 @@
|
|
1 |
-
"""
|
2 |
-
FBNet model builder
|
3 |
-
"""
|
4 |
-
|
5 |
-
from __future__ import absolute_import, division, print_function, unicode_literals
|
6 |
-
|
7 |
-
import copy
|
8 |
-
import logging
|
9 |
-
import math
|
10 |
-
from collections import OrderedDict
|
11 |
-
|
12 |
-
import torch
|
13 |
-
import torch.nn as nn
|
14 |
-
from maskrcnn_benchmark.layers import (
|
15 |
-
BatchNorm2d,
|
16 |
-
Conv2d,
|
17 |
-
FrozenBatchNorm2d,
|
18 |
-
interpolate,
|
19 |
-
)
|
20 |
-
from maskrcnn_benchmark.layers.misc import _NewEmptyTensorOp
|
21 |
-
|
22 |
-
|
23 |
-
logger = logging.getLogger(__name__)
|
24 |
-
|
25 |
-
|
26 |
-
def _py2_round(x):
|
27 |
-
return math.floor(x + 0.5) if x >= 0.0 else math.ceil(x - 0.5)
|
28 |
-
|
29 |
-
|
30 |
-
def _get_divisible_by(num, divisible_by, min_val):
|
31 |
-
ret = int(num)
|
32 |
-
if divisible_by > 0 and num % divisible_by != 0:
|
33 |
-
ret = int((_py2_round(num / divisible_by) or min_val) * divisible_by)
|
34 |
-
return ret
|
35 |
-
|
36 |
-
|
37 |
-
PRIMITIVES = {
|
38 |
-
"skip": lambda C_in, C_out, expansion, stride, **kwargs: Identity(
|
39 |
-
C_in, C_out, stride
|
40 |
-
),
|
41 |
-
"ir_k3": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
|
42 |
-
C_in, C_out, expansion, stride, **kwargs
|
43 |
-
),
|
44 |
-
"ir_k5": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
|
45 |
-
C_in, C_out, expansion, stride, kernel=5, **kwargs
|
46 |
-
),
|
47 |
-
"ir_k7": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
|
48 |
-
C_in, C_out, expansion, stride, kernel=7, **kwargs
|
49 |
-
),
|
50 |
-
"ir_k1": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
|
51 |
-
C_in, C_out, expansion, stride, kernel=1, **kwargs
|
52 |
-
),
|
53 |
-
"shuffle": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
|
54 |
-
C_in, C_out, expansion, stride, shuffle_type="mid", pw_group=4, **kwargs
|
55 |
-
),
|
56 |
-
"basic_block": lambda C_in, C_out, expansion, stride, **kwargs: CascadeConv3x3(
|
57 |
-
C_in, C_out, stride
|
58 |
-
),
|
59 |
-
"shift_5x5": lambda C_in, C_out, expansion, stride, **kwargs: ShiftBlock5x5(
|
60 |
-
C_in, C_out, expansion, stride
|
61 |
-
),
|
62 |
-
# layer search 2
|
63 |
-
"ir_k3_e1": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
|
64 |
-
C_in, C_out, 1, stride, kernel=3, **kwargs
|
65 |
-
),
|
66 |
-
"ir_k3_e3": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
|
67 |
-
C_in, C_out, 3, stride, kernel=3, **kwargs
|
68 |
-
),
|
69 |
-
"ir_k3_e6": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
|
70 |
-
C_in, C_out, 6, stride, kernel=3, **kwargs
|
71 |
-
),
|
72 |
-
"ir_k3_s4": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
|
73 |
-
C_in, C_out, 4, stride, kernel=3, shuffle_type="mid", pw_group=4, **kwargs
|
74 |
-
),
|
75 |
-
"ir_k5_e1": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
|
76 |
-
C_in, C_out, 1, stride, kernel=5, **kwargs
|
77 |
-
),
|
78 |
-
"ir_k5_e3": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
|
79 |
-
C_in, C_out, 3, stride, kernel=5, **kwargs
|
80 |
-
),
|
81 |
-
"ir_k5_e6": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
|
82 |
-
C_in, C_out, 6, stride, kernel=5, **kwargs
|
83 |
-
),
|
84 |
-
"ir_k5_s4": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
|
85 |
-
C_in, C_out, 4, stride, kernel=5, shuffle_type="mid", pw_group=4, **kwargs
|
86 |
-
),
|
87 |
-
# layer search se
|
88 |
-
"ir_k3_e1_se": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
|
89 |
-
C_in, C_out, 1, stride, kernel=3, se=True, **kwargs
|
90 |
-
),
|
91 |
-
"ir_k3_e3_se": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
|
92 |
-
C_in, C_out, 3, stride, kernel=3, se=True, **kwargs
|
93 |
-
),
|
94 |
-
"ir_k3_e6_se": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
|
95 |
-
C_in, C_out, 6, stride, kernel=3, se=True, **kwargs
|
96 |
-
),
|
97 |
-
"ir_k3_s4_se": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
|
98 |
-
C_in,
|
99 |
-
C_out,
|
100 |
-
4,
|
101 |
-
stride,
|
102 |
-
kernel=3,
|
103 |
-
shuffle_type="mid",
|
104 |
-
pw_group=4,
|
105 |
-
se=True,
|
106 |
-
**kwargs
|
107 |
-
),
|
108 |
-
"ir_k5_e1_se": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
|
109 |
-
C_in, C_out, 1, stride, kernel=5, se=True, **kwargs
|
110 |
-
),
|
111 |
-
"ir_k5_e3_se": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
|
112 |
-
C_in, C_out, 3, stride, kernel=5, se=True, **kwargs
|
113 |
-
),
|
114 |
-
"ir_k5_e6_se": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
|
115 |
-
C_in, C_out, 6, stride, kernel=5, se=True, **kwargs
|
116 |
-
),
|
117 |
-
"ir_k5_s4_se": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
|
118 |
-
C_in,
|
119 |
-
C_out,
|
120 |
-
4,
|
121 |
-
stride,
|
122 |
-
kernel=5,
|
123 |
-
shuffle_type="mid",
|
124 |
-
pw_group=4,
|
125 |
-
se=True,
|
126 |
-
**kwargs
|
127 |
-
),
|
128 |
-
# layer search 3 (in addition to layer search 2)
|
129 |
-
"ir_k3_s2": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
|
130 |
-
C_in, C_out, 1, stride, kernel=3, shuffle_type="mid", pw_group=2, **kwargs
|
131 |
-
),
|
132 |
-
"ir_k5_s2": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
|
133 |
-
C_in, C_out, 1, stride, kernel=5, shuffle_type="mid", pw_group=2, **kwargs
|
134 |
-
),
|
135 |
-
"ir_k3_s2_se": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
|
136 |
-
C_in,
|
137 |
-
C_out,
|
138 |
-
1,
|
139 |
-
stride,
|
140 |
-
kernel=3,
|
141 |
-
shuffle_type="mid",
|
142 |
-
pw_group=2,
|
143 |
-
se=True,
|
144 |
-
**kwargs
|
145 |
-
),
|
146 |
-
"ir_k5_s2_se": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
|
147 |
-
C_in,
|
148 |
-
C_out,
|
149 |
-
1,
|
150 |
-
stride,
|
151 |
-
kernel=5,
|
152 |
-
shuffle_type="mid",
|
153 |
-
pw_group=2,
|
154 |
-
se=True,
|
155 |
-
**kwargs
|
156 |
-
),
|
157 |
-
# layer search 4 (in addition to layer search 3)
|
158 |
-
"ir_k3_sep": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
|
159 |
-
C_in, C_out, expansion, stride, kernel=3, cdw=True, **kwargs
|
160 |
-
),
|
161 |
-
"ir_k33_e1": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
|
162 |
-
C_in, C_out, 1, stride, kernel=3, cdw=True, **kwargs
|
163 |
-
),
|
164 |
-
"ir_k33_e3": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
|
165 |
-
C_in, C_out, 3, stride, kernel=3, cdw=True, **kwargs
|
166 |
-
),
|
167 |
-
"ir_k33_e6": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
|
168 |
-
C_in, C_out, 6, stride, kernel=3, cdw=True, **kwargs
|
169 |
-
),
|
170 |
-
# layer search 5 (in addition to layer search 4)
|
171 |
-
"ir_k7_e1": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
|
172 |
-
C_in, C_out, 1, stride, kernel=7, **kwargs
|
173 |
-
),
|
174 |
-
"ir_k7_e3": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
|
175 |
-
C_in, C_out, 3, stride, kernel=7, **kwargs
|
176 |
-
),
|
177 |
-
"ir_k7_e6": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
|
178 |
-
C_in, C_out, 6, stride, kernel=7, **kwargs
|
179 |
-
),
|
180 |
-
"ir_k7_sep": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
|
181 |
-
C_in, C_out, expansion, stride, kernel=7, cdw=True, **kwargs
|
182 |
-
),
|
183 |
-
"ir_k7_sep_e1": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
|
184 |
-
C_in, C_out, 1, stride, kernel=7, cdw=True, **kwargs
|
185 |
-
),
|
186 |
-
"ir_k7_sep_e3": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
|
187 |
-
C_in, C_out, 3, stride, kernel=7, cdw=True, **kwargs
|
188 |
-
),
|
189 |
-
"ir_k7_sep_e6": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
|
190 |
-
C_in, C_out, 6, stride, kernel=7, cdw=True, **kwargs
|
191 |
-
),
|
192 |
-
}
|
193 |
-
|
194 |
-
|
195 |
-
class Identity(nn.Module):
|
196 |
-
def __init__(self, C_in, C_out, stride):
|
197 |
-
super(Identity, self).__init__()
|
198 |
-
self.conv = (
|
199 |
-
ConvBNRelu(
|
200 |
-
C_in,
|
201 |
-
C_out,
|
202 |
-
kernel=1,
|
203 |
-
stride=stride,
|
204 |
-
pad=0,
|
205 |
-
no_bias=1,
|
206 |
-
use_relu="relu",
|
207 |
-
bn_type="bn",
|
208 |
-
)
|
209 |
-
if C_in != C_out or stride != 1
|
210 |
-
else None
|
211 |
-
)
|
212 |
-
|
213 |
-
def forward(self, x):
|
214 |
-
if self.conv:
|
215 |
-
out = self.conv(x)
|
216 |
-
else:
|
217 |
-
out = x
|
218 |
-
return out
|
219 |
-
|
220 |
-
|
221 |
-
class CascadeConv3x3(nn.Sequential):
|
222 |
-
def __init__(self, C_in, C_out, stride):
|
223 |
-
assert stride in [1, 2]
|
224 |
-
ops = [
|
225 |
-
Conv2d(C_in, C_in, 3, stride, 1, bias=False),
|
226 |
-
BatchNorm2d(C_in),
|
227 |
-
nn.ReLU(inplace=True),
|
228 |
-
Conv2d(C_in, C_out, 3, 1, 1, bias=False),
|
229 |
-
BatchNorm2d(C_out),
|
230 |
-
]
|
231 |
-
super(CascadeConv3x3, self).__init__(*ops)
|
232 |
-
self.res_connect = (stride == 1) and (C_in == C_out)
|
233 |
-
|
234 |
-
def forward(self, x):
|
235 |
-
y = super(CascadeConv3x3, self).forward(x)
|
236 |
-
if self.res_connect:
|
237 |
-
y += x
|
238 |
-
return y
|
239 |
-
|
240 |
-
|
241 |
-
class Shift(nn.Module):
|
242 |
-
def __init__(self, C, kernel_size, stride, padding):
|
243 |
-
super(Shift, self).__init__()
|
244 |
-
self.C = C
|
245 |
-
kernel = torch.zeros((C, 1, kernel_size, kernel_size), dtype=torch.float32)
|
246 |
-
ch_idx = 0
|
247 |
-
|
248 |
-
assert stride in [1, 2]
|
249 |
-
self.stride = stride
|
250 |
-
self.padding = padding
|
251 |
-
self.kernel_size = kernel_size
|
252 |
-
self.dilation = 1
|
253 |
-
|
254 |
-
hks = kernel_size // 2
|
255 |
-
ksq = kernel_size ** 2
|
256 |
-
|
257 |
-
for i in range(kernel_size):
|
258 |
-
for j in range(kernel_size):
|
259 |
-
if i == hks and j == hks:
|
260 |
-
num_ch = C // ksq + C % ksq
|
261 |
-
else:
|
262 |
-
num_ch = C // ksq
|
263 |
-
kernel[ch_idx : ch_idx + num_ch, 0, i, j] = 1
|
264 |
-
ch_idx += num_ch
|
265 |
-
|
266 |
-
self.register_parameter("bias", None)
|
267 |
-
self.kernel = nn.Parameter(kernel, requires_grad=False)
|
268 |
-
|
269 |
-
def forward(self, x):
|
270 |
-
if x.numel() > 0:
|
271 |
-
return nn.functional.conv2d(
|
272 |
-
x,
|
273 |
-
self.kernel,
|
274 |
-
self.bias,
|
275 |
-
(self.stride, self.stride),
|
276 |
-
(self.padding, self.padding),
|
277 |
-
self.dilation,
|
278 |
-
self.C, # groups
|
279 |
-
)
|
280 |
-
|
281 |
-
output_shape = [
|
282 |
-
(i + 2 * p - (di * (k - 1) + 1)) // d + 1
|
283 |
-
for i, p, di, k, d in zip(
|
284 |
-
x.shape[-2:],
|
285 |
-
(self.padding, self.dilation),
|
286 |
-
(self.dilation, self.dilation),
|
287 |
-
(self.kernel_size, self.kernel_size),
|
288 |
-
(self.stride, self.stride),
|
289 |
-
)
|
290 |
-
]
|
291 |
-
output_shape = [x.shape[0], self.C] + output_shape
|
292 |
-
return _NewEmptyTensorOp.apply(x, output_shape)
|
293 |
-
|
294 |
-
|
295 |
-
class ShiftBlock5x5(nn.Sequential):
|
296 |
-
def __init__(self, C_in, C_out, expansion, stride):
|
297 |
-
assert stride in [1, 2]
|
298 |
-
self.res_connect = (stride == 1) and (C_in == C_out)
|
299 |
-
|
300 |
-
C_mid = _get_divisible_by(C_in * expansion, 8, 8)
|
301 |
-
|
302 |
-
ops = [
|
303 |
-
# pw
|
304 |
-
Conv2d(C_in, C_mid, 1, 1, 0, bias=False),
|
305 |
-
BatchNorm2d(C_mid),
|
306 |
-
nn.ReLU(inplace=True),
|
307 |
-
# shift
|
308 |
-
Shift(C_mid, 5, stride, 2),
|
309 |
-
# pw-linear
|
310 |
-
Conv2d(C_mid, C_out, 1, 1, 0, bias=False),
|
311 |
-
BatchNorm2d(C_out),
|
312 |
-
]
|
313 |
-
super(ShiftBlock5x5, self).__init__(*ops)
|
314 |
-
|
315 |
-
def forward(self, x):
|
316 |
-
y = super(ShiftBlock5x5, self).forward(x)
|
317 |
-
if self.res_connect:
|
318 |
-
y += x
|
319 |
-
return y
|
320 |
-
|
321 |
-
|
322 |
-
class ChannelShuffle(nn.Module):
|
323 |
-
def __init__(self, groups):
|
324 |
-
super(ChannelShuffle, self).__init__()
|
325 |
-
self.groups = groups
|
326 |
-
|
327 |
-
def forward(self, x):
|
328 |
-
"""Channel shuffle: [N,C,H,W] -> [N,g,C/g,H,W] -> [N,C/g,g,H,w] -> [N,C,H,W]"""
|
329 |
-
N, C, H, W = x.size()
|
330 |
-
g = self.groups
|
331 |
-
assert C % g == 0, "Incompatible group size {} for input channel {}".format(
|
332 |
-
g, C
|
333 |
-
)
|
334 |
-
return (
|
335 |
-
x.view(N, g, int(C / g), H, W)
|
336 |
-
.permute(0, 2, 1, 3, 4)
|
337 |
-
.contiguous()
|
338 |
-
.view(N, C, H, W)
|
339 |
-
)
|
340 |
-
|
341 |
-
|
342 |
-
class ConvBNRelu(nn.Sequential):
|
343 |
-
def __init__(
|
344 |
-
self,
|
345 |
-
input_depth,
|
346 |
-
output_depth,
|
347 |
-
kernel,
|
348 |
-
stride,
|
349 |
-
pad,
|
350 |
-
no_bias,
|
351 |
-
use_relu,
|
352 |
-
bn_type,
|
353 |
-
group=1,
|
354 |
-
*args,
|
355 |
-
**kwargs
|
356 |
-
):
|
357 |
-
super(ConvBNRelu, self).__init__()
|
358 |
-
|
359 |
-
assert use_relu in ["relu", None]
|
360 |
-
if isinstance(bn_type, (list, tuple)):
|
361 |
-
assert len(bn_type) == 2
|
362 |
-
assert bn_type[0] == "gn"
|
363 |
-
gn_group = bn_type[1]
|
364 |
-
bn_type = bn_type[0]
|
365 |
-
assert bn_type in ["bn", "af", "gn", None]
|
366 |
-
assert stride in [1, 2, 4]
|
367 |
-
|
368 |
-
op = Conv2d(
|
369 |
-
input_depth,
|
370 |
-
output_depth,
|
371 |
-
kernel_size=kernel,
|
372 |
-
stride=stride,
|
373 |
-
padding=pad,
|
374 |
-
bias=not no_bias,
|
375 |
-
groups=group,
|
376 |
-
*args,
|
377 |
-
**kwargs
|
378 |
-
)
|
379 |
-
nn.init.kaiming_normal_(op.weight, mode="fan_out", nonlinearity="relu")
|
380 |
-
if op.bias is not None:
|
381 |
-
nn.init.constant_(op.bias, 0.0)
|
382 |
-
self.add_module("conv", op)
|
383 |
-
|
384 |
-
if bn_type == "bn":
|
385 |
-
bn_op = BatchNorm2d(output_depth)
|
386 |
-
elif bn_type == "gn":
|
387 |
-
bn_op = nn.GroupNorm(num_groups=gn_group, num_channels=output_depth)
|
388 |
-
elif bn_type == "af":
|
389 |
-
bn_op = FrozenBatchNorm2d(output_depth)
|
390 |
-
if bn_type is not None:
|
391 |
-
self.add_module("bn", bn_op)
|
392 |
-
|
393 |
-
if use_relu == "relu":
|
394 |
-
self.add_module("relu", nn.ReLU(inplace=True))
|
395 |
-
|
396 |
-
|
397 |
-
class SEModule(nn.Module):
|
398 |
-
reduction = 4
|
399 |
-
|
400 |
-
def __init__(self, C):
|
401 |
-
super(SEModule, self).__init__()
|
402 |
-
mid = max(C // self.reduction, 8)
|
403 |
-
conv1 = Conv2d(C, mid, 1, 1, 0)
|
404 |
-
conv2 = Conv2d(mid, C, 1, 1, 0)
|
405 |
-
|
406 |
-
self.op = nn.Sequential(
|
407 |
-
nn.AdaptiveAvgPool2d(1), conv1, nn.ReLU(inplace=True), conv2, nn.Sigmoid()
|
408 |
-
)
|
409 |
-
|
410 |
-
def forward(self, x):
|
411 |
-
return x * self.op(x)
|
412 |
-
|
413 |
-
|
414 |
-
class Upsample(nn.Module):
|
415 |
-
def __init__(self, scale_factor, mode, align_corners=None):
|
416 |
-
super(Upsample, self).__init__()
|
417 |
-
self.scale = scale_factor
|
418 |
-
self.mode = mode
|
419 |
-
self.align_corners = align_corners
|
420 |
-
|
421 |
-
def forward(self, x):
|
422 |
-
return interpolate(
|
423 |
-
x, scale_factor=self.scale, mode=self.mode,
|
424 |
-
align_corners=self.align_corners
|
425 |
-
)
|
426 |
-
|
427 |
-
|
428 |
-
def _get_upsample_op(stride):
|
429 |
-
assert (
|
430 |
-
stride in [1, 2, 4]
|
431 |
-
or stride in [-1, -2, -4]
|
432 |
-
or (isinstance(stride, tuple) and all(x in [-1, -2, -4] for x in stride))
|
433 |
-
)
|
434 |
-
|
435 |
-
scales = stride
|
436 |
-
ret = None
|
437 |
-
if isinstance(stride, tuple) or stride < 0:
|
438 |
-
scales = [-x for x in stride] if isinstance(stride, tuple) else -stride
|
439 |
-
stride = 1
|
440 |
-
ret = Upsample(scale_factor=scales, mode="nearest", align_corners=None)
|
441 |
-
|
442 |
-
return ret, stride
|
443 |
-
|
444 |
-
|
445 |
-
class IRFBlock(nn.Module):
|
446 |
-
def __init__(
|
447 |
-
self,
|
448 |
-
input_depth,
|
449 |
-
output_depth,
|
450 |
-
expansion,
|
451 |
-
stride,
|
452 |
-
bn_type="bn",
|
453 |
-
kernel=3,
|
454 |
-
width_divisor=1,
|
455 |
-
shuffle_type=None,
|
456 |
-
pw_group=1,
|
457 |
-
se=False,
|
458 |
-
cdw=False,
|
459 |
-
dw_skip_bn=False,
|
460 |
-
dw_skip_relu=False,
|
461 |
-
):
|
462 |
-
super(IRFBlock, self).__init__()
|
463 |
-
|
464 |
-
assert kernel in [1, 3, 5, 7], kernel
|
465 |
-
|
466 |
-
self.use_res_connect = stride == 1 and input_depth == output_depth
|
467 |
-
self.output_depth = output_depth
|
468 |
-
|
469 |
-
mid_depth = int(input_depth * expansion)
|
470 |
-
mid_depth = _get_divisible_by(mid_depth, width_divisor, width_divisor)
|
471 |
-
|
472 |
-
# pw
|
473 |
-
self.pw = ConvBNRelu(
|
474 |
-
input_depth,
|
475 |
-
mid_depth,
|
476 |
-
kernel=1,
|
477 |
-
stride=1,
|
478 |
-
pad=0,
|
479 |
-
no_bias=1,
|
480 |
-
use_relu="relu",
|
481 |
-
bn_type=bn_type,
|
482 |
-
group=pw_group,
|
483 |
-
)
|
484 |
-
|
485 |
-
# negative stride to do upsampling
|
486 |
-
self.upscale, stride = _get_upsample_op(stride)
|
487 |
-
|
488 |
-
# dw
|
489 |
-
if kernel == 1:
|
490 |
-
self.dw = nn.Sequential()
|
491 |
-
elif cdw:
|
492 |
-
dw1 = ConvBNRelu(
|
493 |
-
mid_depth,
|
494 |
-
mid_depth,
|
495 |
-
kernel=kernel,
|
496 |
-
stride=stride,
|
497 |
-
pad=(kernel // 2),
|
498 |
-
group=mid_depth,
|
499 |
-
no_bias=1,
|
500 |
-
use_relu="relu",
|
501 |
-
bn_type=bn_type,
|
502 |
-
)
|
503 |
-
dw2 = ConvBNRelu(
|
504 |
-
mid_depth,
|
505 |
-
mid_depth,
|
506 |
-
kernel=kernel,
|
507 |
-
stride=1,
|
508 |
-
pad=(kernel // 2),
|
509 |
-
group=mid_depth,
|
510 |
-
no_bias=1,
|
511 |
-
use_relu="relu" if not dw_skip_relu else None,
|
512 |
-
bn_type=bn_type if not dw_skip_bn else None,
|
513 |
-
)
|
514 |
-
self.dw = nn.Sequential(OrderedDict([("dw1", dw1), ("dw2", dw2)]))
|
515 |
-
else:
|
516 |
-
self.dw = ConvBNRelu(
|
517 |
-
mid_depth,
|
518 |
-
mid_depth,
|
519 |
-
kernel=kernel,
|
520 |
-
stride=stride,
|
521 |
-
pad=(kernel // 2),
|
522 |
-
group=mid_depth,
|
523 |
-
no_bias=1,
|
524 |
-
use_relu="relu" if not dw_skip_relu else None,
|
525 |
-
bn_type=bn_type if not dw_skip_bn else None,
|
526 |
-
)
|
527 |
-
|
528 |
-
# pw-linear
|
529 |
-
self.pwl = ConvBNRelu(
|
530 |
-
mid_depth,
|
531 |
-
output_depth,
|
532 |
-
kernel=1,
|
533 |
-
stride=1,
|
534 |
-
pad=0,
|
535 |
-
no_bias=1,
|
536 |
-
use_relu=None,
|
537 |
-
bn_type=bn_type,
|
538 |
-
group=pw_group,
|
539 |
-
)
|
540 |
-
|
541 |
-
self.shuffle_type = shuffle_type
|
542 |
-
if shuffle_type is not None:
|
543 |
-
self.shuffle = ChannelShuffle(pw_group)
|
544 |
-
|
545 |
-
self.se4 = SEModule(output_depth) if se else nn.Sequential()
|
546 |
-
|
547 |
-
self.output_depth = output_depth
|
548 |
-
|
549 |
-
def forward(self, x):
|
550 |
-
y = self.pw(x)
|
551 |
-
if self.shuffle_type == "mid":
|
552 |
-
y = self.shuffle(y)
|
553 |
-
if self.upscale is not None:
|
554 |
-
y = self.upscale(y)
|
555 |
-
y = self.dw(y)
|
556 |
-
y = self.pwl(y)
|
557 |
-
if self.use_res_connect:
|
558 |
-
y += x
|
559 |
-
y = self.se4(y)
|
560 |
-
return y
|
561 |
-
|
562 |
-
|
563 |
-
def _expand_block_cfg(block_cfg):
|
564 |
-
assert isinstance(block_cfg, list)
|
565 |
-
ret = []
|
566 |
-
for idx in range(block_cfg[2]):
|
567 |
-
cur = copy.deepcopy(block_cfg)
|
568 |
-
cur[2] = 1
|
569 |
-
cur[3] = 1 if idx >= 1 else cur[3]
|
570 |
-
ret.append(cur)
|
571 |
-
return ret
|
572 |
-
|
573 |
-
|
574 |
-
def expand_stage_cfg(stage_cfg):
|
575 |
-
""" For a single stage """
|
576 |
-
assert isinstance(stage_cfg, list)
|
577 |
-
ret = []
|
578 |
-
for x in stage_cfg:
|
579 |
-
ret += _expand_block_cfg(x)
|
580 |
-
return ret
|
581 |
-
|
582 |
-
|
583 |
-
def expand_stages_cfg(stage_cfgs):
|
584 |
-
""" For a list of stages """
|
585 |
-
assert isinstance(stage_cfgs, list)
|
586 |
-
ret = []
|
587 |
-
for x in stage_cfgs:
|
588 |
-
ret.append(expand_stage_cfg(x))
|
589 |
-
return ret
|
590 |
-
|
591 |
-
|
592 |
-
def _block_cfgs_to_list(block_cfgs):
|
593 |
-
assert isinstance(block_cfgs, list)
|
594 |
-
ret = []
|
595 |
-
for stage_idx, stage in enumerate(block_cfgs):
|
596 |
-
stage = expand_stage_cfg(stage)
|
597 |
-
for block_idx, block in enumerate(stage):
|
598 |
-
cur = {"stage_idx": stage_idx, "block_idx": block_idx, "block": block}
|
599 |
-
ret.append(cur)
|
600 |
-
return ret
|
601 |
-
|
602 |
-
|
603 |
-
def _add_to_arch(arch, info, name):
|
604 |
-
""" arch = [{block_0}, {block_1}, ...]
|
605 |
-
info = [
|
606 |
-
# stage 0
|
607 |
-
[
|
608 |
-
block0_info,
|
609 |
-
block1_info,
|
610 |
-
...
|
611 |
-
], ...
|
612 |
-
]
|
613 |
-
convert to:
|
614 |
-
arch = [
|
615 |
-
{
|
616 |
-
block_0,
|
617 |
-
name: block0_info,
|
618 |
-
},
|
619 |
-
{
|
620 |
-
block_1,
|
621 |
-
name: block1_info,
|
622 |
-
}, ...
|
623 |
-
]
|
624 |
-
"""
|
625 |
-
assert isinstance(arch, list) and all(isinstance(x, dict) for x in arch)
|
626 |
-
assert isinstance(info, list) and all(isinstance(x, list) for x in info)
|
627 |
-
idx = 0
|
628 |
-
for stage_idx, stage in enumerate(info):
|
629 |
-
for block_idx, block in enumerate(stage):
|
630 |
-
assert (
|
631 |
-
arch[idx]["stage_idx"] == stage_idx
|
632 |
-
and arch[idx]["block_idx"] == block_idx
|
633 |
-
), "Index ({}, {}) does not match for block {}".format(
|
634 |
-
stage_idx, block_idx, arch[idx]
|
635 |
-
)
|
636 |
-
assert name not in arch[idx]
|
637 |
-
arch[idx][name] = block
|
638 |
-
idx += 1
|
639 |
-
|
640 |
-
|
641 |
-
def unify_arch_def(arch_def):
|
642 |
-
""" unify the arch_def to:
|
643 |
-
{
|
644 |
-
...,
|
645 |
-
"arch": [
|
646 |
-
{
|
647 |
-
"stage_idx": idx,
|
648 |
-
"block_idx": idx,
|
649 |
-
...
|
650 |
-
},
|
651 |
-
{}, ...
|
652 |
-
]
|
653 |
-
}
|
654 |
-
"""
|
655 |
-
ret = copy.deepcopy(arch_def)
|
656 |
-
|
657 |
-
assert "block_cfg" in arch_def and "stages" in arch_def["block_cfg"]
|
658 |
-
assert "stages" not in ret
|
659 |
-
# copy 'first', 'last' etc. inside arch_def['block_cfg'] to ret
|
660 |
-
ret.update({x: arch_def["block_cfg"][x] for x in arch_def["block_cfg"]})
|
661 |
-
ret["stages"] = _block_cfgs_to_list(arch_def["block_cfg"]["stages"])
|
662 |
-
del ret["block_cfg"]
|
663 |
-
|
664 |
-
assert "block_op_type" in arch_def
|
665 |
-
_add_to_arch(ret["stages"], arch_def["block_op_type"], "block_op_type")
|
666 |
-
del ret["block_op_type"]
|
667 |
-
|
668 |
-
return ret
|
669 |
-
|
670 |
-
|
671 |
-
def get_num_stages(arch_def):
|
672 |
-
ret = 0
|
673 |
-
for x in arch_def["stages"]:
|
674 |
-
ret = max(x["stage_idx"], ret)
|
675 |
-
ret = ret + 1
|
676 |
-
return ret
|
677 |
-
|
678 |
-
|
679 |
-
def get_blocks(arch_def, stage_indices=None, block_indices=None):
|
680 |
-
ret = copy.deepcopy(arch_def)
|
681 |
-
ret["stages"] = []
|
682 |
-
for block in arch_def["stages"]:
|
683 |
-
keep = True
|
684 |
-
if stage_indices not in (None, []) and block["stage_idx"] not in stage_indices:
|
685 |
-
keep = False
|
686 |
-
if block_indices not in (None, []) and block["block_idx"] not in block_indices:
|
687 |
-
keep = False
|
688 |
-
if keep:
|
689 |
-
ret["stages"].append(block)
|
690 |
-
return ret
|
691 |
-
|
692 |
-
|
693 |
-
class FBNetBuilder(object):
|
694 |
-
def __init__(
|
695 |
-
self,
|
696 |
-
width_ratio,
|
697 |
-
bn_type="bn",
|
698 |
-
width_divisor=1,
|
699 |
-
dw_skip_bn=False,
|
700 |
-
dw_skip_relu=False,
|
701 |
-
):
|
702 |
-
self.width_ratio = width_ratio
|
703 |
-
self.last_depth = -1
|
704 |
-
self.bn_type = bn_type
|
705 |
-
self.width_divisor = width_divisor
|
706 |
-
self.dw_skip_bn = dw_skip_bn
|
707 |
-
self.dw_skip_relu = dw_skip_relu
|
708 |
-
|
709 |
-
def add_first(self, stage_info, dim_in=3, pad=True):
|
710 |
-
# stage_info: [c, s, kernel]
|
711 |
-
assert len(stage_info) >= 2
|
712 |
-
channel = stage_info[0]
|
713 |
-
stride = stage_info[1]
|
714 |
-
out_depth = self._get_divisible_width(int(channel * self.width_ratio))
|
715 |
-
kernel = 3
|
716 |
-
if len(stage_info) > 2:
|
717 |
-
kernel = stage_info[2]
|
718 |
-
|
719 |
-
out = ConvBNRelu(
|
720 |
-
dim_in,
|
721 |
-
out_depth,
|
722 |
-
kernel=kernel,
|
723 |
-
stride=stride,
|
724 |
-
pad=kernel // 2 if pad else 0,
|
725 |
-
no_bias=1,
|
726 |
-
use_relu="relu",
|
727 |
-
bn_type=self.bn_type,
|
728 |
-
)
|
729 |
-
self.last_depth = out_depth
|
730 |
-
return out
|
731 |
-
|
732 |
-
def add_blocks(self, blocks):
|
733 |
-
""" blocks: [{}, {}, ...]
|
734 |
-
"""
|
735 |
-
assert isinstance(blocks, list) and all(
|
736 |
-
isinstance(x, dict) for x in blocks
|
737 |
-
), blocks
|
738 |
-
|
739 |
-
modules = OrderedDict()
|
740 |
-
for block in blocks:
|
741 |
-
stage_idx = block["stage_idx"]
|
742 |
-
block_idx = block["block_idx"]
|
743 |
-
block_op_type = block["block_op_type"]
|
744 |
-
tcns = block["block"]
|
745 |
-
n = tcns[2]
|
746 |
-
assert n == 1
|
747 |
-
nnblock = self.add_ir_block(tcns, [block_op_type])
|
748 |
-
nn_name = "xif{}_{}".format(stage_idx, block_idx)
|
749 |
-
assert nn_name not in modules
|
750 |
-
modules[nn_name] = nnblock
|
751 |
-
ret = nn.Sequential(modules)
|
752 |
-
return ret
|
753 |
-
|
754 |
-
def add_last(self, stage_info):
|
755 |
-
""" skip last layer if channel_scale == 0
|
756 |
-
use the same output channel if channel_scale < 0
|
757 |
-
"""
|
758 |
-
assert len(stage_info) == 2
|
759 |
-
channels = stage_info[0]
|
760 |
-
channel_scale = stage_info[1]
|
761 |
-
|
762 |
-
if channel_scale == 0.0:
|
763 |
-
return nn.Sequential()
|
764 |
-
|
765 |
-
if channel_scale > 0:
|
766 |
-
last_channel = (
|
767 |
-
int(channels * self.width_ratio) if self.width_ratio > 1.0 else channels
|
768 |
-
)
|
769 |
-
last_channel = int(last_channel * channel_scale)
|
770 |
-
else:
|
771 |
-
last_channel = int(self.last_depth * (-channel_scale))
|
772 |
-
last_channel = self._get_divisible_width(last_channel)
|
773 |
-
|
774 |
-
if last_channel == 0:
|
775 |
-
return nn.Sequential()
|
776 |
-
|
777 |
-
dim_in = self.last_depth
|
778 |
-
ret = ConvBNRelu(
|
779 |
-
dim_in,
|
780 |
-
last_channel,
|
781 |
-
kernel=1,
|
782 |
-
stride=1,
|
783 |
-
pad=0,
|
784 |
-
no_bias=1,
|
785 |
-
use_relu="relu",
|
786 |
-
bn_type=self.bn_type,
|
787 |
-
)
|
788 |
-
self.last_depth = last_channel
|
789 |
-
return ret
|
790 |
-
|
791 |
-
# def add_final_pool(self, model, blob_in, kernel_size):
|
792 |
-
# ret = model.AveragePool(blob_in, "final_avg", kernel=kernel_size, stride=1)
|
793 |
-
# return ret
|
794 |
-
|
795 |
-
def _add_ir_block(
|
796 |
-
self, dim_in, dim_out, stride, expand_ratio, block_op_type, **kwargs
|
797 |
-
):
|
798 |
-
ret = PRIMITIVES[block_op_type](
|
799 |
-
dim_in,
|
800 |
-
dim_out,
|
801 |
-
expansion=expand_ratio,
|
802 |
-
stride=stride,
|
803 |
-
bn_type=self.bn_type,
|
804 |
-
width_divisor=self.width_divisor,
|
805 |
-
dw_skip_bn=self.dw_skip_bn,
|
806 |
-
dw_skip_relu=self.dw_skip_relu,
|
807 |
-
**kwargs
|
808 |
-
)
|
809 |
-
return ret, ret.output_depth
|
810 |
-
|
811 |
-
def add_ir_block(self, tcns, block_op_types, **kwargs):
|
812 |
-
t, c, n, s = tcns
|
813 |
-
assert n == 1
|
814 |
-
out_depth = self._get_divisible_width(int(c * self.width_ratio))
|
815 |
-
dim_in = self.last_depth
|
816 |
-
op, ret_depth = self._add_ir_block(
|
817 |
-
dim_in,
|
818 |
-
out_depth,
|
819 |
-
stride=s,
|
820 |
-
expand_ratio=t,
|
821 |
-
block_op_type=block_op_types[0],
|
822 |
-
**kwargs
|
823 |
-
)
|
824 |
-
self.last_depth = ret_depth
|
825 |
-
return op
|
826 |
-
|
827 |
-
def _get_divisible_width(self, width):
|
828 |
-
ret = _get_divisible_by(int(width), self.width_divisor, self.width_divisor)
|
829 |
-
return ret
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/aiohttp/client_reqrep.py
DELETED
@@ -1,1134 +0,0 @@
|
|
1 |
-
import asyncio
|
2 |
-
import codecs
|
3 |
-
import functools
|
4 |
-
import io
|
5 |
-
import re
|
6 |
-
import sys
|
7 |
-
import traceback
|
8 |
-
import warnings
|
9 |
-
from hashlib import md5, sha1, sha256
|
10 |
-
from http.cookies import CookieError, Morsel, SimpleCookie
|
11 |
-
from types import MappingProxyType, TracebackType
|
12 |
-
from typing import (
|
13 |
-
TYPE_CHECKING,
|
14 |
-
Any,
|
15 |
-
Dict,
|
16 |
-
Iterable,
|
17 |
-
List,
|
18 |
-
Mapping,
|
19 |
-
Optional,
|
20 |
-
Tuple,
|
21 |
-
Type,
|
22 |
-
Union,
|
23 |
-
cast,
|
24 |
-
)
|
25 |
-
|
26 |
-
import attr
|
27 |
-
from multidict import CIMultiDict, CIMultiDictProxy, MultiDict, MultiDictProxy
|
28 |
-
from yarl import URL
|
29 |
-
|
30 |
-
from . import hdrs, helpers, http, multipart, payload
|
31 |
-
from .abc import AbstractStreamWriter
|
32 |
-
from .client_exceptions import (
|
33 |
-
ClientConnectionError,
|
34 |
-
ClientOSError,
|
35 |
-
ClientResponseError,
|
36 |
-
ContentTypeError,
|
37 |
-
InvalidURL,
|
38 |
-
ServerFingerprintMismatch,
|
39 |
-
)
|
40 |
-
from .formdata import FormData
|
41 |
-
from .helpers import (
|
42 |
-
PY_36,
|
43 |
-
BaseTimerContext,
|
44 |
-
BasicAuth,
|
45 |
-
HeadersMixin,
|
46 |
-
TimerNoop,
|
47 |
-
noop,
|
48 |
-
reify,
|
49 |
-
set_result,
|
50 |
-
)
|
51 |
-
from .http import SERVER_SOFTWARE, HttpVersion10, HttpVersion11, StreamWriter
|
52 |
-
from .log import client_logger
|
53 |
-
from .streams import StreamReader
|
54 |
-
from .typedefs import (
|
55 |
-
DEFAULT_JSON_DECODER,
|
56 |
-
JSONDecoder,
|
57 |
-
LooseCookies,
|
58 |
-
LooseHeaders,
|
59 |
-
RawHeaders,
|
60 |
-
)
|
61 |
-
|
62 |
-
try:
|
63 |
-
import ssl
|
64 |
-
from ssl import SSLContext
|
65 |
-
except ImportError: # pragma: no cover
|
66 |
-
ssl = None # type: ignore[assignment]
|
67 |
-
SSLContext = object # type: ignore[misc,assignment]
|
68 |
-
|
69 |
-
try:
|
70 |
-
import cchardet as chardet
|
71 |
-
except ImportError: # pragma: no cover
|
72 |
-
import charset_normalizer as chardet # type: ignore[no-redef]
|
73 |
-
|
74 |
-
|
75 |
-
__all__ = ("ClientRequest", "ClientResponse", "RequestInfo", "Fingerprint")
|
76 |
-
|
77 |
-
|
78 |
-
if TYPE_CHECKING: # pragma: no cover
|
79 |
-
from .client import ClientSession
|
80 |
-
from .connector import Connection
|
81 |
-
from .tracing import Trace
|
82 |
-
|
83 |
-
|
84 |
-
json_re = re.compile(r"^application/(?:[\w.+-]+?\+)?json")
|
85 |
-
|
86 |
-
|
87 |
-
@attr.s(auto_attribs=True, frozen=True, slots=True)
|
88 |
-
class ContentDisposition:
|
89 |
-
type: Optional[str]
|
90 |
-
parameters: "MappingProxyType[str, str]"
|
91 |
-
filename: Optional[str]
|
92 |
-
|
93 |
-
|
94 |
-
@attr.s(auto_attribs=True, frozen=True, slots=True)
|
95 |
-
class RequestInfo:
|
96 |
-
url: URL
|
97 |
-
method: str
|
98 |
-
headers: "CIMultiDictProxy[str]"
|
99 |
-
real_url: URL = attr.ib()
|
100 |
-
|
101 |
-
@real_url.default
|
102 |
-
def real_url_default(self) -> URL:
|
103 |
-
return self.url
|
104 |
-
|
105 |
-
|
106 |
-
class Fingerprint:
|
107 |
-
HASHFUNC_BY_DIGESTLEN = {
|
108 |
-
16: md5,
|
109 |
-
20: sha1,
|
110 |
-
32: sha256,
|
111 |
-
}
|
112 |
-
|
113 |
-
def __init__(self, fingerprint: bytes) -> None:
|
114 |
-
digestlen = len(fingerprint)
|
115 |
-
hashfunc = self.HASHFUNC_BY_DIGESTLEN.get(digestlen)
|
116 |
-
if not hashfunc:
|
117 |
-
raise ValueError("fingerprint has invalid length")
|
118 |
-
elif hashfunc is md5 or hashfunc is sha1:
|
119 |
-
raise ValueError(
|
120 |
-
"md5 and sha1 are insecure and " "not supported. Use sha256."
|
121 |
-
)
|
122 |
-
self._hashfunc = hashfunc
|
123 |
-
self._fingerprint = fingerprint
|
124 |
-
|
125 |
-
@property
|
126 |
-
def fingerprint(self) -> bytes:
|
127 |
-
return self._fingerprint
|
128 |
-
|
129 |
-
def check(self, transport: asyncio.Transport) -> None:
|
130 |
-
if not transport.get_extra_info("sslcontext"):
|
131 |
-
return
|
132 |
-
sslobj = transport.get_extra_info("ssl_object")
|
133 |
-
cert = sslobj.getpeercert(binary_form=True)
|
134 |
-
got = self._hashfunc(cert).digest()
|
135 |
-
if got != self._fingerprint:
|
136 |
-
host, port, *_ = transport.get_extra_info("peername")
|
137 |
-
raise ServerFingerprintMismatch(self._fingerprint, got, host, port)
|
138 |
-
|
139 |
-
|
140 |
-
if ssl is not None:
|
141 |
-
SSL_ALLOWED_TYPES = (ssl.SSLContext, bool, Fingerprint, type(None))
|
142 |
-
else: # pragma: no cover
|
143 |
-
SSL_ALLOWED_TYPES = type(None)
|
144 |
-
|
145 |
-
|
146 |
-
def _merge_ssl_params(
|
147 |
-
ssl: Union["SSLContext", bool, Fingerprint, None],
|
148 |
-
verify_ssl: Optional[bool],
|
149 |
-
ssl_context: Optional["SSLContext"],
|
150 |
-
fingerprint: Optional[bytes],
|
151 |
-
) -> Union["SSLContext", bool, Fingerprint, None]:
|
152 |
-
if verify_ssl is not None and not verify_ssl:
|
153 |
-
warnings.warn(
|
154 |
-
"verify_ssl is deprecated, use ssl=False instead",
|
155 |
-
DeprecationWarning,
|
156 |
-
stacklevel=3,
|
157 |
-
)
|
158 |
-
if ssl is not None:
|
159 |
-
raise ValueError(
|
160 |
-
"verify_ssl, ssl_context, fingerprint and ssl "
|
161 |
-
"parameters are mutually exclusive"
|
162 |
-
)
|
163 |
-
else:
|
164 |
-
ssl = False
|
165 |
-
if ssl_context is not None:
|
166 |
-
warnings.warn(
|
167 |
-
"ssl_context is deprecated, use ssl=context instead",
|
168 |
-
DeprecationWarning,
|
169 |
-
stacklevel=3,
|
170 |
-
)
|
171 |
-
if ssl is not None:
|
172 |
-
raise ValueError(
|
173 |
-
"verify_ssl, ssl_context, fingerprint and ssl "
|
174 |
-
"parameters are mutually exclusive"
|
175 |
-
)
|
176 |
-
else:
|
177 |
-
ssl = ssl_context
|
178 |
-
if fingerprint is not None:
|
179 |
-
warnings.warn(
|
180 |
-
"fingerprint is deprecated, " "use ssl=Fingerprint(fingerprint) instead",
|
181 |
-
DeprecationWarning,
|
182 |
-
stacklevel=3,
|
183 |
-
)
|
184 |
-
if ssl is not None:
|
185 |
-
raise ValueError(
|
186 |
-
"verify_ssl, ssl_context, fingerprint and ssl "
|
187 |
-
"parameters are mutually exclusive"
|
188 |
-
)
|
189 |
-
else:
|
190 |
-
ssl = Fingerprint(fingerprint)
|
191 |
-
if not isinstance(ssl, SSL_ALLOWED_TYPES):
|
192 |
-
raise TypeError(
|
193 |
-
"ssl should be SSLContext, bool, Fingerprint or None, "
|
194 |
-
"got {!r} instead.".format(ssl)
|
195 |
-
)
|
196 |
-
return ssl
|
197 |
-
|
198 |
-
|
199 |
-
@attr.s(auto_attribs=True, slots=True, frozen=True)
|
200 |
-
class ConnectionKey:
|
201 |
-
# the key should contain an information about used proxy / TLS
|
202 |
-
# to prevent reusing wrong connections from a pool
|
203 |
-
host: str
|
204 |
-
port: Optional[int]
|
205 |
-
is_ssl: bool
|
206 |
-
ssl: Union[SSLContext, None, bool, Fingerprint]
|
207 |
-
proxy: Optional[URL]
|
208 |
-
proxy_auth: Optional[BasicAuth]
|
209 |
-
proxy_headers_hash: Optional[int] # hash(CIMultiDict)
|
210 |
-
|
211 |
-
|
212 |
-
def _is_expected_content_type(
|
213 |
-
response_content_type: str, expected_content_type: str
|
214 |
-
) -> bool:
|
215 |
-
if expected_content_type == "application/json":
|
216 |
-
return json_re.match(response_content_type) is not None
|
217 |
-
return expected_content_type in response_content_type
|
218 |
-
|
219 |
-
|
220 |
-
class ClientRequest:
|
221 |
-
GET_METHODS = {
|
222 |
-
hdrs.METH_GET,
|
223 |
-
hdrs.METH_HEAD,
|
224 |
-
hdrs.METH_OPTIONS,
|
225 |
-
hdrs.METH_TRACE,
|
226 |
-
}
|
227 |
-
POST_METHODS = {hdrs.METH_PATCH, hdrs.METH_POST, hdrs.METH_PUT}
|
228 |
-
ALL_METHODS = GET_METHODS.union(POST_METHODS).union({hdrs.METH_DELETE})
|
229 |
-
|
230 |
-
DEFAULT_HEADERS = {
|
231 |
-
hdrs.ACCEPT: "*/*",
|
232 |
-
hdrs.ACCEPT_ENCODING: "gzip, deflate",
|
233 |
-
}
|
234 |
-
|
235 |
-
body = b""
|
236 |
-
auth = None
|
237 |
-
response = None
|
238 |
-
|
239 |
-
_writer = None # async task for streaming data
|
240 |
-
_continue = None # waiter future for '100 Continue' response
|
241 |
-
|
242 |
-
# N.B.
|
243 |
-
# Adding __del__ method with self._writer closing doesn't make sense
|
244 |
-
# because _writer is instance method, thus it keeps a reference to self.
|
245 |
-
# Until writer has finished finalizer will not be called.
|
246 |
-
|
247 |
-
def __init__(
|
248 |
-
self,
|
249 |
-
method: str,
|
250 |
-
url: URL,
|
251 |
-
*,
|
252 |
-
params: Optional[Mapping[str, str]] = None,
|
253 |
-
headers: Optional[LooseHeaders] = None,
|
254 |
-
skip_auto_headers: Iterable[str] = frozenset(),
|
255 |
-
data: Any = None,
|
256 |
-
cookies: Optional[LooseCookies] = None,
|
257 |
-
auth: Optional[BasicAuth] = None,
|
258 |
-
version: http.HttpVersion = http.HttpVersion11,
|
259 |
-
compress: Optional[str] = None,
|
260 |
-
chunked: Optional[bool] = None,
|
261 |
-
expect100: bool = False,
|
262 |
-
loop: Optional[asyncio.AbstractEventLoop] = None,
|
263 |
-
response_class: Optional[Type["ClientResponse"]] = None,
|
264 |
-
proxy: Optional[URL] = None,
|
265 |
-
proxy_auth: Optional[BasicAuth] = None,
|
266 |
-
timer: Optional[BaseTimerContext] = None,
|
267 |
-
session: Optional["ClientSession"] = None,
|
268 |
-
ssl: Union[SSLContext, bool, Fingerprint, None] = None,
|
269 |
-
proxy_headers: Optional[LooseHeaders] = None,
|
270 |
-
traces: Optional[List["Trace"]] = None,
|
271 |
-
):
|
272 |
-
|
273 |
-
if loop is None:
|
274 |
-
loop = asyncio.get_event_loop()
|
275 |
-
|
276 |
-
assert isinstance(url, URL), url
|
277 |
-
assert isinstance(proxy, (URL, type(None))), proxy
|
278 |
-
# FIXME: session is None in tests only, need to fix tests
|
279 |
-
# assert session is not None
|
280 |
-
self._session = cast("ClientSession", session)
|
281 |
-
if params:
|
282 |
-
q = MultiDict(url.query)
|
283 |
-
url2 = url.with_query(params)
|
284 |
-
q.extend(url2.query)
|
285 |
-
url = url.with_query(q)
|
286 |
-
self.original_url = url
|
287 |
-
self.url = url.with_fragment(None)
|
288 |
-
self.method = method.upper()
|
289 |
-
self.chunked = chunked
|
290 |
-
self.compress = compress
|
291 |
-
self.loop = loop
|
292 |
-
self.length = None
|
293 |
-
if response_class is None:
|
294 |
-
real_response_class = ClientResponse
|
295 |
-
else:
|
296 |
-
real_response_class = response_class
|
297 |
-
self.response_class: Type[ClientResponse] = real_response_class
|
298 |
-
self._timer = timer if timer is not None else TimerNoop()
|
299 |
-
self._ssl = ssl
|
300 |
-
|
301 |
-
if loop.get_debug():
|
302 |
-
self._source_traceback = traceback.extract_stack(sys._getframe(1))
|
303 |
-
|
304 |
-
self.update_version(version)
|
305 |
-
self.update_host(url)
|
306 |
-
self.update_headers(headers)
|
307 |
-
self.update_auto_headers(skip_auto_headers)
|
308 |
-
self.update_cookies(cookies)
|
309 |
-
self.update_content_encoding(data)
|
310 |
-
self.update_auth(auth)
|
311 |
-
self.update_proxy(proxy, proxy_auth, proxy_headers)
|
312 |
-
|
313 |
-
self.update_body_from_data(data)
|
314 |
-
if data is not None or self.method not in self.GET_METHODS:
|
315 |
-
self.update_transfer_encoding()
|
316 |
-
self.update_expect_continue(expect100)
|
317 |
-
if traces is None:
|
318 |
-
traces = []
|
319 |
-
self._traces = traces
|
320 |
-
|
321 |
-
def is_ssl(self) -> bool:
|
322 |
-
return self.url.scheme in ("https", "wss")
|
323 |
-
|
324 |
-
@property
|
325 |
-
def ssl(self) -> Union["SSLContext", None, bool, Fingerprint]:
|
326 |
-
return self._ssl
|
327 |
-
|
328 |
-
@property
|
329 |
-
def connection_key(self) -> ConnectionKey:
|
330 |
-
proxy_headers = self.proxy_headers
|
331 |
-
if proxy_headers:
|
332 |
-
h: Optional[int] = hash(tuple((k, v) for k, v in proxy_headers.items()))
|
333 |
-
else:
|
334 |
-
h = None
|
335 |
-
return ConnectionKey(
|
336 |
-
self.host,
|
337 |
-
self.port,
|
338 |
-
self.is_ssl(),
|
339 |
-
self.ssl,
|
340 |
-
self.proxy,
|
341 |
-
self.proxy_auth,
|
342 |
-
h,
|
343 |
-
)
|
344 |
-
|
345 |
-
@property
|
346 |
-
def host(self) -> str:
|
347 |
-
ret = self.url.raw_host
|
348 |
-
assert ret is not None
|
349 |
-
return ret
|
350 |
-
|
351 |
-
@property
|
352 |
-
def port(self) -> Optional[int]:
|
353 |
-
return self.url.port
|
354 |
-
|
355 |
-
@property
|
356 |
-
def request_info(self) -> RequestInfo:
|
357 |
-
headers: CIMultiDictProxy[str] = CIMultiDictProxy(self.headers)
|
358 |
-
return RequestInfo(self.url, self.method, headers, self.original_url)
|
359 |
-
|
360 |
-
def update_host(self, url: URL) -> None:
|
361 |
-
"""Update destination host, port and connection type (ssl)."""
|
362 |
-
# get host/port
|
363 |
-
if not url.raw_host:
|
364 |
-
raise InvalidURL(url)
|
365 |
-
|
366 |
-
# basic auth info
|
367 |
-
username, password = url.user, url.password
|
368 |
-
if username:
|
369 |
-
self.auth = helpers.BasicAuth(username, password or "")
|
370 |
-
|
371 |
-
def update_version(self, version: Union[http.HttpVersion, str]) -> None:
|
372 |
-
"""Convert request version to two elements tuple.
|
373 |
-
|
374 |
-
parser HTTP version '1.1' => (1, 1)
|
375 |
-
"""
|
376 |
-
if isinstance(version, str):
|
377 |
-
v = [part.strip() for part in version.split(".", 1)]
|
378 |
-
try:
|
379 |
-
version = http.HttpVersion(int(v[0]), int(v[1]))
|
380 |
-
except ValueError:
|
381 |
-
raise ValueError(
|
382 |
-
f"Can not parse http version number: {version}"
|
383 |
-
) from None
|
384 |
-
self.version = version
|
385 |
-
|
386 |
-
def update_headers(self, headers: Optional[LooseHeaders]) -> None:
|
387 |
-
"""Update request headers."""
|
388 |
-
self.headers: CIMultiDict[str] = CIMultiDict()
|
389 |
-
|
390 |
-
# add host
|
391 |
-
netloc = cast(str, self.url.raw_host)
|
392 |
-
if helpers.is_ipv6_address(netloc):
|
393 |
-
netloc = f"[{netloc}]"
|
394 |
-
if self.url.port is not None and not self.url.is_default_port():
|
395 |
-
netloc += ":" + str(self.url.port)
|
396 |
-
self.headers[hdrs.HOST] = netloc
|
397 |
-
|
398 |
-
if headers:
|
399 |
-
if isinstance(headers, (dict, MultiDictProxy, MultiDict)):
|
400 |
-
headers = headers.items() # type: ignore[assignment]
|
401 |
-
|
402 |
-
for key, value in headers: # type: ignore[misc]
|
403 |
-
# A special case for Host header
|
404 |
-
if key.lower() == "host":
|
405 |
-
self.headers[key] = value
|
406 |
-
else:
|
407 |
-
self.headers.add(key, value)
|
408 |
-
|
409 |
-
def update_auto_headers(self, skip_auto_headers: Iterable[str]) -> None:
|
410 |
-
self.skip_auto_headers = CIMultiDict(
|
411 |
-
(hdr, None) for hdr in sorted(skip_auto_headers)
|
412 |
-
)
|
413 |
-
used_headers = self.headers.copy()
|
414 |
-
used_headers.extend(self.skip_auto_headers) # type: ignore[arg-type]
|
415 |
-
|
416 |
-
for hdr, val in self.DEFAULT_HEADERS.items():
|
417 |
-
if hdr not in used_headers:
|
418 |
-
self.headers.add(hdr, val)
|
419 |
-
|
420 |
-
if hdrs.USER_AGENT not in used_headers:
|
421 |
-
self.headers[hdrs.USER_AGENT] = SERVER_SOFTWARE
|
422 |
-
|
423 |
-
def update_cookies(self, cookies: Optional[LooseCookies]) -> None:
|
424 |
-
"""Update request cookies header."""
|
425 |
-
if not cookies:
|
426 |
-
return
|
427 |
-
|
428 |
-
c: SimpleCookie[str] = SimpleCookie()
|
429 |
-
if hdrs.COOKIE in self.headers:
|
430 |
-
c.load(self.headers.get(hdrs.COOKIE, ""))
|
431 |
-
del self.headers[hdrs.COOKIE]
|
432 |
-
|
433 |
-
if isinstance(cookies, Mapping):
|
434 |
-
iter_cookies = cookies.items()
|
435 |
-
else:
|
436 |
-
iter_cookies = cookies # type: ignore[assignment]
|
437 |
-
for name, value in iter_cookies:
|
438 |
-
if isinstance(value, Morsel):
|
439 |
-
# Preserve coded_value
|
440 |
-
mrsl_val = value.get(value.key, Morsel())
|
441 |
-
mrsl_val.set(value.key, value.value, value.coded_value)
|
442 |
-
c[name] = mrsl_val
|
443 |
-
else:
|
444 |
-
c[name] = value # type: ignore[assignment]
|
445 |
-
|
446 |
-
self.headers[hdrs.COOKIE] = c.output(header="", sep=";").strip()
|
447 |
-
|
448 |
-
def update_content_encoding(self, data: Any) -> None:
|
449 |
-
"""Set request content encoding."""
|
450 |
-
if data is None:
|
451 |
-
return
|
452 |
-
|
453 |
-
enc = self.headers.get(hdrs.CONTENT_ENCODING, "").lower()
|
454 |
-
if enc:
|
455 |
-
if self.compress:
|
456 |
-
raise ValueError(
|
457 |
-
"compress can not be set " "if Content-Encoding header is set"
|
458 |
-
)
|
459 |
-
elif self.compress:
|
460 |
-
if not isinstance(self.compress, str):
|
461 |
-
self.compress = "deflate"
|
462 |
-
self.headers[hdrs.CONTENT_ENCODING] = self.compress
|
463 |
-
self.chunked = True # enable chunked, no need to deal with length
|
464 |
-
|
465 |
-
def update_transfer_encoding(self) -> None:
|
466 |
-
"""Analyze transfer-encoding header."""
|
467 |
-
te = self.headers.get(hdrs.TRANSFER_ENCODING, "").lower()
|
468 |
-
|
469 |
-
if "chunked" in te:
|
470 |
-
if self.chunked:
|
471 |
-
raise ValueError(
|
472 |
-
"chunked can not be set "
|
473 |
-
'if "Transfer-Encoding: chunked" header is set'
|
474 |
-
)
|
475 |
-
|
476 |
-
elif self.chunked:
|
477 |
-
if hdrs.CONTENT_LENGTH in self.headers:
|
478 |
-
raise ValueError(
|
479 |
-
"chunked can not be set " "if Content-Length header is set"
|
480 |
-
)
|
481 |
-
|
482 |
-
self.headers[hdrs.TRANSFER_ENCODING] = "chunked"
|
483 |
-
else:
|
484 |
-
if hdrs.CONTENT_LENGTH not in self.headers:
|
485 |
-
self.headers[hdrs.CONTENT_LENGTH] = str(len(self.body))
|
486 |
-
|
487 |
-
def update_auth(self, auth: Optional[BasicAuth]) -> None:
|
488 |
-
"""Set basic auth."""
|
489 |
-
if auth is None:
|
490 |
-
auth = self.auth
|
491 |
-
if auth is None:
|
492 |
-
return
|
493 |
-
|
494 |
-
if not isinstance(auth, helpers.BasicAuth):
|
495 |
-
raise TypeError("BasicAuth() tuple is required instead")
|
496 |
-
|
497 |
-
self.headers[hdrs.AUTHORIZATION] = auth.encode()
|
498 |
-
|
499 |
-
def update_body_from_data(self, body: Any) -> None:
|
500 |
-
if body is None:
|
501 |
-
return
|
502 |
-
|
503 |
-
# FormData
|
504 |
-
if isinstance(body, FormData):
|
505 |
-
body = body()
|
506 |
-
|
507 |
-
try:
|
508 |
-
body = payload.PAYLOAD_REGISTRY.get(body, disposition=None)
|
509 |
-
except payload.LookupError:
|
510 |
-
body = FormData(body)()
|
511 |
-
|
512 |
-
self.body = body
|
513 |
-
|
514 |
-
# enable chunked encoding if needed
|
515 |
-
if not self.chunked:
|
516 |
-
if hdrs.CONTENT_LENGTH not in self.headers:
|
517 |
-
size = body.size
|
518 |
-
if size is None:
|
519 |
-
self.chunked = True
|
520 |
-
else:
|
521 |
-
if hdrs.CONTENT_LENGTH not in self.headers:
|
522 |
-
self.headers[hdrs.CONTENT_LENGTH] = str(size)
|
523 |
-
|
524 |
-
# copy payload headers
|
525 |
-
assert body.headers
|
526 |
-
for (key, value) in body.headers.items():
|
527 |
-
if key in self.headers:
|
528 |
-
continue
|
529 |
-
if key in self.skip_auto_headers:
|
530 |
-
continue
|
531 |
-
self.headers[key] = value
|
532 |
-
|
533 |
-
def update_expect_continue(self, expect: bool = False) -> None:
|
534 |
-
if expect:
|
535 |
-
self.headers[hdrs.EXPECT] = "100-continue"
|
536 |
-
elif self.headers.get(hdrs.EXPECT, "").lower() == "100-continue":
|
537 |
-
expect = True
|
538 |
-
|
539 |
-
if expect:
|
540 |
-
self._continue = self.loop.create_future()
|
541 |
-
|
542 |
-
def update_proxy(
|
543 |
-
self,
|
544 |
-
proxy: Optional[URL],
|
545 |
-
proxy_auth: Optional[BasicAuth],
|
546 |
-
proxy_headers: Optional[LooseHeaders],
|
547 |
-
) -> None:
|
548 |
-
if proxy_auth and not isinstance(proxy_auth, helpers.BasicAuth):
|
549 |
-
raise ValueError("proxy_auth must be None or BasicAuth() tuple")
|
550 |
-
self.proxy = proxy
|
551 |
-
self.proxy_auth = proxy_auth
|
552 |
-
self.proxy_headers = proxy_headers
|
553 |
-
|
554 |
-
def keep_alive(self) -> bool:
|
555 |
-
if self.version < HttpVersion10:
|
556 |
-
# keep alive not supported at all
|
557 |
-
return False
|
558 |
-
if self.version == HttpVersion10:
|
559 |
-
if self.headers.get(hdrs.CONNECTION) == "keep-alive":
|
560 |
-
return True
|
561 |
-
else: # no headers means we close for Http 1.0
|
562 |
-
return False
|
563 |
-
elif self.headers.get(hdrs.CONNECTION) == "close":
|
564 |
-
return False
|
565 |
-
|
566 |
-
return True
|
567 |
-
|
568 |
-
async def write_bytes(
|
569 |
-
self, writer: AbstractStreamWriter, conn: "Connection"
|
570 |
-
) -> None:
|
571 |
-
"""Support coroutines that yields bytes objects."""
|
572 |
-
# 100 response
|
573 |
-
if self._continue is not None:
|
574 |
-
await writer.drain()
|
575 |
-
await self._continue
|
576 |
-
|
577 |
-
protocol = conn.protocol
|
578 |
-
assert protocol is not None
|
579 |
-
try:
|
580 |
-
if isinstance(self.body, payload.Payload):
|
581 |
-
await self.body.write(writer)
|
582 |
-
else:
|
583 |
-
if isinstance(self.body, (bytes, bytearray)):
|
584 |
-
self.body = (self.body,) # type: ignore[assignment]
|
585 |
-
|
586 |
-
for chunk in self.body:
|
587 |
-
await writer.write(chunk) # type: ignore[arg-type]
|
588 |
-
|
589 |
-
await writer.write_eof()
|
590 |
-
except OSError as exc:
|
591 |
-
if exc.errno is None and isinstance(exc, asyncio.TimeoutError):
|
592 |
-
protocol.set_exception(exc)
|
593 |
-
else:
|
594 |
-
new_exc = ClientOSError(
|
595 |
-
exc.errno, "Can not write request body for %s" % self.url
|
596 |
-
)
|
597 |
-
new_exc.__context__ = exc
|
598 |
-
new_exc.__cause__ = exc
|
599 |
-
protocol.set_exception(new_exc)
|
600 |
-
except asyncio.CancelledError as exc:
|
601 |
-
if not conn.closed:
|
602 |
-
protocol.set_exception(exc)
|
603 |
-
except Exception as exc:
|
604 |
-
protocol.set_exception(exc)
|
605 |
-
finally:
|
606 |
-
self._writer = None
|
607 |
-
|
608 |
-
async def send(self, conn: "Connection") -> "ClientResponse":
|
609 |
-
# Specify request target:
|
610 |
-
# - CONNECT request must send authority form URI
|
611 |
-
# - not CONNECT proxy must send absolute form URI
|
612 |
-
# - most common is origin form URI
|
613 |
-
if self.method == hdrs.METH_CONNECT:
|
614 |
-
connect_host = self.url.raw_host
|
615 |
-
assert connect_host is not None
|
616 |
-
if helpers.is_ipv6_address(connect_host):
|
617 |
-
connect_host = f"[{connect_host}]"
|
618 |
-
path = f"{connect_host}:{self.url.port}"
|
619 |
-
elif self.proxy and not self.is_ssl():
|
620 |
-
path = str(self.url)
|
621 |
-
else:
|
622 |
-
path = self.url.raw_path
|
623 |
-
if self.url.raw_query_string:
|
624 |
-
path += "?" + self.url.raw_query_string
|
625 |
-
|
626 |
-
protocol = conn.protocol
|
627 |
-
assert protocol is not None
|
628 |
-
writer = StreamWriter(
|
629 |
-
protocol,
|
630 |
-
self.loop,
|
631 |
-
on_chunk_sent=functools.partial(
|
632 |
-
self._on_chunk_request_sent, self.method, self.url
|
633 |
-
),
|
634 |
-
on_headers_sent=functools.partial(
|
635 |
-
self._on_headers_request_sent, self.method, self.url
|
636 |
-
),
|
637 |
-
)
|
638 |
-
|
639 |
-
if self.compress:
|
640 |
-
writer.enable_compression(self.compress)
|
641 |
-
|
642 |
-
if self.chunked is not None:
|
643 |
-
writer.enable_chunking()
|
644 |
-
|
645 |
-
# set default content-type
|
646 |
-
if (
|
647 |
-
self.method in self.POST_METHODS
|
648 |
-
and hdrs.CONTENT_TYPE not in self.skip_auto_headers
|
649 |
-
and hdrs.CONTENT_TYPE not in self.headers
|
650 |
-
):
|
651 |
-
self.headers[hdrs.CONTENT_TYPE] = "application/octet-stream"
|
652 |
-
|
653 |
-
# set the connection header
|
654 |
-
connection = self.headers.get(hdrs.CONNECTION)
|
655 |
-
if not connection:
|
656 |
-
if self.keep_alive():
|
657 |
-
if self.version == HttpVersion10:
|
658 |
-
connection = "keep-alive"
|
659 |
-
else:
|
660 |
-
if self.version == HttpVersion11:
|
661 |
-
connection = "close"
|
662 |
-
|
663 |
-
if connection is not None:
|
664 |
-
self.headers[hdrs.CONNECTION] = connection
|
665 |
-
|
666 |
-
# status + headers
|
667 |
-
status_line = "{0} {1} HTTP/{2[0]}.{2[1]}".format(
|
668 |
-
self.method, path, self.version
|
669 |
-
)
|
670 |
-
await writer.write_headers(status_line, self.headers)
|
671 |
-
|
672 |
-
self._writer = self.loop.create_task(self.write_bytes(writer, conn))
|
673 |
-
|
674 |
-
response_class = self.response_class
|
675 |
-
assert response_class is not None
|
676 |
-
self.response = response_class(
|
677 |
-
self.method,
|
678 |
-
self.original_url,
|
679 |
-
writer=self._writer,
|
680 |
-
continue100=self._continue,
|
681 |
-
timer=self._timer,
|
682 |
-
request_info=self.request_info,
|
683 |
-
traces=self._traces,
|
684 |
-
loop=self.loop,
|
685 |
-
session=self._session,
|
686 |
-
)
|
687 |
-
return self.response
|
688 |
-
|
689 |
-
async def close(self) -> None:
|
690 |
-
if self._writer is not None:
|
691 |
-
try:
|
692 |
-
await self._writer
|
693 |
-
finally:
|
694 |
-
self._writer = None
|
695 |
-
|
696 |
-
def terminate(self) -> None:
|
697 |
-
if self._writer is not None:
|
698 |
-
if not self.loop.is_closed():
|
699 |
-
self._writer.cancel()
|
700 |
-
self._writer = None
|
701 |
-
|
702 |
-
async def _on_chunk_request_sent(self, method: str, url: URL, chunk: bytes) -> None:
|
703 |
-
for trace in self._traces:
|
704 |
-
await trace.send_request_chunk_sent(method, url, chunk)
|
705 |
-
|
706 |
-
async def _on_headers_request_sent(
|
707 |
-
self, method: str, url: URL, headers: "CIMultiDict[str]"
|
708 |
-
) -> None:
|
709 |
-
for trace in self._traces:
|
710 |
-
await trace.send_request_headers(method, url, headers)
|
711 |
-
|
712 |
-
|
713 |
-
class ClientResponse(HeadersMixin):
|
714 |
-
|
715 |
-
# from the Status-Line of the response
|
716 |
-
version = None # HTTP-Version
|
717 |
-
status: int = None # type: ignore[assignment] # Status-Code
|
718 |
-
reason = None # Reason-Phrase
|
719 |
-
|
720 |
-
content: StreamReader = None # type: ignore[assignment] # Payload stream
|
721 |
-
_headers: "CIMultiDictProxy[str]" = None # type: ignore[assignment]
|
722 |
-
_raw_headers: RawHeaders = None # type: ignore[assignment] # Response raw headers
|
723 |
-
|
724 |
-
_connection = None # current connection
|
725 |
-
_source_traceback = None
|
726 |
-
# setted up by ClientRequest after ClientResponse object creation
|
727 |
-
# post-init stage allows to not change ctor signature
|
728 |
-
_closed = True # to allow __del__ for non-initialized properly response
|
729 |
-
_released = False
|
730 |
-
|
731 |
-
def __init__(
|
732 |
-
self,
|
733 |
-
method: str,
|
734 |
-
url: URL,
|
735 |
-
*,
|
736 |
-
writer: "asyncio.Task[None]",
|
737 |
-
continue100: Optional["asyncio.Future[bool]"],
|
738 |
-
timer: BaseTimerContext,
|
739 |
-
request_info: RequestInfo,
|
740 |
-
traces: List["Trace"],
|
741 |
-
loop: asyncio.AbstractEventLoop,
|
742 |
-
session: "ClientSession",
|
743 |
-
) -> None:
|
744 |
-
assert isinstance(url, URL)
|
745 |
-
|
746 |
-
self.method = method
|
747 |
-
self.cookies: SimpleCookie[str] = SimpleCookie()
|
748 |
-
|
749 |
-
self._real_url = url
|
750 |
-
self._url = url.with_fragment(None)
|
751 |
-
self._body: Any = None
|
752 |
-
self._writer: Optional[asyncio.Task[None]] = writer
|
753 |
-
self._continue = continue100 # None by default
|
754 |
-
self._closed = True
|
755 |
-
self._history: Tuple[ClientResponse, ...] = ()
|
756 |
-
self._request_info = request_info
|
757 |
-
self._timer = timer if timer is not None else TimerNoop()
|
758 |
-
self._cache: Dict[str, Any] = {}
|
759 |
-
self._traces = traces
|
760 |
-
self._loop = loop
|
761 |
-
# store a reference to session #1985
|
762 |
-
self._session: Optional[ClientSession] = session
|
763 |
-
if loop.get_debug():
|
764 |
-
self._source_traceback = traceback.extract_stack(sys._getframe(1))
|
765 |
-
|
766 |
-
@reify
|
767 |
-
def url(self) -> URL:
|
768 |
-
return self._url
|
769 |
-
|
770 |
-
@reify
|
771 |
-
def url_obj(self) -> URL:
|
772 |
-
warnings.warn("Deprecated, use .url #1654", DeprecationWarning, stacklevel=2)
|
773 |
-
return self._url
|
774 |
-
|
775 |
-
@reify
|
776 |
-
def real_url(self) -> URL:
|
777 |
-
return self._real_url
|
778 |
-
|
779 |
-
@reify
|
780 |
-
def host(self) -> str:
|
781 |
-
assert self._url.host is not None
|
782 |
-
return self._url.host
|
783 |
-
|
784 |
-
@reify
|
785 |
-
def headers(self) -> "CIMultiDictProxy[str]":
|
786 |
-
return self._headers
|
787 |
-
|
788 |
-
@reify
|
789 |
-
def raw_headers(self) -> RawHeaders:
|
790 |
-
return self._raw_headers
|
791 |
-
|
792 |
-
@reify
|
793 |
-
def request_info(self) -> RequestInfo:
|
794 |
-
return self._request_info
|
795 |
-
|
796 |
-
@reify
|
797 |
-
def content_disposition(self) -> Optional[ContentDisposition]:
|
798 |
-
raw = self._headers.get(hdrs.CONTENT_DISPOSITION)
|
799 |
-
if raw is None:
|
800 |
-
return None
|
801 |
-
disposition_type, params_dct = multipart.parse_content_disposition(raw)
|
802 |
-
params = MappingProxyType(params_dct)
|
803 |
-
filename = multipart.content_disposition_filename(params)
|
804 |
-
return ContentDisposition(disposition_type, params, filename)
|
805 |
-
|
806 |
-
def __del__(self, _warnings: Any = warnings) -> None:
|
807 |
-
if self._closed:
|
808 |
-
return
|
809 |
-
|
810 |
-
if self._connection is not None:
|
811 |
-
self._connection.release()
|
812 |
-
self._cleanup_writer()
|
813 |
-
|
814 |
-
if self._loop.get_debug():
|
815 |
-
if PY_36:
|
816 |
-
kwargs = {"source": self}
|
817 |
-
else:
|
818 |
-
kwargs = {}
|
819 |
-
_warnings.warn(f"Unclosed response {self!r}", ResourceWarning, **kwargs)
|
820 |
-
context = {"client_response": self, "message": "Unclosed response"}
|
821 |
-
if self._source_traceback:
|
822 |
-
context["source_traceback"] = self._source_traceback
|
823 |
-
self._loop.call_exception_handler(context)
|
824 |
-
|
825 |
-
def __repr__(self) -> str:
|
826 |
-
out = io.StringIO()
|
827 |
-
ascii_encodable_url = str(self.url)
|
828 |
-
if self.reason:
|
829 |
-
ascii_encodable_reason = self.reason.encode(
|
830 |
-
"ascii", "backslashreplace"
|
831 |
-
).decode("ascii")
|
832 |
-
else:
|
833 |
-
ascii_encodable_reason = self.reason
|
834 |
-
print(
|
835 |
-
"<ClientResponse({}) [{} {}]>".format(
|
836 |
-
ascii_encodable_url, self.status, ascii_encodable_reason
|
837 |
-
),
|
838 |
-
file=out,
|
839 |
-
)
|
840 |
-
print(self.headers, file=out)
|
841 |
-
return out.getvalue()
|
842 |
-
|
843 |
-
@property
|
844 |
-
def connection(self) -> Optional["Connection"]:
|
845 |
-
return self._connection
|
846 |
-
|
847 |
-
@reify
|
848 |
-
def history(self) -> Tuple["ClientResponse", ...]:
|
849 |
-
"""A sequence of of responses, if redirects occurred."""
|
850 |
-
return self._history
|
851 |
-
|
852 |
-
@reify
|
853 |
-
def links(self) -> "MultiDictProxy[MultiDictProxy[Union[str, URL]]]":
|
854 |
-
links_str = ", ".join(self.headers.getall("link", []))
|
855 |
-
|
856 |
-
if not links_str:
|
857 |
-
return MultiDictProxy(MultiDict())
|
858 |
-
|
859 |
-
links: MultiDict[MultiDictProxy[Union[str, URL]]] = MultiDict()
|
860 |
-
|
861 |
-
for val in re.split(r",(?=\s*<)", links_str):
|
862 |
-
match = re.match(r"\s*<(.*)>(.*)", val)
|
863 |
-
if match is None: # pragma: no cover
|
864 |
-
# the check exists to suppress mypy error
|
865 |
-
continue
|
866 |
-
url, params_str = match.groups()
|
867 |
-
params = params_str.split(";")[1:]
|
868 |
-
|
869 |
-
link: MultiDict[Union[str, URL]] = MultiDict()
|
870 |
-
|
871 |
-
for param in params:
|
872 |
-
match = re.match(r"^\s*(\S*)\s*=\s*(['\"]?)(.*?)(\2)\s*$", param, re.M)
|
873 |
-
if match is None: # pragma: no cover
|
874 |
-
# the check exists to suppress mypy error
|
875 |
-
continue
|
876 |
-
key, _, value, _ = match.groups()
|
877 |
-
|
878 |
-
link.add(key, value)
|
879 |
-
|
880 |
-
key = link.get("rel", url) # type: ignore[assignment]
|
881 |
-
|
882 |
-
link.add("url", self.url.join(URL(url)))
|
883 |
-
|
884 |
-
links.add(key, MultiDictProxy(link))
|
885 |
-
|
886 |
-
return MultiDictProxy(links)
|
887 |
-
|
888 |
-
    async def start(self, connection: "Connection") -> "ClientResponse":
        """Start response processing.

        Reads status line, headers, and the payload stream from the
        connection's protocol, skipping 1xx interim responses, and
        populates ``version``/``status``/``reason``/headers/``content``
        and response cookies.  Returns ``self``.
        """
        self._closed = False
        self._protocol = connection.protocol
        self._connection = connection

        with self._timer:
            while True:
                # read response
                try:
                    protocol = self._protocol
                    message, payload = await protocol.read()  # type: ignore[union-attr]
                except http.HttpProcessingError as exc:
                    # Translate low-level parse errors into a client error
                    # carrying this request's context.
                    raise ClientResponseError(
                        self.request_info,
                        self.history,
                        status=exc.code,
                        message=exc.message,
                        headers=exc.headers,
                    ) from exc

                # Stop looping on any final response; 1xx interim responses
                # are skipped except 101 Switching Protocols, which is final.
                if message.code < 100 or message.code > 199 or message.code == 101:
                    break

                # A 1xx (e.g. 100 Continue) interim response: unblock the
                # request body writer waiting on the continue future.
                if self._continue is not None:
                    set_result(self._continue, True)
                    self._continue = None

        # payload eof handler
        payload.on_eof(self._response_eof)

        # response status
        self.version = message.version
        self.status = message.code
        self.reason = message.reason

        # headers
        self._headers = message.headers  # type is CIMultiDictProxy
        self._raw_headers = message.raw_headers  # type is Tuple[bytes, bytes]

        # payload
        self.content = payload

        # cookies
        for hdr in self.headers.getall(hdrs.SET_COOKIE, ()):
            try:
                self.cookies.load(hdr)
            except CookieError as exc:
                # A malformed cookie should not fail the whole response.
                client_logger.warning("Can not load response cookies: %s", exc)
        return self
|
938 |
-
|
939 |
-
    def _response_eof(self) -> None:
        """Payload EOF callback: release the connection and mark closed."""
        if self._closed:
            return

        if self._connection is not None:
            # websocket, protocol could be None because
            # connection could be detached
            if (
                self._connection.protocol is not None
                and self._connection.protocol.upgraded
            ):
                # Upgraded (e.g. websocket) connections are not released here.
                return

            self._connection.release()
            self._connection = None

        self._closed = True
        self._cleanup_writer()
|
957 |
-
|
958 |
-
    @property
    def closed(self) -> bool:
        """``True`` once the response has been closed or released."""
        return self._closed
|
961 |
-
|
962 |
-
    def close(self) -> None:
        """Forcefully close the response and its underlying connection.

        Unlike :meth:`release`, this hard-closes the connection instead
        of returning it to the pool.  Idempotent.
        """
        if not self._released:
            # Signal pending content readers that the connection is gone.
            self._notify_content()
        if self._closed:
            return

        self._closed = True
        # Nothing to clean up if the event loop is already gone.
        if self._loop is None or self._loop.is_closed():
            return

        if self._connection is not None:
            self._connection.close()
            self._connection = None
        self._cleanup_writer()
|
976 |
-
|
977 |
-
    def release(self) -> Any:
        """Release the connection back to the pool (soft close).

        Returns a ``noop()`` awaitable for backward compatibility with
        callers that ``await`` the result.  Idempotent.
        """
        if not self._released:
            # Signal pending content readers that the connection is gone.
            self._notify_content()
        if self._closed:
            return noop()

        self._closed = True
        if self._connection is not None:
            # Release (do not close) so the connection can be reused.
            self._connection.release()
            self._connection = None

        self._cleanup_writer()
        return noop()
|
990 |
-
|
991 |
-
@property
|
992 |
-
def ok(self) -> bool:
|
993 |
-
"""Returns ``True`` if ``status`` is less than ``400``, ``False`` if not.
|
994 |
-
|
995 |
-
This is **not** a check for ``200 OK`` but a check that the response
|
996 |
-
status is under 400.
|
997 |
-
"""
|
998 |
-
return 400 > self.status
|
999 |
-
|
1000 |
-
    def raise_for_status(self) -> None:
        """Raise :class:`ClientResponseError` if the status is 400 or above.

        Releases the connection before raising, so the response body is
        no longer readable afterwards.
        """
        if not self.ok:
            # reason should always be not None for a started response
            assert self.reason is not None
            self.release()
            raise ClientResponseError(
                self.request_info,
                self.history,
                status=self.status,
                message=self.reason,
                headers=self.headers,
            )
|
1012 |
-
|
1013 |
-
    def _cleanup_writer(self) -> None:
        """Cancel the pending request writer task and drop the session ref."""
        if self._writer is not None:
            self._writer.cancel()
            self._writer = None
        self._session = None
|
1018 |
-
|
1019 |
-
    def _notify_content(self) -> None:
        """Fail pending content readers with a connection-closed error."""
        content = self.content
        # Only set an exception if the stream doesn't already carry one.
        if content and content.exception() is None:
            content.set_exception(ClientConnectionError("Connection closed"))
        self._released = True
|
1024 |
-
|
1025 |
-
    async def wait_for_close(self) -> None:
        """Wait for the request writer to finish, then release the response."""
        if self._writer is not None:
            try:
                await self._writer
            finally:
                # Drop the writer reference even if awaiting it raised.
                self._writer = None
        self.release()
|
1032 |
-
|
1033 |
-
    async def read(self) -> bytes:
        """Read response payload.

        Caches the body on first read; subsequent calls return the cached
        bytes.  Closes the response and re-raises on any read failure.
        """
        if self._body is None:
            try:
                self._body = await self.content.read()
                # Notify tracing hooks that the (whole) body was received.
                for trace in self._traces:
                    await trace.send_response_chunk_received(
                        self.method, self.url, self._body
                    )
            except BaseException:
                # A partial read leaves the connection unusable: hard-close.
                self.close()
                raise
        elif self._released:
            # Connection was released before the body was read.
            raise ClientConnectionError("Connection closed")

        return self._body  # type: ignore[no-any-return]
|
1049 |
-
|
1050 |
-
    def get_encoding(self) -> str:
        """Return the charset used to decode the response body.

        Preference order: a valid ``charset`` parameter from the
        Content-Type header; UTF-8 for JSON/RDAP mimetypes; otherwise
        chardet detection on the already-read body, falling back to
        UTF-8.  Raises :class:`RuntimeError` if detection is needed but
        the body has not been read yet.
        """
        ctype = self.headers.get(hdrs.CONTENT_TYPE, "").lower()
        mimetype = helpers.parse_mimetype(ctype)

        encoding = mimetype.parameters.get("charset")
        if encoding:
            try:
                # Validate that the declared charset is a known codec.
                codecs.lookup(encoding)
            except LookupError:
                encoding = None
        if not encoding:
            if mimetype.type == "application" and (
                mimetype.subtype == "json" or mimetype.subtype == "rdap"
            ):
                # RFC 7159 states that the default encoding is UTF-8.
                # RFC 7483 defines application/rdap+json
                encoding = "utf-8"
            elif self._body is None:
                raise RuntimeError(
                    "Cannot guess the encoding of " "a not yet read body"
                )
            else:
                # Sniff the encoding from the raw body bytes.
                encoding = chardet.detect(self._body)["encoding"]
        if not encoding:
            encoding = "utf-8"

        return encoding
|
1077 |
-
|
1078 |
-
    async def text(self, encoding: Optional[str] = None, errors: str = "strict") -> str:
        """Read response payload and decode.

        :param encoding: codec to use; auto-detected via
            :meth:`get_encoding` when ``None``.
        :param errors: passed through to :meth:`bytes.decode`.
        """
        if self._body is None:
            await self.read()

        if encoding is None:
            encoding = self.get_encoding()

        return self._body.decode(  # type: ignore[no-any-return,union-attr]
            encoding, errors=errors
        )
|
1089 |
-
|
1090 |
-
    async def json(
        self,
        *,
        encoding: Optional[str] = None,
        loads: JSONDecoder = DEFAULT_JSON_DECODER,
        content_type: Optional[str] = "application/json",
    ) -> Any:
        """Read and decodes JSON response.

        :param encoding: charset override; auto-detected when ``None``.
        :param loads: JSON decoder callable applied to the decoded text.
        :param content_type: expected mimetype; ``None`` disables the
            check.  Raises :class:`ContentTypeError` on mismatch.
        :returns: decoded object, or ``None`` for an empty/whitespace body.
        """
        if self._body is None:
            await self.read()

        if content_type:
            ctype = self.headers.get(hdrs.CONTENT_TYPE, "").lower()
            if not _is_expected_content_type(ctype, content_type):
                raise ContentTypeError(
                    self.request_info,
                    self.history,
                    message=(
                        "Attempt to decode JSON with " "unexpected mimetype: %s" % ctype
                    ),
                    headers=self.headers,
                )

        stripped = self._body.strip()  # type: ignore[union-attr]
        if not stripped:
            return None

        if encoding is None:
            encoding = self.get_encoding()

        return loads(stripped.decode(encoding))
|
1121 |
-
|
1122 |
-
    async def __aenter__(self) -> "ClientResponse":
        """Enter the async context manager; returns the response itself."""
        return self
|
1124 |
-
|
1125 |
-
    async def __aexit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_val: Optional[BaseException],
        exc_tb: Optional[TracebackType],
    ) -> None:
        """Exit the async context manager, releasing the connection."""
        # similar to _RequestContextManager, we do not need to check
        # for exceptions, response object can close connection
        # if state is broken
        self.release()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_c_v_t.py
DELETED
@@ -1,47 +0,0 @@
|
|
1 |
-
from fontTools.misc.textTools import safeEval
|
2 |
-
from . import DefaultTable
|
3 |
-
import sys
|
4 |
-
import array
|
5 |
-
|
6 |
-
|
7 |
-
class table__c_v_t(DefaultTable.DefaultTable):
    """The ``cvt`` (control value) table: a flat sequence of signed
    16-bit control values, exposed with list-like access."""

    def decompile(self, data, ttFont):
        # The binary table is a run of big-endian int16 values.
        cvt_values = array.array("h")
        cvt_values.frombytes(data)
        if sys.byteorder != "big":
            cvt_values.byteswap()
        self.values = cvt_values

    def compile(self, ttFont):
        # Byteswap a copy so self.values is left untouched.
        cvt_values = self.values[:]
        if sys.byteorder != "big":
            cvt_values.byteswap()
        return cvt_values.tobytes()

    def toXML(self, writer, ttFont):
        # Emit one <cv> element per value, tagged with its index.
        for idx, cvt_value in enumerate(self.values):
            writer.simpletag("cv", value=cvt_value, index=idx)
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if not hasattr(self, "values"):
            self.values = array.array("h")
        if name == "cv":
            idx = safeEval(attrs["index"])
            cvt_value = safeEval(attrs["value"])
            # Zero-pad until the target index exists, then assign.
            while len(self.values) <= idx:
                self.values.append(0)
            self.values[idx] = cvt_value

    def __len__(self):
        return len(self.values)

    def __getitem__(self, index):
        return self.values[index]

    def __setitem__(self, index, value):
        self.values[index] = value

    def __delitem__(self, index):
        del self.values[index]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Dinoking/Guccio-AI-Designer/models/stylegan/stylegan_tf/dnnlib/submission/__init__.py
DELETED
@@ -1,9 +0,0 @@
|
|
1 |
-
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
|
2 |
-
#
|
3 |
-
# This work is licensed under the Creative Commons Attribution-NonCommercial
|
4 |
-
# 4.0 International License. To view a copy of this license, visit
|
5 |
-
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
|
6 |
-
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
|
7 |
-
|
8 |
-
from . import run_context
|
9 |
-
from . import submit
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Dinoking/Guccio-AI-Designer/netdissect/upsegmodel/prroi_pool/test_prroi_pooling2d.py
DELETED
@@ -1,56 +0,0 @@
|
|
1 |
-
# -*- coding: utf-8 -*-
|
2 |
-
# File : test_prroi_pooling2d.py
|
3 |
-
# Author : Jiayuan Mao
|
4 |
-
# Email : [email protected]
|
5 |
-
# Date : 18/02/2018
|
6 |
-
#
|
7 |
-
# This file is part of Jacinle.
|
8 |
-
|
9 |
-
import unittest
|
10 |
-
|
11 |
-
import torch
|
12 |
-
import torch.nn as nn
|
13 |
-
import torch.nn.functional as F
|
14 |
-
|
15 |
-
from jactorch.utils.unittest import TorchTestCase
|
16 |
-
|
17 |
-
from prroi_pool import PrRoIPool2D
|
18 |
-
|
19 |
-
|
20 |
-
class TestPrRoIPool2D(TorchTestCase):
    """CUDA tests for the Precise RoI Pooling operator (PrRoIPool2D)."""

    def test_forward(self):
        # 7x7 output bins at half spatial scale.
        pool = PrRoIPool2D(7, 7, spatial_scale=0.5)
        features = torch.rand((4, 16, 24, 32)).cuda()
        # RoI format: (batch_index, x0, y0, x1, y1) in input coordinates.
        rois = torch.tensor([
            [0, 0, 0, 14, 14],
            [1, 14, 14, 28, 28],
        ]).float().cuda()

        out = pool(features, rois)
        # With these aligned RoIs, PrRoI pooling reduces to 2x2 average
        # pooling over the corresponding feature-map windows.
        out_gold = F.avg_pool2d(features, kernel_size=2, stride=1)

        self.assertTensorClose(out, torch.stack((
            out_gold[0, :, :7, :7],
            out_gold[1, :, 7:14, 7:14],
        ), dim=0))

    def test_backward_shapeonly(self):
        # Only checks that gradients flow and have matching shapes.
        pool = PrRoIPool2D(2, 2, spatial_scale=0.5)

        features = torch.rand((4, 2, 24, 32)).cuda()
        rois = torch.tensor([
            [0, 0, 0, 4, 4],
            [1, 14, 14, 18, 18],
        ]).float().cuda()
        features.requires_grad = rois.requires_grad = True
        out = pool(features, rois)

        loss = out.sum()
        loss.backward()

        # PrRoIPool is differentiable w.r.t. both features and RoI coords.
        self.assertTupleEqual(features.size(), features.grad.size())
        self.assertTupleEqual(rois.size(), rois.grad.size())


if __name__ == '__main__':
    unittest.main()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|