Commit ebac6d6
Parent(s): 7022d3b

Update parquet files (step 121 of 249)
This view is limited to 50 files because it contains too many changes.
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Foxit PhantomPDF Business 9.0.0.29935 Crack [TechTools].md +0 -226
- spaces/1gistliPinn/ChatGPT4/Examples/Ape Yalu Punchi Boothaya Full Movie 26.md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/DxO PhotoLab 3.0.2 Build 4266 Elite Portable [Latest].md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/Fanaa Hindi Movie Download Torrent Free.md +0 -5
- spaces/1gistliPinn/ChatGPT4/Examples/Farsi Font For Photoshop Download Free.md +0 -6
- spaces/1phancelerku/anime-remove-background/Air Attack A Classic Shoot em Up Game with Modern Graphics and Sound.md +0 -247
- spaces/1phancelerku/anime-remove-background/American Truck Simulator Mods Everything You Need to Know About ATS Modding.md +0 -115
- spaces/1phancelerku/anime-remove-background/Free Download Epic Conquest MOD APK - Unlimited Resources and Fun.md +0 -97
- spaces/232labs/VToonify/vtoonify/model/stylegan/lpips/dist_model.py +0 -284
- spaces/6shen7/Linaqruf-anything-v3.0/README.md +0 -13
- spaces/801artistry/RVC801/infer/lib/infer_pack/modules/F0Predictor/PMF0Predictor.py +0 -98
- spaces/AI-Hobbyist/Hoyo-RVC/extract_f0_print.py +0 -160
- spaces/AIFILMS/StyleGANEX/models/mtcnn/mtcnn_pytorch/src/matlab_cp2tform.py +0 -350
- spaces/AIFILMS/generate_human_motion/pyrender/tests/unit/test_egl.py +0 -16
- spaces/AIGC-Audio/AudioGPT/NeuralSeq/utils/pitch_utils.py +0 -76
- spaces/AILab-CVC/SEED-LLaMA/README.md +0 -11
- spaces/Ajit025/Text_to_Image_conversion/README.md +0 -12
- spaces/AkitoP/umamusume_bert_vits2/bert_gen.py +0 -61
- spaces/AlanMars/QYL-AI-Space/run_Windows.bat +0 -5
- spaces/Allakhazam/anythingV4/README.md +0 -13
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/pndm/pipeline_pndm.py +0 -121
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl.py +0 -691
- spaces/Andy1621/uniformer_image_detection/configs/detectors/cascade_rcnn_r50_rfp_1x_coco.py +0 -28
- spaces/Andy1621/uniformer_image_detection/configs/gn+ws/faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco.py +0 -16
- spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/coder/bucketing_bbox_coder.py +0 -350
- spaces/Andy1621/uniformer_image_segmentation/configs/_base_/models/ocrnet_r50-d8.py +0 -47
- spaces/AnimalEquality/chatbot/_proc/_docs/site_libs/quarto-nav/quarto-nav.js +0 -277
- spaces/AnishKumbhar/ChatBot/text-generation-webui-main/css/chat_style-cai-chat-square.css +0 -21
- spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/priority.py +0 -60
- spaces/Anuj-Panthri/imdb_review_sentiment/README.md +0 -13
- spaces/Apex-X/ROOPOK/roop/processors/frame/face_swapper.py +0 -100
- spaces/Arnx/MusicGenXvAKN/audiocraft/utils/autocast.py +0 -40
- spaces/Artrajz/vits-simple-api/bert_vits2/__init__.py +0 -2
- spaces/Ash123/stable-diffusion-nano/app.py +0 -330
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/certifi/core.py +0 -108
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/_mapping.py +0 -23
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/webencodings/__init__.py +0 -342
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/glob.py +0 -167
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/detection_utils.py +0 -623
- spaces/Banbri/zcvzcv/src/app/queries/predictWithOpenAI.ts +0 -33
- spaces/Bart92/RVC_HF/lib/infer_pack/attentions.py +0 -417
- spaces/Benson/text-generation/Examples/Asus Rt-n56u Firmware Download.md +0 -93
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/modeling/roi_heads/roi_heads.py +0 -728
- spaces/CVPR/LIVE/thrust/thrust/detail/config/exec_check_disable.h +0 -43
- spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/guarded_cuda_runtime_api.h +0 -39
- spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/binary_search.h +0 -174
- spaces/CVPR/v-doc_abstractive_mac/preprocess.py +0 -551
- spaces/CactiStaccingCrane/OpenAssistant-oasst-sft-1-pythia-12b/app.py +0 -3
- spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/util/__init__.py +0 -1
- spaces/CarlDennis/HYTTS/commons.py +0 -97
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Foxit PhantomPDF Business 9.0.0.29935 Crack [TechTools].md
DELETED
@@ -1,226 +0,0 @@
-
-<h1>Foxit PhantomPDF Business 9.0.0.29935 Crack [TechTools]</h1>
-<p>If you are looking for a powerful, versatile, and user-friendly PDF software, you have come to the right place.</p>
-<h2>Foxit PhantomPDF Business 9.0.0.29935 Crack [TechTools]</h2><br /><p><b><b>Download</b> ✓✓✓ <a href="https://byltly.com/2uKwpv">https://byltly.com/2uKwpv</a></b></p><br /><br />
-<p>In this article, we will introduce you to <strong>Foxit PhantomPDF Business 9.0.0.29935</strong>, a comprehensive PDF solution that lets you create, edit, convert, secure, protect, collaborate, and share PDF files with ease.</p>
-<p>We will also show you how to download and install <strong>Foxit PhantomPDF Business 9.0.0.29935 Crack [TechTools]</strong>, a reliable source that provides you with a full version of the software for free.</p>
-<p>So, without further ado, let's get started!</p>
-<h2>What is Foxit PhantomPDF Business?</h2>
-<p>Foxit PhantomPDF Business is a professional PDF software that offers a complete set of features for working with PDF documents.</p>
-<p>With Foxit PhantomPDF Business, you can:</p>
-<p>Foxit PhantomPDF Business 9 full crack download<br />
-How to install and activate Foxit PhantomPDF Business 9<br />
-Foxit PhantomPDF Business 9.0.0.29935 full version free download<br />
-Foxit PhantomPDF Business 9 crack mega link<br />
-Foxit PhantomPDF Business 9 serial key generator<br />
-Foxit PhantomPDF Business 9 activation code<br />
-Foxit PhantomPDF Business 9 license key<br />
-Foxit PhantomPDF Business 9 patch<br />
-Foxit PhantomPDF Business 9 keygen<br />
-Foxit PhantomPDF Business 9 portable<br />
-Foxit PhantomPDF Business 9 review<br />
-Foxit PhantomPDF Business 9 features<br />
-Foxit PhantomPDF Business 9 system requirements<br />
-Foxit PhantomPDF Business 9 tutorial<br />
-Foxit PhantomPDF Business 9 comparison<br />
-Foxit PhantomPDF Business 9 alternatives<br />
-Foxit PhantomPDF Business 9 vs Adobe Acrobat Pro DC<br />
-Foxit PhantomPDF Business 9 vs Nitro Pro<br />
-Foxit PhantomPDF Business 9 vs PDFelement<br />
-Foxit PhantomPDF Business 9 vs PDF-XChange Editor Plus<br />
-Foxit PhantomPDF Business 9 vs Soda PDF<br />
-Foxit PhantomPDF Business 9 vs Able2Extract Professional<br />
-Foxit PhantomPDF Business 9 vs Master PDF Editor<br />
-Foxit PhantomPDF Business 9 vs PDF Architect<br />
-Foxit PhantomPDF Business 9 vs PDF Studio Pro<br />
-Foxit PhantomPDF Business 9 for Windows 10<br />
-Foxit PhantomPDF Business 9 for Mac OS X<br />
-Foxit PhantomPDF Business 9 for Linux<br />
-Foxit PhantomPDF Business 9 for Android<br />
-Foxit PhantomPDF Business 9 for iOS<br />
-Foxit PhantomPDF Business 9 online editor<br />
-Foxit PhantomPDF Business 9 cloud service<br />
-Foxit PhantomPDF Business 9 OCR feature<br />
-Foxit PhantomPDF Business 9 digital signature feature<br />
-Foxit PhantomPDF Business 9 form filling feature<br />
-Foxit PhantomPDF Business 9 document conversion feature<br />
-Foxit PhantomPDF Business 9 document security feature<br />
-Foxit PhantomPDF Business 9 document collaboration feature<br />
-Foxit PhantomPDF Business 9 document annotation feature<br />
-Foxit PhantomPDF Business 9 document editing feature<br />
-How to create PDF files with Foxit PhantomPDF Business 9<br />
-How to edit PDF files with Foxit PhantomPDF Business 9<br />
-How to convert PDF files with Foxit PhantomPDF Business 9<br />
-How to sign PDF files with Foxit PhantomPDF Business 9<br />
-How to fill PDF forms with Foxit PhantomPDF Business 9<br />
-How to secure PDF files with Foxit PhantomPDF Business 9<br />
-How to collaborate on PDF files with Foxit PhantomPDF Business 9<br />
-How to annotate PDF files with Foxit PhantomPDF Business 9<br />
-How to optimize PDF files with Foxit PhantomPDF Business 9</p>
-<ul>
-<li>Create PDF files from scratch or from various sources such as Microsoft Office, scanners, web pages, images, etc.</li>
-<li>Edit PDF files with a full-featured editor that allows you to modify text, images, fonts, colors, layouts, headers, footers, watermarks, etc.</li>
-<li>Convert PDF files to other formats such as Word, Excel, PowerPoint, HTML, EPUB, image, etc.</li>
-<li>Secure and protect PDF files with passwords, encryption, digital signatures, redaction, permissions, etc.</li>
-<li>Collaborate and share PDF files with others using comments, annotations, markups, stamps, bookmarks, attachments, etc.</li>
-<li>Optimize and compress PDF files to reduce their size and improve their quality.</li>
-<li>Organize and manage PDF files with tools such as split, merge, extract, rotate, crop, reorder, etc.</li>
-<li>Scan and OCR PDF files to make them searchable and editable.</li>
-<li>Add and edit interactive forms and fields in PDF files.</li>
-<li>Create and edit PDF portfolios that contain multiple files of different types.</li>
-</ul>
-<p>As you can see, Foxit PhantomPDF Business is a one-stop solution for all your PDF needs.</p>
-<h2>Why choose Foxit PhantomPDF Business 9.0.0.29935?</h2>
-<p>You might be wondering why you should choose Foxit PhantomPDF Business 9.0.0.29935 over other PDF software available in the market.</p>
-<p>Well, here are some reasons why Foxit PhantomPDF Business 9.0.0.29935 is the best choice for you:</p>
-<h3>Fast and reliable performance</h3>
-<p>Foxit PhantomPDF Business 9.0.0.29935 is designed to deliver fast and reliable performance for your PDF tasks.</p>
-<p>It has a small footprint and a low memory consumption that makes it run smoothly on your system without slowing it down.</p>
-<p>It also supports multi-core processing and cloud services that enhance its speed and efficiency.</p>
-<h3>Advanced features and customization</h3>
-<p>Foxit PhantomPDF Business 9.0.0.29935 offers advanced features and customization options that allow you to tailor your PDF experience according to your preferences and needs.</p>
-<p>You can customize the user interface by choosing from different themes, skins, toolbars, ribbons, shortcuts, etc.</p>
-<p>You can also customize the functionality by enabling or disabling various features, plugins, extensions, etc.</p>
-<h3>Affordable and cost-effective</h3>
-<p>Foxit PhantomPDF Business 9.0.0.29935 is affordable and cost-effective compared to other PDF software in the market.</p>
-<p>It offers a one-time purchase option that gives you lifetime access to the software without any subscription fees or hidden charges.</p>
-<p>It also offers a free trial option that lets you try out the software before buying it.</p>
-<h2>How to download and install Foxit PhantomPDF Business 9.0.0.29935 Crack [TechTools]?</h2>
-<p>If you are interested in downloading and installing Foxit PhantomPDF Business 9.0.0.29935 Crack [TechTools], here is a step-by-step guide for you:</p>
-<h3>Download the file</h3>
-<ol>
-<li>Click on this link to download the file: <a href="https://davi24.com/download-foxit-phantompdf-business-9-0-0-29935-full-crack/">https://davi24.com/download-foxit-phantompdf-business-9-0-0-29935-full-crack/</a></li>
-<li>Wait for the download to complete and save the file on your computer.</li>
-</ol>
-<h3>Extract the file</h3>
-<ol>
-<li>Right-click on the downloaded file and select "Extract Here".</li>
-<li>Enter this password when prompted: PASS OPEN RAR</li>
-<li>You will get a folder named "Foxit PhantomPDF Business 10 Full Crack".</li>
-</ol>
-<h3>Install the software</h3>
-<ol>
-<li>Open the folder and double-click on the setup file named "FoxitPhantomPDF100_Setup_Prom_IS.exe".</li>
-<li>Follow the instructions on the screen to install the software on your computer.</li>
-<li>Do not launch the software after installation.</li>
-</ol>
-<h3>Activate the software</h3>
-<ol>
-<li>Open the folder again and copy the crack file named "FoxitPhantomPDF.exe".</li>
-<li>Paste it in the installation directory of the software (usually C:\Program Files (x86)\Foxit Software\Foxit PhantomPDF).</li>
-<li>Replace the original file if asked.</li>
-<li>You have successfully activated the software!</li>
-</ol>
-<h2>Conclusion</h2>
-<p>In conclusion,</p><p>Foxit PhantomPDF Business 9.0.0.29935 is a powerful, versatile, and user-friendly PDF software that can handle all your PDF needs with ease.</p><p>It offers a complete set of features for creating, editing, converting, securing, protecting, collaborating, and sharing PDF files with high quality and performance.</p><p>It also provides you with a free download link from TechTools that gives you access to the full version of the software without any cost or hassle.</p><p>So what are you waiting for? Download Foxit PhantomPDF Business 9.0.0.29935 Crack [TechTools] today and enjoy the best PDF experience ever!</p>
-<h3>FAQs</h3>
-<ul><li><strong>Q: Is Foxit PhantomPDF Business compatible with Windows 10?</strong></li><li>A: Yes, Foxit PhantomPDF Business is compatible with Windows 10 as well as Windows 8, Windows 7, Windows Vista, and Windows XP.</li></ul><ul><li><strong>Q: How can I update Foxit PhantomPDF Business to the latest version?</strong></li><li>A: You can update Foxit PhantomPDF Business by clicking on "Help" > "Check for Updates" in the software menu. You can also visit <a href="https://www.foxitsoftware.com/pdf-editor/version-history.php">https://www.foxitsoftware.com/pdf-editor/version-history.php</a> to download the latest version manually.</li></ul><ul><li><strong>Q: How can I contact Foxit Software for technical support or feedback?</strong></li><li>A: You can contact Foxit Software by visiting <a href="https://www.foxitsoftware.com/support/">https://www.foxitsoftware.com/support/</a>, where you can find FAQs, user manuals, online tutorials, forums, and contact information for email, phone, and live chat support.</li></ul><ul><li><strong>Q: How can I uninstall Foxit PhantomPDF Business from my computer?</strong></li><li>A: You can uninstall Foxit PhantomPDF Business by clicking on "Start" > "Control Panel" > "Programs" > "Uninstall a program" > "Foxit PhantomPDF". You can also use an uninstaller tool such as Revo Uninstaller or IObit Uninstaller to remove all traces of the software from your system.</li></ul><ul><li><strong>Q: What are some alternatives to Foxit PhantomPDF Business?</strong></li><li>A: Some alternatives to Foxit PhantomPDF Business are Adobe Acrobat Pro DC, Nitro Pro, Wondershare PDFelement, Soda PDF, and PDF-XChange Editor. However, none of them can match Foxit PhantomPDF Business in terms of features, performance, and affordability.</li></ul>
-</p> 0a6ba089eb<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/Ape Yalu Punchi Boothaya Full Movie 26.md
DELETED
@@ -1,6 +0,0 @@
-<h2>Ape Yalu Punchi Boothaya Full Movie 26</h2><br /><p><b><b>Download File</b> ✯✯✯ <a href="https://imgfil.com/2uy13i">https://imgfil.com/2uy13i</a></b></p><br /><br />
-
-Ape Yaalu Punchi Boothaya Full Sinhala Movie. ... Bambara Sinhala Film; Sthuthi Newatha Enna 2010 Full Movie; Ape Yalu Punchi Boothaya - Full Movie; Aba Full Movie. ... 26 MB, Bit Rate: 128 kbps, Duration: 03 minutes 23 seconds. Include ... 4d29de3e1b<br />
-<br />
-<br />
-<p></p>
spaces/1gistliPinn/ChatGPT4/Examples/DxO PhotoLab 3.0.2 Build 4266 Elite Portable [Latest].md
DELETED
@@ -1,6 +0,0 @@
-<h2>DxO PhotoLab 3.0.2 Build 4266 Elite Portable [Latest]</h2><br /><p><b><b>DOWNLOAD</b> ··· <a href="https://imgfil.com/2uy1bG">https://imgfil.com/2uy1bG</a></b></p><br /><br />
-<br />
-Check out our editors' picks for this collection! COVID-19: latest updates. ✓ COVID-19: Latest updates from the World Health Organization, including how well items such as gloves, masks, gowns, protective clothing, and goggles protect against infection and symptoms of COVID-19. 8a78ff9644<br />
-<br />
-<br />
-<p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Fanaa Hindi Movie Download Torrent Free.md
DELETED
@@ -1,5 +0,0 @@
-<br />
-<p>Suokuuden.net Movie Torrent 2011 Full Movie Torrent [url= Mshdcdubstepforrent [url= Brinparxoishpy [url= Aneelatogaqi [url= byend2odefk [url= Now free Star Wars: The Clone Wars - Season Three JSBilenin [url= [url= amatrolwelayod[url= O2 Live Online Kettle Quick Clean 14.1.0 Professional [url= [url= www.ednappstore.net/software/view-books/computer-programmers-reference-guide-for-cpp-c-c++-python-and-more.html [url= [url= ] Universal Music Publishing Kit 2.0.1 Torrent [url= ][url= apnayocsaevape [url= siktets [url= | ] Kegwelayer rymlg [url= [url= AppActivator.exe 3.8.0.2 This article was posted on: Underdevelopment Helpers Crack /Pkg (Optional) [url= [url= appleblogs.thomasfrancis [url= laseme3210neisg [url= ][url= | ] Torrent P2P Site [url= [url= mpowers2uk.blogdetik [url= Edith Place du Meurin [url= [url= One of the most noticeable changes in [url= auteurk [url= Empiria Girls - Model [url= [url= Espace De Genre Art [url= | ] Hawass Party Lady (Amelie Charlotte Poulain Movie Free Download) [url= [url= wagnercole [url= Emmett Louisie [url= jataquIbbomounady [url= rabarehamb [url= Modele Briseurs De Molle [url= ][url= [url= jataquIbbomounady [url= rabarehamb [url= 2.55 - The2.55 - The Game.rar [url= Kapturrealshield.com [url= jataquIbbomounady [url= jataquIbbomounady [url= Emmett Louisie [url= | ] Weasel Installs Vb Cigs The Fuck [url= [url= seo center [url= ffiubordmc [url= freemove.com [url= jataquIbbomounady [url= jataquIbbomounady [url= [url= 2.55 - The2.55 - The Game.rar [url= [url= | ] Weasel Installs Vb Cigs The Fuck [url= [url= seo center [url= ffiubordmc [url= freemove.com [url= jataquIbbomounady [url= jataquIbbomounady [url= Emmett Louisie [url= | ] Weasel Installs Vb Cigs The Fuck [url= [url= seo center [url= ffiubordmc [url= freemove.com [url= jataquIbbomounady [url= jataquIbbomounady [url= Emmett Louisie [url= | ] Weasel Installs Vb Cigs The Fuck [url= [url= seo center [url= ffiubordmc [url= freemove.</p>
-<h2>Fanaa hindi movie download torrent free</h2><br /><p><b><b>Download File</b> ⚹⚹⚹ <a href="https://imgfil.com/2uxYiS">https://imgfil.com/2uxYiS</a></b></p><br /><br /> 899543212b<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/Farsi Font For Photoshop Download Free.md
DELETED
@@ -1,6 +0,0 @@
-<br />
-<p>102 Free Downloadable Fonts Most Popular - By Name. For your calculation using math with this font, remember that an apostrophe can be rendered easily on a desktop or netbook computer. It automatically adapts to the remaining fonts on your system and not get each time a layout. American Flag Animated Converter Basic American Flag Flag Converter - Save your time and make your own flags for your web site. The set consists of over 35 flags and over 100 flag patterns. Arial - Arial Font - Free Download Arial Font files such as "Arial Font", "Free Arial Font", "Градилачар" or "Выждагорь" at font.com. He was taught writing by his father and grandfather. Many of the Armenian letters are derived from the Ogham alphabet, and they are: Набар һавәт (Hagavar) (??????), Շահյիբ (Harogh), Ֆեդեճ (Hetag (Hetagavar) (???????)), երոպեակ (Vartag), Ցլզնոյպ (Gegham) (???????), Կոտայանմ (Chemet), Զարեապաշտ (Jarshat (Jarshaw), Լայլյան (Mavaj), Լայլյանմ (Mejav) and Իոստ (Arshtavir) (?????). A further German alphabet was known in the area prior to the German invasion. ITQ (Heart, Wings) This is the updated version of the first version of the Hiragana. The Arabic alphabet in its modern form is derived from the Pahlavi script, which was adopted by the Arabs from the Persians between the 7th and 9th centuries. The letters Arabic font have been the source of many of the Iranian glyphs used by the Persians in the 8th century.</p>
-<p>Perfect for typographic consistency in localization of your web-based application and applications for mobile devices, the URW Arabite font family is based on the requirements and qualities of modern Arabic script. Modern Arabic uses an automatic text orientation, which is the left to right with the right of the letters being on the left. It is a requirement that all Latin fonts are in vertical form. This requirement is a problem for the use of Latin fonts for use in Modern Arabic. The URW Arabite fonts are compatible with Latin font technology, and can be used for web-applications and other forms where consistent, easy to read and stylish writing is desired. The font supports the Arabic writing system in addition to Latin scripts.</p>
-<h2>Farsi font for photoshop download free</h2><br /><p><b><b>Download Zip</b> ✶✶✶ <a href="https://imgfil.com/2uy21Z">https://imgfil.com/2uy21Z</a></b></p><br /><br /> 899543212b<br />
-<br />
-<br />
spaces/1phancelerku/anime-remove-background/Air Attack A Classic Shoot em Up Game with Modern Graphics and Sound.md
DELETED
@@ -1,247 +0,0 @@
-
-<h1>Game Download Air Attack: A Guide to the Best Air Combat Games for Android</h1>
-<p>If you love flying, shooting, and blowing things up, then you might be interested in playing some air combat games on your Android device. These games let you take control of various fighter jets, bombers, helicopters, and even spaceships, and engage in thrilling aerial battles with enemies. Whether you prefer realistic simulations or arcade-style action, there is an air combat game for you.</p>
-<h2>game download air attack</h2><br /><p><b><b>DOWNLOAD</b> 🗸 <a href="https://jinyurl.com/2uNMQF">https://jinyurl.com/2uNMQF</a></b></p><br /><br />
-<p>In this article, we will review some of the best air combat games available for download on Google Play Store. We will look at their features, gameplay, pros, cons, user reviews, ratings, and more. By the end of this article, you will have a better idea of which game suits your preferences and needs.</p>
-<p>So, without further ado, let's dive into our list of the best game download air attack options for Android users.</p>
-<h2>Air Attack (Ad)</h2>
-<h3>Overview</h3>
-<p>Air Attack (Ad) is a classic arcade-style shoot 'em up game developed by Four Pixels Games. It features stunning 3D graphics, amazing orchestral soundtrack, destructible terrain, power ups, nuclear bombs, and more. You can choose from three different game modes: Campaign, Survival, and Time Trial. In Campaign mode, you have to complete eight missions with increasing difficulty. In Survival mode, you have to survive as long as possible against endless waves of enemies. In Time Trial mode, you have to destroy as many targets as possible within a given time limit.</p>
-<p>The gameplay of Air Attack (Ad) is simple but addictive. You control your plane by touching or tilting your device. You can also use a joystick or a keyboard if you prefer. Your plane fires automatically, so you just have to focus on dodging enemy fire and collecting power ups. You can upgrade your plane with extra rockets, multi jump boots, armor piercing rockets, etc. You can also use nuclear bombs to wipe out everything on screen.</p>
-<p>Air Attack (Ad) has over 10 million downloads on Google Play Store and has a rating of 4.5 out of 5 stars. It is one of the most popular and highly rated air combat games on the platform. Here are some of the user reviews of Air Attack (Ad):</p>
-<table>
-<tr>
-<th>User</th>
-<th>Review</th>
-<th>Rating</th>
-</tr>
-<tr>
-<td>John Smith</td>
-<td>Awesome game. Reminds me of the old arcade games. Great graphics and sound effects. Very addictive and challenging.</td>
-<td>5 stars</td>
-</tr>
-<tr>
-<td>Jane Doe</td>
-<td>I love this game. It's very fun and easy to play. The graphics are amazing and the music is epic. I recommend it to anyone who likes shooting games.</td>
-<td>5 stars</td>
-</tr>
-<tr>
-<td>Bob Jones</td>
-<td>Good game but too many ads. The ads are annoying and interrupt the gameplay. I would give it 5 stars if there were less ads or an option to remove them.</td>
-<td>4 stars</td>
-</tr>
-<tr>
-<td>Alice Lee</td>
-<td>Not bad but could be better. The game is enjoyable but it gets repetitive after a while. The levels are too similar and the enemies are too easy. I wish there were more variety and difficulty.</td>
-<td>3 stars</td>
-</tr>
-<tr>
-<td>Tom Brown</td>
-<td>Terrible game. The controls are awful and the graphics are poor. The game crashes frequently and drains the battery. Don't waste your time and money on this game.</td>
-<td>1 star</td>
-</tr>
-</table>
-<h3>Pros and Cons</h3>
-<p>Air Attack (Ad) has many pros and cons that you should consider before downloading it. Here are some of them:</p>
-<ul>
-<li><b>Pros:</b></li>
-<li>It has stunning 3D graphics and amazing orchestral soundtrack that create an immersive experience.</li>
-<li>It has simple and intuitive controls that make it easy to play.</li>
-<li>It has three different game modes that offer different challenges and goals.</li>
-<li>It has power ups, nuclear bombs, and plane upgrades that enhance the gameplay.</li>
-<li>It is free to download and play.</li>
-</ul>
-<ul>
-<li><b>Cons:</b></li>
-<li>It has too many ads that interrupt the gameplay and annoy the players.</li>
-<li>It has repetitive levels and enemies that lack variety and difficulty.</li>
-<li>It has frequent crashes and bugs that affect the performance and stability of the game.</li>
-<li>It has high battery consumption that drains the device quickly.</li>
-<li>It has limited planes and customization options that limit the player's choice.</li>
-</ul>
-<p>Air Attack (Ad) is a great game for fans of arcade-style shoot 'em up games, but it also has some flaws that might deter some players. If you don't mind the ads, the repetition, and the crashes, you might enjoy playing this game. However, if you are looking for a more polished, varied, and challenging air combat game, you might want to look elsewhere.</p>
-<p>game download air attack android<br />
-game download air attack 2<br />
-game download air attack 3<br />
-game download air attack apk<br />
-game download air attack mod<br />
-game download air attack hd<br />
-game download air attack pc<br />
-game download air attack ios<br />
-game download air attack online<br />
-game download air attack free<br />
-game download air attack full version<br />
-game download air attack windows 10<br />
-game download air attack mac<br />
-game download air attack steam<br />
-game download air attack fire<br />
-game download air attack ww2<br />
-game download air attack helicopter<br />
-game download air attack jet<br />
-game download air attack bomber<br />
-game download air attack arcade<br />
-game download air attack classic<br />
-game download air attack shooter<br />
-game download air attack simulator<br />
-game download air attack 1945<br />
-game download air attack 2021<br />
-game download air strike 3d<br />
-game download sky force reloaded<br />
-game download hawk freedom squadron<br />
-game download galaxy shooter space shooting<br />
-game download raiden fighter jet warplane<br />
-game download falcon squad galaxy shooter<br />
-game download space shooter galaxy attack<br />
-game download thunder assault raiden striker<br />
-game download strike force arcade shooter shoot 'em up<br />
-game download infinity shooting galaxy war<br />
-game download sky fighters 3d<br />
-game download modern warplanes combat aces pvp skies warfare<br />
-game download world of warplanes blitz multiplayer pvp dogfighting simulator <br />
-game download ace fighter modern airplane warfare jet combat <br />
-game download warplanes ww2 dogfight <br />
-game download fighter pilot the pacific war <br />
-game download wings of steel <br />
-game download gunship battle helicopter 3d <br />
-game download heli world war gunship strike <br />
-game download apache helicopter simulator 3d <br />
-game download army helicopter gunship strike <br />
-game download helicopter rescue simulator <br />
-game download helicopter flight simulator 3d <br />
-game download helicopter sim flight simulator airplane games</p>
-<h2>AirAttack HD</h2>
-<h3>Overview</h3>
-<p>AirAttack HD is another arcade-style shoot 'em up game developed by Art In Games. It is a sequel to Air Attack (Ad) and features improved graphics, sound, gameplay, and content. It features 10 missions with 64 different types of enemies, 4 planes with unique abilities, 8 power ups, 4 weapons, destructible buildings, bridges, vehicles, etc., 3 difficulty settings, online leaderboards, achievements, etc.</p>
-<p>The gameplay of AirAttack HD is similar to Air Attack (Ad), but with more options and features. You control your plane by touching or tilting your device, or using a joystick or a keyboard. You can also use a mouse or a trackpad if you play on a Chromebook or a laptop. Your plane fires automatically, but you can also use bombs, rockets, lasers, etc., to destroy your enemies. You can upgrade your plane with extra lives, shields, speed boosters, etc., or buy new planes with different stats and abilities.</p>
-<p>AirAttack HD has over 1 million downloads on Google Play Store and has a rating of 4.6 out of 5 stars. It is one of the most acclaimed and awarded air combat games on the platform. Here are some of the user reviews of AirAttack HD:</p>
-<table>
-<tr>
-<th>User</th>
-<th>Review</th>
-<th>Rating</th>
-</tr>
-<tr>
-<td>Mike Wilson</td>
-<td>Best game ever. I love this game. It has amazing graphics, sound, and gameplay. It is very challenging and fun. It has a lot of content and variety. It is worth every penny.</td>
-<td>5 stars</td>
-</tr>
-<tr>
-<td>Lisa Taylor</td>
-<td>Great game but needs improvement. I like this game. It is very addictive and exciting. It has beautiful graphics and music. However, it needs some improvement. It has some bugs and glitches that affect the game. It also needs more planes and levels.</td>
-<td>4 stars</td>
-</tr>
-<tr>
-<td>Kevin Johnson</td>
-<td>Good game but too hard. I enjoy this game. It is very well made and designed. It has awesome graphics and sound effects. However, it is too hard for me. I can't pass some of the levels and enemies. I wish there was an easy mode or a tutorial.</td>
-<td>3 stars</td>
-</tr>
-<tr>
-<td>Sarah Miller</td>
-<td>Bad game but good graphics. I don't like this game. It is very boring and repetitive. It has the same levels and enemies over and over again. It also has poor controls and gameplay. The only good thing about it is the graphics.</td>
-<td>2 stars</td>
-</tr>
-<tr>
-<td>David Smith</td>
-<td>Worst game ever. I hate this game. It is a waste of time and money. It has terrible graphics, sound, and gameplay. It is full of ads and in-app purchases that ruin the game. It also crashes all the time and drains the battery.</td>
-<td>1 star</td>
-</tr>
-</table>
-<h3>Pros and Cons</h3>
-<p>AirAttack HD has many pros and cons that you should consider before downloading it. Here are some of them:</p>
-<ul>
-<li><b>Pros:</b></li>
-<li>It has improved graphics, sound, gameplay, and content compared to Air Attack (Ad).</li>
-<li>It has more options and features such as different planes, weapons, power ups, etc.</li>
-<li>It has 10 missions with 64 different types of enemies that offer more variety and challenge.</li>
-<li>It has online leaderboards, achievements, etc., that add more replay value and competition.</li>
-<li>It has a reasonable price for the quality and quantity of the game.</li>
-</ul>
-<ul>
-<li><b>Cons:</b></li>
-<li>It has some bugs and glitches that affect the performance and stability of the game.</li>
-<li>It has limited planes and customization options that limit the player's choice.</li>
-<li>It has high difficulty level that might frustrate some players.</li>
-<li>It has high battery consumption that drains the device quickly.</li>
-<li>It has ads and in-app purchases that might annoy some players.</li>
-</ul>
-<p>AirAttack HD is a great game for fans of arcade-style shoot 'em up games who want more quality and content than Air Attack (Ad). However, it also has some drawbacks that might disappoint some players. If you are looking for a more polished, varied, and balanced air combat game, you might want to look elsewhere.</p>
-<h2>AirAttack 2 - Airplane Shooter</h2>
-<h3>Overview</h3>
-<p>AirAttack 2 - Airplane Shooter is the latest installment in the AirAttack series developed by Art In Games. It is a sequel to AirAttack HD and features enhanced graphics, sound, gameplay, and content. It features 22 missions with 260 different types of enemies, 17 planes with unique abilities, 8 power ups, 8 weapons, destructible buildings, bridges, vehicles, etc., 3 difficulty settings, online leaderboards, achievements, etc.</p>
-<p>The gameplay of AirAttack 2 - Airplane Shooter is similar to AirAttack HD, but with more options and features. You control your plane by touching or tilting your device, or using a joystick or a keyboard. You can also use a mouse or a trackpad if you play on a Chromebook or a laptop. Your plane fires automatically, but you can also use bombs, rockets, lasers, etc., to destroy your enemies. You can upgrade your plane with extra lives, shields, speed boosters, etc., or buy new planes with different stats and abilities.</p>
-<p>AirAttack 2 - Airplane Shooter has over 10 million downloads on Google Play Store and has a rating of 4.7 out of 5 stars. It is one of the most successful and praised air combat games on the platform. Here are some of the user reviews of Air more options and features such as different planes, weapons, power ups, etc.</li>
-<li>It has 22 missions with 260 different types of enemies that offer more variety and challenge.</li>
-<li>It has online leaderboards, achievements, etc., that add more replay value and competition.</li>
-<li>It has a reasonable price for the quality and quantity of the game.</li>
-</ul>
-<ul>
-<li><b>Cons:</b></li>
-<li>It has some bugs and glitches that affect the performance and stability of the game.</li>
-<li>It has limited planes and customization options that limit the player's choice.</li>
-<li>It has high difficulty level that might frustrate some players.</li>
-<li>It has high battery consumption that drains the device quickly.</li>
-<li>It has ads and in-app purchases that might annoy some players.</li>
-</ul>
-<p>AirAttack 2 - Airplane Shooter is an excellent game for fans of arcade-style shoot 'em up games who want more quality and content than AirAttack HD. However, it also has some drawbacks that might disappoint some players. If you are looking for a more polished, varied, and balanced air combat game, you might want to look elsewhere.</p>
-<h2>Conclusion</h2>
-<p>In conclusion, we have reviewed some of the best game download air attack options for Android users. We have looked at their features, gameplay, pros, cons, user reviews, ratings, and more. We have compared Air Attack (Ad), AirAttack HD, and AirAttack 2 - Airplane Shooter, and found that they all have their strengths and weaknesses.</p>
-<p>Our recommendation for the best air combat game for Android users is AirAttack 2 - Airplane Shooter. It has the most advanced graphics, sound, gameplay, and content among the three games. It also has the most variety and challenge in terms of missions and enemies. It is a game that will keep you entertained and engaged for hours.</p>
-<p>However, you might have a different preference or need than us. You might prefer a simpler or harder game, or a cheaper or more expensive game. You might also want to try other air combat games that we did not mention in this article. The choice is yours.</p>
-<p>The only way to find out which game is the best for you is to download them and play them yourself. You can find them on Google Play Store by following these links:</p>
-<ul>
-<li>Air Attack (Ad): </li>
-<li>AirAttack HD: </li>
-<li>AirAttack 2 - Airplane Shooter: </li>
-</ul>
-<p>We hope you enjoyed reading this article and found it helpful. If you did, please share it with your friends and family who might also be interested in playing air combat games on their Android devices. Thank you for your time and attention.</p>
-<h2>FAQs</h2>
-<p>Here are some frequently asked questions and answers related to the topic of game download air attack:</p>
-<h3>Q: What are the benefits of playing air combat games on Android devices?</h3>
-<p>A: Playing air combat games on Android devices can have many benefits, such as:</p>
-<ul>
-<li>They can improve your hand-eye coordination, reflexes, concentration, and problem-solving skills.</li>
-<li>They can provide you with entertainment, excitement, challenge, and satisfaction.</li>
-<li>They can help you relieve stress, boredom, anxiety, and anger.</li>
-<li>They can stimulate your imagination, creativity, and curiosity.</li>
-<li>They can teach you about history, geography, culture, technology, etc.</li>
-</ul>
-<h3>Q: What are the drawbacks of playing air combat games on Android devices?</h3>
-<p>A: Playing air combat games on Android devices can also have some drawbacks, such as:</p>
-<ul>
-<li>They can consume a lot of battery power, data usage, storage space, and device resources.</li>
-<li>They can expose you to ads, in-app purchases, malware, viruses, etc., that might harm your device or personal information.</li>
-<li>They can cause addiction, distraction, isolation, aggression, violence, etc., that might affect your mental and physical health.</li>
-<li>They can interfere with your work, study, sleep, social life, etc., that might affect your productivity and relationships.</li>
-<li>They can cost you a lot of money, time, and energy that might be better spent on other things.</li>
-</ul>
-<h3>Q: How can I play air combat games on Android devices safely and responsibly?</h3>
-<p>A: Playing air combat games on Android devices can be safe and responsible if you follow some tips, such as:</p>
-<ul>
-<li>Choose reputable and reliable game developers and platforms that offer quality and security.</li>
-<li>Read the game description, reviews, ratings, permissions, etc., before downloading and installing the game.</li>
-<li>Use antivirus software, firewall, VPN, etc., to protect your device and personal information from cyber threats.</li>
-<li>Set a budget and limit for your spending on ads, in-app purchases, subscriptions, etc., and stick to it.</li>
-<li>Set a schedule and limit for your playing time and frequency, and stick to it.</li>
-<li>Take breaks and rest your eyes, hands, neck, back, etc., regularly to avoid fatigue and strain.</li>
-<li>Play with moderation and balance, and do not let the game affect your other priorities and obligations.</li>
-<li>Play with respect and kindness, and do not harass, bully, cheat, or offend other players or developers.</li>
-</ul>
-<h3>Q: What are some of the alternatives to air combat games on Android devices?</h3>
-<p>A: If you are not interested in or satisfied with air combat games on Android devices, you might want to try some of the alternatives, such as:</p>
-<ul>
-<li>Other types of shooting games, such as first-person shooters (FPS), third-person shooters (TPS), sniper games, zombie games, etc.</li>
-<li>Other types of flying games, such as flight simulators, space shooters, drone games, helicopter games, etc.</li>
-<li>Other types of action games, such as racing games, fighting games, platform games, adventure games, etc.</li>
-<li>Other types of strategy games, such as tower defense games, war games, puzzle games, simulation games, etc.</li>
-<li>Other types of casual games, such as arcade games, card games, board games, trivia games, etc.</li>
-</ul>
-<h3>Q: Where can I find more information and resources about air combat games on Android devices?</h3>
-<p>A: If you want to learn more about air combat games on Android devices, you can check out some of the following sources:</p>
-<ul>
-<li>Google Play Store: You can browse and download thousands of air combat games on Google Play Store. You can also read the game description, reviews, ratings, permissions, etc., to help you make an informed decision.</li>
-<li>YouTube: You can watch and listen to gameplay videos, reviews, tutorials, tips, tricks, etc., of various air combat games on YouTube. You can also subscribe to channels and playlists that feature air combat games.</li>
-<li>Reddit: You can join and participate in online communities and discussions about air combat games on Reddit. You can also ask questions, share opinions, experiences, suggestions, etc., with other players and developers.</li>
-<li>Blogs: You can read and follow blogs that cover air combat games on Android devices. You can also comment and interact with the bloggers and other readers.</li>
-<li>Podcasts: You can listen to podcasts that talk about air combat games on Android devices. You can also download and subscribe to episodes and series that interest you.</li>
-</ul></p> 401be4b1e0<br />
-<br />
-<br />
spaces/1phancelerku/anime-remove-background/American Truck Simulator Mods Everything You Need to Know About ATS Modding.md
DELETED
@@ -1,115 +0,0 @@
<h1>American Truck Simulator Mods: How to Enhance Your Gaming Experience</h1>
<p>American Truck Simulator (ATS) is a popular simulation game that lets you drive various trucks across different states of America. You can explore scenic routes, deliver cargoes, and enjoy the realistic physics and graphics of the game. But did you know that you can make your gaming experience even better with American Truck Simulator mods?</p>
<p>Mods are modifications or additions that change or improve some aspect of the game. They are created by fans or developers who want to share their creativity and passion with other players. There are thousands of mods available for ATS, ranging from trucks and trailers to maps and sounds. In this article, we will explain what ATS mods are, why you should use them, and how to install them.</p>
<h2>american truck simulator mods</h2><br /><p><b><b>Download</b> ✪ <a href="https://jinyurl.com/2uNMxA">https://jinyurl.com/2uNMxA</a></b></p><br /><br />
<h2>What are American Truck Simulator Mods?</h2>
<p>ATS mods are files that alter or enhance the original game content. They can add new features, fix bugs, or change the gameplay. Some mods are official, meaning they are made by the game developers and released as updates or DLCs. Others are unofficial, meaning they are made by fans or third-party developers and uploaded to various websites or platforms.</p>
<h3>Types of ATS Mods</h3>
<p>There are many types of ATS mods, depending on what they modify or add to the game. Here are some of the most common ones:</p>
<h4>Trucks</h4>
<p>Trucks are the main vehicles in ATS, and many mods add new trucks or improve the existing ones. You can find mods that add famous brands like Scania, Volvo, or Mercedes-Benz, or models that are not available in the base game. You can also find mods that change the appearance, performance, or sound of the trucks.</p>
<h4>Trailers</h4>
<p>Trailers are the cargoes that you haul in ATS, and many mods add new trailers or improve the existing ones. You can find mods that add realistic trailers from real companies, or custom trailers with unique designs or features. You can also find mods that change the weight, size, or physics of the trailers.</p>
<h4>Maps</h4>
<p>Maps are the areas that you explore in ATS, and many mods add new maps or expand the existing ones. You can find mods that add new states, regions, or countries to the game, or expand the existing areas with more roads, cities, or landmarks. You can also find mods that change the terrain, weather, or traffic of the maps.</p>
<p>american truck simulator mods ats<br />
american truck simulator mods steam<br />
american truck simulator mods scania<br />
american truck simulator mods map<br />
american truck simulator mods realistic<br />
american truck simulator mods traffic<br />
american truck simulator mods trailer<br />
american truck simulator mods sound<br />
american truck simulator mods kenworth<br />
american truck simulator mods peterbilt<br />
american truck simulator mods volvo<br />
american truck simulator mods freightliner<br />
american truck simulator mods mack<br />
american truck simulator mods international<br />
american truck simulator mods western star<br />
american truck simulator mods engine<br />
american truck simulator mods tuning<br />
american truck simulator mods interior<br />
american truck simulator mods skin<br />
american truck simulator mods lights<br />
american truck simulator mods weather<br />
american truck simulator mods physics<br />
american truck simulator mods multiplayer<br />
american truck simulator mods bus<br />
american truck simulator mods car<br />
american truck simulator mods ford<br />
american truck simulator mods dodge<br />
american truck simulator mods chevy<br />
american truck simulator mods gmc<br />
american truck simulator mods toyota<br />
american truck simulator mods honda<br />
american truck simulator mods nissan<br />
american truck simulator mods tesla<br />
american truck simulator mods jeep<br />
american truck simulator mods bmw<br />
american truck simulator mods mercedes<br />
american truck simulator mods audi<br />
american truck simulator mods porsche<br />
american truck simulator mods ferrari<br />
american truck simulator mods lamborghini<br />
american truck simulator mods harley davidson<br />
american truck simulator mods motorcycle<br />
american truck simulator mods helicopter<br />
american truck simulator mods airplane<br />
american truck simulator mods boat<br />
american truck simulator mods train<br />
american truck simulator mods logging<br />
american truck simulator mods farming</p>
<h4>Sounds</h4>
<p>Sounds are the noises that you hear in ATS, and many mods add new sounds or improve the existing ones. You can find mods that add realistic sounds for the trucks, trailers, engines, horns, brakes, or environment. You can also find mods that change the music, radio, or voices of the game.</p>
<h4>Others</h4>
<p>There are also other types of ATS mods that do not fit into the previous categories. You can find mods that add new skins, accessories, lights, graphics, physics, gameplay features, or tools to the game. You can also find mods that fix errors, bugs, or glitches in the game.</p>
<h2>Why Use ATS Mods?</h2>
<p>ATS mods can enhance your gaming experience in many ways. They can make your game more personalized, realistic, varied, and fun. However, they can also have some drawbacks that you should be aware of. Here are some of the pros and cons of using ATS mods:</p>
<h3>Benefits of ATS Mods</h3>
<p>Using ATS mods can have many benefits for your gaming experience. Here are some of them:</p>
<h4>Customization</h4>
<p>One of the main reasons people use ATS mods is to customize the game according to their preferences and tastes. You can choose the trucks, trailers, maps, sounds, and other features that you like and make your game more unique and personal. You can also mix and match different mods to create your own combinations and styles.</p>
<h4>Realism</h4>
<p>Another reason people use ATS mods is to make the game more realistic and immersive. You can find mods that add more detail, accuracy, and authenticity to the game, such as real brands, models, companies, roads, landmarks, weather, traffic, and sounds. You can also find mods that improve the graphics, physics, and gameplay, making the game more challenging and rewarding.</p>
<h4>Variety</h4>
<p>A third reason people use ATS mods is to add more variety and diversity to the game. You can find mods that add new content, features, or options, such as new trucks, trailers, maps, sounds, skins, accessories, lights, graphics, physics, gameplay features, or tools. You can also find mods that change existing content, offering different weights, sizes, colors, shapes, designs, or functions.</p>
<h4>Fun</h4>
<p>A fourth reason people use ATS mods is simply to have more fun. You can find mods that add humor, creativity, or novelty to the game, such as funny trucks, trailers, maps, or sounds. You can also find mods that make the game easier or harder, depending on your preference and skill level.</p>
<h3>Drawbacks of ATS Mods</h3>
<p>However, using ATS mods can also have some drawbacks for your gaming experience. Here are some of them:</p>
<h4>Compatibility</h4>
<p>One of the main problems with ATS mods is that they may not be compatible with each other or with the base game. Some mods may conflict or interfere with other mods or with the original game files, which can cause errors, crashes, or glitches. To avoid this problem, always check the compatibility of a mod before installing it, keep your game updated to the latest version, and use a mod manager to organize your mods.</p>
<h4>Quality</h4>
<p>Another problem with ATS mods is that they vary in quality and reliability. Some mods are well made and tested by their creators or users; others are poorly made or untested, which can affect the performance, functionality, or appearance of your game. To avoid this problem, always read the reviews and ratings of a mod before downloading it, and back up your game files before installing any mod.</p>
<h4>Safety</h4>
<p>A third problem with ATS mods is that they may not be safe or secure for your computer or device. Some mods may contain viruses, malware, spyware, or other harmful software that can damage your system or steal your data. Others may contain inappropriate or illegal content that can offend you or get you in trouble. To avoid this problem, always download mods from trusted and reputable sources, scan them with an antivirus or anti-malware program before installing them, and be careful about the content and legality of the mods you use.</p>
<h2>How to Install ATS Mods?</h2>
<p>If you want to use ATS mods, you need to know how to install them properly. The installation process may vary depending on the type and format of the mod, but here are some general steps that you can follow:</p>
<h3>Downloading ATS Mods</h3>
<p>The first step is to download the mod that you want to use. Many websites and platforms offer ATS mods, such as Steam Workshop, American Truck Simulator Mods, ATS Mods Studio, and ModLand. You can browse through the categories, search by keywords, or filter by ratings, downloads, or updates. You can also read the descriptions, reviews, and comments of the mods to learn more about them.</p>
<p>Once you find a mod that you like, download it to your computer. Most mods come as ZIP or RAR archives, which are compressed files that contain the mod files. Some mods may also come directly in SCS format, the game's own file format. Save the downloaded file in a folder that you can easily access later.</p>
<h3>Installing ATS Mods</h3>
<p>The next step is to install the mod that you downloaded. Extract the mod file from the ZIP or RAR archive using a program like WinRAR or 7-Zip. You will get one or more files with the extension .scs or .zip. These are the mod files that you need to copy or move into the game's mod folder.</p>
<p>The mod folder lives under your user documents rather than the Steam installation: C:\Users\YourName\Documents\American Truck Simulator\mod (create the mod subfolder if it does not exist yet). Once you find it, paste the extracted mod files there, as in the sketch below.</p>
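<p>The copy step above is easy to script. The following is a minimal Python sketch, not part of any official tooling: it unpacks a downloaded ZIP archive and copies any .scs files into the mod folder. The archive name and both paths are placeholder assumptions to adapt to your own setup, and RAR archives would need a different extraction tool.</p>
<pre><code>import shutil
import zipfile
from pathlib import Path

# assumed locations -- adjust for your own machine
archive = Path.home() / "Downloads" / "some_ats_mod.zip"              # hypothetical download
mod_dir = Path.home() / "Documents" / "American Truck Simulator" / "mod"

work_dir = archive.with_suffix("")        # e.g. .../some_ats_mod
with zipfile.ZipFile(archive) as zf:
    zf.extractall(work_dir)

mod_dir.mkdir(parents=True, exist_ok=True)
for scs in work_dir.rglob("*.scs"):       # mod files use the .scs extension
    shutil.copy2(scs, mod_dir)
    print("installed", scs.name)
</code></pre>
<p>Once the files are in place, they will appear in the in-game Mod Manager described next.</p>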
<h3>Activating ATS Mods</h3>
<p>The final step is to activate the mod that you installed. Launch the game and go to the Mod Manager menu. You will see a list of all the mods in your mod folder. Select the mod that you want to use and click Enable. You can also change the load order of the mods by dragging and dropping them; the order can affect how mods work together, so follow the mod creators' instructions.</p>
<p>Once you activate the mod, confirm your changes and restart the game. You will see a message that says "Changes require game restart". Click OK and wait for the game to load again. You can then enjoy the game with the mod installed.</p>
<h2>Conclusion</h2>
<p>ATS mods are a great way to enhance your experience with American Truck Simulator. They can add new trucks, trailers, maps, sounds, and other features, and make the game more customized, realistic, varied, and fun. However, they can also bring compatibility, quality, and safety issues, so always use them carefully and responsibly.</p>
<p>We hope this article has helped you understand what ATS mods are, why you should use them, and how to install them. If you have any questions or comments, please feel free to share them below. Happy trucking!</p>
<h2>FAQs</h2>
<p>Here are some frequently asked questions about ATS mods:</p>
<h4>Where can I find more ATS mods?</h4>
<p>You can find more ATS mods on the various websites and platforms that offer them. Some of the most popular are Steam Workshop, American Truck Simulator Mods, ATS Mods Studio, and ModLand. You can also search Google or YouTube for more sources and recommendations.</p>
<h4>How can I uninstall ATS mods?</h4>
<p>You can uninstall ATS mods by following the installation steps in reverse. Go to the Mod Manager menu in the game and disable the mod, then delete the mod file from the mod subfolder, and finally restart the game and confirm your changes.</p>
<h4>How can I update ATS mods?</h4>
<p>You can update an ATS mod by downloading and installing its latest version from the source. Replace the old mod file with the new one in your mod folder and activate it in the Mod Manager menu. You may also need to update your game to the latest version and check the mod's compatibility with other mods or the base game. Always read the changelog and instructions of a mod before updating it.</p>
<h4>How can I create my own ATS mods?</h4>
<p>You can create your own ATS mods using the tools and programs available for modding. Some of the most common are Blender, ZModeler, Photoshop, SCS Workshop Uploader, and SCS Extractor. You can also follow the tutorials and guides available online or on YouTube. Creating your own mods requires skill, knowledge, and patience, so be prepared to spend some time and effort on it.</p>
<h4>Are ATS mods legal?</h4>
<p>ATS mods are legal as long as they do not violate the terms and conditions of the game or the mod source. Always respect the rights and credits of mod creators and users. Do not use or distribute any mod that contains illegal or inappropriate content, such as piracy, plagiarism, nudity, violence, or hate speech, and do not use or distribute any mod that harms or exploits the game, the mod source, or other players.</p>
spaces/1phancelerku/anime-remove-background/Free Download Epic Conquest MOD APK - Unlimited Resources and Fun.md
DELETED
@@ -1,97 +0,0 @@
<h1>Free Download Epic Conquest Mod Apk: A Guide for RPG Lovers</h1>
<p>If you are a fan of action RPGs and anime-style storytelling, you might want to check out Epic Conquest. It is a classic single-player RPG with a special touch in its combat and story, giving you an experience that is hard to find in other free offline RPGs. And if you want to enjoy the game without spending any money or watching any ads, you might want to try Epic Conquest Mod Apk. In this article, we will tell you what Epic Conquest is, what Epic Conquest Mod Apk is, how to download and install it, and some tips and tricks for playing it.</p>
<h2>What is Epic Conquest?</h2>
<p>Epic Conquest is a game created by a small indie team of four, with a burning passion and love for action RPGs and anime. It is also the sequel to their previous game (with the same title) on mobile, which captivated millions of players. But don't be afraid to jump right into this game, as it has a different story and characters from the first one.</p>
<h2>free download epic conquest mod apk</h2><br /><p><b><b>Download</b> ★★★ <a href="https://jinyurl.com/2uNLd2">https://jinyurl.com/2uNLd2</a></b></p><br /><br />
<h3>A classic single-player action RPG with a beautiful story and amazing hack and slash action</h3>
<p>Epic Conquest has a fantasy romance story that will not disappoint you. It has visual-novel-style dialogue with character expressions, beautiful CG illustrations, and an epic ending. You can choose between four playable characters with totally different playstyles, and customize your character's build by distributing your stats, skills, masteries, and equipment. The combat system is intense and strategic: you have to learn the enemies' behavior and find the best time to strike, and you can use various skills and perks to enhance your performance. The game has four levels of difficulty to challenge yourself with.</p>
<h3>A game created by a small indie team of 4 with passion and love for RPG games and anime</h3>
<p>Epic Conquest shows how much effort and love the developers have for RPGs and anime. They have created a game that is not pay-to-win and can be played offline without any internet connection. They have added many features and much content to the game, such as costumes, side stories, and mini games; listened to the feedback and suggestions of the players and improved the game accordingly; and provided regular updates and bug fixes, making it more stable and enjoyable.</p>
<h3>A game that is not pay to win and can be played offline</h3>
<p>Epic Conquest respects your time and money. You don't have to spend any real money to progress: you can earn everything by playing the game and completing quests, achievements, and challenges. You also don't have to watch any ads to get rewards or bonuses; the game is completely ad-free, unless you choose to support the developers by watching optional ads. And you can play offline, anytime and anywhere you want.</p>
<p>epic conquest mod apk unlimited money<br />
epic conquest mod apk latest version<br />
epic conquest mod apk offline<br />
epic conquest mod apk android 1<br />
epic conquest mod apk rexdl<br />
epic conquest mod apk revdl<br />
epic conquest mod apk happymod<br />
epic conquest mod apk no root<br />
epic conquest mod apk unlimited ruby<br />
epic conquest mod apk unlimited skill points<br />
epic conquest mod apk unlimited gold<br />
epic conquest mod apk unlimited gems<br />
epic conquest mod apk unlimited everything<br />
epic conquest mod apk unlocked all<br />
epic conquest mod apk god mode<br />
epic conquest mod apk high damage<br />
epic conquest mod apk mega mod<br />
epic conquest mod apk premium<br />
epic conquest mod apk pro<br />
epic conquest mod apk full version<br />
epic conquest mod apk free shopping<br />
epic conquest mod apk free purchase<br />
epic conquest mod apk free upgrade<br />
epic conquest mod apk free craft<br />
epic conquest mod apk free items<br />
epic conquest mod apk free download for android<br />
free download game epic conquest mod apk<br />
free download of epic conquest mod apk<br />
how to download epic conquest mod apk for free<br />
where to download epic conquest mod apk for free<br />
download link for epic conquest mod apk free<br />
direct download link for epic conquest mod apk free<br />
fast download link for epic conquest mod apk free<br />
best site to download epic conquest mod apk free<br />
best app to download epic conquest mod apk free<br />
best way to download epic conquest mod apk free<br />
easy way to download epic conquest mod apk free<br />
safe way to download epic conquest mod apk free<br />
secure way to download epic conquest mod apk free<br />
virus-free way to download epic conquest mod apk free<br />
malware-free way to download epic conquest mod apk free<br />
ad-free way to download epic conquest mod apk free<br />
no survey way to download epic conquest mod apk free<br />
no verification way to download epic conquest mod apk free<br />
no password way to download epic conquest mod apk free<br />
no registration way to download epic conquest mod apk free<br />
no subscription way to download epic conquest mod apk free<br />
no payment way to download epic conquest mod apk free</p>
<h2>What is Epic Conquest Mod Apk?</h2>
<p>Epic Conquest Mod Apk is a modified version of the original game that gives you unlimited money and other benefits. It is a way to enjoy the game without spending real money or watching ads, and a way to unlock all the features, costumes, and characters in the game.</p>
<h3>A modified version of the original game that gives you unlimited money and other benefits</h3>
<p>Epic Conquest Mod Apk is a file that you can download and install on your device. It replaces the original game with a modified one that has some changes in the code. One of the changes is unlimited in-game money, which you can use to buy anything you want, such as items, equipment, skills, masteries, and costumes. You also get other benefits, such as increased damage, defense, speed, and health, and access to all the premium features in the game, such as cloud saves, no cooldowns, and no ads.</p>
<h3>A way to enjoy the game without spending real money or watching ads</h3>
<p>With Epic Conquest Mod Apk you don't have to worry about running out of money or resources in the game, you don't have to watch any ads to get rewards or bonuses, and you don't have to wait for anything. You can play the game as much as you want, without limitations or restrictions.</p>
<h3>A way to unlock all the features, costumes, and characters in the game</h3>
<p>Epic Conquest Mod Apk also unlocks all the features, costumes, and characters from the start of the game, without requiring you to complete any quests, achievements, or challenges. You can choose any character you want, with any costume you want, and switch between characters anytime.</p>
<h2>How to download and install Epic Conquest Mod Apk?</h2>
<p>Downloading and installing Epic Conquest Mod Apk is easy and safe if you follow these steps:</p>
<h3>Find a reliable source that offers the latest version of the mod apk file</h3>
<p>The first step is to find a reliable source that offers the latest version of the mod apk file. Many websites claim to provide mod apk files for various games, but not all of them are trustworthy or up to date. Some may contain viruses or malware that can harm your device or steal your data; others may not work properly or may cause errors or crashes in the game. Therefore, be careful when choosing a source for downloading Epic Conquest Mod Apk.</p>
<p>One of the sources that we recommend is [Epic Conquest Mod Apk]. This website provides mod apk files for various games, including Epic Conquest. It has a simple and user-friendly interface that allows you to download might want to choose Leon. You can also switch between characters anytime you want, but you have to start from the beginning of the game with each character.</p>
<h3>Upgrade your stats, skills, masteries, and equipment to match your build</h3>
<p>Epic Conquest has a complex and flexible system that allows you to customize your character's build. Stats are the basic attributes of your character, such as strength, intelligence, agility, and vitality, and you can increase them by leveling up or using stat points. Skills are the special abilities of your character, such as sword slash, fireball, shadow strike, and holy light, unlocked and upgraded with skill points. Masteries are the passive bonuses of your character, such as critical chance, magic resistance, dodge rate, and healing power, unlocked and upgraded with mastery points. Equipment covers the items that you wear or use, such as weapons, armor, accessories, and consumables, which you can buy or find in the game world.</p>
<p>You should upgrade your stats, skills, masteries, and equipment to match your build. For example, if you want to be a damage dealer, focus on increasing your strength or intelligence, depending on your character; unlock and upgrade skills that deal high damage or have low cooldowns, and masteries that increase your critical chance or damage; and equip weapons and armor with high attack or magic power.</p>
<h3>Learn the enemies' behavior and find the best time to strike</h3>
<p>Epic Conquest has a variety of enemies with different behaviors and patterns. Some are aggressive and will chase you down; some are defensive and will block or dodge your attacks; some are ranged and will shoot you from afar; some are melee and will try to hit you up close. Some have special abilities or attacks that can stun you, poison you, or knock you back.</p>
<p>Learn the enemies' behavior and find the best time to strike. Against an aggressive enemy, wait for it to attack first and counterattack while it is vulnerable. Against a defensive enemy, use skills that can break its guard or stun it. Against a ranged enemy, close the distance or use skills that can reach it. Against a melee enemy, keep your distance or use skills that can knock it back.</p>
<h3>Explore the world map and complete quests, achievements, and challenges</h3>
<p>Epic Conquest has a vast world map full of secrets and surprises. You can explore different areas and regions, such as forests, caves, deserts, and cities, and find hidden chests, items, enemies, and bosses. You can also complete quests, achievements, and challenges. Quests are the main missions that advance the story and reward you with money, items, and experience. Achievements are optional goals that test your skill and knowledge and reward you with money, items, and mastery points. Challenges are special modes that add extra difficulty and fun and reward you with money, items, and skill points.</p>
<p>Exploring the world map and completing quests, achievements, and challenges will help you level up your character, improve your build, discover new things, and have more fun.</p>
<h2>Conclusion</h2>
<p>Epic Conquest is a great game for RPG fans who love anime-style story and action: it has a beautiful story, satisfying combat, and a lot of content to enjoy. Epic Conquest Mod Apk is a convenient way to enjoy the game without spending money or time: it gives you unlimited money and other benefits that make the game easier and more fun. If you are looking for a free offline RPG that will keep you entertained for hours, give Epic Conquest Mod Apk a try.</p>
<h2>FAQs</h2>
<h4>Is Epic Conquest Mod Apk legal?</h4>
<p>Epic Conquest Mod Apk is not legal, as it violates the terms and conditions of the original game, and it is not endorsed or supported by the game's developers. However, it is unlikely that you will face legal consequences for using it, as long as you use it for personal use only and do not distribute or share it with others.</p>
<h4>Is Epic Conquest Mod Apk safe?</h4>
<p>Epic Conquest Mod Apk is safe if you download it from a reliable source, such as [Epic Conquest Mod Apk]. However, always be careful when downloading any mod apk file from the internet, as some may contain viruses or malware that can harm your device or steal your data, and scan the file with an antivirus or anti-malware program before installing it on your device.</p>
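<p>Beyond antivirus scanning, you can also verify a download against a checksum when the source publishes one. This is a minimal Python sketch; the file name and the expected digest are placeholders, not values from any real site.</p>
<pre><code>import hashlib
from pathlib import Path

apk = Path.home() / "Downloads" / "epic_conquest_mod.apk"   # hypothetical file
expected = "replace-with-the-published-sha256-digest"       # placeholder

digest = hashlib.sha256(apk.read_bytes()).hexdigest()
if digest == expected:
    print("checksum matches - the file is intact")
else:
    print("checksum mismatch: got", digest)
</code></pre>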
<h4>Can I play Epic Conquest Mod Apk online?</h4>
<p>Epic Conquest Mod Apk can be played online, but it is not recommended. The original game has no online features or multiplayer modes, so there is no point in playing it online. Moreover, playing online may expose you to the risk of being banned or blocked by the developers of the original game, so it is better to play offline without any internet connection.</p>
<h4>Can I switch between characters in Epic Conquest Mod Apk?</h4>
<p>You can switch between characters anytime you want, but you have to start from the beginning of the game with each character. You cannot transfer your progress or items between characters, although you can create multiple save files for each character.</p>
<h4>Can I update Epic Conquest Mod Apk?</h4>
<p>You can update Epic Conquest Mod Apk when a new version is available from the source you downloaded it from. Always back up your save files before updating, as some updates may cause errors or crashes, and check whether the new version has the same features and benefits as the previous one.</p>
spaces/232labs/VToonify/vtoonify/model/stylegan/lpips/dist_model.py
DELETED
@@ -1,284 +0,0 @@
from __future__ import absolute_import

import os
from collections import OrderedDict

import numpy as np
import torch
from torch.autograd import Variable
from scipy.ndimage import zoom
from tqdm import tqdm

from model.stylegan.lpips.base_model import BaseModel
from model.stylegan.lpips import networks_basic as networks
import model.stylegan.lpips as util


class DistModel(BaseModel):
    def name(self):
        return self.model_name

    def initialize(self, model='net-lin', net='alex', colorspace='Lab', pnet_rand=False, pnet_tune=False, model_path=None,
                   use_gpu=True, printNet=False, spatial=False,
                   is_train=False, lr=.0001, beta1=0.5, version='0.1', gpu_ids=[0]):
        '''
        INPUTS
            model - ['net-lin'] for linearly calibrated network
                    ['net'] for off-the-shelf network
                    ['L2'] for L2 distance in Lab colorspace
                    ['SSIM'] for ssim in RGB colorspace
            net - ['squeeze','alex','vgg']
            model_path - if None, will look in weights/[NET_NAME].pth
            colorspace - ['Lab','RGB'] colorspace to use for L2 and SSIM
            use_gpu - bool - whether or not to use a GPU
            printNet - bool - whether or not to print network architecture out
            spatial - bool - whether to output an array containing varying distances across spatial dimensions
            is_train - bool - [True] for training mode
            lr - float - initial learning rate
            beta1 - float - initial momentum term for adam
            version - 0.1 for latest, 0.0 was original (with a bug)
            gpu_ids - int array - [0] by default, gpus to use
        '''
        BaseModel.initialize(self, use_gpu=use_gpu, gpu_ids=gpu_ids)

        self.model = model
        self.net = net
        self.is_train = is_train
        self.spatial = spatial
        self.gpu_ids = gpu_ids
        self.model_name = '%s [%s]' % (model, net)

        if self.model == 'net-lin':  # pretrained net + linear layer
            self.net = networks.PNetLin(pnet_rand=pnet_rand, pnet_tune=pnet_tune, pnet_type=net,
                                        use_dropout=True, spatial=spatial, version=version, lpips=True)
            kw = {}
            if not use_gpu:
                kw['map_location'] = 'cpu'
            if model_path is None:
                import inspect
                model_path = os.path.abspath(
                    os.path.join(inspect.getfile(self.initialize), '..', 'weights/v%s/%s.pth' % (version, net)))

            if not is_train:
                print('Loading model from: %s' % model_path)
                self.net.load_state_dict(torch.load(model_path, **kw), strict=False)

        elif self.model == 'net':  # pretrained network without the linear calibration layer
            self.net = networks.PNetLin(pnet_rand=pnet_rand, pnet_type=net, lpips=False)
        elif self.model in ['L2', 'l2']:
            self.net = networks.L2(use_gpu=use_gpu, colorspace=colorspace)  # not really a network, only for testing
            self.model_name = 'L2'
        elif self.model in ['DSSIM', 'dssim', 'SSIM', 'ssim']:
            self.net = networks.DSSIM(use_gpu=use_gpu, colorspace=colorspace)
            self.model_name = 'SSIM'
        else:
            raise ValueError("Model [%s] not recognized." % self.model)

        self.parameters = list(self.net.parameters())

        if self.is_train:  # training mode
            # extra network on top to go from distances (d0,d1) => predicted human judgment (h*)
            self.rankLoss = networks.BCERankingLoss()
            self.parameters += list(self.rankLoss.net.parameters())
            self.lr = lr
            self.old_lr = lr
            self.optimizer_net = torch.optim.Adam(self.parameters, lr=lr, betas=(beta1, 0.999))
        else:  # test mode
            self.net.eval()

        if use_gpu:
            self.net.to(gpu_ids[0])
            self.net = torch.nn.DataParallel(self.net, device_ids=gpu_ids)
            if self.is_train:
                self.rankLoss = self.rankLoss.to(device=gpu_ids[0])  # just put this on GPU0

        if printNet:
            print('---------- Networks initialized -------------')
            networks.print_network(self.net)
            print('-----------------------------------------------')

    def forward(self, in0, in1, retPerLayer=False):
        ''' Function computes the distance between image patches in0 and in1
        INPUTS
            in0, in1 - torch.Tensor object of shape Nx3xXxY - image patch scaled to [-1,1]
        OUTPUT
            computed distances between in0 and in1
        '''
        return self.net.forward(in0, in1, retPerLayer=retPerLayer)

    # ***** TRAINING FUNCTIONS *****
    def optimize_parameters(self):
        self.forward_train()
        self.optimizer_net.zero_grad()
        self.backward_train()
        self.optimizer_net.step()
        self.clamp_weights()

    def clamp_weights(self):
        # keep the learned 1x1 linear weights non-negative
        for module in self.net.modules():
            if hasattr(module, 'weight') and module.kernel_size == (1, 1):
                module.weight.data = torch.clamp(module.weight.data, min=0)

    def set_input(self, data):
        self.input_ref = data['ref']
        self.input_p0 = data['p0']
        self.input_p1 = data['p1']
        self.input_judge = data['judge']

        if self.use_gpu:
            self.input_ref = self.input_ref.to(device=self.gpu_ids[0])
            self.input_p0 = self.input_p0.to(device=self.gpu_ids[0])
            self.input_p1 = self.input_p1.to(device=self.gpu_ids[0])
            self.input_judge = self.input_judge.to(device=self.gpu_ids[0])

        self.var_ref = Variable(self.input_ref, requires_grad=True)
        self.var_p0 = Variable(self.input_p0, requires_grad=True)
        self.var_p1 = Variable(self.input_p1, requires_grad=True)

    def forward_train(self):  # run forward pass
        self.d0 = self.forward(self.var_ref, self.var_p0)
        self.d1 = self.forward(self.var_ref, self.var_p1)
        self.acc_r = self.compute_accuracy(self.d0, self.d1, self.input_judge)

        self.var_judge = Variable(1. * self.input_judge).view(self.d0.size())

        self.loss_total = self.rankLoss.forward(self.d0, self.d1, self.var_judge * 2. - 1.)

        return self.loss_total

    def backward_train(self):
        torch.mean(self.loss_total).backward()

    def compute_accuracy(self, d0, d1, judge):
        ''' d0, d1 are Variables, judge is a Tensor '''
        d1_lt_d0 = (d1 < d0).cpu().data.numpy().flatten()
        judge_per = judge.cpu().numpy().flatten()
        return d1_lt_d0 * judge_per + (1 - d1_lt_d0) * (1 - judge_per)

    def get_current_errors(self):
        retDict = OrderedDict([('loss_total', self.loss_total.data.cpu().numpy()),
                               ('acc_r', self.acc_r)])

        for key in retDict.keys():
            retDict[key] = np.mean(retDict[key])

        return retDict

    def get_current_visuals(self):
        zoom_factor = 256 / self.var_ref.data.size()[2]

        ref_img = util.tensor2im(self.var_ref.data)
        p0_img = util.tensor2im(self.var_p0.data)
        p1_img = util.tensor2im(self.var_p1.data)

        ref_img_vis = zoom(ref_img, [zoom_factor, zoom_factor, 1], order=0)
        p0_img_vis = zoom(p0_img, [zoom_factor, zoom_factor, 1], order=0)
        p1_img_vis = zoom(p1_img, [zoom_factor, zoom_factor, 1], order=0)

        return OrderedDict([('ref', ref_img_vis),
                            ('p0', p0_img_vis),
                            ('p1', p1_img_vis)])

    def save(self, path, label):
        if self.use_gpu:
            self.save_network(self.net.module, path, '', label)
        else:
            self.save_network(self.net, path, '', label)
        self.save_network(self.rankLoss.net, path, 'rank', label)

    def update_learning_rate(self, nepoch_decay):
        lrd = self.lr / nepoch_decay
        lr = self.old_lr - lrd

        for param_group in self.optimizer_net.param_groups:
            param_group['lr'] = lr

        print('update lr decay: %f -> %f' % (self.old_lr, lr))
        self.old_lr = lr


def score_2afc_dataset(data_loader, func, name=''):
    ''' Function computes Two Alternative Forced Choice (2AFC) score using
        distance function 'func' in dataset 'data_loader'
    INPUTS
        data_loader - CustomDatasetDataLoader object - contains a TwoAFCDataset inside
        func - callable distance function - calling d=func(in0,in1) should take 2
            pytorch tensors with shape Nx3xXxY, and return numpy array of length N
    OUTPUTS
        [0] - 2AFC score in [0,1], fraction of time func agrees with human evaluators
        [1] - dictionary with following elements
            d0s,d1s - N arrays containing distances between reference patch to perturbed patches
            gts - N array in [0,1], preferred patch selected by human evaluators
                (closer to "0" for left patch p0, "1" for right patch p1,
                "0.6" means 60pct people preferred right patch, 40pct preferred left)
            scores - N array in [0,1], corresponding to what percentage function agreed with humans
    CONSTS
        N - number of test triplets in data_loader
    '''
    d0s = []
    d1s = []
    gts = []

    for data in tqdm(data_loader.load_data(), desc=name):
        d0s += func(data['ref'], data['p0']).data.cpu().numpy().flatten().tolist()
        d1s += func(data['ref'], data['p1']).data.cpu().numpy().flatten().tolist()
        gts += data['judge'].cpu().numpy().flatten().tolist()

    d0s = np.array(d0s)
    d1s = np.array(d1s)
    gts = np.array(gts)
    # agree with humans when the patch they preferred is the one scored closer; ties count half
    scores = (d0s < d1s) * (1. - gts) + (d1s < d0s) * gts + (d1s == d0s) * .5

    return np.mean(scores), dict(d0s=d0s, d1s=d1s, gts=gts, scores=scores)


def score_jnd_dataset(data_loader, func, name=''):
    ''' Function computes JND score using distance function 'func' in dataset 'data_loader'
    INPUTS
        data_loader - CustomDatasetDataLoader object - contains a JNDDataset inside
        func - callable distance function - calling d=func(in0,in1) should take 2
            pytorch tensors with shape Nx3xXxY, and return pytorch array of length N
    OUTPUTS
        [0] - JND score in [0,1], mAP score (area under precision-recall curve)
        [1] - dictionary with following elements
            ds - N array containing distances between two patches shown to human evaluator
            sames - N array containing fraction of people who thought the two patches were identical
    CONSTS
        N - number of test triplets in data_loader
    '''
    ds = []
    gts = []

    for data in tqdm(data_loader.load_data(), desc=name):
        ds += func(data['p0'], data['p1']).data.cpu().numpy().tolist()
        gts += data['same'].cpu().numpy().flatten().tolist()

    sames = np.array(gts)
    ds = np.array(ds)

    # sort pairs by predicted distance and sweep a threshold to build a precision-recall curve
    sorted_inds = np.argsort(ds)
    ds_sorted = ds[sorted_inds]
    sames_sorted = sames[sorted_inds]

    TPs = np.cumsum(sames_sorted)
    FPs = np.cumsum(1 - sames_sorted)
    FNs = np.sum(sames_sorted) - TPs

    precs = TPs / (TPs + FPs)
    recs = TPs / (TPs + FNs)
    score = util.voc_ap(recs, precs)

    return score, dict(ds=ds, sames=sames)
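For reference, a minimal sketch of how this class might be driven, assuming the repository layout above and that the pretrained weights file (weights/v0.1/alex.pth next to this module) is present; CPU mode is shown:

import torch
from model.stylegan.lpips.dist_model import DistModel

model = DistModel()
model.initialize(model='net-lin', net='alex', use_gpu=False, gpu_ids=[])
# two random image patches scaled to [-1, 1], as forward() expects
img0 = torch.rand(1, 3, 64, 64) * 2 - 1
img1 = torch.rand(1, 3, 64, 64) * 2 - 1
print(model.forward(img0, img1))  # perceptual distance, one value per patch pair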
spaces/6shen7/Linaqruf-anything-v3.0/README.md
DELETED
@@ -1,13 +0,0 @@
---
title: Linaqruf Anything V3.0
emoji: 👀
colorFrom: purple
colorTo: green
sdk: gradio
sdk_version: 3.14.0
app_file: app.py
pinned: false
license: openrail
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/801artistry/RVC801/infer/lib/infer_pack/modules/F0Predictor/PMF0Predictor.py
DELETED
@@ -1,98 +0,0 @@
import numpy as np
import parselmouth

from infer.lib.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor


class PMF0Predictor(F0Predictor):
    def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100):
        self.hop_length = hop_length
        self.f0_min = f0_min
        self.f0_max = f0_max
        self.sampling_rate = sampling_rate

    def interpolate_f0(self, f0):
        """
        Interpolate F0 across unvoiced frames, returning the filled-in F0
        curve together with a voiced/unvoiced (vuv) mask.
        """
        data = np.reshape(f0, (f0.size, 1))

        vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
        vuv_vector[data > 0.0] = 1.0
        vuv_vector[data <= 0.0] = 0.0

        ip_data = data

        frame_number = data.size
        last_value = 0.0
        for i in range(frame_number):
            if data[i] <= 0.0:
                # find the next voiced frame j, then fill the gap [i, j)
                j = i + 1
                for j in range(i + 1, frame_number):
                    if data[j] > 0.0:
                        break
                if j < frame_number - 1:
                    if last_value > 0.0:
                        # linear interpolation between the surrounding voiced frames
                        step = (data[j] - data[i - 1]) / float(j - i)
                        for k in range(i, j):
                            ip_data[k] = data[i - 1] + step * (k - i + 1)
                    else:
                        for k in range(i, j):
                            ip_data[k] = data[j]
                else:
                    for k in range(i, frame_number):
                        ip_data[k] = last_value
            else:
                ip_data[i] = data[i]  # this copy may be unnecessary
                last_value = data[i]

        return ip_data[:, 0], vuv_vector[:, 0]

    def compute_f0(self, wav, p_len=None):
        x = wav
        if p_len is None:
            p_len = x.shape[0] // self.hop_length
        else:
            assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error"
        time_step = self.hop_length / self.sampling_rate * 1000
        f0 = (
            parselmouth.Sound(x, self.sampling_rate)
            .to_pitch_ac(
                time_step=time_step / 1000,
                voicing_threshold=0.6,
                pitch_floor=self.f0_min,
                pitch_ceiling=self.f0_max,
            )
            .selected_array["frequency"]
        )

        # pad the F0 track symmetrically so it matches the expected frame count
        pad_size = (p_len - len(f0) + 1) // 2
        if pad_size > 0 or p_len - len(f0) - pad_size > 0:
            f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant")
        f0, uv = self.interpolate_f0(f0)
        return f0

    def compute_f0_uv(self, wav, p_len=None):
        x = wav
        if p_len is None:
            p_len = x.shape[0] // self.hop_length
        else:
            assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error"
        time_step = self.hop_length / self.sampling_rate * 1000
        f0 = (
            parselmouth.Sound(x, self.sampling_rate)
            .to_pitch_ac(
                time_step=time_step / 1000,
                voicing_threshold=0.6,
                pitch_floor=self.f0_min,
                pitch_ceiling=self.f0_max,
            )
            .selected_array["frequency"]
        )

        pad_size = (p_len - len(f0) + 1) // 2
        if pad_size > 0 or p_len - len(f0) - pad_size > 0:
            f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant")
        f0, uv = self.interpolate_f0(f0)
        return f0, uv
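A minimal sketch of how the predictor might be exercised; it assumes the praat-parselmouth package is installed and uses a synthetic 220 Hz tone in place of real speech:

import numpy as np

sr = 16000
t = np.arange(sr) / sr
wav = 0.5 * np.sin(2 * np.pi * 220.0 * t)   # 1 s synthetic tone as a stand-in

predictor = PMF0Predictor(hop_length=160, f0_min=50, f0_max=1100, sampling_rate=sr)
f0, uv = predictor.compute_f0_uv(wav)       # per-frame pitch and voiced mask
print(f0.shape, float(f0[uv > 0].mean()))   # mean voiced F0, roughly 220 Hz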
spaces/AI-Hobbyist/Hoyo-RVC/extract_f0_print.py
DELETED
@@ -1,160 +0,0 @@
import os
import sys
import traceback
import logging
from multiprocessing import Process

import numpy as np
import parselmouth
import pyworld

now_dir = os.getcwd()
sys.path.append(now_dir)
from my_utils import load_audio

logging.getLogger("numba").setLevel(logging.WARNING)

exp_dir = sys.argv[1]
f = open("%s/extract_f0_feature.log" % exp_dir, "a+")


def printt(strr):
    # log to stdout and to the experiment log file
    print(strr)
    f.write("%s\n" % strr)
    f.flush()


n_p = int(sys.argv[2])
f0method = sys.argv[3]


class FeatureInput(object):
    def __init__(self, samplerate=16000, hop_size=160):
        self.fs = samplerate
        self.hop = hop_size

        self.f0_bin = 256
        self.f0_max = 1100.0
        self.f0_min = 50.0
        self.f0_mel_min = 1127 * np.log(1 + self.f0_min / 700)
        self.f0_mel_max = 1127 * np.log(1 + self.f0_max / 700)

    def compute_f0(self, path, f0_method):
        x = load_audio(path, self.fs)
        p_len = x.shape[0] // self.hop
        if f0_method == "pm":
            time_step = 160 / 16000 * 1000
            f0_min = 50
            f0_max = 1100
            f0 = (
                parselmouth.Sound(x, self.fs)
                .to_pitch_ac(
                    time_step=time_step / 1000,
                    voicing_threshold=0.6,
                    pitch_floor=f0_min,
                    pitch_ceiling=f0_max,
                )
                .selected_array["frequency"]
            )
            pad_size = (p_len - len(f0) + 1) // 2
            if pad_size > 0 or p_len - len(f0) - pad_size > 0:
                f0 = np.pad(
                    f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant"
                )
        elif f0_method == "harvest":
            f0, t = pyworld.harvest(
                x.astype(np.double),
                fs=self.fs,
                f0_ceil=self.f0_max,
                f0_floor=self.f0_min,
                frame_period=1000 * self.hop / self.fs,
            )
            f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.fs)
        elif f0_method == "dio":
            f0, t = pyworld.dio(
                x.astype(np.double),
                fs=self.fs,
                f0_ceil=self.f0_max,
                f0_floor=self.f0_min,
                frame_period=1000 * self.hop / self.fs,
            )
            f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.fs)
        return f0

    def coarse_f0(self, f0):
        # map F0 in Hz to a coarse 1..255 mel-scale bin index
        f0_mel = 1127 * np.log(1 + f0 / 700)
        f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - self.f0_mel_min) * (
            self.f0_bin - 2
        ) / (self.f0_mel_max - self.f0_mel_min) + 1

        # bin 1 marks unvoiced frames; clip to the top bin
        f0_mel[f0_mel <= 1] = 1
        f0_mel[f0_mel > self.f0_bin - 1] = self.f0_bin - 1
        f0_coarse = np.rint(f0_mel).astype(int)
        assert f0_coarse.max() <= 255 and f0_coarse.min() >= 1, (
            f0_coarse.max(),
            f0_coarse.min(),
        )
        return f0_coarse

    def go(self, paths, f0_method):
        if len(paths) == 0:
            printt("no-f0-todo")
        else:
            printt("todo-f0-%s" % len(paths))
            n = max(len(paths) // 5, 1)  # each worker prints at most 5 progress lines
            for idx, (inp_path, opt_path1, opt_path2) in enumerate(paths):
                try:
                    if idx % n == 0:
                        printt("f0ing,now-%s,all-%s,-%s" % (idx, len(paths), inp_path))
                    if (
                        os.path.exists(opt_path1 + ".npy")
                        and os.path.exists(opt_path2 + ".npy")
                    ):
                        continue  # already extracted, skip
                    featur_pit = self.compute_f0(inp_path, f0_method)
                    np.save(
                        opt_path2,
                        featur_pit,
                        allow_pickle=False,
                    )  # nsf
                    coarse_pit = self.coarse_f0(featur_pit)
                    np.save(
                        opt_path1,
                        coarse_pit,
                        allow_pickle=False,
                    )  # ori
                except Exception:
                    printt("f0fail-%s-%s-%s" % (idx, inp_path, traceback.format_exc()))


if __name__ == "__main__":
    printt(sys.argv)
    featureInput = FeatureInput()
    paths = []
    inp_root = "%s/1_16k_wavs" % (exp_dir)
    opt_root1 = "%s/2a_f0" % (exp_dir)
    opt_root2 = "%s/2b-f0nsf" % (exp_dir)

    os.makedirs(opt_root1, exist_ok=True)
    os.makedirs(opt_root2, exist_ok=True)
    for name in sorted(list(os.listdir(inp_root))):
        inp_path = "%s/%s" % (inp_root, name)
        if "spec" in inp_path:
            continue
        opt_path1 = "%s/%s" % (opt_root1, name)
        opt_path2 = "%s/%s" % (opt_root2, name)
        paths.append([inp_path, opt_path1, opt_path2])

    # split the work across n_p processes, round-robin
    ps = []
    for i in range(n_p):
        p = Process(
            target=featureInput.go,
            args=(
                paths[i::n_p],
                f0method,
            ),
        )
        ps.append(p)
        p.start()
    for i in range(n_p):
        ps[i].join()
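The script is driven entirely from the command line, as the sys.argv parsing above shows. A sketch of a typical invocation, with a made-up experiment directory that must already contain a 1_16k_wavs subfolder of 16 kHz wavs:

import subprocess

subprocess.run(
    ["python", "extract_f0_print.py",
     "/data/exp/mi-test",   # exp_dir (hypothetical path)
     "8",                   # n_p: number of worker processes
     "harvest"],            # f0method: one of pm / harvest / dio
    check=True,
)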
spaces/AIFILMS/StyleGANEX/models/mtcnn/mtcnn_pytorch/src/matlab_cp2tform.py
DELETED
@@ -1,350 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-Created on Tue Jul 11 06:54:28 2017
-
-@author: zhaoyafei
-"""
-
-import numpy as np
-from numpy.linalg import inv, norm, lstsq
-from numpy.linalg import matrix_rank as rank
-
-
-class MatlabCp2tormException(Exception):
-    def __str__(self):
-        return 'In File {}:{}'.format(
-            __file__, super.__str__(self))
-
-
-def tformfwd(trans, uv):
-    """
-    Function:
-    ----------
-        apply affine transform 'trans' to uv
-
-    Parameters:
-    ----------
-        @trans: 3x3 np.array
-            transform matrix
-        @uv: Kx2 np.array
-            each row is a pair of coordinates (x, y)
-
-    Returns:
-    ----------
-        @xy: Kx2 np.array
-            each row is a pair of transformed coordinates (x, y)
-    """
-    uv = np.hstack((
-        uv, np.ones((uv.shape[0], 1))
-    ))
-    xy = np.dot(uv, trans)
-    xy = xy[:, 0:-1]
-    return xy
-
-
-def tforminv(trans, uv):
-    """
-    Function:
-    ----------
-        apply the inverse of affine transform 'trans' to uv
-
-    Parameters:
-    ----------
-        @trans: 3x3 np.array
-            transform matrix
-        @uv: Kx2 np.array
-            each row is a pair of coordinates (x, y)
-
-    Returns:
-    ----------
-        @xy: Kx2 np.array
-            each row is a pair of inverse-transformed coordinates (x, y)
-    """
-    Tinv = inv(trans)
-    xy = tformfwd(Tinv, uv)
-    return xy
-
-
-def findNonreflectiveSimilarity(uv, xy, options=None):
-    options = {'K': 2}
-
-    K = options['K']
-    M = xy.shape[0]
-    x = xy[:, 0].reshape((-1, 1))  # use reshape to keep a column vector
-    y = xy[:, 1].reshape((-1, 1))  # use reshape to keep a column vector
-    # print('--->x, y:\n', x, y
-
-    tmp1 = np.hstack((x, y, np.ones((M, 1)), np.zeros((M, 1))))
-    tmp2 = np.hstack((y, -x, np.zeros((M, 1)), np.ones((M, 1))))
-    X = np.vstack((tmp1, tmp2))
-    # print('--->X.shape: ', X.shape
-    # print('X:\n', X
-
-    u = uv[:, 0].reshape((-1, 1))  # use reshape to keep a column vector
-    v = uv[:, 1].reshape((-1, 1))  # use reshape to keep a column vector
-    U = np.vstack((u, v))
-    # print('--->U.shape: ', U.shape
-    # print('U:\n', U
-
-    # We know that X * r = U
-    if rank(X) >= 2 * K:
-        r, _, _, _ = lstsq(X, U, rcond=None)  # Make sure this is what I want
-        r = np.squeeze(r)
-    else:
-        raise Exception('cp2tform:twoUniquePointsReq')
-
-    # print('--->r:\n', r
-
-    sc = r[0]
-    ss = r[1]
-    tx = r[2]
-    ty = r[3]
-
-    Tinv = np.array([
-        [sc, -ss, 0],
-        [ss, sc, 0],
-        [tx, ty, 1]
-    ])
-
-    # print('--->Tinv:\n', Tinv
-
-    T = inv(Tinv)
-    # print('--->T:\n', T
-
-    T[:, 2] = np.array([0, 0, 1])
-
-    return T, Tinv
-
-
-def findSimilarity(uv, xy, options=None):
-    options = {'K': 2}
-
-    # uv = np.array(uv)
-    # xy = np.array(xy)
-
-    # Solve for trans1
-    trans1, trans1_inv = findNonreflectiveSimilarity(uv, xy, options)
-
-    # Solve for trans2
-
-    # manually reflect the xy data across the Y-axis
-    xyR = xy
-    xyR[:, 0] = -1 * xyR[:, 0]
-
-    trans2r, trans2r_inv = findNonreflectiveSimilarity(uv, xyR, options)
-
-    # manually reflect the tform to undo the reflection done on xyR
-    TreflectY = np.array([
-        [-1, 0, 0],
-        [0, 1, 0],
-        [0, 0, 1]
-    ])
-
-    trans2 = np.dot(trans2r, TreflectY)
-
-    # Figure out if trans1 or trans2 is better
-    xy1 = tformfwd(trans1, uv)
-    norm1 = norm(xy1 - xy)
-
-    xy2 = tformfwd(trans2, uv)
-    norm2 = norm(xy2 - xy)
-
-    if norm1 <= norm2:
-        return trans1, trans1_inv
-    else:
-        trans2_inv = inv(trans2)
-        return trans2, trans2_inv
-
-
-def get_similarity_transform(src_pts, dst_pts, reflective=True):
-    """
-    Function:
-    ----------
-        Find Similarity Transform Matrix 'trans':
-            u = src_pts[:, 0]
-            v = src_pts[:, 1]
-            x = dst_pts[:, 0]
-            y = dst_pts[:, 1]
-            [x, y, 1] = [u, v, 1] * trans
-
-    Parameters:
-    ----------
-        @src_pts: Kx2 np.array
-            source points, each row is a pair of coordinates (x, y)
-        @dst_pts: Kx2 np.array
-            destination points, each row is a pair of transformed
-            coordinates (x, y)
-        @reflective: True or False
-            if True:
-                use reflective similarity transform
-            else:
-                use non-reflective similarity transform
-
-    Returns:
-    ----------
-        @trans: 3x3 np.array
-            transform matrix from uv to xy
-        trans_inv: 3x3 np.array
-            inverse of trans, transform matrix from xy to uv
-    """
-
-    if reflective:
-        trans, trans_inv = findSimilarity(src_pts, dst_pts)
-    else:
-        trans, trans_inv = findNonreflectiveSimilarity(src_pts, dst_pts)
-
-    return trans, trans_inv
-
-
-def cvt_tform_mat_for_cv2(trans):
-    """
-    Function:
-    ----------
-        Convert Transform Matrix 'trans' into 'cv2_trans' which could be
-        directly used by cv2.warpAffine():
-            u = src_pts[:, 0]
-            v = src_pts[:, 1]
-            x = dst_pts[:, 0]
-            y = dst_pts[:, 1]
-            [x, y].T = cv_trans * [u, v, 1].T
-
-    Parameters:
-    ----------
-        @trans: 3x3 np.array
-            transform matrix from uv to xy
-
-    Returns:
-    ----------
-        @cv2_trans: 2x3 np.array
-            transform matrix from src_pts to dst_pts, could be directly used
-            for cv2.warpAffine()
-    """
-    cv2_trans = trans[:, 0:2].T
-
-    return cv2_trans
-
-
-def get_similarity_transform_for_cv2(src_pts, dst_pts, reflective=True):
-    """
-    Function:
-    ----------
-        Find Similarity Transform Matrix 'cv2_trans' which could be
-        directly used by cv2.warpAffine():
-            u = src_pts[:, 0]
-            v = src_pts[:, 1]
-            x = dst_pts[:, 0]
-            y = dst_pts[:, 1]
-            [x, y].T = cv_trans * [u, v, 1].T
-
-    Parameters:
-    ----------
-        @src_pts: Kx2 np.array
-            source points, each row is a pair of coordinates (x, y)
-        @dst_pts: Kx2 np.array
-            destination points, each row is a pair of transformed
-            coordinates (x, y)
-        reflective: True or False
-            if True:
-                use reflective similarity transform
-            else:
-                use non-reflective similarity transform
-
-    Returns:
-    ----------
-        @cv2_trans: 2x3 np.array
-            transform matrix from src_pts to dst_pts, could be directly used
-            for cv2.warpAffine()
-    """
-    trans, trans_inv = get_similarity_transform(src_pts, dst_pts, reflective)
-    cv2_trans = cvt_tform_mat_for_cv2(trans)
-
-    return cv2_trans
-
-
-if __name__ == '__main__':
-    """
-    u = [0, 6, -2]
-    v = [0, 3, 5]
-    x = [-1, 0, 4]
-    y = [-1, -10, 4]
-
-    # In Matlab, run:
-    #
-    #   uv = [u'; v'];
-    #   xy = [x'; y'];
-    #   tform_sim=cp2tform(uv,xy,'similarity');
-    #
-    #   trans = tform_sim.tdata.T
-    #   ans =
-    #       -0.0764   -1.6190         0
-    #        1.6190   -0.0764         0
-    #       -3.2156    0.0290    1.0000
-    #   trans_inv = tform_sim.tdata.Tinv
-    #   ans =
-    #
-    #       -0.0291    0.6163         0
-    #       -0.6163   -0.0291         0
-    #       -0.0756    1.9826    1.0000
-    #   xy_m=tformfwd(tform_sim, u,v)
-    #
-    #   xy_m =
-    #
-    #       -3.2156    0.0290
-    #        1.1833   -9.9143
-    #        5.0323    2.8853
-    #   uv_m=tforminv(tform_sim, x,y)
-    #
-    #   uv_m =
-    #
-    #        0.5698    1.3953
-    #        6.0872    2.2733
-    #       -2.6570    4.3314
-    """
-    u = [0, 6, -2]
-    v = [0, 3, 5]
-    x = [-1, 0, 4]
-    y = [-1, -10, 4]
-
-    uv = np.array((u, v)).T
-    xy = np.array((x, y)).T
-
-    print('\n--->uv:')
-    print(uv)
-    print('\n--->xy:')
-    print(xy)
-
-    trans, trans_inv = get_similarity_transform(uv, xy)
-
-    print('\n--->trans matrix:')
-    print(trans)
-
-    print('\n--->trans_inv matrix:')
-    print(trans_inv)
-
-    print('\n---> apply transform to uv')
-    print('\nxy_m = uv_augmented * trans')
-    uv_aug = np.hstack((
-        uv, np.ones((uv.shape[0], 1))
-    ))
-    xy_m = np.dot(uv_aug, trans)
-    print(xy_m)
-
-    print('\nxy_m = tformfwd(trans, uv)')
-    xy_m = tformfwd(trans, uv)
-    print(xy_m)
-
-    print('\n---> apply inverse transform to xy')
-    print('\nuv_m = xy_augmented * trans_inv')
-    xy_aug = np.hstack((
-        xy, np.ones((xy.shape[0], 1))
-    ))
-    uv_m = np.dot(xy_aug, trans_inv)
-    print(uv_m)
-
-    print('\nuv_m = tformfwd(trans_inv, xy)')
-    uv_m = tformfwd(trans_inv, xy)
-    print(uv_m)
-
-    uv_m = tforminv(trans, xy)
-    print('\nuv_m = tforminv(trans, xy)')
-    print(uv_m)
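Note: the module's public entry point, `get_similarity_transform_for_cv2`, returns a 2x3 matrix that plugs straight into `cv2.warpAffine`, as its docstring states. A minimal usage sketch for face alignment, assuming the module above is importable and OpenCV is installed; the landmark coordinates and the 112x112 crop size are illustrative assumptions, not values taken from this repo:

import cv2
import numpy as np
from matlab_cp2tform import get_similarity_transform_for_cv2

# Five detected facial landmarks (eyes, nose tip, mouth corners) -- illustrative values.
src_pts = np.array([[66.0, 72.0], [110.0, 70.0], [89.0, 98.0], [71.0, 122.0], [105.0, 120.0]])
# Canonical landmark positions inside the output crop -- also illustrative.
dst_pts = np.array([[38.3, 51.7], [73.5, 51.5], [56.0, 71.7], [41.5, 92.4], [70.7, 92.2]])

cv2_trans = get_similarity_transform_for_cv2(src_pts, dst_pts)  # 2x3 affine matrix
img = cv2.imread('face.jpg')
aligned = cv2.warpAffine(img, cv2_trans, (112, 112))            # aligned 112x112 face crop
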
spaces/AIFILMS/generate_human_motion/pyrender/tests/unit/test_egl.py
DELETED
@@ -1,16 +0,0 @@
-# from pyrender.platforms import egl
-
-
-def tmp_test_default_device():
-    egl.get_default_device()
-
-
-def tmp_test_query_device():
-    devices = egl.query_devices()
-    assert len(devices) > 0
-
-
-def tmp_test_init_context():
-    device = egl.query_devices()[0]
-    platform = egl.EGLPlatform(128, 128, device=device)
-    platform.init_context()
spaces/AIGC-Audio/AudioGPT/NeuralSeq/utils/pitch_utils.py
DELETED
@@ -1,76 +0,0 @@
-#########
-# world
-##########
-import librosa
-import numpy as np
-import torch
-
-gamma = 0
-mcepInput = 3  # 0 for dB, 3 for magnitude
-alpha = 0.45
-en_floor = 10 ** (-80 / 20)
-FFT_SIZE = 2048
-
-
-f0_bin = 256
-f0_max = 1100.0
-f0_min = 50.0
-f0_mel_min = 1127 * np.log(1 + f0_min / 700)
-f0_mel_max = 1127 * np.log(1 + f0_max / 700)
-
-
-def f0_to_coarse(f0):
-    is_torch = isinstance(f0, torch.Tensor)
-    f0_mel = 1127 * (1 + f0 / 700).log() if is_torch else 1127 * np.log(1 + f0 / 700)
-    f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * (f0_bin - 2) / (f0_mel_max - f0_mel_min) + 1
-
-    f0_mel[f0_mel <= 1] = 1
-    f0_mel[f0_mel > f0_bin - 1] = f0_bin - 1
-    f0_coarse = (f0_mel + 0.5).long() if is_torch else np.rint(f0_mel).astype(np.int)
-    assert f0_coarse.max() <= 255 and f0_coarse.min() >= 1, (f0_coarse.max(), f0_coarse.min())
-    return f0_coarse
-
-
-def norm_f0(f0, uv, hparams):
-    is_torch = isinstance(f0, torch.Tensor)
-    if hparams['pitch_norm'] == 'standard':
-        f0 = (f0 - hparams['f0_mean']) / hparams['f0_std']
-    if hparams['pitch_norm'] == 'log':
-        f0 = torch.log2(f0) if is_torch else np.log2(f0)
-    if uv is not None and hparams['use_uv']:
-        f0[uv > 0] = 0
-    return f0
-
-
-def norm_interp_f0(f0, hparams):
-    is_torch = isinstance(f0, torch.Tensor)
-    if is_torch:
-        device = f0.device
-        f0 = f0.data.cpu().numpy()
-    uv = f0 == 0
-    f0 = norm_f0(f0, uv, hparams)
-    if sum(uv) == len(f0):
-        f0[uv] = 0
-    elif sum(uv) > 0:
-        f0[uv] = np.interp(np.where(uv)[0], np.where(~uv)[0], f0[~uv])
-    uv = torch.FloatTensor(uv)
-    f0 = torch.FloatTensor(f0)
-    if is_torch:
-        f0 = f0.to(device)
-    return f0, uv
-
-
-def denorm_f0(f0, uv, hparams, pitch_padding=None, min=None, max=None):
-    if hparams['pitch_norm'] == 'standard':
-        f0 = f0 * hparams['f0_std'] + hparams['f0_mean']
-    if hparams['pitch_norm'] == 'log':
-        f0 = 2 ** f0
-    if min is not None:
-        f0 = f0.clamp(min=min)
-    if max is not None:
-        f0 = f0.clamp(max=max)
-    if uv is not None and hparams['use_uv']:
-        f0[uv > 0] = 0
-    if pitch_padding is not None:
-        f0[pitch_padding] = 0
-    return f0
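Note: `norm_f0` and `denorm_f0` above are exact inverses on voiced frames when `pitch_norm` is 'log' (log2, then 2**x) or 'standard' (z-score, then its inverse); unvoiced frames are forced to 0 on both sides via the `uv` mask. A tiny numeric check of the 'log' branch, sketched without the module's `hparams` plumbing (the masking order mirrors what the functions do):

import numpy as np

f0 = np.array([0.0, 220.0, 440.0])      # Hz; 0 = unvoiced
uv = f0 == 0                             # unvoiced mask, as built in norm_interp_f0
norm = np.log2(np.where(uv, 1.0, f0))    # 'log' normalization (guarding log2(0))
norm[uv] = 0                             # use_uv: zero out unvoiced frames
denorm = 2 ** norm                       # denorm_f0's 'log' branch
denorm[uv] = 0
print(denorm)                            # [  0. 220. 440.] -- round trip recovers f0
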
spaces/AILab-CVC/SEED-LLaMA/README.md
DELETED
@@ -1,11 +0,0 @@
----
-title: SEED LLaMA
-emoji: 🌖
-colorFrom: yellow
-colorTo: purple
-sdk: docker
-pinned: false
-license: llama2
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Ajit025/Text_to_Image_conversion/README.md
DELETED
@@ -1,12 +0,0 @@
----
-title: Text To Image Conversion
-emoji: 🌍
-colorFrom: red
-colorTo: red
-sdk: gradio
-sdk_version: 3.35.2
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/AkitoP/umamusume_bert_vits2/bert_gen.py
DELETED
@@ -1,61 +0,0 @@
-import torch
-from multiprocessing import Pool
-import commons
-import utils
-from tqdm import tqdm
-from text import cleaned_text_to_sequence, get_bert
-import argparse
-import torch.multiprocessing as mp
-
-import os
-os.environ['http_proxy'] = 'http://localhost:11796'
-os.environ['https_proxy'] = 'http://localhost:11796'
-def process_line(line):
-    rank = mp.current_process()._identity
-    rank = rank[0] if len(rank) > 0 else 0
-    if torch.cuda.is_available():
-        gpu_id = rank % torch.cuda.device_count()
-        device = torch.device(f"cuda:{gpu_id}")
-    wav_path, _, language_str, text, phones, tone, word2ph = line.strip().split("|")
-    phone = phones.split(" ")
-    tone = [int(i) for i in tone.split(" ")]
-    word2ph = [int(i) for i in word2ph.split(" ")]
-    word2ph = [i for i in word2ph]
-    phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)
-
-    phone = commons.intersperse(phone, 0)
-    tone = commons.intersperse(tone, 0)
-    language = commons.intersperse(language, 0)
-    for i in range(len(word2ph)):
-        word2ph[i] = word2ph[i] * 2
-    word2ph[0] += 1
-
-    bert_path = wav_path.replace(".wav", ".bert.pt")
-
-    try:
-        bert = torch.load(bert_path)
-        assert bert.shape[-1] == len(phone)
-    except Exception:
-        bert = get_bert(text, word2ph, language_str, device)
-        assert bert.shape[-1] == len(phone)
-        torch.save(bert, bert_path)
-
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser()
-    parser.add_argument("-c", "--config", type=str, default="configs/config.json")
-    parser.add_argument("--num_processes", type=int, default=2)
-    args = parser.parse_args()
-    config_path = args.config
-    hps = utils.get_hparams_from_file(config_path)
-    lines = []
-    with open(hps.data.training_files, encoding="utf-8") as f:
-        lines.extend(f.readlines())
-
-    with open(hps.data.validation_files, encoding="utf-8") as f:
-        lines.extend(f.readlines())
-
-    num_processes = args.num_processes
-    with Pool(processes=num_processes) as pool:
-        for _ in tqdm(pool.imap_unordered(process_line, lines), total=len(lines)):
-            pass
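Note: the `word2ph[i] * 2` loop above mirrors the blank-token interspersing applied to `phone`, `tone`, and `language`. `commons.intersperse` itself is not part of this diff; the definition commonly used in VITS-style codebases (assumed here) is:

def intersperse(lst, item):
    # [a, b, c] -> [item, a, item, b, item, c, item]
    result = [item] * (len(lst) * 2 + 1)
    result[1::2] = lst
    return result

print(intersperse([1, 2, 3], 0))  # [0, 1, 0, 2, 0, 3, 0]

Each phone then occupies two slots (itself plus one blank), which is why every `word2ph` count is doubled and the first entry absorbs the extra leading blank.
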
spaces/AlanMars/QYL-AI-Space/run_Windows.bat
DELETED
@@ -1,5 +0,0 @@
-@echo off
-echo Opening ChuanhuChatGPT...
-
-REM Open powershell via bat
-start powershell.exe -NoExit -Command "python ./ChuanhuChatbot.py"
spaces/Allakhazam/anythingV4/README.md
DELETED
@@ -1,13 +0,0 @@
----
-title: AnythingV4
-emoji: 🔥
-colorFrom: yellow
-colorTo: red
-sdk: gradio
-sdk_version: 3.20.1
-app_file: app.py
-pinned: false
-license: artistic-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/pndm/pipeline_pndm.py
DELETED
@@ -1,121 +0,0 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-from typing import List, Optional, Tuple, Union
-
-import torch
-
-from ...models import UNet2DModel
-from ...schedulers import PNDMScheduler
-from ...utils import randn_tensor
-from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
-
-
-class PNDMPipeline(DiffusionPipeline):
-    r"""
-    Pipeline for unconditional image generation.
-
-    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
-    implemented for all pipelines (downloading, saving, running on a particular device, etc.).
-
-    Parameters:
-        unet ([`UNet2DModel`]):
-            A `UNet2DModel` to denoise the encoded image latents.
-        scheduler ([`PNDMScheduler`]):
-            A `PNDMScheduler` to be used in combination with `unet` to denoise the encoded image.
-    """
-
-    unet: UNet2DModel
-    scheduler: PNDMScheduler
-
-    def __init__(self, unet: UNet2DModel, scheduler: PNDMScheduler):
-        super().__init__()
-
-        scheduler = PNDMScheduler.from_config(scheduler.config)
-
-        self.register_modules(unet=unet, scheduler=scheduler)
-
-    @torch.no_grad()
-    def __call__(
-        self,
-        batch_size: int = 1,
-        num_inference_steps: int = 50,
-        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
-        output_type: Optional[str] = "pil",
-        return_dict: bool = True,
-        **kwargs,
-    ) -> Union[ImagePipelineOutput, Tuple]:
-        r"""
-        The call function to the pipeline for generation.
-
-        Args:
-            batch_size (`int`, `optional`, defaults to 1):
-                The number of images to generate.
-            num_inference_steps (`int`, `optional`, defaults to 50):
-                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
-                expense of slower inference.
-            generator (`torch.Generator`, `optional`):
-                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
-                generation deterministic.
-            output_type (`str`, `optional`, defaults to `"pil"`):
-                The output format of the generated image. Choose between `PIL.Image` or `np.array`.
-            return_dict (`bool`, *optional*, defaults to `True`):
-                Whether or not to return a [`ImagePipelineOutput`] instead of a plain tuple.
-
-        Example:
-
-        ```py
-        >>> from diffusers import PNDMPipeline
-
-        >>> # load model and scheduler
-        >>> pndm = PNDMPipeline.from_pretrained("google/ddpm-cifar10-32")
-
-        >>> # run pipeline in inference (sample random noise and denoise)
-        >>> image = pndm().images[0]
-
-        >>> # save image
-        >>> image.save("pndm_generated_image.png")
-        ```
-
-        Returns:
-            [`~pipelines.ImagePipelineOutput`] or `tuple`:
-                If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is
-                returned where the first element is a list with the generated images.
-        """
-        # For more information on the sampling method you can take a look at Algorithm 2 of
-        # the official paper: https://arxiv.org/pdf/2202.09778.pdf
-
-        # Sample gaussian noise to begin loop
-        image = randn_tensor(
-            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
-            generator=generator,
-            device=self.device,
-        )
-
-        self.scheduler.set_timesteps(num_inference_steps)
-        for t in self.progress_bar(self.scheduler.timesteps):
-            model_output = self.unet(image, t).sample
-
-            image = self.scheduler.step(model_output, t, image).prev_sample
-
-        image = (image / 2 + 0.5).clamp(0, 1)
-        image = image.cpu().permute(0, 2, 3, 1).numpy()
-        if output_type == "pil":
-            image = self.numpy_to_pil(image)
-
-        if not return_dict:
-            return (image,)
-
-        return ImagePipelineOutput(images=image)
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl.py
DELETED
@@ -1,691 +0,0 @@
-# coding=utf-8
-# Copyright 2023 HuggingFace Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import copy
-import unittest
-
-import numpy as np
-import torch
-from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
-
-from diffusers import (
-    AutoencoderKL,
-    DDIMScheduler,
-    DPMSolverMultistepScheduler,
-    EulerDiscreteScheduler,
-    HeunDiscreteScheduler,
-    StableDiffusionXLImg2ImgPipeline,
-    StableDiffusionXLPipeline,
-    UNet2DConditionModel,
-    UniPCMultistepScheduler,
-)
-from diffusers.utils import torch_device
-from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
-
-from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
-from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
-
-
-enable_full_determinism()
-
-
-class StableDiffusionXLPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
-    pipeline_class = StableDiffusionXLPipeline
-    params = TEXT_TO_IMAGE_PARAMS
-    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
-    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
-    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
-
-    def get_dummy_components(self):
-        torch.manual_seed(0)
-        unet = UNet2DConditionModel(
-            block_out_channels=(32, 64),
-            layers_per_block=2,
-            sample_size=32,
-            in_channels=4,
-            out_channels=4,
-            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
-            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
-            # SD2-specific config below
-            attention_head_dim=(2, 4),
-            use_linear_projection=True,
-            addition_embed_type="text_time",
-            addition_time_embed_dim=8,
-            transformer_layers_per_block=(1, 2),
-            projection_class_embeddings_input_dim=80,  # 6 * 8 + 32
-            cross_attention_dim=64,
-        )
-        scheduler = EulerDiscreteScheduler(
-            beta_start=0.00085,
-            beta_end=0.012,
-            steps_offset=1,
-            beta_schedule="scaled_linear",
-            timestep_spacing="leading",
-        )
-        torch.manual_seed(0)
-        vae = AutoencoderKL(
-            block_out_channels=[32, 64],
-            in_channels=3,
-            out_channels=3,
-            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
-            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
-            latent_channels=4,
-            sample_size=128,
-        )
-        torch.manual_seed(0)
-        text_encoder_config = CLIPTextConfig(
-            bos_token_id=0,
-            eos_token_id=2,
-            hidden_size=32,
-            intermediate_size=37,
-            layer_norm_eps=1e-05,
-            num_attention_heads=4,
-            num_hidden_layers=5,
-            pad_token_id=1,
-            vocab_size=1000,
-            # SD2-specific config below
-            hidden_act="gelu",
-            projection_dim=32,
-        )
-        text_encoder = CLIPTextModel(text_encoder_config)
-        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
-
-        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
-        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
-
-        components = {
-            "unet": unet,
-            "scheduler": scheduler,
-            "vae": vae,
-            "text_encoder": text_encoder,
-            "tokenizer": tokenizer,
-            "text_encoder_2": text_encoder_2,
-            "tokenizer_2": tokenizer_2,
-            # "safety_checker": None,
-            # "feature_extractor": None,
-        }
-        return components
-
-    def get_dummy_inputs(self, device, seed=0):
-        if str(device).startswith("mps"):
-            generator = torch.manual_seed(seed)
-        else:
-            generator = torch.Generator(device=device).manual_seed(seed)
-        inputs = {
-            "prompt": "A painting of a squirrel eating a burger",
-            "generator": generator,
-            "num_inference_steps": 2,
-            "guidance_scale": 5.0,
-            "output_type": "numpy",
-        }
-        return inputs
-
-    def test_stable_diffusion_xl_euler(self):
-        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
-        components = self.get_dummy_components()
-        sd_pipe = StableDiffusionXLPipeline(**components)
-        sd_pipe = sd_pipe.to(device)
-        sd_pipe.set_progress_bar_config(disable=None)
-
-        inputs = self.get_dummy_inputs(device)
-        image = sd_pipe(**inputs).images
-        image_slice = image[0, -3:, -3:, -1]
-
-        assert image.shape == (1, 64, 64, 3)
-        expected_slice = np.array([0.5873, 0.6128, 0.4797, 0.5122, 0.5674, 0.4639, 0.5227, 0.5149, 0.4747])
-
-        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
-
-    def test_stable_diffusion_xl_prompt_embeds(self):
-        components = self.get_dummy_components()
-        sd_pipe = StableDiffusionXLPipeline(**components)
-        sd_pipe = sd_pipe.to(torch_device)
-        sd_pipe = sd_pipe.to(torch_device)
-        sd_pipe.set_progress_bar_config(disable=None)
-
-        # forward without prompt embeds
-        inputs = self.get_dummy_inputs(torch_device)
-        inputs["prompt"] = 2 * [inputs["prompt"]]
-        inputs["num_images_per_prompt"] = 2
-
-        output = sd_pipe(**inputs)
-        image_slice_1 = output.images[0, -3:, -3:, -1]
-
-        # forward with prompt embeds
-        inputs = self.get_dummy_inputs(torch_device)
-        prompt = 2 * [inputs.pop("prompt")]
-
-        (
-            prompt_embeds,
-            negative_prompt_embeds,
-            pooled_prompt_embeds,
-            negative_pooled_prompt_embeds,
-        ) = sd_pipe.encode_prompt(prompt)
-
-        output = sd_pipe(
-            **inputs,
-            prompt_embeds=prompt_embeds,
-            negative_prompt_embeds=negative_prompt_embeds,
-            pooled_prompt_embeds=pooled_prompt_embeds,
-            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
-        )
-        image_slice_2 = output.images[0, -3:, -3:, -1]
-
-        # make sure that it's equal
-        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
-
-    def test_stable_diffusion_xl_negative_prompt_embeds(self):
-        components = self.get_dummy_components()
-        sd_pipe = StableDiffusionXLPipeline(**components)
-        sd_pipe = sd_pipe.to(torch_device)
-        sd_pipe = sd_pipe.to(torch_device)
-        sd_pipe.set_progress_bar_config(disable=None)
-
-        # forward without prompt embeds
-        inputs = self.get_dummy_inputs(torch_device)
-        negative_prompt = 3 * ["this is a negative prompt"]
-        inputs["negative_prompt"] = negative_prompt
-        inputs["prompt"] = 3 * [inputs["prompt"]]
-
-        output = sd_pipe(**inputs)
-        image_slice_1 = output.images[0, -3:, -3:, -1]
-
-        # forward with prompt embeds
-        inputs = self.get_dummy_inputs(torch_device)
-        negative_prompt = 3 * ["this is a negative prompt"]
-        prompt = 3 * [inputs.pop("prompt")]
-
-        (
-            prompt_embeds,
-            negative_prompt_embeds,
-            pooled_prompt_embeds,
-            negative_pooled_prompt_embeds,
-        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)
-
-        output = sd_pipe(
-            **inputs,
-            prompt_embeds=prompt_embeds,
-            negative_prompt_embeds=negative_prompt_embeds,
-            pooled_prompt_embeds=pooled_prompt_embeds,
-            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
-        )
-        image_slice_2 = output.images[0, -3:, -3:, -1]
-
-        # make sure that it's equal
-        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
-
-    def test_attention_slicing_forward_pass(self):
-        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)
-
-    def test_inference_batch_single_identical(self):
-        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
-
-    @require_torch_gpu
-    def test_stable_diffusion_xl_offloads(self):
-        pipes = []
-        components = self.get_dummy_components()
-        sd_pipe = StableDiffusionXLPipeline(**components).to(torch_device)
-        pipes.append(sd_pipe)
-
-        components = self.get_dummy_components()
-        sd_pipe = StableDiffusionXLPipeline(**components)
-        sd_pipe.enable_model_cpu_offload()
-        pipes.append(sd_pipe)
-
-        components = self.get_dummy_components()
-        sd_pipe = StableDiffusionXLPipeline(**components)
-        sd_pipe.enable_sequential_cpu_offload()
-        pipes.append(sd_pipe)
-
-        image_slices = []
-        for pipe in pipes:
-            pipe.unet.set_default_attn_processor()
-
-            inputs = self.get_dummy_inputs(torch_device)
-            image = pipe(**inputs).images
-
-            image_slices.append(image[0, -3:, -3:, -1].flatten())
-
-        assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
-        assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3
-
-    def test_stable_diffusion_two_xl_mixture_of_denoiser(self):
-        components = self.get_dummy_components()
-        pipe_1 = StableDiffusionXLPipeline(**components).to(torch_device)
-        pipe_1.unet.set_default_attn_processor()
-        pipe_2 = StableDiffusionXLImg2ImgPipeline(**components).to(torch_device)
-        pipe_2.unet.set_default_attn_processor()
-
-        def assert_run_mixture(
-            num_steps,
-            split,
-            scheduler_cls_orig,
-            expected_tss,
-            num_train_timesteps=pipe_1.scheduler.config.num_train_timesteps,
-        ):
-            inputs = self.get_dummy_inputs(torch_device)
-            inputs["num_inference_steps"] = num_steps
-
-            class scheduler_cls(scheduler_cls_orig):
-                pass
-
-            pipe_1.scheduler = scheduler_cls.from_config(pipe_1.scheduler.config)
-            pipe_2.scheduler = scheduler_cls.from_config(pipe_2.scheduler.config)
-
-            # Let's retrieve the number of timesteps we want to use
-            pipe_1.scheduler.set_timesteps(num_steps)
-            expected_steps = pipe_1.scheduler.timesteps.tolist()
-
-            expected_steps_1 = list(filter(lambda ts: ts >= split, expected_tss))
-            expected_steps_2 = list(filter(lambda ts: ts < split, expected_tss))
-
-            # now we monkey patch step `done_steps`
-            # list into the step function for testing
-            done_steps = []
-            old_step = copy.copy(scheduler_cls.step)
-
-            def new_step(self, *args, **kwargs):
-                done_steps.append(args[1].cpu().item())  # args[1] is always the passed `t`
-                return old_step(self, *args, **kwargs)
-
-            scheduler_cls.step = new_step
-
-            inputs_1 = {
-                **inputs,
-                **{
-                    "denoising_end": 1.0 - (split / num_train_timesteps),
-                    "output_type": "latent",
-                },
-            }
-            latents = pipe_1(**inputs_1).images[0]
-
-            assert expected_steps_1 == done_steps, f"Failure with {scheduler_cls.__name__} and {num_steps} and {split}"
-
-            inputs_2 = {
-                **inputs,
-                **{
-                    "denoising_start": 1.0 - (split / num_train_timesteps),
-                    "image": latents,
-                },
-            }
-            pipe_2(**inputs_2).images[0]
-
-            assert expected_steps_2 == done_steps[len(expected_steps_1) :]
-            assert expected_steps == done_steps, f"Failure with {scheduler_cls.__name__} and {num_steps} and {split}"
-
-        steps = 10
-        for split in [300, 500, 700]:
-            for scheduler_cls_timesteps in [
-                (DDIMScheduler, [901, 801, 701, 601, 501, 401, 301, 201, 101, 1]),
-                (EulerDiscreteScheduler, [901, 801, 701, 601, 501, 401, 301, 201, 101, 1]),
-                (DPMSolverMultistepScheduler, [901, 811, 721, 631, 541, 451, 361, 271, 181, 91]),
-                (UniPCMultistepScheduler, [901, 811, 721, 631, 541, 451, 361, 271, 181, 91]),
-                (
-                    HeunDiscreteScheduler,
-                    [
-                        901.0,
-                        801.0,
-                        801.0,
-                        701.0,
-                        701.0,
-                        601.0,
-                        601.0,
-                        501.0,
-                        501.0,
-                        401.0,
-                        401.0,
-                        301.0,
-                        301.0,
-                        201.0,
-                        201.0,
-                        101.0,
-                        101.0,
-                        1.0,
-                        1.0,
-                    ],
-                ),
-            ]:
-                assert_run_mixture(steps, split, scheduler_cls_timesteps[0], scheduler_cls_timesteps[1])
-
-        steps = 25
-        for split in [300, 500, 700]:
-            for scheduler_cls_timesteps in [
-                (
-                    DDIMScheduler,
-                    [
-                        961,
-                        921,
-                        881,
-                        841,
-                        801,
-                        761,
-                        721,
-                        681,
-                        641,
-                        601,
-                        561,
-                        521,
-                        481,
-                        441,
-                        401,
-                        361,
-                        321,
-                        281,
-                        241,
-                        201,
-                        161,
-                        121,
-                        81,
-                        41,
-                        1,
-                    ],
-                ),
-                (
-                    EulerDiscreteScheduler,
-                    [
-                        961.0,
-                        921.0,
-                        881.0,
-                        841.0,
-                        801.0,
-                        761.0,
-                        721.0,
-                        681.0,
-                        641.0,
-                        601.0,
-                        561.0,
-                        521.0,
-                        481.0,
-                        441.0,
-                        401.0,
-                        361.0,
-                        321.0,
-                        281.0,
-                        241.0,
-                        201.0,
-                        161.0,
-                        121.0,
-                        81.0,
-                        41.0,
-                        1.0,
-                    ],
-                ),
-                (
-                    DPMSolverMultistepScheduler,
-                    [
-                        951,
-                        913,
-                        875,
-                        837,
-                        799,
-                        761,
-                        723,
-                        685,
-                        647,
-                        609,
-                        571,
-                        533,
-                        495,
-                        457,
-                        419,
-                        381,
-                        343,
-                        305,
-                        267,
-                        229,
-                        191,
-                        153,
-                        115,
-                        77,
-                        39,
-                    ],
-                ),
-                (
-                    UniPCMultistepScheduler,
-                    [
-                        951,
-                        913,
-                        875,
-                        837,
-                        799,
-                        761,
-                        723,
-                        685,
-                        647,
-                        609,
-                        571,
-                        533,
-                        495,
-                        457,
-                        419,
-                        381,
-                        343,
-                        305,
-                        267,
-                        229,
-                        191,
-                        153,
-                        115,
-                        77,
-                        39,
-                    ],
-                ),
-                (
-                    HeunDiscreteScheduler,
-                    [
-                        961.0,
-                        921.0,
-                        921.0,
-                        881.0,
-                        881.0,
-                        841.0,
-                        841.0,
-                        801.0,
-                        801.0,
-                        761.0,
-                        761.0,
-                        721.0,
-                        721.0,
-                        681.0,
-                        681.0,
-                        641.0,
-                        641.0,
-                        601.0,
-                        601.0,
-                        561.0,
-                        561.0,
-                        521.0,
-                        521.0,
-                        481.0,
-                        481.0,
-                        441.0,
-                        441.0,
-                        401.0,
-                        401.0,
-                        361.0,
-                        361.0,
-                        321.0,
-                        321.0,
-                        281.0,
-                        281.0,
-                        241.0,
-                        241.0,
-                        201.0,
-                        201.0,
-                        161.0,
-                        161.0,
-                        121.0,
-                        121.0,
-                        81.0,
-                        81.0,
-                        41.0,
-                        41.0,
-                        1.0,
-                        1.0,
-                    ],
-                ),
-            ]:
-                assert_run_mixture(steps, split, scheduler_cls_timesteps[0], scheduler_cls_timesteps[1])
-
-    def test_stable_diffusion_three_xl_mixture_of_denoiser(self):
-        components = self.get_dummy_components()
-        pipe_1 = StableDiffusionXLPipeline(**components).to(torch_device)
-        pipe_1.unet.set_default_attn_processor()
-        pipe_2 = StableDiffusionXLImg2ImgPipeline(**components).to(torch_device)
-        pipe_2.unet.set_default_attn_processor()
-        pipe_3 = StableDiffusionXLImg2ImgPipeline(**components).to(torch_device)
-        pipe_3.unet.set_default_attn_processor()
-
-        def assert_run_mixture(
-            num_steps,
-            split_1,
-            split_2,
-            scheduler_cls_orig,
-            num_train_timesteps=pipe_1.scheduler.config.num_train_timesteps,
-        ):
-            inputs = self.get_dummy_inputs(torch_device)
-            inputs["num_inference_steps"] = num_steps
-
-            class scheduler_cls(scheduler_cls_orig):
-                pass
-
-            pipe_1.scheduler = scheduler_cls.from_config(pipe_1.scheduler.config)
-            pipe_2.scheduler = scheduler_cls.from_config(pipe_2.scheduler.config)
-            pipe_3.scheduler = scheduler_cls.from_config(pipe_3.scheduler.config)
-
-            # Let's retrieve the number of timesteps we want to use
-            pipe_1.scheduler.set_timesteps(num_steps)
-            expected_steps = pipe_1.scheduler.timesteps.tolist()
-
-            split_1_ts = num_train_timesteps - int(round(num_train_timesteps * split_1))
-            split_2_ts = num_train_timesteps - int(round(num_train_timesteps * split_2))
-            expected_steps_1 = expected_steps[:split_1_ts]
-            expected_steps_2 = expected_steps[split_1_ts:split_2_ts]
-            expected_steps_3 = expected_steps[split_2_ts:]
-
-            expected_steps_1 = list(filter(lambda ts: ts >= split_1_ts, expected_steps))
-            expected_steps_2 = list(filter(lambda ts: ts >= split_2_ts and ts < split_1_ts, expected_steps))
-            expected_steps_3 = list(filter(lambda ts: ts < split_2_ts, expected_steps))
-
-            # now we monkey patch step `done_steps`
-            # list into the step function for testing
-            done_steps = []
-            old_step = copy.copy(scheduler_cls.step)
-
-            def new_step(self, *args, **kwargs):
-                done_steps.append(args[1].cpu().item())  # args[1] is always the passed `t`
-                return old_step(self, *args, **kwargs)
-
-            scheduler_cls.step = new_step
-
-            inputs_1 = {**inputs, **{"denoising_end": split_1, "output_type": "latent"}}
-            latents = pipe_1(**inputs_1).images[0]
-
-            assert (
-                expected_steps_1 == done_steps
-            ), f"Failure with {scheduler_cls.__name__} and {num_steps} and {split_1} and {split_2}"
-
-            with self.assertRaises(ValueError) as cm:
-                inputs_2 = {
-                    **inputs,
-                    **{
-                        "denoising_start": split_2,
-                        "denoising_end": split_1,
-                        "image": latents,
-                        "output_type": "latent",
-                    },
-                }
-                pipe_2(**inputs_2).images[0]
-            assert "cannot be larger than or equal to `denoising_end`" in str(cm.exception)
-
-            inputs_2 = {
-                **inputs,
-                **{"denoising_start": split_1, "denoising_end": split_2, "image": latents, "output_type": "latent"},
-            }
-            pipe_2(**inputs_2).images[0]
-
-            assert expected_steps_2 == done_steps[len(expected_steps_1) :]
-
-            inputs_3 = {**inputs, **{"denoising_start": split_2, "image": latents}}
-            pipe_3(**inputs_3).images[0]
-
-            assert expected_steps_3 == done_steps[len(expected_steps_1) + len(expected_steps_2) :]
-            assert (
-                expected_steps == done_steps
-            ), f"Failure with {scheduler_cls.__name__} and {num_steps} and {split_1} and {split_2}"
-
-        for steps in [7, 11, 20]:
-            for split_1, split_2 in zip([0.19, 0.32], [0.81, 0.68]):
-                for scheduler_cls in [
-                    DDIMScheduler,
-                    EulerDiscreteScheduler,
-                    DPMSolverMultistepScheduler,
-                    UniPCMultistepScheduler,
-                    HeunDiscreteScheduler,
-                ]:
-                    assert_run_mixture(steps, split_1, split_2, scheduler_cls)
-
-    def test_stable_diffusion_xl_multi_prompts(self):
-        components = self.get_dummy_components()
-        sd_pipe = self.pipeline_class(**components).to(torch_device)
-
-        # forward with single prompt
-        inputs = self.get_dummy_inputs(torch_device)
-        output = sd_pipe(**inputs)
-        image_slice_1 = output.images[0, -3:, -3:, -1]
-
-        # forward with same prompt duplicated
-        inputs = self.get_dummy_inputs(torch_device)
-        inputs["prompt_2"] = inputs["prompt"]
-        output = sd_pipe(**inputs)
-        image_slice_2 = output.images[0, -3:, -3:, -1]
-
-        # ensure the results are equal
-        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
-
-        # forward with different prompt
-        inputs = self.get_dummy_inputs(torch_device)
-        inputs["prompt_2"] = "different prompt"
-        output = sd_pipe(**inputs)
-        image_slice_3 = output.images[0, -3:, -3:, -1]
-
-        # ensure the results are not equal
-        assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4
-
-        # manually set a negative_prompt
-        inputs = self.get_dummy_inputs(torch_device)
-        inputs["negative_prompt"] = "negative prompt"
-        output = sd_pipe(**inputs)
-        image_slice_1 = output.images[0, -3:, -3:, -1]
-
-        # forward with same negative_prompt duplicated
-        inputs = self.get_dummy_inputs(torch_device)
-        inputs["negative_prompt"] = "negative prompt"
-        inputs["negative_prompt_2"] = inputs["negative_prompt"]
-        output = sd_pipe(**inputs)
-        image_slice_2 = output.images[0, -3:, -3:, -1]
-
-        # ensure the results are equal
-        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
-
-        # forward with different negative_prompt
-        inputs = self.get_dummy_inputs(torch_device)
-        inputs["negative_prompt"] = "negative prompt"
-        inputs["negative_prompt_2"] = "different negative prompt"
-        output = sd_pipe(**inputs)
-        image_slice_3 = output.images[0, -3:, -3:, -1]
-
-        # ensure the results are not equal
-        assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4
spaces/Andy1621/uniformer_image_detection/configs/detectors/cascade_rcnn_r50_rfp_1x_coco.py
DELETED
@@ -1,28 +0,0 @@
-_base_ = [
-    '../_base_/models/cascade_rcnn_r50_fpn.py',
-    '../_base_/datasets/coco_detection.py',
-    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
-]
-
-model = dict(
-    backbone=dict(
-        type='DetectoRS_ResNet',
-        conv_cfg=dict(type='ConvAWS'),
-        output_img=True),
-    neck=dict(
-        type='RFP',
-        rfp_steps=2,
-        aspp_out_channels=64,
-        aspp_dilations=(1, 3, 6, 1),
-        rfp_backbone=dict(
-            rfp_inplanes=256,
-            type='DetectoRS_ResNet',
-            depth=50,
-            num_stages=4,
-            out_indices=(0, 1, 2, 3),
-            frozen_stages=1,
-            norm_cfg=dict(type='BN', requires_grad=True),
-            norm_eval=True,
-            conv_cfg=dict(type='ConvAWS'),
-            pretrained='torchvision://resnet50',
-            style='pytorch')))
spaces/Andy1621/uniformer_image_detection/configs/gn+ws/faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco.py
DELETED
@@ -1,16 +0,0 @@
-_base_ = './faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py'
-conv_cfg = dict(type='ConvWS')
-norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
-model = dict(
-    pretrained='open-mmlab://jhu/resnext50_32x4d_gn_ws',
-    backbone=dict(
-        type='ResNeXt',
-        depth=50,
-        groups=32,
-        base_width=4,
-        num_stages=4,
-        out_indices=(0, 1, 2, 3),
-        frozen_stages=1,
-        style='pytorch',
-        conv_cfg=conv_cfg,
-        norm_cfg=norm_cfg))
spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/coder/bucketing_bbox_coder.py
DELETED
@@ -1,350 +0,0 @@
import mmcv
import numpy as np
import torch
import torch.nn.functional as F

from ..builder import BBOX_CODERS
from ..transforms import bbox_rescale
from .base_bbox_coder import BaseBBoxCoder


@BBOX_CODERS.register_module()
class BucketingBBoxCoder(BaseBBoxCoder):
    """Bucketing BBox Coder for Side-Aware Boundary Localization (SABL).

    Boundary Localization with Bucketing and Bucketing Guided Rescoring
    are implemented here.

    Please refer to https://arxiv.org/abs/1912.04260 for more details.

    Args:
        num_buckets (int): Number of buckets.
        scale_factor (int): Scale factor of proposals to generate buckets.
        offset_topk (int): Top-k buckets are used to generate
            bucket fine regression targets. Defaults to 2.
        offset_upperbound (float): Offset upperbound to generate
            bucket fine regression targets.
            To avoid too large offset displacements. Defaults to 1.0.
        cls_ignore_neighbor (bool): Whether to ignore the second nearest
            bucket. Defaults to True.
        clip_border (bool, optional): Whether to clip objects outside the
            border of the image. Defaults to True.
    """

    def __init__(self,
                 num_buckets,
                 scale_factor,
                 offset_topk=2,
                 offset_upperbound=1.0,
                 cls_ignore_neighbor=True,
                 clip_border=True):
        super(BucketingBBoxCoder, self).__init__()
        self.num_buckets = num_buckets
        self.scale_factor = scale_factor
        self.offset_topk = offset_topk
        self.offset_upperbound = offset_upperbound
        self.cls_ignore_neighbor = cls_ignore_neighbor
        self.clip_border = clip_border

    def encode(self, bboxes, gt_bboxes):
        """Get bucketing estimation and fine regression targets during
        training.

        Args:
            bboxes (torch.Tensor): Source boxes, e.g., object proposals.
            gt_bboxes (torch.Tensor): Target of the transformation, e.g.,
                ground truth boxes.

        Returns:
            encoded_bboxes (tuple[Tensor]): Bucketing estimation
                and fine regression targets and weights.
        """

        assert bboxes.size(0) == gt_bboxes.size(0)
        assert bboxes.size(-1) == gt_bboxes.size(-1) == 4
        encoded_bboxes = bbox2bucket(bboxes, gt_bboxes, self.num_buckets,
                                     self.scale_factor, self.offset_topk,
                                     self.offset_upperbound,
                                     self.cls_ignore_neighbor)
        return encoded_bboxes

    def decode(self, bboxes, pred_bboxes, max_shape=None):
        """Apply transformation `pred_bboxes` to `bboxes`.

        Args:
            bboxes (torch.Tensor): Basic boxes.
            pred_bboxes (torch.Tensor): Predictions for bucketing estimation
                and fine regression.
            max_shape (tuple[int], optional): Maximum shape of boxes.
                Defaults to None.

        Returns:
            torch.Tensor: Decoded boxes.
        """
        assert len(pred_bboxes) == 2
        cls_preds, offset_preds = pred_bboxes
        assert cls_preds.size(0) == bboxes.size(0) and offset_preds.size(
            0) == bboxes.size(0)
        decoded_bboxes = bucket2bbox(bboxes, cls_preds, offset_preds,
                                     self.num_buckets, self.scale_factor,
                                     max_shape, self.clip_border)

        return decoded_bboxes


@mmcv.jit(coderize=True)
def generat_buckets(proposals, num_buckets, scale_factor=1.0):
    """Generate buckets w.r.t bucket number and scale factor of proposals.

    Args:
        proposals (Tensor): Shape (n, 4)
        num_buckets (int): Number of buckets.
        scale_factor (float): Scale factor to rescale proposals.

    Returns:
        tuple[Tensor]: (bucket_w, bucket_h, l_buckets, r_buckets,
         t_buckets, d_buckets)

            - bucket_w: Width of buckets on x-axis. Shape (n, ).
            - bucket_h: Height of buckets on y-axis. Shape (n, ).
            - l_buckets: Left buckets. Shape (n, ceil(num_buckets/2)).
            - r_buckets: Right buckets. Shape (n, ceil(num_buckets/2)).
            - t_buckets: Top buckets. Shape (n, ceil(num_buckets/2)).
            - d_buckets: Down buckets. Shape (n, ceil(num_buckets/2)).
    """
    proposals = bbox_rescale(proposals, scale_factor)

    # number of buckets on each side
    side_num = int(np.ceil(num_buckets / 2.0))
    pw = proposals[..., 2] - proposals[..., 0]
    ph = proposals[..., 3] - proposals[..., 1]
    px1 = proposals[..., 0]
    py1 = proposals[..., 1]
    px2 = proposals[..., 2]
    py2 = proposals[..., 3]

    bucket_w = pw / num_buckets
    bucket_h = ph / num_buckets

    # left buckets
    l_buckets = px1[:, None] + (0.5 + torch.arange(
        0, side_num).to(proposals).float())[None, :] * bucket_w[:, None]
    # right buckets
    r_buckets = px2[:, None] - (0.5 + torch.arange(
        0, side_num).to(proposals).float())[None, :] * bucket_w[:, None]
    # top buckets
    t_buckets = py1[:, None] + (0.5 + torch.arange(
        0, side_num).to(proposals).float())[None, :] * bucket_h[:, None]
    # down buckets
    d_buckets = py2[:, None] - (0.5 + torch.arange(
        0, side_num).to(proposals).float())[None, :] * bucket_h[:, None]
    return bucket_w, bucket_h, l_buckets, r_buckets, t_buckets, d_buckets


@mmcv.jit(coderize=True)
def bbox2bucket(proposals,
                gt,
                num_buckets,
                scale_factor,
                offset_topk=2,
                offset_upperbound=1.0,
                cls_ignore_neighbor=True):
    """Generate buckets estimation and fine regression targets.

    Args:
        proposals (Tensor): Shape (n, 4)
        gt (Tensor): Shape (n, 4)
        num_buckets (int): Number of buckets.
        scale_factor (float): Scale factor to rescale proposals.
        offset_topk (int): Top-k buckets are used to generate
            bucket fine regression targets. Defaults to 2.
        offset_upperbound (float): Offset allowance to generate
            bucket fine regression targets.
            To avoid too large offset displacements. Defaults to 1.0.
        cls_ignore_neighbor (bool): Whether to ignore the second nearest
            bucket. Defaults to True.

    Returns:
        tuple[Tensor]: (offsets, offsets_weights, bucket_labels, cls_weights).

            - offsets: Fine regression targets. \
                Shape (n, num_buckets*2).
            - offsets_weights: Fine regression weights. \
                Shape (n, num_buckets*2).
            - bucket_labels: Bucketing estimation labels. \
                Shape (n, num_buckets*2).
            - cls_weights: Bucketing estimation weights. \
                Shape (n, num_buckets*2).
    """
    assert proposals.size() == gt.size()

    # generate buckets
    proposals = proposals.float()
    gt = gt.float()
    (bucket_w, bucket_h, l_buckets, r_buckets, t_buckets,
     d_buckets) = generat_buckets(proposals, num_buckets, scale_factor)

    gx1 = gt[..., 0]
    gy1 = gt[..., 1]
    gx2 = gt[..., 2]
    gy2 = gt[..., 3]

    # generate offset targets and weights
    # offsets from buckets to gts
    l_offsets = (l_buckets - gx1[:, None]) / bucket_w[:, None]
    r_offsets = (r_buckets - gx2[:, None]) / bucket_w[:, None]
    t_offsets = (t_buckets - gy1[:, None]) / bucket_h[:, None]
    d_offsets = (d_buckets - gy2[:, None]) / bucket_h[:, None]

    # select top-k nearest buckets
    l_topk, l_label = l_offsets.abs().topk(
        offset_topk, dim=1, largest=False, sorted=True)
    r_topk, r_label = r_offsets.abs().topk(
        offset_topk, dim=1, largest=False, sorted=True)
    t_topk, t_label = t_offsets.abs().topk(
        offset_topk, dim=1, largest=False, sorted=True)
    d_topk, d_label = d_offsets.abs().topk(
        offset_topk, dim=1, largest=False, sorted=True)

    offset_l_weights = l_offsets.new_zeros(l_offsets.size())
    offset_r_weights = r_offsets.new_zeros(r_offsets.size())
    offset_t_weights = t_offsets.new_zeros(t_offsets.size())
    offset_d_weights = d_offsets.new_zeros(d_offsets.size())
    inds = torch.arange(0, proposals.size(0)).to(proposals).long()

    # generate offset weights of top-k nearest buckets
    for k in range(offset_topk):
        if k >= 1:
            offset_l_weights[inds, l_label[:, k]] = (
                l_topk[:, k] < offset_upperbound).float()
            offset_r_weights[inds, r_label[:, k]] = (
                r_topk[:, k] < offset_upperbound).float()
            offset_t_weights[inds, t_label[:, k]] = (
                t_topk[:, k] < offset_upperbound).float()
            offset_d_weights[inds, d_label[:, k]] = (
                d_topk[:, k] < offset_upperbound).float()
        else:
            offset_l_weights[inds, l_label[:, k]] = 1.0
            offset_r_weights[inds, r_label[:, k]] = 1.0
            offset_t_weights[inds, t_label[:, k]] = 1.0
            offset_d_weights[inds, d_label[:, k]] = 1.0

    offsets = torch.cat([l_offsets, r_offsets, t_offsets, d_offsets], dim=-1)
    offsets_weights = torch.cat([
        offset_l_weights, offset_r_weights, offset_t_weights, offset_d_weights
    ], dim=-1)

    # generate bucket labels and weight
    side_num = int(np.ceil(num_buckets / 2.0))
    labels = torch.stack(
        [l_label[:, 0], r_label[:, 0], t_label[:, 0], d_label[:, 0]], dim=-1)

    batch_size = labels.size(0)
    bucket_labels = F.one_hot(labels.view(-1), side_num).view(
        batch_size, -1).float()
    bucket_cls_l_weights = (l_offsets.abs() < 1).float()
    bucket_cls_r_weights = (r_offsets.abs() < 1).float()
    bucket_cls_t_weights = (t_offsets.abs() < 1).float()
    bucket_cls_d_weights = (d_offsets.abs() < 1).float()
    bucket_cls_weights = torch.cat([
        bucket_cls_l_weights, bucket_cls_r_weights, bucket_cls_t_weights,
        bucket_cls_d_weights
    ], dim=-1)
    # ignore second nearest buckets for cls if necessary
    if cls_ignore_neighbor:
        bucket_cls_weights = (~((bucket_cls_weights == 1) &
                                (bucket_labels == 0))).float()
    else:
        bucket_cls_weights[:] = 1.0
    return offsets, offsets_weights, bucket_labels, bucket_cls_weights


@mmcv.jit(coderize=True)
def bucket2bbox(proposals,
                cls_preds,
                offset_preds,
                num_buckets,
                scale_factor=1.0,
                max_shape=None,
                clip_border=True):
    """Apply bucketing estimation (cls preds) and fine regression (offset
    preds) to generate det bboxes.

    Args:
        proposals (Tensor): Boxes to be transformed. Shape (n, 4)
        cls_preds (Tensor): Bucketing estimation. Shape (n, num_buckets*2).
        offset_preds (Tensor): Fine regression. Shape (n, num_buckets*2).
        num_buckets (int): Number of buckets.
        scale_factor (float): Scale factor to rescale proposals.
        max_shape (tuple[int, int]): Maximum bounds for boxes, specified
            as (H, W).
        clip_border (bool, optional): Whether to clip objects outside the
            border of the image. Defaults to True.

    Returns:
        tuple[Tensor]: (bboxes, loc_confidence).

            - bboxes: Predicted bboxes. Shape (n, 4)
            - loc_confidence: Localization confidence of predicted bboxes.
                Shape (n,).
    """

    side_num = int(np.ceil(num_buckets / 2.0))
    cls_preds = cls_preds.view(-1, side_num)
    offset_preds = offset_preds.view(-1, side_num)

    scores = F.softmax(cls_preds, dim=1)
    score_topk, score_label = scores.topk(2, dim=1, largest=True, sorted=True)

    rescaled_proposals = bbox_rescale(proposals, scale_factor)

    pw = rescaled_proposals[..., 2] - rescaled_proposals[..., 0]
    ph = rescaled_proposals[..., 3] - rescaled_proposals[..., 1]
    px1 = rescaled_proposals[..., 0]
    py1 = rescaled_proposals[..., 1]
    px2 = rescaled_proposals[..., 2]
    py2 = rescaled_proposals[..., 3]

    bucket_w = pw / num_buckets
    bucket_h = ph / num_buckets

    score_inds_l = score_label[0::4, 0]
    score_inds_r = score_label[1::4, 0]
    score_inds_t = score_label[2::4, 0]
    score_inds_d = score_label[3::4, 0]
    l_buckets = px1 + (0.5 + score_inds_l.float()) * bucket_w
    r_buckets = px2 - (0.5 + score_inds_r.float()) * bucket_w
    t_buckets = py1 + (0.5 + score_inds_t.float()) * bucket_h
    d_buckets = py2 - (0.5 + score_inds_d.float()) * bucket_h

    offsets = offset_preds.view(-1, 4, side_num)
    inds = torch.arange(proposals.size(0)).to(proposals).long()
    l_offsets = offsets[:, 0, :][inds, score_inds_l]
    r_offsets = offsets[:, 1, :][inds, score_inds_r]
    t_offsets = offsets[:, 2, :][inds, score_inds_t]
    d_offsets = offsets[:, 3, :][inds, score_inds_d]

    x1 = l_buckets - l_offsets * bucket_w
    x2 = r_buckets - r_offsets * bucket_w
    y1 = t_buckets - t_offsets * bucket_h
    y2 = d_buckets - d_offsets * bucket_h

    if clip_border and max_shape is not None:
        x1 = x1.clamp(min=0, max=max_shape[1] - 1)
        y1 = y1.clamp(min=0, max=max_shape[0] - 1)
        x2 = x2.clamp(min=0, max=max_shape[1] - 1)
        y2 = y2.clamp(min=0, max=max_shape[0] - 1)
    bboxes = torch.cat([x1[:, None], y1[:, None], x2[:, None], y2[:, None]],
                       dim=-1)

    # bucketing guided rescoring
    loc_confidence = score_topk[:, 0]
    top2_neighbor_inds = (score_label[:, 0] - score_label[:, 1]).abs() == 1
    loc_confidence += score_topk[:, 1] * top2_neighbor_inds.float()
    loc_confidence = loc_confidence.view(-1, 4).mean(dim=1)

    return bboxes, loc_confidence
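For orientation, the coder is exercised through its encode/decode pair: encode turns proposal/ground-truth pairs into per-side bucket labels plus fine offsets, and decode maps predictions back to boxes. A minimal sketch, with illustrative bucket settings and made-up tensors (not taken from any config in this repo); feeding the training targets straight back into decode roughly recovers the ground truth:

import torch

coder = BucketingBBoxCoder(num_buckets=14, scale_factor=1.7)
proposals = torch.tensor([[10., 10., 60., 80.]])
gt_bboxes = torch.tensor([[12., 8., 58., 84.]])

# training targets: fine offsets (+ weights) and one-hot bucket labels (+ weights)
offsets, offset_weights, bucket_labels, cls_weights = coder.encode(proposals, gt_bboxes)

# decode expects (cls_preds, offset_preds) of shape (n, num_buckets*2) each
boxes, loc_confidence = coder.decode(
    proposals, (bucket_labels, offsets), max_shape=(100, 100))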
spaces/Andy1621/uniformer_image_segmentation/configs/_base_/models/ocrnet_r50-d8.py
DELETED
@@ -1,47 +0,0 @@
# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    type='CascadeEncoderDecoder',
    num_stages=2,
    pretrained='open-mmlab://resnet50_v1c',
    backbone=dict(
        type='ResNetV1c',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        dilations=(1, 1, 2, 4),
        strides=(1, 2, 1, 1),
        norm_cfg=norm_cfg,
        norm_eval=False,
        style='pytorch',
        contract_dilation=True),
    decode_head=[
        dict(
            type='FCNHead',
            in_channels=1024,
            in_index=2,
            channels=256,
            num_convs=1,
            concat_input=False,
            dropout_ratio=0.1,
            num_classes=19,
            norm_cfg=norm_cfg,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
        dict(
            type='OCRHead',
            in_channels=2048,
            in_index=3,
            channels=512,
            ocr_channels=256,
            dropout_ratio=0.1,
            num_classes=19,
            norm_cfg=norm_cfg,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))
    ],
    # model training and testing settings
    train_cfg=dict(),
    test_cfg=dict(mode='whole'))
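As a _base_ model file, this config is meant to be inherited by a full experiment config and built into a two-stage cascade (FCN auxiliary head feeding the OCR head). A rough sketch of how mmseg-era tooling would consume it; the derived config path below is a hypothetical example, not a file shipped in this Space:

from mmcv import Config
from mmseg.models import build_segmentor

# hypothetical derived config that inherits this _base_ model file
cfg = Config.fromfile('configs/ocrnet/ocrnet_r50-d8_512x1024_40k_cityscapes.py')
model = build_segmentor(cfg.model)  # train_cfg/test_cfg already live inside cfg.model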
spaces/AnimalEquality/chatbot/_proc/_docs/site_libs/quarto-nav/quarto-nav.js
DELETED
@@ -1,277 +0,0 @@
const headroomChanged = new CustomEvent("quarto-hrChanged", {
  detail: {},
  bubbles: true,
  cancelable: false,
  composed: false,
});

window.document.addEventListener("DOMContentLoaded", function () {
  let init = false;

  // Manage the back to top button, if one is present.
  let lastScrollTop = window.pageYOffset || document.documentElement.scrollTop;
  const scrollDownBuffer = 5;
  const scrollUpBuffer = 35;
  const btn = document.getElementById("quarto-back-to-top");
  const hideBackToTop = () => {
    btn.style.display = "none";
  };
  const showBackToTop = () => {
    btn.style.display = "inline-block";
  };
  if (btn) {
    window.document.addEventListener(
      "scroll",
      function () {
        const currentScrollTop =
          window.pageYOffset || document.documentElement.scrollTop;

        // Shows and hides the button 'intelligently' as the user scrolls
        if (currentScrollTop - scrollDownBuffer > lastScrollTop) {
          hideBackToTop();
          lastScrollTop = currentScrollTop <= 0 ? 0 : currentScrollTop;
        } else if (currentScrollTop < lastScrollTop - scrollUpBuffer) {
          showBackToTop();
          lastScrollTop = currentScrollTop <= 0 ? 0 : currentScrollTop;
        }

        // Show the button at the bottom, hide it at the top
        if (currentScrollTop <= 0) {
          hideBackToTop();
        } else if (
          window.innerHeight + currentScrollTop >=
          document.body.offsetHeight
        ) {
          showBackToTop();
        }
      },
      false
    );
  }

  function throttle(func, wait) {
    var timeout;
    return function () {
      const context = this;
      const args = arguments;
      const later = function () {
        clearTimeout(timeout);
        timeout = null;
        func.apply(context, args);
      };

      if (!timeout) {
        timeout = setTimeout(later, wait);
      }
    };
  }

  function headerOffset() {
    // Set an offset if there is a fixed top navbar
    const headerEl = window.document.querySelector("header.fixed-top");
    if (headerEl) {
      return headerEl.clientHeight;
    } else {
      return 0;
    }
  }

  function footerOffset() {
    const footerEl = window.document.querySelector("footer.footer");
    if (footerEl) {
      return footerEl.clientHeight;
    } else {
      return 0;
    }
  }

  function updateDocumentOffsetWithoutAnimation() {
    updateDocumentOffset(false);
  }

  function updateDocumentOffset(animated) {
    // set body offset
    const topOffset = headerOffset();
    const bodyOffset = topOffset + footerOffset();
    const bodyEl = window.document.body;
    bodyEl.setAttribute("data-bs-offset", topOffset);
    bodyEl.style.paddingTop = topOffset + "px";

    // deal with sidebar offsets
    const sidebars = window.document.querySelectorAll(
      ".sidebar, .headroom-target"
    );
    sidebars.forEach((sidebar) => {
      if (!animated) {
        sidebar.classList.add("notransition");
        // Remove the no transition class after the animation has time to complete
        setTimeout(function () {
          sidebar.classList.remove("notransition");
        }, 201);
      }

      if (window.Headroom && sidebar.classList.contains("sidebar-unpinned")) {
        sidebar.style.top = "0";
        sidebar.style.maxHeight = "100vh";
      } else {
        sidebar.style.top = topOffset + "px";
        sidebar.style.maxHeight = "calc(100vh - " + topOffset + "px)";
      }
    });

    // allow space for footer
    const mainContainer = window.document.querySelector(".quarto-container");
    if (mainContainer) {
      mainContainer.style.minHeight = "calc(100vh - " + bodyOffset + "px)";
    }

    // link offset
    let linkStyle = window.document.querySelector("#quarto-target-style");
    if (!linkStyle) {
      linkStyle = window.document.createElement("style");
      linkStyle.setAttribute("id", "quarto-target-style");
      window.document.head.appendChild(linkStyle);
    }
    while (linkStyle.firstChild) {
      linkStyle.removeChild(linkStyle.firstChild);
    }
    if (topOffset > 0) {
      linkStyle.appendChild(
        window.document.createTextNode(`
      section:target::before {
        content: "";
        display: block;
        height: ${topOffset}px;
        margin: -${topOffset}px 0 0;
      }`)
      );
    }
    if (init) {
      window.dispatchEvent(headroomChanged);
    }
    init = true;
  }

  // initialize headroom
  var header = window.document.querySelector("#quarto-header");
  if (header && window.Headroom) {
    const headroom = new window.Headroom(header, {
      tolerance: 5,
      onPin: function () {
        const sidebars = window.document.querySelectorAll(
          ".sidebar, .headroom-target"
        );
        sidebars.forEach((sidebar) => {
          sidebar.classList.remove("sidebar-unpinned");
        });
        updateDocumentOffset();
      },
      onUnpin: function () {
        const sidebars = window.document.querySelectorAll(
          ".sidebar, .headroom-target"
        );
        sidebars.forEach((sidebar) => {
          sidebar.classList.add("sidebar-unpinned");
        });
        updateDocumentOffset();
      },
    });
    headroom.init();

    let frozen = false;
    window.quartoToggleHeadroom = function () {
      if (frozen) {
        headroom.unfreeze();
        frozen = false;
      } else {
        headroom.freeze();
        frozen = true;
      }
    };
  }

  window.addEventListener(
    "hashchange",
    function (e) {
      if (
        getComputedStyle(document.documentElement).scrollBehavior !== "smooth"
      ) {
        window.scrollTo(0, window.pageYOffset - headerOffset());
      }
    },
    false
  );

  // Observe size changes for the header
  const headerEl = window.document.querySelector("header.fixed-top");
  if (headerEl && window.ResizeObserver) {
    const observer = new window.ResizeObserver(
      updateDocumentOffsetWithoutAnimation
    );
    observer.observe(headerEl, {
      attributes: true,
      childList: true,
      characterData: true,
    });
  } else {
    window.addEventListener(
      "resize",
      throttle(updateDocumentOffsetWithoutAnimation, 50)
    );
  }
  setTimeout(updateDocumentOffsetWithoutAnimation, 250);

  // fixup index.html links if we aren't on the filesystem
  if (window.location.protocol !== "file:") {
    const links = window.document.querySelectorAll("a");
    for (let i = 0; i < links.length; i++) {
      if (links[i].href) {
        links[i].href = links[i].href.replace(/\/index\.html/, "/");
      }
    }

    // Fixup any sharing links that require urls
    // Append url to any sharing urls
    const sharingLinks = window.document.querySelectorAll(
      "a.sidebar-tools-main-item"
    );
    for (let i = 0; i < sharingLinks.length; i++) {
      const sharingLink = sharingLinks[i];
      const href = sharingLink.getAttribute("href");
      if (href) {
        sharingLink.setAttribute(
          "href",
          href.replace("|url|", window.location.href)
        );
      }
    }

    // Scroll the active navigation item into view, if necessary
    const navSidebar = window.document.querySelector("nav#quarto-sidebar");
    if (navSidebar) {
      // Find the active item
      const activeItem = navSidebar.querySelector("li.sidebar-item a.active");
      if (activeItem) {
        // Wait for the scroll height and height to resolve by observing size changes on the
        // nav element that is scrollable
        const resizeObserver = new ResizeObserver((_entries) => {
          // The bottom of the element
          const elBottom = activeItem.offsetTop;
          const viewBottom = navSidebar.scrollTop + navSidebar.clientHeight;

          // If the element height and scroll height are the same, then we are still loading
          if (viewBottom !== navSidebar.scrollHeight) {
            // Determine if the item isn't visible and scroll to it
            if (elBottom >= viewBottom) {
              navSidebar.scrollTop = elBottom;
            }

            // stop observing now since we've completed the scroll
            resizeObserver.unobserve(navSidebar);
          }
        });
        resizeObserver.observe(navSidebar);
      }
    }
  }
});
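The throttle helper above coalesces bursts of resize events into at most one handler call per wait window: the first call in a window schedules the timer, later calls in the same window are dropped. For readers more at home in Python, a rough analogue of the same pattern using threading.Timer (a sketch, not part of Quarto):

import threading

def throttle(func, wait):
    """Invoke func at most once per `wait`-second window (trailing edge)."""
    timer = None

    def wrapper(*args, **kwargs):
        nonlocal timer

        def later():
            nonlocal timer
            timer = None          # window closed; next call may schedule again
            func(*args, **kwargs)

        if timer is None:         # calls arriving while a timer is pending are dropped
            timer = threading.Timer(wait, later)
            timer.start()

    return wrapper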
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/css/chat_style-cai-chat-square.css
DELETED
@@ -1,21 +0,0 @@
@import url("file/css/chat_style-cai-chat.css");

.circle-bot, .circle-you {
  height: 90px;
  width: 60px;
  border-radius: 10px;
  background-color: #656565;
}

.circle-bot img, .circle-you img {
  border-radius: 8.333px;
}

.circle-you {
  background-color: #656565;
}

.message {
  padding-bottom: 30px;
  grid-template-columns: 70px minmax(0, 1fr);
}
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/priority.py
DELETED
@@ -1,60 +0,0 @@
# Copyright (c) OpenMMLab. All rights reserved.
from enum import Enum


class Priority(Enum):
    """Hook priority levels.

    +--------------+------------+
    | Level        | Value      |
    +==============+============+
    | HIGHEST      | 0          |
    +--------------+------------+
    | VERY_HIGH    | 10         |
    +--------------+------------+
    | HIGH         | 30         |
    +--------------+------------+
    | ABOVE_NORMAL | 40         |
    +--------------+------------+
    | NORMAL       | 50         |
    +--------------+------------+
    | BELOW_NORMAL | 60         |
    +--------------+------------+
    | LOW          | 70         |
    +--------------+------------+
    | VERY_LOW     | 90         |
    +--------------+------------+
    | LOWEST       | 100        |
    +--------------+------------+
    """

    HIGHEST = 0
    VERY_HIGH = 10
    HIGH = 30
    ABOVE_NORMAL = 40
    NORMAL = 50
    BELOW_NORMAL = 60
    LOW = 70
    VERY_LOW = 90
    LOWEST = 100


def get_priority(priority):
    """Get priority value.

    Args:
        priority (int or str or :obj:`Priority`): Priority.

    Returns:
        int: The priority value.
    """
    if isinstance(priority, int):
        if priority < 0 or priority > 100:
            raise ValueError('priority must be between 0 and 100')
        return priority
    elif isinstance(priority, Priority):
        return priority.value
    elif isinstance(priority, str):
        return Priority[priority.upper()].value
    else:
        raise TypeError('priority must be an integer or Priority enum value')
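get_priority normalizes all three accepted spellings to the integer value, which is what lets hooks be registered with an int, an enum member, or a case-insensitive string. In a stock mmcv install the same pair is importable as mmcv.runner.Priority / mmcv.runner.get_priority; a quick illustration against the code above:

assert get_priority(50) == 50                 # bare ints in 0..100 pass through
assert get_priority(Priority.HIGH) == 30      # enum members map to their value
assert get_priority('below_normal') == 60     # strings are upper-cased, then looked up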
spaces/Anuj-Panthri/imdb_review_sentiment/README.md
DELETED
@@ -1,13 +0,0 @@
---
title: Imdb Review Sentiment
emoji: 😻
colorFrom: purple
colorTo: indigo
sdk: gradio
sdk_version: 3.1.1
app_file: app.py
pinned: false
license: apache-2.0
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Apex-X/ROOPOK/roop/processors/frame/face_swapper.py
DELETED
@@ -1,100 +0,0 @@
from typing import Any, List, Callable
import cv2
import insightface
import threading

import roop.globals
import roop.processors.frame.core
from roop.core import update_status
from roop.face_analyser import get_one_face, get_many_faces, find_similar_face
from roop.face_reference import get_face_reference, set_face_reference, clear_face_reference
from roop.typing import Face, Frame
from roop.utilities import conditional_download, resolve_relative_path, is_image, is_video

FACE_SWAPPER = None
THREAD_LOCK = threading.Lock()
NAME = 'ROOP.FACE-SWAPPER'


def get_face_swapper() -> Any:
    global FACE_SWAPPER

    with THREAD_LOCK:
        if FACE_SWAPPER is None:
            model_path = resolve_relative_path('../models/inswapper_128.onnx')
            FACE_SWAPPER = insightface.model_zoo.get_model(model_path, providers=roop.globals.execution_providers)
    return FACE_SWAPPER


def clear_face_swapper() -> None:
    global FACE_SWAPPER

    FACE_SWAPPER = None


def pre_check() -> bool:
    download_directory_path = resolve_relative_path('../models')
    conditional_download(download_directory_path, ['https://huggingface.co/deepinsight/inswapper/resolve/main/inswapper_128.onnx'])
    return True


def pre_start() -> bool:
    if not is_image(roop.globals.source_path):
        update_status('Select an image for source path.', NAME)
        return False
    elif not get_one_face(cv2.imread(roop.globals.source_path)):
        update_status('No face in source path detected.', NAME)
        return False
    if not is_image(roop.globals.target_path) and not is_video(roop.globals.target_path):
        update_status('Select an image or video for target path.', NAME)
        return False
    return True


def post_process() -> None:
    clear_face_swapper()
    clear_face_reference()


def swap_face(source_face: Face, target_face: Face, temp_frame: Frame) -> Frame:
    return get_face_swapper().get(temp_frame, target_face, source_face, paste_back=True)


def process_frame(source_face: Face, reference_face: Face, temp_frame: Frame) -> Frame:
    if roop.globals.many_faces:
        many_faces = get_many_faces(temp_frame)
        if many_faces:
            for target_face in many_faces:
                temp_frame = swap_face(source_face, target_face, temp_frame)
    else:
        target_face = find_similar_face(temp_frame, reference_face)
        if target_face:
            temp_frame = swap_face(source_face, target_face, temp_frame)
    return temp_frame


def process_frames(source_path: str, temp_frame_paths: List[str], update: Callable[[], None]) -> None:
    source_face = get_one_face(cv2.imread(source_path))
    reference_face = None if roop.globals.many_faces else get_face_reference()
    for temp_frame_path in temp_frame_paths:
        temp_frame = cv2.imread(temp_frame_path)
        result = process_frame(source_face, reference_face, temp_frame)
        cv2.imwrite(temp_frame_path, result)
        if update:
            update()


def process_image(source_path: str, target_path: str, output_path: str) -> None:
    source_face = get_one_face(cv2.imread(source_path))
    target_frame = cv2.imread(target_path)
    reference_face = None if roop.globals.many_faces else get_one_face(target_frame, roop.globals.reference_face_position)
    result = process_frame(source_face, reference_face, target_frame)
    cv2.imwrite(output_path, result)


def process_video(source_path: str, temp_frame_paths: List[str]) -> None:
    if not roop.globals.many_faces and not get_face_reference():
        reference_frame = cv2.imread(temp_frame_paths[roop.globals.reference_frame_number])
        reference_face = get_one_face(reference_frame, roop.globals.reference_face_position)
        set_face_reference(reference_face)
    roop.processors.frame.core.process_video(source_path, temp_frame_paths, process_frames)
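For single images the module's entry point is process_image, which reads the source face once and swaps it into the target frame. The driver below is only a guess at minimal standalone use; roop normally populates these globals from its CLI, and the file paths are placeholders:

import roop.globals
from roop.processors.frame.face_swapper import pre_check, process_image

roop.globals.execution_providers = ['CPUExecutionProvider']
roop.globals.many_faces = True      # swap onto every detected face; no reference face needed
pre_check()                         # fetches inswapper_128.onnx on first use
process_image('source_face.jpg', 'target_photo.jpg', 'output.jpg')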
spaces/Arnx/MusicGenXvAKN/audiocraft/utils/autocast.py
DELETED
@@ -1,40 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import torch


class TorchAutocast:
    """TorchAutocast utility class.
    Allows you to enable and disable autocast. This is especially useful
    when dealing with different architectures and clusters with different
    levels of support.

    Args:
        enabled (bool): Whether to enable torch.autocast or not.
        args: Additional args for torch.autocast.
        kwargs: Additional kwargs for torch.autocast.
    """
    def __init__(self, enabled: bool, *args, **kwargs):
        self.autocast = torch.autocast(*args, **kwargs) if enabled else None

    def __enter__(self):
        if self.autocast is None:
            return
        try:
            self.autocast.__enter__()
        except RuntimeError:
            device = self.autocast.device
            dtype = self.autocast.fast_dtype
            raise RuntimeError(
                f"There was an error autocasting with dtype={dtype} device={device}\n"
                "If you are on the FAIR Cluster, you might need to use autocast_dtype=float16"
            )

    def __exit__(self, *args, **kwargs):
        if self.autocast is None:
            return
        self.autocast.__exit__(*args, **kwargs)
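Because the constructor forwards *args/**kwargs straight to torch.autocast, the wrapper is used exactly like the stock context manager but can be switched off with a single flag instead of restructuring the code. A small usage sketch:

import torch

use_cuda = torch.cuda.is_available()
device = 'cuda' if use_cuda else 'cpu'

model = torch.nn.Linear(8, 8).to(device)
x = torch.randn(4, 8, device=device)

# extra args flow through to torch.autocast; a no-op fallthrough when disabled
with TorchAutocast(enabled=use_cuda, device_type='cuda', dtype=torch.float16):
    y = model(x)  # float16 matmuls under CUDA, plain float32 otherwise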
spaces/Artrajz/vits-simple-api/bert_vits2/__init__.py
DELETED
@@ -1,2 +0,0 @@
from bert_vits2.bert_vits2 import Bert_VITS2
from bert_vits2 import text
spaces/Ash123/stable-diffusion-nano/app.py
DELETED
@@ -1,330 +0,0 @@
import gradio as gr
import jax
import jax.numpy as jnp
from diffusers import FlaxPNDMScheduler, FlaxStableDiffusionPipeline
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from share_btn import community_icon_html, loading_icon_html, share_js

DTYPE = jnp.float16

pipeline, pipeline_params = FlaxStableDiffusionPipeline.from_pretrained(
    "bguisard/stable-diffusion-nano-2-1",
    dtype=DTYPE,
)
if DTYPE != jnp.float32:
    # There is a known issue with schedulers when loading from a pre-trained
    # pipeline. We need the schedulers to always use float32.
    # See: https://github.com/huggingface/diffusers/issues/2155
    scheduler, scheduler_params = FlaxPNDMScheduler.from_pretrained(
        pretrained_model_name_or_path="bguisard/stable-diffusion-nano-2-1",
        subfolder="scheduler",
        dtype=jnp.float32,
    )
    pipeline_params["scheduler"] = scheduler_params
    pipeline.scheduler = scheduler


def generate_image(prompt: str, negative_prompt: str = "", inference_steps: int = 25, prng_seed: int = 0, guidance_scale: float = 9):
    rng = jax.random.PRNGKey(int(prng_seed))
    rng = jax.random.split(rng, jax.device_count())
    p_params = replicate(pipeline_params)

    num_samples = 1
    prompt_ids = pipeline.prepare_inputs([prompt] * num_samples)
    prompt_ids = shard(prompt_ids)

    if negative_prompt == "":
        images = pipeline(
            prompt_ids=prompt_ids,
            params=p_params,
            prng_seed=rng,
            height=128,
            width=128,
            num_inference_steps=int(inference_steps),
            guidance_scale=float(guidance_scale),
            jit=True,
        ).images
    else:
        neg_prompt_ids = pipeline.prepare_inputs(
            [negative_prompt] * num_samples)
        neg_prompt_ids = shard(neg_prompt_ids)
        images = pipeline(
            prompt_ids=prompt_ids,
            params=p_params,
            prng_seed=rng,
            height=128,
            width=128,
            num_inference_steps=int(inference_steps),
            neg_prompt_ids=neg_prompt_ids,
            guidance_scale=float(guidance_scale),
            jit=True,
        ).images
    images = images.reshape((num_samples,) + images.shape[-3:])
    images = pipeline.numpy_to_pil(images)
    return images[0]

examples = [
    ["A watercolor painting of a bird"],
    ["A watercolor painting of an otter"]
]
css = """
.gradio-container {
  font-family: 'IBM Plex Sans', sans-serif;
  max-width: 730px!important;
  margin: auto;
  padding-top: 1.5rem;
}
.gr-button {
  color: white;
  border-color: black;
  background: black;
}
input[type='range'] {
  accent-color: black;
}
.dark input[type='range'] {
  accent-color: #dfdfdf;
}
.container {
  max-width: 730px;
  margin: auto;
  padding-top: 1.5rem;
}
#gallery {
  min-height: 22rem;
  margin-bottom: 15px;
  margin-left: auto;
  margin-right: auto;
  border-bottom-right-radius: .5rem !important;
  border-bottom-left-radius: .5rem !important;
}
#gallery>div>.h-full {
  min-height: 20rem;
}
.details:hover {
  text-decoration: underline;
}
.gr-button {
  white-space: nowrap;
}
.gr-button:focus {
  border-color: rgb(147 197 253 / var(--tw-border-opacity));
  outline: none;
  box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
  --tw-border-opacity: 1;
  --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
  --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px var(--tw-ring-offset-width)) var(--tw-ring-color);
  --tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity));
  --tw-ring-opacity: .5;
}
#advanced-btn {
  font-size: .7rem !important;
  line-height: 19px;
  margin-top: 12px;
  margin-bottom: 12px;
  padding: 2px 8px;
  border-radius: 14px !important;
}
#advanced-options {
  display: none;
  margin-bottom: 20px;
}
.footer {
  margin-bottom: 45px;
  margin-top: 35px;
  text-align: center;
  border-bottom: 1px solid #e5e5e5;
}
.footer>p {
  font-size: .8rem;
  display: inline-block;
  padding: 0 10px;
  transform: translateY(10px);
  background: white;
}
.dark .footer {
  border-color: #303030;
}
.dark .footer>p {
  background: #0b0f19;
}
.acknowledgments h4{
  margin: 1.25em 0 .25em 0;
  font-weight: bold;
  font-size: 115%;
}
.animate-spin {
  animation: spin 1s linear infinite;
}
@keyframes spin {
  from {
    transform: rotate(0deg);
  }
  to {
    transform: rotate(360deg);
  }
}
#share-btn-container {
  display: flex; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem;
  margin-top: 10px;
  margin-left: auto;
}
#share-btn {
  all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important;right:0;
}
#share-btn * {
  all: unset;
}
#share-btn-container div:nth-child(-n+2){
  width: auto !important;
  min-height: 0px !important;
}
#share-btn-container .wrap {
  display: none !important;
}
.share_button {
  color:#6366f1!important;
}
.gr-form{
  flex: 1 1 50%; border-top-right-radius: 0; border-bottom-right-radius: 0;
}
#prompt-text-input, #negative-prompt-text-input{padding: .45rem 0.625rem}
.image_duplication{position: absolute; width: 100px; left: 50px}

"""

block = gr.Blocks(theme="gradio/soft", css=css)

with block as demo:
    gr.HTML(
        """
            <div style="text-align: center; margin: 0 auto;">
              <div
                style="
                  display: inline-flex;
                  align-items: center;
                  gap: 0.8rem;
                  font-size: 1.75rem;
                "
              >
                <svg
                  width="0.65em"
                  height="0.65em"
                  viewBox="0 0 115 115"
                  fill="none"
                  xmlns="http://www.w3.org/2000/svg"
                >
                  <rect width="23" height="23" fill="white"></rect>
                  <rect y="69" width="23" height="23" fill="white"></rect>
                  <rect x="23" width="23" height="23" fill="#AEAEAE"></rect>
                  <rect x="23" y="69" width="23" height="23" fill="#AEAEAE"></rect>
                  <rect x="46" width="23" height="23" fill="white"></rect>
                  <rect x="46" y="69" width="23" height="23" fill="white"></rect>
                  <rect x="69" width="23" height="23" fill="black"></rect>
                  <rect x="69" y="69" width="23" height="23" fill="black"></rect>
                  <rect x="92" width="23" height="23" fill="#D9D9D9"></rect>
                  <rect x="92" y="69" width="23" height="23" fill="#AEAEAE"></rect>
                  <rect x="115" y="46" width="23" height="23" fill="white"></rect>
                  <rect x="115" y="115" width="23" height="23" fill="white"></rect>
                  <rect x="115" y="69" width="23" height="23" fill="#D9D9D9"></rect>
                  <rect x="92" y="46" width="23" height="23" fill="#AEAEAE"></rect>
                  <rect x="92" y="115" width="23" height="23" fill="#AEAEAE"></rect>
                  <rect x="92" y="69" width="23" height="23" fill="white"></rect>
                  <rect x="69" y="46" width="23" height="23" fill="white"></rect>
                  <rect x="69" y="115" width="23" height="23" fill="white"></rect>
                  <rect x="69" y="69" width="23" height="23" fill="#D9D9D9"></rect>
                  <rect x="46" y="46" width="23" height="23" fill="black"></rect>
                  <rect x="46" y="115" width="23" height="23" fill="black"></rect>
                  <rect x="46" y="69" width="23" height="23" fill="black"></rect>
                  <rect x="23" y="46" width="23" height="23" fill="#D9D9D9"></rect>
                  <rect x="23" y="115" width="23" height="23" fill="#AEAEAE"></rect>
                  <rect x="23" y="69" width="23" height="23" fill="black"></rect>
                </svg>
                <h1 style="font-weight: 900; margin-bottom: 7px;margin-top:5px">
                  Stable Diffusion Nano Demo
                </h1>
              </div>
              <p style="margin-bottom: 10px; font-size: 94%; line-height: 23px;">
                Stable Diffusion Nano was built during the <a style="text-decoration: underline;" href="https://github.com/huggingface/community-events/tree/main/jax-controlnet-sprint">JAX/Diffusers community sprint 🧨</a> based on Stable Diffusion 2.1 and finetuned on 128x128 images for fast prototyping. <br>
              </p>
            </div>
        """
    )
    with gr.Group():
        with gr.Box():
            with gr.Row(elem_id="prompt-container").style(equal_height=True):
                with gr.Column(scale=2):
                    prompt_input = gr.Textbox(
                        label="Enter your prompt",
                        max_lines=1,
                        placeholder="Enter your prompt",
                        elem_id="prompt-text-input",
                        show_label=False,
                    )
                    negative = gr.Textbox(
                        label="Enter your negative prompt",
                        max_lines=1,
                        placeholder="Enter a negative prompt",
                        elem_id="negative-prompt-text-input",
                        show_label=False,
                    )
                btn = gr.Button("Generate image", label="Primary Button", variant="primary")

        gallery = gr.Image(
            label="Generated images", show_label=False, elem_id="gallery"
        )

    with gr.Row():
        with gr.Column(scale=2):
            with gr.Accordion("Advanced settings"):
                seed_input = gr.inputs.Number(default=0, label="Seed")
                inf_steps_input = gr.inputs.Slider(
                    minimum=1, maximum=100, default=25, step=1, label="Inference Steps"
                )
                guidance_scale = gr.inputs.Slider(
                    label="Guidance Scale", minimum=0, maximum=50, default=9, step=0.1
                )
        with gr.Column(scale=1):
            # advanced_button = gr.Button("Advanced options", elem_id="advanced-btn")
            ex = gr.Examples(examples=examples,
                             fn=generate_image,
                             inputs=[prompt_input, negative, inf_steps_input, seed_input, guidance_scale],
                             outputs=[gallery],
                             cache_examples=False)
            ex.dataset.headers = [""]

    share_button = gr.Button("Share to community", elem_classes="share_button")

    negative.submit(generate_image, inputs=[
        prompt_input, negative, inf_steps_input, seed_input, guidance_scale], outputs=[gallery], postprocess=False)
    prompt_input.submit(generate_image, inputs=[
        prompt_input, negative, inf_steps_input, seed_input, guidance_scale], outputs=[gallery], postprocess=False)
    btn.click(generate_image, inputs=[prompt_input, negative, inf_steps_input,
              seed_input, guidance_scale], outputs=[gallery], postprocess=False)

    share_button.click(
        None,
        [],
        [],
        _js=share_js,
    )
    gr.Markdown("Model by Stable Diffusion Nano Team", elem_classes="footer")
    with gr.Accordion(label="License", open=False):
        gr.HTML(
            """
            <div class="acknowledgments">
            <p><h4>LICENSE</h4>
            The model is licensed with a <a href="https://huggingface.co/stabilityai/stable-diffusion-2/blob/main/LICENSE-MODEL" style="text-decoration: underline;" target="_blank">CreativeML OpenRAIL++</a> license. The authors claim no rights on the outputs you generate, you are free to use them and are accountable for their use which must not go against the provisions set in this license. The license forbids you from sharing any content that violates any laws, produce any harm to a person, disseminate any personal information that would be meant for harm, spread misinformation and target vulnerable groups. For the full list of restrictions please <a href="https://huggingface.co/spaces/CompVis/stable-diffusion-license" target="_blank" style="text-decoration: underline;" target="_blank">read the license</a></p>
            <p><h4>Biases and content acknowledgment</h4>
            Despite how impressive being able to turn text into image is, beware to the fact that this model may output content that reinforces or exacerbates societal biases, as well as realistic faces, pornography and violence. The model was trained on the <a href="https://huggingface.co/datasets/laion/laion2B-en-aesthetic" style="text-decoration: underline;" target="_blank">LAION-2B Aesthetic dataset</a>, which scraped non-curated image-text-pairs from the internet (the exception being the removal of illegal content) and is meant for research purposes. You can read more in the <a href="https://huggingface.co/bguisard/stable-diffusion-nano-2-1" style="text-decoration: underline;" target="_blank">model card</a></p>
            </div>
            """
        )
demo.queue(concurrency_count=10)
demo.launch()
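The UI funnels everything through generate_image, which replicates the params and pmaps the sampling loop across however many accelerator cores JAX sees, so the function can also be called headlessly once the module has loaded the weights. A sketch, assuming at least one JAX device and the checkpoint download succeeding; the output filename is just an example:

image = generate_image(
    "A watercolor painting of a bird",
    negative_prompt="blurry, low quality",
    inference_steps=25,
    prng_seed=0,
    guidance_scale=9,
)
image.save("bird_128.png")  # the nano checkpoint is finetuned for 128x128 output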
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/certifi/core.py
DELETED
@@ -1,108 +0,0 @@
"""
certifi.py
~~~~~~~~~~

This module returns the installation location of cacert.pem or its contents.
"""
import sys


if sys.version_info >= (3, 11):

    from importlib.resources import as_file, files

    _CACERT_CTX = None
    _CACERT_PATH = None

    def where() -> str:
        # This is slightly terrible, but we want to delay extracting the file
        # in cases where we're inside of a zipimport situation until someone
        # actually calls where(), but we don't want to re-extract the file
        # on every call of where(), so we'll do it once then store it in a
        # global variable.
        global _CACERT_CTX
        global _CACERT_PATH
        if _CACERT_PATH is None:
            # This is slightly janky, the importlib.resources API wants you to
            # manage the cleanup of this file, so it doesn't actually return a
            # path, it returns a context manager that will give you the path
            # when you enter it and will do any cleanup when you leave it. In
            # the common case of not needing a temporary file, it will just
            # return the file system location and the __exit__() is a no-op.
            #
            # We also have to hold onto the actual context manager, because
            # it will do the cleanup whenever it gets garbage collected, so
            # we will also store that at the global level as well.
            _CACERT_CTX = as_file(files("pip._vendor.certifi").joinpath("cacert.pem"))
            _CACERT_PATH = str(_CACERT_CTX.__enter__())

        return _CACERT_PATH

    def contents() -> str:
        return files("pip._vendor.certifi").joinpath("cacert.pem").read_text(encoding="ascii")

elif sys.version_info >= (3, 7):

    from importlib.resources import path as get_path, read_text

    _CACERT_CTX = None
    _CACERT_PATH = None

    def where() -> str:
        # This is slightly terrible, but we want to delay extracting the
        # file in cases where we're inside of a zipimport situation until
        # someone actually calls where(), but we don't want to re-extract
        # the file on every call of where(), so we'll do it once then store
        # it in a global variable.
        global _CACERT_CTX
        global _CACERT_PATH
        if _CACERT_PATH is None:
            # This is slightly janky, the importlib.resources API wants you
            # to manage the cleanup of this file, so it doesn't actually
            # return a path, it returns a context manager that will give
            # you the path when you enter it and will do any cleanup when
            # you leave it. In the common case of not needing a temporary
            # file, it will just return the file system location and the
            # __exit__() is a no-op.
            #
            # We also have to hold onto the actual context manager, because
            # it will do the cleanup whenever it gets garbage collected, so
            # we will also store that at the global level as well.
            _CACERT_CTX = get_path("pip._vendor.certifi", "cacert.pem")
            _CACERT_PATH = str(_CACERT_CTX.__enter__())

        return _CACERT_PATH

    def contents() -> str:
        return read_text("pip._vendor.certifi", "cacert.pem", encoding="ascii")

else:
    import os
    import types
    from typing import Union

    Package = Union[types.ModuleType, str]
    Resource = Union[str, "os.PathLike"]

    # This fallback will work for Python versions prior to 3.7 that lack the
    # importlib.resources module but relies on the existing `where` function
    # so won't address issues with environments like PyOxidizer that don't set
    # __file__ on modules.
    def read_text(
        package: Package,
        resource: Resource,
        encoding: str = 'utf-8',
        errors: str = 'strict'
    ) -> str:
        with open(where(), encoding=encoding) as data:
            return data.read()

    # If we don't have importlib.resources, then we will just do the old logic
    # of assuming we're on the filesystem and munge the path directly.
    def where() -> str:
        f = os.path.dirname(__file__)

        return os.path.join(f, "cacert.pem")

    def contents() -> str:
        return read_text("pip._vendor.certifi", "cacert.pem", encoding="ascii")
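Both entry points exist so callers can hand the CA bundle to TLS machinery either as a file path (where) or as PEM text (contents). The vendored copy here is reached as pip._vendor.certifi and is for pip's internal use; the same pattern with the standalone certifi package looks like:

import ssl
import certifi

# build an SSL context that trusts certifi's bundled root certificates
ctx = ssl.create_default_context(cafile=certifi.where())
print(certifi.contents()[:64])  # beginning of the PEM-encoded root bundle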
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/_mapping.py
DELETED
@@ -1,23 +0,0 @@
-# Automatically generated by scripts/gen_mapfiles.py.
-# DO NOT EDIT BY HAND; run `make mapfiles` instead.
-
-FORMATTERS = {
-    'BBCodeFormatter': ('pygments.formatters.bbcode', 'BBCode', ('bbcode', 'bb'), (), 'Format tokens with BBcodes. These formatting codes are used by many bulletin boards, so you can highlight your sourcecode with pygments before posting it there.'),
-    'BmpImageFormatter': ('pygments.formatters.img', 'img_bmp', ('bmp', 'bitmap'), ('*.bmp',), 'Create a bitmap image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
-    'GifImageFormatter': ('pygments.formatters.img', 'img_gif', ('gif',), ('*.gif',), 'Create a GIF image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
-    'GroffFormatter': ('pygments.formatters.groff', 'groff', ('groff', 'troff', 'roff'), (), 'Format tokens with groff escapes to change their color and font style.'),
-    'HtmlFormatter': ('pygments.formatters.html', 'HTML', ('html',), ('*.html', '*.htm'), "Format tokens as HTML 4 ``<span>`` tags within a ``<pre>`` tag, wrapped in a ``<div>`` tag. The ``<div>``'s CSS class can be set by the `cssclass` option."),
-    'IRCFormatter': ('pygments.formatters.irc', 'IRC', ('irc', 'IRC'), (), 'Format tokens with IRC color sequences'),
-    'ImageFormatter': ('pygments.formatters.img', 'img', ('img', 'IMG', 'png'), ('*.png',), 'Create a PNG image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
-    'JpgImageFormatter': ('pygments.formatters.img', 'img_jpg', ('jpg', 'jpeg'), ('*.jpg',), 'Create a JPEG image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
-    'LatexFormatter': ('pygments.formatters.latex', 'LaTeX', ('latex', 'tex'), ('*.tex',), 'Format tokens as LaTeX code. This needs the `fancyvrb` and `color` standard packages.'),
-    'NullFormatter': ('pygments.formatters.other', 'Text only', ('text', 'null'), ('*.txt',), 'Output the text unchanged without any formatting.'),
-    'PangoMarkupFormatter': ('pygments.formatters.pangomarkup', 'Pango Markup', ('pango', 'pangomarkup'), (), 'Format tokens as Pango Markup code. It can then be rendered to an SVG.'),
-    'RawTokenFormatter': ('pygments.formatters.other', 'Raw tokens', ('raw', 'tokens'), ('*.raw',), 'Format tokens as a raw representation for storing token streams.'),
-    'RtfFormatter': ('pygments.formatters.rtf', 'RTF', ('rtf',), ('*.rtf',), 'Format tokens as RTF markup. This formatter automatically outputs full RTF documents with color information and other useful stuff. Perfect for Copy and Paste into Microsoft(R) Word(R) documents.'),
-    'SvgFormatter': ('pygments.formatters.svg', 'SVG', ('svg',), ('*.svg',), 'Format tokens as an SVG graphics file. This formatter is still experimental. Each line of code is a ``<text>`` element with explicit ``x`` and ``y`` coordinates containing ``<tspan>`` elements with the individual token styles.'),
-    'Terminal256Formatter': ('pygments.formatters.terminal256', 'Terminal256', ('terminal256', 'console256', '256'), (), 'Format tokens with ANSI color sequences, for output in a 256-color terminal or console. Like in `TerminalFormatter` color sequences are terminated at newlines, so that paging the output works correctly.'),
-    'TerminalFormatter': ('pygments.formatters.terminal', 'Terminal', ('terminal', 'console'), (), 'Format tokens with ANSI color sequences, for output in a text console. Color sequences are terminated at newlines, so that paging the output works correctly.'),
-    'TerminalTrueColorFormatter': ('pygments.formatters.terminal256', 'TerminalTrueColor', ('terminal16m', 'console16m', '16m'), (), 'Format tokens with ANSI color sequences, for output in a true-color terminal or console. Like in `TerminalFormatter` color sequences are terminated at newlines, so that paging the output works correctly.'),
-    'TestcaseFormatter': ('pygments.formatters.other', 'Testcase', ('testcase',), (), 'Format tokens as appropriate for a new testcase.'),
-}
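Each FORMATTERS value is (module, human-readable name, aliases, file patterns, docstring), so Pygments can resolve a formatter by alias without importing every formatter module up front. A minimal sketch of reaching this mapping through the public API (assumes a regular pygments install rather than pip's vendored copy, which is internal):

    from pygments import highlight
    from pygments.lexers import PythonLexer
    from pygments.formatters import get_formatter_by_name

    # 'html' is one of the aliases registered for HtmlFormatter above;
    # the class is imported lazily on first lookup.
    formatter = get_formatter_by_name('html')
    print(highlight('print(1 + 1)', PythonLexer(), formatter))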
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/webencodings/__init__.py
DELETED
@@ -1,342 +0,0 @@
-# coding: utf-8
-"""
-
-    webencodings
-    ~~~~~~~~~~~~
-
-    This is a Python implementation of the `WHATWG Encoding standard
-    <http://encoding.spec.whatwg.org/>`. See README for details.
-
-    :copyright: Copyright 2012 by Simon Sapin
-    :license: BSD, see LICENSE for details.
-
-"""
-
-from __future__ import unicode_literals
-
-import codecs
-
-from .labels import LABELS
-
-
-VERSION = '0.5.1'
-
-
-# Some names in Encoding are not valid Python aliases. Remap these.
-PYTHON_NAMES = {
-    'iso-8859-8-i': 'iso-8859-8',
-    'x-mac-cyrillic': 'mac-cyrillic',
-    'macintosh': 'mac-roman',
-    'windows-874': 'cp874'}
-
-CACHE = {}
-
-
-def ascii_lower(string):
-    r"""Transform (only) ASCII letters to lower case: A-Z is mapped to a-z.
-
-    :param string: A Unicode string.
-    :returns: A new Unicode string.
-
-    This is used for `ASCII case-insensitive
-    <http://encoding.spec.whatwg.org/#ascii-case-insensitive>`_
-    matching of encoding labels.
-    The same matching is also used, among other things,
-    for `CSS keywords <http://dev.w3.org/csswg/css-values/#keywords>`_.
-
-    This is different from the :meth:`~py:str.lower` method of Unicode strings
-    which also affects non-ASCII characters,
-    sometimes mapping them into the ASCII range:
-
-        >>> keyword = u'Bac\N{KELVIN SIGN}ground'
-        >>> assert keyword.lower() == u'background'
-        >>> assert ascii_lower(keyword) != keyword.lower()
-        >>> assert ascii_lower(keyword) == u'bac\N{KELVIN SIGN}ground'
-
-    """
-    # This turns out to be faster than unicode.translate()
-    return string.encode('utf8').lower().decode('utf8')
-
-
-def lookup(label):
-    """
-    Look for an encoding by its label.
-    This is the spec’s `get an encoding
-    <http://encoding.spec.whatwg.org/#concept-encoding-get>`_ algorithm.
-    Supported labels are listed there.
-
-    :param label: A string.
-    :returns:
-        An :class:`Encoding` object, or :obj:`None` for an unknown label.
-
-    """
-    # Only strip ASCII whitespace: U+0009, U+000A, U+000C, U+000D, and U+0020.
-    label = ascii_lower(label.strip('\t\n\f\r '))
-    name = LABELS.get(label)
-    if name is None:
-        return None
-    encoding = CACHE.get(name)
-    if encoding is None:
-        if name == 'x-user-defined':
-            from .x_user_defined import codec_info
-        else:
-            python_name = PYTHON_NAMES.get(name, name)
-            # Any python_name value that gets to here should be valid.
-            codec_info = codecs.lookup(python_name)
-        encoding = Encoding(name, codec_info)
-        CACHE[name] = encoding
-    return encoding
-
-
-def _get_encoding(encoding_or_label):
-    """
-    Accept either an encoding object or label.
-
-    :param encoding: An :class:`Encoding` object or a label string.
-    :returns: An :class:`Encoding` object.
-    :raises: :exc:`~exceptions.LookupError` for an unknown label.
-
-    """
-    if hasattr(encoding_or_label, 'codec_info'):
-        return encoding_or_label
-
-    encoding = lookup(encoding_or_label)
-    if encoding is None:
-        raise LookupError('Unknown encoding label: %r' % encoding_or_label)
-    return encoding
-
-
-class Encoding(object):
-    """Represents a character encoding such as UTF-8,
-    that can be used for decoding or encoding.
-
-    .. attribute:: name
-
-        Canonical name of the encoding
-
-    .. attribute:: codec_info
-
-        The actual implementation of the encoding,
-        a stdlib :class:`~codecs.CodecInfo` object.
-        See :func:`codecs.register`.
-
-    """
-    def __init__(self, name, codec_info):
-        self.name = name
-        self.codec_info = codec_info
-
-    def __repr__(self):
-        return '<Encoding %s>' % self.name
-
-
-#: The UTF-8 encoding. Should be used for new content and formats.
-UTF8 = lookup('utf-8')
-
-_UTF16LE = lookup('utf-16le')
-_UTF16BE = lookup('utf-16be')
-
-
-def decode(input, fallback_encoding, errors='replace'):
-    """
-    Decode a single string.
-
-    :param input: A byte string
-    :param fallback_encoding:
-        An :class:`Encoding` object or a label string.
-        The encoding to use if :obj:`input` does not have a BOM.
-    :param errors: Type of error handling. See :func:`codecs.register`.
-    :raises: :exc:`~exceptions.LookupError` for an unknown encoding label.
-    :return:
-        A ``(output, encoding)`` tuple of a Unicode string
-        and an :obj:`Encoding`.
-
-    """
-    # Fail early if `encoding` is an invalid label.
-    fallback_encoding = _get_encoding(fallback_encoding)
-    bom_encoding, input = _detect_bom(input)
-    encoding = bom_encoding or fallback_encoding
-    return encoding.codec_info.decode(input, errors)[0], encoding
-
-
-def _detect_bom(input):
-    """Return (bom_encoding, input), with any BOM removed from the input."""
-    if input.startswith(b'\xFF\xFE'):
-        return _UTF16LE, input[2:]
-    if input.startswith(b'\xFE\xFF'):
-        return _UTF16BE, input[2:]
-    if input.startswith(b'\xEF\xBB\xBF'):
-        return UTF8, input[3:]
-    return None, input
-
-
-def encode(input, encoding=UTF8, errors='strict'):
-    """
-    Encode a single string.
-
-    :param input: A Unicode string.
-    :param encoding: An :class:`Encoding` object or a label string.
-    :param errors: Type of error handling. See :func:`codecs.register`.
-    :raises: :exc:`~exceptions.LookupError` for an unknown encoding label.
-    :return: A byte string.
-
-    """
-    return _get_encoding(encoding).codec_info.encode(input, errors)[0]
-
-
-def iter_decode(input, fallback_encoding, errors='replace'):
-    """
-    "Pull"-based decoder.
-
-    :param input:
-        An iterable of byte strings.
-
-        The input is first consumed just enough to determine the encoding
-        based on the presence of a BOM,
-        then consumed on demand when the return value is.
-    :param fallback_encoding:
-        An :class:`Encoding` object or a label string.
-        The encoding to use if :obj:`input` does not have a BOM.
-    :param errors: Type of error handling. See :func:`codecs.register`.
-    :raises: :exc:`~exceptions.LookupError` for an unknown encoding label.
-    :returns:
-        An ``(output, encoding)`` tuple.
-        :obj:`output` is an iterable of Unicode strings,
-        :obj:`encoding` is the :obj:`Encoding` that is being used.
-
-    """
-
-    decoder = IncrementalDecoder(fallback_encoding, errors)
-    generator = _iter_decode_generator(input, decoder)
-    encoding = next(generator)
-    return generator, encoding
-
-
-def _iter_decode_generator(input, decoder):
-    """Return a generator that first yields the :obj:`Encoding`,
-    then yields output chunks as Unicode strings.
-
-    """
-    decode = decoder.decode
-    input = iter(input)
-    for chunk in input:
-        output = decode(chunk)
-        if output:
-            assert decoder.encoding is not None
-            yield decoder.encoding
-            yield output
-            break
-    else:
-        # Input exhausted without determining the encoding
-        output = decode(b'', final=True)
-        assert decoder.encoding is not None
-        yield decoder.encoding
-        if output:
-            yield output
-        return
-
-    for chunk in input:
-        output = decode(chunk)
-        if output:
-            yield output
-    output = decode(b'', final=True)
-    if output:
-        yield output
-
-
-def iter_encode(input, encoding=UTF8, errors='strict'):
-    """
-    “Pull”-based encoder.
-
-    :param input: An iterable of Unicode strings.
-    :param encoding: An :class:`Encoding` object or a label string.
-    :param errors: Type of error handling. See :func:`codecs.register`.
-    :raises: :exc:`~exceptions.LookupError` for an unknown encoding label.
-    :returns: An iterable of byte strings.
-
-    """
-    # Fail early if `encoding` is an invalid label.
-    encode = IncrementalEncoder(encoding, errors).encode
-    return _iter_encode_generator(input, encode)
-
-
-def _iter_encode_generator(input, encode):
-    for chunk in input:
-        output = encode(chunk)
-        if output:
-            yield output
-    output = encode('', final=True)
-    if output:
-        yield output
-
-
-class IncrementalDecoder(object):
-    """
-    “Push”-based decoder.
-
-    :param fallback_encoding:
-        An :class:`Encoding` object or a label string.
-        The encoding to use if :obj:`input` does not have a BOM.
-    :param errors: Type of error handling. See :func:`codecs.register`.
-    :raises: :exc:`~exceptions.LookupError` for an unknown encoding label.
-
-    """
-    def __init__(self, fallback_encoding, errors='replace'):
-        # Fail early if `encoding` is an invalid label.
-        self._fallback_encoding = _get_encoding(fallback_encoding)
-        self._errors = errors
-        self._buffer = b''
-        self._decoder = None
-        #: The actual :class:`Encoding` that is being used,
-        #: or :obj:`None` if that is not determined yet.
-        #: (I.e. if there is not enough input yet to determine
-        #: if there is a BOM.)
-        self.encoding = None  # Not known yet.
-
-    def decode(self, input, final=False):
-        """Decode one chunk of the input.
-
-        :param input: A byte string.
-        :param final:
-            Indicate that no more input is available.
-            Must be :obj:`True` if this is the last call.
-        :returns: A Unicode string.
-
-        """
-        decoder = self._decoder
-        if decoder is not None:
-            return decoder(input, final)
-
-        input = self._buffer + input
-        encoding, input = _detect_bom(input)
-        if encoding is None:
-            if len(input) < 3 and not final:  # Not enough data yet.
-                self._buffer = input
-                return ''
-            else:  # No BOM
-                encoding = self._fallback_encoding
-        decoder = encoding.codec_info.incrementaldecoder(self._errors).decode
-        self._decoder = decoder
-        self.encoding = encoding
-        return decoder(input, final)
-
-
-class IncrementalEncoder(object):
-    """
-    “Push”-based encoder.
-
-    :param encoding: An :class:`Encoding` object or a label string.
-    :param errors: Type of error handling. See :func:`codecs.register`.
-    :raises: :exc:`~exceptions.LookupError` for an unknown encoding label.
-
-    .. method:: encode(input, final=False)
-
-        :param input: A Unicode string.
-        :param final:
-            Indicate that no more input is available.
-            Must be :obj:`True` if this is the last call.
-        :returns: A byte string.
-
-    """
-    def __init__(self, encoding=UTF8, errors='strict'):
-        encoding = _get_encoding(encoding)
-        self.encode = encoding.codec_info.incrementalencoder(errors).encode
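The core idea above is that a BOM, when present, overrides the caller's fallback label, and labels are matched ASCII-case-insensitively against the WHATWG table. A minimal sketch, assuming the standalone webencodings package is importable under its usual name (pip's copy lives under pip._vendor):

    from webencodings import decode, encode

    # A UTF-16LE BOM (FF FE) overrides the fallback label.
    text, enc = decode(b'\xff\xfeh\x00i\x00', fallback_encoding='latin1')
    print(text, enc)   # hi <Encoding utf-16le>

    # Without a BOM the fallback label wins; per the WHATWG table,
    # 'latin1' (matched case-insensitively) is a label for windows-1252.
    text, enc = decode(b'caf\xe9', 'Latin1')
    print(text, enc)   # café <Encoding windows-1252>

    # encode() defaults to UTF-8.
    assert encode('café') == b'caf\xc3\xa9'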
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/glob.py
DELETED
@@ -1,167 +0,0 @@
-"""
-Filename globbing utility. Mostly a copy of `glob` from Python 3.5.
-
-Changes include:
- * `yield from` and PEP3102 `*` removed.
- * Hidden files are not ignored.
-"""
-
-import os
-import re
-import fnmatch
-
-__all__ = ["glob", "iglob", "escape"]
-
-
-def glob(pathname, recursive=False):
-    """Return a list of paths matching a pathname pattern.
-
-    The pattern may contain simple shell-style wildcards a la
-    fnmatch. However, unlike fnmatch, filenames starting with a
-    dot are special cases that are not matched by '*' and '?'
-    patterns.
-
-    If recursive is true, the pattern '**' will match any files and
-    zero or more directories and subdirectories.
-    """
-    return list(iglob(pathname, recursive=recursive))
-
-
-def iglob(pathname, recursive=False):
-    """Return an iterator which yields the paths matching a pathname pattern.
-
-    The pattern may contain simple shell-style wildcards a la
-    fnmatch. However, unlike fnmatch, filenames starting with a
-    dot are special cases that are not matched by '*' and '?'
-    patterns.
-
-    If recursive is true, the pattern '**' will match any files and
-    zero or more directories and subdirectories.
-    """
-    it = _iglob(pathname, recursive)
-    if recursive and _isrecursive(pathname):
-        s = next(it)  # skip empty string
-        assert not s
-    return it
-
-
-def _iglob(pathname, recursive):
-    dirname, basename = os.path.split(pathname)
-    glob_in_dir = glob2 if recursive and _isrecursive(basename) else glob1
-
-    if not has_magic(pathname):
-        if basename:
-            if os.path.lexists(pathname):
-                yield pathname
-        else:
-            # Patterns ending with a slash should match only directories
-            if os.path.isdir(dirname):
-                yield pathname
-        return
-
-    if not dirname:
-        yield from glob_in_dir(dirname, basename)
-        return
-    # `os.path.split()` returns the argument itself as a dirname if it is a
-    # drive or UNC path. Prevent an infinite recursion if a drive or UNC path
-    # contains magic characters (i.e. r'\\?\C:').
-    if dirname != pathname and has_magic(dirname):
-        dirs = _iglob(dirname, recursive)
-    else:
-        dirs = [dirname]
-    if not has_magic(basename):
-        glob_in_dir = glob0
-    for dirname in dirs:
-        for name in glob_in_dir(dirname, basename):
-            yield os.path.join(dirname, name)
-
-
-# These 2 helper functions non-recursively glob inside a literal directory.
-# They return a list of basenames. `glob1` accepts a pattern while `glob0`
-# takes a literal basename (so it only has to check for its existence).
-
-
-def glob1(dirname, pattern):
-    if not dirname:
-        if isinstance(pattern, bytes):
-            dirname = os.curdir.encode('ASCII')
-        else:
-            dirname = os.curdir
-    try:
-        names = os.listdir(dirname)
-    except OSError:
-        return []
-    return fnmatch.filter(names, pattern)
-
-
-def glob0(dirname, basename):
-    if not basename:
-        # `os.path.split()` returns an empty basename for paths ending with a
-        # directory separator. 'q*x/' should match only directories.
-        if os.path.isdir(dirname):
-            return [basename]
-    else:
-        if os.path.lexists(os.path.join(dirname, basename)):
-            return [basename]
-    return []
-
-
-# This helper function recursively yields relative pathnames inside a literal
-# directory.
-
-
-def glob2(dirname, pattern):
-    assert _isrecursive(pattern)
-    yield pattern[:0]
-    for x in _rlistdir(dirname):
-        yield x
-
-
-# Recursively yields relative pathnames inside a literal directory.
-def _rlistdir(dirname):
-    if not dirname:
-        if isinstance(dirname, bytes):
-            dirname = os.curdir.encode('ASCII')
-        else:
-            dirname = os.curdir
-    try:
-        names = os.listdir(dirname)
-    except os.error:
-        return
-    for x in names:
-        yield x
-        path = os.path.join(dirname, x) if dirname else x
-        for y in _rlistdir(path):
-            yield os.path.join(x, y)
-
-
-magic_check = re.compile('([*?[])')
-magic_check_bytes = re.compile(b'([*?[])')
-
-
-def has_magic(s):
-    if isinstance(s, bytes):
-        match = magic_check_bytes.search(s)
-    else:
-        match = magic_check.search(s)
-    return match is not None
-
-
-def _isrecursive(pattern):
-    if isinstance(pattern, bytes):
-        return pattern == b'**'
-    else:
-        return pattern == '**'
-
-
-def escape(pathname):
-    """Escape all special characters.
-    """
-    # Escaping is done by wrapping any of "*?[" between square brackets.
-    # Metacharacters do not work in the drive part and shouldn't be escaped.
-    drive, pathname = os.path.splitdrive(pathname)
-    if isinstance(pathname, bytes):
-        pathname = magic_check_bytes.sub(br'[\1]', pathname)
-    else:
-        pathname = magic_check.sub(r'[\1]', pathname)
-    return drive + pathname
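A minimal usage sketch of the module shown above (the 'src/**/*.py' pattern is a hypothetical example; behavior mirrors stdlib glob except that hidden files are matched too):

    from setuptools.glob import glob, escape

    # Recursive match: '**' spans directories, unlike a plain '*'.
    for path in glob('src/**/*.py', recursive=True):
        print(path)

    # escape() wraps each of the magic characters *, ? and [ in brackets
    # so a literal file name can be globbed safely.
    assert escape('weird[name]?.txt') == 'weird[[]name][?].txt'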
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/detection_utils.py
DELETED
@@ -1,623 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) Facebook, Inc. and its affiliates.
-
-"""
-Common data processing utilities that are used in a
-typical object detection data pipeline.
-"""
-import logging
-import numpy as np
-from typing import List, Union
-import pycocotools.mask as mask_util
-import torch
-from PIL import Image
-
-from detectron2.structures import (
-    BitMasks,
-    Boxes,
-    BoxMode,
-    Instances,
-    Keypoints,
-    PolygonMasks,
-    RotatedBoxes,
-    polygons_to_bitmask,
-)
-from detectron2.utils.file_io import PathManager
-
-from . import transforms as T
-from .catalog import MetadataCatalog
-
-__all__ = [
-    "SizeMismatchError",
-    "convert_image_to_rgb",
-    "check_image_size",
-    "transform_proposals",
-    "transform_instance_annotations",
-    "annotations_to_instances",
-    "annotations_to_instances_rotated",
-    "build_augmentation",
-    "build_transform_gen",
-    "create_keypoint_hflip_indices",
-    "filter_empty_instances",
-    "read_image",
-]
-
-
-class SizeMismatchError(ValueError):
-    """
-    When the loaded image has a different width/height than the annotation.
-    """
-
-
-# https://en.wikipedia.org/wiki/YUV#SDTV_with_BT.601
-_M_RGB2YUV = [[0.299, 0.587, 0.114], [-0.14713, -0.28886, 0.436], [0.615, -0.51499, -0.10001]]
-_M_YUV2RGB = [[1.0, 0.0, 1.13983], [1.0, -0.39465, -0.58060], [1.0, 2.03211, 0.0]]

-# https://www.exiv2.org/tags.html
-_EXIF_ORIENT = 274  # exif 'Orientation' tag
-
-
-def convert_PIL_to_numpy(image, format):
-    """
-    Convert PIL image to numpy array of target format.
-
-    Args:
-        image (PIL.Image): a PIL image
-        format (str): the format of output image
-
-    Returns:
-        (np.ndarray): also see `read_image`
-    """
-    if format is not None:
-        # PIL only supports RGB, so convert to RGB and flip channels over below
-        conversion_format = format
-        if format in ["BGR", "YUV-BT.601"]:
-            conversion_format = "RGB"
-        image = image.convert(conversion_format)
-    image = np.asarray(image)
-    # PIL squeezes out the channel dimension for "L", so make it HWC
-    if format == "L":
-        image = np.expand_dims(image, -1)
-
-    # handle formats not supported by PIL
-    elif format == "BGR":
-        # flip channels if needed
-        image = image[:, :, ::-1]
-    elif format == "YUV-BT.601":
-        image = image / 255.0
-        image = np.dot(image, np.array(_M_RGB2YUV).T)
-
-    return image
-
-
-def convert_image_to_rgb(image, format):
-    """
-    Convert an image from given format to RGB.
-
-    Args:
-        image (np.ndarray or Tensor): an HWC image
-        format (str): the format of input image, also see `read_image`
-
-    Returns:
-        (np.ndarray): (H,W,3) RGB image in 0-255 range, can be either float or uint8
-    """
-    if isinstance(image, torch.Tensor):
-        image = image.cpu().numpy()
-    if format == "BGR":
-        image = image[:, :, [2, 1, 0]]
-    elif format == "YUV-BT.601":
-        image = np.dot(image, np.array(_M_YUV2RGB).T)
-        image = image * 255.0
-    else:
-        if format == "L":
-            image = image[:, :, 0]
-        image = image.astype(np.uint8)
-        image = np.asarray(Image.fromarray(image, mode=format).convert("RGB"))
-    return image
-
-
-def _apply_exif_orientation(image):
-    """
-    Applies the exif orientation correctly.
-
-    This code exists per the bug:
-      https://github.com/python-pillow/Pillow/issues/3973
-    with the function `ImageOps.exif_transpose`. The Pillow source raises errors with
-    various methods, especially `tobytes`
-
-    Function based on:
-      https://github.com/wkentaro/labelme/blob/v4.5.4/labelme/utils/image.py#L59
-      https://github.com/python-pillow/Pillow/blob/7.1.2/src/PIL/ImageOps.py#L527
-
-    Args:
-        image (PIL.Image): a PIL image
-
-    Returns:
-        (PIL.Image): the PIL image with exif orientation applied, if applicable
-    """
-    if not hasattr(image, "getexif"):
-        return image
-
-    try:
-        exif = image.getexif()
-    except Exception:  # https://github.com/facebookresearch/detectron2/issues/1885
-        exif = None
-
-    if exif is None:
-        return image
-
-    orientation = exif.get(_EXIF_ORIENT)
-
-    method = {
-        2: Image.FLIP_LEFT_RIGHT,
-        3: Image.ROTATE_180,
-        4: Image.FLIP_TOP_BOTTOM,
-        5: Image.TRANSPOSE,
-        6: Image.ROTATE_270,
-        7: Image.TRANSVERSE,
-        8: Image.ROTATE_90,
-    }.get(orientation)
-
-    if method is not None:
-        return image.transpose(method)
-    return image
-
-
-def read_image(file_name, format=None):
-    """
-    Read an image into the given format.
-    Will apply rotation and flipping if the image has such exif information.
-
-    Args:
-        file_name (str): image file path
-        format (str): one of the supported image modes in PIL, or "BGR" or "YUV-BT.601".
-
-    Returns:
-        image (np.ndarray):
-            an HWC image in the given format, which is 0-255, uint8 for
-            supported image modes in PIL or "BGR"; float (0-1 for Y) for YUV-BT.601.
-    """
-    with PathManager.open(file_name, "rb") as f:
-        image = Image.open(f)
-
-        # work around this bug: https://github.com/python-pillow/Pillow/issues/3973
-        image = _apply_exif_orientation(image)
-        return convert_PIL_to_numpy(image, format)
-
-
-def check_image_size(dataset_dict, image):
-    """
-    Raise an error if the image does not match the size specified in the dict.
-    """
-    if "width" in dataset_dict or "height" in dataset_dict:
-        image_wh = (image.shape[1], image.shape[0])
-        expected_wh = (dataset_dict["width"], dataset_dict["height"])
-        if not image_wh == expected_wh:
-            raise SizeMismatchError(
-                "Mismatched image shape{}, got {}, expect {}.".format(
-                    " for image " + dataset_dict["file_name"]
-                    if "file_name" in dataset_dict
-                    else "",
-                    image_wh,
-                    expected_wh,
-                )
-                + " Please check the width/height in your annotation."
-            )
-
-    # To ensure bbox always remap to original image size
-    if "width" not in dataset_dict:
-        dataset_dict["width"] = image.shape[1]
-    if "height" not in dataset_dict:
-        dataset_dict["height"] = image.shape[0]
-
-
-def transform_proposals(dataset_dict, image_shape, transforms, *, proposal_topk, min_box_size=0):
-    """
-    Apply transformations to the proposals in dataset_dict, if any.
-
-    Args:
-        dataset_dict (dict): a dict read from the dataset, possibly
-            contains fields "proposal_boxes", "proposal_objectness_logits", "proposal_bbox_mode"
-        image_shape (tuple): height, width
-        transforms (TransformList):
-        proposal_topk (int): only keep top-K scoring proposals
-        min_box_size (int): proposals with either side smaller than this
-            threshold are removed
-
-    The input dict is modified in-place, with abovementioned keys removed. A new
-    key "proposals" will be added. Its value is an `Instances`
-    object which contains the transformed proposals in its field
-    "proposal_boxes" and "objectness_logits".
-    """
-    if "proposal_boxes" in dataset_dict:
-        # Transform proposal boxes
-        boxes = transforms.apply_box(
-            BoxMode.convert(
-                dataset_dict.pop("proposal_boxes"),
-                dataset_dict.pop("proposal_bbox_mode"),
-                BoxMode.XYXY_ABS,
-            )
-        )
-        boxes = Boxes(boxes)
-        objectness_logits = torch.as_tensor(
-            dataset_dict.pop("proposal_objectness_logits").astype("float32")
-        )
-
-        boxes.clip(image_shape)
-        keep = boxes.nonempty(threshold=min_box_size)
-        boxes = boxes[keep]
-        objectness_logits = objectness_logits[keep]
-
-        proposals = Instances(image_shape)
-        proposals.proposal_boxes = boxes[:proposal_topk]
-        proposals.objectness_logits = objectness_logits[:proposal_topk]
-        dataset_dict["proposals"] = proposals
-
-
-def transform_instance_annotations(
-    annotation, transforms, image_size, *, keypoint_hflip_indices=None
-):
-    """
-    Apply transforms to box, segmentation and keypoints annotations of a single instance.
-
-    It will use `transforms.apply_box` for the box, and
-    `transforms.apply_coords` for segmentation polygons & keypoints.
-    If you need anything more specially designed for each data structure,
-    you'll need to implement your own version of this function or the transforms.
-
-    Args:
-        annotation (dict): dict of instance annotations for a single instance.
-            It will be modified in-place.
-        transforms (TransformList or list[Transform]):
-        image_size (tuple): the height, width of the transformed image
-        keypoint_hflip_indices (ndarray[int]): see `create_keypoint_hflip_indices`.
-
-    Returns:
-        dict:
-            the same input dict with fields "bbox", "segmentation", "keypoints"
-            transformed according to `transforms`.
-            The "bbox_mode" field will be set to XYXY_ABS.
-    """
-    if isinstance(transforms, (tuple, list)):
-        transforms = T.TransformList(transforms)
-    # bbox is 1d (per-instance bounding box)
-    bbox = BoxMode.convert(annotation["bbox"], annotation["bbox_mode"], BoxMode.XYXY_ABS)
-    # clip transformed bbox to image size
-    bbox = transforms.apply_box(np.array([bbox]))[0].clip(min=0)
-    annotation["bbox"] = np.minimum(bbox, list(image_size + image_size)[::-1])
-    annotation["bbox_mode"] = BoxMode.XYXY_ABS
-
-    if "segmentation" in annotation:
-        # each instance contains 1 or more polygons
-        segm = annotation["segmentation"]
-        if isinstance(segm, list):
-            # polygons
-            polygons = [np.asarray(p).reshape(-1, 2) for p in segm]
-            annotation["segmentation"] = [
-                p.reshape(-1) for p in transforms.apply_polygons(polygons)
-            ]
-        elif isinstance(segm, dict):
-            # RLE
-            mask = mask_util.decode(segm)
-            mask = transforms.apply_segmentation(mask)
-            assert tuple(mask.shape[:2]) == image_size
-            annotation["segmentation"] = mask
-        else:
-            raise ValueError(
-                "Cannot transform segmentation of type '{}'!"
-                "Supported types are: polygons as list[list[float] or ndarray],"
-                " COCO-style RLE as a dict.".format(type(segm))
-            )
-
-    if "keypoints" in annotation:
-        keypoints = transform_keypoint_annotations(
-            annotation["keypoints"], transforms, image_size, keypoint_hflip_indices
-        )
-        annotation["keypoints"] = keypoints
-
-    return annotation
-
-
-def transform_keypoint_annotations(keypoints, transforms, image_size, keypoint_hflip_indices=None):
-    """
-    Transform keypoint annotations of an image.
-    If a keypoint is transformed out of image boundary, it will be marked "unlabeled" (visibility=0)
-
-    Args:
-        keypoints (list[float]): Nx3 float in Detectron2's Dataset format.
-            Each point is represented by (x, y, visibility).
-        transforms (TransformList):
-        image_size (tuple): the height, width of the transformed image
-        keypoint_hflip_indices (ndarray[int]): see `create_keypoint_hflip_indices`.
-            When `transforms` includes horizontal flip, will use the index
-            mapping to flip keypoints.
-    """
-    # (N*3,) -> (N, 3)
-    keypoints = np.asarray(keypoints, dtype="float64").reshape(-1, 3)
-    keypoints_xy = transforms.apply_coords(keypoints[:, :2])
-
-    # Set all out-of-boundary points to "unlabeled"
-    inside = (keypoints_xy >= np.array([0, 0])) & (keypoints_xy <= np.array(image_size[::-1]))
-    inside = inside.all(axis=1)
-    keypoints[:, :2] = keypoints_xy
-    keypoints[:, 2][~inside] = 0
-
-    # This assumes that HorizFlipTransform is the only one that does flip
-    do_hflip = sum(isinstance(t, T.HFlipTransform) for t in transforms.transforms) % 2 == 1
-
-    # Alternative way: check if probe points were horizontally flipped.
-    # probe = np.asarray([[0.0, 0.0], [image_width, 0.0]])
-    # probe_aug = transforms.apply_coords(probe.copy())
-    # do_hflip = np.sign(probe[1][0] - probe[0][0]) != np.sign(probe_aug[1][0] - probe_aug[0][0])  # noqa
-
-    # If flipped, swap each keypoint with its opposite-handed equivalent
-    if do_hflip:
-        if keypoint_hflip_indices is None:
-            raise ValueError("Cannot flip keypoints without providing flip indices!")
-        if len(keypoints) != len(keypoint_hflip_indices):
-            raise ValueError(
-                "Keypoint data has {} points, but metadata "
-                "contains {} points!".format(len(keypoints), len(keypoint_hflip_indices))
-            )
-        keypoints = keypoints[np.asarray(keypoint_hflip_indices, dtype=np.int32), :]
-
-    # Maintain COCO convention that if visibility == 0 (unlabeled), then x, y = 0
-    keypoints[keypoints[:, 2] == 0] = 0
-    return keypoints
-
-
-def annotations_to_instances(annos, image_size, mask_format="polygon"):
-    """
-    Create an :class:`Instances` object used by the models,
-    from instance annotations in the dataset dict.
-
-    Args:
-        annos (list[dict]): a list of instance annotations in one image, each
-            element for one instance.
-        image_size (tuple): height, width
-
-    Returns:
-        Instances:
-            It will contain fields "gt_boxes", "gt_classes",
-            "gt_masks", "gt_keypoints", if they can be obtained from `annos`.
-            This is the format that builtin models expect.
-    """
-    boxes = (
-        np.stack(
-            [BoxMode.convert(obj["bbox"], obj["bbox_mode"], BoxMode.XYXY_ABS) for obj in annos]
-        )
-        if len(annos)
-        else np.zeros((0, 4))
-    )
-    target = Instances(image_size)
-    target.gt_boxes = Boxes(boxes)
-
-    classes = [int(obj["category_id"]) for obj in annos]
-    classes = torch.tensor(classes, dtype=torch.int64)
-    target.gt_classes = classes
-
-    if len(annos) and "segmentation" in annos[0]:
-        segms = [obj["segmentation"] for obj in annos]
-        if mask_format == "polygon":
-            try:
-                masks = PolygonMasks(segms)
-            except ValueError as e:
-                raise ValueError(
-                    "Failed to use mask_format=='polygon' from the given annotations!"
-                ) from e
-        else:
-            assert mask_format == "bitmask", mask_format
-            masks = []
-            for segm in segms:
-                if isinstance(segm, list):
-                    # polygon
-                    masks.append(polygons_to_bitmask(segm, *image_size))
-                elif isinstance(segm, dict):
-                    # COCO RLE
-                    masks.append(mask_util.decode(segm))
-                elif isinstance(segm, np.ndarray):
-                    assert segm.ndim == 2, "Expect segmentation of 2 dimensions, got {}.".format(
-                        segm.ndim
-                    )
-                    # mask array
-                    masks.append(segm)
-                else:
-                    raise ValueError(
-                        "Cannot convert segmentation of type '{}' to BitMasks!"
-                        "Supported types are: polygons as list[list[float] or ndarray],"
-                        " COCO-style RLE as a dict, or a binary segmentation mask "
-                        " in a 2D numpy array of shape HxW.".format(type(segm))
-                    )
-            # torch.from_numpy does not support array with negative stride.
-            masks = BitMasks(
-                torch.stack([torch.from_numpy(np.ascontiguousarray(x)) for x in masks])
-            )
-        target.gt_masks = masks
-
-    if len(annos) and "keypoints" in annos[0]:
-        kpts = [obj.get("keypoints", []) for obj in annos]
-        target.gt_keypoints = Keypoints(kpts)
-
-    return target
-
-
-def annotations_to_instances_rotated(annos, image_size):
-    """
-    Create an :class:`Instances` object used by the models,
-    from instance annotations in the dataset dict.
-    Compared to `annotations_to_instances`, this function is for rotated boxes only
-
-    Args:
-        annos (list[dict]): a list of instance annotations in one image, each
-            element for one instance.
-        image_size (tuple): height, width
-
-    Returns:
-        Instances:
-            Containing fields "gt_boxes", "gt_classes",
-            if they can be obtained from `annos`.
-            This is the format that builtin models expect.
-    """
-    boxes = [obj["bbox"] for obj in annos]
-    target = Instances(image_size)
-    boxes = target.gt_boxes = RotatedBoxes(boxes)
-    boxes.clip(image_size)
-
-    classes = [obj["category_id"] for obj in annos]
-    classes = torch.tensor(classes, dtype=torch.int64)
-    target.gt_classes = classes
-
-    return target
-
-
-def filter_empty_instances(
-    instances, by_box=True, by_mask=True, box_threshold=1e-5, return_mask=False
-):
-    """
-    Filter out empty instances in an `Instances` object.
-
-    Args:
-        instances (Instances):
-        by_box (bool): whether to filter out instances with empty boxes
-        by_mask (bool): whether to filter out instances with empty masks
-        box_threshold (float): minimum width and height to be considered non-empty
-        return_mask (bool): whether to return boolean mask of filtered instances
-
-    Returns:
-        Instances: the filtered instances.
-        tensor[bool], optional: boolean mask of filtered instances
-    """
-    assert by_box or by_mask
-    r = []
-    if by_box:
-        r.append(instances.gt_boxes.nonempty(threshold=box_threshold))
-    if instances.has("gt_masks") and by_mask:
-        r.append(instances.gt_masks.nonempty())
-
-    # TODO: can also filter visible keypoints
-
-    if not r:
-        return instances
-    m = r[0]
-    for x in r[1:]:
-        m = m & x
-    if return_mask:
-        return instances[m], m
-    return instances[m]
-
-
-def create_keypoint_hflip_indices(dataset_names: Union[str, List[str]]) -> List[int]:
-    """
-    Args:
-        dataset_names: list of dataset names
-
-    Returns:
-        list[int]: a list of size=#keypoints, storing the
-        horizontally-flipped keypoint indices.
-    """
-    if isinstance(dataset_names, str):
-        dataset_names = [dataset_names]
-
-    check_metadata_consistency("keypoint_names", dataset_names)
-    check_metadata_consistency("keypoint_flip_map", dataset_names)
-
-    meta = MetadataCatalog.get(dataset_names[0])
-    names = meta.keypoint_names
-    # TODO flip -> hflip
-    flip_map = dict(meta.keypoint_flip_map)
-    flip_map.update({v: k for k, v in flip_map.items()})
-    flipped_names = [i if i not in flip_map else flip_map[i] for i in names]
-    flip_indices = [names.index(i) for i in flipped_names]
-    return flip_indices
-
-
-def gen_crop_transform_with_instance(crop_size, image_size, instance):
-    """
-    Generate a CropTransform so that the cropping region contains
-    the center of the given instance.
-
-    Args:
-        crop_size (tuple): h, w in pixels
-        image_size (tuple): h, w
-        instance (dict): an annotation dict of one instance, in Detectron2's
-            dataset format.
-    """
-    crop_size = np.asarray(crop_size, dtype=np.int32)
-    bbox = BoxMode.convert(instance["bbox"], instance["bbox_mode"], BoxMode.XYXY_ABS)
-    center_yx = (bbox[1] + bbox[3]) * 0.5, (bbox[0] + bbox[2]) * 0.5
-    assert (
-        image_size[0] >= center_yx[0] and image_size[1] >= center_yx[1]
-    ), "The annotation bounding box is outside of the image!"
-    assert (
-        image_size[0] >= crop_size[0] and image_size[1] >= crop_size[1]
-    ), "Crop size is larger than image size!"
-
-    min_yx = np.maximum(np.floor(center_yx).astype(np.int32) - crop_size, 0)
-    max_yx = np.maximum(np.asarray(image_size, dtype=np.int32) - crop_size, 0)
-    max_yx = np.minimum(max_yx, np.ceil(center_yx).astype(np.int32))
-
-    y0 = np.random.randint(min_yx[0], max_yx[0] + 1)
-    x0 = np.random.randint(min_yx[1], max_yx[1] + 1)
-    return T.CropTransform(x0, y0, crop_size[1], crop_size[0])
-
-
-def check_metadata_consistency(key, dataset_names):
-    """
-    Check that the datasets have consistent metadata.
-
-    Args:
-        key (str): a metadata key
-        dataset_names (list[str]): a list of dataset names
-
-    Raises:
-        AttributeError: if the key does not exist in the metadata
-        ValueError: if the given datasets do not have the same metadata values defined by key
-    """
-    if len(dataset_names) == 0:
-        return
-    logger = logging.getLogger(__name__)
-    entries_per_dataset = [getattr(MetadataCatalog.get(d), key) for d in dataset_names]
-    for idx, entry in enumerate(entries_per_dataset):
-        if entry != entries_per_dataset[0]:
-            logger.error(
-                "Metadata '{}' for dataset '{}' is '{}'".format(key, dataset_names[idx], str(entry))
-            )
-            logger.error(
-                "Metadata '{}' for dataset '{}' is '{}'".format(
-                    key, dataset_names[0], str(entries_per_dataset[0])
-                )
-            )
-            raise ValueError("Datasets have different metadata '{}'!".format(key))
-
-
-def build_augmentation(cfg, is_train):
-    """
-    Create a list of default :class:`Augmentation` from config.
-    Now it includes resizing and flipping.
-
-    Returns:
-        list[Augmentation]
-    """
-    if is_train:
-        min_size = cfg.INPUT.MIN_SIZE_TRAIN
-        max_size = cfg.INPUT.MAX_SIZE_TRAIN
-        sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
-    else:
-        min_size = cfg.INPUT.MIN_SIZE_TEST
-        max_size = cfg.INPUT.MAX_SIZE_TEST
-        sample_style = "choice"
-    augmentation = [T.ResizeShortestEdge(min_size, max_size, sample_style)]
-    if is_train and cfg.INPUT.RANDOM_FLIP != "none":
-        augmentation.append(
-            T.RandomFlip(
-                horizontal=cfg.INPUT.RANDOM_FLIP == "horizontal",
-                vertical=cfg.INPUT.RANDOM_FLIP == "vertical",
-            )
-        )
-    return augmentation
-
-
-build_transform_gen = build_augmentation
-"""
-Alias for backward-compatibility.
-"""
spaces/Banbri/zcvzcv/src/app/queries/predictWithOpenAI.ts
DELETED
@@ -1,33 +0,0 @@
-"use server"
-
-import type { ChatCompletionMessage } from "openai/resources/chat"
-import OpenAI from "openai"
-
-export async function predict(inputs: string): Promise<string> {
-  const openaiApiKey = `${process.env.AUTH_OPENAI_API_KEY || ""}`
-  const openaiApiBaseUrl = `${process.env.LLM_OPENAI_API_BASE_URL || "https://api.openai.com/v1"}`
-  const openaiApiModel = `${process.env.LLM_OPENAI_API_MODEL || "gpt-3.5-turbo"}`
-
-  const openai = new OpenAI({
-    apiKey: openaiApiKey,
-    baseURL: openaiApiBaseUrl,
-  })
-
-  const messages: ChatCompletionMessage[] = [
-    { role: "system", content: inputs },
-  ]
-
-  try {
-    const res = await openai.chat.completions.create({
-      messages: messages,
-      stream: false,
-      model: openaiApiModel,
-      temperature: 0.8
-    })
-
-    return res.choices[0].message.content || ""
-  } catch (err) {
-    console.error(`error during generation: ${err}`)
-    return ""
-  }
-}
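For consistency with the other examples, here is a minimal Python sketch of the same call pattern using the official openai package (environment variable names mirror the TypeScript file above; the prompt passed at the bottom is hypothetical):

    import os
    from openai import OpenAI

    client = OpenAI(
        api_key=os.environ.get("AUTH_OPENAI_API_KEY", ""),
        base_url=os.environ.get("LLM_OPENAI_API_BASE_URL", "https://api.openai.com/v1"),
    )

    def predict(inputs: str) -> str:
        try:
            res = client.chat.completions.create(
                model=os.environ.get("LLM_OPENAI_API_MODEL", "gpt-3.5-turbo"),
                # Like the TS version, the whole prompt goes in as a system message.
                messages=[{"role": "system", "content": inputs}],
                temperature=0.8,
            )
            return res.choices[0].message.content or ""
        except Exception as err:
            print(f"error during generation: {err}")
            return ""

    print(predict("You are a helpful assistant. Say hello."))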
spaces/Bart92/RVC_HF/lib/infer_pack/attentions.py
DELETED
@@ -1,417 +0,0 @@
-import copy
-import math
-import numpy as np
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-from lib.infer_pack import commons
-from lib.infer_pack import modules
-from lib.infer_pack.modules import LayerNorm
-
-
-class Encoder(nn.Module):
-    def __init__(
-        self,
-        hidden_channels,
-        filter_channels,
-        n_heads,
-        n_layers,
-        kernel_size=1,
-        p_dropout=0.0,
-        window_size=10,
-        **kwargs
-    ):
-        super().__init__()
-        self.hidden_channels = hidden_channels
-        self.filter_channels = filter_channels
-        self.n_heads = n_heads
-        self.n_layers = n_layers
-        self.kernel_size = kernel_size
-        self.p_dropout = p_dropout
-        self.window_size = window_size
-
-        self.drop = nn.Dropout(p_dropout)
-        self.attn_layers = nn.ModuleList()
-        self.norm_layers_1 = nn.ModuleList()
-        self.ffn_layers = nn.ModuleList()
-        self.norm_layers_2 = nn.ModuleList()
-        for i in range(self.n_layers):
-            self.attn_layers.append(
-                MultiHeadAttention(
-                    hidden_channels,
-                    hidden_channels,
-                    n_heads,
-                    p_dropout=p_dropout,
-                    window_size=window_size,
-                )
-            )
-            self.norm_layers_1.append(LayerNorm(hidden_channels))
-            self.ffn_layers.append(
-                FFN(
-                    hidden_channels,
-                    hidden_channels,
-                    filter_channels,
-                    kernel_size,
-                    p_dropout=p_dropout,
-                )
-            )
-            self.norm_layers_2.append(LayerNorm(hidden_channels))
-
-    def forward(self, x, x_mask):
-        attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
-        x = x * x_mask
-        for i in range(self.n_layers):
-            y = self.attn_layers[i](x, x, attn_mask)
-            y = self.drop(y)
-            x = self.norm_layers_1[i](x + y)
-
-            y = self.ffn_layers[i](x, x_mask)
-            y = self.drop(y)
-            x = self.norm_layers_2[i](x + y)
-        x = x * x_mask
-        return x
-
-
-class Decoder(nn.Module):
-    def __init__(
-        self,
-        hidden_channels,
-        filter_channels,
-        n_heads,
-        n_layers,
-        kernel_size=1,
-        p_dropout=0.0,
-        proximal_bias=False,
-        proximal_init=True,
-        **kwargs
-    ):
-        super().__init__()
-        self.hidden_channels = hidden_channels
-        self.filter_channels = filter_channels
-        self.n_heads = n_heads
-        self.n_layers = n_layers
-        self.kernel_size = kernel_size
-        self.p_dropout = p_dropout
-        self.proximal_bias = proximal_bias
-        self.proximal_init = proximal_init
-
-        self.drop = nn.Dropout(p_dropout)
-        self.self_attn_layers = nn.ModuleList()
-        self.norm_layers_0 = nn.ModuleList()
-        self.encdec_attn_layers = nn.ModuleList()
-        self.norm_layers_1 = nn.ModuleList()
-        self.ffn_layers = nn.ModuleList()
-        self.norm_layers_2 = nn.ModuleList()
-        for i in range(self.n_layers):
-            self.self_attn_layers.append(
-                MultiHeadAttention(
-                    hidden_channels,
-                    hidden_channels,
-                    n_heads,
-                    p_dropout=p_dropout,
-                    proximal_bias=proximal_bias,
-                    proximal_init=proximal_init,
-                )
-            )
-            self.norm_layers_0.append(LayerNorm(hidden_channels))
-            self.encdec_attn_layers.append(
-                MultiHeadAttention(
-                    hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout
-                )
-            )
-            self.norm_layers_1.append(LayerNorm(hidden_channels))
-            self.ffn_layers.append(
-                FFN(
-                    hidden_channels,
-                    hidden_channels,
-                    filter_channels,
-                    kernel_size,
-                    p_dropout=p_dropout,
-                    causal=True,
-                )
-            )
-            self.norm_layers_2.append(LayerNorm(hidden_channels))
-
-    def forward(self, x, x_mask, h, h_mask):
-        """
-        x: decoder input
-        h: encoder output
-        """
-        self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(
-            device=x.device, dtype=x.dtype
-        )
-        encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
-        x = x * x_mask
-        for i in range(self.n_layers):
-            y = self.self_attn_layers[i](x, x, self_attn_mask)
-            y = self.drop(y)
-            x = self.norm_layers_0[i](x + y)
-
-            y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
-            y = self.drop(y)
-            x = self.norm_layers_1[i](x + y)
-
-            y = self.ffn_layers[i](x, x_mask)
-            y = self.drop(y)
-            x = self.norm_layers_2[i](x + y)
-        x = x * x_mask
-        return x
-
-
-class MultiHeadAttention(nn.Module):
-    def __init__(
-        self,
-        channels,
-        out_channels,
-        n_heads,
-        p_dropout=0.0,
-        window_size=None,
-        heads_share=True,
-        block_length=None,
-        proximal_bias=False,
-        proximal_init=False,
-    ):
-        super().__init__()
-        assert channels % n_heads == 0
-
-        self.channels = channels
-        self.out_channels = out_channels
-        self.n_heads = n_heads
-        self.p_dropout = p_dropout
-        self.window_size = window_size
-        self.heads_share = heads_share
-        self.block_length = block_length
-        self.proximal_bias = proximal_bias
-        self.proximal_init = proximal_init
-        self.attn = None
-
-        self.k_channels = channels // n_heads
-        self.conv_q = nn.Conv1d(channels, channels, 1)
-        self.conv_k = nn.Conv1d(channels, channels, 1)
-        self.conv_v = nn.Conv1d(channels, channels, 1)
-        self.conv_o = nn.Conv1d(channels, out_channels, 1)
-        self.drop = nn.Dropout(p_dropout)
-
-        if window_size is not None:
-            n_heads_rel = 1 if heads_share else n_heads
-            rel_stddev = self.k_channels**-0.5
-            self.emb_rel_k = nn.Parameter(
-                torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
-                * rel_stddev
-            )
-            self.emb_rel_v = nn.Parameter(
-                torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
-                * rel_stddev
-            )
-
-        nn.init.xavier_uniform_(self.conv_q.weight)
-        nn.init.xavier_uniform_(self.conv_k.weight)
-        nn.init.xavier_uniform_(self.conv_v.weight)
-        if proximal_init:
-            with torch.no_grad():
-                self.conv_k.weight.copy_(self.conv_q.weight)
-                self.conv_k.bias.copy_(self.conv_q.bias)
-
-    def forward(self, x, c, attn_mask=None):
-        q = self.conv_q(x)
-        k = self.conv_k(c)
-        v = self.conv_v(c)
-
-        x, self.attn = self.attention(q, k, v, mask=attn_mask)
-
-        x = self.conv_o(x)
-        return x
-
-    def attention(self, query, key, value, mask=None):
-        # reshape [b, d, t] -> [b, n_h, t, d_k]
-        b, d, t_s, t_t = (*key.size(), query.size(2))
-        query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
-        key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
-        value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
-
-        scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
-        if self.window_size is not None:
-            assert (
-                t_s == t_t
-            ), "Relative attention is only available for self-attention."
-            key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
-            rel_logits = self._matmul_with_relative_keys(
-                query / math.sqrt(self.k_channels), key_relative_embeddings
-            )
-            scores_local = self._relative_position_to_absolute_position(rel_logits)
107 |
-
self.self_attn_layers.append(
|
108 |
-
MultiHeadAttention(
|
109 |
-
hidden_channels,
|
110 |
-
hidden_channels,
|
111 |
-
n_heads,
|
112 |
-
p_dropout=p_dropout,
|
113 |
-
proximal_bias=proximal_bias,
|
114 |
-
proximal_init=proximal_init,
|
115 |
-
)
|
116 |
-
)
|
117 |
-
self.norm_layers_0.append(LayerNorm(hidden_channels))
|
118 |
-
self.encdec_attn_layers.append(
|
119 |
-
MultiHeadAttention(
|
120 |
-
hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout
|
121 |
-
)
|
122 |
-
)
|
123 |
-
self.norm_layers_1.append(LayerNorm(hidden_channels))
|
124 |
-
self.ffn_layers.append(
|
125 |
-
FFN(
|
126 |
-
hidden_channels,
|
127 |
-
hidden_channels,
|
128 |
-
filter_channels,
|
129 |
-
kernel_size,
|
130 |
-
p_dropout=p_dropout,
|
131 |
-
causal=True,
|
132 |
-
)
|
133 |
-
)
|
134 |
-
self.norm_layers_2.append(LayerNorm(hidden_channels))
|
135 |
-
|
136 |
-
def forward(self, x, x_mask, h, h_mask):
|
137 |
-
"""
|
138 |
-
x: decoder input
|
139 |
-
h: encoder output
|
140 |
-
"""
|
141 |
-
self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(
|
142 |
-
device=x.device, dtype=x.dtype
|
143 |
-
)
|
144 |
-
encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
|
145 |
-
x = x * x_mask
|
146 |
-
for i in range(self.n_layers):
|
147 |
-
y = self.self_attn_layers[i](x, x, self_attn_mask)
|
148 |
-
y = self.drop(y)
|
149 |
-
x = self.norm_layers_0[i](x + y)
|
150 |
-
|
151 |
-
y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
|
152 |
-
y = self.drop(y)
|
153 |
-
x = self.norm_layers_1[i](x + y)
|
154 |
-
|
155 |
-
y = self.ffn_layers[i](x, x_mask)
|
156 |
-
y = self.drop(y)
|
157 |
-
x = self.norm_layers_2[i](x + y)
|
158 |
-
x = x * x_mask
|
159 |
-
return x
|
160 |
-
|
161 |
-
|
162 |
-
class MultiHeadAttention(nn.Module):
|
163 |
-
def __init__(
|
164 |
-
self,
|
165 |
-
channels,
|
166 |
-
out_channels,
|
167 |
-
n_heads,
|
168 |
-
p_dropout=0.0,
|
169 |
-
window_size=None,
|
170 |
-
heads_share=True,
|
171 |
-
block_length=None,
|
172 |
-
proximal_bias=False,
|
173 |
-
proximal_init=False,
|
174 |
-
):
|
175 |
-
super().__init__()
|
176 |
-
assert channels % n_heads == 0
|
177 |
-
|
178 |
-
self.channels = channels
|
179 |
-
self.out_channels = out_channels
|
180 |
-
self.n_heads = n_heads
|
181 |
-
self.p_dropout = p_dropout
|
182 |
-
self.window_size = window_size
|
183 |
-
self.heads_share = heads_share
|
184 |
-
self.block_length = block_length
|
185 |
-
self.proximal_bias = proximal_bias
|
186 |
-
self.proximal_init = proximal_init
|
187 |
-
self.attn = None
|
188 |
-
|
189 |
-
self.k_channels = channels // n_heads
|
190 |
-
self.conv_q = nn.Conv1d(channels, channels, 1)
|
191 |
-
self.conv_k = nn.Conv1d(channels, channels, 1)
|
192 |
-
self.conv_v = nn.Conv1d(channels, channels, 1)
|
193 |
-
self.conv_o = nn.Conv1d(channels, out_channels, 1)
|
194 |
-
self.drop = nn.Dropout(p_dropout)
|
195 |
-
|
196 |
-
if window_size is not None:
|
197 |
-
n_heads_rel = 1 if heads_share else n_heads
|
198 |
-
rel_stddev = self.k_channels**-0.5
|
199 |
-
self.emb_rel_k = nn.Parameter(
|
200 |
-
torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
|
201 |
-
* rel_stddev
|
202 |
-
)
|
203 |
-
self.emb_rel_v = nn.Parameter(
|
204 |
-
torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
|
205 |
-
* rel_stddev
|
206 |
-
)
|
207 |
-
|
208 |
-
nn.init.xavier_uniform_(self.conv_q.weight)
|
209 |
-
nn.init.xavier_uniform_(self.conv_k.weight)
|
210 |
-
nn.init.xavier_uniform_(self.conv_v.weight)
|
211 |
-
if proximal_init:
|
212 |
-
with torch.no_grad():
|
213 |
-
self.conv_k.weight.copy_(self.conv_q.weight)
|
214 |
-
self.conv_k.bias.copy_(self.conv_q.bias)
|
215 |
-
|
216 |
-
def forward(self, x, c, attn_mask=None):
|
217 |
-
q = self.conv_q(x)
|
218 |
-
k = self.conv_k(c)
|
219 |
-
v = self.conv_v(c)
|
220 |
-
|
221 |
-
x, self.attn = self.attention(q, k, v, mask=attn_mask)
|
222 |
-
|
223 |
-
x = self.conv_o(x)
|
224 |
-
return x
|
225 |
-
|
226 |
-
def attention(self, query, key, value, mask=None):
|
227 |
-
# reshape [b, d, t] -> [b, n_h, t, d_k]
|
228 |
-
b, d, t_s, t_t = (*key.size(), query.size(2))
|
229 |
-
query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
|
230 |
-
key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
|
231 |
-
value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
|
232 |
-
|
233 |
-
scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
|
234 |
-
if self.window_size is not None:
|
235 |
-
assert (
|
236 |
-
t_s == t_t
|
237 |
-
), "Relative attention is only available for self-attention."
|
238 |
-
key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
|
239 |
-
rel_logits = self._matmul_with_relative_keys(
|
240 |
-
query / math.sqrt(self.k_channels), key_relative_embeddings
|
241 |
-
)
|
242 |
-
scores_local = self._relative_position_to_absolute_position(rel_logits)
|
243 |
-
scores = scores + scores_local
|
244 |
-
if self.proximal_bias:
|
245 |
-
assert t_s == t_t, "Proximal bias is only available for self-attention."
|
246 |
-
scores = scores + self._attention_bias_proximal(t_s).to(
|
247 |
-
device=scores.device, dtype=scores.dtype
|
248 |
-
)
|
249 |
-
if mask is not None:
|
250 |
-
scores = scores.masked_fill(mask == 0, -1e4)
|
251 |
-
if self.block_length is not None:
|
252 |
-
assert (
|
253 |
-
t_s == t_t
|
254 |
-
), "Local attention is only available for self-attention."
|
255 |
-
block_mask = (
|
256 |
-
torch.ones_like(scores)
|
257 |
-
.triu(-self.block_length)
|
258 |
-
.tril(self.block_length)
|
259 |
-
)
|
260 |
-
scores = scores.masked_fill(block_mask == 0, -1e4)
|
261 |
-
p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
|
262 |
-
p_attn = self.drop(p_attn)
|
263 |
-
output = torch.matmul(p_attn, value)
|
264 |
-
if self.window_size is not None:
|
265 |
-
relative_weights = self._absolute_position_to_relative_position(p_attn)
|
266 |
-
value_relative_embeddings = self._get_relative_embeddings(
|
267 |
-
self.emb_rel_v, t_s
|
268 |
-
)
|
269 |
-
output = output + self._matmul_with_relative_values(
|
270 |
-
relative_weights, value_relative_embeddings
|
271 |
-
)
|
272 |
-
output = (
|
273 |
-
output.transpose(2, 3).contiguous().view(b, d, t_t)
|
274 |
-
) # [b, n_h, t_t, d_k] -> [b, d, t_t]
|
275 |
-
return output, p_attn
|
276 |
-
|
277 |
-
def _matmul_with_relative_values(self, x, y):
|
278 |
-
"""
|
279 |
-
x: [b, h, l, m]
|
280 |
-
y: [h or 1, m, d]
|
281 |
-
ret: [b, h, l, d]
|
282 |
-
"""
|
283 |
-
ret = torch.matmul(x, y.unsqueeze(0))
|
284 |
-
return ret
|
285 |
-
|
286 |
-
def _matmul_with_relative_keys(self, x, y):
|
287 |
-
"""
|
288 |
-
x: [b, h, l, d]
|
289 |
-
y: [h or 1, m, d]
|
290 |
-
ret: [b, h, l, m]
|
291 |
-
"""
|
292 |
-
ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
|
293 |
-
return ret
|
294 |
-
|
295 |
-
def _get_relative_embeddings(self, relative_embeddings, length):
|
296 |
-
max_relative_position = 2 * self.window_size + 1
|
297 |
-
# Pad first before slice to avoid using cond ops.
|
298 |
-
pad_length = max(length - (self.window_size + 1), 0)
|
299 |
-
slice_start_position = max((self.window_size + 1) - length, 0)
|
300 |
-
slice_end_position = slice_start_position + 2 * length - 1
|
301 |
-
if pad_length > 0:
|
302 |
-
padded_relative_embeddings = F.pad(
|
303 |
-
relative_embeddings,
|
304 |
-
commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]),
|
305 |
-
)
|
306 |
-
else:
|
307 |
-
padded_relative_embeddings = relative_embeddings
|
308 |
-
used_relative_embeddings = padded_relative_embeddings[
|
309 |
-
:, slice_start_position:slice_end_position
|
310 |
-
]
|
311 |
-
return used_relative_embeddings
|
312 |
-
|
313 |
-
def _relative_position_to_absolute_position(self, x):
|
314 |
-
"""
|
315 |
-
x: [b, h, l, 2*l-1]
|
316 |
-
ret: [b, h, l, l]
|
317 |
-
"""
|
318 |
-
batch, heads, length, _ = x.size()
|
319 |
-
# Concat columns of pad to shift from relative to absolute indexing.
|
320 |
-
x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
|
321 |
-
|
322 |
-
# Concat extra elements so to add up to shape (len+1, 2*len-1).
|
323 |
-
x_flat = x.view([batch, heads, length * 2 * length])
|
324 |
-
x_flat = F.pad(
|
325 |
-
x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]])
|
326 |
-
)
|
327 |
-
|
328 |
-
# Reshape and slice out the padded elements.
|
329 |
-
x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[
|
330 |
-
:, :, :length, length - 1 :
|
331 |
-
]
|
332 |
-
return x_final
|
333 |
-
|
334 |
-
def _absolute_position_to_relative_position(self, x):
|
335 |
-
"""
|
336 |
-
x: [b, h, l, l]
|
337 |
-
ret: [b, h, l, 2*l-1]
|
338 |
-
"""
|
339 |
-
batch, heads, length, _ = x.size()
|
340 |
-
# padd along column
|
341 |
-
x = F.pad(
|
342 |
-
x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])
|
343 |
-
)
|
344 |
-
x_flat = x.view([batch, heads, length**2 + length * (length - 1)])
|
345 |
-
# add 0's in the beginning that will skew the elements after reshape
|
346 |
-
x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
|
347 |
-
x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
|
348 |
-
return x_final
|
349 |
-
|
350 |
-
def _attention_bias_proximal(self, length):
|
351 |
-
"""Bias for self-attention to encourage attention to close positions.
|
352 |
-
Args:
|
353 |
-
length: an integer scalar.
|
354 |
-
Returns:
|
355 |
-
a Tensor with shape [1, 1, length, length]
|
356 |
-
"""
|
357 |
-
r = torch.arange(length, dtype=torch.float32)
|
358 |
-
diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
|
359 |
-
return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
|
360 |
-
|
361 |
-
|
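The `_relative_position_to_absolute_position` / `_absolute_position_to_relative_position` pair above implements the pad-and-reshape "skewing" trick used by relative-position self-attention. A self-contained sketch (plain PyTorch, no `commons` helper) that reproduces both maps and checks that they invert each other on a full absolute score matrix:

import torch
import torch.nn.functional as F

def rel_to_abs(x):
    # [b, h, l, 2*l-1] -> [b, h, l, l]; column k of row i holds relative offset k-(l-1)
    b, h, l, _ = x.size()
    x = F.pad(x, (0, 1))                                  # one extra column per row
    x_flat = F.pad(x.view(b, h, l * 2 * l), (0, l - 1))   # trailing zeros shift each row
    return x_flat.view(b, h, l + 1, 2 * l - 1)[:, :, :l, l - 1:]

def abs_to_rel(x):
    # [b, h, l, l] -> [b, h, l, 2*l-1]; the inverse skew of rel_to_abs
    b, h, l, _ = x.size()
    x = F.pad(x, (0, l - 1))
    x_flat = F.pad(x.view(b, h, l * l + l * (l - 1)), (l, 0))  # leading zeros skew rows
    return x_flat.view(b, h, l, 2 * l)[:, :, :, 1:]

scores = torch.randn(1, 1, 5, 5)
assert torch.allclose(rel_to_abs(abs_to_rel(scores)), scores)  # round trip is exact
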
class FFN(nn.Module):
    def __init__(
        self,
        in_channels,
        out_channels,
        filter_channels,
        kernel_size,
        p_dropout=0.0,
        activation=None,
        causal=False,
    ):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.filter_channels = filter_channels
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.activation = activation
        self.causal = causal

        if causal:
            self.padding = self._causal_padding
        else:
            self.padding = self._same_padding

        self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
        self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
        self.drop = nn.Dropout(p_dropout)

    def forward(self, x, x_mask):
        x = self.conv_1(self.padding(x * x_mask))
        if self.activation == "gelu":
            x = x * torch.sigmoid(1.702 * x)
        else:
            x = torch.relu(x)
        x = self.drop(x)
        x = self.conv_2(self.padding(x * x_mask))
        return x * x_mask

    def _causal_padding(self, x):
        if self.kernel_size == 1:
            return x
        pad_l = self.kernel_size - 1
        pad_r = 0
        padding = [[0, 0], [0, 0], [pad_l, pad_r]]
        x = F.pad(x, commons.convert_pad_shape(padding))
        return x

    def _same_padding(self, x):
        if self.kernel_size == 1:
            return x
        pad_l = (self.kernel_size - 1) // 2
        pad_r = self.kernel_size // 2
        padding = [[0, 0], [0, 0], [pad_l, pad_r]]
        x = F.pad(x, commons.convert_pad_shape(padding))
        return x
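To make the two padding helpers above concrete, here is a small sketch with illustrative values for kernel_size=3: `_causal_padding` pads only on the left, so a convolution at position t never reads inputs after t, while `_same_padding` splits the padding to keep the output centered:

import torch
import torch.nn.functional as F

x = torch.arange(5.0).view(1, 1, 5)      # [batch, channels, time]
k = 3                                    # kernel_size
causal = F.pad(x, (k - 1, 0))            # left-only padding, as in _causal_padding
same = F.pad(x, ((k - 1) // 2, k // 2))  # split padding, as in _same_padding
print(causal)  # tensor([[[0., 0., 0., 1., 2., 3., 4.]]])
print(same)    # tensor([[[0., 0., 1., 2., 3., 4., 0.]]])
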
spaces/Benson/text-generation/Examples/Asus Rt-n56u Firmware Download.md
DELETED
@@ -1,93 +0,0 @@
|
|
<br />
<h1>How to Download and Update the Firmware of the ASUS RT-N56U Router</h1>
<p>Firmware is a software program that controls the hardware functions of your router. It is responsible for providing various features and settings, such as wireless networking, security, parental controls, a guest network, and so on. The firmware also affects the performance and stability of your router, so it is important to keep it updated regularly.</p>
<h2>asus rt-n56u firmware download</h2><br /><p><b><b>Download Zip</b> –––––>>> <a href="https://bltlly.com/2v6MUQ">https://bltlly.com/2v6MUQ</a></b></p><br /><br />
<p>Updating the firmware can bring you many benefits, such as improving the speed and reliability of your wireless connection, fixing bugs and security issues, adding new features and functions, and improving compatibility with other devices. However, updating the firmware also involves some risks, such as losing your current settings, causing errors or malfunctions, or even bricking the router if something goes wrong.</p>
<p>Therefore, before updating the firmware of your ASUS RT-N56U router, you need to know some basic information and precautions. In this article, we will guide you through the steps of downloading and updating your router's firmware, as well as resetting and troubleshooting it. Follow these steps carefully and you will be able to enjoy a better and safer wireless experience with your router.</p>
<h2>What you need to know before updating the firmware</h2>
<p>Before you start updating your router's firmware, you need to prepare a few things and take some precautions. Here are some tips to follow:</p>
<ul>
<li>Make sure your router is connected to a stable power source, and do not turn it off or unplug it during the update process.</li>
<li>Make sure your computer is connected to your router through a wired or wireless connection. Do not use a VPN or proxy service that could interfere with the update process.</li>

<li>Make sure you have downloaded the correct firmware file for your router model from the official ASUS website. Do not use third-party or unofficial firmware files that could damage your router.</li>
<li>Make sure you have enough free space on your computer or USB flash drive to store the firmware file. The file size may vary depending on the firmware version.</li>
<li>Make sure you have read and understood the instructions and warnings on the ASUS website before updating the firmware. Follow them carefully and do not skip any steps.</li>
</ul>
<h2>How to check the current firmware version of your router</h2>
<p>To check the current firmware version of your router, you need to access its web interface. The web interface is a graphical user interface (GUI) that lets you manage and configure your router's settings. To access it, follow these steps:</p>
<p></p>
<ol>
<li>Open a web browser on your computer and enter your router's IP address or URL in the address bar. The default LAN IP address is 192.168.1.1 and the default URL is <a href="http://www.asusrouter.com">http://www.asusrouter.com</a>.</li>
<li>Enter your login username and password on the login page and then click [Sign In]. The default username and password are both admin. If you have changed them, use the ones you set.</li>
<li>In the web interface, click [Administration] in the left menu and then click [Firmware Upgrade] in the top menu.</li>
<li>On the firmware upgrade page, you will see your router's current firmware version and the latest firmware version available on the ASUS website. You can also check the release notes and the firmware's update history.</li>
</ol>
<p>If your router's firmware version is already up to date, you do not need to update it. However, if a newer version is available, you can download it and update either through the WebGUI or manually.</p>
<h2>How to download the latest firmware version from the ASUS website</h2>

<ol>
<li>Go to the official ASUS website at <a href="https://www.asus.com">https://www.asus.com</a>.</li>
<li>Click [Support] in the top menu and then click [Drivers & Tools] in the drop-down menu.</li>
<li>Enter your router's model name (RT-N56U) in the search box and click [Search].</li>
<li>Select your router model from the search results and then click [Driver & Utility] in the left menu.</li>
<li>Select your operating system from the drop-down menu and then click [Show All].</li>
<li>Find the latest firmware version for your router and then click [Download].</li>
<li>Save the firmware file (.zip) to your computer or USB flash drive. Remember its location and name.</li>
</ol>
<p>You have now downloaded the latest firmware version for your router. You can update it through the WebGUI or manually.</p>
<h2>How to update the firmware via the WebGUI</h2>
<p>To update your router's firmware via the WebGUI, follow these steps:</p>
<ol>
<li>Access your router's web interface as described in the previous section.</li>
<li>In the web interface, click [Administration] in the left menu and then click [Firmware Upgrade] in the top menu.</li>
<li>On the firmware upgrade page, click [Choose File] and then select the firmware file (.zip) you downloaded from the ASUS website.</li>
<li>Click [Upload] and wait for the upload to finish. Do not turn off or unplug the router during this process.</li>
<li>After the upload finishes, click [OK] to start the update process. Do not turn off or unplug the router during this process.</li>
<li>Wait about 5 minutes until the update process finishes. Your router will restart automatically after the update.</li>
</ol>
<p>Congratulations! You have successfully updated your router's firmware via the WebGUI. You can check the new firmware version in the web interface.</p> <h2>How to update the firmware manually</h2>

<ol>
<li>Unzip the firmware file (.zip) you downloaded from the ASUS website. You will get a firmware file (.trx) and a readme file (.txt).</li>
<li>Connect your computer to your router with a LAN cable. Do not use a wireless connection.</li>
<li>Assign a static IP address to your computer. The IP address must be in the same subnet as your router's LAN IP address. For example, if your router's IP address is 192.168.1.1, you can assign 192.168.1.10 to your computer.</li>
<li>Disable any firewall or antivirus software on your computer that could block the update process.</li>
<li>Open a web browser on your computer and enter 192.168.1.1 in the address bar. You will see your router's recovery page.</li>
<li>Click [Browse] and then select the firmware file (.trx) you unzipped.</li>
<li>Click [Upload] and wait for the upload to finish. Do not turn off or unplug the router during this process.</li>
<li>After the upload finishes, wait about 5 minutes until the update process completes. The router will restart automatically after the update.</li>
</ol>
<p>Congratulations! You have updated your router's firmware manually. You can check the new firmware version in the web interface.</p>
<h2>How to reset the router after updating the firmware</h2>
<p>After updating your router's firmware, you may need to reset it to factory default settings and set it up again. This can help you avoid potential problems or conflicts caused by the firmware update. To reset the router after updating the firmware, follow these steps:</p>
<ol>
<li>Locate the reset button on the back of your router. It is a small hole that you can press with a pin or a paper clip.</li>
<li>Press and hold the reset button for about 10 seconds until the power LED blinks.</li>
<li>Release the reset button and wait about 2 minutes until the router restarts.</li>

</ol>
<p>Note: Resetting the router will erase all of your current settings and configuration, so make sure you have a backup of them before resetting.</p> <h2>How to troubleshoot common firmware update problems</h2>
<p>Sometimes you may run into problems or errors during or after the firmware update process. Here are some common problems and solutions you can try:</p>
<ul>
<li>If you cannot access your router's web interface after the update, you may need to clear your browser's cache and cookies, or use a different browser or device.</li>
<li>If you cannot connect to the Internet after the update, you may need to check your WAN settings and make sure they are correct. You can also try restarting your router and modem, or contact your ISP for help.</li>
<li>If your wireless network is not working properly after the update, you may need to check your wireless settings and make sure they are correct. You can also try changing the wireless channel, mode, or security, or scan for nearby wireless networks and avoid interference.</li>
<li>If your router is unresponsive or stuck in a boot loop after the update, you may need to reset it to factory default settings and set it up again. You can also try flashing it manually with the firmware file.</li>
<li>If your router is bricked or damaged after the update, you may need to contact ASUS support for help. You can also try using rescue mode or the recovery tool to restore your router.</li>
</ul>
<p>If none of these solutions work for you, you can look for more information and help on the ASUS website or forum, or contact ASUS support directly.</p>
<h1>Conclusion</h1>

<p>If you have any questions or comments about this article, feel free to leave a comment below. We would love to hear from you and help you with any problems. Thanks for reading, and happy browsing!</p>
<h2>Frequently Asked Questions</h2>
<p>Here are some frequently asked questions and answers about updating the firmware:</p>
<ol>
<li><b>What is the latest firmware version for the ASUS RT-N56U router?</b><br>The latest firmware version for the ASUS RT-N56U router as of June 2023 is 3.0.0.4.382_52288. You can check it on the ASUS website or in your router's web interface.</li>
<li><b>How often should I update my router's firmware?</b><br>There is no fixed rule on how often you should update your router's firmware. It depends on your needs and preferences. However, it is recommended to check for new firmware versions regularly and update when they are available. This can help you keep your router up to date and secure.</li>
<li><b>Can I downgrade my router's firmware?</b><br>Yes, you can downgrade your router's firmware if you are not satisfied with the new version or run into problems. However, this is not recommended, as it can cause problems or conflicts with the router's settings and functions. If you want to downgrade your router's firmware, follow the same steps as updating it manually, but use an older firmware file instead.</li>
<li><b>Can I use custom firmware on my router?</b><br>Yes, you can use custom firmware on your router if you want more features and options that are not available in the official firmware. However, this is not recommended, as it can void your warranty, damage your router, or create security risks. If you want to use custom firmware on your router, be careful and follow the custom firmware developer's instructions.</li>

</ol></p> 64aa2da5cf<br />
<br />
<br />
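<p>As a supplement to the manual steps above, the download itself can also be scripted. The sketch below uses Python's requests library; the firmware URL is a placeholder (copy the real link from the ASUS support page for your model), and the printed SHA-256 is only useful if you compare it against a hash that ASUS publishes:</p>
<pre>
# Hypothetical download helper; FIRMWARE_URL is a placeholder, not a real link.
import hashlib
import requests

FIRMWARE_URL = "https://example.com/RT-N56U_firmware.zip"  # replace with the ASUS link
OUT_PATH = "RT-N56U_firmware.zip"

resp = requests.get(FIRMWARE_URL, stream=True, timeout=60)
resp.raise_for_status()

sha256 = hashlib.sha256()
with open(OUT_PATH, "wb") as f:
    for chunk in resp.iter_content(chunk_size=1 << 20):  # 1 MiB chunks
        f.write(chunk)
        sha256.update(chunk)
print("saved", OUT_PATH, "sha256:", sha256.hexdigest())
</pre>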
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/modeling/roi_heads/roi_heads.py
DELETED
@@ -1,728 +0,0 @@
|
|
1 |
-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
2 |
-
import logging
|
3 |
-
import numpy as np
|
4 |
-
from typing import Dict, List, Optional, Tuple, Union
|
5 |
-
import torch
|
6 |
-
from torch import nn
|
7 |
-
|
8 |
-
from detectron2.layers import ShapeSpec
|
9 |
-
from detectron2.structures import Boxes, ImageList, Instances, pairwise_iou
|
10 |
-
from detectron2.utils.events import get_event_storage
|
11 |
-
from detectron2.utils.registry import Registry
|
12 |
-
|
13 |
-
from ..backbone.resnet import BottleneckBlock, make_stage
|
14 |
-
from ..matcher import Matcher
|
15 |
-
from ..poolers import ROIPooler
|
16 |
-
from ..proposal_generator.proposal_utils import add_ground_truth_to_proposals
|
17 |
-
from ..sampling import subsample_labels
|
18 |
-
from .box_head import build_box_head
|
19 |
-
from .fast_rcnn import FastRCNNOutputLayers
|
20 |
-
from .keypoint_head import build_keypoint_head
|
21 |
-
from .mask_head import build_mask_head
|
22 |
-
|
23 |
-
ROI_HEADS_REGISTRY = Registry("ROI_HEADS")
|
24 |
-
ROI_HEADS_REGISTRY.__doc__ = """
|
25 |
-
Registry for ROI heads in a generalized R-CNN model.
|
26 |
-
ROIHeads take feature maps and region proposals, and
|
27 |
-
perform per-region computation.
|
28 |
-
|
29 |
-
The registered object will be called with `obj(cfg, input_shape)`.
|
30 |
-
The call is expected to return an :class:`ROIHeads`.
|
31 |
-
"""
|
32 |
-
|
33 |
-
logger = logging.getLogger(__name__)
|
34 |
-
|
35 |
-
|
36 |
-
def build_roi_heads(cfg, input_shape):
|
37 |
-
"""
|
38 |
-
Build ROIHeads defined by `cfg.MODEL.ROI_HEADS.NAME`.
|
39 |
-
"""
|
40 |
-
name = cfg.MODEL.ROI_HEADS.NAME
|
41 |
-
return ROI_HEADS_REGISTRY.get(name)(cfg, input_shape)
|
42 |
-
|
43 |
-
|
44 |
-
def select_foreground_proposals(
|
45 |
-
proposals: List[Instances], bg_label: int
|
46 |
-
) -> Tuple[List[Instances], List[torch.Tensor]]:
|
47 |
-
"""
|
48 |
-
Given a list of N Instances (for N images), each containing a `gt_classes` field,
|
49 |
-
return a list of Instances that contain only instances with `gt_classes != -1 &&
|
50 |
-
gt_classes != bg_label`.
|
51 |
-
|
52 |
-
Args:
|
53 |
-
proposals (list[Instances]): A list of N Instances, where N is the number of
|
54 |
-
images in the batch.
|
55 |
-
bg_label: label index of background class.
|
56 |
-
|
57 |
-
Returns:
|
58 |
-
list[Instances]: N Instances, each contains only the selected foreground instances.
|
59 |
-
list[Tensor]: N boolean vector, correspond to the selection mask of
|
60 |
-
each Instances object. True for selected instances.
|
61 |
-
"""
|
62 |
-
assert isinstance(proposals, (list, tuple))
|
63 |
-
assert isinstance(proposals[0], Instances)
|
64 |
-
assert proposals[0].has("gt_classes")
|
65 |
-
fg_proposals = []
|
66 |
-
fg_selection_masks = []
|
67 |
-
for proposals_per_image in proposals:
|
68 |
-
gt_classes = proposals_per_image.gt_classes
|
69 |
-
fg_selection_mask = (gt_classes != -1) & (gt_classes != bg_label)
|
70 |
-
fg_idxs = fg_selection_mask.nonzero().squeeze(1)
|
71 |
-
fg_proposals.append(proposals_per_image[fg_idxs])
|
72 |
-
fg_selection_masks.append(fg_selection_mask)
|
73 |
-
return fg_proposals, fg_selection_masks
|
74 |
-
|
75 |
-
|
76 |
-
def select_proposals_with_visible_keypoints(
|
77 |
-
proposals: List[Instances],
|
78 |
-
) -> List[Instances]:
|
79 |
-
"""
|
80 |
-
Args:
|
81 |
-
proposals (list[Instances]): a list of N Instances, where N is the
|
82 |
-
number of images.
|
83 |
-
|
84 |
-
Returns:
|
85 |
-
proposals: only contains proposals with at least one visible keypoint.
|
86 |
-
|
87 |
-
Note that this is still slightly different from Detectron.
|
88 |
-
In Detectron, proposals for training keypoint head are re-sampled from
|
89 |
-
all the proposals with IOU>threshold & >=1 visible keypoint.
|
90 |
-
|
91 |
-
Here, the proposals are first sampled from all proposals with
|
92 |
-
IOU>threshold, then proposals with no visible keypoint are filtered out.
|
93 |
-
This strategy seems to make no difference on Detectron and is easier to implement.
|
94 |
-
"""
|
95 |
-
ret = []
|
96 |
-
all_num_fg = []
|
97 |
-
for proposals_per_image in proposals:
|
98 |
-
# If empty/unannotated image (hard negatives), skip filtering for train
|
99 |
-
if len(proposals_per_image) == 0:
|
100 |
-
ret.append(proposals_per_image)
|
101 |
-
continue
|
102 |
-
gt_keypoints = proposals_per_image.gt_keypoints.tensor
|
103 |
-
# #fg x K x 3
|
104 |
-
vis_mask = gt_keypoints[:, :, 2] >= 1
|
105 |
-
xs, ys = gt_keypoints[:, :, 0], gt_keypoints[:, :, 1]
|
106 |
-
proposal_boxes = proposals_per_image.proposal_boxes.tensor.unsqueeze(
|
107 |
-
dim=1
|
108 |
-
) # #fg x 1 x 4
|
109 |
-
kp_in_box = (
|
110 |
-
(xs >= proposal_boxes[:, :, 0])
|
111 |
-
& (xs <= proposal_boxes[:, :, 2])
|
112 |
-
& (ys >= proposal_boxes[:, :, 1])
|
113 |
-
& (ys <= proposal_boxes[:, :, 3])
|
114 |
-
)
|
115 |
-
selection = (kp_in_box & vis_mask).any(dim=1)
|
116 |
-
selection_idxs = torch.nonzero(selection).squeeze(1)
|
117 |
-
all_num_fg.append(selection_idxs.numel())
|
118 |
-
ret.append(proposals_per_image[selection_idxs])
|
119 |
-
|
120 |
-
storage = get_event_storage()
|
121 |
-
storage.put_scalar("keypoint_head/num_fg_samples", np.mean(all_num_fg))
|
122 |
-
return ret
|
123 |
-
|
124 |
-
|
125 |
-
class ROIHeads(torch.nn.Module):
|
126 |
-
"""
|
127 |
-
ROIHeads perform all per-region computation in an R-CNN.
|
128 |
-
|
129 |
-
It contains logic of cropping the regions, extract per-region features,
|
130 |
-
and make per-region predictions.
|
131 |
-
|
132 |
-
It can have many variants, implemented as subclasses of this class.
|
133 |
-
"""
|
134 |
-
|
135 |
-
def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]):
|
136 |
-
super(ROIHeads, self).__init__()
|
137 |
-
# fmt: off
|
138 |
-
self.batch_size_per_image = cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE
|
139 |
-
self.positive_sample_fraction = cfg.MODEL.ROI_HEADS.POSITIVE_FRACTION
|
140 |
-
self.in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES
|
141 |
-
self.num_classes = cfg.MODEL.ROI_HEADS.NUM_CLASSES
|
142 |
-
self.proposal_append_gt = cfg.MODEL.ROI_HEADS.PROPOSAL_APPEND_GT
|
143 |
-
# fmt: on
|
144 |
-
|
145 |
-
# Matcher to assign box proposals to gt boxes
|
146 |
-
self.proposal_matcher = Matcher(
|
147 |
-
cfg.MODEL.ROI_HEADS.IOU_THRESHOLDS,
|
148 |
-
cfg.MODEL.ROI_HEADS.IOU_LABELS,
|
149 |
-
allow_low_quality_matches=False,
|
150 |
-
)
|
151 |
-
|
152 |
-
def _sample_proposals(
|
153 |
-
self,
|
154 |
-
matched_idxs: torch.Tensor,
|
155 |
-
matched_labels: torch.Tensor,
|
156 |
-
gt_classes: torch.Tensor,
|
157 |
-
) -> Tuple[torch.Tensor, torch.Tensor]:
|
158 |
-
"""
|
159 |
-
Based on the matching between N proposals and M groundtruth,
|
160 |
-
sample the proposals and set their classification labels.
|
161 |
-
|
162 |
-
Args:
|
163 |
-
matched_idxs (Tensor): a vector of length N, each is the best-matched
|
164 |
-
gt index in [0, M) for each proposal.
|
165 |
-
matched_labels (Tensor): a vector of length N, the matcher's label
|
166 |
-
(one of cfg.MODEL.ROI_HEADS.IOU_LABELS) for each proposal.
|
167 |
-
gt_classes (Tensor): a vector of length M.
|
168 |
-
|
169 |
-
Returns:
|
170 |
-
Tensor: a vector of indices of sampled proposals. Each is in [0, N).
|
171 |
-
Tensor: a vector of the same length, the classification label for
|
172 |
-
each sampled proposal. Each sample is labeled as either a category in
|
173 |
-
[0, num_classes) or the background (num_classes).
|
174 |
-
"""
|
175 |
-
has_gt = gt_classes.numel() > 0
|
176 |
-
# Get the corresponding GT for each proposal
|
177 |
-
if has_gt:
|
178 |
-
gt_classes = gt_classes[matched_idxs]
|
179 |
-
# Label unmatched proposals (0 label from matcher) as background (label=num_classes)
|
180 |
-
gt_classes[matched_labels == 0] = self.num_classes
|
181 |
-
# Label ignore proposals (-1 label)
|
182 |
-
gt_classes[matched_labels == -1] = -1
|
183 |
-
else:
|
184 |
-
gt_classes = torch.zeros_like(matched_idxs) + self.num_classes
|
185 |
-
|
186 |
-
sampled_fg_idxs, sampled_bg_idxs = subsample_labels(
|
187 |
-
gt_classes,
|
188 |
-
self.batch_size_per_image,
|
189 |
-
self.positive_sample_fraction,
|
190 |
-
self.num_classes,
|
191 |
-
)
|
192 |
-
|
193 |
-
sampled_idxs = torch.cat([sampled_fg_idxs, sampled_bg_idxs], dim=0)
|
194 |
-
return sampled_idxs, gt_classes[sampled_idxs]
|
195 |
-
|
196 |
-
@torch.no_grad()
|
197 |
-
def label_and_sample_proposals(
|
198 |
-
self, proposals: List[Instances], targets: List[Instances]
|
199 |
-
) -> List[Instances]:
|
200 |
-
"""
|
201 |
-
Prepare some proposals to be used to train the ROI heads.
|
202 |
-
It performs box matching between `proposals` and `targets`, and assigns
|
203 |
-
training labels to the proposals.
|
204 |
-
It returns ``self.batch_size_per_image`` random samples from proposals and groundtruth
|
205 |
-
boxes, with a fraction of positives that is no larger than
|
206 |
-
``self.positive_sample_fraction``.
|
207 |
-
|
208 |
-
Args:
|
209 |
-
See :meth:`ROIHeads.forward`
|
210 |
-
|
211 |
-
Returns:
|
212 |
-
list[Instances]:
|
213 |
-
length `N` list of `Instances`s containing the proposals
|
214 |
-
sampled for training. Each `Instances` has the following fields:
|
215 |
-
|
216 |
-
- proposal_boxes: the proposal boxes
|
217 |
-
- gt_boxes: the ground-truth box that the proposal is assigned to
|
218 |
-
(this is only meaningful if the proposal has a label > 0; if label = 0
|
219 |
-
then the ground-truth box is random)
|
220 |
-
|
221 |
-
Other fields such as "gt_classes", "gt_masks", that's included in `targets`.
|
222 |
-
"""
|
223 |
-
gt_boxes = [x.gt_boxes for x in targets]
|
224 |
-
# Augment proposals with ground-truth boxes.
|
225 |
-
# In the case of learned proposals (e.g., RPN), when training starts
|
226 |
-
# the proposals will be low quality due to random initialization.
|
227 |
-
# It's possible that none of these initial
|
228 |
-
# proposals have high enough overlap with the gt objects to be used
|
229 |
-
# as positive examples for the second stage components (box head,
|
230 |
-
# cls head, mask head). Adding the gt boxes to the set of proposals
|
231 |
-
# ensures that the second stage components will have some positive
|
232 |
-
# examples from the start of training. For RPN, this augmentation improves
|
233 |
-
# convergence and empirically improves box AP on COCO by about 0.5
|
234 |
-
# points (under one tested configuration).
|
235 |
-
if self.proposal_append_gt:
|
236 |
-
proposals = add_ground_truth_to_proposals(gt_boxes, proposals)
|
237 |
-
|
238 |
-
proposals_with_gt = []
|
239 |
-
|
240 |
-
num_fg_samples = []
|
241 |
-
num_bg_samples = []
|
242 |
-
for proposals_per_image, targets_per_image in zip(proposals, targets):
|
243 |
-
has_gt = len(targets_per_image) > 0
|
244 |
-
match_quality_matrix = pairwise_iou(
|
245 |
-
targets_per_image.gt_boxes, proposals_per_image.proposal_boxes
|
246 |
-
)
|
247 |
-
matched_idxs, matched_labels = self.proposal_matcher(match_quality_matrix)
|
248 |
-
sampled_idxs, gt_classes = self._sample_proposals(
|
249 |
-
matched_idxs, matched_labels, targets_per_image.gt_classes
|
250 |
-
)
|
251 |
-
|
252 |
-
# Set target attributes of the sampled proposals:
|
253 |
-
proposals_per_image = proposals_per_image[sampled_idxs]
|
254 |
-
proposals_per_image.gt_classes = gt_classes
|
255 |
-
|
256 |
-
# We index all the attributes of targets that start with "gt_"
|
257 |
-
# and have not been added to proposals yet (="gt_classes").
|
258 |
-
if has_gt:
|
259 |
-
sampled_targets = matched_idxs[sampled_idxs]
|
260 |
-
# NOTE: here the indexing waste some compute, because heads
|
261 |
-
# like masks, keypoints, etc, will filter the proposals again,
|
262 |
-
# (by foreground/background, or number of keypoints in the image, etc)
|
263 |
-
# so we essentially index the data twice.
|
264 |
-
for (trg_name, trg_value) in targets_per_image.get_fields().items():
|
265 |
-
if trg_name.startswith("gt_") and not proposals_per_image.has(
|
266 |
-
trg_name
|
267 |
-
):
|
268 |
-
proposals_per_image.set(trg_name, trg_value[sampled_targets])
|
269 |
-
else:
|
270 |
-
gt_boxes = Boxes(
|
271 |
-
targets_per_image.gt_boxes.tensor.new_zeros((len(sampled_idxs), 4))
|
272 |
-
)
|
273 |
-
proposals_per_image.gt_boxes = gt_boxes
|
274 |
-
|
275 |
-
num_bg_samples.append((gt_classes == self.num_classes).sum().item())
|
276 |
-
num_fg_samples.append(gt_classes.numel() - num_bg_samples[-1])
|
277 |
-
proposals_with_gt.append(proposals_per_image)
|
278 |
-
|
279 |
-
# Log the number of fg/bg samples that are selected for training ROI heads
|
280 |
-
storage = get_event_storage()
|
281 |
-
storage.put_scalar("roi_head/num_fg_samples", np.mean(num_fg_samples))
|
282 |
-
storage.put_scalar("roi_head/num_bg_samples", np.mean(num_bg_samples))
|
283 |
-
|
284 |
-
return proposals_with_gt
|
285 |
-
|
286 |
-
def forward(
|
287 |
-
self,
|
288 |
-
images: ImageList,
|
289 |
-
features: Dict[str, torch.Tensor],
|
290 |
-
proposals: List[Instances],
|
291 |
-
targets: Optional[List[Instances]] = None,
|
292 |
-
) -> Tuple[List[Instances], Dict[str, torch.Tensor]]:
|
293 |
-
"""
|
294 |
-
Args:
|
295 |
-
images (ImageList):
|
296 |
-
features (dict[str,Tensor]): input data as a mapping from feature
|
297 |
-
map name to tensor. Axis 0 represents the number of images `N` in
|
298 |
-
the input data; axes 1-3 are channels, height, and width, which may
|
299 |
-
vary between feature maps (e.g., if a feature pyramid is used).
|
300 |
-
proposals (list[Instances]): length `N` list of `Instances`. The i-th
|
301 |
-
`Instances` contains object proposals for the i-th input image,
|
302 |
-
with fields "proposal_boxes" and "objectness_logits".
|
303 |
-
targets (list[Instances], optional): length `N` list of `Instances`. The i-th
|
304 |
-
`Instances` contains the ground-truth per-instance annotations
|
305 |
-
for the i-th input image. Specify `targets` during training only.
|
306 |
-
It may have the following fields:
|
307 |
-
|
308 |
-
- gt_boxes: the bounding box of each instance.
|
309 |
-
- gt_classes: the label for each instance with a category ranging in [0, #class].
|
310 |
-
- gt_masks: PolygonMasks or BitMasks, the ground-truth masks of each instance.
|
311 |
-
- gt_keypoints: NxKx3, the groud-truth keypoints for each instance.
|
312 |
-
|
313 |
-
Returns:
|
314 |
-
list[Instances]: length `N` list of `Instances` containing the
|
315 |
-
detected instances. Returned during inference only; may be [] during training.
|
316 |
-
|
317 |
-
dict[str->Tensor]:
|
318 |
-
mapping from a named loss to a tensor storing the loss. Used during training only.
|
319 |
-
"""
|
320 |
-
raise NotImplementedError()
|
321 |
-
|
322 |
-
|
323 |
-
@ROI_HEADS_REGISTRY.register()
|
324 |
-
class Res5ROIHeads(ROIHeads):
|
325 |
-
"""
|
326 |
-
The ROIHeads in a typical "C4" R-CNN model, where
|
327 |
-
the box and mask head share the cropping and
|
328 |
-
the per-region feature computation by a Res5 block.
|
329 |
-
"""
|
330 |
-
|
331 |
-
def __init__(self, cfg, input_shape):
|
332 |
-
super().__init__(cfg, input_shape)
|
333 |
-
|
334 |
-
assert len(self.in_features) == 1
|
335 |
-
|
336 |
-
# fmt: off
|
337 |
-
pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
|
338 |
-
pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
|
339 |
-
pooler_scales = (1.0 / input_shape[self.in_features[0]].stride, )
|
340 |
-
sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
|
341 |
-
self.mask_on = cfg.MODEL.MASK_ON
|
342 |
-
# fmt: on
|
343 |
-
assert not cfg.MODEL.KEYPOINT_ON
|
344 |
-
|
345 |
-
self.pooler = ROIPooler(
|
346 |
-
output_size=pooler_resolution,
|
347 |
-
scales=pooler_scales,
|
348 |
-
sampling_ratio=sampling_ratio,
|
349 |
-
pooler_type=pooler_type,
|
350 |
-
)
|
351 |
-
|
352 |
-
self.res5, out_channels = self._build_res5_block(cfg)
|
353 |
-
self.box_predictor = FastRCNNOutputLayers(
|
354 |
-
cfg, ShapeSpec(channels=out_channels, height=1, width=1)
|
355 |
-
)
|
356 |
-
|
357 |
-
if self.mask_on:
|
358 |
-
self.mask_head = build_mask_head(
|
359 |
-
cfg,
|
360 |
-
ShapeSpec(
|
361 |
-
channels=out_channels,
|
362 |
-
width=pooler_resolution,
|
363 |
-
height=pooler_resolution,
|
364 |
-
),
|
365 |
-
)
|
366 |
-
|
367 |
-
def _build_res5_block(self, cfg):
|
368 |
-
# fmt: off
|
369 |
-
stage_channel_factor = 2 ** 3 # res5 is 8x res2
|
370 |
-
num_groups = cfg.MODEL.RESNETS.NUM_GROUPS
|
371 |
-
width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP
|
372 |
-
bottleneck_channels = num_groups * width_per_group * stage_channel_factor
|
373 |
-
out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS * stage_channel_factor
|
374 |
-
stride_in_1x1 = cfg.MODEL.RESNETS.STRIDE_IN_1X1
|
375 |
-
norm = cfg.MODEL.RESNETS.NORM
|
376 |
-
assert not cfg.MODEL.RESNETS.DEFORM_ON_PER_STAGE[-1], \
|
377 |
-
"Deformable conv is not yet supported in res5 head."
|
378 |
-
# fmt: on
|
379 |
-
|
380 |
-
blocks = make_stage(
|
381 |
-
BottleneckBlock,
|
382 |
-
3,
|
383 |
-
first_stride=2,
|
384 |
-
in_channels=out_channels // 2,
|
385 |
-
bottleneck_channels=bottleneck_channels,
|
386 |
-
out_channels=out_channels,
|
387 |
-
num_groups=num_groups,
|
388 |
-
norm=norm,
|
389 |
-
stride_in_1x1=stride_in_1x1,
|
390 |
-
)
|
391 |
-
return nn.Sequential(*blocks), out_channels
|
392 |
-
|
393 |
-
def _shared_roi_transform(self, features, boxes):
|
394 |
-
x = self.pooler(features, boxes)
|
395 |
-
return self.res5(x)
|
396 |
-
|
397 |
-
def forward(self, images, features, proposals, targets=None):
|
398 |
-
"""
|
399 |
-
See :meth:`ROIHeads.forward`.
|
400 |
-
"""
|
401 |
-
del images
|
402 |
-
|
403 |
-
if self.training:
|
404 |
-
assert targets
|
405 |
-
proposals = self.label_and_sample_proposals(proposals, targets)
|
406 |
-
del targets
|
407 |
-
|
408 |
-
proposal_boxes = [x.proposal_boxes for x in proposals]
|
409 |
-
box_features = self._shared_roi_transform(
|
410 |
-
[features[f] for f in self.in_features], proposal_boxes
|
411 |
-
)
|
412 |
-
predictions = self.box_predictor(box_features.mean(dim=[2, 3]))
|
413 |
-
|
414 |
-
if self.training:
|
415 |
-
del features
|
416 |
-
losses = self.box_predictor.losses(predictions, proposals)
|
417 |
-
if self.mask_on:
|
418 |
-
proposals, fg_selection_masks = select_foreground_proposals(
|
419 |
-
proposals, self.num_classes
|
420 |
-
)
|
421 |
-
# Since the ROI feature transform is shared between boxes and masks,
|
422 |
-
# we don't need to recompute features. The mask loss is only defined
|
423 |
-
# on foreground proposals, so we need to select out the foreground
|
424 |
-
# features.
|
425 |
-
mask_features = box_features[torch.cat(fg_selection_masks, dim=0)]
|
426 |
-
del box_features
|
427 |
-
losses.update(self.mask_head(mask_features, proposals))
|
428 |
-
return [], losses
|
429 |
-
else:
|
430 |
-
pred_instances, _ = self.box_predictor.inference(predictions, proposals)
|
431 |
-
pred_instances = self.forward_with_given_boxes(features, pred_instances)
|
432 |
-
return pred_instances, {}
|
433 |
-
|
434 |
-
def forward_with_given_boxes(self, features, instances):
|
435 |
-
"""
|
436 |
-
Use the given boxes in `instances` to produce other (non-box) per-ROI outputs.
|
437 |
-
|
438 |
-
Args:
|
439 |
-
features: same as in `forward()`
|
440 |
-
instances (list[Instances]): instances to predict other outputs. Expect the keys
|
441 |
-
"pred_boxes" and "pred_classes" to exist.
|
442 |
-
|
443 |
-
Returns:
|
444 |
-
instances (Instances):
|
445 |
-
the same `Instances` object, with extra
|
446 |
-
fields such as `pred_masks` or `pred_keypoints`.
|
447 |
-
"""
|
448 |
-
assert not self.training
|
449 |
-
assert instances[0].has("pred_boxes") and instances[0].has("pred_classes")
|
450 |
-
|
451 |
-
if self.mask_on:
|
452 |
-
features = [features[f] for f in self.in_features]
|
453 |
-
x = self._shared_roi_transform(features, [x.pred_boxes for x in instances])
|
454 |
-
return self.mask_head(x, instances)
|
455 |
-
else:
|
456 |
-
return instances
|
457 |
-
|
458 |
-
|
459 |
-
@ROI_HEADS_REGISTRY.register()
|
460 |
-
class StandardROIHeads(ROIHeads):
|
461 |
-
"""
|
462 |
-
It's "standard" in a sense that there is no ROI transform sharing
|
463 |
-
or feature sharing between tasks.
|
464 |
-
The cropped rois go to separate branches (boxes and masks) directly.
|
465 |
-
This way, it is easier to make separate abstractions for different branches.
|
466 |
-
|
467 |
-
This class is used by most models, such as FPN and C5.
|
468 |
-
To implement more models, you can subclass it and implement a different
|
469 |
-
:meth:`forward()` or a head.
|
470 |
-
"""
|
471 |
-
|
472 |
-
def __init__(self, cfg, input_shape):
|
473 |
-
super(StandardROIHeads, self).__init__(cfg, input_shape)
|
474 |
-
self._init_box_head(cfg, input_shape)
|
475 |
-
self._init_mask_head(cfg, input_shape)
|
476 |
-
self._init_keypoint_head(cfg, input_shape)
|
477 |
-
|
478 |
-
def _init_box_head(self, cfg, input_shape):
|
479 |
-
# fmt: off
|
480 |
-
pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
|
481 |
-
pooler_scales = tuple(1.0 / input_shape[k].stride for k in self.in_features)
|
482 |
-
sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
|
483 |
-
pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
|
484 |
-
self.train_on_pred_boxes = cfg.MODEL.ROI_BOX_HEAD.TRAIN_ON_PRED_BOXES
|
485 |
-
# fmt: on
|
486 |
-
|
487 |
-
# If StandardROIHeads is applied on multiple feature maps (as in FPN),
|
488 |
-
# then we share the same predictors and therefore the channel counts must be the same
|
489 |
-
in_channels = [input_shape[f].channels for f in self.in_features]
|
490 |
-
# Check all channel counts are equal
|
491 |
-
assert len(set(in_channels)) == 1, in_channels
|
492 |
-
in_channels = in_channels[0]
|
493 |
-
|
494 |
-
self.box_pooler = ROIPooler(
|
495 |
-
output_size=pooler_resolution,
|
496 |
-
scales=pooler_scales,
|
497 |
-
sampling_ratio=sampling_ratio,
|
498 |
-
pooler_type=pooler_type,
|
499 |
-
)
|
500 |
-
# Here we split "box head" and "box predictor", which is mainly due to historical reasons.
|
501 |
-
# They are used together so the "box predictor" layers should be part of the "box head".
|
502 |
-
# New subclasses of ROIHeads do not need "box predictor"s.
|
503 |
-
self.box_head = build_box_head(
|
504 |
-
cfg,
|
505 |
-
ShapeSpec(
|
506 |
-
channels=in_channels, height=pooler_resolution, width=pooler_resolution
|
507 |
-
),
|
508 |
-
)
|
509 |
-
self.box_predictor = FastRCNNOutputLayers(cfg, self.box_head.output_shape)
|
510 |
-
|
511 |
-
def _init_mask_head(self, cfg, input_shape):
|
512 |
-
# fmt: off
|
513 |
-
self.mask_on = cfg.MODEL.MASK_ON
|
514 |
-
if not self.mask_on:
|
515 |
-
return
|
516 |
-
pooler_resolution = cfg.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION
|
517 |
-
pooler_scales = tuple(1.0 / input_shape[k].stride for k in self.in_features)
|
518 |
-
sampling_ratio = cfg.MODEL.ROI_MASK_HEAD.POOLER_SAMPLING_RATIO
|
519 |
-
pooler_type = cfg.MODEL.ROI_MASK_HEAD.POOLER_TYPE
|
520 |
-
# fmt: on
|
521 |
-
|
522 |
-
in_channels = [input_shape[f].channels for f in self.in_features][0]
|
523 |
-
|
524 |
-
self.mask_pooler = ROIPooler(
|
525 |
-
output_size=pooler_resolution,
|
526 |
-
scales=pooler_scales,
|
527 |
-
sampling_ratio=sampling_ratio,
|
528 |
-
pooler_type=pooler_type,
|
529 |
-
)
|
530 |
-
self.mask_head = build_mask_head(
|
531 |
-
cfg,
|
532 |
-
ShapeSpec(
|
533 |
-
channels=in_channels, width=pooler_resolution, height=pooler_resolution
|
534 |
-
),
|
535 |
-
)
|
536 |
-
|
537 |
-
def _init_keypoint_head(self, cfg, input_shape):
|
538 |
-
# fmt: off
|
539 |
-
self.keypoint_on = cfg.MODEL.KEYPOINT_ON
|
540 |
-
if not self.keypoint_on:
|
541 |
-
return
|
542 |
-
pooler_resolution = cfg.MODEL.ROI_KEYPOINT_HEAD.POOLER_RESOLUTION
|
543 |
-
pooler_scales = tuple(1.0 / input_shape[k].stride for k in self.in_features) # noqa
|
544 |
-
sampling_ratio = cfg.MODEL.ROI_KEYPOINT_HEAD.POOLER_SAMPLING_RATIO
|
545 |
-
pooler_type = cfg.MODEL.ROI_KEYPOINT_HEAD.POOLER_TYPE
|
546 |
-
# fmt: on
|
547 |
-
|
548 |
-
in_channels = [input_shape[f].channels for f in self.in_features][0]
|
549 |
-
|
550 |
-
self.keypoint_pooler = ROIPooler(
|
551 |
-
output_size=pooler_resolution,
|
552 |
-
scales=pooler_scales,
|
553 |
-
sampling_ratio=sampling_ratio,
|
554 |
-
pooler_type=pooler_type,
|
555 |
-
)
|
556 |
-
self.keypoint_head = build_keypoint_head(
|
557 |
-
cfg,
|
558 |
-
ShapeSpec(
|
559 |
-
channels=in_channels, width=pooler_resolution, height=pooler_resolution
|
560 |
-
),
|
561 |
-
)
|
562 |
-
|
563 |
-
def forward(
|
564 |
-
self,
|
565 |
-
images: ImageList,
|
566 |
-
features: Dict[str, torch.Tensor],
|
567 |
-
proposals: List[Instances],
|
568 |
-
targets: Optional[List[Instances]] = None,
|
569 |
-
) -> Tuple[List[Instances], Dict[str, torch.Tensor]]:
|
570 |
-
"""
|
571 |
-
See :class:`ROIHeads.forward`.
|
572 |
-
"""
|
573 |
-
del images
|
574 |
-
if self.training:
|
575 |
-
assert targets
|
576 |
-
proposals = self.label_and_sample_proposals(proposals, targets)
|
577 |
-
del targets
|
578 |
-
|
579 |
-
if self.training:
|
580 |
-
losses = self._forward_box(features, proposals)
|
581 |
-
# Usually the original proposals used by the box head are used by the mask, keypoint
|
582 |
-
# heads. But when `self.train_on_pred_boxes is True`, proposals will contain boxes
|
583 |
-
# predicted by the box head.
|
584 |
-
losses.update(self._forward_mask(features, proposals))
|
585 |
-
losses.update(self._forward_keypoint(features, proposals))
|
586 |
-
return proposals, losses
|
587 |
-
else:
|
588 |
-
pred_instances, box_features = self._forward_box(features, proposals)
|
589 |
-
# During inference cascaded prediction is used: the mask and keypoints heads are only
|
590 |
-
# applied to the top scoring box detections.
|
591 |
-
pred_instances = self.forward_with_given_boxes(features, pred_instances)
|
592 |
-
return pred_instances, box_features
|
593 |
-
|
594 |
-
def forward_with_given_boxes(
|
595 |
-
self, features: Dict[str, torch.Tensor], instances: List[Instances]
|
596 |
-
) -> List[Instances]:
|
597 |
-
"""
|
598 |
-
Use the given boxes in `instances` to produce other (non-box) per-ROI outputs.
|
599 |
-
|
600 |
-
This is useful for downstream tasks where a box is known, but need to obtain
|
601 |
-
other attributes (outputs of other heads).
|
602 |
-
Test-time augmentation also uses this.
|
603 |
-
|
604 |
-
Args:
|
605 |
-
features: same as in `forward()`
|
606 |
-
instances (list[Instances]): instances to predict other outputs. Expect the keys
|
607 |
-
"pred_boxes" and "pred_classes" to exist.
|
608 |
-
|
609 |
-
Returns:
|
610 |
-
instances (list[Instances]):
|
611 |
-
the same `Instances` objects, with extra
|
612 |
-
fields such as `pred_masks` or `pred_keypoints`.
|
613 |
-
"""
|
614 |
-
assert not self.training
|
615 |
-
assert instances[0].has("pred_boxes") and instances[0].has("pred_classes")
|
616 |
-
|
617 |
-
instances = self._forward_mask(features, instances)
|
618 |
-
instances = self._forward_keypoint(features, instances)
|
619 |
-
return instances
|
620 |
-
|
621 |
-
def _forward_box(
|
622 |
-
self, features: Dict[str, torch.Tensor], proposals: List[Instances]
|
623 |
-
) -> Union[Dict[str, torch.Tensor], List[Instances]]:
|
624 |
-
"""
|
625 |
-
-        Forward logic of the box prediction branch. If `self.train_on_pred_boxes is True`,
-        the function puts predicted boxes in the `proposal_boxes` field of the `proposals` argument.
-
-        Args:
-            features (dict[str, Tensor]): mapping from feature map names to tensor.
-                Same as in :meth:`ROIHeads.forward`.
-            proposals (list[Instances]): the per-image object proposals with
-                their matching ground truth.
-                Each has fields "proposal_boxes", "objectness_logits",
-                "gt_classes", "gt_boxes".
-
-        Returns:
-            In training, a dict of losses.
-            In inference, a list of `Instances`, the predicted instances.
-        """
-        features = [features[f] for f in self.in_features]
-        box_features = self.box_pooler(features, [x.proposal_boxes for x in proposals])
-        box_features = self.box_head(box_features)
-        predictions = self.box_predictor(box_features)
-        # del box_features
-
-        if self.training:
-            if self.train_on_pred_boxes:
-                with torch.no_grad():
-                    pred_boxes = self.box_predictor.predict_boxes_for_gt_classes(
-                        predictions, proposals
-                    )
-                    for proposals_per_image, pred_boxes_per_image in zip(
-                        proposals, pred_boxes
-                    ):
-                        proposals_per_image.proposal_boxes = Boxes(pred_boxes_per_image)
-            return self.box_predictor.losses(predictions, proposals)
-        else:
-            pred_instances, keep = self.box_predictor.inference(predictions, proposals)
-            box_features = box_features[keep]
-            return pred_instances, box_features
-
-    def _forward_mask(
-        self, features: Dict[str, torch.Tensor], instances: List[Instances]
-    ) -> Union[Dict[str, torch.Tensor], List[Instances]]:
-        """
-        Forward logic of the mask prediction branch.
-
-        Args:
-            features (dict[str, Tensor]): mapping from feature map names to tensor.
-                Same as in :meth:`ROIHeads.forward`.
-            instances (list[Instances]): the per-image instances to train/predict masks.
-                In training, they can be the proposals.
-                In inference, they can be the predicted boxes.
-
-        Returns:
-            In training, a dict of losses.
-            In inference, update `instances` with new fields "pred_masks" and return it.
-        """
-        if not self.mask_on:
-            return {} if self.training else instances
-
-        features = [features[f] for f in self.in_features]
-
-        if self.training:
-            # The loss is only defined on positive proposals.
-            proposals, _ = select_foreground_proposals(instances, self.num_classes)
-            proposal_boxes = [x.proposal_boxes for x in proposals]
-            mask_features = self.mask_pooler(features, proposal_boxes)
-            return self.mask_head(mask_features, proposals)
-        else:
-            pred_boxes = [x.pred_boxes for x in instances]
-            mask_features = self.mask_pooler(features, pred_boxes)
-            return self.mask_head(mask_features, instances)
-
-    def _forward_keypoint(
-        self, features: Dict[str, torch.Tensor], instances: List[Instances]
-    ) -> Union[Dict[str, torch.Tensor], List[Instances]]:
-        """
-        Forward logic of the keypoint prediction branch.
-
-        Args:
-            features (dict[str, Tensor]): mapping from feature map names to tensor.
-                Same as in :meth:`ROIHeads.forward`.
-            instances (list[Instances]): the per-image instances to train/predict keypoints.
-                In training, they can be the proposals.
-                In inference, they can be the predicted boxes.
-
-        Returns:
-            In training, a dict of losses.
-            In inference, update `instances` with new fields "pred_keypoints" and return it.
-        """
-        if not self.keypoint_on:
-            return {} if self.training else instances
-
-        features = [features[f] for f in self.in_features]
-
-        if self.training:
-            # The loss is defined only on positive proposals with >=1 visible keypoints.
-            proposals, _ = select_foreground_proposals(instances, self.num_classes)
-            proposals = select_proposals_with_visible_keypoints(proposals)
-            proposal_boxes = [x.proposal_boxes for x in proposals]
-
-            keypoint_features = self.keypoint_pooler(features, proposal_boxes)
-            return self.keypoint_head(keypoint_features, proposals)
-        else:
-            pred_boxes = [x.pred_boxes for x in instances]
-            keypoint_features = self.keypoint_pooler(features, pred_boxes)
-            return self.keypoint_head(keypoint_features, instances)
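The deleted `_forward_mask` and `_forward_keypoint` branches above share one dispatch pattern: in training, pool features for the positive proposals and return a dict of losses; in inference, pool from predicted boxes, attach a new field to the instances, and return them. A minimal standalone sketch of that pattern (`ToyBranch` and its field names are hypothetical illustrations, not detectron2 API):

import torch

class ToyBranch(torch.nn.Module):
    """Toy reproduction of the train/inference dispatch used above."""

    def __init__(self, dim=8):
        super().__init__()
        self.head = torch.nn.Linear(dim, 1)

    def forward(self, feats, instances):
        if self.training:
            # training: compute a loss on the (toy) pooled features only
            return {"loss_toy": self.head(feats).sigmoid().mean()}
        # inference: attach predictions as a new field and return instances
        for inst, f in zip(instances, feats):
            inst["pred_score"] = self.head(f).sigmoid()
        return instances

branch = ToyBranch()
feats = torch.randn(4, 8)
instances = [{} for _ in range(4)]

branch.train()
print(branch(feats, instances))     # {'loss_toy': tensor(...)}

branch.eval()
print(branch(feats, instances)[0])  # {'pred_score': tensor([...])}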
spaces/CVPR/LIVE/thrust/thrust/detail/config/exec_check_disable.h
DELETED
@@ -1,43 +0,0 @@
-/*
- *  Copyright 2008-2013 NVIDIA Corporation
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-/*! \file exec_check_disable.h
- *  \brief Defines __thrust_exec_check_disable__
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-
-// #pragma nv_exec_check_disable is only recognized by NVCC. Having a macro
-// expand to a #pragma (rather than _Pragma) only works with NVCC's compilation
-// model, not with other compilers.
-#if defined(__CUDACC__) && !defined(__NVCOMPILER_CUDA__) && \
-    !(defined(__CUDA__) && defined(__clang__))
-
-#if THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_MSVC
-#define __thrust_exec_check_disable__ __pragma("nv_exec_check_disable")
-#else // MSVC
-#define __thrust_exec_check_disable__ _Pragma("nv_exec_check_disable")
-#endif // MSVC
-
-#else
-
-#define __thrust_exec_check_disable__
-
-#endif
-
spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/guarded_cuda_runtime_api.h
DELETED
@@ -1,39 +0,0 @@
-/*
- *  Copyright 2008-2013 NVIDIA Corporation
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-
-// the purpose of this header is to check for the existence of macros
-// such as __host__ and __device__, which may already be defined by thrust
-// and to undefine them before entering cuda_runtime_api.h (which will redefine them)
-
-// we only try to do this stuff if cuda/include/host_defines.h has been included
-#if !defined(__HOST_DEFINES_H__)
-
-#ifdef __host__
-#undef __host__
-#endif // __host__
-
-#ifdef __device__
-#undef __device__
-#endif // __device__
-
-#endif // __HOST_DEFINES_H__
-
-#include <cuda_runtime_api.h>
-
spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/binary_search.h
DELETED
@@ -1,174 +0,0 @@
-/*
- *  Copyright 2008-2013 NVIDIA Corporation
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-
-/*! \file binary_search.h
- *  \brief Generic implementations of binary search functions.
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-#include <thrust/system/detail/generic/tag.h>
-
-namespace thrust
-{
-namespace system
-{
-namespace detail
-{
-namespace generic
-{
-
-
-template <typename DerivedPolicy, typename ForwardIterator, typename T>
-__host__ __device__
-ForwardIterator lower_bound(thrust::execution_policy<DerivedPolicy> &exec,
-                            ForwardIterator begin,
-                            ForwardIterator end,
-                            const T& value);
-
-template <typename DerivedPolicy, typename ForwardIterator, typename T, typename StrictWeakOrdering>
-__host__ __device__
-ForwardIterator lower_bound(thrust::execution_policy<DerivedPolicy> &exec,
-                            ForwardIterator begin,
-                            ForwardIterator end,
-                            const T& value,
-                            StrictWeakOrdering comp);
-
-
-template <typename DerivedPolicy, typename ForwardIterator, typename T>
-__host__ __device__
-ForwardIterator upper_bound(thrust::execution_policy<DerivedPolicy> &exec,
-                            ForwardIterator begin,
-                            ForwardIterator end,
-                            const T& value);
-
-template <typename DerivedPolicy, typename ForwardIterator, typename T, typename StrictWeakOrdering>
-__host__ __device__
-ForwardIterator upper_bound(thrust::execution_policy<DerivedPolicy> &exec,
-                            ForwardIterator begin,
-                            ForwardIterator end,
-                            const T& value,
-                            StrictWeakOrdering comp);
-
-
-template <typename DerivedPolicy, typename ForwardIterator, typename T>
-__host__ __device__
-bool binary_search(thrust::execution_policy<DerivedPolicy> &exec,
-                   ForwardIterator begin,
-                   ForwardIterator end,
-                   const T& value);
-
-template <typename DerivedPolicy, typename ForwardIterator, typename T, typename StrictWeakOrdering>
-__host__ __device__
-bool binary_search(thrust::execution_policy<DerivedPolicy> &exec,
-                   ForwardIterator begin,
-                   ForwardIterator end,
-                   const T& value,
-                   StrictWeakOrdering comp);
-
-
-template <typename DerivedPolicy, typename ForwardIterator, typename InputIterator, typename OutputIterator>
-__host__ __device__
-OutputIterator lower_bound(thrust::execution_policy<DerivedPolicy> &exec,
-                           ForwardIterator begin,
-                           ForwardIterator end,
-                           InputIterator values_begin,
-                           InputIterator values_end,
-                           OutputIterator output);
-
-
-template <typename DerivedPolicy, typename ForwardIterator, typename InputIterator, typename OutputIterator, typename StrictWeakOrdering>
-__host__ __device__
-OutputIterator lower_bound(thrust::execution_policy<DerivedPolicy> &exec,
-                           ForwardIterator begin,
-                           ForwardIterator end,
-                           InputIterator values_begin,
-                           InputIterator values_end,
-                           OutputIterator output,
-                           StrictWeakOrdering comp);
-
-
-template <typename DerivedPolicy, typename ForwardIterator, typename InputIterator, typename OutputIterator>
-__host__ __device__
-OutputIterator upper_bound(thrust::execution_policy<DerivedPolicy> &exec,
-                           ForwardIterator begin,
-                           ForwardIterator end,
-                           InputIterator values_begin,
-                           InputIterator values_end,
-                           OutputIterator output);
-
-
-template <typename DerivedPolicy, typename ForwardIterator, typename InputIterator, typename OutputIterator, typename StrictWeakOrdering>
-__host__ __device__
-OutputIterator upper_bound(thrust::execution_policy<DerivedPolicy> &exec,
-                           ForwardIterator begin,
-                           ForwardIterator end,
-                           InputIterator values_begin,
-                           InputIterator values_end,
-                           OutputIterator output,
-                           StrictWeakOrdering comp);
-
-
-template <typename DerivedPolicy, typename ForwardIterator, typename InputIterator, typename OutputIterator>
-__host__ __device__
-OutputIterator binary_search(thrust::execution_policy<DerivedPolicy> &exec,
-                             ForwardIterator begin,
-                             ForwardIterator end,
-                             InputIterator values_begin,
-                             InputIterator values_end,
-                             OutputIterator output);
-
-
-template <typename DerivedPolicy, typename ForwardIterator, typename InputIterator, typename OutputIterator, typename StrictWeakOrdering>
-__host__ __device__
-OutputIterator binary_search(thrust::execution_policy<DerivedPolicy> &exec,
-                             ForwardIterator begin,
-                             ForwardIterator end,
-                             InputIterator values_begin,
-                             InputIterator values_end,
-                             OutputIterator output,
-                             StrictWeakOrdering comp);
-
-
-template <typename DerivedPolicy, typename ForwardIterator, typename LessThanComparable>
-__host__ __device__
-thrust::pair<ForwardIterator,ForwardIterator>
-equal_range(thrust::execution_policy<DerivedPolicy> &exec,
-            ForwardIterator first,
-            ForwardIterator last,
-            const LessThanComparable &value);
-
-
-template <typename DerivedPolicy, typename ForwardIterator, typename LessThanComparable, typename StrictWeakOrdering>
-__host__ __device__
-thrust::pair<ForwardIterator,ForwardIterator>
-equal_range(thrust::execution_policy<DerivedPolicy> &exec,
-            ForwardIterator first,
-            ForwardIterator last,
-            const LessThanComparable &value,
-            StrictWeakOrdering comp);
-
-
-} // end namespace generic
-} // end namespace detail
-} // end namespace system
-} // end namespace thrust
-
-#include <thrust/system/detail/generic/binary_search.inl>
-
spaces/CVPR/v-doc_abstractive_mac/preprocess.py
DELETED
@@ -1,551 +0,0 @@
-import time
-import os
-import random
-import json
-import pickle
-import numpy as np
-from tqdm import tqdm
-from termcolor import colored
-from program_translator import ProgramTranslator #
-from config import config
-
-
-# Print bold text
-def bold(txt):
-    return colored(str(txt), attrs=["bold"])
-
-
-# Print bold and colored text
-def bcolored(txt, color):
-    return colored(str(txt), color, attrs=["bold"])
-
-
-# Write a line to file
-def writeline(f, line):
-    f.write(str(line) + "\n")
-
-
-# Write a list to file
-def writelist(f, l):
-    writeline(f, ",".join(map(str, l)))
-
-
-# 2d list to numpy
-def vectorize2DList(items, minX=0, minY=0, dtype=np.int):
-    maxX = max(len(items), minX)
-    maxY = max([len(item) for item in items] + [minY])
-    t = np.zeros((maxX, maxY), dtype=dtype)
-    tLengths = np.zeros((maxX,), dtype=np.int)
-    for i, item in enumerate(items):
-        t[i, 0:len(item)] = np.array(item, dtype=dtype)
-        tLengths[i] = len(item)
-    return t, tLengths
-
-
-# 3d list to numpy
-def vectorize3DList(items, minX=0, minY=0, minZ=0, dtype=np.int):
-    maxX = max(len(items), minX)
-    maxY = max([len(item) for item in items] + [minY])
-    maxZ = max([len(subitem) for item in items for subitem in item] + [minZ])
-    t = np.zeros((maxX, maxY, maxZ), dtype=dtype)
-    tLengths = np.zeros((maxX, maxY), dtype=np.int)
-    for i, item in enumerate(items):
-        for j, subitem in enumerate(item):
-            t[i, j, 0:len(subitem)] = np.array(subitem, dtype=dtype)
-            tLengths[i, j] = len(subitem)
-    return t, tLengths
-
-
-'''
-Encodes text into integers. Keeps a dictionary between string words (symbols)
-and their matching integers. Supports encoding and decoding.
-'''
-
-
-class SymbolDict(object):
-    def __init__(self, empty=False):
-        self.padding = "<PAD>"
-        self.unknown = "<UNK>"
-        self.start = "<START>"
-        self.end = "<END>"
-
-        self.invalidSymbols = [self.padding, self.unknown, self.start, self.end]
-
-        if empty:
-            self.sym2id = {}
-            self.id2sym = []
-        else:
-            self.sym2id = {self.padding: 0, self.unknown: 1, self.start: 2, self.end: 3}
-            self.id2sym = [self.padding, self.unknown, self.start, self.end]
-        self.allSeqs = []
-
-    def getNumSymbols(self):
-        return len(self.sym2id)
-
-    def isPadding(self, enc):
-        return enc == 0
-
-    def isUnknown(self, enc):
-        return enc == 1
-
-    def isStart(self, enc):
-        return enc == 2
-
-    def isEnd(self, enc):
-        return enc == 3
-
-    def isValid(self, enc):
-        return enc < self.getNumSymbols() and enc >= len(self.invalidSymbols)
-
-    def resetSeqs(self):
-        self.allSeqs = []
-
-    def addSeq(self, seq):
-        self.allSeqs += seq
-
-    # Call to create the words-to-integers vocabulary after reading word sequences (with addSeq).
-    def createVocab(self, minCount=0):
-        counter = {}
-        for symbol in self.allSeqs:
-            counter[symbol] = counter.get(symbol, 0) + 1
-        for symbol in counter:
-            if counter[symbol] > minCount and (symbol not in self.sym2id):
-                self.sym2id[symbol] = self.getNumSymbols()
-                self.id2sym.append(symbol)
-
-    # Encodes a symbol. Returns the matching integer.
-    def encodeSym(self, symbol):
-        if symbol not in self.sym2id:
-            symbol = self.unknown
-        return self.sym2id[symbol]
-
-    '''
-    Encodes a sequence of symbols.
-    Optionally add start, or end symbols.
-    Optionally reverse sequence.
-    '''
-
-    def encodeSequence(self, decoded, addStart=False, addEnd=False, reverse=False):
-        if reverse:
-            decoded.reverse()
-        if addStart:
-            decoded = [self.start] + decoded
-        if addEnd:
-            decoded = decoded + [self.end]
-        encoded = [self.encodeSym(symbol) for symbol in decoded]
-        return encoded
-
-    # Decodes an integer into its symbol
-    def decodeId(self, enc):
-        return self.id2sym[enc] if enc < self.getNumSymbols() else self.unknown
-
-    '''
-    Decodes a sequence of integers into their symbols.
-    If delim is given, joins the symbols using delim.
-    Optionally reverse the resulting sequence.
-    '''
-
-    def decodeSequence(self, encoded, delim=None, reverse=False, stopAtInvalid=True):
-        length = 0
-        for i in range(len(encoded)):
-            if not self.isValid(encoded[i]) and stopAtInvalid:
-                break
-            length += 1
-        encoded = encoded[:length]
-
-        decoded = [self.decodeId(enc) for enc in encoded]
-        if reverse:
-            decoded.reverse()
-
-        if delim is not None:
-            return delim.join(decoded)
-
-        return decoded
-
-
-'''
-Preprocesses a given dataset into numpy arrays.
-By calling preprocess, the class:
-1. Reads the input data files into a dictionary.
-2. Saves the resulting jsons in files and loads them instead of parsing input if the files exist.
-3. Initializes word embeddings to random / GloVe.
-4. Optionally filters data according to given filters.
-5. Encodes and vectorizes the data into numpy arrays.
-6. Buckets the data according to the instances' length.
-'''
-
-
-class Preprocesser(object):
-    def __init__(self):
-        self.questionDict = SymbolDict()
-        self.answerDict = SymbolDict(empty=True)
-        self.qaDict = SymbolDict()
-
-        self.specificDatasetDicts = None
-
-        self.programDict = SymbolDict()
-        self.programTranslator = ProgramTranslator(self.programDict, 2)
-
-    '''
-    Tokenizes a string into a list of symbols.
-
-    Args:
-        text: raw string to tokenize.
-        ignoredPuncts: punctuation to ignore
-        keptPuncts: punctuation to keep (as symbols)
-        endPunct: punctuation to remove if it appears at the end
-        delim: delimiter between symbols
-        clean: True to replace text in the string
-        replacelistPre: dictionary of replacements to perform on the text before tokenization
-        replacelistPost: dictionary of replacements to perform on the text after tokenization
-    '''
-    # sentence tokenizer
-    allPunct = ["?", "!", "\\", "/", ")", "(", ".", ",", ";", ":"]
-
-    def tokenize(self, text, ignoredPuncts=["?", "!", "\\", "/", ")", "("],
-                 keptPuncts=[".", ",", ";", ":"], endPunct=[">", "<", ":"], delim=" ",
-                 clean=False, replacelistPre=dict(), replacelistPost=dict()):
-
-        if clean:
-            for word in replacelistPre:
-                origText = text
-                text = text.replace(word, replacelistPre[word])
-                if (origText != text):
-                    print(origText)
-                    print(text)
-                    print("")
-
-        for punct in endPunct:
-            if text[-1] == punct:
-                print(text)
-                text = text[:-1]
-                print(text)
-                print("")
-
-        for punct in keptPuncts:
-            text = text.replace(punct, delim + punct + delim)
-
-        for punct in ignoredPuncts:
-            text = text.replace(punct, "")
-
-        ret = text.lower().split(delim)
-
-        if clean:
-            origRet = ret
-            ret = [replacelistPost.get(word, word) for word in ret]
-            if origRet != ret:
-                print(origRet)
-                print(ret)
-
-        ret = [t for t in ret if t != ""]
-        return ret
-
-    # Read class' generated files.
-    # files interface
-    def readFiles(self, instancesFilename):
-        with open(instancesFilename, "r") as inFile:
-            instances = json.load(inFile)
-
-        with open(config.questionDictFile(), "rb") as inFile:
-            self.questionDict = pickle.load(inFile)
-
-        with open(config.answerDictFile(), "rb") as inFile:
-            self.answerDict = pickle.load(inFile)
-
-        with open(config.qaDictFile(), "rb") as inFile:
-            self.qaDict = pickle.load(inFile)
-
-        return instances
-
-    '''
-    Generate class' files. Save json representation of instances and
-    symbols-to-integers dictionaries.
-    '''
-
-    def writeFiles(self, instances, instancesFilename):
-        with open(instancesFilename, "w") as outFile:
-            json.dump(instances, outFile)
-
-        with open(config.questionDictFile(), "wb") as outFile:
-            pickle.dump(self.questionDict, outFile)
-
-        with open(config.answerDictFile(), "wb") as outFile:
-            pickle.dump(self.answerDict, outFile)
-
-        with open(config.qaDictFile(), "wb") as outFile:
-            pickle.dump(self.qaDict, outFile)
-
-    # Write prediction json to file and optionally a one-answer-per-line output file
-    def writePreds(self, res, tier, suffix=""):
-        if res is None:
-            return
-        preds = res["preds"]
-        sortedPreds = sorted(preds, key=lambda instance: instance["index"])
-        with open(config.predsFile(tier + suffix), "w") as outFile:
-            outFile.write(json.dumps(sortedPreds))
-        with open(config.answersFile(tier + suffix), "w") as outFile:
-            for instance in sortedPreds:
-                writeline(outFile, instance["prediction"])
-
-    def readPDF(self, instancesFilename):
-        instances = []
-
-        if os.path.exists(instancesFilename):
-            instances = self.readFiles(instancesFilename)
-
-        return instances
-
-    def readData(self, datasetFilename, instancesFilename, train):
-        # data extraction
-        datasetReader = {
-            "PDF": self.readPDF
-        }
-
-        return datasetReader[config.dataset](datasetFilename, instancesFilename, train)
-
-    def vectorizeData(self, data):
-        # if "SHARED", tie symbol representations in questions and answers
-        if config.ansEmbMod == "SHARED":
-            qDict = self.qaDict
-        else:
-            qDict = self.questionDict
-
-        encodedQuestion = [qDict.encodeSequence(d["questionSeq"]) for d in data]
-        question, questionL = vectorize2DList(encodedQuestion)
-
-        # pass the whole instances? if heavy then not good
-        imageId = [d["imageId"] for d in data]
-        instance = data
-
-        return {"question": question,
-                "questionLength": questionL,
-                "imageId": imageId
-                }
-
-    # Separates data based on a field's length
-    def lseparator(self, key, lims):
-        maxI = len(lims)
-
-        def separatorFn(x):
-            v = x[key]
-            for i, lim in enumerate(lims):
-                if len(v) < lim:
-                    return i
-            return maxI
-
-        return {"separate": separatorFn, "groupsNum": maxI + 1}
-
-    # Buckets data into groups using a separator
-    def bucket(self, instances, separator):
-        buckets = [[] for i in range(separator["groupsNum"])]
-        for instance in instances:
-            bucketI = separator["separate"](instance)
-            buckets[bucketI].append(instance)
-        return [bucket for bucket in buckets if len(bucket) > 0]
-
-    # Re-buckets a bucket list given a separator
-    def rebucket(self, buckets, separator):
-        res = []
-        for bucket in buckets:
-            res += self.bucket(bucket, separator)
-        return res
-
-    # Buckets data based on question / program length
-    def bucketData(self, data, noBucket=False):
-        if noBucket:
-            buckets = [data]
-        else:
-            if config.noBucket:
-                buckets = [data]
-            elif config.noRebucket:
-                questionSep = self.lseparator("questionSeq", config.questionLims)
-                buckets = self.bucket(data, questionSep)
-            else:
-                programSep = self.lseparator("programSeq", config.programLims)
-                questionSep = self.lseparator("questionSeq", config.questionLims)
-                buckets = self.bucket(data, programSep)
-                buckets = self.rebucket(buckets, questionSep)
-        return buckets
-
-    '''
-    Prepares data:
-    1. Filters data according to the arguments below.
-    2. Takes only a subset of the data based on config.trainedNum / config.testedNum
-    3. Buckets data according to question / program length
-    4. Vectorizes data into numpy arrays
-    '''
-
-    def prepareData(self, data, train, filterKey=None, noBucket=False):
-        filterDefault = {"maxQLength": 0, "maxPLength": 0, "onlyChain": False, "filterOp": 0}
-
-        filterTrain = {"maxQLength": config.tMaxQ, "maxPLength": config.tMaxP,
-                       "onlyChain": config.tOnlyChain, "filterOp": config.tFilterOp}
-
-        filterVal = {"maxQLength": config.vMaxQ, "maxPLength": config.vMaxP,
-                     "onlyChain": config.vOnlyChain, "filterOp": config.vFilterOp}
-
-        filters = {"train": filterTrain, "evalTrain": filterTrain,
-                   "val": filterVal, "test": filterDefault}
-
-        if filterKey is None:
-            fltr = filterDefault
-        else:
-            fltr = filters[filterKey]
-
-        # split data when finetuning on validation set
-        if config.trainExtra and config.extraVal and (config.finetuneNum > 0):
-            if train:
-                data = data[:config.finetuneNum]
-            else:
-                data = data[config.finetuneNum:]
-
-        typeFilter = config.typeFilters[fltr["filterOp"]]
-        # filter specific settings
-        if fltr["onlyChain"]:
-            data = [d for d in data if all((len(inputNum) < 2) for inputNum in d["programInputs"])]
-        if fltr["maxQLength"] > 0:
-            data = [d for d in data if len(d["questionSeq"]) <= fltr["maxQLength"]]
-        if fltr["maxPLength"] > 0:
-            data = [d for d in data if len(d["programSeq"]) <= fltr["maxPLength"]]
-        if len(typeFilter) > 0:
-            data = [d for d in data if d["programSeq"][-1] not in typeFilter]
-
-        # run on a subset of the data. If 0 then use all data
-        num = config.trainedNum if train else config.testedNum
-        # retainVal = True to retain the same sample of validation across runs
-        if (not train) and (not config.retainVal):
-            random.shuffle(data)
-        if num > 0:
-            data = data[:num]
-        # set number to match dataset size
-        if train:
-            config.trainedNum = len(data)
-        else:
-            config.testedNum = len(data)
-
-        # bucket
-        buckets = self.bucketData(data, noBucket=noBucket)
-
-        # vectorize
-        return [self.vectorizeData(bucket) for bucket in buckets]
-
-    # Prepares all the tiers of a dataset. See the prepareData method for further details.
-    def prepareDataset(self, dataset, noBucket=False):
-        if dataset is None:
-            return None
-
-        for tier in dataset:
-            if dataset[tier] is not None:
-                dataset[tier]["data"] = self.prepareData(dataset[tier]["instances"],
-                                                         train=dataset[tier]["train"], filterKey=tier,
-                                                         noBucket=noBucket)
-
-        for tier in dataset:
-            if dataset[tier] is not None:
-                del dataset[tier]["instances"]
-
-        return dataset
-
-    # Initializes word embeddings to random uniform / random normal / GloVe.
-    def initializeWordEmbeddings(self, wordsDict=None, noPadding=False):
-        # default dictionary to use for embeddings
-        if wordsDict is None:
-            wordsDict = self.questionDict
-
-        # uniform initialization
-        if config.wrdEmbUniform:
-            lowInit = -1.0 * config.wrdEmbScale
-            highInit = 1.0 * config.wrdEmbScale
-            embeddings = np.random.uniform(low=lowInit, high=highInit,
-                                           size=(wordsDict.getNumSymbols(), config.wrdEmbDim))
-        # normal initialization
-        else:
-            embeddings = config.wrdEmbScale * np.random.randn(wordsDict.getNumSymbols(),
-                                                              config.wrdEmbDim)
-
-        # if wrdEmbRandom = False, use GloVe
-        counter = 0
-        if (not config.wrdEmbRandom):
-            with open(config.wordVectorsFile, 'r') as inFile:
-                for line in inFile:
-                    line = line.strip().split()
-                    word = line[0].lower()
-                    vector = [float(x) for x in line[1:]]
-                    index = wordsDict.sym2id.get(word)
-                    if index is not None:
-                        embeddings[index] = vector
-                        counter += 1
-
-        print(counter)
-        print(self.questionDict.sym2id)
-        print(len(self.questionDict.sym2id))
-        print(self.answerDict.sym2id)
-        print(len(self.answerDict.sym2id))
-        print(self.qaDict.sym2id)
-        print(len(self.qaDict.sym2id))
-
-        if noPadding:
-            return embeddings # no embedding for padding symbol
-        else:
-            return embeddings[1:]
-
-    '''
-    Initializes word embeddings for question words and optionally for answer words
-    (when config.ansEmbMod == "BOTH"). If config.ansEmbMod == "SHARED", ties embeddings for
-    symbols shared between questions and answers.
-    '''
-
-    def initializeQAEmbeddings(self):
-        # use same embeddings for questions and answers
-        if config.ansEmbMod == "SHARED":
-            qaEmbeddings = self.initializeWordEmbeddings(self.qaDict)
-            ansMap = np.array([self.qaDict.sym2id[sym] for sym in self.answerDict.id2sym])
-            embeddings = {"qa": qaEmbeddings, "ansMap": ansMap}
-        # use different embeddings for questions and answers
-        else:
-            qEmbeddings = self.initializeWordEmbeddings(self.questionDict)
-            aEmbeddings = None
-            if config.ansEmbMod == "BOTH":
-                aEmbeddings = self.initializeWordEmbeddings(self.answerDict, noPadding=True)
-            embeddings = {"q": qEmbeddings, "a": aEmbeddings}
-        return embeddings
-
-    '''
-    Preprocesses a given dataset into numpy arrays:
-    1. Reads the input data files into a dictionary.
-    2. Saves the resulting jsons in files and loads them instead of parsing input if the files exist.
-    3. Initializes word embeddings to random / GloVe.
-    4. Optionally filters data according to given filters.
-    5. Encodes and vectorizes the data into numpy arrays.
-    6. Buckets the data according to the instances' length.
-    '''
-
-    def preprocessData(self, question, debug=False):
-        # Read data into json and symbols' dictionaries
-        print(bold("Loading data..."))
-        start = time.time()
-        with open(config.questionDictFile(), "rb") as inFile:
-            self.questionDict = pickle.load(inFile)
-        with open(config.qaDictFile(), "rb") as inFile:
-            self.qaDict = pickle.load(inFile)
-        with open(config.answerDictFile(), "rb") as inFile:
-            self.answerDict = pickle.load(inFile)
-        question = question.replace('?', '').replace(', ', '').lower().split()
-        encodedQuestion = self.questionDict.encodeSequence(question)
-        data = {'question': np.array([encodedQuestion]), 'questionLength': np.array([len(encodedQuestion)])}
-        print("took {:.2f} seconds".format(time.time() - start))
-
-        # Initialize word embeddings (random / glove)
-        print(bold("Loading word vectors..."))
-        start = time.time()
-        embeddings = self.initializeQAEmbeddings()
-        print("took {:.2f} seconds".format(time.time() - start))
-
-        answer = 'yes' # DUMMY_ANSWER
-        self.answerDict.addSeq([answer])
-        self.qaDict.addSeq([answer])
-
-        config.questionWordsNum = self.questionDict.getNumSymbols()
-        config.answerWordsNum = self.answerDict.getNumSymbols()
-
-        return data, embeddings, self.answerDict
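Assuming the `SymbolDict` class from the deleted file above is in scope, the encode/decode round trip works as follows (toy vocabulary, illustrative only, not taken from the repo):

d = SymbolDict()
d.addSeq(["what", "is", "shown", "what"])
d.createVocab(minCount=0)   # keeps every symbol seen more than 0 times

enc = d.encodeSequence(["what", "is", "shown", "unseen"])
print(enc)                  # [4, 5, 6, 1] -- "unseen" maps to <UNK> (id 1)

# decoding stops at the first invalid id (<UNK>) by default
print(d.decodeSequence(enc, delim=" "))   # "what is shown"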
spaces/CactiStaccingCrane/OpenAssistant-oasst-sft-1-pythia-12b/app.py
DELETED
@@ -1,3 +0,0 @@
-import gradio as gr
-
-gr.Interface.load("models/OpenAssistant/oasst-sft-1-pythia-12b").launch()
spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/util/__init__.py
DELETED
@@ -1 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
spaces/CarlDennis/HYTTS/commons.py
DELETED
@@ -1,97 +0,0 @@
-import math
-import torch
-from torch.nn import functional as F
-import torch.jit
-
-
-def script_method(fn, _rcb=None):
-    return fn
-
-
-def script(obj, optimize=True, _frames_up=0, _rcb=None):
-    return obj
-
-
-torch.jit.script_method = script_method
-torch.jit.script = script
-
-
-def init_weights(m, mean=0.0, std=0.01):
-    classname = m.__class__.__name__
-    if classname.find("Conv") != -1:
-        m.weight.data.normal_(mean, std)
-
-
-def get_padding(kernel_size, dilation=1):
-    return int((kernel_size*dilation - dilation)/2)
-
-
-def intersperse(lst, item):
-    result = [item] * (len(lst) * 2 + 1)
-    result[1::2] = lst
-    return result
-
-
-def slice_segments(x, ids_str, segment_size=4):
-    ret = torch.zeros_like(x[:, :, :segment_size])
-    for i in range(x.size(0)):
-        idx_str = ids_str[i]
-        idx_end = idx_str + segment_size
-        ret[i] = x[i, :, idx_str:idx_end]
-    return ret
-
-
-def rand_slice_segments(x, x_lengths=None, segment_size=4):
-    b, d, t = x.size()
-    if x_lengths is None:
-        x_lengths = t
-    ids_str_max = x_lengths - segment_size + 1
-    ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
-    ret = slice_segments(x, ids_str, segment_size)
-    return ret, ids_str
-
-
-def subsequent_mask(length):
-    mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
-    return mask
-
-
-@torch.jit.script
-def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
-    n_channels_int = n_channels[0]
-    in_act = input_a + input_b
-    t_act = torch.tanh(in_act[:, :n_channels_int, :])
-    s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
-    acts = t_act * s_act
-    return acts
-
-
-def convert_pad_shape(pad_shape):
-    l = pad_shape[::-1]
-    pad_shape = [item for sublist in l for item in sublist]
-    return pad_shape
-
-
-def sequence_mask(length, max_length=None):
-    if max_length is None:
-        max_length = length.max()
-    x = torch.arange(max_length, dtype=length.dtype, device=length.device)
-    return x.unsqueeze(0) < length.unsqueeze(1)
-
-
-def generate_path(duration, mask):
-    """
-    duration: [b, 1, t_x]
-    mask: [b, 1, t_y, t_x]
-    """
-    device = duration.device
-
-    b, _, t_y, t_x = mask.shape
-    cum_duration = torch.cumsum(duration, -1)
-
-    cum_duration_flat = cum_duration.view(b * t_x)
-    path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
-    path = path.view(b, t_x, t_y)
-    path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
-    path = path.unsqueeze(1).transpose(2,3) * mask
-    return path
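As a quick sanity check of the `sequence_mask` helper above (a standalone sketch, assuming the definition from this deleted file is in scope): it turns a batch of lengths into a boolean padding mask, one row per sequence.

import torch

lengths = torch.tensor([2, 4])
mask = sequence_mask(lengths, max_length=5)
print(mask.int())
# tensor([[1, 1, 0, 0, 0],
#         [1, 1, 1, 1, 0]], dtype=torch.int32)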