Commit bbf757c
Parent(s): e336a46
Update parquet files (step 17 of 476)
This view is limited to 50 files because it contains too many changes.
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Kompilasi Hukum Islam Lengkap Pdf 59 Teks Asli dan Terjemahan Kompilasi Hukum Islam dalam Bahasa Indonesia.md +0 -100
- spaces/1gistliPinn/ChatGPT4/Examples/Eminem Relapse Refill Free Download 17 FREE.md +0 -7
- spaces/1gistliPinn/ChatGPT4/Examples/Euro Truck Simulator 2 Game Crack Activation Key Free Download.md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/Film Hard Parigi 1940 Avellino Index2.php.rar.md +0 -7
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Amazon India Shopping - The Best Shopping App for Android Devices.md +0 -89
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Alex Bobo - Orice furtuna ar veni (Instrumental) 2022.md +0 -116
- spaces/1phancelerku/anime-remove-background/All Skin Unlocked in Mobile Legends Bang Bang APK Download Now.md +0 -79
- spaces/1phancelerku/anime-remove-background/Download Crazy Taxi Classic APK - The Ultimate Racing Game for Android.md +0 -112
- spaces/1phancelerku/anime-remove-background/Download and Install VLC on Your Windows RT 8.1 Tablet or PC.md +0 -170
- spaces/AIGC-Audio/Make_An_Audio_inpaint/vocoder/bigvgan/models.py +0 -414
- spaces/Abhilashvj/planogram-compliance/utils/loggers/wandb/sweep.py +0 -45
- spaces/AchyuthGamer/OpenGPT-Chat-UI/.svelte-kit/generated/client/nodes/5.js +0 -0
- spaces/Adapter/T2I-Adapter/ldm/data/dataset_coco.py +0 -36
- spaces/Ali36Ahmad/MagicPrompt-Stable-Diffusion/README.md +0 -14
- spaces/Amon1/ChatGPTForAcadamic/crazy_functions/test_project/cpp/cppipc/buffer.cpp +0 -87
- spaces/Amrrs/DragGan-Inversion/PTI/configs/global_config.py +0 -12
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/instruct_pix2pix/train_instruct_pix2pix_xl.py +0 -1205
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipeline_utils.py +0 -29
- spaces/Andy1621/uniformer_image_detection/configs/_base_/default_runtime.py +0 -16
- spaces/Andy1621/uniformer_image_detection/configs/dynamic_rcnn/README.md +0 -20
- spaces/Andy1621/uniformer_image_detection/configs/fpg/mask_rcnn_r50_fpg_crop640_50e_coco.py +0 -48
- spaces/Andy1621/uniformer_image_detection/configs/gn+ws/faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py +0 -13
- spaces/Andy1621/uniformer_image_detection/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain_1x_coco.py +0 -41
- spaces/Andy1621/uniformer_image_detection/configs/scratch/faster_rcnn_r50_fpn_gn-all_scratch_6x_coco.py +0 -22
- spaces/Anonymous-sub/Rerender/ControlNet/gradio_hough2image.py +0 -100
- spaces/Anthony7906/MengHuiMXD_GPT/run_Linux.sh +0 -31
- spaces/Antoine245/bot/app.py +0 -69
- spaces/Arulkumar03/GroundingDINO_SOTA_Zero_Shot_Model/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cpu.h +0 -35
- spaces/BAAI/AltDiffusion/share_btn.py +0 -60
- spaces/BLACKHOST/timer/tm.py +0 -6
- spaces/BernardoOlisan/vqganclip/taming-transformers/taming/modules/losses/vqperceptual.py +0 -136
- spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/parsers.py +0 -1112
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/tomli/__init__.py +0 -11
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/configs/quick_schedules/README.md +0 -1
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/DensePose/tests/test_structures.py +0 -25
- spaces/CVPR/GFPGAN-example/README.md +0 -46
- spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/fill.h +0 -23
- spaces/CVPR/MonoScene/monoscene/config.py +0 -26
- spaces/CVPR/WALT/walt/datasets/pipelines/auto_augment.py +0 -890
- spaces/CVPR/lama-example/app.py +0 -42
- spaces/CVPR/lama-example/models/ade20k/utils.py +0 -40
- spaces/CikeyQI/meme-api/meme_generator/memes/capoo_rip/__init__.py +0 -59
- spaces/CofAI/chat.b4/g4f/Provider/__init__.py +0 -33
- spaces/CofAI/picscore1/style.css +0 -28
- spaces/Cong723/gpt-academic-public/docs/self_analysis.md +0 -256
- spaces/Cropinky/hana_hanak_houses/realesrgan/data/realesrgan_dataset.py +0 -192
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/PdfImagePlugin.py +0 -284
- spaces/Datasculptor/MusicGen/CONTRIBUTING.md +0 -35
- spaces/Detomo/ai-comic-generation/src/lib/getInitialRenderedScene.ts +0 -11
- spaces/Detomo/ai-comic-generation/src/lib/replaceWhiteWithTransparent.ts +0 -37
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Kompilasi Hukum Islam Lengkap Pdf 59 Teks Asli dan Terjemahan Kompilasi Hukum Islam dalam Bahasa Indonesia.md
DELETED
@@ -1,100 +0,0 @@
-<br />
-<h1>Xforce Keygen AutoCAD MEP 2019 64bit Free Download</h1>
-<p>If you are looking for a way to download and install AutoCAD MEP 2019 for free, you might have heard of Xforce Keygen. Xforce Keygen is a software that can generate activation codes for various Autodesk products, including AutoCAD MEP 2019. But what is AutoCAD MEP 2019, and what is Xforce Keygen? And how can you use them to create and edit mechanical, electrical, and plumbing designs? In this article, we will answer these questions and more. We will also discuss the benefits and risks of using Xforce Keygen for AutoCAD MEP 2019, and how to avoid or minimize them. So, let's get started.</p>
-<h2>Introduction</h2>
-<p>Before we dive into the details of how to download and install Xforce Keygen for AutoCAD MEP 2019, let's first understand what these two software are and why you might need them.</p>
-<h2>xforce keygen AutoCAD MEP 2019 64bit free download</h2><br /><p><b><b>Download File</b> >>>>> <a href="https://byltly.com/2uKvW8">https://byltly.com/2uKvW8</a></b></p><br /><br />
-<h3>What is AutoCAD MEP?</h3>
-<p>AutoCAD MEP is a software that allows you to create and edit mechanical, electrical, and plumbing designs for buildings and infrastructure. It is part of the Autodesk family of products, which are widely used by architects, engineers, designers, and contractors. AutoCAD MEP 2019 is the latest version of the software, which was released in April 2018. It has many features and tools that can help you design more efficiently and accurately, such as:</p>
-<ul>
-<li>Improved user interface and workflows</li>
-<li>Enhanced drawing and annotation tools</li>
-<li>New content library and catalogs</li>
-<li>Better integration with other Autodesk products and cloud services</li>
-<li>More options for customization and collaboration</li>
-</ul>
-<p>AutoCAD MEP 2019 is a powerful software that can help you create professional-quality mechanical, electrical, and plumbing designs. However, it is not a cheap software. The official price of a one-year subscription to AutoCAD MEP 2019 is $1,610. If you want to buy a perpetual license, you will have to pay $4,425. That's a lot of money for many people who want to use the software for personal or educational purposes. That's why some people look for alternative ways to get the software for free or at a lower cost.</p>
-<h3>What is Xforce Keygen?</h3>
-<p>Xforce Keygen is a software that can generate activation codes for various Autodesk products, including AutoCAD MEP 2019. It is a crack tool that bypasses the security system of the software and allows you to use it without paying for a license. Xforce Keygen was created by a group of hackers who call themselves X-Force. They have been releasing crack tools for different Autodesk products since 2006.</p>
-<h3>Why do you need Xforce Keygen for AutoCAD MEP 2019?</h3>
-<p>If you want to use AutoCAD MEP 2019 for free or at a lower cost than the official price, you might need Xforce Keygen. By using Xforce Keygen, you can generate an activation code that will unlock all the features and tools of AutoCAD MEP 2019. You can then use the software as if you had bought it legally. This way, you can save money and time on purchasing a license.</p>
-<h2>How to download and install Xforce Keygen for AutoCAD MEP 2019?</h2>
-<p>Now that you know what AutoCAD MEP 2019 and Xforce Keygen are, let's see how you can download and install them on your computer. Here are the steps you need to follow:</p>
-<h3>Step 1: Download Xforce Keygen from a reliable source</h3>
-<p>The first thing you need to do is to find a reliable source where you can download Xforce Keygen for AutoCAD MEP 2019. There are many websites that claim to offer this software for free, but not all of them are trustworthy. Some of them might contain malware or viruses that can harm your computer or steal your personal information. Therefore, you need to be careful when choosing where to download Xforce Keygen from.</p>
-<p>One of the most reliable sources where you can download Xforce Keygen for AutoCAD MEP 2019 is <a href="https://x-force-cracks.com/cracks-keygens/autocad-mep-2019-crack/">X-Force Cracks</a>. This website is run by the original creators of Xforce Keygen, so you can be sure that the software is authentic and safe. To download Xforce Keygen from this website, follow these steps:</p>
-<p>How to use xforce keygen for AutoCAD MEP 2019 64bit<br />
-Xforce keygen AutoCAD MEP 2019 64bit crack download<br />
-AutoCAD MEP 2019 64bit activation code generator xforce<br />
-Xforce keygen AutoCAD MEP 2019 64bit offline installer<br />
-AutoCAD MEP 2019 64bit full version with xforce keygen<br />
-Xforce keygen AutoCAD MEP 2019 64bit torrent download<br />
-AutoCAD MEP 2019 64bit license key xforce keygen<br />
-Xforce keygen AutoCAD MEP 2019 64bit patch download<br />
-AutoCAD MEP 2019 64bit serial number xforce keygen<br />
-Xforce keygen AutoCAD MEP 2019 64bit direct download link<br />
-AutoCAD MEP 2019 64bit product key xforce keygen<br />
-Xforce keygen AutoCAD MEP 2019 64bit free trial download<br />
-AutoCAD MEP 2019 64bit registration code xforce keygen<br />
-Xforce keygen AutoCAD MEP 2019 64bit latest version download<br />
-AutoCAD MEP 2019 64bit activation key xforce keygen<br />
-Xforce keygen AutoCAD MEP 2019 64bit system requirements<br />
-AutoCAD MEP 2019 64bit crack only xforce keygen<br />
-Xforce keygen AutoCAD MEP 2019 64bit installation guide<br />
-AutoCAD MEP 2019 64bit keygen by xforce team<br />
-Xforce keygen AutoCAD MEP 2019 64bit features and benefits<br />
-AutoCAD MEP 2019 64bit xforce keygen download for windows<br />
-Xforce keygen AutoCAD MEP 2019 64bit download for mac<br />
-AutoCAD MEP 2019 64bit xforce keygen download for linux<br />
-Xforce keygen AutoCAD MEP 2019 64bit review and feedback<br />
-AutoCAD MEP 2019 64bit xforce keygen alternative download<br />
-Xforce keygen AutoCAD MEP 2019 64bit comparison with other software<br />
-AutoCAD MEP 2019 64bit xforce keygen tips and tricks<br />
-Xforce keygen AutoCAD MEP 2019 64bit troubleshooting and support<br />
-AutoCAD MEP 2019 64bit xforce keygen update and upgrade<br />
-Xforce keygen AutoCAD MEP 2019 64bit discount and coupon code<br />
-AutoCAD MEP 2019 with xforce keygen free download for students<br />
-Xforce keygen for all Autodesk products including AutoCAD MEP</p>
-<ol>
-<li>Go to <a href="https://x-force-cracks.com/cracks-keygens/autocad-mep-2019-crack/">https://x-force-cracks.com/cracks-keygens/autocad-mep-2019-crack/</a></li>
-<li>Scroll down until you see a button that says "Download x-force keygen"</li>
-<li>Click on the button and wait for the download to start</li>
-<li>Save the zip file on your computer</li>
-</ol>
-<h3>Step 2: Extract the zip file and run the setup file</h3>
-<p>The next thing you need to do is to extract the zip file that contains Xforce Keygen. To do this, follow these steps:</p>
-<ol>
-<li>Locate the zip file on your computer</li>
-<li>Right-click on it and choose "Extract All"</li>
-<li>Select a destination folder where you want to extract the files</li>
-<li>Click on "Extract"</li>
-<li>Open the destination folder</li>
-<li>Double-click on the file named "xf-adsk2020_x64.exe"</li>
-<li>A window will pop up asking you to confirm if you want to run this file</li>
-<li>Click on "Run"</li>
-<li>A new window will open with the Xforce Keygen interface</li>
-</ol>
-<h3>Step 3: Choose AutoCAD MEP 2019 from the list of products and click on Generate</h3>
-<p>The next thing you need to do is to choose AutoCAD MEP 2019 from the list of products that Xforce Keygen can crack. To do this, follow these steps:</p>
-<ol>
-<li>In the Xforce Keygen interface, click on the drop-down menu next to "Select Product"</li>
-<li>A list of Autodesk products will appear</li>
-<li>Scroll down until you find "AutoCAD Mechanical Electrical Plumbing (MEP) - Product Design & Manufacturing Collection"</li>
-<li>Select it by clicking on it</li>
-<li>A new drop-down menu will appear next to "Select Version"</li>
-<li>Select "2020" by clicking on it</li>
-<li>A new drop-down menu will appear next to "Select Operating System"</li>
-<li>Select "Windows" by clicking on it</li>
-<li>A new drop-down menu will appear next to "Select Bit"</li>
-<li>Select "64" by clicking on it</li>
-<li>A button that says "Generate" will appear below</li>
-<li>Click on it</li>
-<li><b>How do I download and install Xforce Keygen for AutoCAD MEP 2019?</b></li>
-<p>You can download Xforce Keygen for AutoCAD MEP 2019 from a reliable source such as <a href="https://x-force-cracks.com/cracks-keygens/autocad-mep-2019-crack/">X-Force Cracks</a>. Then, you can extract the zip file and run the setup file. Next, you can choose AutoCAD MEP 2019 from the list of products and click on Generate. After that, you can copy the activation code and paste it in the AutoCAD MEP 2019 activation window. Finally, you can enjoy your full version of AutoCAD MEP 2019.</p>
-<li><b>What are the benefits of using Xforce Keygen for AutoCAD MEP 2019?</b></li>
-<p>Some of the benefits of using Xforce Keygen for AutoCAD MEP 2019 are: access to all features and tools of AutoCAD MEP 2019; save money and time on purchasing a license; create and edit mechanical, electrical, and plumbing designs with ease; collaborate with other professionals and share your work online.</p>
-<li><b>What are the risks and precautions of using Xforce Keygen for AutoCAD MEP 2019?</b></li>
-<p>Some of the risks and precautions of using Xforce Keygen for AutoCAD MEP 2019 are: potential malware and virus infection from untrusted sources; legal and ethical issues of using a cracked software; possible errors and bugs in the software performance. To avoid or minimize these risks and precautions, you should: only download Xforce Keygen from reliable sources; only use it for personal or educational purposes; keep your software updated and report any problems that you encounter.</p>
-</p> 0a6ba089eb<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/Eminem Relapse Refill Free Download 17 FREE.md
DELETED
@@ -1,7 +0,0 @@
-<br />
-<p><strong>Clubs:</strong><br /> 00 Eminem ft. 50 Cent<br /> 01 Snoop Dogg ft. Nas & Damian Marley<br /> 02 Eminem ft. Foxy Brown<br /> 03 Eminem ft. Akon<br /> 04 Eminem ft. Kelis<br /> 05 Eminem ft. 50 Cent<br /> 06 Eminem ft. Ginuwine<br /> 07 The Alchemist ft. Just Blaze, Rick Rubin, & El-P</p>
-<p><strong>Individual tracks:</strong><br /> 00 Eminem ft. Sean Kingston<br /> 01 So Sick<br /> 02 Lose Yourself<br /> 03 Love The Way You Lie<br /> 04 Good Guy<br /> 05 Love The Way You Lie (Eminem Remix)<br /> 06 Love The Way You Lie (Jean Kanye Remix)<br /> 07 Love The Way You Lie (James Grime Remix)<br /> 08 Love The Way You Lie (Raekwon Remix)<br /> 09 Love The Way You Lie (Two Inch Punch Remix)<br /> 10 Love The Way You Lie (Orelus Remix)<br /> 11 Love The Way You Lie (Skrillex Remix)<br /> 12 Love The Way You Lie (XXXTentacion Remix)<br /> 13 F***in Up<br /> 14 Love The Way You Lie (Sticky Remix)<br /> 15 So Hated<br /> 16 Grindin<br /> 17 Love The Way You Lie (Filthy Remix)<br /> 18 Pick A Bigger Asshole<br /> 19 Love The Way You Lie (Stoneface Remix)<br /> 20 Love The Way You Lie (Lil Pump Remix)<br /> 21 Love The Way You Lie (Deepak Remix)<br /> 22 Love The Way You Lie (Freddie Gibbs Remix)<br /> 23 The Monster<br /> 24 Love The Way You Lie (Rae Sremmurd Remix)<br /> 25 Love The Way You Lie (Skotch Remix)</p>
-<h2>Eminem Relapse Refill Free Download 17</h2><br /><p><b><b>DOWNLOAD</b> ✅ <a href="https://imgfil.com/2uxZEX">https://imgfil.com/2uxZEX</a></b></p><br /><br />
-<p>16 In The House (Produced By Eminem)<br /> 17 If I Did It (Produced By Supreme)<br /> 18 Just Lose It (Produced By Eminem)<br /> 19 My Moms Said (Produced By Eminem)<br /> 20 People Fuck With Me (Produced By Eminem)<br /> 21 No One (Produced By Eminem)<br /> 22 Takin Your Gunz (Produced By Los Da Mystro & Frasier Wallace)<br /> 23 Just Don't Give A Fuck (Produced By Eminem)</p> 899543212b<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/Euro Truck Simulator 2 Game Crack Activation Key Free Download.md
DELETED
@@ -1,6 +0,0 @@
-<h2>Euro Truck Simulator 2 Game Crack Activation Key Free Download</h2><br /><p><b><b>Download Zip</b> >>>>> <a href="https://imgfil.com/2uy25j">https://imgfil.com/2uy25j</a></b></p><br /><br />
-
-8a78ff9644<br />
-<br />
-<br />
-<p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Film Hard Parigi 1940 Avellino Index2.php.rar.md
DELETED
@@ -1,7 +0,0 @@
-<br />
-<p>one reason of the effects of the film being harder is. and a monochrome. what are the differences between the of work like. https://www.coub.com/stories/13011281/index2.php.rar new:film hard parigi 1940 avellino index2.rar free download:film hard parigi 1940 avellino index2.rar carries:film hard parigi 1940 avellino index2.rar film hard parigi 1940 avellino index2.rar france, native of oporto, at the age of 25 years, he was a madrid. film hard parigi 1940 avellino index2.rar , he claims, carrying. you have to do. . like to send a message - 101 to 200 letters, either hot or cool.rar . is this how you send a message - 101 to 200 letters. how can i send 1 2 3 complete message 2 many.rar https://www.amelie-paris.com/fr/about. html.com/fr/contact.com/fr/facebook.com/fr/whatsapp.com/fr/twitter.com/fr/sms.html.com/fr/discover.com/fr/index.com/fr/rss.com/fr/mobile.</p>
-<p>reproduce content - 2019-01-01 18:14:55; last update: 2019-01-01 18:14:55; svn revision: 81; 0; film hard parigi 1940 avellino index2.php.rar 3.3.2.4 sonax 9 crack.exe.rar.vbs free download full version hi all, free download full version novel torrent software,, 2019-01-01 18:14:53; last update: 2019-01-01 18:14:54; svn revision: 22; 1; film hard parigi 1940 avellino index2.rar cloud screenshot 2019 crack full version download..vbs.rar full download zip. 2019-01-01 18:14:46; last update: 2019-01-01 18:14:47; svn revision: 17; 0; gk download link.s torrent.php cracked.my.php my.w2w.rar 100% working latest software serial key download.rar aa.nfo.zip.vshttps://www.bloodygame.net/preview.php?bid=106247785. </p>
-<h2>film hard parigi 1940 avellino index2.php.rar</h2><br /><p><b><b>Download</b> ► <a href="https://imgfil.com/2uy0Oc">https://imgfil.com/2uy0Oc</a></b></p><br /><br />
-<p>cabildo nota vuelen arquitectura de film.rar ffp res.vacations minu https://rooks.lt/films/rolls-vacations/ rolls.vacations minu int.php.rar avellino ross m.a.1.14. 3 t.rar avellino ross -hauterive.org/profile/film-hard-parigi-1940-avellino-index2phprar-2022-new/profile. -hauterive.org/index.php/component/k2/item/388-. https://facelb.site/post/2586_film-hard-parigi-1940-avellino-index2-php-rar-. </p> 899543212b<br />
-<br />
-<br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Amazon India Shopping - The Best Shopping App for Android Devices.md
DELETED
@@ -1,89 +0,0 @@
-<br />
-<h1>Amazon India Online Shopping App APK Download</h1>
-<p>If you are looking for a convenient and easy way to shop online and pay across a wide selection of products, groceries, and categories at great prices, then you should download the Amazon India Online Shopping App APK. This app is a one-stop solution for all your online shopping needs, whether you want to buy mobiles, electronics, fashion, household items, or more. You can also pay for flights, bills, make UPI payments, order groceries for home delivery, and watch entertaining videos for free on miniTV. In this article, we will tell you more about the features, benefits, and how to download and install the Amazon India Online Shopping App APK on your Android device.</p>
-<h2>amazon india online shopping app apk download</h2><br /><p><b><b>Download</b> ○ <a href="https://urlin.us/2uT13g">https://urlin.us/2uT13g</a></b></p><br /><br />
-<h2>Features of Amazon India Online Shopping App</h2>
-<h3>Shop products, pay bills, make UPI payments, order groceries & watch miniTV</h3>
-<p>With the Amazon India Online Shopping App APK, you can shop online for millions of products from various categories, such as electronics, fashion, beauty, media, home & kitchen, and more. You can easily browse and search for products by name, category, or brand, at the best prices. You can also enjoy quick delivery times, updated order tracking, hassle-free returns and replacements, and convenient and secure payment options.</p>
-<p>Moreover, you can use the app to pay for flights, bills, and make UPI payments with Amazon Pay. You can also order groceries for home delivery with Pantry and Amazon Fresh. And if you want some entertainment, you can watch original web series, short films, comedy videos, and more for free on miniTV.</p>
-<h3>Speak to shop with Alexa</h3>
-<p>The app also lets you use Alexa to shop online with your voice. You can tap the mic icon on the app and ask Alexa to search for products, add items to your cart, check your order status, play games, and more. You can also access Alexa skills to get information, news, weather updates, jokes, and more.</p>
-<p>amazon india app apk download for android<br />
-amazon india online shopping app free download<br />
-amazon india shopping app latest version apk<br />
-download amazon india app for online shopping<br />
-amazon india online shopping app download for pc<br />
-amazon india app apk file download<br />
-amazon india online shopping app install<br />
-amazon india shopping app update apk download<br />
-how to download amazon india app for online shopping<br />
-amazon india online shopping app download apk pure<br />
-amazon india app apk download for ios<br />
-amazon india online shopping app old version download<br />
-amazon india shopping app mod apk download<br />
-download amazon india app and get cashback<br />
-amazon india online shopping app download for windows 10<br />
-amazon india app apk mirror download<br />
-amazon india online shopping app features<br />
-amazon india shopping app pro apk download<br />
-download amazon india app and watch minitv<br />
-amazon india online shopping app download for laptop<br />
-amazon india app apk direct download<br />
-amazon india online shopping app review<br />
-amazon india shopping app premium apk download<br />
-download amazon india app and play games<br />
-amazon india online shopping app download for mac<br />
-amazon india app apk free download uptodown<br />
-amazon india online shopping app benefits<br />
-amazon india shopping app hacked apk download<br />
-download amazon india app and use alexa<br />
-amazon india online shopping app download for tablet<br />
-amazon india app apk offline download<br />
-amazon india online shopping app rating<br />
-amazon india shopping app cracked apk download<br />
-download amazon india app and earn rewards<br />
-amazon india online shopping app download for chromebook<br />
-amazon india app apk safe download<br />
-amazon india online shopping app offers<br />
-amazon india shopping app beta apk download<br />
-download amazon india app and pay bills<br />
-amazon india online shopping app download for smart tv<br />
-amazon india app apk latest version download 2023<br />
-amazon india online shopping app feedback<br />
-amazon india shopping app plus apk download<br />
-download amazon india app and send money <br />
-amazon india online shopping app download for firestick</p>
-<h3>Play games and win prizes every day</h3>
-<p>If you are feeling lucky, you can also play games on the app and win prizes every day. You can choose from various games such as Spin & Win, FunZone Jackpot, Quiz Time, Tap & Win, and more. You can win exciting rewards such as cashback offers, coupons, gift cards, products, and more.</p>
-<h2>How to download and install Amazon India Online Shopping App APK</h2>
-<h3>Download from Google Play Store or APKCombo</h3>
-<p>The easiest way to download the Amazon India Online Shopping App APK is to get it from the Google Play Store. You can simply search for the app on the store or use this link to download the app on your device. Alternatively, you can also download the APK file from a third-party website such as APKCombo. You can use this link to download the latest version of the app from APKCombo.</p>
-<h3>Enable unknown sources and install the APK file</h3>
-<p>Before you can install the APK file, you need to enable unknown sources on your device. This will allow you to install apps from sources other than the Google Play Store. To do this, go to Settings > Security > Unknown sources and toggle it on. You may also need to grant permission to your browser or file manager to install apps.</p>
-<p>Once you have enabled unknown sources, you can locate the downloaded APK file on your device and tap on it to start the installation process. Follow the instructions on the screen and wait for the installation to complete.</p>
-<h3>Launch the app and sign in or create an account</h3>
-<p>After the installation is done, you can launch the app from your app drawer or home screen. You will be asked to sign in with your existing Amazon account or create a new one if you don't have one. You can also use your mobile number or email address to sign in or sign up. Once you are signed in, you can start using the app to shop online and enjoy its features.</p>
-<h2>Benefits of using Amazon India Online Shopping App APK</h2>
-<h3>Enjoy a great shopping experience with a wide selection of products and categories</h3>
-<p>One of the main benefits of using the Amazon India Online Shopping App APK is that you can enjoy a great shopping experience with a wide selection of products and categories at your fingertips. You can find anything you need or want, from mobiles, laptops, TVs, cameras, headphones, speakers, smartwatches, tablets, accessories, and more in electronics; to clothing, shoes, bags, jewelry, watches, sunglasses, and more in fashion; to books, movies, music, games, software, and more in media; to furniture, appliances, kitchenware, home decor, lighting, bedding, and more in home & kitchen; and much more. You can also compare prices, features, ratings, and reviews of different products before making a purchase decision.</p>
-<h3>Get notified on the latest offers and deals</h3>
-<p>Another benefit of using the app is that you can get notified on the latest offers and deals on various products and categories. You can save money and time by availing discounts, coupons, cashback offers, lightning deals, daily deals, festive sales, and more. You can also join Prime membership to get exclusive access to Prime Day deals, early access to deals, free fast delivery on eligible items, unlimited video streaming on Prime Video, ad-free music streaming on Prime Music, free e-books on Prime Reading, and more.</p>
-<h3>Pay securely and conveniently with Amazon Pay, cash on delivery, or other options</h3>
-<p>The app also provides you with secure and convenient payment options for your online shopping. You can use Amazon Pay to pay for flights, bills, make UPI payments, order groceries, and more. You can also use cash on delivery, debit cards, credit cards, net banking, EMI, or gift cards to pay for your orders. You can rest assured that your transactions are safe and secure with Amazon's trusted payment gateway.</p>
-<h3>Watch entertaining videos for free on miniTV</h3>
-<p>The app also offers you a free entertainment service called miniTV. You can watch original web series, short films, comedy videos, news, sports, and more on miniTV. You can also discover new content based on your preferences and interests. You can access miniTV from the app's home screen or from the video tab.</p>
-<h2>FAQs about Amazon India Online Shopping App APK</h2>
-<h3>Is Amazon India Online Shopping App APK safe to use?</h3>
-<p>Yes, Amazon India Online Shopping App APK is safe to use as long as you download it from a trusted source such as the Google Play Store or APKCombo. You should also check the permissions and reviews of the app before installing it. You should also avoid downloading any modded or hacked versions of the app as they may contain malware or viruses.</p>
-<h3>How can I update Amazon India Online Shopping App APK?</h3>
-<p>You can update the app by checking for updates on the Google Play Store or APKCombo. You can also enable auto-update on your device settings to get the latest version of the app automatically. Alternatively, you can uninstall the app and reinstall it with the latest APK file.</p>
-<h3>What is the difference between Amazon India Online Shopping App APK and Amazon Shopping APK?</h3>
-<p>Amazon India Online Shopping App APK is a regional version of the Amazon Shopping APK that is tailored for Indian customers. It has features and services that are specific to India, such as Amazon Pay, Pantry, Fresh, miniTV, and more. It also has products and categories that are relevant to Indian shoppers. Amazon Shopping APK is a global version of the app that is available in different countries and regions. It has features and services that are common to all customers, such as Prime Video, Prime Music, Prime Reading, and more. It also has products and categories that are available worldwide.</p>
-<h3>How can I contact customer service if I have any issues with the app?</h3>
-<p>If you have any issues with the app, you can contact customer service by tapping on the menu icon on the app and selecting Customer Service. You can also visit this link to get help online. You can choose from various options such as chat, call, email, or request a call back. You can also check the FAQs and help topics on the app or website for common queries and solutions.</p>
-<h3>How can I share my feedback or suggestions for the app?</h3>
-<p>If you want to share your feedback or suggestions for the app, you can tap on the menu icon on the app and select Your Account > Help & Feedback > Send Feedback. You can also rate and review the app on the Google Play Store or APKCombo. Your feedback is valuable and helps us improve our app and services.</p>
-<h2>Conclusion</h2>
-<p>In conclusion, Amazon India Online Shopping App APK is a great app for online shopping and paying across a wide selection of products and categories at great prices. You can also enjoy features such as Alexa voice shopping, games and prizes, miniTV videos, and more. You can download and install the app easily from the Google Play Store or APKCombo. You can also get notified on the latest offers and deals, pay securely and conveniently with various options, and contact customer service if you have any issues. So what are you waiting for? Download the app today and start shopping online with Amazon India.</p> 197e85843d<br />
-<br />
-<br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Alex Bobo - Orice furtuna ar veni (Instrumental) 2022.md
DELETED
@@ -1,116 +0,0 @@
-<br />
-<h1>Download Alex Bobo - Orice Furtuna Ar Veni</h1>
-<p>If you are looking for a new song to add to your playlist, you might want to check out Alex Bobo - Orice Furtuna Ar Veni. This is a live session of a gospel song performed by Alex Bobo, a Romanian singer and songwriter. In this article, we will tell you more about who Alex Bobo is, what the song is about, and why you should download it. We will also show you two easy ways to download Alex Bobo - Orice Furtuna Ar Veni from YouTube or Spotify. So, let's get started!</p>
-<h2>Introduction</h2>
-<h3>Who is Alex Bobo?</h3>
-<p>Alex Bobo is a young and talented artist from Romania who has been singing since he was a child. He started his musical career in 2018, when he released his first single, "Cand Domnul e la Carma Vietii". Since then, he has been producing and releasing more songs, mostly in the gospel genre. He is also known for collaborating with other artists, such as Florin Peste, Marius and Fernando din Barbulesti, and CryssBoyy. Alex Bobo has a unique voice and style that makes him stand out from other singers. He sings with passion, emotion, and faith, expressing his love for God and his gratitude for life.</p>
-<h2>download alex bobo orice furtuna ar veni</h2><br /><p><b><b>Download File</b> ✵ <a href="https://urlin.us/2uSXsp">https://urlin.us/2uSXsp</a></b></p><br /><br />
-<h3>What is the song about?</h3>
-<p>Alex Bobo - Orice Furtuna Ar Veni is a song that talks about trusting God in times of trouble. The title translates to "Whatever storm may come", and it reflects the message of the song: no matter what difficulties or challenges we face in life, we can always rely on God's protection and guidance. The song also encourages us to praise God for his goodness and mercy, even when things seem hopeless or dark. The lyrics are based on biblical verses, such as Psalm 23, Psalm 91, and Isaiah 41:10. The song is sung in Romanian, but you can find the English translation online if you want to understand it better.</p>
-<h3>Why should you download it?</h3>
-<p>There are many reasons why you should download Alex Bobo - Orice Furtuna Ar Veni. Here are some of them:</p>
-<ul>
-<li>It is a beautiful and uplifting song that can inspire you and strengthen your faith.</li>
-<li>It is a live session that showcases Alex Bobo's amazing vocal skills and charisma.</li>
-<li>It is a high-quality recording that sounds great on any device or speaker.</li>
-<li>It is free and legal to download from YouTube or Spotify.</li>
-<li>It is easy and fast to download with the methods we will show you below.</li>
-</ul>
-<p>So, if you are ready to download Alex Bobo - Orice Furtuna Ar Veni, keep reading!</p>
-<h2>How to download Alex Bobo - Orice Furtuna Ar Veni</h2>
-<h3>Option 1: YouTube</h3>
-<p>One of the easiest ways to download Alex Bobo - Orice Furtuna Ar Veni is to use YouTube. YouTube is the most popular video-sharing platform in the world, and it is where you can find the official video of the song. Here are the steps to download the song from YouTube:</p>
-<h4>Step 1: Go to the official video link</h4>
-<p>The first thing you need to do is to go to the official video link of Alex Bobo - Orice Furtuna Ar Veni on YouTube. You can do this by typing the song title in the YouTube search bar, or by clicking on this link: . This will take you to the video page, where you can watch and listen to the song.</p>
-<h4>Step 2: Copy the video URL</h4>
-<p>The next thing you need to do is to copy the video URL from the address bar of your browser. The video URL is the web address that starts with https://www.youtube.com/watch?v= followed by a series of letters and numbers. For example, the video URL of Alex Bobo - Orice Furtuna Ar Veni is https://www.youtube.com/watch?v=0aYyZdQcL3E. You can copy the URL by selecting it with your mouse or keyboard, and then pressing Ctrl+C on your keyboard, or right-clicking and choosing Copy.</p>
-<h4>Step 3: Paste the URL into a YouTube downloader website</h4>
-<p>The third thing you need to do is to paste the URL into a YouTube downloader website. A YouTube downloader website is a website that allows you to download videos from YouTube for free. There are many YouTube downloader websites available online, but we recommend using Y2mate.com, as it is one of the most reliable and easy-to-use ones. To use Y2mate.com, you need to go to its homepage: . Then, you need to paste the URL that you copied in step 2 into the search box on the website. You can do this by pressing Ctrl+V on your keyboard, or right-clicking and choosing Paste.</p>
-<p>download alex bobo orice furtuna ar veni live 2022<br />
-download alex bobo orice furtuna ar veni mp3<br />
-download alex bobo orice furtuna ar veni zippyshare<br />
-download alex bobo orice furtuna ar veni originala<br />
-download alex bobo orice furtuna ar veni videoclip<br />
-download alex bobo orice furtuna ar veni gratis<br />
-download alex bobo orice furtuna ar veni manele noi<br />
-download alex bobo orice furtuna ar veni versuri<br />
-download alex bobo orice furtuna ar veni remix<br />
-download alex bobo orice furtuna ar veni karaoke<br />
-download alex bobo orice furtuna ar veni ringtone<br />
-download alex bobo orice furtuna ar veni youtube<br />
-download alex bobo orice furtuna ar veni fisierulmeu<br />
-download alex bobo orice furtuna ar veni online<br />
-download alex bobo orice furtuna ar veni album<br />
-download alex bobo orice furtuna ar veni radio edit<br />
-download alex bobo orice furtuna ar veni extended<br />
-download alex bobo orice furtuna ar veni instrumental<br />
-download alex bobo orice furtuna ar veni feat florin salam<br />
-download alex bobo orice furtuna ar veni hit 2022<br />
-download alex bobo orice furtuna ar veni lyrics<br />
-download alex bobo orice furtuna ar veni free<br />
-download alex bobo orice furtuna ar veni audio<br />
-download alex bobo orice furtuna ar veni 320 kbps<br />
-download alex bobo orice furtuna ar veni soundcloud<br />
-download alex bobo orice furtuna ar veni mixtape<br />
-download alex bobo orice furtuna ar veni spotify<br />
-download alex bobo orice furtuna ar veni apple music<br />
-download alex bobo orice furtuna ar veni itunes<br />
-download alex bobo orice furtuna ar veni amazon music<br />
-download alex bobo orice furtuna ar veni deezer<br />
-download alex bobo orice furtuna ar veni tidal<br />
-download alex bobo orice furtuna ar veni shazam<br />
-download alex bobo orice furtuna ar veni google play music<br />
-download alex bobo orice furtuna ar veni napster<br />
-download alex bobo orice furtuna ar veni pandora<br />
-download alex bobo orice furtuna ar veni iheartradio<br />
-download alex bobo orice furtuna ar veni tunein radio<br />
-download alex bobo orice furtuna ar veni slacker radio<br />
-download alex bobo orice furtuna ar veni last.fm<br />
-download alex bobo orice furtuna ar veni musicbrainz<br />
-download alex bobo orice furtuna ar veni discogs<br />
-download alex bobo orice furtuna ar veni allmusic<br />
-download alex bobo orice furtuna ar veni genius lyrics<br />
-download alex bobo orice furtuna ar veni azlyrics</p>
-<h4>Step 4: Choose the format and quality of the download</h4>
-<p>The fourth thing you need to do is to choose the format and quality of the download. After you paste the URL into Y2mate.com, it will automatically analyze the video and show you different options for downloading it. You can choose between different formats, such as MP3, MP4, M4A, WEBM, etc. You can also choose between different qualities, such as 360p, 480p, 720p, 1080p, etc. For downloading Alex Bobo - Orice Furtuna Ar Veni as a song, we suggest choosing MP3 as the format and 128kbps as the quality. This will give you a good sound quality without taking up too much space on your device.</p>
-<h4>Step 5: Click on the download button and save the file</h4>
-<p>The fifth and final thing you need to do is to click on the download button and save the file. After you choose the format and quality of the download, you will see a green download button next to it. You need to click on this button to start downloading the file. Depending on your browser settings, you may be asked to choose a location and a name for saving the file on your device. You can choose any location and name that you want, but make sure that you remember them so that you can find the file later. Once you click on Save or OK, the file will be downloaded and saved on your device.</p>
-<h3>Option 2: Spotify</h3>
-<p>Another easy way to download Alex Bobo - Orice Furtuna Ar Veni is to use Spotify. Spotify is one of the most popular music streaming platforms in the world, and it is where you can find the song in high quality. Here are the steps to download the song from Spotify:</p>
-<h4>Step 1: Download and install Spotify on your device</h4>
-<p>The first thing you need to do is to download and install Spotify on your device. Spotify is available for different devices, such as Windows, Mac, Android, iOS, etc. You can download Spotify from its official website: , or from the app store of your device. After you download Spotify, you need to install it by following the instructions on the screen.</p>
-<h4>Step 2: Create an account or log in with your existing one</h4>
-<p>The next thing you need to do is to create an account or log in with your existing one. To use Spotify, you need to have an account that allows you to access its features and content. You can create a free account or a premium account, depending on your preferences and budget. A free account lets you listen to music with ads and some limitations, while a premium account lets you listen to music without ads and with more benefits, such as offline listening. You can create an account by clicking on the Sign Up button on the Spotify website or app, or by using your Facebook or Google account. You can log in with your existing account by clicking on the Log In button and entering your email and password.</p>
-<h4>Step 3: Search for Alex Bobo - Orice Furtuna Ar Veni in the app</h4>
-<p>The third thing you need to do is to search for Alex Bobo - Orice Furtuna Ar Veni in the app. To do this, you need to open the Spotify app on your device and tap on the Search icon at the bottom of the screen. Then, you need to type Alex Bobo - Orice Furtuna Ar Veni in the search bar and hit Enter. This will show you the results related to the song, such as the artist, the album, and the playlist. You need to tap on the song title to open it.</p>
-<h4>Step 4: Tap on the three dots icon and select download</h4>
-<p>The fourth thing you need to do is to tap on the three dots icon and select download. After you open the song, you will see a three dots icon at the top right corner of the screen. You need to tap on this icon to open a menu with different options, such as Share, Add to Playlist, Go to Artist, etc. You need to scroll down and find the Download option and tap on it. This will start downloading the song to your device.</p>
-<h4>Step 5: Enjoy the song offline anytime you want</h4>
-<p>The fifth and final thing you need to do is to enjoy the song offline anytime you want. After you download the song, you can listen to it without an internet connection or data usage. You can find the song in your Library section of the app, under Downloads. You can also add it to your favorite playlist or share it with your friends.</p>
-<h2>Conclusion</h2>
-<h3>Summary of the main points</h3>
-<p>In this article, we have shown you how to download Alex Bobo - Orice Furtuna Ar Veni, a live session of a gospel song performed by Alex Bobo, a Romanian singer and songwriter. We have also told you more about who Alex Bobo is, what the song is about, and why you should download it. We have given you two easy ways to download the song from YouTube or Spotify, with detailed steps and screenshots.</p>
-<h3>Call to action</h3>
-<p>We hope that you have enjoyed reading this article and that you have learned something new. If you are interested in downloading Alex Bobo - Orice Furtuna Ar Veni, we encourage you to try one of the methods we have suggested and let us know how it works for you. You can also leave us a comment below with your feedback or questions about the article or the song. Thank you for reading and happy listening!</p>
-<h2>FAQs</h2>
-<ul>
-<li><b>Q: Is Alex Bobo - Orice Furtuna Ar Veni available on other platforms besides YouTube and Spotify?</b></li>
-<li>A: Yes, Alex Bobo - Orice Furtuna Ar Veni is also available on other platforms, such as Apple Music, Deezer, Amazon Music, etc. You can find it by searching for it on these platforms or by following this link: .</li>
-<li><b>Q: How long is Alex Bobo - Orice Furtuna Ar Veni?</b></li>
-<li>A: Alex Bobo - Orice Furtuna Ar Veni is about 5 minutes and 30 seconds long.</li>
-<li><b> Q: Can I download Alex Bobo - Orice Furtuna Ar Veni without an account?</b></li>
-<li>A: Yes, you can download Alex Bobo - Orice Furtuna Ar Veni without an account if you use YouTube or Y2mate.com. However, if you use Spotify, you need to have an account to download the song.</li>
-<li><b>Q: Is it legal to download Alex Bobo - Orice Furtuna Ar Veni?</b></li>
-<li>A: Yes, it is legal to download Alex Bobo - Orice Furtuna Ar Veni as long as you use it for personal and non-commercial purposes. However, you should respect the rights of the artist and the platform and not distribute or sell the song without permission.</li>
-<li><b>Q: What are some other songs by Alex Bobo that I can download?</b></li>
-<li>A: Some other songs by Alex Bobo that you can download are:</li>
-<ul>
-<li>Cand Domnul e la Carma Vietii</li>
-<li>Doamne, Tu Esti Tot Ce Am</li>
-<li>Eu Te Iubesc</li>
-<li>Isus, Tu Esti Lumina Mea</li>
-<li>Nu Pot Sa Traiesc Fara Tine</li>
-</ul>
-</ul></p> 197e85843d<br />
-<br />
-<br />
spaces/1phancelerku/anime-remove-background/All Skin Unlocked in Mobile Legends Bang Bang APK Download Now.md
DELETED
@@ -1,79 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Mobile Legends Bang Bang Unlock All Skin Apk: Everything You Need to Know</h1>
|
3 |
-
<p>Mobile Legends: Bang Bang (MLBB) is one of the most popular and addictive multiplayer online battle arena (MOBA) games on mobile devices. It features a variety of heroes with different roles, skills, and styles that you can choose from and customize with different skins. Skins are cosmetic items that change the appearance of your heroes and make them look more cool, stylish, or unique.</p>
|
4 |
-
<p>Skins can also provide some benefits for your gameplay, such as enhancing your confidence, intimidating your enemies, or showing off your achievements. However, skins are not easy to get in MLBB. They usually cost diamonds, which are the premium currency of the game that you have to buy with real money. Some skins are also limited-time offers or exclusive rewards that you may miss out on if you don't act fast.</p>
|
5 |
-
<h2>mobile legends bang bang unlock all skin apk</h2><br /><p><b><b>DOWNLOAD</b> ❤ <a href="https://jinyurl.com/2uNQ4n">https://jinyurl.com/2uNQ4n</a></b></p><br /><br />
|
6 |
-
<p>That's why some players resort to using an unlock all skin apk, which is a modified version of the MLBB app that claims to give you access to all the skins in the game for free. Sounds too good to be true, right? Well, it is. In this article, we will tell you everything you need to know about the unlock all skin apk, how to download and install it, and why you should avoid using it at all costs.</p>
|
7 |
-
<h2>How to Download and Install the Unlock All Skin Apk</h2>
|
8 |
-
<p>If you still want to try using the unlock all skin apk despite our warnings, here are the steps you need to follow:</p>
|
9 |
-
<ol>
|
10 |
-
<li>Find a reliable source for downloading the apk file. This is easier said than done, as there are many fake or malicious websites that claim to offer the unlock all skin apk but actually contain malware, viruses, or spyware that can infect your device or steal your personal information. Be careful and do your research before clicking on any link or downloading any file.</li>
|
11 |
-
<li>Enable unknown sources on your Android device. This will allow you to install apps from sources other than the Google Play Store. To do this, go to your device settings and look for the security or privacy option. Depending on your device model and Android version, you may need to tap on the lock screen and security tab or the install unknown apps switch. Then, you need to turn on the unknown sources switch or check the box next to it. You may see a warning message against enabling this option, but you can ignore it if you trust the source of the apk file .</li>
|
12 |
-
<li>Install the apk file on your device. Locate the apk file that you downloaded from your source and tap on it to start the installation process. You may need to grant some permissions for the app to access your device's storage, network, or other features. Follow the on-screen instructions until the installation is complete.</li>
|
13 |
-
<li>Verify if the apk works and what are the possible issues. Launch the MLBB app from your device and check if you can see all the skins in the game. You may need to restart the app or your device if it doesn't work at first. However, be aware that using the unlock all skin apk may cause some problems, such as lagging, crashing, or errors in the game. You may also face legal issues from Moonton, the developer of MLBB, for violating their terms of service and intellectual property rights. They may ban or suspend your account or take legal action against you for using unauthorized mods or hacks.</li>
|
14 |
-
</ol>
|
15 |
-
<h2>Conclusion</h2>
|
16 |
-
<p>In conclusion, using the unlock all skin apk may seem like a tempting way to get all the skins in MLBB for free, but it is not worth the risk and hassle. You may end up damaging your device, compromising your security, or losing your account by using this apk. You may also ruin the fun and fairness of the game for yourself and other players by using an unfair advantage.</p>
|
17 |
-
<p>Instead of using the unlock all skin apk, we recommend that you get skins legally in MLBB by following these alternatives:</p>
<ul>
<li>Participate in events. MLBB often hosts events that reward players with free skins or vouchers that can be used to buy skins. Some examples are the Valentine Box Event, the Surprise Box Event, and the Starlight Carnival Event. You can check the events tab in the game to see which events are currently available and how to join them.</li>
<li>Redeem codes. MLBB also occasionally releases codes that can be redeemed for free skins or other items in the game. These codes are usually given out through its official social media accounts, live streams, or collaborations with other platforms. You can follow the official Facebook, Instagram, YouTube, or TikTok accounts to stay updated on the latest codes and how to redeem them.</li>
<li>Join Starlight membership. Starlight membership is a monthly subscription that gives you access to exclusive skins and other benefits in MLBB. You can join by paying a certain amount of diamonds or real money every month, and you can get a free trial by inviting friends to play MLBB or completing tasks in the game.</li>
<li>Complete tasks. MLBB also has various tasks that you can complete to earn rewards such as diamonds, tickets, or fragments. Diamonds are the premium currency used to buy skins in the shop. Tickets are another currency that can be used to buy some skins in the shop or to draw from lucky spin events. Fragments are items that can be exchanged for skins in the fragment shop. You can earn these rewards by playing matches, logging in daily, achieving milestones, or joining clans.</li>
<li>Watch live streams. MLBB also has a live stream feature that lets you watch other players play the game live and interact with them through chat or gifts. Sometimes, streamers give away free skins or vouchers to their viewers as a way of showing appreciation or attracting more followers. You can watch live streams in the game by tapping the live icon on the main screen.</li>
</ul>
<p>By following these alternatives, you can get skins legally in MLBB without risking your device, account, or reputation. You can also enjoy the game more by supporting its development and respecting its rules.</p>
<h2>FAQs</h2>
<ol>
<li><b>Is the unlock all skin apk safe to use?</b></li>
<li>No, it is not safe to use. It may contain malware, viruses, or spyware that can harm your device or steal your personal information. It may also violate the terms of service of MLBB and result in your account being banned or suspended.</li>
<li><b>How can I get skins for free in MLBB?</b></li>
<li>There are several ways to get skins for free in MLBB, such as participating in events, redeeming codes, joining Starlight membership, completing tasks, or watching live streams. You can also use diamonds, tickets, or fragments to buy skins in the shop.</li>
<li><b>What are the best skins in MLBB?</b></li>
<li>The best skins in MLBB depend on your personal preference and taste. However, some of the most popular and expensive skins are the Legend skins, which have unique effects, animations, and voice-overs. Some examples of Legend skins are Alucard's Obsidian Blade, Saber's Codename: Storm, Gord's Conqueror, and Miya's Modena Butterfly.</li>
<li><b>How can I update the unlock all skin apk?</b></li>
<li>You cannot update the unlock all skin apk through the Google Play Store. You would have to find a new version of the apk file from a third-party source and install it manually. This is not recommended, as it may expose you to more risks and problems.</li>
<li><b>Can I use the unlock all skin apk on iOS devices?</b></li>
<li>No. The apk file format is only compatible with Android devices. If you want to use skins on iOS devices, you have to buy them through the official MLBB app.</li>
</ol>
spaces/1phancelerku/anime-remove-background/Download Crazy Taxi Classic APK - The Ultimate Racing Game for Android.md
DELETED
@@ -1,112 +0,0 @@
<h1>Crazy Taxi Game Free Download for Android APK</h1>
<p>Do you love driving fast and furious cars in a chaotic city? Do you want to experience the thrill of picking up and dropping off passengers in a limited time? Do you want to enjoy a classic arcade game on your Android device? If you answered yes to any of these questions, then you should try Crazy Taxi Game, one of the most popular and fun racing games ever made. In this article, we will tell you everything you need to know about Crazy Taxi Game, how to download it for free as an APK file, and how to play it on your Android device.</p>
<h2>crazy taxi game free download for android apk</h2>
<p><b>Download File:</b> <a href="https://jinyurl.com/2uNTHI">https://jinyurl.com/2uNTHI</a></p>
<h2>What is Crazy Taxi Game?</h2>
<h3>A brief introduction to the game and its features</h3>
<p>Crazy Taxi Game is a video game that was originally released by Sega in 1999 for arcade machines and later ported to various platforms, including Android. The game is set in a fictional city inspired by San Francisco, where you play as one of four taxi drivers who have to pick up and drop off customers as fast as possible. You can choose from three-, five-, or ten-minute gameplay modes, or play in the original arcade mode with unlimited time. You can also customize your car, driver, and music from a selection of rock songs by bands like The Offspring and Bad Religion.</p>
<h3>The history and popularity of the game</h3>
<p>Crazy Taxi Game was a huge hit when it was first released, thanks to its unique gameplay, colorful graphics, catchy soundtrack, and humorous voice acting. It received critical acclaim from reviewers and gamers alike, and won several awards, such as the Best Arcade Game of 1999 by IGN. It also spawned several sequels, spin-offs, and adaptations, such as Crazy Taxi 2, Crazy Taxi 3, Crazy Taxi City Rush, and even a live-action movie. The game has sold over five million copies worldwide, and has been downloaded over ten million times on Android alone.</p>
<h2>How to Download Crazy Taxi Game for Android APK?</h2>
<h3>The requirements and compatibility of the game</h3>
<p>To download and play Crazy Taxi Game on your Android device, you will need at least Android version 4.1 or higher, and about 250 MB of free storage space. The game is compatible with most Android devices, including smartphones and tablets. However, some older or low-end devices may experience performance issues or crashes. You can check the compatibility of your device on the Google Play Store page of the game.</p>
<h3>The steps to download and install the game from different sources</h3>
<p>There are two main ways to download Crazy Taxi Game for Android APK: from the official Google Play Store or from a third-party website. Here are the steps for each method:</p>
<ul>
<li>From the Google Play Store: <ol>
<li>Open the Google Play Store app on your device and search for "Crazy Taxi Classic".</li>
<li>Select the game from the list of results and tap on "Install".</li>
<li>Wait for the download and installation process to complete.</li>
<li>Launch the game from your app drawer or home screen.</li>
</ol></li>
<li>From a third-party website: <ol>
<li>Open your web browser and search for "Crazy Taxi Game APK" on a search engine like Google or Bing.</li>
<li>Select a reputable and trustworthy website that offers the APK file of the game, such as APKPure or APKMirror.</li>
<li>Download the APK file to your device by tapping on the download button or link.</li>
<li>Before installing the APK file, you may need to enable the installation of apps from unknown sources on your device. To do this, go to Settings > Security > Unknown Sources and toggle it on.</li>
<li>Locate the APK file on your device using a file manager app and tap on it to install it.</li>
<li>Launch the game from your app drawer or home screen.</li>
</ol></li>
</ul>
<h3>The advantages and disadvantages of downloading the game as an APK file</h3>
<p>Downloading Crazy Taxi Game as an APK file has some pros and cons that you should be aware of before choosing this method. Here are some of them:</p>
<table>
<tr><th>Advantages</th><th>Disadvantages</th></tr>
<tr><td>You can download the game even if it is not available in your region or country.</td><td>You may not get the latest updates and features of the game.</td></tr>
<tr><td>You can download the game without using the Google Play Store or having a Google account.</td><td>You may expose your device to malware or viruses from untrusted sources.</td></tr>
<tr><td>You can download the game for free without any ads or in-app purchases.</td><td>You may violate the terms and conditions of the game developer or publisher.</td></tr>
</table>
<h2>How to Play Crazy Taxi Game on Android?</h2>
<h3>The gameplay and controls of the game</h3>
<p>Crazy Taxi Game is easy to play but hard to master. The gameplay is simple: you have to drive your taxi around the city and pick up customers who are waiting for you. You have to take them to their destinations as quickly as possible, while avoiding traffic, obstacles, and other hazards. You can earn extra money by performing stunts, such as jumps, drifts, and near misses. You can also earn tips by satisfying your customers' preferences, such as driving fast, slow, or crazy. The more money you make, the higher your score and rank will be.</p>
<p>The controls of the game are intuitive and responsive. You can use either touch or tilt controls to steer your taxi. You can also use buttons to accelerate, brake, reverse, and switch lanes, and a horn button to honk at other vehicles or pedestrians. You can change the control settings from the options menu according to your preference.</p>
<h3>The modes and challenges of the game</h3>
<p>Crazy Taxi Game offers four different modes to choose from: Arcade, Original, Crazy Box, and Leaderboards. Here is a brief description of each mode:</p>
<ul>
<li>Arcade: This is the classic mode that mimics the original arcade game. You have to pick up and drop off customers in a limited time, which you can extend by reaching checkpoints or earning bonuses. You can choose from three difficulty levels: Easy, Normal, or Hard.</li>
<li>Original: This mode is similar to Arcade, but with a different map and layout. You have to pick up and drop off customers in a limited time, which you can extend by reaching checkpoints or earning bonuses. You can choose from three difficulty levels: Easy, Normal, or Hard.</li>
<li>Crazy Box: This mode consists of 16 mini-games that test your skills and abilities. You have to complete various tasks and challenges, such as bowling, golfing, popping balloons, and jumping ramps. You can unlock new mini-games by completing previous ones.</li>
<li>Leaderboards: This mode allows you to compete with other players around the world. You can see your rank and score on global and local leaderboards, and compare your stats and achievements with other players.</li>
</ul>
<h3>The tips and tricks to master the game</h3>
<p>Crazy Taxi Game is a game that requires skill, strategy, and luck. Here are some tips and tricks to help you master the game and become a crazy taxi driver:</p>
<ul>
<li>Learn the map and the shortcuts. The city is full of hidden paths, shortcuts, and ramps that can help you save time and avoid traffic. Explore the map and memorize the locations of the customers and their destinations. Use the arrow indicator to guide you to the nearest customer or destination.</li>
<li>Choose your driver and car wisely. Each driver and car has different attributes, such as speed, acceleration, handling, and weight. Some drivers and cars are better suited for certain modes or challenges than others. Experiment with different combinations and find the one that suits your style and preference.</li>
<li>Drive crazy but not reckless. Driving crazy means driving fast, furious, and fun. Driving reckless means driving careless, dangerous, and dumb. You want to drive crazy to earn more money and bonuses, but not so reckless that you lose customers or crash your car. Balance speed and safety, and avoid collisions and accidents.</li>
<li>Use your horn and your brakes. Your horn is a useful tool to alert other vehicles or pedestrians of your presence; you can use it to make them move out of your way. Your brakes are also important to control your car and avoid crashes; you can use them to slow down, stop, reverse, or drift.</li>
<li>Have fun and enjoy the game. Crazy Taxi Game is meant to be fun and enjoyable. Don't take it too seriously or get frustrated if you fail or lose. You can also play with your friends or family and share your scores and achievements.</li>
</ul>
<h2>Conclusion</h2>
<h3>A summary of the main points and a call to action</h3>
<p>Crazy Taxi Game is a classic arcade game that you can download for free as an APK file on your Android device. The game lets you drive a taxi in a crazy city and pick up customers in a limited time. Its four modes (Arcade, Original, Crazy Box, and Leaderboards) offer different challenges and plenty of fun, and the game's graphics, sound, and music make it even more enjoyable. If you are looking for a fun and exciting racing game on your Android device, you should definitely try Crazy Taxi Game today.</p>
<h2>FAQs</h2>
<h3>Q1. Is Crazy Taxi Game free to play on Android?</h3>
<p>A1. Yes, Crazy Taxi Game is free to play on Android devices. You can download it from the Google Play Store or from a third-party website as an APK file.</p>
<h3>Q2. Is Crazy Taxi Game safe to download as an APK file?</h3>
<p>A2. Yes, Crazy Taxi Game is safe to download as an APK file if you get it from a reputable and trustworthy website. However, you should always be careful when downloading apps from unknown sources and scan them for malware or viruses before installing them.</p>
<h3>Q3. How can I update Crazy Taxi Game on Android?</h3>
<p>A3. You can update Crazy Taxi Game on Android by following these steps:</p>
<ul>
<li>If you downloaded the game from the Google Play Store, you can check for updates from the app page or from the My Apps & Games section.</li>
<li>If you downloaded the game as an APK file, you can check for updates from the website where you downloaded it or from the app settings.</li>
</ul>
<h3>Q4. What are some alternatives to Crazy Taxi Game on Android?</h3>
<p>A4. Some alternatives to Crazy Taxi Game on Android are:</p>
<ul>
<li>Taxi Sim 2020: a realistic taxi simulator that lets you drive various taxis in different cities around the world.</li>
<li>Taxi Run: a casual endless-runner in which you drive a taxi down a road full of obstacles and traffic.</li>
<li>Taxi Driver 3D: a 3D taxi driving game with realistic physics and graphics.</li>
</ul>
<h3>Q5. How can I contact the developers of Crazy Taxi Game?</h3>
<p>A5. You can contact the developers of Crazy Taxi Game by sending them an email at [email protected] or by visiting their website at https://www.sega.com/.</p>
spaces/1phancelerku/anime-remove-background/Download and Install VLC on Your Windows RT 8.1 Tablet or PC.md
DELETED
@@ -1,170 +0,0 @@
<h1>How to Download and Install VLC Media Player on Windows RT 8.1 Devices</h1>
<p>If you have a Windows RT 8.1 device, such as a Surface RT or Surface 2 tablet, you might be wondering how to play your favorite media files on it. Unfortunately, Windows RT 8.1 has some limitations that prevent you from running most desktop applications, including many popular media players. However, there is one media player that can run on Windows RT 8.1 devices, and that is VLC Media Player.</p>
<p>VLC Media Player is a free and open source cross-platform multimedia player that can play most media files, as well as DVDs, audio CDs, VCDs, and various streaming protocols. It also has many features that make it a versatile and powerful tool for media playback and manipulation. In this article, we will show you how to download and install VLC Media Player on your Windows RT 8.1 device, and how to use it to play, convert, edit, and download media files.</p>
<h2>vlc windows rt 8.1 download</h2>
<p><b>Download File:</b> <a href="https://jinyurl.com/2uNRFM">https://jinyurl.com/2uNRFM</a></p>
<h2>What is Windows RT 8.1 and What are its Limitations?</h2>
<p>Windows RT 8.1 is a version of Windows 8.1 that is optimized for thin and light devices that have extended battery life and are designed for life on the go. It runs on devices that use the ARM architecture, which is different from the x86 architecture that most desktop PCs use. Windows RT 8.1 only runs built-in apps or apps that you download from the Windows Store, and it automatically updates itself and protects itself from viruses and malware.</p>
<p>However, while Windows RT 8.1 inherits the appearance and functionality of Windows 8.1, it has some drawbacks that you should be aware of before buying a Windows RT 8.1 device or downloading VLC Media Player for it:</p>
<ul>
<li>It can only execute software that is digitally signed by Microsoft, which means you cannot run any desktop applications or programs that are not available in the Windows Store.</li>
<li>It lacks certain developer-oriented features, such as the command prompt, PowerShell, Group Policy Editor, and Registry Editor.</li>
<li>It does not support some peripheral devices, such as printers, scanners, and webcams, unless they have drivers that are compatible with Windows RT 8.1.</li>
<li>It does not support some file formats, such as MKV, FLAC, and OGG, unless you have an app that can play them.</li>
<li>It does not support some network protocols, such as VPN, FTP, and SSH, unless you have an app that can use them.</li>
</ul>
<p>If you want to learn more about Windows RT 8.1 and its limitations, you can check out this <a href="">FAQ</a> from Microsoft or this <a href="">article</a> from CNET.</p>
<h2>What is VLC Media Player and What are its Features?</h2>
<p>VLC Media Player is one of the most popular and widely used media players in the world. It was developed by VideoLAN, a non-profit organization that promotes free and open source software for multimedia. It was first released in 2001 and has since been updated regularly with new features and bug fixes.</p>
<p>VLC Media Player has many features that make it a great choice for media playback and manipulation on Windows RT 8.1 devices. Some of them are:</p>
<ul>
<li>It can play almost any media file format, including MKV, MP4, AVI, WMV, MOV, FLV, MP3, AAC, WMA, OGG, and FLAC, without the need for additional codecs or plugins.</li>
<li>It can play DVDs, audio CDs, VCDs, and various streaming protocols, such as HTTP, RTSP, RTP, and UDP, as well as online radio stations and podcasts.</li>
<li>It can convert videos to any format, such as MP4, AVI, WMV, or FLV, with various options for video and audio quality, resolution, bitrate, and frame rate.</li>
<li>It can edit videos by trimming, cropping, rotating, adding filters and effects, and adjusting brightness, contrast, saturation, and hue.</li>
<li>It can remove audio from any video, or extract audio from any video and save it as a separate file.</li>
<li>It can add subtitles to any video and synchronize them with the audio and video tracks.</li>
<li>It can act as a video downloader for YouTube and other websites: you copy and paste the URL of the video into VLC.</li>
<li>It can stream media files from your computer to other devices on your network or over the internet using VLC's built-in server.</li>
<li>It can customize its interface with various skins and themes, or you can create your own using VLC Skin Editor.</li>
<li>It can be controlled remotely using VLC Remote or other apps that support VLC's web interface.</li>
</ul>
<p>If you want to learn more about VLC Media Player and its features, you can check out the <a href="">official website</a> or this <a href="">user guide</a>.</p>
<h2>How to Download and Install VLC Media Player on Windows RT 8.1 Devices</h2>
<p>There are two ways to download and install VLC Media Player on your Windows RT 8.1 device: from the Windows Store or from the official website. We will explain both methods below:</p>
<h3>How to Download VLC Media Player for Windows RT 8.1 from the Windows Store</h3>
<p>The easiest way to get VLC Media Player on your Windows RT 8.1 device is to download it from the Windows Store. Here are the steps to do so:</p>
<ol>
<li>Open the Windows Store app on your device and search for "VLC" in the search box.</li>
<li>Select the app named "VLC for Windows Store" from the search results and tap on it.</li>
<li>Tap on the "Install" button and wait for the app to download and install on your device.</li>
<li>Once the installation is complete, you can launch VLC Media Player from the Start screen or the Apps list.</li>
</ol>
<p>Note that this version of VLC Media Player is different from the desktop version that you can download from the official website. It has a different interface, and some features may not be available or may work differently. However, it still supports most media file formats and has basic playback and conversion functions.</p>
<h3>How to Download VLC Media Player for Windows RT 8.1 from the Official Website</h3>
<p>If you want to get the desktop version of VLC Media Player on your Windows RT 8.1 device, you will need to download it from the official website and install it manually. However, this method requires some technical skills and involves some risks. You will need to enable developer mode on your device and run a PowerShell script that will bypass the digital signature requirement of Windows RT 8.1. This may void your warranty or damage your device if done incorrectly. Therefore, we do not recommend this method unless you are confident in what you are doing and understand the consequences.</p>
<p>If you still want to proceed with this method, here are the steps to do so:</p>
<ol>
<li>Download the latest version of VLC Media Player for Windows RT 8.1 from this <a href="">link</a>. Make sure you choose the ARM version that matches your device's architecture.</li>
<li>Extract the downloaded ZIP file to a folder on your device or a USB drive.</li>
<li>Open the Settings app on your device and go to "Update & security" > "For developers".</li>
<li>Select "Developer mode" and confirm by tapping "Yes". This will enable you to run unsigned apps on your device.</li>
<li>Open File Explorer on your device and go to "C:\Windows\System32". Find the file named "WindowsPowerShell\v1.0\powershell.exe" and copy it to another folder (e.g., "C:\Temp"). This will create a copy of the PowerShell executable that you can run without restrictions.</li>
<li>Open the folder where you copied the PowerShell executable and right-click on it. Select "Run as administrator". This will open a PowerShell window with elevated privileges.</li>
<li>In the PowerShell window, type the following command and press Enter: <code>Set-ExecutionPolicy Unrestricted</code>. This will allow you to run any script on your device.</li>
<li>Now, type the following command and press Enter: <code>cd "C:\Users\YourUserName\Downloads\VLC-RT-3.0.16"</code>. Replace "YourUserName" with your actual user name and "VLC-RT-3.0.16" with the name of the folder where you extracted the VLC Media Player ZIP file. This will change the directory to the folder where the VLC Media Player files are located.</li>
<li>Finally, type the following command and press Enter: <code>.\Add-AppDevPackage.ps1</code>. This will run a script that will install VLC Media Player on your device.</li>
<li>Follow the instructions on the screen and wait for the installation to complete. You may need to enter your Microsoft account credentials and accept some terms and conditions.</li>
<li>Once the installation is complete, you can close the PowerShell window and launch VLC Media Player from the Start screen or the Apps list.</li>
</ol>
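<p>For quick reference, here are the three commands from steps 7–9, in order, exactly as quoted above; run them in the elevated PowerShell window, replacing "YourUserName" and the folder name with your own values:</p>
<p><code>Set-ExecutionPolicy Unrestricted<br />
cd "C:\Users\YourUserName\Downloads\VLC-RT-3.0.16"<br />
.\Add-AppDevPackage.ps1</code></p>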
<p>Note that this version of VLC Media Player is identical to the desktop version that you can download from the official website. It has the same interface and features as the desktop version, but it may not be as stable or compatible with Windows RT 8.1 devices. You may encounter some errors or crashes while using it, so use it at your own risk.</p>
<h2>How to Use VLC Media Player on Windows RT 8.1 Devices</h2>
<p>Now that you have downloaded and installed VLC Media Player on your Windows RT 8.1 device, you can use it to play, convert, edit, and download media files. Here are some tips on how to use VLC Media Player on Windows RT 8.1 devices:</p>
<h3>How to Play Various Media Files with VLC Media Player</h3>
<p>VLC Media Player can play almost any media file format that you throw at it, without the need for additional codecs or plugins. Here are some ways to play various media files with VLC Media Player:</p>
<ul>
<li>To play a media file from your device, open VLC Media Player and tap on the "Browse" button on the main screen. Navigate to the folder where your media file is located and tap on it to play it.</li>
<li>To play a media file from a USB drive or an external hard drive, connect it to your device and open VLC Media Player. Tap on the "Browse" button on the main screen and select "This PC" from the sidebar. Find your USB drive or external hard drive under "Devices and drives" and tap on it to open it. Navigate to the folder where your media file is located and tap on it to play it.</li>
<li>To play a media file from a network location, such as a shared folder or a NAS server, open VLC Media Player and tap on the "Browse" button on the main screen. Select "Network" from the sidebar and tap on the "+" button at the bottom right corner of the screen. Enter the URL or IP address of your network location and tap on "OK". Navigate to the folder where your media file is located and tap on it to play it.</li>
<li>To play a media file from a streaming source, such as a website or an online radio station, open VLC Media Player and tap on the "Stream" button on the main screen. Enter the URL of your streaming source and tap on "OK". VLC Media Player will start playing the stream.</li>
<li>To play a DVD, audio CD, or VCD, insert it into your device's optical drive and open VLC Media Player. Tap on the "Disc" button on the main screen and select the type of disc you want to play. Tap on "Play" and VLC Media Player will start playing the disc.</li>
</ul>
<h4>How to Adjust Video and Audio Settings with VLC Media Player</h4>
<p>VLC Media Player allows you to adjust various video and audio settings to enhance your media playback experience:</p>
<ul>
<li>To adjust the brightness, contrast, saturation, hue, and gamma of the video, tap on the "Video" button on the playback screen and select "Adjustments and Effects". Tap on the "Video Effects" tab and use the sliders to adjust the settings as you like.</li>
<li>To adjust the volume, balance, equalizer, compressor, and spatializer of the audio, tap on the "Audio" button on the playback screen and select "Adjustments and Effects". Tap on the "Audio Effects" tab and use the sliders and buttons to adjust the settings as you like.</li>
<li>To change the aspect ratio, crop ratio, zoom level, or orientation of the video, tap on the "Video" button on the playback screen and select "Crop". Use the buttons to select the option you want.</li>
<li>To change the audio track, subtitle track, or playback speed of the media file, tap on the "Tools" button on the playback screen and select the option you want.</li>
</ul>
<h4>How to Add Subtitles and Synchronize Them with VLC Media Player</h4>
<p>VLC Media Player can display subtitles for any video file that has a separate subtitle file in SRT, SSA, ASS, or VTT format. You can also synchronize the subtitles with the audio and video tracks if they are out of sync:</p>
<ul>
<li>To add subtitles to a video file, make sure that the subtitle file has the same name as the video file and is in the same folder. For example, if your video file is named "movie.mp4", your subtitle file should be named "movie.srt". Then, open VLC Media Player and play the video file. The subtitles should appear automatically.</li>
<li>To synchronize subtitles with a video file, tap on the "Tools" button on the playback screen and select "Track Synchronization". Tap on the "Subtitles/Video" tab and use the buttons to adjust the subtitle delay. You can also use the keyboard shortcuts "G" and "H" to decrease or increase the subtitle delay by 50 milliseconds.</li>
<li>To change the font, size, color, or position of the subtitles, tap on the "Video" button on the playback screen and select "Subtitles". Use the buttons to select the option you want.</li>
</ul>
<h3>How to Convert Videos to Any Format with VLC Media Player</h3>
<p>VLC Media Player can also convert videos to any format that you want, such as MP4, AVI, WMV, or FLV. You can also choose from various presets for different devices, such as iPhone, iPad, or Android. Here are some ways to start a conversion:</p>
<ul>
<li>To convert a video file from your device, open VLC Media Player and tap on the "Browse" button on the main screen. Navigate to the folder where your video file is located and tap on it. Then, tap on the "Convert" button at the bottom right corner of the screen.</li>
<li>To convert a video file from a USB drive or an external hard drive, connect it to your device and open VLC Media Player. Tap on the "Browse" button on the main screen and select "This PC" from the sidebar. Find your USB drive or external hard drive under "Devices and drives" and tap on it to open it. Navigate to the folder where your video file is located and tap on it. Then, tap on the "Convert" button at the bottom right corner of the screen.</li>
<li>To convert a video file from a network location, such as a shared folder or a NAS server, open VLC Media Player and tap on the "Browse" button on the main screen. Select "Network" from the sidebar and tap on the "+" button at the bottom right corner of the screen. Enter the URL or IP address of your network location and tap on "OK". Navigate to the folder where your video file is located and tap on it. Then, tap on the "Convert" button at the bottom right corner of the screen.</li>
<li>To convert a video file from a streaming source, such as a website or an online radio station, open VLC Media Player and tap on the "Stream" button on the main screen. Enter the URL of your streaming source and tap on "OK". Then, tap on the "Convert" button at the bottom right corner of the screen.</li>
</ul>
<p>After tapping on the "Convert" button, you will see a screen where you can choose the output format, destination, and options for your converted video file:</p>
<ul>
<li>To choose the output format, tap on the "Profile" drop-down menu and select the format that you want. You can also tap on the "Edit" button next to the menu to customize the video and audio codecs, bitrate, resolution, and frame rate.</li>
<li>To choose the destination, tap on the "Browse" button and navigate to the folder where you want to save your converted video file. You can also enter a name for it in the "File name" box.</li>
<li>To choose the options, tap on the "Options" button and select the options that you want. You can choose to start or stop the conversion at a specific time, add subtitles or metadata to the converted file, or deinterlace or scale it.</li>
</ul>
<p>Once you have chosen the output format, destination, and options, tap on the "Start" button and wait for VLC Media Player to convert your video file. You can see the progress of the conversion on the playback screen, and you can pause or cancel it at any time by tapping on the "Pause" or "Stop" button.</p>
<p>Once the conversion is complete, you can find your converted video file in the destination folder that you chose. You can play it with VLC Media Player or any other media player that supports the output format.</p>
<h2>Conclusion</h2>
<p>VLC Media Player is a powerful and versatile media player that can run on Windows RT 8.1 devices and play, convert, edit, and download media files. It can overcome some of the limitations of Windows RT 8.1 and enhance your media playback experience. However, the desktop build may not be as stable or compatible with Windows RT 8.1 devices as the Windows Store version, so you should use it with caution and at your own risk.</p>
<p>We hope that this article has helped you learn how to download and install VLC Media Player on your Windows RT 8.1 device, and how to use it to play, convert, edit, and download media files. If you have any questions or feedback, please feel free to leave a comment below.</p>
<h3>FAQs</h3>
<p>Here are some frequently asked questions about VLC Media Player and Windows RT 8.1:</p>
<ul>
<li><b>Q: Is VLC Media Player safe to use on Windows RT 8.1 devices?</b></li>
<li>A: VLC Media Player is safe to use on Windows RT 8.1 devices if you download it from the Windows Store or from the official website of VideoLAN. However, if you download it from the official website, you will need to enable developer mode on your device and run a PowerShell script that bypasses the digital signature requirement of Windows RT 8.1. This may void your warranty or damage your device if done incorrectly, so we do not recommend this method unless you are confident in what you are doing and understand the consequences.</li>
<li><b>Q: How can I update VLC Media Player on Windows RT 8.1 devices?</b></li>
<li>A: If you downloaded VLC Media Player from the Windows Store, you can update it automatically or manually through the Windows Store app. If you downloaded it from the official website, you will need to download the latest version for Windows RT 8.1 from the same link and install it manually using the same method as before.</li>
<li><b>Q: How can I uninstall VLC Media Player on Windows RT 8.1 devices?</b></li>
<li>A: If you downloaded VLC Media Player from the Windows Store, you can uninstall it by right-clicking on its tile on the Start screen or the Apps list and selecting "Uninstall". If you downloaded it from the official website, you can uninstall it by opening File Explorer and deleting the folder where you extracted the VLC Media Player ZIP file.</li>
<li><b>Q: How can I get help or support for VLC Media Player on Windows RT 8.1 devices?</b></li>
<li>A: You can visit the <a href="">official forum</a> or the <a href="">official wiki</a> of VideoLAN, or contact them via <a href="">email</a> or <a href="">social media</a>.</li>
<li><b>Q: How can I donate or contribute to VLC Media Player and VideoLAN?</b></li>
<li>A: If you like VLC Media Player and want to support its development and maintenance, you can donate money via <a href="">PayPal</a>, <a href="">credit card</a>, <a href="">bank transfer</a>, or <a href="">cryptocurrency</a>, donate hardware, software, or services via <a href="">this form</a>, or contribute code, documentation, translation, design, testing, or feedback via <a href="">this page</a>.</li>
</ul>
spaces/AIGC-Audio/Make_An_Audio_inpaint/vocoder/bigvgan/models.py
DELETED
@@ -1,414 +0,0 @@
# Copyright (c) 2022 NVIDIA CORPORATION.
# Licensed under the MIT license.

# Adapted from https://github.com/jik876/hifi-gan under the MIT license.
# LICENSE is in incl_licenses directory.


import os

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Conv1d, Conv2d, ConvTranspose1d
from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
from omegaconf import OmegaConf

from .activations import Snake, SnakeBeta
from .alias_free_torch import *

LRELU_SLOPE = 0.1


def init_weights(m, mean=0.0, std=0.01):
    # Gaussian init for all conv layers, as in HiFi-GAN.
    classname = m.__class__.__name__
    if classname.find("Conv") != -1:
        m.weight.data.normal_(mean, std)


def get_padding(kernel_size, dilation=1):
    # "same" padding for a stride-1 dilated conv: (kernel_size*dilation - dilation) / 2,
    # e.g. get_padding(3, 1) == 1 and get_padding(3, 5) == 5.
    return int((kernel_size * dilation - dilation) / 2)


class AMPBlock1(torch.nn.Module):
    def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5), activation=None):
        super(AMPBlock1, self).__init__()
        self.h = h

        self.convs1 = nn.ModuleList([
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
                               padding=get_padding(kernel_size, dilation[0]))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
                               padding=get_padding(kernel_size, dilation[1]))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
                               padding=get_padding(kernel_size, dilation[2])))
        ])
        self.convs1.apply(init_weights)

        self.convs2 = nn.ModuleList([
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
                               padding=get_padding(kernel_size, 1))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
                               padding=get_padding(kernel_size, 1))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
                               padding=get_padding(kernel_size, 1)))
        ])
        self.convs2.apply(init_weights)

        self.num_layers = len(self.convs1) + len(self.convs2)  # total number of conv layers

        if activation == 'snake':  # periodic nonlinearity with snake function and anti-aliasing
            self.activations = nn.ModuleList([
                Activation1d(
                    activation=Snake(channels, alpha_logscale=h.snake_logscale))
                for _ in range(self.num_layers)
            ])
        elif activation == 'snakebeta':  # periodic nonlinearity with snakebeta function and anti-aliasing
            self.activations = nn.ModuleList([
                Activation1d(
                    activation=SnakeBeta(channels, alpha_logscale=h.snake_logscale))
                for _ in range(self.num_layers)
            ])
        else:
            raise NotImplementedError("activation incorrectly specified. check the config file and look for 'activation'.")

    def forward(self, x):
        acts1, acts2 = self.activations[::2], self.activations[1::2]
        for c1, c2, a1, a2 in zip(self.convs1, self.convs2, acts1, acts2):
            xt = a1(x)
            xt = c1(xt)
            xt = a2(xt)
            xt = c2(xt)
            x = xt + x

        return x

    def remove_weight_norm(self):
        for l in self.convs1:
            remove_weight_norm(l)
        for l in self.convs2:
            remove_weight_norm(l)


class AMPBlock2(torch.nn.Module):
    def __init__(self, h, channels, kernel_size=3, dilation=(1, 3), activation=None):
        super(AMPBlock2, self).__init__()
        self.h = h

        self.convs = nn.ModuleList([
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
                               padding=get_padding(kernel_size, dilation[0]))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
                               padding=get_padding(kernel_size, dilation[1])))
        ])
        self.convs.apply(init_weights)

        self.num_layers = len(self.convs)  # total number of conv layers

        if activation == 'snake':  # periodic nonlinearity with snake function and anti-aliasing
            self.activations = nn.ModuleList([
                Activation1d(
                    activation=Snake(channels, alpha_logscale=h.snake_logscale))
                for _ in range(self.num_layers)
            ])
        elif activation == 'snakebeta':  # periodic nonlinearity with snakebeta function and anti-aliasing
            self.activations = nn.ModuleList([
                Activation1d(
                    activation=SnakeBeta(channels, alpha_logscale=h.snake_logscale))
                for _ in range(self.num_layers)
            ])
        else:
            raise NotImplementedError("activation incorrectly specified. check the config file and look for 'activation'.")

    def forward(self, x):
        for c, a in zip(self.convs, self.activations):
            xt = a(x)
            xt = c(xt)
            x = xt + x

        return x

    def remove_weight_norm(self):
        for l in self.convs:
            remove_weight_norm(l)


class BigVGAN(torch.nn.Module):
    # this is our main BigVGAN model. Applies anti-aliased periodic activation for resblocks.
    def __init__(self, h):
        super(BigVGAN, self).__init__()
        self.h = h

        self.num_kernels = len(h.resblock_kernel_sizes)
        self.num_upsamples = len(h.upsample_rates)

        # pre conv
        self.conv_pre = weight_norm(Conv1d(h.num_mels, h.upsample_initial_channel, 7, 1, padding=3))

        # define which AMPBlock to use. BigVGAN uses AMPBlock1 as default
        resblock = AMPBlock1 if h.resblock == '1' else AMPBlock2

        # transposed conv-based upsamplers. does not apply anti-aliasing
        self.ups = nn.ModuleList()
        for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)):
            self.ups.append(nn.ModuleList([
                weight_norm(ConvTranspose1d(h.upsample_initial_channel // (2 ** i),
                                            h.upsample_initial_channel // (2 ** (i + 1)),
                                            k, u, padding=(k - u) // 2))
            ]))

        # residual blocks using anti-aliased multi-periodicity composition modules (AMP)
        self.resblocks = nn.ModuleList()
        for i in range(len(self.ups)):
            ch = h.upsample_initial_channel // (2 ** (i + 1))
            for j, (k, d) in enumerate(zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)):
                self.resblocks.append(resblock(h, ch, k, d, activation=h.activation))

        # post conv
        if h.activation == "snake":  # periodic nonlinearity with snake function and anti-aliasing
            activation_post = Snake(ch, alpha_logscale=h.snake_logscale)
            self.activation_post = Activation1d(activation=activation_post)
        elif h.activation == "snakebeta":  # periodic nonlinearity with snakebeta function and anti-aliasing
            activation_post = SnakeBeta(ch, alpha_logscale=h.snake_logscale)
            self.activation_post = Activation1d(activation=activation_post)
        else:
            raise NotImplementedError("activation incorrectly specified. check the config file and look for 'activation'.")

        self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3))

        # weight initialization
        for i in range(len(self.ups)):
            self.ups[i].apply(init_weights)
        self.conv_post.apply(init_weights)

    def forward(self, x):
        # pre conv
        x = self.conv_pre(x)

        for i in range(self.num_upsamples):
            # upsampling
            for i_up in range(len(self.ups[i])):
                x = self.ups[i][i_up](x)
            # AMP blocks
            xs = None
            for j in range(self.num_kernels):
                if xs is None:
                    xs = self.resblocks[i * self.num_kernels + j](x)
                else:
                    xs += self.resblocks[i * self.num_kernels + j](x)
            x = xs / self.num_kernels

        # post conv
        x = self.activation_post(x)
        x = self.conv_post(x)
        x = torch.tanh(x)

        return x

    def remove_weight_norm(self):
        print('Removing weight norm...')
        for l in self.ups:
            for l_i in l:
                remove_weight_norm(l_i)
        for l in self.resblocks:
            l.remove_weight_norm()
        remove_weight_norm(self.conv_pre)
        remove_weight_norm(self.conv_post)


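# ------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): how the generator maps
# a mel spectrogram to a waveform. The config values below are hypothetical
# placeholders; real checkpoints ship their own args.yml. Output length is
# input frames times prod(upsample_rates) (here 8*8*2*2 = 256).
#
#   h = OmegaConf.create({
#       "num_mels": 80, "upsample_initial_channel": 512,
#       "upsample_rates": [8, 8, 2, 2], "upsample_kernel_sizes": [16, 16, 4, 4],
#       "resblock": "1", "resblock_kernel_sizes": [3, 7, 11],
#       "resblock_dilation_sizes": [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
#       "activation": "snakebeta", "snake_logscale": True,
#   })
#   g = BigVGAN(h)
#   mel = torch.randn(1, 80, 100)   # (batch, num_mels, frames)
#   wav = g(mel)                    # (1, 1, 100 * 256) = (1, 1, 25600)
# ------------------------------------------------------------------------

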
class DiscriminatorP(torch.nn.Module):
    def __init__(self, h, period, kernel_size=5, stride=3, use_spectral_norm=False):
        super(DiscriminatorP, self).__init__()
        self.period = period
        self.d_mult = h.discriminator_channel_mult
        norm_f = spectral_norm if use_spectral_norm else weight_norm
        # NOTE: padding uses get_padding(5, 1) rather than get_padding(kernel_size, 1),
        # matching the reference HiFi-GAN/BigVGAN code (identical for the default kernel_size=5).
        self.convs = nn.ModuleList([
            norm_f(Conv2d(1, int(32*self.d_mult), (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
            norm_f(Conv2d(int(32*self.d_mult), int(128*self.d_mult), (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
            norm_f(Conv2d(int(128*self.d_mult), int(512*self.d_mult), (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
            norm_f(Conv2d(int(512*self.d_mult), int(1024*self.d_mult), (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
            norm_f(Conv2d(int(1024*self.d_mult), int(1024*self.d_mult), (kernel_size, 1), 1, padding=(2, 0))),
        ])
        self.conv_post = norm_f(Conv2d(int(1024*self.d_mult), 1, (3, 1), 1, padding=(1, 0)))

    def forward(self, x):
        fmap = []

        # 1d to 2d: reshape the waveform so that every `period`-th sample lines up
        b, c, t = x.shape
        if t % self.period != 0:  # pad first
            n_pad = self.period - (t % self.period)
            x = F.pad(x, (0, n_pad), "reflect")
            t = t + n_pad
        x = x.view(b, c, t // self.period, self.period)

        for l in self.convs:
            x = l(x)
            x = F.leaky_relu(x, LRELU_SLOPE)
            fmap.append(x)
        x = self.conv_post(x)
        fmap.append(x)
        x = torch.flatten(x, 1, -1)

        return x, fmap


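# Illustrative shape walk-through (added note, not in the original file): for
# period=3 and input x of shape (B, 1, 100), forward() reflect-pads 100 -> 102
# and views it as (B, 1, 34, 3), so each column holds every 3rd sample; the
# (kernel_size, 1) convolutions then operate along that per-period axis.

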
class MultiPeriodDiscriminator(torch.nn.Module):
    def __init__(self, h):
        super(MultiPeriodDiscriminator, self).__init__()
        self.mpd_reshapes = h.mpd_reshapes
        print("mpd_reshapes: {}".format(self.mpd_reshapes))
        discriminators = [DiscriminatorP(h, rs, use_spectral_norm=h.use_spectral_norm) for rs in self.mpd_reshapes]
        self.discriminators = nn.ModuleList(discriminators)

    def forward(self, y, y_hat):
        y_d_rs = []
        y_d_gs = []
        fmap_rs = []
        fmap_gs = []
        for i, d in enumerate(self.discriminators):
            y_d_r, fmap_r = d(y)
            y_d_g, fmap_g = d(y_hat)
            y_d_rs.append(y_d_r)
            fmap_rs.append(fmap_r)
            y_d_gs.append(y_d_g)
            fmap_gs.append(fmap_g)

        return y_d_rs, y_d_gs, fmap_rs, fmap_gs


class DiscriminatorR(nn.Module):
    def __init__(self, cfg, resolution):
        super().__init__()

        self.resolution = resolution
        assert len(self.resolution) == 3, \
            "MRD layer requires list with len=3, got {}".format(self.resolution)
        self.lrelu_slope = LRELU_SLOPE

        norm_f = spectral_norm if cfg.use_spectral_norm else weight_norm
        if hasattr(cfg, "mrd_use_spectral_norm"):
            print("INFO: overriding MRD use_spectral_norm as {}".format(cfg.mrd_use_spectral_norm))
            norm_f = spectral_norm if cfg.mrd_use_spectral_norm else weight_norm
        self.d_mult = cfg.discriminator_channel_mult
        if hasattr(cfg, "mrd_channel_mult"):
            print("INFO: overriding mrd channel multiplier as {}".format(cfg.mrd_channel_mult))
            self.d_mult = cfg.mrd_channel_mult

        self.convs = nn.ModuleList([
            norm_f(nn.Conv2d(1, int(32*self.d_mult), (3, 9), padding=(1, 4))),
            norm_f(nn.Conv2d(int(32*self.d_mult), int(32*self.d_mult), (3, 9), stride=(1, 2), padding=(1, 4))),
            norm_f(nn.Conv2d(int(32*self.d_mult), int(32*self.d_mult), (3, 9), stride=(1, 2), padding=(1, 4))),
            norm_f(nn.Conv2d(int(32*self.d_mult), int(32*self.d_mult), (3, 9), stride=(1, 2), padding=(1, 4))),
            norm_f(nn.Conv2d(int(32*self.d_mult), int(32*self.d_mult), (3, 3), padding=(1, 1))),
        ])
        self.conv_post = norm_f(nn.Conv2d(int(32 * self.d_mult), 1, (3, 3), padding=(1, 1)))

    def forward(self, x):
        fmap = []

        x = self.spectrogram(x)
        x = x.unsqueeze(1)
        for l in self.convs:
            x = l(x)
            x = F.leaky_relu(x, self.lrelu_slope)
            fmap.append(x)
        x = self.conv_post(x)
        fmap.append(x)
        x = torch.flatten(x, 1, -1)

        return x, fmap

    def spectrogram(self, x):
        # linear-magnitude STFT at this discriminator's (n_fft, hop, win) resolution
        n_fft, hop_length, win_length = self.resolution
        x = F.pad(x, (int((n_fft - hop_length) / 2), int((n_fft - hop_length) / 2)), mode='reflect')
        x = x.squeeze(1)
        x = torch.stft(x, n_fft=n_fft, hop_length=hop_length, win_length=win_length, center=False, return_complex=True)
        x = torch.view_as_real(x)  # [B, F, TT, 2]
        mag = torch.norm(x, p=2, dim=-1)  # [B, F, TT]

        return mag


class MultiResolutionDiscriminator(nn.Module):
|
331 |
-
def __init__(self, cfg, debug=False):
|
332 |
-
super().__init__()
|
333 |
-
self.resolutions = cfg.resolutions
|
334 |
-
assert len(self.resolutions) == 3,\
|
335 |
-
"MRD requires list of list with len=3, each element having a list with len=3. got {}".\
|
336 |
-
format(self.resolutions)
|
337 |
-
self.discriminators = nn.ModuleList(
|
338 |
-
[DiscriminatorR(cfg, resolution) for resolution in self.resolutions]
|
339 |
-
)
|
340 |
-
|
341 |
-
def forward(self, y, y_hat):
|
342 |
-
y_d_rs = []
|
343 |
-
y_d_gs = []
|
344 |
-
fmap_rs = []
|
345 |
-
fmap_gs = []
|
346 |
-
|
347 |
-
for i, d in enumerate(self.discriminators):
|
348 |
-
y_d_r, fmap_r = d(x=y)
|
349 |
-
y_d_g, fmap_g = d(x=y_hat)
|
350 |
-
y_d_rs.append(y_d_r)
|
351 |
-
fmap_rs.append(fmap_r)
|
352 |
-
y_d_gs.append(y_d_g)
|
353 |
-
fmap_gs.append(fmap_g)
|
354 |
-
|
355 |
-
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
|
356 |
-
|
357 |
-
|
358 |
-
def feature_loss(fmap_r, fmap_g):
|
359 |
-
loss = 0
|
360 |
-
for dr, dg in zip(fmap_r, fmap_g):
|
361 |
-
for rl, gl in zip(dr, dg):
|
362 |
-
loss += torch.mean(torch.abs(rl - gl))
|
363 |
-
|
364 |
-
return loss*2
|
365 |
-
|
366 |
-
|
367 |
-
def discriminator_loss(disc_real_outputs, disc_generated_outputs):
|
368 |
-
loss = 0
|
369 |
-
r_losses = []
|
370 |
-
g_losses = []
|
371 |
-
for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
|
372 |
-
r_loss = torch.mean((1-dr)**2)
|
373 |
-
g_loss = torch.mean(dg**2)
|
374 |
-
loss += (r_loss + g_loss)
|
375 |
-
r_losses.append(r_loss.item())
|
376 |
-
g_losses.append(g_loss.item())
|
377 |
-
|
378 |
-
return loss, r_losses, g_losses
|
379 |
-
|
380 |
-
|
381 |
-
def generator_loss(disc_outputs):
|
382 |
-
loss = 0
|
383 |
-
gen_losses = []
|
384 |
-
for dg in disc_outputs:
|
385 |
-
l = torch.mean((1-dg)**2)
|
386 |
-
gen_losses.append(l)
|
387 |
-
loss += l
|
388 |
-
|
389 |
-
return loss, gen_losses
|
390 |
-
|
391 |
-
|
392 |
-
|
393 |
-
class VocoderBigVGAN(object):
|
394 |
-
def __init__(self, ckpt_vocoder,device='cuda'):
|
395 |
-
vocoder_sd = torch.load(os.path.join(ckpt_vocoder,'best_netG.pt'), map_location='cpu')
|
396 |
-
|
397 |
-
vocoder_args = OmegaConf.load(os.path.join(ckpt_vocoder,'args.yml'))
|
398 |
-
|
399 |
-
self.generator = BigVGAN(vocoder_args)
|
400 |
-
self.generator.load_state_dict(vocoder_sd['generator'])
|
401 |
-
self.generator.eval()
|
402 |
-
|
403 |
-
self.device = device
|
404 |
-
self.generator.to(self.device)
|
405 |
-
|
406 |
-
def vocode(self, spec):
|
407 |
-
with torch.no_grad():
|
408 |
-
if isinstance(spec,np.ndarray):
|
409 |
-
spec = torch.from_numpy(spec).unsqueeze(0)
|
410 |
-
spec = spec.to(dtype=torch.float32,device=self.device)
|
411 |
-
return self.generator(spec).squeeze().cpu().numpy()
|
412 |
-
|
413 |
-
def __call__(self, wav):
|
414 |
-
return self.vocode(wav)
|
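
For orientation, the pieces in this deleted file combine in the usual HiFi-GAN-style adversarial setup: the discriminators score real and generated waveforms, discriminator_loss trains them with a least-squares objective, and the generator receives generator_loss plus the feature_loss feature-matching term. A minimal sketch of one step, assuming a config h, a BigVGAN generator, and a mel input (all three are placeholders, not part of this file):

    import torch

    mpd = MultiPeriodDiscriminator(h)   # h: placeholder config providing mpd_reshapes etc.

    y = torch.randn(4, 1, 8192)         # placeholder real waveforms, shape [B, 1, T]
    y_hat = generator(mel)              # placeholder generator output for the same batch

    # Discriminator step: push real outputs toward 1 and fake outputs toward 0.
    y_d_rs, y_d_gs, _, _ = mpd(y, y_hat.detach())
    loss_d, _, _ = discriminator_loss(y_d_rs, y_d_gs)

    # Generator step: adversarial term plus feature matching on intermediate maps.
    _, y_d_gs, fmap_rs, fmap_gs = mpd(y, y_hat)
    loss_g_adv, _ = generator_loss(y_d_gs)
    loss_fm = feature_loss(fmap_rs, fmap_gs)
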
spaces/Abhilashvj/planogram-compliance/utils/loggers/wandb/sweep.py
DELETED
@@ -1,45 +0,0 @@
-import sys
-from pathlib import Path
-
-import wandb
-
-FILE = Path(__file__).resolve()
-ROOT = FILE.parents[3]  # YOLOv5 root directory
-if str(ROOT) not in sys.path:
-    sys.path.append(str(ROOT))  # add ROOT to PATH
-
-from train import parse_opt, train
-from utils.callbacks import Callbacks
-from utils.general import increment_path
-from utils.torch_utils import select_device
-
-
-def sweep():
-    wandb.init()
-    # Get hyp dict from sweep agent. Copy because train() modifies parameters which confused wandb.
-    hyp_dict = vars(wandb.config).get("_items").copy()
-
-    # Workaround: get necessary opt args
-    opt = parse_opt(known=True)
-    opt.batch_size = hyp_dict.get("batch_size")
-    opt.save_dir = str(
-        increment_path(
-            Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve
-        )
-    )
-    opt.epochs = hyp_dict.get("epochs")
-    opt.nosave = True
-    opt.data = hyp_dict.get("data")
-    opt.weights = str(opt.weights)
-    opt.cfg = str(opt.cfg)
-    opt.data = str(opt.data)
-    opt.hyp = str(opt.hyp)
-    opt.project = str(opt.project)
-    device = select_device(opt.device, batch_size=opt.batch_size)
-
-    # train
-    train(hyp_dict, opt, device, callbacks=Callbacks())
-
-
-if __name__ == "__main__":
-    sweep()
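
The sweep() entry point above is what a W&B sweep agent calls once per trial; a typical launch looks roughly like this (the sweep id is a placeholder that `wandb sweep <config.yaml>` would print):

    import wandb
    from utils.loggers.wandb.sweep import sweep

    sweep_id = "entity/project/abc123"  # placeholder id from `wandb sweep`
    wandb.agent(sweep_id, function=sweep, count=10)  # run 10 trials
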
spaces/AchyuthGamer/OpenGPT-Chat-UI/.svelte-kit/generated/client/nodes/5.js
DELETED
File without changes
spaces/Adapter/T2I-Adapter/ldm/data/dataset_coco.py
DELETED
@@ -1,36 +0,0 @@
-import json
-import cv2
-import os
-from basicsr.utils import img2tensor
-
-
-class dataset_coco_mask_color():
-    def __init__(self, path_json, root_path_im, root_path_mask, image_size):
-        super(dataset_coco_mask_color, self).__init__()
-        with open(path_json, 'r', encoding='utf-8') as fp:
-            data = json.load(fp)
-        data = data['annotations']
-        self.files = []
-        self.root_path_im = root_path_im
-        self.root_path_mask = root_path_mask
-        for file in data:
-            name = "%012d.png" % file['image_id']
-            self.files.append({'name': name, 'sentence': file['caption']})
-
-    def __getitem__(self, idx):
-        file = self.files[idx]
-        name = file['name']
-        # print(os.path.join(self.root_path_im, name))
-        im = cv2.imread(os.path.join(self.root_path_im, name.replace('.png', '.jpg')))
-        im = cv2.resize(im, (512, 512))
-        im = img2tensor(im, bgr2rgb=True, float32=True) / 255.
-
-        mask = cv2.imread(os.path.join(self.root_path_mask, name))  # [:,:,0]
-        mask = cv2.resize(mask, (512, 512))
-        mask = img2tensor(mask, bgr2rgb=True, float32=True) / 255.  # [0].unsqueeze(0)#/255.
-
-        sentence = file['sentence']
-        return {'im': im, 'mask': mask, 'sentence': sentence}
-
-    def __len__(self):
-        return len(self.files)
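
For context, a minimal sketch of how this dataset class would typically be consumed (the three paths are placeholders; note that the class resizes to a hard-coded 512x512 regardless of the image_size argument):

    from torch.utils.data import DataLoader

    ds = dataset_coco_mask_color(
        path_json="annotations/captions.json",  # placeholder path
        root_path_im="images",                  # placeholder path
        root_path_mask="masks",                 # placeholder path
        image_size=512,                         # accepted but unused by the class
    )
    loader = DataLoader(ds, batch_size=8, shuffle=True, num_workers=4)
    batch = next(iter(loader))  # dict with 'im', 'mask', 'sentence'
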
spaces/Ali36Ahmad/MagicPrompt-Stable-Diffusion/README.md
DELETED
@@ -1,14 +0,0 @@
----
-title: MagicPrompt Stable Diffusion
-emoji: 😻
-colorFrom: red
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.3.1
-app_file: app.py
-pinned: false
-license: mit
-duplicated_from: Gustavosta/MagicPrompt-Stable-Diffusion
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Amon1/ChatGPTForAcadamic/crazy_functions/test_project/cpp/cppipc/buffer.cpp
DELETED
@@ -1,87 +0,0 @@
-#include "libipc/buffer.h"
-#include "libipc/utility/pimpl.h"
-
-#include <cstring>
-
-namespace ipc {
-
-bool operator==(buffer const & b1, buffer const & b2) {
-    return (b1.size() == b2.size()) && (std::memcmp(b1.data(), b2.data(), b1.size()) == 0);
-}
-
-bool operator!=(buffer const & b1, buffer const & b2) {
-    return !(b1 == b2);
-}
-
-class buffer::buffer_ : public pimpl<buffer_> {
-public:
-    void* p_;
-    std::size_t s_;
-    void* a_;
-    buffer::destructor_t d_;
-
-    buffer_(void* p, std::size_t s, buffer::destructor_t d, void* a)
-        : p_(p), s_(s), a_(a), d_(d) {
-    }
-
-    ~buffer_() {
-        if (d_ == nullptr) return;
-        d_((a_ == nullptr) ? p_ : a_, s_);
-    }
-};
-
-buffer::buffer()
-    : buffer(nullptr, 0, nullptr, nullptr) {
-}
-
-buffer::buffer(void* p, std::size_t s, destructor_t d)
-    : p_(p_->make(p, s, d, nullptr)) {
-}
-
-buffer::buffer(void* p, std::size_t s, destructor_t d, void* additional)
-    : p_(p_->make(p, s, d, additional)) {
-}
-
-buffer::buffer(void* p, std::size_t s)
-    : buffer(p, s, nullptr) {
-}
-
-buffer::buffer(char const & c)
-    : buffer(const_cast<char*>(&c), 1) {
-}
-
-buffer::buffer(buffer&& rhs)
-    : buffer() {
-    swap(rhs);
-}
-
-buffer::~buffer() {
-    p_->clear();
-}
-
-void buffer::swap(buffer& rhs) {
-    std::swap(p_, rhs.p_);
-}
-
-buffer& buffer::operator=(buffer rhs) {
-    swap(rhs);
-    return *this;
-}
-
-bool buffer::empty() const noexcept {
-    return (impl(p_)->p_ == nullptr) || (impl(p_)->s_ == 0);
-}
-
-void* buffer::data() noexcept {
-    return impl(p_)->p_;
-}
-
-void const * buffer::data() const noexcept {
-    return impl(p_)->p_;
-}
-
-std::size_t buffer::size() const noexcept {
-    return impl(p_)->s_;
-}
-
-} // namespace ipc
spaces/Amrrs/DragGan-Inversion/PTI/configs/global_config.py
DELETED
@@ -1,12 +0,0 @@
-# Device
-cuda_visible_devices = '0'
-device = 'cuda:0'
-
-# Logs
-training_step = 1
-image_rec_result_log_snapshot = 100
-pivotal_training_steps = 0
-model_snapshot_interval = 400
-
-# Run name to be updated during PTI
-run_name = ''
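
These module-level values act as mutable global state: PTI scripts typically import the module and overwrite fields at runtime, roughly as below (the import path is assumed from this file's location; the run name is illustrative):

    from PTI.configs import global_config  # assumed import path

    global_config.run_name = "my_inversion_run"  # illustrative value
    print(global_config.device)                  # 'cuda:0' by default
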
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/instruct_pix2pix/train_instruct_pix2pix_xl.py
DELETED
@@ -1,1205 +0,0 @@
-#!/usr/bin/env python
-# coding=utf-8
-# Copyright 2023 Harutatsu Akiyama and The HuggingFace Inc. team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import argparse
-import logging
-import math
-import os
-import shutil
-import warnings
-from pathlib import Path
-from urllib.parse import urlparse
-
-import accelerate
-import datasets
-import numpy as np
-import PIL
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-import torch.utils.checkpoint
-import transformers
-from accelerate import Accelerator
-from accelerate.logging import get_logger
-from accelerate.utils import ProjectConfiguration, set_seed
-from datasets import load_dataset
-from huggingface_hub import create_repo, upload_folder
-from packaging import version
-from PIL import Image
-from torchvision import transforms
-from tqdm.auto import tqdm
-from transformers import AutoTokenizer, PretrainedConfig
-
-import diffusers
-from diffusers import AutoencoderKL, DDPMScheduler, UNet2DConditionModel
-from diffusers.optimization import get_scheduler
-from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_instruct_pix2pix import (
-    StableDiffusionXLInstructPix2PixPipeline,
-)
-from diffusers.training_utils import EMAModel
-from diffusers.utils import check_min_version, deprecate, is_wandb_available, load_image
-from diffusers.utils.import_utils import is_xformers_available
-
-
-# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.19.0")
-
-logger = get_logger(__name__, log_level="INFO")
-
-DATASET_NAME_MAPPING = {
-    "fusing/instructpix2pix-1000-samples": ("file_name", "edited_image", "edit_prompt"),
-}
-WANDB_TABLE_COL_NAMES = ["file_name", "edited_image", "edit_prompt"]
-
-
-def import_model_class_from_model_name_or_path(
-    pretrained_model_name_or_path: str, revision: str, subfolder: str = "text_encoder"
-):
-    text_encoder_config = PretrainedConfig.from_pretrained(
-        pretrained_model_name_or_path, subfolder=subfolder, revision=revision
-    )
-    model_class = text_encoder_config.architectures[0]
-
-    if model_class == "CLIPTextModel":
-        from transformers import CLIPTextModel
-
-        return CLIPTextModel
-    elif model_class == "CLIPTextModelWithProjection":
-        from transformers import CLIPTextModelWithProjection
-
-        return CLIPTextModelWithProjection
-    else:
-        raise ValueError(f"{model_class} is not supported.")
-
-
-def parse_args():
-    parser = argparse.ArgumentParser(description="Script to train Stable Diffusion XL for InstructPix2Pix.")
-    parser.add_argument(
-        "--pretrained_model_name_or_path",
-        type=str,
-        default=None,
-        required=True,
-        help="Path to pretrained model or model identifier from huggingface.co/models.",
-    )
-    parser.add_argument(
-        "--pretrained_vae_model_name_or_path",
-        type=str,
-        default=None,
-        help="Path to an improved VAE to stabilize training. For more details check out: https://github.com/huggingface/diffusers/pull/4038.",
-    )
-    parser.add_argument(
-        "--revision",
-        type=str,
-        default=None,
-        required=False,
-        help="Revision of pretrained model identifier from huggingface.co/models.",
-    )
-    parser.add_argument(
-        "--dataset_name",
-        type=str,
-        default=None,
-        help=(
-            "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private,"
-            " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem,"
-            " or to a folder containing files that 🤗 Datasets can understand."
-        ),
-    )
-    parser.add_argument(
-        "--dataset_config_name",
-        type=str,
-        default=None,
-        help="The config of the Dataset, leave as None if there's only one config.",
-    )
-    parser.add_argument(
-        "--train_data_dir",
-        type=str,
-        default=None,
-        help=(
-            "A folder containing the training data. Folder contents must follow the structure described in"
-            " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file"
-            " must exist to provide the captions for the images. Ignored if `dataset_name` is specified."
-        ),
-    )
-    parser.add_argument(
-        "--original_image_column",
-        type=str,
-        default="input_image",
-        help="The column of the dataset containing the original image on which edits where made.",
-    )
-    parser.add_argument(
-        "--edited_image_column",
-        type=str,
-        default="edited_image",
-        help="The column of the dataset containing the edited image.",
-    )
-    parser.add_argument(
-        "--edit_prompt_column",
-        type=str,
-        default="edit_prompt",
-        help="The column of the dataset containing the edit instruction.",
-    )
-    parser.add_argument(
-        "--val_image_url_or_path",
-        type=str,
-        default=None,
-        help="URL to the original image that you would like to edit (used during inference for debugging purposes).",
-    )
-    parser.add_argument(
-        "--validation_prompt", type=str, default=None, help="A prompt that is sampled during training for inference."
-    )
-    parser.add_argument(
-        "--num_validation_images",
-        type=int,
-        default=4,
-        help="Number of images that should be generated during validation with `validation_prompt`.",
-    )
-    parser.add_argument(
-        "--validation_steps",
-        type=int,
-        default=100,
-        help=(
-            "Run fine-tuning validation every X steps. The validation process consists of running the prompt"
-            " `args.validation_prompt` multiple times: `args.num_validation_images`."
-        ),
-    )
-    parser.add_argument(
-        "--max_train_samples",
-        type=int,
-        default=None,
-        help=(
-            "For debugging purposes or quicker training, truncate the number of training examples to this "
-            "value if set."
-        ),
-    )
-    parser.add_argument(
-        "--output_dir",
-        type=str,
-        default="instruct-pix2pix-model",
-        help="The output directory where the model predictions and checkpoints will be written.",
-    )
-    parser.add_argument(
-        "--cache_dir",
-        type=str,
-        default=None,
-        help="The directory where the downloaded models and datasets will be stored.",
-    )
-    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
-    parser.add_argument(
-        "--resolution",
-        type=int,
-        default=256,
-        help=(
-            "The resolution for input images, all the images in the train/validation dataset will be resized to this resolution."
-        ),
-    )
-    parser.add_argument(
-        "--crops_coords_top_left_h",
-        type=int,
-        default=0,
-        help=("Coordinate for (the height) to be included in the crop coordinate embeddings needed by SDXL UNet."),
-    )
-    parser.add_argument(
-        "--crops_coords_top_left_w",
-        type=int,
-        default=0,
-        help=("Coordinate for (the height) to be included in the crop coordinate embeddings needed by SDXL UNet."),
-    )
-    parser.add_argument(
-        "--center_crop",
-        default=False,
-        action="store_true",
-        help=(
-            "Whether to center crop the input images to the resolution. If not set, the images will be randomly"
-            " cropped. The images will be resized to the resolution first before cropping."
-        ),
-    )
-    parser.add_argument(
-        "--random_flip",
-        action="store_true",
-        help="whether to randomly flip images horizontally",
-    )
-    parser.add_argument(
-        "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader."
-    )
-    parser.add_argument("--num_train_epochs", type=int, default=100)
-    parser.add_argument(
-        "--max_train_steps",
-        type=int,
-        default=None,
-        help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
-    )
-    parser.add_argument(
-        "--gradient_accumulation_steps",
-        type=int,
-        default=1,
-        help="Number of updates steps to accumulate before performing a backward/update pass.",
-    )
-    parser.add_argument(
-        "--gradient_checkpointing",
-        action="store_true",
-        help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
-    )
-    parser.add_argument(
-        "--learning_rate",
-        type=float,
-        default=1e-4,
-        help="Initial learning rate (after the potential warmup period) to use.",
-    )
-    parser.add_argument(
-        "--scale_lr",
-        action="store_true",
-        default=False,
-        help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
-    )
-    parser.add_argument(
-        "--lr_scheduler",
-        type=str,
-        default="constant",
-        help=(
-            'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
-            ' "constant", "constant_with_warmup"]'
-        ),
-    )
-    parser.add_argument(
-        "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
-    )
-    parser.add_argument(
-        "--conditioning_dropout_prob",
-        type=float,
-        default=None,
-        help="Conditioning dropout probability. Drops out the conditionings (image and edit prompt) used in training InstructPix2Pix. See section 3.2.1 in the paper: https://arxiv.org/abs/2211.09800.",
-    )
-    parser.add_argument(
-        "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
-    )
-    parser.add_argument(
-        "--allow_tf32",
-        action="store_true",
-        help=(
-            "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
-            " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
-        ),
-    )
-    parser.add_argument("--use_ema", action="store_true", help="Whether to use EMA model.")
-    parser.add_argument(
-        "--non_ema_revision",
-        type=str,
-        default=None,
-        required=False,
-        help=(
-            "Revision of pretrained non-ema model identifier. Must be a branch, tag or git identifier of the local or"
-            " remote repository specified with --pretrained_model_name_or_path."
-        ),
-    )
-    parser.add_argument(
-        "--dataloader_num_workers",
-        type=int,
-        default=0,
-        help=(
-            "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
-        ),
-    )
-    parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
-    parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
-    parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
-    parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
-    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
-    parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
-    parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
-    parser.add_argument(
-        "--hub_model_id",
-        type=str,
-        default=None,
-        help="The name of the repository to keep in sync with the local `output_dir`.",
-    )
-    parser.add_argument(
-        "--logging_dir",
-        type=str,
-        default="logs",
-        help=(
-            "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
-            " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
-        ),
-    )
-    parser.add_argument(
-        "--mixed_precision",
-        type=str,
-        default=None,
-        choices=["no", "fp16", "bf16"],
-        help=(
-            "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
-            " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the"
-            " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
-        ),
-    )
-    parser.add_argument(
-        "--report_to",
-        type=str,
-        default="tensorboard",
-        help=(
-            'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
-            ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
-        ),
-    )
-    parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
-    parser.add_argument(
-        "--checkpointing_steps",
-        type=int,
-        default=500,
-        help=(
-            "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming"
-            " training using `--resume_from_checkpoint`."
-        ),
-    )
-    parser.add_argument(
-        "--checkpoints_total_limit",
-        type=int,
-        default=None,
-        help=("Max number of checkpoints to store."),
-    )
-    parser.add_argument(
-        "--resume_from_checkpoint",
-        type=str,
-        default=None,
-        help=(
-            "Whether training should be resumed from a previous checkpoint. Use a path saved by"
-            ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
-        ),
-    )
-    parser.add_argument(
-        "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
-    )
-
-    args = parser.parse_args()
-    env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
-    if env_local_rank != -1 and env_local_rank != args.local_rank:
-        args.local_rank = env_local_rank
-
-    # Sanity checks
-    if args.dataset_name is None and args.train_data_dir is None:
-        raise ValueError("Need either a dataset name or a training folder.")
-
-    # default to using the same revision for the non-ema model if not specified
-    if args.non_ema_revision is None:
-        args.non_ema_revision = args.revision
-
-    return args
-
-
-def convert_to_np(image, resolution):
-    if isinstance(image, str):
-        image = PIL.Image.open(image)
-    image = image.convert("RGB").resize((resolution, resolution))
-    return np.array(image).transpose(2, 0, 1)
-
-
-def main():
-    args = parse_args()
-
-    if args.non_ema_revision is not None:
-        deprecate(
-            "non_ema_revision!=None",
-            "0.15.0",
-            message=(
-                "Downloading 'non_ema' weights from revision branches of the Hub is deprecated. Please make sure to"
-                " use `--variant=non_ema` instead."
-            ),
-        )
-    logging_dir = os.path.join(args.output_dir, args.logging_dir)
-    accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
-    accelerator = Accelerator(
-        gradient_accumulation_steps=args.gradient_accumulation_steps,
-        mixed_precision=args.mixed_precision,
-        log_with=args.report_to,
-        project_config=accelerator_project_config,
-    )
-
-    generator = torch.Generator(device=accelerator.device).manual_seed(args.seed)
-
-    if args.report_to == "wandb":
-        if not is_wandb_available():
-            raise ImportError("Make sure to install wandb if you want to use it for logging during training.")
-        import wandb
-
-    # Make one log on every process with the configuration for debugging.
-    logging.basicConfig(
-        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
-        datefmt="%m/%d/%Y %H:%M:%S",
-        level=logging.INFO,
-    )
-    logger.info(accelerator.state, main_process_only=False)
-    if accelerator.is_local_main_process:
-        datasets.utils.logging.set_verbosity_warning()
-        transformers.utils.logging.set_verbosity_warning()
-        diffusers.utils.logging.set_verbosity_info()
-    else:
-        datasets.utils.logging.set_verbosity_error()
-        transformers.utils.logging.set_verbosity_error()
-        diffusers.utils.logging.set_verbosity_error()
-
-    # If passed along, set the training seed now.
-    if args.seed is not None:
-        set_seed(args.seed)
-
-    # Handle the repository creation
-    if accelerator.is_main_process:
-        if args.output_dir is not None:
-            os.makedirs(args.output_dir, exist_ok=True)
-
-        if args.push_to_hub:
-            repo_id = create_repo(
-                repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token
-            ).repo_id
-
-    vae_path = (
-        args.pretrained_model_name_or_path
-        if args.pretrained_vae_model_name_or_path is None
-        else args.pretrained_vae_model_name_or_path
-    )
-    vae = AutoencoderKL.from_pretrained(
-        vae_path,
-        subfolder="vae" if args.pretrained_vae_model_name_or_path is None else None,
-        revision=args.revision,
-    )
-    unet = UNet2DConditionModel.from_pretrained(
-        args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision
-    )
-
-    # InstructPix2Pix uses an additional image for conditioning. To accommodate that,
-    # it uses 8 channels (instead of 4) in the first (conv) layer of the UNet. This UNet is
-    # then fine-tuned on the custom InstructPix2Pix dataset. This modified UNet is initialized
-    # from the pre-trained checkpoints. For the extra channels added to the first layer, they are
-    # initialized to zero.
-    logger.info("Initializing the XL InstructPix2Pix UNet from the pretrained UNet.")
-    in_channels = 8
-    out_channels = unet.conv_in.out_channels
-    unet.register_to_config(in_channels=in_channels)
-
-    with torch.no_grad():
-        new_conv_in = nn.Conv2d(
-            in_channels, out_channels, unet.conv_in.kernel_size, unet.conv_in.stride, unet.conv_in.padding
-        )
-        new_conv_in.weight.zero_()
-        new_conv_in.weight[:, :4, :, :].copy_(unet.conv_in.weight)
-        unet.conv_in = new_conv_in
-
-    # Create EMA for the unet.
-    if args.use_ema:
-        ema_unet = EMAModel(unet.parameters(), model_cls=UNet2DConditionModel, model_config=unet.config)
-
-    if args.enable_xformers_memory_efficient_attention:
-        if is_xformers_available():
-            import xformers
-
-            xformers_version = version.parse(xformers.__version__)
-            if xformers_version == version.parse("0.0.16"):
-                logger.warn(
-                    "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
-                )
-            unet.enable_xformers_memory_efficient_attention()
-        else:
-            raise ValueError("xformers is not available. Make sure it is installed correctly")
-
-    # `accelerate` 0.16.0 will have better support for customized saving
-    if version.parse(accelerate.__version__) >= version.parse("0.16.0"):
-        # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format
-        def save_model_hook(models, weights, output_dir):
-            if args.use_ema:
-                ema_unet.save_pretrained(os.path.join(output_dir, "unet_ema"))
-
-            for i, model in enumerate(models):
-                model.save_pretrained(os.path.join(output_dir, "unet"))
-
-                # make sure to pop weight so that corresponding model is not saved again
-                weights.pop()
-
-        def load_model_hook(models, input_dir):
-            if args.use_ema:
-                load_model = EMAModel.from_pretrained(os.path.join(input_dir, "unet_ema"), UNet2DConditionModel)
-                ema_unet.load_state_dict(load_model.state_dict())
-                ema_unet.to(accelerator.device)
-                del load_model
-
-            for i in range(len(models)):
-                # pop models so that they are not loaded again
-                model = models.pop()
-
-                # load diffusers style into model
-                load_model = UNet2DConditionModel.from_pretrained(input_dir, subfolder="unet")
-                model.register_to_config(**load_model.config)
-
-                model.load_state_dict(load_model.state_dict())
-                del load_model
-
-        accelerator.register_save_state_pre_hook(save_model_hook)
-        accelerator.register_load_state_pre_hook(load_model_hook)
-
-    if args.gradient_checkpointing:
-        unet.enable_gradient_checkpointing()
-
-    # Enable TF32 for faster training on Ampere GPUs,
-    # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
-    if args.allow_tf32:
-        torch.backends.cuda.matmul.allow_tf32 = True
-
-    if args.scale_lr:
-        args.learning_rate = (
-            args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
-        )
-
-    # Initialize the optimizer
-    if args.use_8bit_adam:
-        try:
-            import bitsandbytes as bnb
-        except ImportError:
-            raise ImportError(
-                "Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`"
-            )
-
-        optimizer_cls = bnb.optim.AdamW8bit
-    else:
-        optimizer_cls = torch.optim.AdamW
-
-    optimizer = optimizer_cls(
-        unet.parameters(),
-        lr=args.learning_rate,
-        betas=(args.adam_beta1, args.adam_beta2),
-        weight_decay=args.adam_weight_decay,
-        eps=args.adam_epsilon,
-    )
-
-    # Get the datasets: you can either provide your own training and evaluation files (see below)
-    # or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub).
-
-    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
-    # download the dataset.
-    if args.dataset_name is not None:
-        # Downloading and loading a dataset from the hub.
-        dataset = load_dataset(
-            args.dataset_name,
-            args.dataset_config_name,
-            cache_dir=args.cache_dir,
-        )
-    else:
-        data_files = {}
-        if args.train_data_dir is not None:
-            data_files["train"] = os.path.join(args.train_data_dir, "**")
-        dataset = load_dataset(
-            "imagefolder",
-            data_files=data_files,
-            cache_dir=args.cache_dir,
-        )
-        # See more about loading custom images at
-        # https://huggingface.co/docs/datasets/main/en/image_load#imagefolder
-
-    # Preprocessing the datasets.
-    # We need to tokenize inputs and targets.
-    column_names = dataset["train"].column_names
-
-    # 6. Get the column names for input/target.
-    dataset_columns = DATASET_NAME_MAPPING.get(args.dataset_name, None)
-    if args.original_image_column is None:
-        original_image_column = dataset_columns[0] if dataset_columns is not None else column_names[0]
-    else:
-        original_image_column = args.original_image_column
-        if original_image_column not in column_names:
-            raise ValueError(
-                f"--original_image_column' value '{args.original_image_column}' needs to be one of: {', '.join(column_names)}"
-            )
-    if args.edit_prompt_column is None:
-        edit_prompt_column = dataset_columns[1] if dataset_columns is not None else column_names[1]
-    else:
-        edit_prompt_column = args.edit_prompt_column
-        if edit_prompt_column not in column_names:
-            raise ValueError(
-                f"--edit_prompt_column' value '{args.edit_prompt_column}' needs to be one of: {', '.join(column_names)}"
-            )
-    if args.edited_image_column is None:
-        edited_image_column = dataset_columns[2] if dataset_columns is not None else column_names[2]
-    else:
-        edited_image_column = args.edited_image_column
-        if edited_image_column not in column_names:
-            raise ValueError(
-                f"--edited_image_column' value '{args.edited_image_column}' needs to be one of: {', '.join(column_names)}"
-            )
-
-    # For mixed precision training we cast the text_encoder and vae weights to half-precision
-    # as these models are only used for inference, keeping weights in full precision is not required.
-    weight_dtype = torch.float32
-    if accelerator.mixed_precision == "fp16":
-        weight_dtype = torch.float16
-        warnings.warn(f"weight_dtype {weight_dtype} may cause nan during vae encoding", UserWarning)
-
-    elif accelerator.mixed_precision == "bf16":
-        weight_dtype = torch.bfloat16
-        warnings.warn(f"weight_dtype {weight_dtype} may cause nan during vae encoding", UserWarning)
-
-    # Preprocessing the datasets.
-    # We need to tokenize input captions and transform the images.
-    def tokenize_captions(captions, tokenizer):
-        inputs = tokenizer(
-            captions,
-            max_length=tokenizer.model_max_length,
-            padding="max_length",
-            truncation=True,
-            return_tensors="pt",
-        )
-        return inputs.input_ids
-
-    # Preprocessing the datasets.
-    train_transforms = transforms.Compose(
-        [
-            transforms.CenterCrop(args.resolution) if args.center_crop else transforms.RandomCrop(args.resolution),
-            transforms.RandomHorizontalFlip() if args.random_flip else transforms.Lambda(lambda x: x),
-        ]
-    )
-
-    def preprocess_images(examples):
-        original_images = np.concatenate(
-            [convert_to_np(image, args.resolution) for image in examples[original_image_column]]
-        )
-        edited_images = np.concatenate(
-            [convert_to_np(image, args.resolution) for image in examples[edited_image_column]]
-        )
-        # We need to ensure that the original and the edited images undergo the same
-        # augmentation transforms.
-        images = np.concatenate([original_images, edited_images])
-        images = torch.tensor(images)
-        images = 2 * (images / 255) - 1
-        return train_transforms(images)
-
-    # Load scheduler, tokenizer and models.
-    tokenizer_1 = AutoTokenizer.from_pretrained(
-        args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision, use_fast=False
-    )
-    tokenizer_2 = AutoTokenizer.from_pretrained(
-        args.pretrained_model_name_or_path, subfolder="tokenizer_2", revision=args.revision, use_fast=False
-    )
-    text_encoder_cls_1 = import_model_class_from_model_name_or_path(args.pretrained_model_name_or_path, args.revision)
-    text_encoder_cls_2 = import_model_class_from_model_name_or_path(
-        args.pretrained_model_name_or_path, args.revision, subfolder="text_encoder_2"
-    )
-
-    # Load scheduler and models
-    noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
-    text_encoder_1 = text_encoder_cls_1.from_pretrained(
-        args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision
-    )
-    text_encoder_2 = text_encoder_cls_2.from_pretrained(
-        args.pretrained_model_name_or_path, subfolder="text_encoder_2", revision=args.revision
-    )
-
-    # We ALWAYS pre-compute the additional condition embeddings needed for SDXL
-    # UNet as the model is already big and it uses two text encoders.
-    text_encoder_1.to(accelerator.device, dtype=weight_dtype)
-    text_encoder_2.to(accelerator.device, dtype=weight_dtype)
-    tokenizers = [tokenizer_1, tokenizer_2]
-    text_encoders = [text_encoder_1, text_encoder_2]
-
-    # Freeze vae and text_encoders
-    vae.requires_grad_(False)
-    text_encoder_1.requires_grad_(False)
-    text_encoder_2.requires_grad_(False)
-
-    # Adapted from pipelines.StableDiffusionXLPipeline.encode_prompt
-    def encode_prompt(text_encoders, tokenizers, prompt):
-        prompt_embeds_list = []
-
-        for tokenizer, text_encoder in zip(tokenizers, text_encoders):
-            text_inputs = tokenizer(
-                prompt,
-                padding="max_length",
-                max_length=tokenizer.model_max_length,
-                truncation=True,
-                return_tensors="pt",
-            )
-            text_input_ids = text_inputs.input_ids
-            untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
-
-            if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
-                text_input_ids, untruncated_ids
-            ):
-                removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
-                logger.warning(
-                    "The following part of your input was truncated because CLIP can only handle sequences up to"
-                    f" {tokenizer.model_max_length} tokens: {removed_text}"
-                )
-
-            prompt_embeds = text_encoder(
-                text_input_ids.to(text_encoder.device),
-                output_hidden_states=True,
-            )
-
-            # We are only ALWAYS interested in the pooled output of the final text encoder
-            pooled_prompt_embeds = prompt_embeds[0]
-            prompt_embeds = prompt_embeds.hidden_states[-2]
-            bs_embed, seq_len, _ = prompt_embeds.shape
-            prompt_embeds = prompt_embeds.view(bs_embed, seq_len, -1)
-            prompt_embeds_list.append(prompt_embeds)
-
-        prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
-        pooled_prompt_embeds = pooled_prompt_embeds.view(bs_embed, -1)
-        return prompt_embeds, pooled_prompt_embeds
-
-    # Adapted from pipelines.StableDiffusionXLPipeline.encode_prompt
-    def encode_prompts(text_encoders, tokenizers, prompts):
-        prompt_embeds_all = []
-        pooled_prompt_embeds_all = []
-
-        for prompt in prompts:
-            prompt_embeds, pooled_prompt_embeds = encode_prompt(text_encoders, tokenizers, prompt)
-            prompt_embeds_all.append(prompt_embeds)
-            pooled_prompt_embeds_all.append(pooled_prompt_embeds)
-
-        return torch.stack(prompt_embeds_all), torch.stack(pooled_prompt_embeds_all)
-
-    # Adapted from examples.dreambooth.train_dreambooth_lora_sdxl
-    # Here, we compute not just the text embeddings but also the additional embeddings
-    # needed for the SD XL UNet to operate.
-    def compute_embeddings_for_prompts(prompts, text_encoders, tokenizers):
-        with torch.no_grad():
-            prompt_embeds_all, pooled_prompt_embeds_all = encode_prompts(text_encoders, tokenizers, prompts)
-            add_text_embeds_all = pooled_prompt_embeds_all
-
-            prompt_embeds_all = prompt_embeds_all.to(accelerator.device)
-            add_text_embeds_all = add_text_embeds_all.to(accelerator.device)
-        return prompt_embeds_all, add_text_embeds_all
-
-    # Get null conditioning
-    def compute_null_conditioning():
-        null_conditioning_list = []
-        for a_tokenizer, a_text_encoder in zip(tokenizers, text_encoders):
-            null_conditioning_list.append(
-                a_text_encoder(
-                    tokenize_captions([""], tokenizer=a_tokenizer).to(accelerator.device),
-                    output_hidden_states=True,
-                ).hidden_states[-2]
-            )
-        return torch.concat(null_conditioning_list, dim=-1)
-
-    null_conditioning = compute_null_conditioning()
-
-    def compute_time_ids():
-        crops_coords_top_left = (args.crops_coords_top_left_h, args.crops_coords_top_left_w)
-        original_size = target_size = (args.resolution, args.resolution)
-        add_time_ids = list(original_size + crops_coords_top_left + target_size)
-        add_time_ids = torch.tensor([add_time_ids], dtype=weight_dtype)
-        return add_time_ids.to(accelerator.device).repeat(args.train_batch_size, 1)
-
-    add_time_ids = compute_time_ids()
-
-    def preprocess_train(examples):
-        # Preprocess images.
-        preprocessed_images = preprocess_images(examples)
-        # Since the original and edited images were concatenated before
-        # applying the transformations, we need to separate them and reshape
-        # them accordingly.
-        original_images, edited_images = preprocessed_images.chunk(2)
-        original_images = original_images.reshape(-1, 3, args.resolution, args.resolution)
-        edited_images = edited_images.reshape(-1, 3, args.resolution, args.resolution)
-
-        # Collate the preprocessed images into the `examples`.
-        examples["original_pixel_values"] = original_images
-        examples["edited_pixel_values"] = edited_images
-
-        # Preprocess the captions.
-        captions = list(examples[edit_prompt_column])
-        prompt_embeds_all, add_text_embeds_all = compute_embeddings_for_prompts(captions, text_encoders, tokenizers)
-        examples["prompt_embeds"] = prompt_embeds_all
-        examples["add_text_embeds"] = add_text_embeds_all
-        return examples
-
-    with accelerator.main_process_first():
-        if args.max_train_samples is not None:
-            dataset["train"] = dataset["train"].shuffle(seed=args.seed).select(range(args.max_train_samples))
-        # Set the training transforms
-        train_dataset = dataset["train"].with_transform(preprocess_train)
-
-    def collate_fn(examples):
-        original_pixel_values = torch.stack([example["original_pixel_values"] for example in examples])
-        original_pixel_values = original_pixel_values.to(memory_format=torch.contiguous_format).float()
-        edited_pixel_values = torch.stack([example["edited_pixel_values"] for example in examples])
-        edited_pixel_values = edited_pixel_values.to(memory_format=torch.contiguous_format).float()
-        prompt_embeds = torch.concat([example["prompt_embeds"] for example in examples], dim=0)
-        add_text_embeds = torch.concat([example["add_text_embeds"] for example in examples], dim=0)
-        return {
-            "original_pixel_values": original_pixel_values,
-            "edited_pixel_values": edited_pixel_values,
-            "prompt_embeds": prompt_embeds,
-            "add_text_embeds": add_text_embeds,
-        }
-
-    # DataLoaders creation:
-    train_dataloader = torch.utils.data.DataLoader(
-        train_dataset,
-        shuffle=True,
-        collate_fn=collate_fn,
-        batch_size=args.train_batch_size,
-        num_workers=args.dataloader_num_workers,
-    )
-
-    # Scheduler and math around the number of training steps.
-    overrode_max_train_steps = False
-    num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
-    if args.max_train_steps is None:
-        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
-        overrode_max_train_steps = True
-
-    lr_scheduler = get_scheduler(
-        args.lr_scheduler,
-        optimizer=optimizer,
-        num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps,
-        num_training_steps=args.max_train_steps * args.gradient_accumulation_steps,
-    )
-
-    # Prepare everything with our `accelerator`.
-    unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
-        unet, optimizer, train_dataloader, lr_scheduler
-    )
-
-    if args.use_ema:
-        ema_unet.to(accelerator.device)
-
-    # Move vae, unet and text_encoder to device and cast to weight_dtype
-    # The VAE is in float32 to avoid NaN losses.
-    if args.pretrained_vae_model_name_or_path is not None:
-        vae.to(accelerator.device, dtype=weight_dtype)
-    else:
-        vae.to(accelerator.device, dtype=torch.float32)
-
-    # We need to recalculate our total training steps as the size of the training dataloader may have changed.
-    num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
-    if overrode_max_train_steps:
-        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
-    # Afterwards we recalculate our number of training epochs
-    args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
-
-    # We need to initialize the trackers we use, and also store our configuration.
-    # The trackers initializes automatically on the main process.
-    if accelerator.is_main_process:
-        accelerator.init_trackers("instruct-pix2pix-xl", config=vars(args))
-
-    # Train!
-    total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
-
-    logger.info("***** Running training *****")
-    logger.info(f"  Num examples = {len(train_dataset)}")
-    logger.info(f"  Num Epochs = {args.num_train_epochs}")
-    logger.info(f"  Instantaneous batch size per device = {args.train_batch_size}")
-    logger.info(f"  Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
-    logger.info(f"  Gradient Accumulation steps = {args.gradient_accumulation_steps}")
-    logger.info(f"  Total optimization steps = {args.max_train_steps}")
-    global_step = 0
-    first_epoch = 0
-
-    # Potentially load in the weights and states from a previous save
-    if args.resume_from_checkpoint:
-        if args.resume_from_checkpoint != "latest":
-            path = os.path.basename(args.resume_from_checkpoint)
-        else:
-            # Get the most recent checkpoint
-            dirs = os.listdir(args.output_dir)
-            dirs = [d for d in dirs if d.startswith("checkpoint")]
-            dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
-            path = dirs[-1] if len(dirs) > 0 else None
-
-        if path is None:
-            accelerator.print(
-                f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
-            )
-            args.resume_from_checkpoint = None
-        else:
-            accelerator.print(f"Resuming from checkpoint {path}")
-            accelerator.load_state(os.path.join(args.output_dir, path))
-            global_step = int(path.split("-")[1])
-
-            resume_global_step = global_step * args.gradient_accumulation_steps
-            first_epoch = global_step // num_update_steps_per_epoch
-            resume_step = resume_global_step % (num_update_steps_per_epoch * args.gradient_accumulation_steps)
-
-    # Only show the progress bar once on each machine.
-    progress_bar = tqdm(range(global_step, args.max_train_steps), disable=not accelerator.is_local_main_process)
-    progress_bar.set_description("Steps")
-
-    for epoch in range(first_epoch, args.num_train_epochs):
-        unet.train()
-        train_loss = 0.0
-        for step, batch in enumerate(train_dataloader):
-            # Skip steps until we reach the resumed step
-            if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step:
-                if step % args.gradient_accumulation_steps == 0:
-                    progress_bar.update(1)
-                continue
-
-            with accelerator.accumulate(unet):
-                # We want to learn the denoising process w.r.t the edited images which
-                # are conditioned on the original image (which was edited) and the edit instruction.
-                # So, first, convert images to latent space.
-                if args.pretrained_vae_model_name_or_path is not None:
-                    edited_pixel_values = batch["edited_pixel_values"].to(dtype=weight_dtype)
-                else:
-                    edited_pixel_values = batch["edited_pixel_values"]
-                latents = vae.encode(edited_pixel_values).latent_dist.sample()
-                latents = latents * vae.config.scaling_factor
-                if args.pretrained_vae_model_name_or_path is None:
-                    latents = latents.to(weight_dtype)
-
-                # Sample noise that we'll add to the latents
-                noise = torch.randn_like(latents)
-                bsz = latents.shape[0]
-                # Sample a random timestep for each image
-                timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device)
-                timesteps = timesteps.long()
-
-                # Add noise to the latents according to the noise magnitude at each timestep
-                # (this is the forward diffusion process)
-                noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
-
-                # SDXL additional inputs
-                encoder_hidden_states = batch["prompt_embeds"]
-                add_text_embeds = batch["add_text_embeds"]
-
-                # Get the additional image embedding for conditioning.
-                # Instead of getting a diagonal Gaussian here, we simply take the mode.
-                if args.pretrained_vae_model_name_or_path is not None:
-                    original_pixel_values = batch["original_pixel_values"].to(dtype=weight_dtype)
-                else:
-                    original_pixel_values = batch["original_pixel_values"]
-                original_image_embeds = vae.encode(original_pixel_values).latent_dist.sample()
-                if args.pretrained_vae_model_name_or_path is None:
-                    original_image_embeds = original_image_embeds.to(weight_dtype)
-
-                # Conditioning dropout to support classifier-free guidance during inference. For more details
-                # check out the section 3.2.1 of the original paper https://arxiv.org/abs/2211.09800.
-                if args.conditioning_dropout_prob is not None:
-                    random_p = torch.rand(bsz, device=latents.device, generator=generator)
-                    # Sample masks for the edit prompts.
-                    prompt_mask = random_p < 2 * args.conditioning_dropout_prob
-                    prompt_mask = prompt_mask.reshape(bsz, 1, 1)
-                    # Final text conditioning.
-                    encoder_hidden_states = torch.where(prompt_mask, null_conditioning, encoder_hidden_states)
-
-                    # Sample masks for the original images.
-                    image_mask_dtype = original_image_embeds.dtype
-                    image_mask = 1 - (
-                        (random_p >= args.conditioning_dropout_prob).to(image_mask_dtype)
-                        * (random_p < 3 * args.conditioning_dropout_prob).to(image_mask_dtype)
-                    )
-                    image_mask = image_mask.reshape(bsz, 1, 1, 1)
-                    # Final image conditioning.
-                    original_image_embeds = image_mask * original_image_embeds
-
-                # Concatenate the `original_image_embeds` with the `noisy_latents`.
-                concatenated_noisy_latents = torch.cat([noisy_latents, original_image_embeds], dim=1)
-
-                # Get the target for loss depending on the prediction type
-                if noise_scheduler.config.prediction_type == "epsilon":
-                    target = noise
-                elif noise_scheduler.config.prediction_type == "v_prediction":
-                    target = noise_scheduler.get_velocity(latents, noise, timesteps)
-                else:
-                    raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
-
-                # Predict the noise residual and compute loss
-                added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
-
-                model_pred = unet(
-                    concatenated_noisy_latents, timesteps, encoder_hidden_states, added_cond_kwargs=added_cond_kwargs
-                ).sample
-                loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
-
-                # Gather the losses across all processes for logging (if we use distributed training).
-                avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean()
-                train_loss += avg_loss.item() / args.gradient_accumulation_steps
-
-                # Backpropagate
-                accelerator.backward(loss)
-                if accelerator.sync_gradients:
-                    accelerator.clip_grad_norm_(unet.parameters(), args.max_grad_norm)
-                optimizer.step()
-                lr_scheduler.step()
-                optimizer.zero_grad()
-
-            # Checks if the accelerator has performed an optimization step behind the scenes
|
1037 |
-
if accelerator.sync_gradients:
|
1038 |
-
if args.use_ema:
|
1039 |
-
ema_unet.step(unet.parameters())
|
1040 |
-
progress_bar.update(1)
|
1041 |
-
global_step += 1
|
1042 |
-
accelerator.log({"train_loss": train_loss}, step=global_step)
|
1043 |
-
train_loss = 0.0
|
1044 |
-
|
1045 |
-
if global_step % args.checkpointing_steps == 0:
|
1046 |
-
if accelerator.is_main_process:
|
1047 |
-
# _before_ saving state, check if this save would set us over the `checkpoints_total_limit`
|
1048 |
-
if args.checkpoints_total_limit is not None:
|
1049 |
-
checkpoints = os.listdir(args.output_dir)
|
1050 |
-
checkpoints = [d for d in checkpoints if d.startswith("checkpoint")]
|
1051 |
-
checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1]))
|
1052 |
-
|
1053 |
-
# before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints
|
1054 |
-
if len(checkpoints) >= args.checkpoints_total_limit:
|
1055 |
-
num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1
|
1056 |
-
removing_checkpoints = checkpoints[0:num_to_remove]
|
1057 |
-
|
1058 |
-
logger.info(
|
1059 |
-
f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints"
|
1060 |
-
)
|
1061 |
-
logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}")
|
1062 |
-
|
1063 |
-
for removing_checkpoint in removing_checkpoints:
|
1064 |
-
removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint)
|
1065 |
-
shutil.rmtree(removing_checkpoint)
|
1066 |
-
|
1067 |
-
save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
|
1068 |
-
accelerator.save_state(save_path)
|
1069 |
-
logger.info(f"Saved state to {save_path}")
|
1070 |
-
|
1071 |
-
logs = {"step_loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
|
1072 |
-
progress_bar.set_postfix(**logs)
|
1073 |
-
|
1074 |
-
### BEGIN: Perform validation every `validation_epochs` steps
|
1075 |
-
if global_step % args.validation_steps == 0 or global_step == 1:
|
1076 |
-
if (args.val_image_url_or_path is not None) and (args.validation_prompt is not None):
|
1077 |
-
logger.info(
|
1078 |
-
f"Running validation... \n Generating {args.num_validation_images} images with prompt:"
|
1079 |
-
f" {args.validation_prompt}."
|
1080 |
-
)
|
1081 |
-
|
1082 |
-
# create pipeline
|
1083 |
-
if args.use_ema:
|
1084 |
-
# Store the UNet parameters temporarily and load the EMA parameters to perform inference.
|
1085 |
-
ema_unet.store(unet.parameters())
|
1086 |
-
ema_unet.copy_to(unet.parameters())
|
1087 |
-
|
1088 |
-
# The models need unwrapping because for compatibility in distributed training mode.
|
1089 |
-
pipeline = StableDiffusionXLInstructPix2PixPipeline.from_pretrained(
|
1090 |
-
args.pretrained_model_name_or_path,
|
1091 |
-
unet=accelerator.unwrap_model(unet),
|
1092 |
-
text_encoder=text_encoder_1,
|
1093 |
-
text_encoder_2=text_encoder_2,
|
1094 |
-
tokenizer=tokenizer_1,
|
1095 |
-
tokenizer_2=tokenizer_2,
|
1096 |
-
vae=vae,
|
1097 |
-
revision=args.revision,
|
1098 |
-
torch_dtype=weight_dtype,
|
1099 |
-
)
|
1100 |
-
pipeline = pipeline.to(accelerator.device)
|
1101 |
-
pipeline.set_progress_bar_config(disable=True)
|
1102 |
-
|
1103 |
-
# run inference
|
1104 |
-
# Save validation images
|
1105 |
-
val_save_dir = os.path.join(args.output_dir, "validation_images")
|
1106 |
-
if not os.path.exists(val_save_dir):
|
1107 |
-
os.makedirs(val_save_dir)
|
1108 |
-
|
1109 |
-
original_image = (
|
1110 |
-
lambda image_url_or_path: load_image(image_url_or_path)
|
1111 |
-
if urlparse(image_url_or_path).scheme
|
1112 |
-
else Image.open(image_url_or_path).convert("RGB")
|
1113 |
-
)(args.val_image_url_or_path)
|
1114 |
-
with torch.autocast(
|
1115 |
-
str(accelerator.device).replace(":0", ""), enabled=accelerator.mixed_precision == "fp16"
|
1116 |
-
):
|
1117 |
-
edited_images = []
|
1118 |
-
for val_img_idx in range(args.num_validation_images):
|
1119 |
-
a_val_img = pipeline(
|
1120 |
-
args.validation_prompt,
|
1121 |
-
image=original_image,
|
1122 |
-
num_inference_steps=20,
|
1123 |
-
image_guidance_scale=1.5,
|
1124 |
-
guidance_scale=7,
|
1125 |
-
generator=generator,
|
1126 |
-
).images[0]
|
1127 |
-
edited_images.append(a_val_img)
|
1128 |
-
a_val_img.save(os.path.join(val_save_dir, f"step_{global_step}_val_img_{val_img_idx}.png"))
|
1129 |
-
|
1130 |
-
for tracker in accelerator.trackers:
|
1131 |
-
if tracker.name == "wandb":
|
1132 |
-
wandb_table = wandb.Table(columns=WANDB_TABLE_COL_NAMES)
|
1133 |
-
for edited_image in edited_images:
|
1134 |
-
wandb_table.add_data(
|
1135 |
-
wandb.Image(original_image), wandb.Image(edited_image), args.validation_prompt
|
1136 |
-
)
|
1137 |
-
tracker.log({"validation": wandb_table})
|
1138 |
-
if args.use_ema:
|
1139 |
-
# Switch back to the original UNet parameters.
|
1140 |
-
ema_unet.restore(unet.parameters())
|
1141 |
-
|
1142 |
-
del pipeline
|
1143 |
-
torch.cuda.empty_cache()
|
1144 |
-
### END: Perform validation every `validation_epochs` steps
|
1145 |
-
|
1146 |
-
if global_step >= args.max_train_steps:
|
1147 |
-
break
|
1148 |
-
|
1149 |
-
# Create the pipeline using the trained modules and save it.
|
1150 |
-
accelerator.wait_for_everyone()
|
1151 |
-
if accelerator.is_main_process:
|
1152 |
-
unet = accelerator.unwrap_model(unet)
|
1153 |
-
if args.use_ema:
|
1154 |
-
ema_unet.copy_to(unet.parameters())
|
1155 |
-
|
1156 |
-
pipeline = StableDiffusionXLInstructPix2PixPipeline.from_pretrained(
|
1157 |
-
args.pretrained_model_name_or_path,
|
1158 |
-
text_encoder=text_encoder_1,
|
1159 |
-
text_encoder_2=text_encoder_2,
|
1160 |
-
tokenizer=tokenizer_1,
|
1161 |
-
tokenizer_2=tokenizer_2,
|
1162 |
-
vae=vae,
|
1163 |
-
unet=unet,
|
1164 |
-
revision=args.revision,
|
1165 |
-
)
|
1166 |
-
pipeline.save_pretrained(args.output_dir)
|
1167 |
-
|
1168 |
-
if args.push_to_hub:
|
1169 |
-
upload_folder(
|
1170 |
-
repo_id=repo_id,
|
1171 |
-
folder_path=args.output_dir,
|
1172 |
-
commit_message="End of training",
|
1173 |
-
ignore_patterns=["step_*", "epoch_*"],
|
1174 |
-
)
|
1175 |
-
|
1176 |
-
if args.validation_prompt is not None:
|
1177 |
-
edited_images = []
|
1178 |
-
pipeline = pipeline.to(accelerator.device)
|
1179 |
-
with torch.autocast(str(accelerator.device).replace(":0", "")):
|
1180 |
-
for _ in range(args.num_validation_images):
|
1181 |
-
edited_images.append(
|
1182 |
-
pipeline(
|
1183 |
-
args.validation_prompt,
|
1184 |
-
image=original_image,
|
1185 |
-
num_inference_steps=20,
|
1186 |
-
image_guidance_scale=1.5,
|
1187 |
-
guidance_scale=7,
|
1188 |
-
generator=generator,
|
1189 |
-
).images[0]
|
1190 |
-
)
|
1191 |
-
|
1192 |
-
for tracker in accelerator.trackers:
|
1193 |
-
if tracker.name == "wandb":
|
1194 |
-
wandb_table = wandb.Table(columns=WANDB_TABLE_COL_NAMES)
|
1195 |
-
for edited_image in edited_images:
|
1196 |
-
wandb_table.add_data(
|
1197 |
-
wandb.Image(original_image), wandb.Image(edited_image), args.validation_prompt
|
1198 |
-
)
|
1199 |
-
tracker.log({"test": wandb_table})
|
1200 |
-
|
1201 |
-
accelerator.end_training()
|
1202 |
-
|
1203 |
-
|
1204 |
-
if __name__ == "__main__":
|
1205 |
-
main()
|
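
The conditioning-dropout block above is what makes classifier-free guidance possible at inference time, and its three-way masking is easy to misread. Below is a minimal standalone sketch of just the masking arithmetic; the probability value and batch size are illustrative, not the script's defaults.

# Sketch of the conditioning dropout masks, assuming an illustrative p = 0.05.
# For a uniform draw r in [0, 1):
#   r < 2p       -> the edit instruction is replaced by the null embedding
#   p <= r < 3p  -> the original-image latents are zeroed
# so each modality is dropped with probability 2p, and both together with probability p.
import torch

p = 0.05   # hypothetical conditioning_dropout_prob
bsz = 8    # hypothetical batch size
random_p = torch.rand(bsz)

prompt_dropped = random_p < 2 * p
image_kept = 1 - ((random_p >= p).float() * (random_p < 3 * p).float())

print(prompt_dropped)  # True where the text conditioning is nulled
print(image_kept)      # 0.0 where the image conditioning is zeroed
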
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipeline_utils.py
DELETED
@@ -1,29 +0,0 @@
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works.

from .pipelines import DiffusionPipeline, ImagePipelineOutput  # noqa: F401
from .utils import deprecate


deprecate(
    "pipelines_utils",
    "0.22.0",
    "Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
    standard_warn=False,
    stacklevel=3,
)
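
The file above is a pure re-export shim. As a general pattern (independent of diffusers, with hypothetical module names), the same effect can be had with the standard library alone:

# Sketch of the old-import-path shim pattern; `new_package.real_module` and
# `Thing` are hypothetical names, not diffusers API.
import warnings

from new_package.real_module import Thing  # noqa: F401

warnings.warn(
    "Importing `Thing` from this module is deprecated; import it from "
    "new_package.real_module instead.",
    DeprecationWarning,
    stacklevel=2,
)
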
spaces/Andy1621/uniformer_image_detection/configs/_base_/default_runtime.py
DELETED
@@ -1,16 +0,0 @@
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
# yapf:enable
custom_hooks = [dict(type='NumClassCheckHook')]

dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
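
Runtime settings like these are merged into every config that lists this file in its `_base_`. A minimal sketch of how such a config is resolved (the config path is illustrative):

# Sketch: loading an MMDetection config; mmcv resolves the _base_ chain,
# so the runtime defaults above appear as attributes on the merged config.
from mmcv import Config

cfg = Config.fromfile('configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py')
print(cfg.log_level)          # 'INFO', inherited from _base_/default_runtime.py
print(cfg.checkpoint_config)  # dict(interval=1)
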
spaces/Andy1621/uniformer_image_detection/configs/dynamic_rcnn/README.md
DELETED
@@ -1,20 +0,0 @@
# Dynamic R-CNN: Towards High Quality Object Detection via Dynamic Training

## Introduction

[ALGORITHM]

```
@article{DynamicRCNN,
    author = {Hongkai Zhang and Hong Chang and Bingpeng Ma and Naiyan Wang and Xilin Chen},
    title = {Dynamic {R-CNN}: Towards High Quality Object Detection via Dynamic Training},
    journal = {arXiv preprint arXiv:2004.06002},
    year = {2020}
}
```

## Results and Models

| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download |
|:--------:|:-----:|:-------:|:--------:|:--------------:|:------:|:------:|:--------:|
| R-50 | pytorch | 1x | 3.8 | | 38.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x/dynamic_rcnn_r50_fpn_1x-62a3f276.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x/dynamic_rcnn_r50_fpn_1x_20200618_095048.log.json) |
spaces/Andy1621/uniformer_image_detection/configs/fpg/mask_rcnn_r50_fpg_crop640_50e_coco.py
DELETED
@@ -1,48 +0,0 @@
_base_ = 'mask_rcnn_r50_fpn_crop640_50e_coco.py'

norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
    neck=dict(
        type='FPG',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        inter_channels=256,
        num_outs=5,
        stack_times=9,
        paths=['bu'] * 9,
        same_down_trans=None,
        same_up_trans=dict(
            type='conv',
            kernel_size=3,
            stride=2,
            padding=1,
            norm_cfg=norm_cfg,
            inplace=False,
            order=('act', 'conv', 'norm')),
        across_lateral_trans=dict(
            type='conv',
            kernel_size=1,
            norm_cfg=norm_cfg,
            inplace=False,
            order=('act', 'conv', 'norm')),
        across_down_trans=dict(
            type='interpolation_conv',
            mode='nearest',
            kernel_size=3,
            norm_cfg=norm_cfg,
            order=('act', 'conv', 'norm'),
            inplace=False),
        across_up_trans=None,
        across_skip_trans=dict(
            type='conv',
            kernel_size=1,
            norm_cfg=norm_cfg,
            inplace=False,
            order=('act', 'conv', 'norm')),
        output_trans=dict(
            type='last_conv',
            kernel_size=3,
            order=('act', 'conv', 'norm'),
            inplace=False),
        norm_cfg=norm_cfg,
        skip_inds=[(0, 1, 2, 3), (0, 1, 2), (0, 1), (0, ), ()]))
spaces/Andy1621/uniformer_image_detection/configs/gn+ws/faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py
DELETED
@@ -1,13 +0,0 @@
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
conv_cfg = dict(type='ConvWS')
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
    pretrained='open-mmlab://jhu/resnet50_gn_ws',
    backbone=dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg),
    neck=dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg),
    roi_head=dict(
        bbox_head=dict(
            type='Shared4Conv1FCBBoxHead',
            conv_out_channels=256,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg)))
spaces/Andy1621/uniformer_image_detection/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain_1x_coco.py
DELETED
@@ -1,41 +0,0 @@
_base_ = './mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
    pretrained='open-mmlab://detectron2/resnet50_caffe',
    backbone=dict(norm_cfg=dict(requires_grad=False), style='caffe'))
# use caffe img_norm
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
                   (1333, 768), (1333, 800)],
        multiscale_mode='value',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
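
With `multiscale_mode='value'`, the Resize step picks one of the listed `img_scale` pairs at random for each training image. A toy sketch of that sampling rule (an illustration, not mmdet's implementation):

# Toy sketch of 'value'-mode multiscale sampling: one (w, h) pair per image.
import random

img_scales = [(1333, 640), (1333, 672), (1333, 704),
              (1333, 736), (1333, 768), (1333, 800)]
scale = random.choice(img_scales)
print(scale)
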
spaces/Andy1621/uniformer_image_detection/configs/scratch/faster_rcnn_r50_fpn_gn-all_scratch_6x_coco.py
DELETED
@@ -1,22 +0,0 @@
_base_ = [
    '../_base_/models/faster_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
    pretrained=None,
    backbone=dict(
        frozen_stages=-1, zero_init_residual=False, norm_cfg=norm_cfg),
    neck=dict(norm_cfg=norm_cfg),
    roi_head=dict(
        bbox_head=dict(
            type='Shared4Conv1FCBBoxHead',
            conv_out_channels=256,
            norm_cfg=norm_cfg)))
# optimizer
optimizer = dict(paramwise_cfg=dict(norm_decay_mult=0))
optimizer_config = dict(_delete_=True, grad_clip=None)
# learning policy
lr_config = dict(warmup_ratio=0.1, step=[65, 71])
runner = dict(type='EpochBasedRunner', max_epochs=73)
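
The `_delete_=True` key tells the config system to replace the corresponding dict from the base config rather than merge into it, which is why `grad_clip` ends up as `None` here instead of inheriting the base's clipping settings. A toy sketch of the merge rule (an illustration, not mmcv's actual code):

# Toy sketch of _delete_ semantics when a child config overrides a base dict.
def merge(base, child):
    child = dict(child)
    if child.pop('_delete_', False):
        return child          # replace the base dict wholesale
    merged = dict(base)
    merged.update(child)      # otherwise merge shallowly (for illustration)
    return merged

base = dict(grad_clip=dict(max_norm=35, norm_type=2))
child = dict(_delete_=True, grad_clip=None)
print(merge(base, child))  # {'grad_clip': None}
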
spaces/Anonymous-sub/Rerender/ControlNet/gradio_hough2image.py
DELETED
@@ -1,100 +0,0 @@
from share import *
import config

import cv2
import einops
import gradio as gr
import numpy as np
import torch
import random

from pytorch_lightning import seed_everything
from annotator.util import resize_image, HWC3
from annotator.mlsd import MLSDdetector
from cldm.model import create_model, load_state_dict
from cldm.ddim_hacked import DDIMSampler


apply_mlsd = MLSDdetector()

model = create_model('./models/cldm_v15.yaml').cpu()
model.load_state_dict(load_state_dict('./models/control_sd15_mlsd.pth', location='cuda'))
model = model.cuda()
ddim_sampler = DDIMSampler(model)


def process(input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps, guess_mode, strength, scale, seed, eta, value_threshold, distance_threshold):
    with torch.no_grad():
        input_image = HWC3(input_image)
        detected_map = apply_mlsd(resize_image(input_image, detect_resolution), value_threshold, distance_threshold)
        detected_map = HWC3(detected_map)
        img = resize_image(input_image, image_resolution)
        H, W, C = img.shape

        detected_map = cv2.resize(detected_map, (W, H), interpolation=cv2.INTER_NEAREST)

        control = torch.from_numpy(detected_map.copy()).float().cuda() / 255.0
        control = torch.stack([control for _ in range(num_samples)], dim=0)
        control = einops.rearrange(control, 'b h w c -> b c h w').clone()

        if seed == -1:
            seed = random.randint(0, 65535)
        seed_everything(seed)

        if config.save_memory:
            model.low_vram_shift(is_diffusing=False)

        cond = {"c_concat": [control], "c_crossattn": [model.get_learned_conditioning([prompt + ', ' + a_prompt] * num_samples)]}
        un_cond = {"c_concat": None if guess_mode else [control], "c_crossattn": [model.get_learned_conditioning([n_prompt] * num_samples)]}
        shape = (4, H // 8, W // 8)

        if config.save_memory:
            model.low_vram_shift(is_diffusing=True)

        model.control_scales = [strength * (0.825 ** float(12 - i)) for i in range(13)] if guess_mode else ([strength] * 13)  # Magic number. IDK why. Perhaps because 0.825**12<0.01 but 0.826**12>0.01
        samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
                                                     shape, cond, verbose=False, eta=eta,
                                                     unconditional_guidance_scale=scale,
                                                     unconditional_conditioning=un_cond)

        if config.save_memory:
            model.low_vram_shift(is_diffusing=False)

        x_samples = model.decode_first_stage(samples)
        x_samples = (einops.rearrange(x_samples, 'b c h w -> b h w c') * 127.5 + 127.5).cpu().numpy().clip(0, 255).astype(np.uint8)

        results = [x_samples[i] for i in range(num_samples)]
    return [255 - cv2.dilate(detected_map, np.ones(shape=(3, 3), dtype=np.uint8), iterations=1)] + results


block = gr.Blocks().queue()
with block:
    with gr.Row():
        gr.Markdown("## Control Stable Diffusion with Hough Line Maps")
    with gr.Row():
        with gr.Column():
            input_image = gr.Image(source='upload', type="numpy")
            prompt = gr.Textbox(label="Prompt")
            run_button = gr.Button(label="Run")
            with gr.Accordion("Advanced options", open=False):
                num_samples = gr.Slider(label="Images", minimum=1, maximum=12, value=1, step=1)
                image_resolution = gr.Slider(label="Image Resolution", minimum=256, maximum=768, value=512, step=64)
                strength = gr.Slider(label="Control Strength", minimum=0.0, maximum=2.0, value=1.0, step=0.01)
                guess_mode = gr.Checkbox(label='Guess Mode', value=False)
                detect_resolution = gr.Slider(label="Hough Resolution", minimum=128, maximum=1024, value=512, step=1)
                value_threshold = gr.Slider(label="Hough value threshold (MLSD)", minimum=0.01, maximum=2.0, value=0.1, step=0.01)
                distance_threshold = gr.Slider(label="Hough distance threshold (MLSD)", minimum=0.01, maximum=20.0, value=0.1, step=0.01)
                ddim_steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=20, step=1)
                scale = gr.Slider(label="Guidance Scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
                seed = gr.Slider(label="Seed", minimum=-1, maximum=2147483647, step=1, randomize=True)
                eta = gr.Number(label="eta (DDIM)", value=0.0)
                a_prompt = gr.Textbox(label="Added Prompt", value='best quality, extremely detailed')
                n_prompt = gr.Textbox(label="Negative Prompt",
                                      value='longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality')
        with gr.Column():
            result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery").style(grid=2, height='auto')
    ips = [input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps, guess_mode, strength, scale, seed, eta, value_threshold, distance_threshold]
    run_button.click(fn=process, inputs=ips, outputs=[result_gallery])


block.launch(server_name='0.0.0.0')
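
The `control_scales` expression above is the one non-obvious piece of math in the script: in guess mode each of the 13 control layers gets a geometrically decaying weight. A short sketch that just prints the schedule (the strength value is illustrative):

# Sketch: per-layer control scales in guess mode for an illustrative strength of 1.0.
# Deeper layers (larger i) stay near 1.0; the shallowest is scaled by 0.825**12 (about 0.1).
strength = 1.0
scales = [strength * (0.825 ** float(12 - i)) for i in range(13)]
for i, s in enumerate(scales):
    print(f"layer {i:2d}: {s:.4f}")
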
spaces/Anthony7906/MengHuiMXD_GPT/run_Linux.sh
DELETED
@@ -1,31 +0,0 @@
#!/bin/bash

# Get the directory where this script lives
script_dir=$(dirname "$(readlink -f "$0")")

# Change the working directory to the script's directory
cd "$script_dir" || exit

# Check whether the Git repository has updates
git remote update
pwd

if ! git status -uno | grep 'up to date' > /dev/null; then
    # If there are updates, stop the currently running server
    pkill -f ChuanhuChatbot.py

    # Pull the latest changes
    git pull

    # Install dependencies
    pip3 install -r requirements.txt

    # Restart the server
    nohup python3 ChuanhuChatbot.py &
fi

# Check whether ChuanhuChatbot.py is running
if ! pgrep -f ChuanhuChatbot.py > /dev/null; then
    # If it is not running, start the server
    nohup python3 ChuanhuChatbot.py &
fi
spaces/Antoine245/bot/app.py
DELETED
@@ -1,69 +0,0 @@
import gradio as gr
import os
import time
import google.generativeai as palm

palm.configure(api_key=os.environ.get("palm_key"))

defaults = {
    'model': 'models/chat-bison-001',
    'temperature': 0.25,
    'candidate_count': 1,
    'top_k': 40,
    'top_p': 0.95,
}

context = "Your IT assistant"

examples = [
    [
        "Hey my computer is broken",
        "Hey, what is the issue with your computer?"
    ]
]

history = ['']

with gr.Blocks(theme=gr.themes.Soft()) as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox()
    btn = gr.Button("Submit", variant="primary")
    clear = gr.Button("Clear")

    def user(user_message, history):
        history.append([user_message, None])
        return gr.update(value=""), history

    def bot(history):
        try:
            bot_message = palm.chat(
                context=context,
                examples=examples,
                messages=[h[0] for h in history]
            )

            history[-1][1] = ""
            for character in bot_message.last:
                history[-1][1] += character
                time.sleep(0.005)
        except Exception as e:
            # Handle the exception here
            print("Error occurred:", str(e))
            # You can customize the error handling as per your requirements
            # For example, return an error message to the user
            history[-1][1] = "Incorrect input, please retry with a longer sentence in English"

        return history

    response = msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
        bot, chatbot, chatbot
    )
    response = btn.click(user, [msg, chatbot], [msg, chatbot], queue=False).then(
        bot, chatbot, chatbot
    )
    response.then(lambda: gr.update(interactive=True), None, [msg], queue=False)
    clear.click(lambda: None, None, chatbot, queue=False)

demo.queue()
demo.launch(debug=True)
spaces/Arulkumar03/GroundingDINO_SOTA_Zero_Shot_Model/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cpu.h
DELETED
@@ -1,35 +0,0 @@
/*!
**************************************************************************************************
* Deformable DETR
* Copyright (c) 2020 SenseTime. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 [see LICENSE for details]
**************************************************************************************************
* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
**************************************************************************************************
*/

#pragma once
#include <torch/extension.h>

namespace groundingdino {

at::Tensor
ms_deform_attn_cpu_forward(
    const at::Tensor &value,
    const at::Tensor &spatial_shapes,
    const at::Tensor &level_start_index,
    const at::Tensor &sampling_loc,
    const at::Tensor &attn_weight,
    const int im2col_step);

std::vector<at::Tensor>
ms_deform_attn_cpu_backward(
    const at::Tensor &value,
    const at::Tensor &spatial_shapes,
    const at::Tensor &level_start_index,
    const at::Tensor &sampling_loc,
    const at::Tensor &attn_weight,
    const at::Tensor &grad_output,
    const int im2col_step);

}  // namespace groundingdino
spaces/BAAI/AltDiffusion/share_btn.py
DELETED
@@ -1,60 +0,0 @@
community_icon_html = """<svg id="share-btn-share-icon" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32">
    <path d="M20.6081 3C21.7684 3 22.8053 3.49196 23.5284 4.38415C23.9756 4.93678 24.4428 5.82749 24.4808 7.16133C24.9674 7.01707 25.4353 6.93643 25.8725 6.93643C26.9833 6.93643 27.9865 7.37587 28.696 8.17411C29.6075 9.19872 30.0124 10.4579 29.8361 11.7177C29.7523 12.3177 29.5581 12.8555 29.2678 13.3534C29.8798 13.8646 30.3306 14.5763 30.5485 15.4322C30.719 16.1032 30.8939 17.5006 29.9808 18.9403C30.0389 19.0342 30.0934 19.1319 30.1442 19.2318C30.6932 20.3074 30.7283 21.5229 30.2439 22.6548C29.5093 24.3704 27.6841 25.7219 24.1397 27.1727C21.9347 28.0753 19.9174 28.6523 19.8994 28.6575C16.9842 29.4379 14.3477 29.8345 12.0653 29.8345C7.87017 29.8345 4.8668 28.508 3.13831 25.8921C0.356375 21.6797 0.754104 17.8269 4.35369 14.1131C6.34591 12.058 7.67023 9.02782 7.94613 8.36275C8.50224 6.39343 9.97271 4.20438 12.4172 4.20438H12.4179C12.6236 4.20438 12.8314 4.2214 13.0364 4.25468C14.107 4.42854 15.0428 5.06476 15.7115 6.02205C16.4331 5.09583 17.134 4.359 17.7682 3.94323C18.7242 3.31737 19.6794 3 20.6081 3ZM20.6081 5.95917C20.2427 5.95917 19.7963 6.1197 19.3039 6.44225C17.7754 7.44319 14.8258 12.6772 13.7458 14.7131C13.3839 15.3952 12.7655 15.6837 12.2086 15.6837C11.1036 15.6837 10.2408 14.5497 12.1076 13.1085C14.9146 10.9402 13.9299 7.39584 12.5898 7.1776C12.5311 7.16799 12.4731 7.16355 12.4172 7.16355C11.1989 7.16355 10.6615 9.33114 10.6615 9.33114C10.6615 9.33114 9.0863 13.4148 6.38031 16.206C3.67434 18.998 3.5346 21.2388 5.50675 24.2246C6.85185 26.2606 9.42666 26.8753 12.0653 26.8753C14.8021 26.8753 17.6077 26.2139 19.1799 25.793C19.2574 25.7723 28.8193 22.984 27.6081 20.6107C27.4046 20.212 27.0693 20.0522 26.6471 20.0522C24.9416 20.0522 21.8393 22.6726 20.5057 22.6726C20.2076 22.6726 19.9976 22.5416 19.9116 22.222C19.3433 20.1173 28.552 19.2325 27.7758 16.1839C27.639 15.6445 27.2677 15.4256 26.746 15.4263C24.4923 15.4263 19.4358 19.5181 18.3759 19.5181C18.2949 19.5181 18.2368 19.4937 18.2053 19.4419C17.6743 18.557 17.9653 17.9394 21.7082 15.6009C25.4511 13.2617 28.0783 11.8545 26.5841 10.1752C26.4121 9.98141 26.1684 9.8956 25.8725 9.8956C23.6001 9.89634 18.2311 14.9403 18.2311 14.9403C18.2311 14.9403 16.7821 16.496 15.9057 16.496C15.7043 16.496 15.533 16.4139 15.4169 16.2112C14.7956 15.1296 21.1879 10.1286 21.5484 8.06535C21.7928 6.66715 21.3771 5.95917 20.6081 5.95917Z" fill="#FF9D00"></path>
    <path d="M5.50686 24.2246C3.53472 21.2387 3.67446 18.9979 6.38043 16.206C9.08641 13.4147 10.6615 9.33111 10.6615 9.33111C10.6615 9.33111 11.2499 6.95933 12.59 7.17757C13.93 7.39581 14.9139 10.9401 12.1069 13.1084C9.29997 15.276 12.6659 16.7489 13.7459 14.713C14.8258 12.6772 17.7747 7.44316 19.304 6.44221C20.8326 5.44128 21.9089 6.00204 21.5484 8.06532C21.188 10.1286 14.795 15.1295 15.4171 16.2118C16.0391 17.2934 18.2312 14.9402 18.2312 14.9402C18.2312 14.9402 25.0907 8.49588 26.5842 10.1752C28.0776 11.8545 25.4512 13.2616 21.7082 15.6008C17.9646 17.9393 17.6744 18.557 18.2054 19.4418C18.7372 20.3266 26.9998 13.1351 27.7759 16.1838C28.5513 19.2324 19.3434 20.1173 19.9117 22.2219C20.48 24.3274 26.3979 18.2382 27.6082 20.6107C28.8193 22.9839 19.2574 25.7722 19.18 25.7929C16.0914 26.62 8.24723 28.3726 5.50686 24.2246Z" fill="#FFD21E"></path>
</svg>"""

loading_icon_html = """<svg id="share-btn-loading-icon" style="display:none;" class="animate-spin"
   style="color: #ffffff;
"
   xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" fill="none" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><circle style="opacity: 0.25;" cx="12" cy="12" r="10" stroke="white" stroke-width="4"></circle><path style="opacity: 0.75;" fill="white" d="M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4zm2 5.291A7.962 7.962 0 014 12H0c0 3.042 1.135 5.824 3 7.938l3-2.647z"></path></svg>"""

share_js = """async () => {
    async function uploadFile(file){
        const UPLOAD_URL = 'https://huggingface.co/uploads';
        const response = await fetch(UPLOAD_URL, {
            method: 'POST',
            headers: {
                'Content-Type': file.type,
                'X-Requested-With': 'XMLHttpRequest',
            },
            body: file, /// <- File inherits from Blob
        });
        const url = await response.text();
        return url;
    }
    const gradioEl = document.querySelector('body > gradio-app');
    const imgEls = gradioEl.querySelectorAll('#gallery img');
    const promptTxt = gradioEl.querySelector('#prompt-text-input input').value;
    const shareBtnEl = gradioEl.querySelector('#share-btn');
    const shareIconEl = gradioEl.querySelector('#share-btn-share-icon');
    const loadingIconEl = gradioEl.querySelector('#share-btn-loading-icon');
    if(!imgEls.length){
        return;
    };
    shareBtnEl.style.pointerEvents = 'none';
    shareIconEl.style.display = 'none';
    loadingIconEl.style.removeProperty('display');
    const files = await Promise.all(
        [...imgEls].map(async (imgEl) => {
            const res = await fetch(imgEl.src);
            const blob = await res.blob();
            const imgId = Date.now() % 200;
            const fileName = `diffuse-the-rest-${{imgId}}.png`;
            return new File([blob], fileName, { type: 'image/png' });
        })
    );
    const urls = await Promise.all(files.map((f) => uploadFile(f)));
    const htmlImgs = urls.map(url => `<img src='${url}' width='400' height='400'>`);
    const descriptionMd = `<div style='display: flex; flex-wrap: wrap; column-gap: 0.75rem;'>
${htmlImgs.join(`\n`)}
</div>`;
    const params = new URLSearchParams({
        title: promptTxt,
        description: descriptionMd,
    });
    const paramsStr = params.toString();
    window.open(`https://huggingface.co/spaces/BAAI/bilingual_stable_diffusion/discussions/new?${paramsStr}`, '_blank');
    shareBtnEl.style.removeProperty('pointer-events');
    shareIconEl.style.removeProperty('display');
    loadingIconEl.style.display = 'none';
}"""
spaces/BLACKHOST/timer/tm.py
DELETED
@@ -1,6 +0,0 @@
from time import sleep
time = 1000  # can change
while time != 0:
    print(time)
    time -= 1  # can change
    sleep(0.1)  # can change
spaces/BernardoOlisan/vqganclip/taming-transformers/taming/modules/losses/vqperceptual.py
DELETED
@@ -1,136 +0,0 @@
import torch
import torch.nn as nn
import torch.nn.functional as F

from taming.modules.losses.lpips import LPIPS
from taming.modules.discriminator.model import NLayerDiscriminator, weights_init


class DummyLoss(nn.Module):
    def __init__(self):
        super().__init__()


def adopt_weight(weight, global_step, threshold=0, value=0.):
    if global_step < threshold:
        weight = value
    return weight


def hinge_d_loss(logits_real, logits_fake):
    loss_real = torch.mean(F.relu(1. - logits_real))
    loss_fake = torch.mean(F.relu(1. + logits_fake))
    d_loss = 0.5 * (loss_real + loss_fake)
    return d_loss


def vanilla_d_loss(logits_real, logits_fake):
    d_loss = 0.5 * (
        torch.mean(torch.nn.functional.softplus(-logits_real)) +
        torch.mean(torch.nn.functional.softplus(logits_fake)))
    return d_loss


class VQLPIPSWithDiscriminator(nn.Module):
    def __init__(self, disc_start, codebook_weight=1.0, pixelloss_weight=1.0,
                 disc_num_layers=3, disc_in_channels=3, disc_factor=1.0, disc_weight=1.0,
                 perceptual_weight=1.0, use_actnorm=False, disc_conditional=False,
                 disc_ndf=64, disc_loss="hinge"):
        super().__init__()
        assert disc_loss in ["hinge", "vanilla"]
        self.codebook_weight = codebook_weight
        self.pixel_weight = pixelloss_weight
        self.perceptual_loss = LPIPS().eval()
        self.perceptual_weight = perceptual_weight

        self.discriminator = NLayerDiscriminator(input_nc=disc_in_channels,
                                                 n_layers=disc_num_layers,
                                                 use_actnorm=use_actnorm,
                                                 ndf=disc_ndf
                                                 ).apply(weights_init)
        self.discriminator_iter_start = disc_start
        if disc_loss == "hinge":
            self.disc_loss = hinge_d_loss
        elif disc_loss == "vanilla":
            self.disc_loss = vanilla_d_loss
        else:
            raise ValueError(f"Unknown GAN loss '{disc_loss}'.")
        print(f"VQLPIPSWithDiscriminator running with {disc_loss} loss.")
        self.disc_factor = disc_factor
        self.discriminator_weight = disc_weight
        self.disc_conditional = disc_conditional

    def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None):
        if last_layer is not None:
            nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0]
            g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0]
        else:
            nll_grads = torch.autograd.grad(nll_loss, self.last_layer[0], retain_graph=True)[0]
            g_grads = torch.autograd.grad(g_loss, self.last_layer[0], retain_graph=True)[0]

        d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4)
        d_weight = torch.clamp(d_weight, 0.0, 1e4).detach()
        d_weight = d_weight * self.discriminator_weight
        return d_weight

    def forward(self, codebook_loss, inputs, reconstructions, optimizer_idx,
                global_step, last_layer=None, cond=None, split="train"):
        rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous())
        if self.perceptual_weight > 0:
            p_loss = self.perceptual_loss(inputs.contiguous(), reconstructions.contiguous())
            rec_loss = rec_loss + self.perceptual_weight * p_loss
        else:
            p_loss = torch.tensor([0.0])

        nll_loss = rec_loss
        # nll_loss = torch.sum(nll_loss) / nll_loss.shape[0]
        nll_loss = torch.mean(nll_loss)

        # now the GAN part
        if optimizer_idx == 0:
            # generator update
            if cond is None:
                assert not self.disc_conditional
                logits_fake = self.discriminator(reconstructions.contiguous())
            else:
                assert self.disc_conditional
                logits_fake = self.discriminator(torch.cat((reconstructions.contiguous(), cond), dim=1))
            g_loss = -torch.mean(logits_fake)

            try:
                d_weight = self.calculate_adaptive_weight(nll_loss, g_loss, last_layer=last_layer)
            except RuntimeError:
                assert not self.training
                d_weight = torch.tensor(0.0)

            disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
            loss = nll_loss + d_weight * disc_factor * g_loss + self.codebook_weight * codebook_loss.mean()

            log = {"{}/total_loss".format(split): loss.clone().detach().mean(),
                   "{}/quant_loss".format(split): codebook_loss.detach().mean(),
                   "{}/nll_loss".format(split): nll_loss.detach().mean(),
                   "{}/rec_loss".format(split): rec_loss.detach().mean(),
                   "{}/p_loss".format(split): p_loss.detach().mean(),
                   "{}/d_weight".format(split): d_weight.detach(),
                   "{}/disc_factor".format(split): torch.tensor(disc_factor),
                   "{}/g_loss".format(split): g_loss.detach().mean(),
                   }
            return loss, log

        if optimizer_idx == 1:
            # second pass for discriminator update
            if cond is None:
                logits_real = self.discriminator(inputs.contiguous().detach())
                logits_fake = self.discriminator(reconstructions.contiguous().detach())
            else:
                logits_real = self.discriminator(torch.cat((inputs.contiguous().detach(), cond), dim=1))
                logits_fake = self.discriminator(torch.cat((reconstructions.contiguous().detach(), cond), dim=1))

            disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
            d_loss = disc_factor * self.disc_loss(logits_real, logits_fake)

            log = {"{}/disc_loss".format(split): d_loss.clone().detach().mean(),
                   "{}/logits_real".format(split): logits_real.detach().mean(),
                   "{}/logits_fake".format(split): logits_fake.detach().mean()
                   }
            return d_loss, log
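
`calculate_adaptive_weight` above is VQGAN's gradient-balancing trick: the adversarial term is rescaled so that its gradient at the last decoder layer has roughly the same magnitude as the reconstruction gradient. Written out, with the epsilon and clamp bounds taken from the code:

    \lambda = w_{\mathrm{disc}} \cdot \operatorname{clamp}\!\left(
        \frac{\lVert \nabla_{W_L} L_{\mathrm{rec}} \rVert}
             {\lVert \nabla_{W_L} L_{\mathrm{GAN}} \rVert + 10^{-4}},\; 0,\; 10^{4} \right)

where W_L is the last decoder layer, so the generator objective becomes L_rec + lambda * d * L_GAN + w_cb * L_codebook, with d the warm-up factor produced by `adopt_weight`.
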
spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/parsers.py
DELETED
@@ -1,1112 +0,0 @@
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
#     http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Response parsers for the various protocol types.

The module contains classes that can take an HTTP response, and given
an output shape, parse the response into a dict according to the
rules in the output shape.

There are many similarities amongst the different protocols with regard
to response parsing, and the code is structured in a way to avoid
code duplication when possible.  The diagram below is a diagram
showing the inheritance hierarchy of the response classes.

::

                                 +--------------+
                                 |ResponseParser|
                                 +--------------+
                                    ^    ^    ^
               +--------------------+    |    +-------------------+
               |                         |                        |
    +----------+----------+       +------+-------+        +-------+------+
    |BaseXMLResponseParser|       |BaseRestParser|        |BaseJSONParser|
    +---------------------+       +--------------+        +--------------+
              ^         ^          ^           ^           ^        ^
              |         |          |           |           |        |
              |         |          |           |           |        |
              |        ++----------+-+       +-+-----------++       |
              |        |RestXMLParser|       |RestJSONParser|       |
        +-----+-----+  +-------------+       +--------------+  +----+-----+
        |QueryParser|                                           |JSONParser|
        +-----------+                                           +----------+

The diagram above shows that there is a base class, ``ResponseParser`` that
contains logic that is similar amongst all the different protocols (``query``,
``json``, ``rest-json``, ``rest-xml``).  Amongst the various services there
is shared logic that can be grouped several ways:

* The ``query`` and ``rest-xml`` both have XML bodies that are parsed in the
  same way.
* The ``json`` and ``rest-json`` protocols both have JSON bodies that are
  parsed in the same way.
* The ``rest-json`` and ``rest-xml`` protocols have additional attributes
  besides body parameters that are parsed the same (headers, query string,
  status code).

This is reflected in the class diagram above.  The ``BaseXMLResponseParser``
and the BaseJSONParser contain logic for parsing the XML/JSON body,
and the BaseRestParser contains logic for parsing out attributes that
come from other parts of the HTTP response.  Classes like the
``RestXMLParser`` inherit from the ``BaseXMLResponseParser`` to get the
XML body parsing logic and the ``BaseRestParser`` to get the HTTP
header/status code/query string parsing.

Additionally, there are event stream parsers that are used by the other parsers
to wrap streaming bodies that represent a stream of events.  The
BaseEventStreamParser extends from ResponseParser and defines the logic for
parsing values from the headers and payload of a message from the underlying
binary encoding protocol.  Currently, event streams support parsing bodies
encoded as JSON and XML through the following hierarchy.


                                  +--------------+
                                  |ResponseParser|
                                  +--------------+
                                    ^    ^    ^
               +--------------------+    |    +------------------+
               |                         |                       |
    +----------+----------+   +----------+----------+    +-------+------+
    |BaseXMLResponseParser|   |BaseEventStreamParser|    |BaseJSONParser|
    +---------------------+   +---------------------+    +--------------+
                     ^                ^        ^                 ^
                     |                |        |                 |
                     |                |        |                 |
                   +-+----------------+-+    +-+-----------------+-+
                   |EventStreamXMLParser|    |EventStreamJSONParser|
                   +--------------------+    +---------------------+

Return Values
=============

Each call to ``parse()`` returns a dict that has this form::

    Standard Response

    {
      "ResponseMetadata": {"RequestId": <requestid>}
      <response keys>
    }

    Error response

    {
      "ResponseMetadata": {"RequestId": <requestid>}
      "Error": {
        "Code": <string>,
        "Message": <string>,
        "Type": <string>,
        <additional keys>
      }
    }

"""
import base64
import http.client
import json
import logging
import re

from botocore.compat import ETree, XMLParseError
from botocore.eventstream import EventStream, NoInitialResponseError
from botocore.utils import (
    is_json_value_header,
    lowercase_dict,
    merge_dicts,
    parse_timestamp,
)

LOG = logging.getLogger(__name__)

DEFAULT_TIMESTAMP_PARSER = parse_timestamp


class ResponseParserFactory:
    def __init__(self):
        self._defaults = {}

    def set_parser_defaults(self, **kwargs):
        """Set default arguments when a parser instance is created.

        You can specify any kwargs that are allowed by a ResponseParser
        class.  There are currently two arguments:

        * timestamp_parser - A callable that can parse a timestamp string
        * blob_parser - A callable that can parse a blob type

        """
        self._defaults.update(kwargs)

    def create_parser(self, protocol_name):
        parser_cls = PROTOCOL_PARSERS[protocol_name]
        return parser_cls(**self._defaults)


def create_parser(protocol):
    return ResponseParserFactory().create_parser(protocol)


def _text_content(func):
    # This decorator hides the difference between
    # an XML node with text or a plain string.  It's used
    # to ensure that scalar processing operates only on text
    # strings, which allows the same scalar handlers to be used
    # for XML nodes from the body and HTTP headers.
    def _get_text_content(self, shape, node_or_string):
        if hasattr(node_or_string, 'text'):
            text = node_or_string.text
            if text is None:
                # If an XML node is empty <foo></foo>,
                # we want to parse that as an empty string,
                # not as a null/None value.
                text = ''
        else:
            text = node_or_string
        return func(self, shape, text)

    return _get_text_content


class ResponseParserError(Exception):
    pass


class ResponseParser:
    """Base class for response parsing.

    This class represents the interface that all ResponseParsers for the
    various protocols must implement.

    This class will take an HTTP response and a model shape and parse the
    HTTP response into a dictionary.

    There is a single public method exposed: ``parse``.  See the ``parse``
    docstring for more info.

    """

    DEFAULT_ENCODING = 'utf-8'
    EVENT_STREAM_PARSER_CLS = None

    def __init__(self, timestamp_parser=None, blob_parser=None):
        if timestamp_parser is None:
            timestamp_parser = DEFAULT_TIMESTAMP_PARSER
        self._timestamp_parser = timestamp_parser
        if blob_parser is None:
            blob_parser = self._default_blob_parser
        self._blob_parser = blob_parser
        self._event_stream_parser = None
        if self.EVENT_STREAM_PARSER_CLS is not None:
            self._event_stream_parser = self.EVENT_STREAM_PARSER_CLS(
                timestamp_parser, blob_parser
            )

    def _default_blob_parser(self, value):
        # Blobs are always returned as bytes type (this matters on python3).
        # We don't decode this to a str because it's entirely possible that the
        # blob contains binary data that actually can't be decoded.
        return base64.b64decode(value)

    def parse(self, response, shape):
        """Parse the HTTP response given a shape.

        :param response: The HTTP response dictionary.  This is a dictionary
            that represents the HTTP request.  The dictionary must have the
            following keys, ``body``, ``headers``, and ``status_code``.

        :param shape: The model shape describing the expected output.
        :return: Returns a dictionary representing the parsed response
            described by the model.  In addition to the shape described from
            the model, each response will also have a ``ResponseMetadata``
            which contains metadata about the response, which contains at least
            two keys containing ``RequestId`` and ``HTTPStatusCode``.  Some
            responses may populate additional keys, but ``RequestId`` will
            always be present.

        """
        LOG.debug('Response headers: %r', response['headers'])
        LOG.debug('Response body:\n%r', response['body'])
        if response['status_code'] >= 301:
            if self._is_generic_error_response(response):
                parsed = self._do_generic_error_parse(response)
            elif self._is_modeled_error_shape(shape):
                parsed = self._do_modeled_error_parse(response, shape)
                # We don't want to decorate the modeled fields with metadata
                return parsed
            else:
                parsed = self._do_error_parse(response, shape)
        else:
            parsed = self._do_parse(response, shape)

        # We don't want to decorate event stream responses with metadata
        if shape and shape.serialization.get('eventstream'):
            return parsed

        # Add ResponseMetadata if it doesn't exist and inject the HTTP
        # status code and headers from the response.
        if isinstance(parsed, dict):
            response_metadata = parsed.get('ResponseMetadata', {})
            response_metadata['HTTPStatusCode'] = response['status_code']
262 |
-
response_metadata['HTTPStatusCode'] = response['status_code']
|
263 |
-
# Ensure that the http header keys are all lower cased. Older
|
264 |
-
# versions of urllib3 (< 1.11) would unintentionally do this for us
|
265 |
-
# (see urllib3#633). We need to do this conversion manually now.
|
266 |
-
headers = response['headers']
|
267 |
-
response_metadata['HTTPHeaders'] = lowercase_dict(headers)
|
268 |
-
parsed['ResponseMetadata'] = response_metadata
|
269 |
-
self._add_checksum_response_metadata(response, response_metadata)
|
270 |
-
return parsed
|
271 |
-
|
272 |
-
def _add_checksum_response_metadata(self, response, response_metadata):
|
273 |
-
checksum_context = response.get('context', {}).get('checksum', {})
|
274 |
-
algorithm = checksum_context.get('response_algorithm')
|
275 |
-
if algorithm:
|
276 |
-
response_metadata['ChecksumAlgorithm'] = algorithm
|
277 |
-
|
278 |
-
def _is_modeled_error_shape(self, shape):
|
279 |
-
return shape is not None and shape.metadata.get('exception', False)
|
280 |
-
|
281 |
-
def _is_generic_error_response(self, response):
|
282 |
-
# There are times when a service will respond with a generic
|
283 |
-
# error response such as:
|
284 |
-
# '<html><body><b>Http/1.1 Service Unavailable</b></body></html>'
|
285 |
-
#
|
286 |
-
# This can also happen if you're going through a proxy.
|
287 |
-
# In this case the protocol specific _do_error_parse will either
|
288 |
-
# fail to parse the response (in the best case) or silently succeed
|
289 |
-
# and treat the HTML above as an XML response and return
|
290 |
-
# non sensical parsed data.
|
291 |
-
# To prevent this case from happening we first need to check
|
292 |
-
# whether or not this response looks like the generic response.
|
293 |
-
if response['status_code'] >= 500:
|
294 |
-
if 'body' not in response or response['body'] is None:
|
295 |
-
return True
|
296 |
-
|
297 |
-
body = response['body'].strip()
|
298 |
-
return body.startswith(b'<html>') or not body
|
299 |
-
|
300 |
-
def _do_generic_error_parse(self, response):
|
301 |
-
# There's not really much we can do when we get a generic
|
302 |
-
# html response.
|
303 |
-
LOG.debug(
|
304 |
-
"Received a non protocol specific error response from the "
|
305 |
-
"service, unable to populate error code and message."
|
306 |
-
)
|
307 |
-
return {
|
308 |
-
'Error': {
|
309 |
-
'Code': str(response['status_code']),
|
310 |
-
'Message': http.client.responses.get(
|
311 |
-
response['status_code'], ''
|
312 |
-
),
|
313 |
-
},
|
314 |
-
'ResponseMetadata': {},
|
315 |
-
}
|
316 |
-
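
    # Illustrative sketch (not in the original source): a proxy-style HTML
    # error is caught by the generic path above. For example:
    #
    #     response = {
    #         'status_code': 503,
    #         'headers': {},
    #         'body': b'<html><body><b>Http/1.1 Service Unavailable'
    #                 b'</b></body></html>',
    #     }
    #     parser.parse(response, shape)
    #     # -> {'Error': {'Code': '503', 'Message': 'Service Unavailable'},
    #     #     'ResponseMetadata': {'HTTPStatusCode': 503,
    #     #                          'HTTPHeaders': {}}}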

    def _do_parse(self, response, shape):
        raise NotImplementedError("%s._do_parse" % self.__class__.__name__)

    def _do_error_parse(self, response, shape):
        raise NotImplementedError(f"{self.__class__.__name__}._do_error_parse")

    def _do_modeled_error_parse(self, response, shape):
        raise NotImplementedError(
            f"{self.__class__.__name__}._do_modeled_error_parse"
        )

    def _parse_shape(self, shape, node):
        handler = getattr(
            self, f'_handle_{shape.type_name}', self._default_handle
        )
        return handler(shape, node)

    def _handle_list(self, shape, node):
        # Enough implementations share list serialization that it's moved
        # up here in the base class.
        parsed = []
        member_shape = shape.member
        for item in node:
            parsed.append(self._parse_shape(member_shape, item))
        return parsed

    def _default_handle(self, shape, value):
        return value

    def _create_event_stream(self, response, shape):
        parser = self._event_stream_parser
        name = response['context'].get('operation_name')
        return EventStream(response['body'], shape, parser, name)

    def _get_first_key(self, value):
        return list(value)[0]

    def _has_unknown_tagged_union_member(self, shape, value):
        if shape.is_tagged_union:
            if len(value) != 1:
                error_msg = (
                    "Invalid service response: %s must have one and only "
                    "one member set."
                )
                raise ResponseParserError(error_msg % shape.name)
            tag = self._get_first_key(value)
            if tag not in shape.members:
                msg = (
                    "Received a tagged union response with member "
                    "unknown to client: %s. Please upgrade SDK for full "
                    "response support."
                )
                LOG.info(msg % tag)
                return True
        return False

    def _handle_unknown_tagged_union_member(self, tag):
        return {'SDK_UNKNOWN_MEMBER': {'name': tag}}
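

# Illustrative sketch (not part of the original module): for a tagged union,
# exactly one member must be set, and unknown members degrade gracefully:
#
#     value = {'NewVariant': {'Spam': 'Eggs'}}
#     if parser._has_unknown_tagged_union_member(shape, value):
#         tag = parser._get_first_key(value)          # 'NewVariant'
#         parsed = parser._handle_unknown_tagged_union_member(tag)
#         # -> {'SDK_UNKNOWN_MEMBER': {'name': 'NewVariant'}}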


class BaseXMLResponseParser(ResponseParser):
    def __init__(self, timestamp_parser=None, blob_parser=None):
        super().__init__(timestamp_parser, blob_parser)
        self._namespace_re = re.compile('{.*}')

    def _handle_map(self, shape, node):
        parsed = {}
        key_shape = shape.key
        value_shape = shape.value
        key_location_name = key_shape.serialization.get('name') or 'key'
        value_location_name = value_shape.serialization.get('name') or 'value'
        if shape.serialization.get('flattened') and not isinstance(node, list):
            node = [node]
        for keyval_node in node:
            for single_pair in keyval_node:
                # Within each <entry> there's a <key> and a <value>
                tag_name = self._node_tag(single_pair)
                if tag_name == key_location_name:
                    key_name = self._parse_shape(key_shape, single_pair)
                elif tag_name == value_location_name:
                    val_name = self._parse_shape(value_shape, single_pair)
                else:
                    raise ResponseParserError("Unknown tag: %s" % tag_name)
            parsed[key_name] = val_name
        return parsed
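
    # Illustrative sketch (not in the original source): with the default
    # location names, a non-flattened map such as
    #
    #     <MyMap>
    #       <entry><key>color</key><value>blue</value></entry>
    #       <entry><key>size</key><value>large</value></entry>
    #     </MyMap>
    #
    # is parsed by _handle_map into {'color': 'blue', 'size': 'large'}.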

    def _node_tag(self, node):
        return self._namespace_re.sub('', node.tag)

    def _handle_list(self, shape, node):
        # When we use _build_name_to_xml_node, repeated elements are aggregated
        # into a list. However, we can't tell the difference between a scalar
        # value and a single element flattened list. So before calling the
        # real _handle_list, we know that "node" should actually be a list if
        # it's flattened, and if it's not, then we make it a one element list.
        if shape.serialization.get('flattened') and not isinstance(node, list):
            node = [node]
        return super()._handle_list(shape, node)

    def _handle_structure(self, shape, node):
        parsed = {}
        members = shape.members
        if shape.metadata.get('exception', False):
            node = self._get_error_root(node)
        xml_dict = self._build_name_to_xml_node(node)
        if self._has_unknown_tagged_union_member(shape, xml_dict):
            tag = self._get_first_key(xml_dict)
            return self._handle_unknown_tagged_union_member(tag)
        for member_name in members:
            member_shape = members[member_name]
            if (
                'location' in member_shape.serialization
                or member_shape.serialization.get('eventheader')
            ):
                # All members with locations have already been handled,
                # so we don't need to parse these members.
                continue
            xml_name = self._member_key_name(member_shape, member_name)
            member_node = xml_dict.get(xml_name)
            if member_node is not None:
                parsed[member_name] = self._parse_shape(
                    member_shape, member_node
                )
            elif member_shape.serialization.get('xmlAttribute'):
                attribs = {}
                location_name = member_shape.serialization['name']
                for key, value in node.attrib.items():
                    new_key = self._namespace_re.sub(
                        location_name.split(':')[0] + ':', key
                    )
                    attribs[new_key] = value
                if location_name in attribs:
                    parsed[member_name] = attribs[location_name]
        return parsed

    def _get_error_root(self, original_root):
        if self._node_tag(original_root) == 'ErrorResponse':
            for child in original_root:
                if self._node_tag(child) == 'Error':
                    return child
        return original_root

    def _member_key_name(self, shape, member_name):
        # This method is needed because we have to special case flattened list
        # with a serialization name. If this is the case we use the
        # locationName from the list's member shape as the key name for the
        # surrounding structure.
        if shape.type_name == 'list' and shape.serialization.get('flattened'):
            list_member_serialized_name = shape.member.serialization.get(
                'name'
            )
            if list_member_serialized_name is not None:
                return list_member_serialized_name
        serialized_name = shape.serialization.get('name')
        if serialized_name is not None:
            return serialized_name
        return member_name

    def _build_name_to_xml_node(self, parent_node):
        # If the parent node is actually a list, we should not be trying
        # to serialize it to a dictionary. Instead, return the first element
        # in the list.
        if isinstance(parent_node, list):
            return self._build_name_to_xml_node(parent_node[0])
        xml_dict = {}
        for item in parent_node:
            key = self._node_tag(item)
            if key in xml_dict:
                # If the key already exists, the most natural
                # way to handle this is to aggregate repeated
                # keys into a single list.
                # <foo>1</foo><foo>2</foo> -> {'foo': [Node(1), Node(2)]}
                if isinstance(xml_dict[key], list):
                    xml_dict[key].append(item)
                else:
                    # Convert from a scalar to a list.
                    xml_dict[key] = [xml_dict[key], item]
            else:
                xml_dict[key] = item
        return xml_dict

    def _parse_xml_string_to_dom(self, xml_string):
        try:
            parser = ETree.XMLParser(
                target=ETree.TreeBuilder(), encoding=self.DEFAULT_ENCODING
            )
            parser.feed(xml_string)
            root = parser.close()
        except XMLParseError as e:
            raise ResponseParserError(
                "Unable to parse response (%s), "
                "invalid XML received. Further retries may succeed:\n%s"
                % (e, xml_string)
            )
        return root

    def _replace_nodes(self, parsed):
        for key, value in parsed.items():
            if list(value):
                sub_dict = self._build_name_to_xml_node(value)
                parsed[key] = self._replace_nodes(sub_dict)
            else:
                parsed[key] = value.text
        return parsed

    @_text_content
    def _handle_boolean(self, shape, text):
        if text == 'true':
            return True
        else:
            return False

    @_text_content
    def _handle_float(self, shape, text):
        return float(text)

    @_text_content
    def _handle_timestamp(self, shape, text):
        return self._timestamp_parser(text)

    @_text_content
    def _handle_integer(self, shape, text):
        return int(text)

    @_text_content
    def _handle_string(self, shape, text):
        return text

    @_text_content
    def _handle_blob(self, shape, text):
        return self._blob_parser(text)

    _handle_character = _handle_string
    _handle_double = _handle_float
    _handle_long = _handle_integer


class QueryParser(BaseXMLResponseParser):
    def _do_error_parse(self, response, shape):
        xml_contents = response['body']
        root = self._parse_xml_string_to_dom(xml_contents)
        parsed = self._build_name_to_xml_node(root)
        self._replace_nodes(parsed)
        # Once we've converted xml->dict, we need to make one or two
        # more adjustments to extract nested errors and to be consistent
        # with ResponseMetadata for non-error responses:
        # 1. {"Errors": {"Error": {...}}} -> {"Error": {...}}
        # 2. {"RequestId": "id"} -> {"ResponseMetadata": {"RequestId": "id"}}
        if 'Errors' in parsed:
            parsed.update(parsed.pop('Errors'))
        if 'RequestId' in parsed:
            parsed['ResponseMetadata'] = {'RequestId': parsed.pop('RequestId')}
        return parsed

    def _do_modeled_error_parse(self, response, shape):
        return self._parse_body_as_xml(response, shape, inject_metadata=False)

    def _do_parse(self, response, shape):
        return self._parse_body_as_xml(response, shape, inject_metadata=True)

    def _parse_body_as_xml(self, response, shape, inject_metadata=True):
        xml_contents = response['body']
        root = self._parse_xml_string_to_dom(xml_contents)
        parsed = {}
        if shape is not None:
            start = root
            if 'resultWrapper' in shape.serialization:
                start = self._find_result_wrapped_shape(
                    shape.serialization['resultWrapper'], root
                )
            parsed = self._parse_shape(shape, start)
        if inject_metadata:
            self._inject_response_metadata(root, parsed)
        return parsed

    def _find_result_wrapped_shape(self, element_name, xml_root_node):
        mapping = self._build_name_to_xml_node(xml_root_node)
        return mapping[element_name]

    def _inject_response_metadata(self, node, inject_into):
        mapping = self._build_name_to_xml_node(node)
        child_node = mapping.get('ResponseMetadata')
        if child_node is not None:
            sub_mapping = self._build_name_to_xml_node(child_node)
            for key, value in sub_mapping.items():
                sub_mapping[key] = value.text
            inject_into['ResponseMetadata'] = sub_mapping
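

# Illustrative sketch (not in the original file): a typical "query" protocol
# error body
#
#     <ErrorResponse>
#       <Error>
#         <Type>Sender</Type>
#         <Code>InvalidParameterValue</Code>
#         <Message>Bad value</Message>
#       </Error>
#       <RequestId>req-123</RequestId>
#     </ErrorResponse>
#
# comes out of QueryParser._do_error_parse as
#
#     {'Error': {'Type': 'Sender', 'Code': 'InvalidParameterValue',
#                'Message': 'Bad value'},
#      'ResponseMetadata': {'RequestId': 'req-123'}}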


class EC2QueryParser(QueryParser):
    def _inject_response_metadata(self, node, inject_into):
        mapping = self._build_name_to_xml_node(node)
        child_node = mapping.get('requestId')
        if child_node is not None:
            inject_into['ResponseMetadata'] = {'RequestId': child_node.text}

    def _do_error_parse(self, response, shape):
        # EC2 errors look like:
        # <Response>
        #   <Errors>
        #     <Error>
        #       <Code>InvalidInstanceID.Malformed</Code>
        #       <Message>Invalid id: "1343124"</Message>
        #     </Error>
        #   </Errors>
        #   <RequestID>12345</RequestID>
        # </Response>
        # This is different from QueryParser in that it's RequestID,
        # not RequestId
        original = super()._do_error_parse(response, shape)
        if 'RequestID' in original:
            original['ResponseMetadata'] = {
                'RequestId': original.pop('RequestID')
            }
        return original

    def _get_error_root(self, original_root):
        for child in original_root:
            if self._node_tag(child) == 'Errors':
                for errors_child in child:
                    if self._node_tag(errors_child) == 'Error':
                        return errors_child
        return original_root


class BaseJSONParser(ResponseParser):
    def _handle_structure(self, shape, value):
        final_parsed = {}
        if shape.is_document_type:
            final_parsed = value
        else:
            member_shapes = shape.members
            if value is None:
                # If the value comes across the wire as "null" (None in
                # python), we should be returning this unchanged, instead
                # of as an empty dict.
                return None
            final_parsed = {}
            if self._has_unknown_tagged_union_member(shape, value):
                tag = self._get_first_key(value)
                return self._handle_unknown_tagged_union_member(tag)
            for member_name in member_shapes:
                member_shape = member_shapes[member_name]
                json_name = member_shape.serialization.get('name', member_name)
                raw_value = value.get(json_name)
                if raw_value is not None:
                    final_parsed[member_name] = self._parse_shape(
                        member_shapes[member_name], raw_value
                    )
        return final_parsed

    def _handle_map(self, shape, value):
        parsed = {}
        key_shape = shape.key
        value_shape = shape.value
        for key, value in value.items():
            actual_key = self._parse_shape(key_shape, key)
            actual_value = self._parse_shape(value_shape, value)
            parsed[actual_key] = actual_value
        return parsed

    def _handle_blob(self, shape, value):
        return self._blob_parser(value)

    def _handle_timestamp(self, shape, value):
        return self._timestamp_parser(value)

    def _do_error_parse(self, response, shape):
        body = self._parse_body_as_json(response['body'])
        error = {"Error": {"Message": '', "Code": ''}, "ResponseMetadata": {}}
        headers = response['headers']
        # Error responses can have slightly different structures for json.
        # The basic structure is:
        #
        # {"__type":"ConnectClientException",
        #  "message":"The error message."}

        # The error message can either come in the 'message' or 'Message' key
        # so we need to check for both.
        error['Error']['Message'] = body.get(
            'message', body.get('Message', '')
        )
        # if the message did not contain an error code
        # include the response status code
        response_code = response.get('status_code')
        # Error response may contain an x-amzn-query-error header for json
        # we need to fetch the error code from this header in that case
        query_error = headers.get('x-amzn-query-error', '')
        query_error_components = query_error.split(';')
        code = None
        if len(query_error_components) == 2 and query_error_components[0]:
            code = query_error_components[0]
            error['Error']['Type'] = query_error_components[1]
        if code is None:
            code = body.get('__type', response_code and str(response_code))
        if code is not None:
            # code has a couple forms as well:
            # * "com.aws.dynamodb.vAPI#ProvisionedThroughputExceededException"
            # * "ResourceNotFoundException"
            if '#' in code:
                code = code.rsplit('#', 1)[1]
            error['Error']['Code'] = code
        self._inject_response_metadata(error, response['headers'])
        return error

    def _inject_response_metadata(self, parsed, headers):
        if 'x-amzn-requestid' in headers:
            parsed.setdefault('ResponseMetadata', {})['RequestId'] = headers[
                'x-amzn-requestid'
            ]

    def _parse_body_as_json(self, body_contents):
        if not body_contents:
            return {}
        body = body_contents.decode(self.DEFAULT_ENCODING)
        try:
            original_parsed = json.loads(body)
            return original_parsed
        except ValueError:
            # if the body cannot be parsed, include
            # the literal string as the message
            return {'message': body}
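

# Illustrative sketch (not part of the original module): for a JSON error
# body
#
#     {"__type": "com.amazon.coral.service#ResourceNotFoundException",
#      "message": "Requested resource not found"}
#
# _do_error_parse strips the namespace prefix at '#' and produces
#
#     {'Error': {'Code': 'ResourceNotFoundException',
#                'Message': 'Requested resource not found'},
#      'ResponseMetadata': {}}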


class BaseEventStreamParser(ResponseParser):
    def _do_parse(self, response, shape):
        final_parsed = {}
        if shape.serialization.get('eventstream'):
            event_type = response['headers'].get(':event-type')
            event_shape = shape.members.get(event_type)
            if event_shape:
                final_parsed[event_type] = self._do_parse(
                    response, event_shape
                )
        else:
            self._parse_non_payload_attrs(
                response, shape, shape.members, final_parsed
            )
            self._parse_payload(response, shape, shape.members, final_parsed)
        return final_parsed

    def _do_error_parse(self, response, shape):
        exception_type = response['headers'].get(':exception-type')
        exception_shape = shape.members.get(exception_type)
        if exception_shape is not None:
            original_parsed = self._initial_body_parse(response['body'])
            body = self._parse_shape(exception_shape, original_parsed)
            error = {
                'Error': {
                    'Code': exception_type,
                    'Message': body.get('Message', body.get('message', '')),
                }
            }
        else:
            error = {
                'Error': {
                    'Code': response['headers'].get(':error-code', ''),
                    'Message': response['headers'].get(':error-message', ''),
                }
            }
        return error

    def _parse_payload(self, response, shape, member_shapes, final_parsed):
        if shape.serialization.get('event'):
            for name in member_shapes:
                member_shape = member_shapes[name]
                if member_shape.serialization.get('eventpayload'):
                    body = response['body']
                    if member_shape.type_name == 'blob':
                        parsed_body = body
                    elif member_shape.type_name == 'string':
                        parsed_body = body.decode(self.DEFAULT_ENCODING)
                    else:
                        raw_parse = self._initial_body_parse(body)
                        parsed_body = self._parse_shape(
                            member_shape, raw_parse
                        )
                    final_parsed[name] = parsed_body
                    return
            # If we didn't find an explicit payload, use the current shape
            original_parsed = self._initial_body_parse(response['body'])
            body_parsed = self._parse_shape(shape, original_parsed)
            final_parsed.update(body_parsed)

    def _parse_non_payload_attrs(
        self, response, shape, member_shapes, final_parsed
    ):
        headers = response['headers']
        for name in member_shapes:
            member_shape = member_shapes[name]
            if member_shape.serialization.get('eventheader'):
                if name in headers:
                    value = headers[name]
                    if member_shape.type_name == 'timestamp':
                        # Event stream timestamps are in milliseconds, so we
                        # divide by 1000 to convert to seconds.
                        value = self._timestamp_parser(value / 1000.0)
                    final_parsed[name] = value

    def _initial_body_parse(self, body_contents):
        # This method should do the initial xml/json parsing of the
        # body. We still need to walk the parsed body in order
        # to convert types, but this method will do the first round
        # of parsing.
        raise NotImplementedError("_initial_body_parse")


class EventStreamJSONParser(BaseEventStreamParser, BaseJSONParser):
    def _initial_body_parse(self, body_contents):
        return self._parse_body_as_json(body_contents)


class EventStreamXMLParser(BaseEventStreamParser, BaseXMLResponseParser):
    def _initial_body_parse(self, xml_string):
        if not xml_string:
            return ETree.Element('')
        return self._parse_xml_string_to_dom(xml_string)


class JSONParser(BaseJSONParser):
    """Response parser for the "json" protocol."""

    EVENT_STREAM_PARSER_CLS = EventStreamJSONParser

    def _do_parse(self, response, shape):
        parsed = {}
        if shape is not None:
            event_name = shape.event_stream_name
            if event_name:
                parsed = self._handle_event_stream(response, shape, event_name)
            else:
                parsed = self._handle_json_body(response['body'], shape)
        self._inject_response_metadata(parsed, response['headers'])
        return parsed

    def _do_modeled_error_parse(self, response, shape):
        return self._handle_json_body(response['body'], shape)

    def _handle_event_stream(self, response, shape, event_name):
        event_stream_shape = shape.members[event_name]
        event_stream = self._create_event_stream(response, event_stream_shape)
        try:
            event = event_stream.get_initial_response()
        except NoInitialResponseError:
            error_msg = 'First event was not of type initial-response'
            raise ResponseParserError(error_msg)
        parsed = self._handle_json_body(event.payload, shape)
        parsed[event_name] = event_stream
        return parsed

    def _handle_json_body(self, raw_body, shape):
        # The json.loads() gives us the primitive JSON types, but we need
        # to traverse the parsed JSON data to convert to richer types
        # (blobs, timestamps, etc.)
        parsed_json = self._parse_body_as_json(raw_body)
        return self._parse_shape(shape, parsed_json)
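

# Illustrative sketch (not part of the original file; ``output_shape`` is a
# hypothetical operation output shape from a loaded service model). A
# successful "json" protocol response is a straight body parse plus
# request-id injection:
#
#     response = {
#         'status_code': 200,
#         'headers': {'x-amzn-requestid': 'req-123'},
#         'body': b'{"TableNames": ["foo", "bar"]}',
#     }
#     parsed = JSONParser().parse(response, output_shape)
#     # parsed['ResponseMetadata']['RequestId'] == 'req-123'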


class BaseRestParser(ResponseParser):
    def _do_parse(self, response, shape):
        final_parsed = {}
        final_parsed['ResponseMetadata'] = self._populate_response_metadata(
            response
        )
        self._add_modeled_parse(response, shape, final_parsed)
        return final_parsed

    def _add_modeled_parse(self, response, shape, final_parsed):
        if shape is None:
            return final_parsed
        member_shapes = shape.members
        self._parse_non_payload_attrs(
            response, shape, member_shapes, final_parsed
        )
        self._parse_payload(response, shape, member_shapes, final_parsed)

    def _do_modeled_error_parse(self, response, shape):
        final_parsed = {}
        self._add_modeled_parse(response, shape, final_parsed)
        return final_parsed

    def _populate_response_metadata(self, response):
        metadata = {}
        headers = response['headers']
        if 'x-amzn-requestid' in headers:
            metadata['RequestId'] = headers['x-amzn-requestid']
        elif 'x-amz-request-id' in headers:
            metadata['RequestId'] = headers['x-amz-request-id']
            # HostId is what it's called whenever this value is returned
            # in an XML response body, so to be consistent, we'll always
            # call it HostId.
            metadata['HostId'] = headers.get('x-amz-id-2', '')
        return metadata

    def _parse_payload(self, response, shape, member_shapes, final_parsed):
        if 'payload' in shape.serialization:
            # If a payload is specified in the output shape, then only that
            # shape is used for the body payload.
            payload_member_name = shape.serialization['payload']
            body_shape = member_shapes[payload_member_name]
            if body_shape.serialization.get('eventstream'):
                body = self._create_event_stream(response, body_shape)
                final_parsed[payload_member_name] = body
            elif body_shape.type_name in ['string', 'blob']:
                # This is a stream
                body = response['body']
                if isinstance(body, bytes):
                    body = body.decode(self.DEFAULT_ENCODING)
                final_parsed[payload_member_name] = body
            else:
                original_parsed = self._initial_body_parse(response['body'])
                final_parsed[payload_member_name] = self._parse_shape(
                    body_shape, original_parsed
                )
        else:
            original_parsed = self._initial_body_parse(response['body'])
            body_parsed = self._parse_shape(shape, original_parsed)
            final_parsed.update(body_parsed)

    def _parse_non_payload_attrs(
        self, response, shape, member_shapes, final_parsed
    ):
        headers = response['headers']
        for name in member_shapes:
            member_shape = member_shapes[name]
            location = member_shape.serialization.get('location')
            if location is None:
                continue
            elif location == 'statusCode':
                final_parsed[name] = self._parse_shape(
                    member_shape, response['status_code']
                )
            elif location == 'headers':
                final_parsed[name] = self._parse_header_map(
                    member_shape, headers
                )
            elif location == 'header':
                header_name = member_shape.serialization.get('name', name)
                if header_name in headers:
                    final_parsed[name] = self._parse_shape(
                        member_shape, headers[header_name]
                    )

    def _parse_header_map(self, shape, headers):
        # Note that headers are case insensitive, so we .lower()
        # all header names and header prefixes.
        parsed = {}
        prefix = shape.serialization.get('name', '').lower()
        for header_name in headers:
            if header_name.lower().startswith(prefix):
                # The key name inserted into the parsed hash
                # strips off the prefix.
                name = header_name[len(prefix):]
                parsed[name] = headers[header_name]
        return parsed
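
    # Illustrative sketch (not in the original source): with a map shape
    # serialized as a header prefix, e.g. name='x-amz-meta-',
    #
    #     headers = {'x-amz-meta-color': 'blue', 'content-length': '12'}
    #     self._parse_header_map(shape, headers)  # -> {'color': 'blue'}
    #
    # only the prefixed headers survive, with the prefix stripped.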

    def _initial_body_parse(self, body_contents):
        # This method should do the initial xml/json parsing of the
        # body. We still need to walk the parsed body in order
        # to convert types, but this method will do the first round
        # of parsing.
        raise NotImplementedError("_initial_body_parse")

    def _handle_string(self, shape, value):
        parsed = value
        if is_json_value_header(shape):
            decoded = base64.b64decode(value).decode(self.DEFAULT_ENCODING)
            parsed = json.loads(decoded)
        return parsed

    def _handle_list(self, shape, node):
        location = shape.serialization.get('location')
        if location == 'header' and not isinstance(node, list):
            # List in headers may be a comma separated string as per RFC7230
            node = [e.strip() for e in node.split(',')]
        return super()._handle_list(shape, node)


class RestJSONParser(BaseRestParser, BaseJSONParser):

    EVENT_STREAM_PARSER_CLS = EventStreamJSONParser

    def _initial_body_parse(self, body_contents):
        return self._parse_body_as_json(body_contents)

    def _do_error_parse(self, response, shape):
        error = super()._do_error_parse(response, shape)
        self._inject_error_code(error, response)
        return error

    def _inject_error_code(self, error, response):
        # The "Code" value can come from either a response
        # header or a value in the JSON body.
        body = self._initial_body_parse(response['body'])
        if 'x-amzn-errortype' in response['headers']:
            code = response['headers']['x-amzn-errortype']
            # Could be:
            # x-amzn-errortype: ValidationException:
            code = code.split(':')[0]
            error['Error']['Code'] = code
        elif 'code' in body or 'Code' in body:
            error['Error']['Code'] = body.get('code', body.get('Code', ''))
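
    # Illustrative sketch (not part of the original module): the header can
    # carry trailing metadata after a colon, which is discarded:
    #
    #     code = 'ValidationException:http://internal/'
    #     code.split(':')[0]  # -> 'ValidationException'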

    def _handle_integer(self, shape, value):
        return int(value)

    _handle_long = _handle_integer


class RestXMLParser(BaseRestParser, BaseXMLResponseParser):

    EVENT_STREAM_PARSER_CLS = EventStreamXMLParser

    def _initial_body_parse(self, xml_string):
        if not xml_string:
            return ETree.Element('')
        return self._parse_xml_string_to_dom(xml_string)

    def _do_error_parse(self, response, shape):
        # We're trying to be service agnostic here, but S3 does have a slightly
        # different response structure for its errors compared to other
        # rest-xml services (route53/cloudfront). We handle this by just
        # trying to parse both forms.
        # First:
        # <ErrorResponse xmlns="...">
        #   <Error>
        #     <Type>Sender</Type>
        #     <Code>InvalidInput</Code>
        #     <Message>Invalid resource type: foo</Message>
        #   </Error>
        #   <RequestId>request-id</RequestId>
        # </ErrorResponse>
        if response['body']:
            # If the body ends up being invalid xml, the xml parser should not
            # blow up. It should at least try to pull information about the
            # error response from other sources like the HTTP status code.
            try:
                return self._parse_error_from_body(response)
            except ResponseParserError:
                LOG.debug(
                    'Exception caught when parsing error response body:',
                    exc_info=True,
                )
        return self._parse_error_from_http_status(response)

    def _parse_error_from_http_status(self, response):
        return {
            'Error': {
                'Code': str(response['status_code']),
                'Message': http.client.responses.get(
                    response['status_code'], ''
                ),
            },
            'ResponseMetadata': {
                'RequestId': response['headers'].get('x-amz-request-id', ''),
                'HostId': response['headers'].get('x-amz-id-2', ''),
            },
        }

    def _parse_error_from_body(self, response):
        xml_contents = response['body']
        root = self._parse_xml_string_to_dom(xml_contents)
        parsed = self._build_name_to_xml_node(root)
        self._replace_nodes(parsed)
        if root.tag == 'Error':
            # This is an S3 error response. First we'll populate the
            # response metadata.
            metadata = self._populate_response_metadata(response)
            # The RequestId and the HostId are already in the
            # ResponseMetadata, but are also duplicated in the XML
            # body. We don't need these values in both places,
            # we'll just remove them from the parsed XML body.
            parsed.pop('RequestId', '')
            parsed.pop('HostId', '')
            return {'Error': parsed, 'ResponseMetadata': metadata}
        elif 'RequestId' in parsed:
            # Other rest-xml services:
            parsed['ResponseMetadata'] = {'RequestId': parsed.pop('RequestId')}
        default = {'Error': {'Message': '', 'Code': ''}}
        merge_dicts(default, parsed)
        return default

    @_text_content
    def _handle_string(self, shape, text):
        text = super()._handle_string(shape, text)
        return text


PROTOCOL_PARSERS = {
    'ec2': EC2QueryParser,
    'query': QueryParser,
    'json': JSONParser,
    'rest-json': RestJSONParser,
    'rest-xml': RestXMLParser,
}
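

# Illustrative sketch (not in the original file): the factory resolves a
# protocol name through this table, so the two calls below are equivalent:
#
#     ResponseParserFactory().create_parser('rest-xml')
#     PROTOCOL_PARSERS['rest-xml']()   # -> RestXMLParser instance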


spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/tomli/__init__.py
DELETED
@@ -1,11 +0,0 @@
# SPDX-License-Identifier: MIT
# SPDX-FileCopyrightText: 2021 Taneli Hukkinen
# Licensed to PSF under a Contributor Agreement.

__all__ = ("loads", "load", "TOMLDecodeError")
__version__ = "2.0.1"  # DO NOT EDIT THIS LINE MANUALLY. LET bump2version UTILITY DO IT

from ._parser import TOMLDecodeError, load, loads

# Pretend this exception was created here.
TOMLDecodeError.__module__ = __name__
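
# A small usage sketch (illustrative, not part of the original file):
#
#     import tomli
#     tomli.loads('name = "pip"\nanswer = 42')
#     # -> {'name': 'pip', 'answer': 42}
#     with open('pyproject.toml', 'rb') as f:  # load() requires binary mode
#         data = tomli.load(f)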


spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/configs/quick_schedules/README.md
DELETED
@@ -1 +0,0 @@
These are quick configs for performance or accuracy regression tracking purposes.


spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/DensePose/tests/test_structures.py
DELETED
@@ -1,25 +0,0 @@
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.

import unittest

from densepose.structures import normalized_coords_transform


class TestStructures(unittest.TestCase):
    def test_normalized_coords_transform(self):
        bbox = (32, 24, 288, 216)
        x0, y0, w, h = bbox
        xmin, ymin, xmax, ymax = x0, y0, x0 + w, y0 + h
        f = normalized_coords_transform(*bbox)
        # Top-left
        expected_p, actual_p = (-1, -1), f((xmin, ymin))
        self.assertEqual(expected_p, actual_p)
        # Top-right
        expected_p, actual_p = (1, -1), f((xmax, ymin))
        self.assertEqual(expected_p, actual_p)
        # Bottom-left
        expected_p, actual_p = (-1, 1), f((xmin, ymax))
        self.assertEqual(expected_p, actual_p)
        # Bottom-right
        expected_p, actual_p = (1, 1), f((xmax, ymax))
        self.assertEqual(expected_p, actual_p)
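
# Illustrative note (not part of the original test): the corner checks above
# pin down an affine map of pixel coordinates into [-1, 1] x [-1, 1], i.e.
# f((x, y)) = (2 * (x - x0) / w - 1, 2 * (y - y0) / h - 1); for the bbox
# above, the center (176, 132) maps to (0, 0).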


spaces/CVPR/GFPGAN-example/README.md
DELETED
@@ -1,46 +0,0 @@
---
title: GFPGAN Example
emoji: 🚀
colorFrom: red
colorTo: indigo
sdk: gradio
app_file: app.py
pinned: false
license: apache-2.0
---

# Configuration

`title`: _string_
Display title for the Space

`emoji`: _string_
Space emoji (emoji-only character allowed)

`colorFrom`: _string_
Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)

`colorTo`: _string_
Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)

`sdk`: _string_
Can be either `gradio`, `streamlit`, or `static`

`sdk_version`: _string_
Only applicable for `streamlit` SDK.
See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.

`app_file`: _string_
Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code).
Path is relative to the root of the repository.

`models`: _List[string]_
HF model IDs (like "gpt2" or "deepset/roberta-base-squad2") used in the Space.
Will be parsed automatically from your code if not specified here.

`datasets`: _List[string]_
HF dataset IDs (like "common_voice" or "oscar-corpus/OSCAR-2109") used in the Space.
Will be parsed automatically from your code if not specified here.

`pinned`: _boolean_
Whether the Space stays on top of your list.


spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/fill.h
DELETED
@@ -1,23 +0,0 @@
/*
 *  Copyright 2008-2013 NVIDIA Corporation
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

#pragma once

#include <thrust/detail/config.h>

// this system inherits fill
#include <thrust/system/cpp/detail/fill.h>


spaces/CVPR/MonoScene/monoscene/config.py
DELETED
@@ -1,26 +0,0 @@
from transformers import PretrainedConfig


class MonoSceneConfig(PretrainedConfig):
    def __init__(
        self,
        dataset="kitti",
        n_classes=20,
        feature=64,
        project_scale=2,
        full_scene_size=(256, 256, 32),
        **kwargs,
    ):
        self.dataset = dataset
        self.n_classes = n_classes
        self.feature = feature
        self.project_scale = project_scale
        self.full_scene_size = full_scene_size
        super().__init__(**kwargs)
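
# A small usage sketch (illustrative, not part of the original file; the
# "NYU" dataset name and scene size are made-up example values):
#
#     config = MonoSceneConfig(dataset="NYU", full_scene_size=(60, 36, 60))
#     config.n_classes        # -> 20 (default)
#     config.full_scene_size  # -> (60, 36, 60)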


spaces/CVPR/WALT/walt/datasets/pipelines/auto_augment.py
DELETED
@@ -1,890 +0,0 @@
import copy

import cv2
import mmcv
import numpy as np

from ..builder import PIPELINES
from .compose import Compose

_MAX_LEVEL = 10


def level_to_value(level, max_value):
    """Map from level to values based on max_value."""
    return (level / _MAX_LEVEL) * max_value


def enhance_level_to_value(level, a=1.8, b=0.1):
    """Map from level to values."""
    return (level / _MAX_LEVEL) * a + b


def random_negative(value, random_negative_prob):
    """Randomly negate value based on random_negative_prob."""
    return -value if np.random.rand() < random_negative_prob else value
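

# Illustrative check (not in the original file) of the two level mappings:
#
#     level_to_value(10, max_value=0.3)  # -> 0.3 (full magnitude)
#     level_to_value(5, max_value=0.3)   # -> 0.15
#     enhance_level_to_value(5)          # -> (5 / 10) * 1.8 + 0.1 == 1.0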


def bbox2fields():
    """The key correspondence from bboxes to labels, masks and
    segmentations."""
    bbox2label = {
        'gt_bboxes': 'gt_labels',
        'gt_bboxes_ignore': 'gt_labels_ignore'
    }
    bbox2mask = {
        'gt_bboxes': 'gt_masks',
        'gt_bboxes_ignore': 'gt_masks_ignore'
    }
    bbox2seg = {
        'gt_bboxes': 'gt_semantic_seg',
    }
    return bbox2label, bbox2mask, bbox2seg


@PIPELINES.register_module()
class AutoAugment(object):
    """Auto augmentation.

    This data augmentation is proposed in `Learning Data Augmentation
    Strategies for Object Detection <https://arxiv.org/pdf/1906.11172>`_.

    TODO: Implement 'Shear', 'Sharpness' and 'Rotate' transforms

    Args:
        policies (list[list[dict]]): The policies of auto augmentation. Each
            policy in ``policies`` is a specific augmentation policy, and is
            composed by several augmentations (dict). When AutoAugment is
            called, a random policy in ``policies`` will be selected to
            augment images.

    Examples:
        >>> replace = (104, 116, 124)
        >>> policies = [
        >>>     [
        >>>         dict(type='Sharpness', prob=0.0, level=8),
        >>>         dict(
        >>>             type='Shear',
        >>>             prob=0.4,
        >>>             level=0,
        >>>             replace=replace,
        >>>             axis='x')
        >>>     ],
        >>>     [
        >>>         dict(
        >>>             type='Rotate',
        >>>             prob=0.6,
        >>>             level=10,
        >>>             replace=replace),
        >>>         dict(type='Color', prob=1.0, level=6)
        >>>     ]
        >>> ]
        >>> augmentation = AutoAugment(policies)
        >>> img = np.ones(100, 100, 3)
        >>> gt_bboxes = np.ones(10, 4)
        >>> results = dict(img=img, gt_bboxes=gt_bboxes)
        >>> results = augmentation(results)
    """

    def __init__(self, policies):
        assert isinstance(policies, list) and len(policies) > 0, \
            'Policies must be a non-empty list.'
        for policy in policies:
            assert isinstance(policy, list) and len(policy) > 0, \
                'Each policy in policies must be a non-empty list.'
            for augment in policy:
                assert isinstance(augment, dict) and 'type' in augment, \
                    'Each specific augmentation must be a dict with key' \
                    ' "type".'

        self.policies = copy.deepcopy(policies)
        self.transforms = [Compose(policy) for policy in self.policies]

    def __call__(self, results):
        transform = np.random.choice(self.transforms)
        return transform(results)

    def __repr__(self):
        return f'{self.__class__.__name__}(policies={self.policies})'


@PIPELINES.register_module()
class Shear(object):
    """Apply Shear Transformation to image (and its corresponding bbox, mask,
    segmentation).

    Args:
        level (int | float): The level should be in range [0,_MAX_LEVEL].
        img_fill_val (int | float | tuple): The filled values for image border.
            If float, the same fill value will be used for all the three
            channels of image. If tuple, it should be 3 elements.
        seg_ignore_label (int): The fill value used for segmentation map.
            Note this value must equals ``ignore_label`` in ``semantic_head``
            of the corresponding config. Default 255.
        prob (float): The probability for performing Shear and should be in
            range [0, 1].
        direction (str): The direction for shear, either "horizontal"
            or "vertical".
        max_shear_magnitude (float): The maximum magnitude for Shear
            transformation.
        random_negative_prob (float): The probability that turns the
            offset negative. Should be in range [0,1]
        interpolation (str): Same as in :func:`mmcv.imshear`.
    """

    def __init__(self,
                 level,
                 img_fill_val=128,
                 seg_ignore_label=255,
                 prob=0.5,
                 direction='horizontal',
                 max_shear_magnitude=0.3,
                 random_negative_prob=0.5,
                 interpolation='bilinear'):
        assert isinstance(level, (int, float)), 'The level must be type ' \
            f'int or float, got {type(level)}.'
        assert 0 <= level <= _MAX_LEVEL, 'The level should be in range ' \
            f'[0,{_MAX_LEVEL}], got {level}.'
        if isinstance(img_fill_val, (float, int)):
            img_fill_val = tuple([float(img_fill_val)] * 3)
        elif isinstance(img_fill_val, tuple):
            assert len(img_fill_val) == 3, 'img_fill_val as tuple must ' \
                f'have 3 elements. got {len(img_fill_val)}.'
            img_fill_val = tuple([float(val) for val in img_fill_val])
        else:
            raise ValueError(
                'img_fill_val must be float or tuple with 3 elements.')
        assert np.all([0 <= val <= 255 for val in img_fill_val]), 'all ' \
            'elements of img_fill_val should be in range [0, 255], ' \
            f'got {img_fill_val}.'
        assert 0 <= prob <= 1.0, 'The probability of shear should be in ' \
            f'range [0,1]. got {prob}.'
        assert direction in ('horizontal', 'vertical'), 'direction must ' \
            f'be either "horizontal" or "vertical". got {direction}.'
        assert isinstance(max_shear_magnitude, float), 'max_shear_magnitude ' \
            f'should be type float. got {type(max_shear_magnitude)}.'
        assert 0. <= max_shear_magnitude <= 1., 'By default, ' \
            'max_shear_magnitude should be in range [0,1]. ' \
            f'got {max_shear_magnitude}.'
        self.level = level
        self.magnitude = level_to_value(level, max_shear_magnitude)
        self.img_fill_val = img_fill_val
        self.seg_ignore_label = seg_ignore_label
        self.prob = prob
        self.direction = direction
        self.max_shear_magnitude = max_shear_magnitude
        self.random_negative_prob = random_negative_prob
        self.interpolation = interpolation

    def _shear_img(self,
                   results,
                   magnitude,
                   direction='horizontal',
                   interpolation='bilinear'):
        """Shear the image.

        Args:
            results (dict): Result dict from loading pipeline.
            magnitude (int | float): The magnitude used for shear.
            direction (str): The direction for shear, either "horizontal"
                or "vertical".
            interpolation (str): Same as in :func:`mmcv.imshear`.
        """
        for key in results.get('img_fields', ['img']):
            img = results[key]
            img_sheared = mmcv.imshear(
                img,
                magnitude,
                direction,
                border_value=self.img_fill_val,
                interpolation=interpolation)
            results[key] = img_sheared.astype(img.dtype)

    def _shear_bboxes(self, results, magnitude):
        """Shear the bboxes."""
        h, w, c = results['img_shape']
        if self.direction == 'horizontal':
            shear_matrix = np.stack([[1, magnitude],
                                     [0, 1]]).astype(np.float32)  # [2, 2]
        else:
            shear_matrix = np.stack([[1, 0], [magnitude,
                                              1]]).astype(np.float32)
        for key in results.get('bbox_fields', []):
            min_x, min_y, max_x, max_y = np.split(
                results[key], results[key].shape[-1], axis=-1)
            coordinates = np.stack([[min_x, min_y], [max_x, min_y],
                                    [min_x, max_y],
                                    [max_x, max_y]])  # [4, 2, nb_box, 1]
            coordinates = coordinates[..., 0].transpose(
                (2, 1, 0)).astype(np.float32)  # [nb_box, 2, 4]
            new_coords = np.matmul(shear_matrix[None, :, :],
                                   coordinates)  # [nb_box, 2, 4]
            min_x = np.min(new_coords[:, 0, :], axis=-1)
            min_y = np.min(new_coords[:, 1, :], axis=-1)
            max_x = np.max(new_coords[:, 0, :], axis=-1)
            max_y = np.max(new_coords[:, 1, :], axis=-1)
            min_x = np.clip(min_x, a_min=0, a_max=w)
            min_y = np.clip(min_y, a_min=0, a_max=h)
            max_x = np.clip(max_x, a_min=min_x, a_max=w)
            max_y = np.clip(max_y, a_min=min_y, a_max=h)
            results[key] = np.stack([min_x, min_y, max_x, max_y],
                                    axis=-1).astype(results[key].dtype)

    def _shear_masks(self,
                     results,
                     magnitude,
                     direction='horizontal',
                     fill_val=0,
                     interpolation='bilinear'):
        """Shear the masks."""
        h, w, c = results['img_shape']
        for key in results.get('mask_fields', []):
            masks = results[key]
            results[key] = masks.shear((h, w),
                                       magnitude,
                                       direction,
                                       border_value=fill_val,
                                       interpolation=interpolation)

    def _shear_seg(self,
                   results,
                   magnitude,
                   direction='horizontal',
                   fill_val=255,
                   interpolation='bilinear'):
        """Shear the segmentation maps."""
        for key in results.get('seg_fields', []):
            seg = results[key]
            results[key] = mmcv.imshear(
                seg,
                magnitude,
                direction,
                border_value=fill_val,
                interpolation=interpolation).astype(seg.dtype)

    def _filter_invalid(self, results, min_bbox_size=0):
        """Filter bboxes and corresponding masks too small after shear
        augmentation."""
        bbox2label, bbox2mask, _ = bbox2fields()
        for key in results.get('bbox_fields', []):
            bbox_w = results[key][:, 2] - results[key][:, 0]
            bbox_h = results[key][:, 3] - results[key][:, 1]
            valid_inds = (bbox_w > min_bbox_size) & (bbox_h > min_bbox_size)
            valid_inds = np.nonzero(valid_inds)[0]
            results[key] = results[key][valid_inds]
            # label fields. e.g. gt_labels and gt_labels_ignore
            label_key = bbox2label.get(key)
            if label_key in results:
                results[label_key] = results[label_key][valid_inds]
|
279 |
-
# mask fields, e.g. gt_masks and gt_masks_ignore
|
280 |
-
mask_key = bbox2mask.get(key)
|
281 |
-
if mask_key in results:
|
282 |
-
results[mask_key] = results[mask_key][valid_inds]
|
283 |
-
|
284 |
-
def __call__(self, results):
|
285 |
-
"""Call function to shear images, bounding boxes, masks and semantic
|
286 |
-
segmentation maps.
|
287 |
-
|
288 |
-
Args:
|
289 |
-
results (dict): Result dict from loading pipeline.
|
290 |
-
|
291 |
-
Returns:
|
292 |
-
dict: Sheared results.
|
293 |
-
"""
|
294 |
-
if np.random.rand() > self.prob:
|
295 |
-
return results
|
296 |
-
magnitude = random_negative(self.magnitude, self.random_negative_prob)
|
297 |
-
self._shear_img(results, magnitude, self.direction, self.interpolation)
|
298 |
-
self._shear_bboxes(results, magnitude)
|
299 |
-
# fill_val set to 0 for background of mask.
|
300 |
-
self._shear_masks(
|
301 |
-
results,
|
302 |
-
magnitude,
|
303 |
-
self.direction,
|
304 |
-
fill_val=0,
|
305 |
-
interpolation=self.interpolation)
|
306 |
-
self._shear_seg(
|
307 |
-
results,
|
308 |
-
magnitude,
|
309 |
-
self.direction,
|
310 |
-
fill_val=self.seg_ignore_label,
|
311 |
-
interpolation=self.interpolation)
|
312 |
-
self._filter_invalid(results)
|
313 |
-
return results
|
314 |
-
|
315 |
-
def __repr__(self):
|
316 |
-
repr_str = self.__class__.__name__
|
317 |
-
repr_str += f'(level={self.level}, '
|
318 |
-
repr_str += f'img_fill_val={self.img_fill_val}, '
|
319 |
-
repr_str += f'seg_ignore_label={self.seg_ignore_label}, '
|
320 |
-
repr_str += f'prob={self.prob}, '
|
321 |
-
repr_str += f'direction={self.direction}, '
|
322 |
-
repr_str += f'max_shear_magnitude={self.max_shear_magnitude}, '
|
323 |
-
repr_str += f'random_negative_prob={self.random_negative_prob}, '
|
324 |
-
repr_str += f'interpolation={self.interpolation})'
|
325 |
-
return repr_str
|
326 |
-
|
327 |
-
|
328 |
-
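For context, a minimal sketch of how `Shear` would typically be wired into an mmdet-style training pipeline. The surrounding transforms and keys here are hypothetical and only illustrate how a registered pipeline module is configured by name:

```python
# Hypothetical pipeline snippet: `Shear` is looked up in the PIPELINES
# registry by its class name, so it is configured as a plain dict.
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='Shear',
        level=5,                  # mapped to a magnitude via level_to_value()
        prob=0.5,                 # apply the shear half of the time
        direction='horizontal',
        max_shear_magnitude=0.3),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
```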
@PIPELINES.register_module()
class Rotate(object):
    """Apply Rotate Transformation to image (and its corresponding bbox, mask,
    segmentation).

    Args:
        level (int | float): The level should be in range [0,_MAX_LEVEL].
        scale (int | float): Isotropic scale factor. Same as in
            ``mmcv.imrotate``.
        center (int | float | tuple[float]): Center point (w, h) of the
            rotation in the source image. If None, the center of the
            image will be used. Same as in ``mmcv.imrotate``.
        img_fill_val (int | float | tuple): The fill value for image border.
            If float, the same value will be used for all the three
            channels of image. If tuple, it should have 3 elements (i.e.
            equal to the number of channels for image).
        seg_ignore_label (int): The fill value used for segmentation map.
            Note this value must equal ``ignore_label`` in ``semantic_head``
            of the corresponding config. Default 255.
        prob (float): The probability of performing the transformation,
            which should be in range [0, 1].
        max_rotate_angle (int | float): The maximum angle for the rotate
            transformation.
        random_negative_prob (float): The probability that turns the
            offset negative.
    """

    def __init__(self,
                 level,
                 scale=1,
                 center=None,
                 img_fill_val=128,
                 seg_ignore_label=255,
                 prob=0.5,
                 max_rotate_angle=30,
                 random_negative_prob=0.5):
        assert isinstance(level, (int, float)), \
            f'The level must be type int or float. got {type(level)}.'
        assert 0 <= level <= _MAX_LEVEL, \
            f'The level should be in range [0,{_MAX_LEVEL}]. got {level}.'
        assert isinstance(scale, (int, float)), \
            f'The scale must be type int or float. got type {type(scale)}.'
        if isinstance(center, (int, float)):
            center = (center, center)
        elif isinstance(center, tuple):
            assert len(center) == 2, 'center with type tuple must have '\
                f'2 elements. got {len(center)} elements.'
        else:
            assert center is None, 'center must be None or type int, '\
                f'float or tuple, got type {type(center)}.'
        if isinstance(img_fill_val, (float, int)):
            img_fill_val = tuple([float(img_fill_val)] * 3)
        elif isinstance(img_fill_val, tuple):
            assert len(img_fill_val) == 3, 'img_fill_val as tuple must '\
                f'have 3 elements. got {len(img_fill_val)}.'
            img_fill_val = tuple([float(val) for val in img_fill_val])
        else:
            raise ValueError(
                'img_fill_val must be float or tuple with 3 elements.')
        assert np.all([0 <= val <= 255 for val in img_fill_val]), \
            'all elements of img_fill_val should be within range [0,255]. '\
            f'got {img_fill_val}.'
        assert 0 <= prob <= 1.0, 'The probability should be in range [0,1]. '\
            f'got {prob}.'
        assert isinstance(max_rotate_angle, (int, float)), 'max_rotate_angle '\
            f'should be type int or float. got type {type(max_rotate_angle)}.'
        self.level = level
        self.scale = scale
        # Rotation angle in degrees. Positive values mean
        # clockwise rotation.
        self.angle = level_to_value(level, max_rotate_angle)
        self.center = center
        self.img_fill_val = img_fill_val
        self.seg_ignore_label = seg_ignore_label
        self.prob = prob
        self.max_rotate_angle = max_rotate_angle
        self.random_negative_prob = random_negative_prob

    def _rotate_img(self, results, angle, center=None, scale=1.0):
        """Rotate the image.

        Args:
            results (dict): Result dict from loading pipeline.
            angle (float): Rotation angle in degrees, positive values
                mean clockwise rotation. Same as in ``mmcv.imrotate``.
            center (tuple[float], optional): Center point (w, h) of the
                rotation. Same as in ``mmcv.imrotate``.
            scale (int | float): Isotropic scale factor. Same as in
                ``mmcv.imrotate``.
        """
        for key in results.get('img_fields', ['img']):
            img = results[key].copy()
            img_rotated = mmcv.imrotate(
                img, angle, center, scale, border_value=self.img_fill_val)
            results[key] = img_rotated.astype(img.dtype)

    def _rotate_bboxes(self, results, rotate_matrix):
        """Rotate the bboxes."""
        h, w, c = results['img_shape']
        for key in results.get('bbox_fields', []):
            min_x, min_y, max_x, max_y = np.split(
                results[key], results[key].shape[-1], axis=-1)
            coordinates = np.stack([[min_x, min_y], [max_x, min_y],
                                    [min_x, max_y],
                                    [max_x, max_y]])  # [4, 2, nb_bbox, 1]
            # pad 1 to convert from format [x, y] to homogeneous
            # coordinates format [x, y, 1]
            coordinates = np.concatenate(
                (coordinates,
                 np.ones((4, 1, coordinates.shape[2], 1), coordinates.dtype)),
                axis=1)  # [4, 3, nb_bbox, 1]
            coordinates = coordinates.transpose(
                (2, 0, 1, 3))  # [nb_bbox, 4, 3, 1]
            rotated_coords = np.matmul(rotate_matrix,
                                       coordinates)  # [nb_bbox, 4, 2, 1]
            rotated_coords = rotated_coords[..., 0]  # [nb_bbox, 4, 2]
            min_x, min_y = np.min(
                rotated_coords[:, :, 0], axis=1), np.min(
                    rotated_coords[:, :, 1], axis=1)
            max_x, max_y = np.max(
                rotated_coords[:, :, 0], axis=1), np.max(
                    rotated_coords[:, :, 1], axis=1)
            min_x, min_y = np.clip(
                min_x, a_min=0, a_max=w), np.clip(
                    min_y, a_min=0, a_max=h)
            max_x, max_y = np.clip(
                max_x, a_min=min_x, a_max=w), np.clip(
                    max_y, a_min=min_y, a_max=h)
            results[key] = np.stack([min_x, min_y, max_x, max_y],
                                    axis=-1).astype(results[key].dtype)

    def _rotate_masks(self,
                      results,
                      angle,
                      center=None,
                      scale=1.0,
                      fill_val=0):
        """Rotate the masks."""
        h, w, c = results['img_shape']
        for key in results.get('mask_fields', []):
            masks = results[key]
            results[key] = masks.rotate((h, w), angle, center, scale, fill_val)

    def _rotate_seg(self,
                    results,
                    angle,
                    center=None,
                    scale=1.0,
                    fill_val=255):
        """Rotate the segmentation map."""
        for key in results.get('seg_fields', []):
            seg = results[key].copy()
            results[key] = mmcv.imrotate(
                seg, angle, center, scale,
                border_value=fill_val).astype(seg.dtype)

    def _filter_invalid(self, results, min_bbox_size=0):
        """Filter bboxes and corresponding masks that are too small after
        rotate augmentation."""
        bbox2label, bbox2mask, _ = bbox2fields()
        for key in results.get('bbox_fields', []):
            bbox_w = results[key][:, 2] - results[key][:, 0]
            bbox_h = results[key][:, 3] - results[key][:, 1]
            valid_inds = (bbox_w > min_bbox_size) & (bbox_h > min_bbox_size)
            valid_inds = np.nonzero(valid_inds)[0]
            results[key] = results[key][valid_inds]
            # label fields. e.g. gt_labels and gt_labels_ignore
            label_key = bbox2label.get(key)
            if label_key in results:
                results[label_key] = results[label_key][valid_inds]
            # mask fields, e.g. gt_masks and gt_masks_ignore
            mask_key = bbox2mask.get(key)
            if mask_key in results:
                results[mask_key] = results[mask_key][valid_inds]

    def __call__(self, results):
        """Call function to rotate images, bounding boxes, masks and semantic
        segmentation maps.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Rotated results.
        """
        if np.random.rand() > self.prob:
            return results
        h, w = results['img'].shape[:2]
        center = self.center
        if center is None:
            center = ((w - 1) * 0.5, (h - 1) * 0.5)
        angle = random_negative(self.angle, self.random_negative_prob)
        self._rotate_img(results, angle, center, self.scale)
        rotate_matrix = cv2.getRotationMatrix2D(center, -angle, self.scale)
        self._rotate_bboxes(results, rotate_matrix)
        self._rotate_masks(results, angle, center, self.scale, fill_val=0)
        self._rotate_seg(
            results, angle, center, self.scale, fill_val=self.seg_ignore_label)
        self._filter_invalid(results)
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(level={self.level}, '
        repr_str += f'scale={self.scale}, '
        repr_str += f'center={self.center}, '
        repr_str += f'img_fill_val={self.img_fill_val}, '
        repr_str += f'seg_ignore_label={self.seg_ignore_label}, '
        repr_str += f'prob={self.prob}, '
        repr_str += f'max_rotate_angle={self.max_rotate_angle}, '
        repr_str += f'random_negative_prob={self.random_negative_prob})'
        return repr_str

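As a standalone illustration of the corner-point math in `_rotate_bboxes`, here is a minimal, self-contained sketch (plain NumPy/OpenCV, outside the pipeline) that rotates one box's four corners and re-fits an axis-aligned box. The sample box and image size are made up:

```python
import cv2
import numpy as np

h, w = 100, 200                         # hypothetical image size
box = np.array([40., 20., 120., 60.])   # x1, y1, x2, y2

# Same convention as the transform: cv2 treats counter-clockwise as
# positive, so a clockwise `angle` is negated when building the matrix.
angle, center, scale = 15, ((w - 1) * 0.5, (h - 1) * 0.5), 1.0
matrix = cv2.getRotationMatrix2D(center, -angle, scale)  # shape [2, 3]

# Four corners in homogeneous coordinates [x, y, 1].
x1, y1, x2, y2 = box
corners = np.array([[x1, y1, 1], [x2, y1, 1], [x1, y2, 1], [x2, y2, 1]]).T
rotated = matrix @ corners              # [2, 4] rotated corner coordinates

# Re-fit an axis-aligned box and clip it to the image, as the transform does.
new_box = np.array([
    np.clip(rotated[0].min(), 0, w),
    np.clip(rotated[1].min(), 0, h),
    np.clip(rotated[0].max(), 0, w),
    np.clip(rotated[1].max(), 0, h),
])
print(new_box)
```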
@PIPELINES.register_module()
class Translate(object):
    """Translate the images, bboxes, masks and segmentation maps horizontally
    or vertically.

    Args:
        level (int | float): The level for Translate and should be in
            range [0,_MAX_LEVEL].
        prob (float): The probability for performing translation and
            should be in range [0, 1].
        img_fill_val (int | float | tuple): The filled value for image
            border. If float, the same fill value will be used for all
            the three channels of image. If tuple, it should have 3
            elements (i.e. equal to the number of channels for image).
        seg_ignore_label (int): The fill value used for segmentation map.
            Note this value must equal ``ignore_label`` in ``semantic_head``
            of the corresponding config. Default 255.
        direction (str): The translate direction, either "horizontal"
            or "vertical".
        max_translate_offset (int | float): The maximum pixel offset for
            Translate.
        random_negative_prob (float): The probability that turns the
            offset negative.
        min_size (int | float): The minimum bbox size (in pixels) for
            filtering invalid bboxes after the translation.
    """

    def __init__(self,
                 level,
                 prob=0.5,
                 img_fill_val=128,
                 seg_ignore_label=255,
                 direction='horizontal',
                 max_translate_offset=250.,
                 random_negative_prob=0.5,
                 min_size=0):
        assert isinstance(level, (int, float)), \
            'The level must be type int or float.'
        assert 0 <= level <= _MAX_LEVEL, \
            'The level used for calculating Translate\'s offset should be ' \
            'in range [0,_MAX_LEVEL]'
        assert 0 <= prob <= 1.0, \
            'The probability of translation should be in range [0, 1].'
        if isinstance(img_fill_val, (float, int)):
            img_fill_val = tuple([float(img_fill_val)] * 3)
        elif isinstance(img_fill_val, tuple):
            assert len(img_fill_val) == 3, \
                'img_fill_val as tuple must have 3 elements.'
            img_fill_val = tuple([float(val) for val in img_fill_val])
        else:
            raise ValueError('img_fill_val must be type float or tuple.')
        assert np.all([0 <= val <= 255 for val in img_fill_val]), \
            'all elements of img_fill_val should be within range [0,255].'
        assert direction in ('horizontal', 'vertical'), \
            'direction should be "horizontal" or "vertical".'
        assert isinstance(max_translate_offset, (int, float)), \
            'The max_translate_offset must be type int or float.'
        # the offset used for translation
        self.offset = int(level_to_value(level, max_translate_offset))
        self.level = level
        self.prob = prob
        self.img_fill_val = img_fill_val
        self.seg_ignore_label = seg_ignore_label
        self.direction = direction
        self.max_translate_offset = max_translate_offset
        self.random_negative_prob = random_negative_prob
        self.min_size = min_size

    def _translate_img(self, results, offset, direction='horizontal'):
        """Translate the image.

        Args:
            results (dict): Result dict from loading pipeline.
            offset (int | float): The offset for translate.
            direction (str): The translate direction, either "horizontal"
                or "vertical".
        """
        for key in results.get('img_fields', ['img']):
            img = results[key].copy()
            results[key] = mmcv.imtranslate(
                img, offset, direction, self.img_fill_val).astype(img.dtype)

    def _translate_bboxes(self, results, offset):
        """Shift bboxes horizontally or vertically, according to offset."""
        h, w, c = results['img_shape']
        for key in results.get('bbox_fields', []):
            min_x, min_y, max_x, max_y = np.split(
                results[key], results[key].shape[-1], axis=-1)
            if self.direction == 'horizontal':
                min_x = np.maximum(0, min_x + offset)
                max_x = np.minimum(w, max_x + offset)
            elif self.direction == 'vertical':
                min_y = np.maximum(0, min_y + offset)
                max_y = np.minimum(h, max_y + offset)

            # the boxes translated outside of image will be filtered along
            # with the corresponding masks, by invoking ``_filter_invalid``.
            results[key] = np.concatenate([min_x, min_y, max_x, max_y],
                                          axis=-1)

    def _translate_masks(self,
                         results,
                         offset,
                         direction='horizontal',
                         fill_val=0):
        """Translate masks horizontally or vertically."""
        h, w, c = results['img_shape']
        for key in results.get('mask_fields', []):
            masks = results[key]
            results[key] = masks.translate((h, w), offset, direction, fill_val)

    def _translate_seg(self,
                       results,
                       offset,
                       direction='horizontal',
                       fill_val=255):
        """Translate segmentation maps horizontally or vertically."""
        for key in results.get('seg_fields', []):
            seg = results[key].copy()
            results[key] = mmcv.imtranslate(seg, offset, direction,
                                            fill_val).astype(seg.dtype)

    def _filter_invalid(self, results, min_size=0):
        """Filter bboxes and masks too small or translated out of image."""
        bbox2label, bbox2mask, _ = bbox2fields()
        for key in results.get('bbox_fields', []):
            bbox_w = results[key][:, 2] - results[key][:, 0]
            bbox_h = results[key][:, 3] - results[key][:, 1]
            valid_inds = (bbox_w > min_size) & (bbox_h > min_size)
            valid_inds = np.nonzero(valid_inds)[0]
            results[key] = results[key][valid_inds]
            # label fields. e.g. gt_labels and gt_labels_ignore
            label_key = bbox2label.get(key)
            if label_key in results:
                results[label_key] = results[label_key][valid_inds]
            # mask fields, e.g. gt_masks and gt_masks_ignore
            mask_key = bbox2mask.get(key)
            if mask_key in results:
                results[mask_key] = results[mask_key][valid_inds]
        return results

    def __call__(self, results):
        """Call function to translate images, bounding boxes, masks and
        semantic segmentation maps.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Translated results.
        """
        if np.random.rand() > self.prob:
            return results
        offset = random_negative(self.offset, self.random_negative_prob)
        self._translate_img(results, offset, self.direction)
        self._translate_bboxes(results, offset)
        # fill_val defaults to 0 for BitmapMasks and None for PolygonMasks.
        self._translate_masks(results, offset, self.direction)
        # fill_val set to ``seg_ignore_label`` for the ignored value
        # of segmentation map.
        self._translate_seg(
            results, offset, self.direction, fill_val=self.seg_ignore_label)
        self._filter_invalid(results, min_size=self.min_size)
        return results

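A small sketch of how `level` becomes a pixel offset and how the sign is randomized. `level_to_value` and `random_negative` are defined earlier in this module (outside this diff), so the bodies below are paraphrased assumptions, not the verbatim helpers:

```python
import numpy as np

_MAX_LEVEL = 10  # module-level constant shared by the level-based transforms


def level_to_value(level, max_value):
    """Paraphrase of the helper used above: map level in [0, _MAX_LEVEL]
    linearly onto [0, max_value]."""
    return (level / _MAX_LEVEL) * max_value


def random_negative(value, random_negative_prob):
    """Paraphrase: flip the sign with probability `random_negative_prob`."""
    return -value if np.random.rand() < random_negative_prob else value


# For Translate(level=5, max_translate_offset=250.), the stored offset is:
offset = int(level_to_value(5, 250.))   # -> 125 pixels
offset = random_negative(offset, 0.5)   # sign randomized on each call
print(offset)
```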
@PIPELINES.register_module()
class ColorTransform(object):
    """Apply Color transformation to image. The bboxes, masks, and
    segmentations are not modified.

    Args:
        level (int | float): Should be in range [0,_MAX_LEVEL].
        prob (float): The probability for performing Color transformation.
    """

    def __init__(self, level, prob=0.5):
        assert isinstance(level, (int, float)), \
            'The level must be type int or float.'
        assert 0 <= level <= _MAX_LEVEL, \
            'The level should be in range [0,_MAX_LEVEL].'
        assert 0 <= prob <= 1.0, \
            'The probability should be in range [0,1].'
        self.level = level
        self.prob = prob
        self.factor = enhance_level_to_value(level)

    def _adjust_color_img(self, results, factor=1.0):
        """Apply Color transformation to image."""
        for key in results.get('img_fields', ['img']):
            # NOTE: by default the image is assumed to be in BGR format
            img = results[key]
            results[key] = mmcv.adjust_color(img, factor).astype(img.dtype)

    def __call__(self, results):
        """Call function for Color transformation.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Colored results.
        """
        if np.random.rand() > self.prob:
            return results
        self._adjust_color_img(results, self.factor)
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(level={self.level}, '
        repr_str += f'prob={self.prob})'
        return repr_str

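`enhance_level_to_value`, defined earlier in the module (outside this diff), maps the level to a PIL-style enhancement factor, where 1.0 is the identity. A sketch under that assumption:

```python
_MAX_LEVEL = 10


def enhance_level_to_value(level, a=1.8, b=0.1):
    """Paraphrase of the helper used by the enhancement transforms:
    level 0 -> factor 0.1, level _MAX_LEVEL -> factor 1.9, and the
    midpoint of the range lands on factor 1.0 (the identity)."""
    return (level / _MAX_LEVEL) * a + b


factor = enhance_level_to_value(5)  # -> 1.0, i.e. no visible change
print(factor)
```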
@PIPELINES.register_module()
class EqualizeTransform(object):
    """Apply Equalize transformation to image. The bboxes, masks and
    segmentations are not modified.

    Args:
        prob (float): The probability for performing Equalize transformation.
    """

    def __init__(self, prob=0.5):
        assert 0 <= prob <= 1.0, \
            'The probability should be in range [0,1].'
        self.prob = prob

    def _imequalize(self, results):
        """Equalizes the histogram of one image."""
        for key in results.get('img_fields', ['img']):
            img = results[key]
            results[key] = mmcv.imequalize(img).astype(img.dtype)

    def __call__(self, results):
        """Call function for Equalize transformation.

        Args:
            results (dict): Results dict from loading pipeline.

        Returns:
            dict: Results after the transformation.
        """
        if np.random.rand() > self.prob:
            return results
        self._imequalize(results)
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(prob={self.prob})'
        return repr_str

@PIPELINES.register_module()
class BrightnessTransform(object):
    """Apply Brightness transformation to image. The bboxes, masks and
    segmentations are not modified.

    Args:
        level (int | float): Should be in range [0,_MAX_LEVEL].
        prob (float): The probability for performing Brightness
            transformation.
    """

    def __init__(self, level, prob=0.5):
        assert isinstance(level, (int, float)), \
            'The level must be type int or float.'
        assert 0 <= level <= _MAX_LEVEL, \
            'The level should be in range [0,_MAX_LEVEL].'
        assert 0 <= prob <= 1.0, \
            'The probability should be in range [0,1].'
        self.level = level
        self.prob = prob
        self.factor = enhance_level_to_value(level)

    def _adjust_brightness_img(self, results, factor=1.0):
        """Adjust the brightness of image."""
        for key in results.get('img_fields', ['img']):
            img = results[key]
            results[key] = mmcv.adjust_brightness(img,
                                                  factor).astype(img.dtype)

    def __call__(self, results):
        """Call function for Brightness transformation.

        Args:
            results (dict): Results dict from loading pipeline.

        Returns:
            dict: Results after the transformation.
        """
        if np.random.rand() > self.prob:
            return results
        self._adjust_brightness_img(results, self.factor)
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(level={self.level}, '
        repr_str += f'prob={self.prob})'
        return repr_str

@PIPELINES.register_module()
class ContrastTransform(object):
    """Apply Contrast transformation to image. The bboxes, masks and
    segmentations are not modified.

    Args:
        level (int | float): Should be in range [0,_MAX_LEVEL].
        prob (float): The probability for performing Contrast transformation.
    """

    def __init__(self, level, prob=0.5):
        assert isinstance(level, (int, float)), \
            'The level must be type int or float.'
        assert 0 <= level <= _MAX_LEVEL, \
            'The level should be in range [0,_MAX_LEVEL].'
        assert 0 <= prob <= 1.0, \
            'The probability should be in range [0,1].'
        self.level = level
        self.prob = prob
        self.factor = enhance_level_to_value(level)

    def _adjust_contrast_img(self, results, factor=1.0):
        """Adjust the image contrast."""
        for key in results.get('img_fields', ['img']):
            img = results[key]
            results[key] = mmcv.adjust_contrast(img, factor).astype(img.dtype)

    def __call__(self, results):
        """Call function for Contrast transformation.

        Args:
            results (dict): Results dict from loading pipeline.

        Returns:
            dict: Results after the transformation.
        """
        if np.random.rand() > self.prob:
            return results
        self._adjust_contrast_img(results, self.factor)
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(level={self.level}, '
        repr_str += f'prob={self.prob})'
        return repr_str
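Taken together, the four photometric transforms above slot into AutoAugment-style policies. A hypothetical policy list in the same config style; the levels and pairings are illustrative, not a published AutoAugment policy:

```python
# Each inner list is one candidate sub-policy; AutoAugment picks one at
# random per image and applies its transforms in order.
policies = [
    [dict(type='EqualizeTransform', prob=0.8),
     dict(type='BrightnessTransform', level=6, prob=0.6)],
    [dict(type='ColorTransform', level=4, prob=0.5),
     dict(type='ContrastTransform', level=8, prob=0.5)],
]
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='AutoAugment', policies=policies),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
```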
spaces/CVPR/lama-example/app.py
DELETED
@@ -1,42 +0,0 @@
import os

# Fetch and unpack the pretrained big-lama checkpoint.
os.system("gdown https://drive.google.com/uc?id=1-95IOJ-2y9BtmABiffIwndPqNZD_gLnV")
os.system("unzip big-lama.zip")

import cv2
import paddlehub as hub
import gradio as gr
import torch
from PIL import Image, ImageOps
import numpy as np

os.mkdir("data")
os.mkdir("dataout")
model = hub.Module(name='U2Net')


def infer(img, mask, option):
    img = ImageOps.contain(img, (700, 700))
    width, height = img.size
    img.save("./data/data.png")
    if option == "automatic (U2net)":
        # Let U2Net segment the foreground and use its mask for inpainting.
        result = model.Segmentation(
            images=[cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)],
            paths=None,
            batch_size=1,
            input_size=320,
            output_dir='output',
            visualization=True)
        im = Image.fromarray(result[0]['mask'])
    else:
        mask = mask.resize((width, height))
        im = mask
    im.save("./data/data_mask.png")
    os.system('python predict.py model.path=/home/user/app/big-lama/ indir=/home/user/app/data/ outdir=/home/user/app/dataout/ device=cpu')
    return "./dataout/data_mask.png", im


inputs = [
    gr.inputs.Image(type='pil', label="Original Image"),
    gr.inputs.Image(type='pil', source="canvas", label="Mask", invert_colors=True),
    gr.inputs.Radio(choices=["automatic (U2net)", "manual"], type="value",
                    default="manual", label="Masking option"),
]
outputs = [
    gr.outputs.Image(type="file", label="output"),
    gr.outputs.Image(type="pil", label="Mask"),
]
title = "LaMa Image Inpainting Example"
description = "Gradio demo for LaMa: Resolution-robust Large Mask Inpainting with Fourier Convolutions. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below. Masks are generated by U^2net."
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2109.07161' target='_blank'>Resolution-robust Large Mask Inpainting with Fourier Convolutions</a> | <a href='https://github.com/saic-mdal/lama' target='_blank'>Github Repo</a></p><center><img src='https://visitor-badge.glitch.me/badge?page_id=cvpr_lama' alt='visitor badge'></center>"
examples = [
    ['person512.png', "canvas.png", "automatic (U2net)"],
    ['person512.png', "maskexam.png", "manual"],
]
gr.Interface(infer, inputs, outputs, title=title, description=description,
             article=article, examples=examples).launch(enable_queue=True, cache_examples=True)
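The `predict.py` call above relies on the input convention this app encodes by hand: each image in `indir` is paired with a mask whose filename carries a `_mask` suffix, which is why the app saves `data.png` alongside `data_mask.png`. A minimal sketch of preparing such a pair manually; the file names and mask rectangle are placeholders, and the suffix pairing is an assumption drawn from how this app names its files:

```python
from PIL import Image, ImageDraw

img = Image.open("person512.png").convert("RGB")
mask = Image.new("L", img.size, 0)                 # black = keep
ImageDraw.Draw(mask).rectangle([100, 100, 300, 300], fill=255)  # white = inpaint

img.save("data/photo.png")
mask.save("data/photo_mask.png")   # `_mask` suffix pairs the mask with photo.png
```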
spaces/CVPR/lama-example/models/ade20k/utils.py
DELETED
@@ -1,40 +0,0 @@
"""Modified from https://github.com/CSAILVision/semantic-segmentation-pytorch"""

import os
import sys

import numpy as np
import torch

try:
    from urllib import urlretrieve
except ImportError:
    from urllib.request import urlretrieve


def load_url(url, model_dir='./pretrained', map_location=None):
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)
    filename = url.split('/')[-1]
    cached_file = os.path.join(model_dir, filename)
    if not os.path.exists(cached_file):
        sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file))
        urlretrieve(url, cached_file)
    return torch.load(cached_file, map_location=map_location)


def color_encode(labelmap, colors, mode='RGB'):
    labelmap = labelmap.astype('int')
    labelmap_rgb = np.zeros((labelmap.shape[0], labelmap.shape[1], 3),
                            dtype=np.uint8)
    for label in np.unique(labelmap):
        if label < 0:
            continue
        labelmap_rgb += (labelmap == label)[:, :, np.newaxis] * \
            np.tile(colors[label],
                    (labelmap.shape[0], labelmap.shape[1], 1))

    if mode == 'BGR':
        return labelmap_rgb[:, :, ::-1]
    else:
        return labelmap_rgb
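A quick sketch of `color_encode` on synthetic data; the 3-class palette is made up, and the import path assumes the module above is importable as `utils`:

```python
import numpy as np

from utils import color_encode  # the module defined above

labelmap = np.array([[0, 1], [2, 0]])            # 2x2 map with 3 classes
colors = np.array([[255, 0, 0],                  # hypothetical palette:
                   [0, 255, 0],                  # class index -> RGB color
                   [0, 0, 255]], dtype=np.uint8)
rgb = color_encode(labelmap, colors)             # -> (2, 2, 3) uint8 image
print(rgb[0, 0], rgb[0, 1])                      # [255 0 0] [0 255 0]
```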
spaces/CikeyQI/meme-api/meme_generator/memes/capoo_rip/__init__.py
DELETED
@@ -1,59 +0,0 @@
from pathlib import Path
from typing import List

from pil_utils import BuildImage

from meme_generator import add_meme
from meme_generator.utils import save_gif

img_dir = Path(__file__).parent / "images"


def capoo_rip(images: List[BuildImage], texts, args):
    img = images[0].convert("RGBA").resize((150, 100), keep_ratio=True)
    img_left = img.crop((0, 0, 75, 100))
    img_right = img.crop((75, 0, 150, 100))
    params1 = [
        [(61, 196), ((140, 68), (0, 59), (33, 0), (165, 8))],
        [(63, 196), ((136, 68), (0, 59), (29, 0), (158, 13))],
        [(62, 195), ((137, 72), (0, 58), (27, 0), (167, 11))],
        [(95, 152), ((0, 8), (155, 0), (163, 107), (13, 112))],
        [(108, 129), ((0, 6), (128, 0), (136, 113), (10, 117))],
        [(84, 160), ((0, 6), (184, 0), (190, 90), (10, 97))],
    ]
    params2 = [
        (
            [(78, 158), ((0, 3), (86, 0), (97, 106), (16, 106))],
            [(195, 156), ((0, 4), (82, 0), (85, 106), (15, 110))],
        ),
        (
            [(89, 156), ((0, 0), (80, 0), (94, 100), (14, 100))],
            [(192, 151), ((0, 7), (79, 3), (82, 107), (11, 112))],
        ),
    ]
    raw_frames = [BuildImage.open(img_dir / f"{i}.png") for i in range(8)]
    for i in range(6):
        pos, points = params1[i]
        raw_frames[i].paste(img.perspective(points), pos, below=True)
    for i in range(2):
        (pos1, points1), (pos2, points2) = params2[i]
        raw_frames[i + 6].paste(img_left.perspective(points1), pos1, below=True)
        raw_frames[i + 6].paste(img_right.perspective(points2), pos2, below=True)

    new_frames: List[BuildImage] = []
    for i in range(3):
        new_frames += raw_frames[0:3]
    new_frames += raw_frames[3:]
    new_frames.append(raw_frames[-1])

    frames = [frame.image for frame in new_frames]
    return save_gif(frames, 0.1)


add_meme(
    "capoo_rip",
    capoo_rip,
    min_images=1,
    max_images=1,
    keywords=["咖波撕"],
)
spaces/CofAI/chat.b4/g4f/Provider/__init__.py
DELETED
@@ -1,33 +0,0 @@
from . import Provider
from .Providers import (
    Aichat,
    Ails,
    Bard,
    Better,
    Bing,
    ChatgptAi,
    ChatgptLogin,
    DeepAi,
    Easychat,
    Ezcht,
    Fakeopen,
    Forefront,
    GetGpt,
    Gravityengine,
    H2o,
    hteyun,
    Liaobots,
    Lockchat,
    Mishalsgpt,
    Phind,
    Theb,
    Vercel,
    Weuseing,
    Xiaor,
    Yqcloud,
    You,
    Zeabur
)

Palm = Bard
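For orientation, a sketch of how these providers are typically selected in g4f-style forks of that era. The exact call signature varied between forks, so treat this as an assumption rather than this repo's confirmed API:

```python
import g4f

# Hypothetical usage: pick a provider explicitly instead of letting the
# library fall back through its provider list.
response = g4f.ChatCompletion.create(
    model='gpt-3.5-turbo',
    provider=g4f.Provider.DeepAi,
    messages=[{'role': 'user', 'content': 'Hello'}],
)
print(response)
```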
spaces/CofAI/picscore1/style.css
DELETED
@@ -1,28 +0,0 @@
body {
  padding: 2rem;
  font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif;
}

h1 {
  font-size: 16px;
  margin-top: 0;
}

p {
  color: rgb(107, 114, 128);
  font-size: 15px;
  margin-bottom: 10px;
  margin-top: 5px;
}

.card {
  max-width: 620px;
  margin: 0 auto;
  padding: 16px;
  border: 1px solid lightgray;
  border-radius: 16px;
}

.card p:last-child {
  margin-bottom: 0;
}
spaces/Cong723/gpt-academic-public/docs/self_analysis.md
DELETED
@@ -1,256 +0,0 @@
|
|
1 |
-
# chatgpt-academic项目自译解报告
|
2 |
-
(Author补充:以下分析均由本项目调用ChatGPT一键生成,如果有不准确的地方,全怪GPT😄)
|
3 |
-
|
4 |
-
## 对程序的整体功能和构架做出概括。然后用一张markdown表格整理每个文件的功能。
|
5 |
-
|
6 |
-
整体概括:
|
7 |
-
|
8 |
-
该程序是一个基于自然语言处理和机器学习的科学论文辅助工具,主要功能包括聊天机器人、批量总结PDF文档、批量翻译PDF文档、生成函数注释、解析项目源代码等。程序基于 Gradio 构建 Web 服务,并集成了代理和自动更新功能,提高了用户的使用体验。
|
9 |
-
|
10 |
-
文件功能表格:
|
11 |
-
|
12 |
-
| 文件名 | 文件功能 |
|
13 |
-
| --- | --- |
|
14 |
-
| check_proxy.py | 用于检查代理的正确性和可用性 |
|
15 |
-
| colorful.py | 包含不同预设置颜色的常量,并用于多种UI元素 |
|
16 |
-
| config.py | 用于全局配置的类 |
|
17 |
-
| config_private.py | 与config.py文件一起使用的另一个配置文件,用于更改私密信息 |
|
18 |
-
| core_functional.py | 包含一些TextFunctional类和基础功能函数 |
|
19 |
-
| crazy_functional.py | 包含大量高级功能函数和实验性的功能函数 |
|
20 |
-
| main.py | 程序的主入口,包含GUI主窗口和主要的UI管理功能 |
|
21 |
-
| theme.py | 包含一些预设置主题的颜色 |
|
22 |
-
| toolbox.py | 提供了一些有用的工具函数 |
|
23 |
-
| crazy_functions\crazy_utils.py | 包含一些用于实现高级功能的辅助函数 |
|
24 |
-
| crazy_functions\Latex全文润色.py | 实现了对LaTeX文件中全文的润色和格式化功能 |
|
25 |
-
| crazy_functions\Latex全文翻译.py | 实现了对LaTeX文件中的内容进行翻译的功能 |
|
26 |
-
| crazy_functions\_\_init\_\_.py | 用于导入crazy_functional.py中的功能函数 |
|
27 |
-
| crazy_functions\下载arxiv论文翻译摘要.py | 从Arxiv上下载论文并提取重要信息 |
|
28 |
-
| crazy_functions\代码重写为全英文_多线程.py | 针对中文Python文件,将其翻译为全英文 |
|
29 |
-
| crazy_functions\总结word文档.py | 提取Word文件的重要内容来生成摘要 |
|
30 |
-
| crazy_functions\批量Markdown翻译.py | 批量翻译Markdown文件 |
|
31 |
-
| crazy_functions\批量总结PDF文档.py | 批量从PDF文件中提取摘要 |
|
32 |
-
| crazy_functions\批量总结PDF文档pdfminer.py | 批量从PDF文件中提取摘要 |
|
33 |
-
| crazy_functions\批量翻译PDF文档_多线程.py | 批量翻译PDF文件 |
|
34 |
-
| crazy_functions\理解PDF文档内容.py | 批量分析PDF文件并提取摘要 |
|
35 |
-
| crazy_functions\生成函数注释.py | 自动生成Python文件中函数的注释 |
|
36 |
-
| crazy_functions\解析项目源代码.py | 解析并分析给定项目的源代码 |
|
37 |
-
| crazy_functions\询问多个大语言模型.py | 向多个大语言模型询问输入文本并进行处理 |
|
38 |
-
| crazy_functions\读文献写摘要.py | 根据用户输入读取文献内容并生成摘要 |
|
39 |
-
| crazy_functions\谷歌检索小助手.py | 利用谷歌学术检索用户提供的论文信息并提取相关信息 |
|
40 |
-
| crazy_functions\高级功能函数模板.py | 实现高级功能的模板函数 |
|
41 |
-
| request_llm\bridge_all.py | 处理与LLM的交互 |
|
42 |
-
| request_llm\bridge_chatglm.py | 使用ChatGLM模型进行聊天 |
|
43 |
-
| request_llm\bridge_chatgpt.py | 实现对话生成的各项功能 |
|
44 |
-
| request_llm\bridge_tgui.py | 在Websockets中与用户进行交互并生成文本输出 |
|
45 |
-
|
46 |
-
|
47 |
-
|
48 |
-
## [0/31] 请对下面的程序文件做一个概述: H:\chatgpt_academic_resolve\check_proxy.py
|
49 |
-
|
50 |
-
该文件主要包括四个函数:check_proxy、backup_and_download、patch_and_restart 和 auto_update。其中,check_proxy 函数用于检查代理是否可用;backup_and_download 用于进行一键更新备份和下载;patch_and_restart 是一键更新协议的重要函数,用于覆盖和重启;auto_update 函数用于查询版本和用户意见,并自动进行一键更新。该文件主要使用了 requests、json、shutil、zipfile、distutils、subprocess 等 Python 标准库和 toolbox 和 colorful 两个第三方库。
|
51 |
-
|
52 |
-
## [1/31] 请对下面的程序文件做一个概述: H:\chatgpt_academic_resolve\colorful.py
|
53 |
-
|
54 |
-
该程序文件实现了一些打印文本的函数,使其具有不同的颜色输出。当系统为Linux时直接跳过,否则使用colorama库来实现颜色输出。程序提供了深色和亮色两种颜色输出方式,同时也提供了对打印函数的别名。对于不是终端输出的情况,对所有的打印函数进行重复定义,以便在重定向时能够避免打印错误日志。
|
55 |
-
|
56 |
-
## [2/31] 请对下面的程序文件做一个概述: H:\chatgpt_academic_resolve\config.py
|
57 |
-
|
58 |
-
该程序文件是一个配置文件,其主要功能是提供使用API密钥等信息,以及对程序的体验进行优化,例如定义对话框高度、布局等。还包含一些其他的设置,例如设置并行使用的线程数、重试次数限制等等。
|
59 |
-
|
60 |
-
## [3/31] 请对下面的程序文件做一个概述: H:\chatgpt_academic_resolve\config_private.py
|
61 |
-
|
62 |
-
这是一个名为config_private.py的Python文件,它用于配置API_KEY和代理信息。API_KEY是一个私密密钥,用于访���某些受保护的API。USE_PROXY变量设置为True以应用代理,proxies变量配置了代理网络的地址和协议。在使用该文件时,需要填写正确的API_KEY和代理信息。
|
63 |
-
|
64 |
-
## [4/31] 请对下面的程序文件做一个概述: H:\chatgpt_academic_resolve\core_functional.py
|
65 |
-
|
66 |
-
该文件是一个Python模块,名为"core_functional.py"。模块中定义了一个字典,包含了各种核心功能的配置信息,如英语学术润色、中文学术润色、查找语法错误等。每个功能都包含一些前言和后语,在前言中描述了该功能的任务和要求,在后语中提供一些附加信息。此外,有些功能还定义了一些特定的处理函数和按钮颜色。
|
67 |
-
|
68 |
-
## [5/31] 请对下面的程序文件做一个概述: H:\chatgpt_academic_resolve\crazy_functional.py
|
69 |
-
|
70 |
-
这是一个Python程序文件,文件名是crazy_functional.py。它导入了一个名为HotReload的工具箱,并定义了一个名为get_crazy_functions()的函数。这个函数包括三个部分的插件组,分别是已经编写完成的第一组插件、已经测试但距离完美状态还差一点点的第二组插件和尚未充分测试的第三组插件。每个插件都有一个名称、一个按钮颜色、一个函数和一个是否加入下拉菜单中的标志位。这些插件提供了多种功能,包括生成函数注释、解析项目源代码、批量翻译PDF文档、谷歌检索、PDF文档内容理解和Latex文档的全文润色、翻译等功能。其中第三组插件可能还存在一定的bug。
|
71 |
-
|
72 |
-
## [6/31] 请对下面的程序文件做一个概述: H:\chatgpt_academic_resolve\main.py
|
73 |
-
|
74 |
-
该Python脚本代码实现了一个用于交互式对话的Chatbot机器人。它使用了Gradio框架来构建一个Web界面,并在此基础之上嵌入了一个文本输入框和与Chatbot进行交互的其他控件,包括提交、重置、停止和清除按钮、选择框和滑块等。此外,它还包括了一些类和函数和一些用于编程分析的工具和方法。整个程序文件的结构清晰,注释丰富,并提供了很多技术细节,使得开发者可以很容易地在其基础上进行二次开发、修改、扩展和集成。
|
75 |
-
|
76 |
-
## [7/31] 请对下面的程序文件做一个概述: H:\chatgpt_academic_resolve\theme.py
|
77 |
-
|
78 |
-
该程序文件名为theme.py,主要功能为调节Gradio的全局样式。在该文件中,调节了Gradio的主题颜色、字体、阴影、边框、渐变等等样式。同时,该文件还添加了一些高级CSS样式,比如调整表格单元格的背景和边框,设定聊天气泡的圆角、最大宽度和阴影等等。如果CODE_HIGHLIGHT为True,则还进行了代码高亮显示。
|
79 |
-
|
80 |
-
## [8/31] 请对下面的程序文件做一个概述: H:\chatgpt_academic_resolve\toolbox.py
|
81 |
-
|
82 |
-
这是一个名为`toolbox.py`的源代码文件。该文件包含了一系列工具函数和装饰器,用于聊天Bot的开发和调试。其中有一些功能包括将输入参数进行重组、捕捉函数中的异常并记录到历史记录中、生成Markdown格式的聊天记录报告等。该文件中还包含了一些与转换Markdown文本相关的函数。
|
83 |
-
|
84 |
-
## [9/31] 请对下面的程序文件做一个概述: H:\chatgpt_academic_resolve\crazy_functions\crazy_utils.py
|
85 |
-
|
86 |
-
这是一个Python程序文件 `crazy_utils.py`,它包含了两个函数:
|
87 |
-
|
88 |
-
- `input_clipping(inputs, history, max_token_limit)`:这个函数接收三个参数,inputs 是一个字符串,history 是一个列表,max_token_limit 是一个整数。它使用 `tiktoken` 、`numpy` 和 `toolbox` 模块,处理输入文本和历史记录,将其裁剪到指定的最大标记数,避免输入过长导致的性能问题。如果 inputs 长度不超过 max_token_limit 的一半,则只裁剪历史;否则,同时裁剪输入和历史。
|
89 |
-
- `request_gpt_model_in_new_thread_with_ui_alive(inputs, inputs_show_user, llm_kwargs, chatbot, history, sys_prompt, refresh_interval=0.2, handle_token_exceed=True, retry_times_at_unknown_error=2)`:这个函数接收八个参数,其中后三个是列表类型,其他为标量或句柄等。它提供对话窗口和刷新控制,执行 `predict_no_ui_long_connection` 方法,将输入数据发送至 GPT 模型并获取结果,如果子任务出错,返回相应的错误信息,否则返回结果。
|
90 |
-
|
91 |
-
## [10/31] 请对下面的程序文件做一个概述: H:\chatgpt_academic_resolve\crazy_functions\Latex全文润色.py
|
92 |
-
|
93 |
-
这是一个名为"crazy_functions\Latex全文润色.py"的程序文件,其中包含了两个函数"Latex英文润色"和"Latex中文润色",以及其他辅助函数。这些函数能够对 Latex 项目进行润色处理,其中 "多文件润色" 函数是一个主要函数,它调用了其他辅助函数用于读取和处理 Latex 项目中的文件。函数使用了多线程和机器学习模型进行自然语言处理,对文件进行简化和排版来满足学术标准。注释已删除并可以在函数内部查找。
|
94 |
-
|
95 |
-
## [11/31] 请对下面的程序文件做一个概述: H:\chatgpt_academic_resolve\crazy_functions\Latex全文翻译.py
|
96 |
-
|
97 |
-
这个程序文件包括一个用于对整个Latex项目进行翻译的函数 `Latex英译中` 和一个用于将中文翻译为英文的函数 `Latex中译英`。这两个函数都会尝试导入依赖库 tiktoken, 若无法导入则会提示用户安装。`Latex英译中` 函数会对 Latex 项目中的文件进行分离并去除注释,然后运行多线程翻译。`Latex中译英` 也做同样的事情,只不过是将中文翻译为英文。这个程序文件还包括其他一些帮助函数。
|
98 |
-
|
99 |
-
## [12/31] 请对下面的程序文件做一个概述: H:\chatgpt_academic_resolve\crazy_functions\__init__.py
|
100 |
-
|
101 |
-
这是一个 Python 包,包名为 `crazy_functions`,在 `__init__.py` 文件中定义了一些函数,包含以下函数:
|
102 |
-
|
103 |
-
- `crazy_addition(a, b)`:对两个数进行加法运算,并将结果返回。
|
104 |
-
- `crazy_multiplication(a, b)`:对两个数进行乘法运算,并将结果返回。
|
105 |
-
- `crazy_subtraction(a, b)`:对两个数进行减法运算,并将结果返回。
|
106 |
-
- `crazy_division(a, b)`:对两个数进行除法运算,并将结果返回。
|
107 |
-
- `crazy_factorial(n)`:计算 `n` 的阶乘并返回结果。
|
108 |
-
|
109 |
-
这些函数可能会有一些奇怪或者不符合常规的实现方式(由函数名可以看出来),所以这个包的名称为 `crazy_functions`,可能是暗示这些函数会有一些“疯狂”的实现方式。
|
110 |
-
|
111 |
-
## [13/31] 请对下面的程序文件做一个概述: H:\chatgpt_academic_resolve\crazy_functions\下载arxiv论文翻译摘要.py
|
112 |
-
|
113 |
-
该程序实现了一个名为“下载arxiv论文并翻译摘要”的函数插件,作者是“binary-husky”。该函数的功能是,在输入一篇arxiv论文的链接后,提取摘要、下载PDF文档、翻译摘要为中文,并将翻译结果保存到文件中。程序使用了一些Python库,如requests、pdfminer和beautifulsoup4等。程序入口是名为“下载arxiv论文并翻译摘要”的函数,其中使用了自定义的辅助函数download_arxiv_和get_name。程序中还使用了其他非函数的辅助函数和变量,如update_ui、CatchException、report_exception和get_conf等。
|
114 |
-
|
115 |
-
## [14/31] 请对下面的程序文件做一个概述: H:\chatgpt_academic_resolve\crazy_functions\代码重写为全英文_多线程.py
|
116 |
-
|
117 |
-
该文件是一个多线程Python脚本,包含多个函数和利用第三方库进行的API请求。主要功能是将给定文件夹内的Python代码文件中所有中文转化为英文,然后输出转化后的英文代码。重要的功能和步骤包括:
|
118 |
-
|
119 |
-
1. 清空历史,以免输入溢出
|
120 |
-
2. 尝试导入依赖,如果缺少依赖,则给出安装建议
|
121 |
-
3. 集合文件
|
122 |
-
4. 显示随意内容以防卡顿的感觉
|
123 |
-
5. Token限制下的截断与处理
|
124 |
-
6. 多线程操作请求转换中文变为英文的代码
|
125 |
-
7. 所有线程同时开始执行任务函数
|
126 |
-
8. 循环轮询各个线程是否执行完毕
|
127 |
-
9. 把结果写入文件
|
128 |
-
10. 备份一个文件
|
129 |
-
|
130 |
-
## [15/31] 请对下面的程序文件做一个概述: H:\chatgpt_academic_resolve\crazy_functions\总结word文档.py
|
131 |
-
|
132 |
-
这是一个名为"总结word文档.py"的程序文件,使用python编写。该文件导入了"toolbox"和"crazy_utils"模块,实现了解析docx格式和doc格式的文件的功能。该文件包含了一个名为"解析docx"的函数,通过对文件内容应用自然语言处理技术,生成文章片段的中英文概述。具体实现过程中,该函数使用了"docx"模块和"win32com.client"模块来实现对docx和doc格式文件的解析,同时使用了"request_gpt_model_in_new_thread_with_ui_alive"函数来向GPT模型发起请求。最后,该文件还实现了一个名为"总结word文档"的函数来批量总结Word文档。
|
133 |
-
|
134 |
-
## [16/31] 请对下面的程序文件做一个概述: H:\chatgpt_academic_resolve\crazy_functions\批量Markdown翻译.py
|
135 |
-
|
136 |
-
这个程序文件实现了一个批量Markdown翻译功能,可以将一个源代码项目中的Markdown文本翻译成指定语言(目前支持中<-英和英<-中)。程序主要分为三个函数,`PaperFileGroup`类用于处理长文本的拆分,`多文件翻译`是主要函数调用了`request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency`函数进行多线程翻译并输出结果,`Markdown英译中`和`Markdown中译外`分别是英译中和中译英的入口函数,用于解析项目路径和调用翻译函数。程序依赖于tiktoken等库实现。
|
137 |
-
|
138 |
-
## [17/31] 请对下面的程序文件做一个概述: H:\chatgpt_academic_resolve\crazy_functions\批量总结PDF文档.py
|
139 |
-
|
140 |
-
这是一个名为“批量总结PDF文档”的Python脚本,包含了多个函数。其中有一个函数名为“clean_text”,可以对PDF提取出的原始文本进行清洗和格式化处理,将连字转换为其基本形式,并根据heuristic规则判断换行符是否是段落分隔,并相应地进行替换。另一个函数名为“解析PDF”,可以接收一个PDF文件清单,并对清单中的每一个PDF进行解析,提取��文本并调用“clean_text”函数进行清洗和格式化处理,然后向用户发送一个包含文章简介信息的问题并等待用户回答。最后,该脚本也包含一个名为“批量总结PDF文档”的主函数,其中调用了“解析PDF”函数来完成对PDF文件的批量处理。
|
141 |
-
|
142 |
-
## [18/31] 请对下面的程序文件做一个概述: H:\chatgpt_academic_resolve\crazy_functions\批量总结PDF文档pdfminer.py
|
143 |
-
|
144 |
-
这个文件是一个Python模块,文件名为pdfminer.py,它定义了一个函数批量总结PDF文档。该函数接受一些参数,然后尝试导入pdfminer和beautifulsoup4库。该函数将读取pdf文件或tex文件中的内容,对其进行分析,并使用GPT模型进行自然语言摘要。文件中还有一个辅助函数readPdf,用于读取pdf文件中的内容。
|
145 |
-
|
146 |
-
## [19/31] 请对下面的程序文件做一个概述: H:\chatgpt_academic_resolve\crazy_functions\批量翻译PDF文档_多线程.py
|
147 |
-
|
148 |
-
这是一个Python脚本,文件名是crazy_functions\批量翻译PDF文档_多线程.py。该脚本提供了一个名为“批量翻译PDF文档”的函数,可以批量翻译PDF文件并生成报告文件。该函数使用了多个模块和函数(如toolbox、crazy_utils、update_ui等),使用了Python的异常处理和多线程功能,还使用了一些文本处理函数和第三方库(如fitz和tiktoken)。在函数执行过程中,它会进行一些参数检查、读取和清理PDF文本、递归地切割PDF文件、获取文章meta信息、多线程翻译、整理报告格式等操作,并更新UI界面和生成报告文件。
|
149 |
-
|
150 |
-
## [20/31] 请对下面的程序文件做一个概述: H:\chatgpt_academic_resolve\crazy_functions\理解PDF文档内容.py
|
151 |
-
|
152 |
-
这是一个解析PDF文件内容的Python程序,程序文件名为"理解PDF文档内容.py",程序主要由5个步骤组成:第0步是切割PDF文件;第1步是从摘要中提取高价值信息,放到history中;第2步是迭代地历遍整个文章,提取精炼信息;第3步是整理history;第4步是设置一个token上限,防止回答时Token溢出。程序主要用到了Python中的各种模块和函数库,如:toolbox, tiktoken, pymupdf等。
|
153 |
-
|
154 |
-
## [21/31] 请对下面的程序文件做一个概述: H:\chatgpt_academic_resolve\crazy_functions\生成函数注释.py
|
155 |
-
|
156 |
-
这是一个名为"生成函数注释"的函数,带有一个装饰器"@CatchException",可以捕获异常。该函数接受文件路径、参数和聊天机器人等参数,用于对多个Python或C++文件进行函数注释,使用了"toolbox"和"crazy_utils"模块中的函数。该函数会逐个读取指定文件中的内容,并使用聊天机器人进行交互,向用户请求注释信息,然后将生成的注释与原文件内容一起输出到一个markdown表格中。最后,该函数返回一个字符串,指示任务是否已完成。另外还包含一个名为"批量生成函数注释"的函数,它与"生成函数注释"函数一起用于批量处理多个文件。
|
157 |
-
|
158 |
-
## [22/31] 请对下面的程序文件做一个概述: H:\chatgpt_academic_resolve\crazy_functions\解析项目源代码.py
|
159 |
-
|
160 |
-
这个程序文件实现了对一个源代码项目进行分析的功能。其中,函数`解析项目本身`、`解析一个Python项目`、`解析一个C项目的头文件`、`解析一个C项目`、`解析一个Java项目`和`解析前端项目`分别用于解析不同类型的项目。函数`解析源代码新`实现了对每一个源代码文件的分析,并将分析结果汇总,同时还实现了分组和迭代处理,提高了效率。最后,函数`write_results_to_file`将所有分析结果写入文件。中间,还用到了`request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency`和`request_gpt_model_in_new_thread_with_ui_alive`来完成请求和响应,并用`update_ui`实时更新界面。
|
161 |
-
|
162 |
-
## [23/31] 请对下面的程序文件做一个概述: H:\chatgpt_academic_resolve\crazy_functions\询问多个大语言模型.py
|
163 |
-
|
164 |
-
这是一个Python程序,文件名为"crazy_functions\询问多个大语言模型.py"。该程序实现了一个同时向多个大语言模型询问的功能,接收用户输入文本以及模型参数,向ChatGPT和ChatGLM模型发出请求,并将对话记录显示在聊天框中,同时刷新界面。
|
165 |
-
|
166 |
-
## [24/31] 请对下面的程序文件做一个概述: H:\chatgpt_academic_resolve\crazy_functions\读文章写摘要.py
|
167 |
-
|
168 |
-
该程序文件是一个Python模块,文件名为"读文章写摘要.py",主要包含两个函数:"解析Paper"和"读文章写摘要"。其中,"解析Paper"函数接受文件路径、参数等参数,逐个打印文件内容并使用GPT模型生成对该文件的摘要;"读文章写摘要"函数则接受一段文本内容和参数,将该文本内容及其所有.tex文件逐个传递给"解析Paper"函数进行处理,并使用GPT模型生成文章的中英文摘要。文件还导入了一些工具函数,如异常处理、信息上报和文件写入等。
|
169 |
-
|
170 |
-
## [25/31] 请对下面的程序文件做一个概述: H:\chatgpt_academic_resolve\crazy_functions\谷歌检索小助手.py
|
171 |
-
|
172 |
-
该文件代码包含了一个名为`get_meta_information`的函数和一个名为`谷歌检索小助手`的装饰器函数,用于从谷歌学术中抓取文章元信息,并从用户提供的搜索页面中分析所有文章的相关信息。该文件使用了许多第三方库,如requests、arxiv、BeautifulSoup等。其中`get_meta_information`函数中还定义了一个名为`string_similar`的辅助函数,用于比较字符串相似度。
|
173 |
-
|
174 |
-
## [26/31] 请对下面的程序文件做一个概述: H:\chatgpt_academic_resolve\crazy_functions\高级功能函数模板.py
|
175 |
-
|
176 |
-
该程序文件是一个 Python 模块,包含一个名为“高阶功能模板函数”的函数。该函数接受多个参数,其中包括输入文本、GPT 模型参数、插件模型参数、聊天显示框、聊天历史等。 该函数的主要功能是根据输入文本,使用 GPT 模型生成一些问题,并等待用户回答这些问题(使用 Markdown 格式),然后将用户回答加入到聊天历史中,并更新聊天显示框。该函数还包含了一些异常处理和多线程的相关操作。该程序文件还引用了另一个 Python 模块中的两个函数,分别为“CatchException”和“update_ui”,并且还引用了一个名为“request_gpt_model_in_new_thread_with_ui_alive”的自定义函数。
|
177 |
-
|
178 |
-
## [27/31] 请对下面的程序文件做一个概述: H:\chatgpt_academic_resolve\request_llm\bridge_all.py
|
179 |
-
|
180 |
-
这个文件是用来处理与LLM的交互的。包含两个函数,一个是 predict_no_ui_long_connection 用来处理长文本的输出,可以多线程调用;另一个是 predict 用来处理基础的对话功能。这个文件会导入其他文件中定义的方法进行调用,具体调用哪个方法取决于传入的参数。函数中还有一些装饰器和管理多线程的逻辑。
|
181 |
-
|
182 |
-
## [28/31] 请对下面的程序文件做一个概述: H:\chatgpt_academic_resolve\request_llm\bridge_chatglm.py
|
183 |
-
|
184 |
-
这个程序文件实现了一个使用ChatGLM模型进行聊天的功能。具体实现过程是:首先进行初始化,然后使用GetGLMHandle类进行ChatGLM模型的加载和运行。predict_no_ui_long_connection函数用于多线程聊天,而predict函数用于单线程聊天,它们的不同之处在于前者不会更新UI界面,后者会。这个文件还导入了其他模块和库,例如transformers、time、importlib等,并使用了多进程Pipe。
|
185 |
-
|
186 |
-
## [29/31] 请对下面的程序文件做一个概述: H:\chatgpt_academic_resolve\request_llm\bridge_chatgpt.py
|
187 |
-
|
188 |
-
这个程序文件是用于对话生成的,主要包含三个函数:predict、predict_no_ui、predict_no_ui_long_connection。其中,predict是用于普通对话的函数,具备完备的交互功能,但不具备多线程能力;predict_no_ui是高级实验性功能模块调用的函数,参数简单,可以多线程并行,方便实现复杂的功能逻辑;predict_no_ui_long_connection解决了predict_no_ui在处理长文档时容易断开连接的问题,同样支持多线程。程序中还包含一些常量和工具函数,用于整合信息,选择LLM模型,生成http请求,发送请求,接收响应等。它需要配置一个config文件,包含代理网址、API等敏感信息。
|
189 |
-
|
190 |
-
## [30/31] 请对下面的程序文件做一个概述: H:\chatgpt_academic_resolve\request_llm\bridge_tgui.py
|
191 |
-
|
192 |
-
该程序文件实现了一个基于Websockets的文本生成服务和对话功能。其中,有三个函数:`run()`、`predict()`和`predict_no_ui_long_connection()`。`run()`函数用于连接到Websocket服务并生成文本结果;`predict()`函数用于将用户输入作为文本生成的输入,同时在UI上显示对话历史记录,并在不断更新UI的过程中不断更新生成的文本输出;`predict_no_ui_long_connection()`函数与`predict()`函数类似,但没有UI,并在一段时间内返回单个生成的文本。整个程序还引入了多个Python模块来完成相关功能,例如`asyncio`、`websockets`、`json`等等。
|
193 |
-
|
194 |
-
## 根据以上分析,对程序的整体功能和构架重新做出概括。然后用一张markdown表格整理每个文件的功能(包括check_proxy.py, colorful.py, config.py, config_private.py, core_functional.py, crazy_functional.py, main.py, theme.py, toolbox.py, crazy_functions\crazy_utils.py, crazy_functions\Latex全文润色.py, crazy_functions\Latex全文翻译.py, crazy_functions\__init__.py, crazy_functions\下载arxiv论文翻译摘要.py, crazy_functions\代码重写为全英文_多线程.py, crazy_functions\总结word文档.py)。
|
195 |
-
|
196 |
-
程序功能概括:该程序是一个聊天机器人,可以通过 Web 界面与用户进行交互。它包含了丰富的功能,如文本润色、翻译、代码重写、在线查找等,并且支持多线程处理。用户可以通过 Gradio 框架提供的 Web 界面进行交互,程序还提供了一些调试工具,如toolbox 模块,方便程序开发和调试。
|
197 |
-
|
198 |
-
The function of each file is summarized below:

| File | Function |
| --- | --- |
| check_proxy.py | Checks whether the proxy is available |
| colorful.py | Module for printing text in colored fonts |
| config.py | Holds the program's settings, such as limits on the number of parallel threads and the retry count |
| config_private.py | File for configuring the API_KEY and proxy information |
| core_functional.py | Module containing the concrete text-processing features |
| crazy_functional.py | Module containing the various plugin functions, providing many text-processing features |
| main.py | Module containing the chatbot's main program |
| theme.py | Module for adjusting the global style |
| toolbox.py | Contains utility functions and decorators used to develop and debug the chatbot |
| crazy_functions\crazy_utils.py | Contains helper functions, such as text clipping and message capture |
| crazy_functions\Latex全文润色.py | Feature module that polishes a LaTeX project |
| crazy_functions\Latex全文翻译.py | Feature module that translates a LaTeX project |
| crazy_functions\__init__.py | Defines some special mathematical functions, etc. |
| crazy_functions\下载arxiv论文翻译摘要.py | Feature module that downloads arXiv papers and translates their abstracts |
| crazy_functions\代码重写为全英文_多线程.py | Feature module that converts all Chinese in a Python program to English |
| crazy_functions\总结word文档.py | Feature module that parses docx and doc files and generates Chinese and English summaries of article fragments |
## Based on the above analysis, summarize the overall function and architecture of the program again. Then organize the function of each file into a Markdown table (including check_proxy.py, colorful.py, config.py, config_private.py, core_functional.py, crazy_functional.py, main.py, theme.py, toolbox.py, crazy_functions\crazy_utils.py, crazy_functions\Latex全文润色.py, crazy_functions\Latex全文翻译.py, crazy_functions\__init__.py, crazy_functions\下载arxiv论文翻译摘要.py, crazy_functions\代码重写为全英文_多线程.py, crazy_functions\总结word文档.py, crazy_functions\批量Markdown翻译.py, crazy_functions\批量总结PDF文档.py, crazy_functions\批量总结PDF文档pdfminer.py, crazy_functions\批量翻译PDF文档_多线程.py, crazy_functions\理解PDF文档内容.py, crazy_functions\生成函数注释.py, crazy_functions\解析项目源代码.py, crazy_functions\询问多个大语言模型.py, crazy_functions\读文章写摘要.py, crazy_functions\谷歌检索小助手.py, crazy_functions\高级功能函数模板.py, request_llm\bridge_all.py, request_llm\bridge_chatglm.py, request_llm\bridge_chatgpt.py, request_llm\bridge_tgui.py).

Based on the above analysis, the program as a whole is a text-processing and generation tool that integrates many useful utilities and features for different scenarios, including but not limited to conversation generation, text summarization, batch PDF processing, code translation, and practical utilities. The main Python modules include "toolbox.py", "config.py", "core_functional.py", and "crazy_functional.py", and many third-party libraries and modules are used to implement the features. The function of each program file is listed below:
| File | Function |
| --- | --- |
| check_proxy.py | Checks that the proxy is correct and available |
| colorful.py | Contains constants for various preset colors, used by several UI elements |
| config.py | Class used for global configuration |
| config_private.py | A second configuration file used together with config.py, for overriding private information |
| core_functional.py | Contains some TextFunctional classes and basic feature functions |
| crazy_functional.py | Contains a large number of advanced feature functions and experimental feature functions |
| main.py | The program's main entry point, containing the GUI main window and the main UI-management logic |
| theme.py | Contains the colors for some preset themes |
| toolbox.py | Provides a set of useful utility functions |
| crazy_functions\crazy_utils.py | Contains helper functions used to implement the advanced features |
| crazy_functions\Latex全文润色.py | Implements polishing and formatting of the full text of LaTeX files |
| crazy_functions\Latex全文翻译.py | Implements translation of the contents of LaTeX files |
| crazy_functions\__init__.py | Used to import the feature functions in crazy_functional.py |
| crazy_functions\下载arxiv论文翻译摘要.py | Downloads papers from arXiv and extracts the important information |
| crazy_functions\代码重写为全英文_多线程.py | Translates Chinese Python files entirely into English |
| crazy_functions\总结word文档.py | Extracts the important content of Word files to generate a summary |
| crazy_functions\批量Markdown翻译.py | Translates Markdown files in batches |
| crazy_functions\批量总结PDF文档.py | Extracts summaries from PDF files in batches |
| crazy_functions\批量总结PDF文档pdfminer.py | Extracts summaries from PDF files in batches (pdfminer backend) |
| crazy_functions\批量翻译PDF文档_多线程.py | Translates PDF files in batches |
| crazy_functions\理解PDF文档内容.py | Analyzes PDF files in batches and extracts summaries |
| crazy_functions\生成函数注释.py | Automatically generates comments for the functions in Python files |
| crazy_functions\解析项目源代码.py | Parses and analyzes the source code of a given project |
| crazy_functions\询问多个大语言模型.py | Sends the input text to multiple large language models and processes the results |
| crazy_functions\读文章写摘要.py | Reads the content of a paper based on user input and generates a summary |
| crazy_functions\谷歌检索小助手.py | Uses Google Scholar to search for the papers the user provides and extracts the relevant information |
| crazy_functions\高级功能函数模板.py | Template function for implementing advanced features |
| request_llm\bridge_all.py | Handles the interaction with LLMs |
| request_llm\bridge_chatglm.py | Chats using the ChatGLM model |
| request_llm\bridge_chatgpt.py | Implements the conversation-generation features |
| request_llm\bridge_tgui.py | Interacts with the user over WebSockets and generates text output |
spaces/Cropinky/hana_hanak_houses/realesrgan/data/realesrgan_dataset.py
DELETED
@@ -1,192 +0,0 @@
import cv2
import math
import numpy as np
import os
import os.path as osp
import random
import time
import torch
from basicsr.data.degradations import circular_lowpass_kernel, random_mixed_kernels
from basicsr.data.transforms import augment
from basicsr.utils import FileClient, get_root_logger, imfrombytes, img2tensor
from basicsr.utils.registry import DATASET_REGISTRY
from torch.utils import data as data


@DATASET_REGISTRY.register()
class RealESRGANDataset(data.Dataset):
    """Dataset used for Real-ESRGAN model:
    Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data.

    It loads gt (Ground-Truth) images, and augments them.
    It also generates blur kernels and sinc kernels for generating low-quality images.
    Note that the low-quality images are processed in tensors on GPUS for faster processing.

    Args:
        opt (dict): Config for train datasets. It contains the following keys:
            dataroot_gt (str): Data root path for gt.
            meta_info (str): Path for meta information file.
            io_backend (dict): IO backend type and other kwarg.
            use_hflip (bool): Use horizontal flips.
            use_rot (bool): Use rotation (use vertical flip and transposing h and w for implementation).
            Please see more options in the codes.
    """

    def __init__(self, opt):
        super(RealESRGANDataset, self).__init__()
        self.opt = opt
        self.file_client = None
        self.io_backend_opt = opt['io_backend']
        self.gt_folder = opt['dataroot_gt']

        # file client (lmdb io backend)
        if self.io_backend_opt['type'] == 'lmdb':
            self.io_backend_opt['db_paths'] = [self.gt_folder]
            self.io_backend_opt['client_keys'] = ['gt']
            if not self.gt_folder.endswith('.lmdb'):
                raise ValueError(f"'dataroot_gt' should end with '.lmdb', but received {self.gt_folder}")
            with open(osp.join(self.gt_folder, 'meta_info.txt')) as fin:
                self.paths = [line.split('.')[0] for line in fin]
        else:
            # disk backend with meta_info
            # Each line in the meta_info describes the relative path to an image
            with open(self.opt['meta_info']) as fin:
                paths = [line.strip().split(' ')[0] for line in fin]
            self.paths = [os.path.join(self.gt_folder, v) for v in paths]

        # blur settings for the first degradation
        self.blur_kernel_size = opt['blur_kernel_size']
        self.kernel_list = opt['kernel_list']
        self.kernel_prob = opt['kernel_prob']  # a list for each kernel probability
        self.blur_sigma = opt['blur_sigma']
        self.betag_range = opt['betag_range']  # betag used in generalized Gaussian blur kernels
        self.betap_range = opt['betap_range']  # betap used in plateau blur kernels
        self.sinc_prob = opt['sinc_prob']  # the probability for sinc filters

        # blur settings for the second degradation
        self.blur_kernel_size2 = opt['blur_kernel_size2']
        self.kernel_list2 = opt['kernel_list2']
        self.kernel_prob2 = opt['kernel_prob2']
        self.blur_sigma2 = opt['blur_sigma2']
        self.betag_range2 = opt['betag_range2']
        self.betap_range2 = opt['betap_range2']
        self.sinc_prob2 = opt['sinc_prob2']

        # a final sinc filter
        self.final_sinc_prob = opt['final_sinc_prob']

        self.kernel_range = [2 * v + 1 for v in range(3, 11)]  # kernel size ranges from 7 to 21
        # TODO: kernel range is now hard-coded, should be in the configure file
        self.pulse_tensor = torch.zeros(21, 21).float()  # convolving with pulse tensor brings no blurry effect
        self.pulse_tensor[10, 10] = 1

    def __getitem__(self, index):
        if self.file_client is None:
            self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt)

        # -------------------------------- Load gt images -------------------------------- #
        # Shape: (h, w, c); channel order: BGR; image range: [0, 1], float32.
        gt_path = self.paths[index]
        # avoid errors caused by high latency in reading files
        retry = 3
        while retry > 0:
            try:
                img_bytes = self.file_client.get(gt_path, 'gt')
            except (IOError, OSError) as e:
                logger = get_root_logger()
                logger.warning(f'File client error: {e}, remaining retry times: {retry - 1}')
                # change another file to read; randint is inclusive on both ends,
                # so the upper bound must be len - 1 to stay in range
                index = random.randint(0, self.__len__() - 1)
                gt_path = self.paths[index]
                time.sleep(1)  # sleep 1s for occasional server congestion
            else:
                break
            finally:
                retry -= 1
        img_gt = imfrombytes(img_bytes, float32=True)

        # -------------------- Do augmentation for training: flip, rotation -------------------- #
        img_gt = augment(img_gt, self.opt['use_hflip'], self.opt['use_rot'])

        # crop or pad to 400
        # TODO: 400 is hard-coded. You may change it accordingly
        h, w = img_gt.shape[0:2]
        crop_pad_size = 400
        # pad
        if h < crop_pad_size or w < crop_pad_size:
            pad_h = max(0, crop_pad_size - h)
            pad_w = max(0, crop_pad_size - w)
            img_gt = cv2.copyMakeBorder(img_gt, 0, pad_h, 0, pad_w, cv2.BORDER_REFLECT_101)
        # crop
        if img_gt.shape[0] > crop_pad_size or img_gt.shape[1] > crop_pad_size:
            h, w = img_gt.shape[0:2]
            # randomly choose top and left coordinates
            top = random.randint(0, h - crop_pad_size)
            left = random.randint(0, w - crop_pad_size)
            img_gt = img_gt[top:top + crop_pad_size, left:left + crop_pad_size, ...]

        # ------------------------ Generate kernels (used in the first degradation) ------------------------ #
        kernel_size = random.choice(self.kernel_range)
        if np.random.uniform() < self.opt['sinc_prob']:
            # this sinc filter setting is for kernels ranging from [7, 21]
            if kernel_size < 13:
                omega_c = np.random.uniform(np.pi / 3, np.pi)
            else:
                omega_c = np.random.uniform(np.pi / 5, np.pi)
            kernel = circular_lowpass_kernel(omega_c, kernel_size, pad_to=False)
        else:
            kernel = random_mixed_kernels(
                self.kernel_list,
                self.kernel_prob,
                kernel_size,
                self.blur_sigma,
                self.blur_sigma, [-math.pi, math.pi],
                self.betag_range,
                self.betap_range,
                noise_range=None)
        # pad kernel
        pad_size = (21 - kernel_size) // 2
        kernel = np.pad(kernel, ((pad_size, pad_size), (pad_size, pad_size)))

        # ------------------------ Generate kernels (used in the second degradation) ------------------------ #
        kernel_size = random.choice(self.kernel_range)
        if np.random.uniform() < self.opt['sinc_prob2']:
            if kernel_size < 13:
                omega_c = np.random.uniform(np.pi / 3, np.pi)
            else:
                omega_c = np.random.uniform(np.pi / 5, np.pi)
            kernel2 = circular_lowpass_kernel(omega_c, kernel_size, pad_to=False)
        else:
            kernel2 = random_mixed_kernels(
                self.kernel_list2,
                self.kernel_prob2,
                kernel_size,
                self.blur_sigma2,
                self.blur_sigma2, [-math.pi, math.pi],
                self.betag_range2,
                self.betap_range2,
                noise_range=None)

        # pad kernel
        pad_size = (21 - kernel_size) // 2
        kernel2 = np.pad(kernel2, ((pad_size, pad_size), (pad_size, pad_size)))

        # ------------------------------------- the final sinc kernel ------------------------------------- #
        if np.random.uniform() < self.opt['final_sinc_prob']:
            kernel_size = random.choice(self.kernel_range)
            omega_c = np.random.uniform(np.pi / 3, np.pi)
            sinc_kernel = circular_lowpass_kernel(omega_c, kernel_size, pad_to=21)
            sinc_kernel = torch.FloatTensor(sinc_kernel)
        else:
            sinc_kernel = self.pulse_tensor

        # BGR to RGB, HWC to CHW, numpy to tensor
        img_gt = img2tensor([img_gt], bgr2rgb=True, float32=True)[0]
        kernel = torch.FloatTensor(kernel)
        kernel2 = torch.FloatTensor(kernel2)

        return_d = {'gt': img_gt, 'kernel1': kernel, 'kernel2': kernel2, 'sinc_kernel': sinc_kernel, 'gt_path': gt_path}
        return return_d

    def __len__(self):
        return len(self.paths)
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/PdfImagePlugin.py
DELETED
@@ -1,284 +0,0 @@
#
# The Python Imaging Library.
# $Id$
#
# PDF (Acrobat) file handling
#
# History:
# 1996-07-16 fl   Created
# 1997-01-18 fl   Fixed header
# 2004-02-21 fl   Fixes for 1/L/CMYK images, etc.
# 2004-02-24 fl   Fixes for 1 and P images.
#
# Copyright (c) 1997-2004 by Secret Labs AB.  All rights reserved.
# Copyright (c) 1996-1997 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#

##
# Image plugin for PDF images (output only).
##

import io
import math
import os
import time

from . import Image, ImageFile, ImageSequence, PdfParser, __version__, features

#
# --------------------------------------------------------------------

# object ids:
#  1. catalogue
#  2. pages
#  3. image
#  4. page
#  5. page contents


def _save_all(im, fp, filename):
    _save(im, fp, filename, save_all=True)


##
# (Internal) Image save plugin for the PDF format.


def _save(im, fp, filename, save_all=False):
    is_appending = im.encoderinfo.get("append", False)
    if is_appending:
        existing_pdf = PdfParser.PdfParser(f=fp, filename=filename, mode="r+b")
    else:
        existing_pdf = PdfParser.PdfParser(f=fp, filename=filename, mode="w+b")

    dpi = im.encoderinfo.get("dpi")
    if dpi:
        x_resolution = dpi[0]
        y_resolution = dpi[1]
    else:
        x_resolution = y_resolution = im.encoderinfo.get("resolution", 72.0)

    info = {
        "title": None
        if is_appending
        else os.path.splitext(os.path.basename(filename))[0],
        "author": None,
        "subject": None,
        "keywords": None,
        "creator": None,
        "producer": None,
        "creationDate": None if is_appending else time.gmtime(),
        "modDate": None if is_appending else time.gmtime(),
    }
    for k, default in info.items():
        v = im.encoderinfo.get(k) if k in im.encoderinfo else default
        if v:
            existing_pdf.info[k[0].upper() + k[1:]] = v

    #
    # make sure image data is available
    im.load()

    existing_pdf.start_writing()
    existing_pdf.write_header()
    existing_pdf.write_comment(f"created by Pillow {__version__} PDF driver")

    #
    # pages
    ims = [im]
    if save_all:
        append_images = im.encoderinfo.get("append_images", [])
        for append_im in append_images:
            append_im.encoderinfo = im.encoderinfo.copy()
            ims.append(append_im)
    number_of_pages = 0
    image_refs = []
    page_refs = []
    contents_refs = []
    for im in ims:
        im_number_of_pages = 1
        if save_all:
            try:
                im_number_of_pages = im.n_frames
            except AttributeError:
                # Image format does not have n_frames.
                # It is a single frame image
                pass
        number_of_pages += im_number_of_pages
        for i in range(im_number_of_pages):
            image_refs.append(existing_pdf.next_object_id(0))
            page_refs.append(existing_pdf.next_object_id(0))
            contents_refs.append(existing_pdf.next_object_id(0))
            existing_pdf.pages.append(page_refs[-1])

    #
    # catalog and list of pages
    existing_pdf.write_catalog()

    page_number = 0
    for im_sequence in ims:
        im_pages = ImageSequence.Iterator(im_sequence) if save_all else [im_sequence]
        for im in im_pages:
            # FIXME: Should replace ASCIIHexDecode with RunLengthDecode
            # (packbits) or LZWDecode (tiff/lzw compression). Note that
            # PDF 1.2 also supports Flatedecode (zip compression).

            bits = 8
            params = None
            decode = None

            #
            # Get image characteristics

            width, height = im.size

            if im.mode == "1":
                if features.check("libtiff"):
                    filter = "CCITTFaxDecode"
                    bits = 1
                    params = PdfParser.PdfArray(
                        [
                            PdfParser.PdfDict(
                                {
                                    "K": -1,
                                    "BlackIs1": True,
                                    "Columns": width,
                                    "Rows": height,
                                }
                            )
                        ]
                    )
                else:
                    filter = "DCTDecode"
                colorspace = PdfParser.PdfName("DeviceGray")
                procset = "ImageB"  # grayscale
            elif im.mode == "L":
                filter = "DCTDecode"
                # params = f"<< /Predictor 15 /Columns {width-2} >>"
                colorspace = PdfParser.PdfName("DeviceGray")
                procset = "ImageB"  # grayscale
            elif im.mode == "P":
                filter = "ASCIIHexDecode"
                palette = im.getpalette()
                colorspace = [
                    PdfParser.PdfName("Indexed"),
                    PdfParser.PdfName("DeviceRGB"),
                    255,
                    PdfParser.PdfBinary(palette),
                ]
                procset = "ImageI"  # indexed color
            elif im.mode == "RGB":
                filter = "DCTDecode"
                colorspace = PdfParser.PdfName("DeviceRGB")
                procset = "ImageC"  # color images
            elif im.mode == "RGBA":
                filter = "JPXDecode"
                colorspace = PdfParser.PdfName("DeviceRGB")
                procset = "ImageC"  # color images
            elif im.mode == "CMYK":
                filter = "DCTDecode"
                colorspace = PdfParser.PdfName("DeviceCMYK")
                procset = "ImageC"  # color images
                decode = [1, 0, 1, 0, 1, 0, 1, 0]
            else:
                msg = f"cannot save mode {im.mode}"
                raise ValueError(msg)

            #
            # image

            op = io.BytesIO()

            if filter == "ASCIIHexDecode":
                ImageFile._save(im, op, [("hex", (0, 0) + im.size, 0, im.mode)])
            elif filter == "CCITTFaxDecode":
                im.save(
                    op,
                    "TIFF",
                    compression="group4",
                    # use a single strip
                    strip_size=math.ceil(im.width / 8) * im.height,
                )
            elif filter == "DCTDecode":
                Image.SAVE["JPEG"](im, op, filename)
            elif filter == "JPXDecode":
                Image.SAVE["JPEG2000"](im, op, filename)
            elif filter == "FlateDecode":
                ImageFile._save(im, op, [("zip", (0, 0) + im.size, 0, im.mode)])
            elif filter == "RunLengthDecode":
                ImageFile._save(im, op, [("packbits", (0, 0) + im.size, 0, im.mode)])
            else:
                msg = f"unsupported PDF filter ({filter})"
                raise ValueError(msg)

            stream = op.getvalue()
            if filter == "CCITTFaxDecode":
                stream = stream[8:]
                filter = PdfParser.PdfArray([PdfParser.PdfName(filter)])
            else:
                filter = PdfParser.PdfName(filter)

            existing_pdf.write_obj(
                image_refs[page_number],
                stream=stream,
                Type=PdfParser.PdfName("XObject"),
                Subtype=PdfParser.PdfName("Image"),
                Width=width,  # * 72.0 / x_resolution,
                Height=height,  # * 72.0 / y_resolution,
                Filter=filter,
                BitsPerComponent=bits,
                Decode=decode,
                DecodeParms=params,
                ColorSpace=colorspace,
            )

            #
            # page

            existing_pdf.write_page(
                page_refs[page_number],
                Resources=PdfParser.PdfDict(
                    ProcSet=[PdfParser.PdfName("PDF"), PdfParser.PdfName(procset)],
                    XObject=PdfParser.PdfDict(image=image_refs[page_number]),
                ),
                MediaBox=[
                    0,
                    0,
                    width * 72.0 / x_resolution,
                    height * 72.0 / y_resolution,
                ],
                Contents=contents_refs[page_number],
            )

            #
            # page contents

            page_contents = b"q %f 0 0 %f 0 0 cm /image Do Q\n" % (
                width * 72.0 / x_resolution,
                height * 72.0 / y_resolution,
            )

            existing_pdf.write_obj(contents_refs[page_number], stream=page_contents)

            page_number += 1

    #
    # trailer
    existing_pdf.write_xref_and_trailer()
    if hasattr(fp, "flush"):
        fp.flush()
    existing_pdf.close()


#
# --------------------------------------------------------------------


Image.register_save("PDF", _save)
Image.register_save_all("PDF", _save_all)

Image.register_extension("PDF", ".pdf")

Image.register_mime("PDF", "application/pdf")
spaces/Datasculptor/MusicGen/CONTRIBUTING.md
DELETED
@@ -1,35 +0,0 @@
# Contributing to Audiocraft

We want to make contributing to this project as easy and transparent as
possible.

## Pull Requests

Audiocraft is the implementation of a research paper.
Therefore, we do not plan on accepting many pull requests for new features.
We certainly welcome them for bug fixes.

1. Fork the repo and create your branch from `main`.
2. If you've added code that should be tested, add tests.
3. If you've changed APIs, update the documentation.
4. Ensure the test suite passes.
5. Make sure your code lints.
6. If you haven't already, complete the Contributor License Agreement ("CLA").

## Contributor License Agreement ("CLA")
In order to accept your pull request, we need you to submit a CLA. You only need
to do this once to work on any of Meta's open source projects.

Complete your CLA here: <https://code.facebook.com/cla>

## Issues
We use GitHub issues to track public bugs. Please ensure your description is
clear and has sufficient instructions to be able to reproduce the issue.

Meta has a [bounty program](https://www.facebook.com/whitehat/) for the safe
disclosure of security bugs. In those cases, please go through the process
outlined on that page and do not file a public issue.

## License
By contributing to Audiocraft, you agree that your contributions will be licensed
under the LICENSE file in the root directory of this source tree.
spaces/Detomo/ai-comic-generation/src/lib/getInitialRenderedScene.ts
DELETED
@@ -1,11 +0,0 @@
import { RenderedScene } from "@/types"

export const getInitialRenderedScene = (): RenderedScene => ({
  renderId: "",
  status: "pending",
  assetUrl: "",
  alt: "",
  error: "",
  maskUrl: "",
  segments: []
})
spaces/Detomo/ai-comic-generation/src/lib/replaceWhiteWithTransparent.ts
DELETED
@@ -1,37 +0,0 @@
export function replaceWhiteWithTransparent(imageBase64: string): Promise<string> {
  return new Promise((resolve, reject) => {
    const img = new Image();
    img.onload = () => {
      const canvas = document.createElement('canvas');
      canvas.width = img.width;
      canvas.height = img.height;

      const ctx = canvas.getContext('2d');
      if (!ctx) {
        reject('Unable to get canvas 2D context');
        return;
      }

      ctx.drawImage(img, 0, 0);

      const imageData = ctx.getImageData(0, 0, canvas.width, canvas.height);
      const data = imageData.data;

      // set alpha to 0 for every pure-white pixel (pixels are stored as RGBA, stride 4)
      for (let i = 0; i < data.length; i += 4) {
        if (data[i] === 255 && data[i + 1] === 255 && data[i + 2] === 255) {
          data[i + 3] = 0;
        }
      }

      ctx.putImageData(imageData, 0, 0);

      resolve(canvas.toDataURL());
    };

    img.onerror = (err) => {
      reject(err);
    };

    img.src = imageBase64;
  });
}