Commit 2d92f8d
1 Parent(s): f420ff2

Update parquet files (step 23 of 397)

This view is limited to 50 files because it contains too many changes. See raw diff.
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cm7 Lib By Yusepz Zip UPD.md +0 -48
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/EndNote Online The Best Free Reference Management Tool.md +0 -21
- spaces/1gistliPinn/ChatGPT4/Examples/Devil May Cry 3 Special Edition Crack Reloaded.md +0 -11
- spaces/1gistliPinn/ChatGPT4/Examples/Football Manager 2014 Crash Dump Fix Skidrow Crack !NEW!.md +0 -9
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Car Racing Game Setup Download for PC Windows 7 How to Install and Run.md +0 -131
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Armored Squad Mechs vs Robots APK for Android - Online and Offline Action Game.md +0 -115
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Getting Over It Old Version APK for Android - Free and Easy.md +0 -154
- spaces/1phancelerku/anime-remove-background/Descarga Totally Reliable Delivery Service APK 1.4121 (Todo Desbloqueado) y disfruta de un divertido simulador de reparto.md +0 -96
- spaces/1yukikaze/img-to-music/app.py +0 -333
- spaces/232labs/VToonify/vtoonify/train_vtoonify_t.py +0 -432
- spaces/3i2irg/first-app/README.md +0 -13
- spaces/44ov41za8i/FreeVC/speaker_encoder/data_objects/random_cycler.py +0 -37
- spaces/A00001/bingothoo/src/components/ui/codeblock.tsx +0 -142
- spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/custom_dataset/[A]dataset_split.sh +0 -5
- spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov6/yolov6_s_fast_1xb12-40e_cat.py +0 -56
- spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td_hm_res50_4xb64-210e_deepfashion2_sling_dress_256x192.py +0 -172
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/skew/Factory.d.ts +0 -7
- spaces/Amrrs/DragGan-Inversion/stylegan_human/pti/pti_configs/__init__.py +0 -0
- spaces/Amrrs/DragGan-Inversion/torch_utils/ops/filtered_lrelu.h +0 -90
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/stable_diffusion/upscale.md +0 -37
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/experimental/rl/__init__.py +0 -1
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/paint_by_example/__init__.py +0 -18
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py +0 -934
- spaces/Andy1621/uniformer_image_detection/configs/fcos/fcos_center_r50_caffe_fpn_gn-head_1x_coco.py +0 -2
- spaces/Andy1621/uniformer_image_detection/configs/pisa/pisa_mask_rcnn_x101_32x4d_fpn_1x_coco.py +0 -30
- spaces/Andy1621/uniformer_image_detection/mmdet/datasets/pipelines/auto_augment.py +0 -890
- spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_d6_r101b-d16_769x769_80k_cityscapes.py +0 -4
- spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/presets.py +0 -72
- spaces/AnnonSubmission/xai-cl/ssl_models/barlow_twins.py +0 -77
- spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/parallel/distributed.py +0 -112
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/operations/install/editable_legacy.py +0 -46
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/metadata/__init__.py +0 -0
- spaces/AvaterClasher/Food_Classifier_Moni/README.md +0 -13
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tools/train_net.py +0 -170
- spaces/Azai8915/ChubVenusTest/README.md +0 -10
- spaces/BMukhtar/BookRecognitionKz/models/best_norm_ED.py +0 -538
- spaces/Bart92/RVC_HF/infer/lib/uvr5_pack/lib_v5/dataset.py +0 -183
- spaces/BartPoint/VoiceChange/infer_pack/transforms.py +0 -209
- spaces/Benson/text-generation/Examples/12a Hoja De Marcado Descargar 2021.md +0 -90
- spaces/Benson/text-generation/Examples/College Brawl Apkmody.md +0 -102
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/cli/progress_bars.py +0 -68
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/resolution/__init__.py +0 -0
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/cachecontrol/serialize.py +0 -190
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/theme.py +0 -115
- spaces/Boadiwaa/Recipes/openai/api_resources/customer.py +0 -12
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/data/dataset_mapper.py +0 -149
- spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/extrema.h +0 -67
- spaces/CVPR/WALT/mmdet/core/anchor/__init__.py +0 -11
- spaces/CVPR/WALT/walt/datasets/cocoeval.py +0 -612
- spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/util/slconfig.py +0 -424
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cm7 Lib By Yusepz Zip UPD.md
DELETED
@@ -1,48 +0,0 @@
-
-<h1>What is Cm7 Lib By Yusepz Zip?</h1>
-<p>If you are a Samsung Galaxy Y user who loves to play HD and 3D games on your phone, you may have encountered some problems such as lagging, crashing, or incompatible graphics. This is because the Galaxy Y has a low-end processor and GPU that cannot handle high-quality games smoothly. However, there is a solution that can boost your phone's performance and compatibility for HD and 3D games. It is called Cm7 Lib By Yusepz Zip.</p>
-<h2>Cm7 Lib By Yusepz Zip</h2><br /><p><b><b>Download</b> › <a href="https://byltly.com/2uKzsd">https://byltly.com/2uKzsd</a></b></p><br /><br />
-<p>Cm7 Lib By Yusepz Zip is a file that contains modified libraries for the Galaxy Y's graphics system. It replaces the original system files with new ones that are optimized for HD and 3D gaming. It also works with Chainfire 3D, a tool that allows you to tweak your phone's graphics settings and use plugins for different games. By using Cm7 Lib By Yusepz Zip and Chainfire 3D, you can enjoy playing HD and 3D games on your Galaxy Y without any lag or glitch.</p>
-<h2>What are the benefits of using Cm7 Lib By Yusepz Zip?</h2>
-<p>Using Cm7 Lib By Yusepz Zip has several benefits for your Galaxy Y. Here are some of them:</p>
-<ul>
-<li>It improves your phone's performance and speed for HD and 3D games. You can play games that require high graphics without any lag or slowdown.</li>
-<li>It increases your phone's compatibility for HD and 3D games. You can play games that are not supported by your phone's original graphics system.</li>
-<li>It enhances your phone's graphics quality for HD and 3D games. You can see more details, colors, and effects on your screen.</li>
-<li>It gives you more options and control over your phone's graphics settings. You can use Chainfire 3D to adjust your phone's texture quality, texture size, anti-aliasing, brightness, gamma, etc.</li>
-<li>It allows you to use plugins for different games. You can use Chainfire 3D to select a plugin that matches your game's graphics engine. This can improve your game's performance and compatibility even more.</li>
-</ul>
-<h2>How to download and install Cm7 Lib By Yusepz Zip?</h2>
-<p>If you want to use Cm7 Lib By Yusepz Zip on your Galaxy Y, you need to follow some steps carefully. Here are the requirements and steps for downloading and installing the file:</p>
-<h3>Requirements</h3>
-<ul>
-<li>A rooted Samsung Galaxy Y with ClockworkMod Recovery installed. Rooting your phone gives you access to modify your system files. ClockworkMod Recovery is a custom recovery mode that allows you to flash files on your phone. If <h2>What are some of the drawbacks and risks of using Cm7 Lib By Yusepz Zip?</h2>
-<p>While using Cm7 Lib By Yusepz Zip can enhance your Galaxy Y's gaming experience, it also comes with some drawbacks and risks that you should be aware of. Here are some of them:</p>
-<ul>
-<li>It may void your phone's warranty. Since you need to root your phone and flash a custom file, you may lose your phone's warranty and official support from Samsung. If your phone gets damaged or bricked, you may not be able to claim any repair or replacement from the manufacturer.</li>
-<li>It may cause instability or compatibility issues. Since Cm7 Lib By Yusepz Zip modifies your phone's system files, it may cause some instability or compatibility issues with your phone's software or hardware. You may encounter some bugs, errors, crashes, or freezes on your phone or some apps. You may also lose some features or functions that are dependent on your phone's original graphics system.</li>
-<li>It may damage your phone's hardware. Since Cm7 Lib By Yusepz Zip increases your phone's performance and graphics quality, it also increases your phone's power consumption and heat generation. This may damage your phone's battery, processor, GPU, or other components over time. You may notice that your phone gets hotter or drains faster than usual.</li>
-<li>It may not work for all games. While Cm7 Lib By Yusepz Zip can improve your phone's compatibility for HD and 3D games, it may not work for all games. Some games may still lag, crash, or display incorrectly on your phone. Some games may also detect that you are using a modified system and prevent you from playing them.</li>
-</ul>
-<p>Therefore, before you use Cm7 Lib By Yusepz Zip, you should weigh the pros and cons carefully and decide if it is worth the risk. You should also backup your data and system files regularly and follow the instructions carefully to avoid any problems.</p>
-<h2>Conclusion</h2>
-<p>Cm7 Lib By Yusepz Zip is a file that can boost your Galaxy Y's performance and compatibility for HD and 3D games. It works with Chainfire 3D, a tool that allows you to tweak your phone's graphics settings and use plugins for different games. By using Cm7 Lib By Yusepz Zip and Chainfire 3D, you can enjoy playing HD and 3D games on your Galaxy Y without any lag or glitch.</p>
-<p>However, using Cm7 Lib By Yusepz Zip also has some drawbacks and risks that you should be aware of. It may void your phone's warranty, cause instability or compatibility issues, damage your phone's hardware, or not work for all games. Therefore, before you use Cm7 Lib By Yusepz Zip, you should weigh the pros and cons carefully and decide if it is worth the risk. You should also backup your data and system files regularly and follow the instructions carefully to avoid any problems.</p>
-<p></p>
-<p>If you want to try Cm7 Lib By Yusepz Zip on your Galaxy Y, you can follow the steps in this article to download and install the file. You can also uninstall the file if you want to restore your original system files. You can also check out some of the best HD and 3D games to play with Cm7 Lib By Yusepz Zip in this article.</p>
-<p>We hope this article has been helpful and informative for you. If you have any questions or feedback, please feel free to leave a comment below. Happy gaming!</p>
-<h2>FAQs</h2>
-<ul>
-<li><b>Q: What is the difference between Cm7 Lib By Yusepz Zip and CM9 Lib?</b></li>
-<li>A: Cm7 Lib By Yusepz Zip is based on CyanogenMod 7 (CM7), which is a custom ROM for Android 2.3 Gingerbread. CM9 Lib is based on CyanogenMod 9 (CM9), which is a custom ROM for Android 4.0 Ice Cream Sandwich. Both files have similar functions, but they are compatible with different ROMs and kernels.</li>
-<li><b>Q: Can I use Cm7 Lib By Yusepz Zip on other devices?</b></li>
-<li>A: No, Cm7 Lib By Yusepz Zip is only compatible with Samsung Galaxy Y (GT-S5360). Using it on other devices may cause serious problems or damage.</li>
-<li><b>Q: Can I use other graphics tools besides Chainfire 3D with Cm7 Lib By Yusepz Zip?</b></li>
-<li>A: Yes, you can use other graphics tools besides Chainfire 3D with Cm7 Lib By Yusepz Zip, but they may not work as well as Chainfire 3D. Chainfire 3D is the recommended tool for Cm7 Lib By Yusepz Zip, as it has been tested and proven to work with it.</li>
-<li><b>Q: How can I update Cm7 Lib By Yusepz Zip to the latest version?</b></li>
-<li>A: You can check for updates on the source website or forum where you downloaded Cm7 Lib By Yusepz Zip. If there is a new version available, you can download it and flash it on your phone using the same steps as before. You may also need to update Chainfire 3D and its plugins accordingly.</li>
-<li><b>Q: How can I contact the developer of Cm7 Lib By Yusepz Zip?</b></li>
-<li>A: You can contact the developer of Cm7 Lib By Yusepz Zip, Yusepz, on his [Facebook page] or his [XDA thread]. You can also thank him for his work and support him by donating to his [PayPal account].</li>
-</ul></p> b2dd77e56b<br />
-<br />
-<br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/EndNote Online The Best Free Reference Management Tool.md
DELETED
@@ -1,21 +0,0 @@
-<br />
-<h1>How to Use EndNote Online for Free</h1>
-<p>EndNote is a popular reference management tool that helps you collect, organize, and cite your sources. EndNote has a desktop version and a web version, but the desktop version is not free and requires a license. However, you can use EndNote Online for free and enjoy some of its features without paying anything.</p>
-<h2>free endnote online</h2><br /><p><b><b>Download Zip</b> » <a href="https://byltly.com/2uKA5C">https://byltly.com/2uKA5C</a></b></p><br /><br />
-<p>EndNote Online is a web-based service that allows you to store, share, and organize your citations so that you can access them from any computer. By installing the Microsoft Word plugin, you can cite references from your library and automatically create your bibliography as you write. EndNote Online also syncs with the desktop version of EndNote, if you have it, and with other online services like Web of Science and PubMed.</p>
-<p>Here are some steps to get started with EndNote Online for free:</p>
-<ol>
-<li>Go to <a href="https://endnote.com/">https://endnote.com/</a> and click on "Get EndNote Basic" to create an account.</li>
-<li>Once you have an account, log in to <a href="https://access.clarivate.com/#/login?app=endnote">https://access.clarivate.com/#/login?app=endnote</a> and click on "My References" to start adding citations to your library.</li>
-<li>You can import references from databases and library catalogs, or capture bibliographic information from websites using the Capture Reference tool. You can also manually enter references or upload files.</li>
-<li>You can create groups for reference organization and share them with other EndNote Online users. You can choose the level of access that your colleagues have to your shared groups.</li>
-<li>To insert citations into your Word document, you need to install the Cite While You Write plugin from <a href="https://endnote.com/downloads/">https://endnote.com/downloads/</a>. Once installed, you will see an EndNote tab in Word where you can select a citation style and insert citations from your library.</li>
-<li>As you insert citations, EndNote Online will automatically create a bibliography at the end of your document. You can also create an independent bibliography for grant reports, CVs, etc. by selecting references from your library and clicking on "Format" > "Bibliography".</li>
-</ol>
-<p>EndNote Online has some limitations compared to the desktop version of EndNote. For example, it has a limited number of bibliographic styles available, it does not support PDF annotation or attachment, and it does not have advanced features like smart groups or custom fields. However, it is still a useful tool for managing your references online for free.</p><p>Here are some more paragraphs for the article:</p>
-<p></p>
-<p>EndNote Online also offers some integration with other online services that can help you with your research. For example, you can access Web of Science and PubMed from within EndNote Online and import references directly to your library. You can also search for full-text articles and link them to your references using the Find Full Text feature.</p>
-<p>Another benefit of EndNote Online is that it syncs with the desktop version of EndNote, if you have it. This means that you can access your references from both platforms and keep them updated. You can also transfer references between EndNote Online and EndNote desktop using the Sync feature. However, you need to have the same email address for both accounts and use the same EndNote version.</p>
-<p>If you want to learn more about EndNote Online and how to use it effectively, you can check out the official website <a href="https://endnote.com/">https://endnote.com/</a> or the online help guide <a href="https://clarivate.libguides.com/endnote_training/endnote_online">https://clarivate.libguides.com/endnote_training/endnote_online</a>. You can also watch some video tutorials on YouTube <a href="https://www.youtube.com/playlist?list=PLC4FDE07827F7770A">https://www.youtube.com/playlist?list=PLC4FDE07827F7770A</a> or attend some online training sessions <a href="https://clarivate.com/webofsciencegroup/training/endnote/">https://clarivate.com/webofsciencegroup/training/endnote/</a>.</p> ddb901b051<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/Devil May Cry 3 Special Edition Crack Reloaded.md
DELETED
@@ -1,11 +0,0 @@
-<h2>devil may cry 3 special edition crack reloaded</h2><br /><p><b><b>DOWNLOAD</b> ☆☆☆ <a href="https://imgfil.com/2uy17z">https://imgfil.com/2uy17z</a></b></p><br /><br />
-
-If you like this game, BUY IT! Devil May Cry 3: Dante's Awakening Free Download. Warning! You are not allowed to view .torrent ... if you are not a registered user torrentsnuke.
-If you like this game, BUY IT! Devil May Cry 3: Dante's Awakening Free Download. A warning! You are not allowed to view .torrent, ... if you are not registered on the site torrentsnuke.ru Here you can download the mod for the game stalker call of pripyat for immortality.
-Games for girls for free.
-Games for boys - Download games for free on your computer.
-All games download for free.
-Download games for free on your computer. 8a78ff9644<br />
-<br />
-<br />
-<p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Football Manager 2014 Crash Dump Fix Skidrow Crack !NEW!.md
DELETED
@@ -1,9 +0,0 @@
-<h2>football manager 2014 crash dump fix skidrow crack</h2><br /><p><b><b>Download File</b> ✺✺✺ <a href="https://imgfil.com/2uxZLP">https://imgfil.com/2uxZLP</a></b></p><br /><br />
-
-Football Manager 2017 Black Screen Fix FM17 Patch - 2017 Football ... Football Manager 2017 Patch - 2017 Football Manager 2017 patch FM17 Patch.
-Football...
-Football Manager 2017 Black Screen Fix FM17 Patch - 2017 Football...
-Football Manager 2017 Black Screen Fix FM17 Patch - 2017 Football ... 8a78ff9644<br />
-<br />
-<br />
-<p></p>
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Car Racing Game Setup Download for PC Windows 7 How to Install and Run.md
DELETED
@@ -1,131 +0,0 @@
-
-<h1>Car Racing Game Setup Download for PC Windows 7</h1>
-<p>If you are a fan of speed, adrenaline, and thrill, you might be interested in playing car racing games. Car racing games are video games that simulate driving a vehicle on a track, a road, or an off-road terrain. They can be realistic, arcade-like, or futuristic, depending on the style and theme of the game. Car racing games are popular among gamers of all ages and preferences, as they offer a variety of challenges, modes, vehicles, and environments to choose from.</p>
-<h2>car racing game setup download for pc windows 7</h2><br /><p><b><b>Download</b> ::: <a href="https://urlin.us/2uSUH1">https://urlin.us/2uSUH1</a></b></p><br /><br />
-<p>One of the advantages of playing car racing games is that you can enjoy them on different platforms, including PC Windows 7. PC Windows 7 is a reliable and compatible operating system that can run many car racing games smoothly and efficiently. Playing car racing games on PC Windows 7 also gives you more control over the settings, graphics, and performance of the game. You can also use different input devices, such as a keyboard, a mouse, a joystick, or a steering wheel, to enhance your gaming experience.</p>
-<p>But how do you find and play car racing games on PC Windows 7? There are many sources where you can download car racing games for PC Windows 7, such as official websites, online stores, or third-party platforms. However, you need to be careful about the quality, safety, and legality of the game files that you download. You also need to follow the instructions for installing and running the game on your PC Windows 7.</p>
-<p>In this article, we will introduce you to some of the best car racing games for PC Windows 7 that you can download and play. We will also provide you with some information about their features, pros and cons, and how to download and install them on your PC Windows 7. Let's get started!</p>
-<h2>BeamNG.drive</h2>
-<h3>What is BeamNG.drive and what are its features?</h3>
-<p>BeamNG.drive is an open-world vehicle simulation game that lets you drive various types of vehicles in realistic physics-based scenarios. You can explore different maps, from urban streets to rural roads, from deserts to mountains, from airports to industrial zones. You can also customize your vehicles with different parts, colors, decals, and accessories. You can also create your own scenarios with the built-in editor or download user-made mods from the online community.</p>
-<p>BeamNG.drive is not just a car racing game, but also a sandbox game that allows you to experiment with different situations and outcomes. You can crash your vehicles into walls, trees, buildings, or other vehicles, and see how they deform and break apart. You can also test your driving skills in various challenges, such as time trials, stunt courses, police chases, or off <p>road adventures. You can also play with your friends online or offline in multiplayer mode.</p>
-<p>car racing game installation download for pc windows 7<br />
-car racing game setup free download for pc windows 7<br />
-car racing game setup download for computer windows 7<br />
-car racing game setup download for laptop windows 7<br />
-car racing game setup download for desktop windows 7<br />
-car racing game setup download for pc windows 7 32 bit<br />
-car racing game setup download for pc windows 7 64 bit<br />
-car racing game setup download for pc windows 7 offline<br />
-car racing game setup download for pc windows 7 online<br />
-car racing game setup download for pc windows 7 full version<br />
-car racing game configuration download for pc windows 7<br />
-car racing game installation file download for pc windows 7<br />
-car racing game installer download for pc windows 7<br />
-car racing game executable file download for pc windows 7<br />
-car racing game exe file download for pc windows 7<br />
-car racing game software download for pc windows 7<br />
-car racing game application download for pc windows 7<br />
-car racing game program download for pc windows 7<br />
-car racing game package download for pc windows 7<br />
-car racing game bundle download for pc windows 7<br />
-car driving game setup download for pc windows 7<br />
-car drifting game setup download for pc windows 7<br />
-car simulator game setup download for pc windows 7<br />
-car stunt game setup download for pc windows 7<br />
-car mechanic game setup download for pc windows 7<br />
-best car racing game setup download for pc windows 7<br />
-new car racing game setup download for pc windows 7<br />
-latest car racing game setup download for pc windows 7<br />
-top car racing game setup download for pc windows 7<br />
-popular car racing game setup download for pc windows 7<br />
-realistic car racing game setup download for pc windows 7<br />
-arcade car racing game setup download for pc windows 7<br />
-classic car racing game setup download for pc windows 7<br />
-retro car racing game setup download for pc windows 7<br />
-futuristic car racing game setup download for pc windows 7<br />
-need for speed car racing game setup download for pc windows 7<br />
-asphalt car racing game setup download for pc windows 7<br />
-city car racing game setup download for pc windows 7<br />
-street car racing game setup download for pc windows 7<br />
-track car racing game setup download for pc windows 7<br />
-off-road car racing game setup download for pc windows 7<br />
-rally car racing game setup download for pc windows 7<br />
-formula one car racing game setup download for pc windows 7<br />
-nascar car racing game setup download for pc windows 7<br />
-drag race car racing game setup download for pc windows 7<br />
-drift race car racing game setup download for pc windows 7<br />
-kart race car racing game setup download for pc windows 7</p>
-<h3>How to download and install BeamNG.drive on PC Windows 7?</h3>
-<p>To download and install BeamNG.drive on PC Windows 7, you need to follow these steps:</p>
-<ol>
-<li>Go to the official website of BeamNG.drive and click on the "Buy Now" button. You will be redirected to the Steam store, where you can purchase the game for $24.99.</li>
-<li>After you buy the game, you need to download and install Steam on your PC Windows 7. Steam is a digital distribution platform that allows you to manage your games and access online features.</li>
-<li>Once you have Steam installed, launch it and log in with your account. Then, go to your library and find BeamNG.drive. Click on the "Install" button and wait for the game to download and install on your PC Windows 7.</li>
-<li>After the installation is complete, you can launch the game from Steam or from your desktop shortcut. Enjoy!</li>
-</ol>
-<h3>Pros and cons of BeamNG.drive</h3>
-<p>BeamNG.drive is a fun and realistic vehicle simulation game that offers a lot of possibilities and freedom. However, it also has some drawbacks that you should be aware of. Here are some of the pros and cons of BeamNG.drive:</p>
-<table>
-<tr><th>Pros</th><th>Cons</th></tr>
-<tr><td>- Amazing graphics and physics that make the vehicles and environments look and feel real.</td><td>- High system requirements that may not run well on older or weaker PCs.</td></tr>
-<tr><td>- A large variety of vehicles, maps, mods, and scenarios to choose from.</td><td>- A steep learning curve that may be challenging for beginners or casual players.</td></tr>
-<tr><td>- A creative and active community that creates and shares new content and feedback.</td><td>- A lack of a clear storyline or objectives that may make the game boring or repetitive for some players.</td></tr>
-<tr><td>- A multiplayer mode that allows you to play with your friends online or offline.</td><td>- Some bugs and glitches that may affect the gameplay or performance of the game.</td></tr>
-</table>
-<h2>Need for Speed</h2>
-<h3>What is Need for Speed and what are its features?</h3>
-<p>Need for Speed is one of the most popular and successful car racing game franchises in the world. It has been around since 1994 and has released over 20 titles across different platforms. Need for Speed games are known for their fast-paced, arcade-style, and street racing gameplay. They also feature a variety of cars, tracks, modes, customization options, and storylines.</p>
-<p>One of the best Need for Speed games for PC Windows 7 is Need for Speed: Most Wanted (2012). This game is a reboot of the original Need for Speed: Most Wanted (2005) and is set in an open-world city called Fairhaven. You play as a street racer who has to compete with other racers, evade the police, and challenge the most wanted list. You can drive any car you see in the city, from exotic sports cars to muscle cars to SUVs. You can also upgrade your cars with performance parts, paint jobs, vinyls, license plates, and more. You can also participate in different events, such as races, pursuits, speed runs, ambushes, or milestones.</p>
-<h3>How to download and install Need for Speed on PC Windows 7?</h3>
-<p>To download and install Need for Speed: Most Wanted (2012) on PC Windows 7, you need to follow these steps:</p>
-<ol>
-<li>Go to the official website of Need for Speed: Most Wanted (2012) and click on the "Buy Now" button. You will be redirected to the Origin store, where you can purchase the game for $19.99.</li>
-<li>After you buy the game, you need to download and install Origin on your PC Windows 7. Origin is a digital distribution platform that allows you to manage your games and access online features.</li>
-<li>Once you have Origin installed, launch it and log in with your account. Then, go to your library and find Need for Speed: Most Wanted (2012). Click on the "Download" button and wait for the game to download and install on your PC Windows 7.</li>
-<li>After the installation is complete, you can launch the game from Origin or from your desktop shortcut. Enjoy!</li>
-</ol>
-<h3>Pros and cons of Need for Speed</h3>
-<p>Need for Speed: Most Wanted (2012) is an exciting and addictive car racing game that offers a lot of action and fun. However, it also has some drawbacks that you should be aware of. Here are some of the pros and cons of Need for Speed: Most Wanted (2012):</p>
-<table>
-<tr><th>Pros</th><th>Cons</th></tr>
-<tr><td>- Stunning graphics and sound effects that make the city and the cars look and sound amazing.</td><td>- High system requirements that may not run well on older or weaker PCs.</td></tr>
-<tr><td>- A large and diverse open-world city that you can explore and discover.</td><td>- A repetitive and shallow storyline that may not appeal to some players.</td></tr>
-<tr><td>- A huge selection of cars, customization options, and events to choose from.</td><td>- A lack of a manual transmission option that may disappoint some hardcore racers.</td></tr>
-<tr><td>- A multiplayer mode that allows you to play with your friends online or offline.</td><td>- Some bugs and glitches that may affect the gameplay or performance of the game.</td></tr>
-</table>
-<h2>City Racing</h2>
-<h3>What is City Racing and what are its features?</h3>
-<p>City Racing is a free car racing game that lets you drive around a big city and compete with other racers. You can choose from different cars, from sedans to sports cars, and customize them with different colors, wheels, spoilers, and more. You can also upgrade your cars with better engines, brakes, tires, and suspension. You can also repair your cars when they get damaged or dirty.</p>
-<p>City Racing is a simple and casual car racing game that offers a lot of freedom and fun. You can drive anywhere you want in the city, from highways to alleys, from parks to beaches. You can also find hidden bonuses, such as money, nitro, or extra points. You can also participate in different races, such as circuit races, sprint races, drag races, or knockout races. You can also challenge other racers on the street or join a gang of racers.</p>
-<h3>How to download and install City Racing on PC Windows 7?</h3>
-<p>To download and install City Racing on PC Windows 7, you need to follow these steps:</p>
-<ol>
-<li>Go to the official website of City Racing and click on the "Download" button. You will be redirected to a third-party platform called GameTop, where you can download the game for free.</li>
-<li>After you download the game file, double-click on it and follow the installation wizard. You may need to accept some terms and conditions and choose a destination folder for the game.</li>
-<li>After the installation is complete, you can launch the game from your desktop shortcut or from your start menu. Enjoy!</li>
-</ol>
-<h3>Pros and cons of City Racing</h3>
-<p>City Racing is a fun and free car racing game that offers a lot of variety and excitement. However, it also has some drawbacks that you should be aware of. Here are some of the pros and cons of City Racing:</p>
-<table>
-<tr><th>Pros</th><th>Cons</th></tr>
-<tr><td>- Free to download and play without any limitations or ads.</td><td>- Low-quality graphics and sound effects that may not look or sound appealing.</td></tr>
-<tr><td>- A large and diverse open-world city that you can explore and enjoy.</td><td>- A lack of a map or a GPS system that may make it hard to navigate or find your way around.</td></tr>
-<tr><td>- A wide range of cars, customization options, and races to choose from.</td><td>- A unrealistic and easy physics system that may make the driving too simple or boring.</td></tr>
-<tr><td>- A multiplayer mode that allows you to play with your friends online or offline.</td><td>- Some malware or viruses that may come with the game file or the third-party platform.</td></tr>
-</table>
-<h2>Conclusion</h2>
-<p>In conclusion, car racing games are video games that simulate driving a vehicle on a track, a road, or an off-road terrain. They are popular among gamers of all ages and preferences, as they offer a variety of challenges, modes, vehicles, and environments to choose from. Playing car racing games on PC Windows 7 also gives you more control over the settings, graphics, and performance of the game. You can also use different input devices, such as a keyboard, a mouse, a joystick, or a steering wheel, to enhance your gaming experience.</p>
-<p>In this article, we have introduced you to some of the best car racing games for PC Windows 7 that you can download and play. We have also provided you with some information about their features, pros and cons, and how to download and install confirm that you want to remove the game from your PC.</li>
-<li>Follow the instructions and prompts that appear on the screen to complete the update or uninstallation process.</li>
-</ol>
-<h3>Where to find more car racing games for PC Windows 7?</h3>
-<p>If you are looking for more car racing games for PC Windows 7, you can check out some of these websites that offer a variety of games for free or for a fee:</p>
-<ul>
-<li>[GameTop]: A website that offers free and legal car racing games for PC Windows 7, such as City Racing, Moto Racing, and Super Bikes.</li>
-<li>[Steam]: A website that offers a large collection of car racing games for PC Windows 7, such as BeamNG.drive, Assetto Corsa, and Dirt Rally.</li>
-<li>[Origin]: A website that offers some of the best car racing games for PC Windows 7, such as Need for Speed, Burnout Paradise, and Shift 2 Unleashed.</li>
-<li>[GOG]: A website that offers classic and DRM-free car racing games for PC Windows 7, such as FlatOut, Carmageddon, and Test Drive.</li>
-</ul></p> 197e85843d<br />
-<br />
-<br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Armored Squad Mechs vs Robots APK for Android - Online and Offline Action Game.md
DELETED
@@ -1,115 +0,0 @@
-<br />
-<h1>Armored Squad: Mechs vs Robots APK - A Fast Paced Online Action Game</h1>
-<p>If you are looking for a thrilling and exciting online action game with mechs, robots and tanks, then you should check out Armored Squad: Mechs vs Robots APK. This is a game that will keep you hooked for hours with its fast paced gameplay, colorful graphics, and various game modes. In this article, we will tell you everything you need to know about Armored Squad: Mechs vs Robots APK, including what it is, how to download and install it, why you should play it, and some tips and tricks for playing it.</p>
-<h2>armored squad mechs vs robots apk</h2><br /><p><b><b>DOWNLOAD</b> ✫✫✫ <a href="https://urlin.us/2uSYrS">https://urlin.us/2uSYrS</a></b></p><br /><br />
-<h2>What is Armored Squad: Mechs vs Robots APK?</h2>
-<p>Armored Squad: Mechs vs Robots APK is an online action game developed by FoxForce Games. It is available for free on Android devices. The game lets you control mechs, robots and tanks in online PVP battles with your friends and players from around the world. You can also play offline levels against AI bots if you don't have an internet connection. You can fight with lasers, swords, rocket launchers, and other weapons at the same time. You can also customize your machines with different parts, colors, and stickers. The game has a lot of game modes to choose from, such as Capture The Flag, Control Points, Bomb Delivery, Deathmatch, Team Deathmatch, Football, and Hail The King. The game also has three difficulty levels to suit your skills.</p>
-<h3>Features of Armored Squad: Mechs vs Robots APK</h3>
-<h4>- Online PVP battles with friends and players from around the world</h4>
-<p>One of the main features of Armored Squad: Mechs vs Robots APK is the online PVP battles. You can join or create a room with up to 16 players and compete in various game modes. You can chat with your teammates and opponents using the in-game chat system. You can also rank up in the leaderboard and earn rewards for your performance.</p>
-<h4>- Offline levels with AI bots for solo play</h4>
-<p>If you don't have an internet connection or you just want to practice your skills, you can play offline levels with AI bots. The game has 60 offline levels that you can play on different maps and scenarios. You can adjust the difficulty level of the bots according to your preference.</p>
-<p>armored squad mechs vs robots game download<br />
-armored squad mechs vs robots mod apk unlimited money<br />
-armored squad mechs vs robots online pvp battles<br />
-armored squad mechs vs robots offline levels<br />
-armored squad mechs vs robots apk latest version<br />
-armored squad mechs vs robots apk xapk<br />
-armored squad mechs vs robots apk for android<br />
-armored squad mechs vs robots apk free download<br />
-armored squad mechs vs robots apk full version<br />
-armored squad mechs vs robots apk no internet required<br />
-armored squad mechs vs robots apk mob.org<br />
-armored squad mechs vs robots apk combo<br />
-armored squad mechs vs robots apk pure<br />
-armored squad mechs vs robots apk rexdl<br />
-armored squad mechs vs robots apk revdl<br />
-armored squad mechs vs robots apk hack<br />
-armored squad mechs vs robots apk cheats<br />
-armored squad mechs vs robots apk obb<br />
-armored squad mechs vs robots apk data<br />
-armored squad mechs vs robots apk file<br />
-armored squad mechs vs robots gameplay<br />
-armored squad mechs vs robots review<br />
-armored squad mechs vs robots tips and tricks<br />
-armored squad mechs vs robots best weapons<br />
-armored squad mechs vs robots best robot<br />
-armored squad mechs vs robots best mech<br />
-armored squad mechs vs robots best tank<br />
-armored squad mechs vs robots best build<br />
-armored squad mechs vs robots best loadout<br />
-armored squad mechs vs robots best strategy<br />
-armored squad mechs vs robots multiplayer mode<br />
-armored squad mechs vs robots single player mode<br />
-armored squad mechs vs robots capture the flag mode<br />
-armored squad mechs vs robots control points mode<br />
-armored squad mechs vs robots bomb delivery mode<br />
-armored squad mechs vs robots deathmatch mode<br />
-armored squad mechs vs robots team deathmatch mode<br />
-armored squad mechs vs robots football mode<br />
-armored squad mechs vs robots hail the king mode<br />
-armored squad mechs vs robots force fields and jump jets mode<br />
-armored squad mechs vs robots boosters and shields mode<br />
-armored squad mechs vs robots lasers and swords mode<br />
-armored squad mechs vs robots rocket launchers and machine guns mode <br />
-armored squad mechs vs robots repair and sentry guns mode <br />
-armored squad mechs vs robots collect parts and assemble new robot mode <br />
-armored squad mechs vs robots achieve new ranks and unlock new features mode <br />
-armored squad mechs vs robots colorful graphics and sound effects <br />
-armored squad mechs vs robots fun and fast paced action game</p>
-<h4>- Multiple weapons and customization options for mechs, robots and tanks</h4>
-<p>Another feature of Armored Squad: Mechs vs Robots APK is the multiple weapons and customization options for your machines. You can equip your machines with different weapons such as lasers, swords, rocket launchers, machine guns, shotguns, flamethrowers, grenades, mines, etc. You can also use force fields, jump jets, boosters, shields, etc. to enhance your abilities. You can change the appearance of your machines by choosing different parts, colors, stickers, etc.</p>
-<h4>- Various game modes and difficulty levels</h4>
-<h3>How to download and install Armored Squad: Mechs vs Robots APK?</h3>
-<p>If you want to download and install Armored Squad: Mechs vs Robots APK on your Android device, you need to follow these simple steps:</p>
-<h4>- Requirements and compatibility</h4>
-<p>Before you download and install Armored Squad: Mechs vs Robots APK, you need to make sure that your device meets the minimum requirements and is compatible with the game. The game requires Android 4.2 or higher and at least 100 MB of free storage space. The game also supports most Android devices, but some older or low-end devices may experience performance issues or crashes.</p>
-<h4>- Steps to download and install Armored Squad: Mechs vs Robots APK</h4>
-<p>Once you have checked the requirements and compatibility, you can proceed to download and install Armored Squad: Mechs vs Robots APK by following these steps:</p>
-<ol>
-<li>Go to the official website of Armored Squad: Mechs vs Robots APK or any trusted third-party source that provides the APK file of the game.</li>
-<li>Click on the download button and wait for the APK file to be downloaded on your device.</li>
-<li>After the download is complete, locate the APK file in your device's file manager and tap on it to start the installation process.</li>
-<li>If you see a warning message that says "Install blocked" or "Unknown sources", you need to enable the installation of apps from unknown sources in your device's settings. To do this, go to Settings > Security > Unknown sources and toggle it on.</li>
-<li>Once you have enabled the installation of apps from unknown sources, go back to the APK file and tap on it again to continue the installation process.</li>
-<li>Follow the on-screen instructions and grant the necessary permissions to install Armored Squad: Mechs vs Robots APK on your device.</li>
-<li>After the installation is complete, you can launch the game from your app drawer or home screen and enjoy playing it.</li>
-</ol>
-<h3>Why should you play Armored Squad: Mechs vs Robots APK?</h3>
-<p>There are many reasons why you should play Armored Squad: Mechs vs Robots APK. Here are some of them:</p>
-<h4>- Fun and addictive gameplay with colorful graphics and sound effects</h4>
-<p>The gameplay of Armored Squad: Mechs vs Robots APK is fun and addictive. You can control mechs, robots and tanks in online PVP battles with your friends and players from around the world. You can fight with lasers, swords, rocket launchers, and other weapons at the same time. You can also customize your machines with different parts, colors, and stickers. The game has colorful graphics and sound effects that make the game more immersive and enjoyable.</p>
-<h4>- Challenging and rewarding missions and achievements</h4>
-<p>The game also has challenging and rewarding missions and achievements that you can complete. You can play offline levels against AI bots to earn coins, parts, and experience points. You can also complete daily tasks and achievements to get more rewards. You can use the coins and parts to upgrade your machines and unlock new weapons and equipment. You can also level up your machines and increase their stats.</p>
-<h4>- Competitive and cooperative multiplayer modes</h4>
-<h3>Tips and tricks for playing Armored Squad: Mechs vs Robots APK</h3>
-<p>If you want to improve your skills and have more fun playing Armored Squad: Mechs vs Robots APK, you can follow these tips and tricks:</p>
-<h4>- Use the right weapons and equipment for each situation</h4>
-<p>The game has a lot of weapons and equipment that you can use for your machines. However, not all of them are suitable for every situation. You need to choose the right weapons and equipment for each game mode, map, and enemy. For example, if you are playing Capture The Flag, you might want to use a fast and agile machine with a booster and a sword to capture the flag quickly. If you are playing Deathmatch, you might want to use a heavy and durable machine with a shield and a rocket launcher to deal more damage and survive longer.</p>
-<h4>- Repair your allies and deploy sentry guns for defense</h4>
-<p>The game also has some items that you can use to help your allies and defend your base. You can use a repair tool to fix your allies' machines and restore their health. You can also deploy sentry guns that will automatically shoot at any enemies that come near them. These items can be very useful for supporting your team and protecting your objectives.</p>
-<h4>- Collect parts of destroyed machines to assemble new robots</h4>
-<p>Another cool feature of the game is that you can collect parts of destroyed machines and use them to assemble new robots. You can find these parts scattered around the map or dropped by enemies. You can then use them to create new robots with different abilities and stats. You can also upgrade these robots by adding more parts or changing their colors and stickers.</p>
-<h3>Conclusion</h3>
-<p>Armored Squad: Mechs vs Robots APK is a fast paced online action game that will keep you hooked for hours with its fun and addictive gameplay, colorful graphics, and various game modes. You can control mechs, robots and tanks in online PVP battles with your friends and players from around the world. You can also play offline levels against AI bots if you don't have an internet connection. You can fight with lasers, swords, rocket launchers, and other weapons at the same time. You can also customize your machines with different parts, colors, and stickers. The game has a lot of game modes to choose from, such as Capture The Flag, Control Points, Bomb Delivery, Deathmatch, Team Deathmatch, Football, and Hail The King. The game also has three difficulty levels to suit your skills.</p>
-<p>If you are looking for a thrilling and exciting online action game with mechs, robots and tanks, then you should download and install Armored Squad: Mechs vs Robots APK on your Android device today. You will not regret it!</p>
-<h3>FAQs</h3>
-<p>Here are some frequently asked questions about Armored Squad: Mechs vs Robots APK:</p>
-<ol>
-<li>Is Armored Squad: Mechs vs Robots APK safe to download and install?</li>
-<p>Yes, Armored Squad: Mechs vs Robots APK is safe to download and install on your Android device. The game does not contain any viruses or malware that could harm your device or data. However, you should always download the APK file from a trusted source or the official website of the game.</p>
-<li>How much does Armored Squad: Mechs vs Robots APK cost?</li>
-<p>Armored Squad: Mechs vs Robots APK is free to download and play on your Android device. However, the game does have some in-app purchases that you can buy with real money to get more coins, parts, weapons, equipment, etc. These purchases are optional and not required to enjoy the game.</p>
-<li>How can I play Armored Squad: Mechs vs Robots APK with my friends?</li>
-<p>You can play Armored Squad: Mechs vs Robots APK with your friends by joining or creating a room with up to 16 players in the online PVP mode. You can invite your friends by sending them a room code or by using the in-game chat system. You can also play with random players from around the world by joining an existing room.</p>
-<li>How can I contact the developers of Armored Squad: Mechs vs Robots APK?</li>
-<p>You can contact the developers of Armored Squad: Mechs vs Robots APK by sending them an email at [email protected] or by visiting their Facebook page at https://www.facebook.com/ArmoredSquad/. You can also leave feedback, suggestions, bug reports, or reviews on their Google Play Store page at https://play.google.com/store/apps/details?id=com.FoxForceGames.ArmoredSquad&hl=en_US&gl=US.</ OK, I have finished writing the article based on the outline. Here is the last part of the article: <li>What are some alternatives to Armored Squad: Mechs vs Robots APK?</li>
-<p>If you like Armored Squad: Mechs vs Robots APK, you might also like some other online action games with mechs, robots and tanks. Here are some of them:</p>
-<ul>
-<li>War Robots - A 6v6 online multiplayer game where you can pilot giant robots and fight with your team in various maps and modes. You can also customize your robots with different weapons, modules, and skins.</li>
-<li>Mech Arena: Robot Showdown - A 5v5 online multiplayer game where you can control mechs and battle with your team in different arenas and modes. You can also upgrade your mechs with different parts, weapons, and abilities.</li>
-<li>World of Tanks Blitz - A 7v7 online multiplayer game where you can drive tanks and fight with your team in various maps and modes. You can also collect and upgrade over 300 tanks from different countries and eras.</li>
-</ul>
-<p>I hope you enjoyed reading this article and learned something new about Armored Squad: Mechs vs Robots APK. If you have any questions or comments, feel free to leave them below. Thank you for your time and attention!</p> 197e85843d<br />
-<br />
-<br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Getting Over It Old Version APK for Android - Free and Easy.md
DELETED
@@ -1,154 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Getting Over It: Old Version APK Download</h1>
|
3 |
-
<p>If you are looking for a challenging and rewarding game that will test your patience and perseverance, you might want to try Getting Over It with Bennett Foddy. However, if you are not satisfied with the latest version of the game, you might want to download and install the old version APK file instead. In this article, we will tell you everything you need to know about Getting Over It, how to get the old version APK file, and how to play and enjoy the game.</p>
|
4 |
-
<h2>getting over it old version apk download</h2><br /><p><b><b>DOWNLOAD</b> … <a href="https://urlin.us/2uSYr3">https://urlin.us/2uSYr3</a></b></p><br /><br />
|
5 |
-
<h2>What is Getting Over It?</h2>
|
6 |
-
<p>Getting Over It with Bennett Foddy is a platform game developed by Bennett Foddy, a game designer and philosopher who is known for creating frustrating and difficult games. The game was released in October 2017 as part of the Humble Monthly bundle, and later on Steam, iOS, Android, and Linux platforms. </p>
|
7 |
-
<h3>A brief introduction to the game and its developer</h3>
|
8 |
-
<p>The game revolves around a man named Diogenes who is stuck in a metal cauldron and uses a sledgehammer to climb up a steep mountain. The game is inspired by Jazzuo's 2002 B-Game classic Sexy Hiking, which Foddy played as a teenager in Australia. </p>
|
9 |
-
<p>Foddy said that he made the game for a certain kind of person who likes difficult games that can make them feel new types of frustration. He also said that he wanted to explore the theme of perseverance and how people cope with failure. </p>
|
10 |
-
<p>The game features voice-over commentary from Foddy himself, who makes philosophical observations about the game and its design, as well as quotes from various sources relating to disappointment and resilience. The game also has a hidden chatroom that can only be accessed by players who have reached the top of the mountain. </p>
|
11 |
-
<h3>The main features and benefits of the game</h3>
|
12 |
-
<p>Getting Over It with Bennett Foddy is a game that offers a unique and memorable experience for players who are willing to take on the challenge. Some of the main features and benefits of the game are:</p>
|
13 |
-
<ul>
<li>Simple but realistic physics-based gameplay that requires skill, precision, and practice.</li>
<li>Minimalist but beautiful graphics that contrast the serene environment with the frustrating gameplay.</li>
<li>An original soundtrack of soothing guitar music that changes according to the player's progress.</li>
<li>Humorous and insightful narration that adds depth and personality to the game.</li>
<li>A mysterious and rewarding ending that awaits those who manage to reach the top of the mountain.</li>
<li>High replay value, as players can try to beat their own or other players' records, or explore different paths and secrets.</li>
</ul>

<h3>The reasons why some players prefer the old version</h3>

<p>While Getting Over It with Bennett Foddy has received mostly positive reviews from critics and players alike, some players prefer to play the old version of the game instead of the latest one. Some of their reasons are:</p>

<ul>
<li>They find it more challenging and satisfying, as it has fewer checkpoints and more obstacles.</li>
<li>They find it more authentic and original, as it has less polish and more glitches.</li>
<li>They find it more nostalgic and sentimental, as it reminds them of their first time playing the game or watching others play it.</li>
</ul>

<h2>How to download and install the old version of Getting Over It?</h2>

<p>If you are interested in playing the old version of Getting Over It with Bennett Foddy, you will need to download and install the old version APK file on your Android device. An APK file is an Android Package file that contains the installation files for an app. Before you proceed, however, you should be aware of the potential risks and drawbacks of using the old version APK file.</p>

<h3>The sources and links for the old version APK file</h3>

<p>Many websites and platforms offer the old version APK file for Getting Over It with Bennett Foddy, but not all of them are reliable and safe. Some may contain malware, viruses, or other harmful software that can damage your device or compromise your privacy. Therefore, you should be careful and selective when choosing where to download the old version APK file.</p>

<p>One of the most trusted and popular sources for the old version APK file is APKPure, a website that provides free APK files for various apps and games. You can download the old version APK file for Getting Over It with Bennett Foddy from APKPure by following this link: </p>

<p>Another source you can use is APKMirror, a website that hosts a large collection of APK files for different apps and games. You can download the old version APK file for Getting Over It with Bennett Foddy from APKMirror by following this link: </p>

<h3>The steps and tips for installing the old version APK file</h3>

<p>After you have downloaded the old version APK file for Getting Over It with Bennett Foddy from a reliable source, you will need to install it on your Android device. First, enable the option to install apps from unknown sources, which allows you to install apps that do not come from the Google Play Store or other official sources.</p>

<p>To enable this option, follow these steps:</p>

<ol>
<li>Go to your device's settings and tap on Security or Privacy.</li>
<li>Find and tap on the option that says Unknown Sources or Install Unknown Apps.</li>
<li>Toggle on the switch or check the box that allows you to install apps from unknown sources.</li>
<li>Confirm your choice by tapping on OK or Allow.</li>
</ol>

<p>After you have enabled this option, you can install the old version APK file for Getting Over It with Bennett Foddy by following these steps (or sideload it from a computer, as sketched right after this list):</p>

<ol>
<li>Locate the downloaded old version APK file in your device's storage or file manager.</li>
<li>Tap on the old version APK file to open it.</li>
<li>Tap on Install and wait for the installation process to finish.</li>
<li>Tap on Open or Done to launch or exit the app.</li>
</ol>
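<p>If you prefer to install from a computer, you can sideload the same APK over USB instead. The snippet below is a minimal sketch, not part of the original guide: it assumes adb is installed on the computer, USB debugging is enabled on the device, and the file name is a placeholder for wherever you saved the download.</p>

<pre><code># Hypothetical helper: sideload an APK with adb from a computer.
import subprocess

# "adb install -r" replaces an existing installation while keeping its data.
subprocess.run(["adb", "install", "-r", "getting-over-it-old.apk"], check=True)
</code></pre>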
<h3>The potential risks and drawbacks of using the old version APK file</h3>

<p>While using the old version APK file for Getting Over It with Bennett Foddy may have some advantages, it also comes with risks and drawbacks that you should be aware of. Some of them are:</p>

<ul>
<li>You may not be able to access some of the features or updates that are available in the latest version of the game.</li>
<li>You may encounter bugs, errors, or crashes that affect your gameplay experience.</li>
<li>You may not be able to sync your progress or achievements with other platforms or devices.</li>
<li>You may violate the terms and conditions of the game's developer or publisher by using an unofficial or modified version of the game.</li>
<li>You may expose your device or data to malware, viruses, or other harmful software bundled with the old version APK file.</li>
</ul>

<h2>How to play and enjoy the old version of Getting Over It?</h2>

<p>Once you have successfully downloaded and installed the old version of Getting Over It with Bennett Foddy on your Android device, you can start playing and enjoying the game. If you are new to the game or need some guidance, here are some tips and tricks that can help you beat it.</p>

<h3>The basic gameplay and controls of the game</h3>

<p>The gameplay and controls of Getting Over It with Bennett Foddy are simple but challenging. Your goal is to use your sledgehammer to climb up a mountain made of various objects and obstacles. You swing the sledgehammer by dragging your finger across the screen, and you can also use it to hook onto objects, push yourself off surfaces, or balance yourself in mid-air. However, you need to be careful not to fall down or lose your grip, as there is no save or undo button.</p>

<p>The game is designed to be frustrating and unforgiving, as you can lose hours of progress in a matter of seconds. However, it is also rewarding and satisfying, as you can overcome seemingly impossible challenges and reach new heights.</p>

<h3>The tips and tricks for beating the game</h3>

<p>Getting Over It with Bennett Foddy requires a lot of skill, patience, and perseverance, but some tips and tricks can help you beat the game faster and more easily. Some of them are:</p>

<ul>
<li>Practice and master the basic movements and techniques of the sledgehammer, such as swinging, hooking, pushing, and balancing.</li>
<li>Learn and memorize the layout and features of the mountain, such as the objects, obstacles, paths, and shortcuts.</li>
<li>Plan and execute your moves carefully and strategically, as every move can have a significant impact on your progress.</li>
<li>Adjust your speed and momentum to the situation: sometimes you need to be fast and agile, and sometimes slow and steady.</li>
<li>Stay calm and focused, as getting angry or frustrated can affect your performance and judgment.</li>
<li>Listen to the narration and music, as they can provide hints, insights, or motivation.</li>
<li>Take breaks and relax, as playing for too long can cause fatigue or stress.</li>
</ul>

<h3>The rewards and achievements for completing the game</h3>

<p>Getting Over It with Bennett Foddy offers several rewards and achievements for completing the game. Some of them are:</p>

<ul>
<li>You will get to see the ending of the game, which is a surprise that we will not spoil here.</li>
<li>You will get access to the hidden chatroom, where you can chat with other players who have reached the top of the mountain.</li>
<li>You will unlock a golden cauldron and a golden sledgehammer, cosmetic items that show off your accomplishment.</li>
<li>You will earn some Steam achievements, such as "Getting Over It", "The Golden God", "The End", and "Speedrunner".</li>
<li>You will feel a sense of pride and satisfaction for overcoming one of the most difficult games ever made.</li>
</ul>

<h2>Conclusion</h2>

<p>Getting Over It with Bennett Foddy is not a game for everyone. It can make you feel frustrated, angry, or hopeless, but it can also make you feel happy, proud, or hopeful. It is a game that can teach you about perseverance, resilience, and failure; a game that can challenge you, reward you, and inspire you.</p>

<p>If you are interested in playing the old version of Getting Over It with Bennett Foddy on your Android device, you can download and install the old version APK file from a reliable source. However, you should be aware of the potential risks and drawbacks of using the old version APK file, and follow the tips and tricks above to beat the game faster and more easily.</p>

<p>If you are ready to take on the challenge of Getting Over It with Bennett Foddy, you can download the old version APK file from one of these links:</p>

<p><a href="">APKPure</a></p>

<p><a href="">APKMirror</a></p>

<h2>Frequently Asked Questions</h2>

<p>Here are some frequently asked questions about Getting Over It with Bennett Foddy:</p>

<h3>Q: How long does it take to beat Getting Over It with Bennett Foddy?</h3>

<p>A: It depends on your skill level, experience, and luck. Some players can beat the game in less than an hour, while others may take days or weeks. The world record for speedrunning the game is currently 1 minute 56 seconds.</p>

<h3>Q: Who is Bennett Foddy?</h3>

<p>A: Bennett Foddy is a game designer and philosopher known for creating frustrating and difficult games. He is also a professor at New York University's Game Center.</p>

<h3>Q: What is Diogenes syndrome?</h3>

<p>A: Diogenes syndrome is a behavioral disorder characterized by extreme self-neglect, hoarding, social withdrawal, and lack of shame. The name comes from Diogenes of Sinope, an ancient Greek philosopher who lived in a barrel.</p>

<h3>Q: What are some other games similar to Getting Over It with Bennett Foddy?</h3>

<p>A: Some other games similar to Getting Over It with Bennett Foddy are:</p>

<ul>
<li>Sexy Hiking, the game that inspired Getting Over It with Bennett Foddy, where you control a man with a hammer who tries to climb a mountain of garbage.</li>
<li>QWOP, another game by Bennett Foddy, where you control a runner's legs with four keys and try to run 100 meters.</li>
<li>I Am Bread, a game where you control a slice of bread that wants to become toast by moving around various environments.</li>
<li>Octodad: Dadliest Catch, a game where you control an octopus disguised as a human who tries to keep his identity secret from his family and society.</li>
<li>Surgeon Simulator, a game where you perform various surgical operations with clumsy and inaccurate controls.</li>
</ul>

<h3>Q: What is the meaning or message of Getting Over It with Bennett Foddy?</h3>

<p>A: The meaning or message of Getting Over It with Bennett Foddy is open to interpretation and debate. Some possible interpretations are:</p>

<ul>
<li>The game is a metaphor for life, where you face many challenges and setbacks but also have the opportunity to grow and learn from them.</li>
<li>The game is a commentary on the nature of games, where you seek challenge and satisfaction but also have to deal with frustration and failure.</li>
<li>The game is a reflection of the developer's own experiences and thoughts, sharing his views on perseverance, resilience, and failure.</li>
<li>The game is a joke or a prank, where the developer tries to make fun of or annoy players with his absurd and unfair design choices.</li>
</ul>
spaces/1phancelerku/anime-remove-background/Descarga Totally Reliable Delivery Service APK 1.4121 (Todo Desbloqueado) y disfruta de un divertido simulador de reparto.md
DELETED
@@ -1,96 +0,0 @@
<h1>Totally Reliable Delivery Service APK Todo Desbloqueado: How to Download and Play this Hilarious Simulator</h1>

<p>If you are looking for a game that will make you laugh out loud, you should try Totally Reliable Delivery Service. This is a game where you have to deliver packages to various locations, but with a twist: everything in the game is physics-based, which means you will run into a lot of hilarious situations and accidents along the way. You can also play with your friends online and have even more fun together. In this article, we will show you how to download and play Totally Reliable Delivery Service APK Todo Desbloqueado, a modded version of the game that unlocks everything for free.</p>

<p><b>DOWNLOAD --->>> <a href="https://jinyurl.com/2uNLgz">https://jinyurl.com/2uNLgz</a></b></p>

<h2>What is Totally Reliable Delivery Service?</h2>

<h3>A colorful and chaotic game of delivering packages</h3>

<p>Totally Reliable Delivery Service is a simulation game released in 2020 by TinyBuild and We're Five Games. It puts you in the role of a delivery person who has to deliver packages to various locations in a large open world. The game has a cartoonish, colorful graphics style that adds to its charm and humor, and it offers many vehicles and tools for transporting packages, such as trucks, planes, helicopters, boats, rockets, catapults, and more. However, it also has a realistic physics engine that makes everything unpredictable and chaotic: you will have to deal with ragdoll physics, gravity, collisions, explosions, and other obstacles that make your delivery job harder and funnier.</p>

<h3>A multiplayer mode that lets you play with your friends</h3>

<p>One of the best features of Totally Reliable Delivery Service is its multiplayer mode. You can play with up to three other players online or locally, cooperating or competing in delivering packages. You can also customize your character's appearance and clothes to make them look more unique. The multiplayer mode adds variety and replay value to the game, as you can explore different areas, try different vehicles, and create your own challenges and scenarios with your friends. You can also chat with your friends using voice or text messages and share your funny moments with them.</p>

<h2>How to download Totally Reliable Delivery Service APK Todo Desbloqueado?</h2>

<h3>The benefits of downloading the modded version</h3>

<p>Totally Reliable Delivery Service is a paid game that costs $4.99 on the Google Play Store. However, if you want to enjoy the game for free and without any limitations, you can download Totally Reliable Delivery Service APK Todo Desbloqueado. This is a modded version of the game that unlocks everything for free, including all the vehicles, tools, clothes, maps, and modes. You can also play online with other players who have the same version of the game. This way, you can have more fun and options without spending any money.</p>

<h3>The steps to download and install the APK file</h3>

<p>To download and install Totally Reliable Delivery Service APK Todo Desbloqueado, follow these simple steps:</p>

<ol>
<li>Go to [this link] and click on the download button.</li>
<li>Wait for the APK file to be downloaded on your device.</li>
<li>Go to your device's settings and enable the installation of apps from unknown sources.</li>
<li>Locate the APK file in your device's file manager and tap on it.</li>
<li>Follow the instructions on the screen and wait for the installation to be completed.</li>
<li>Launch the game and enjoy playing Totally Reliable Delivery Service APK Todo Desbloqueado.</li>
</ol>

<h2>How to play Totally Reliable Delivery Service APK Todo Desbloqueado?</h2>

<h3>The basic controls and mechanics of the game</h3>

<p>Totally Reliable Delivery Service APK Todo Desbloqueado is easy to learn but hard to master. The game has simple controls that let you move, jump, grab, and interact with objects and vehicles, plus on-screen buttons for your vehicle's functions, such as steering, accelerating, braking, and flying. However, the realistic physics engine makes everything unpredictable and chaotic: ragdoll physics, gravity, collisions, explosions, and other obstacles will make your delivery job harder and funnier. You will also have to balance speed, accuracy, and safety when delivering the packages, as you are rated on these factors.</p>
<p>totally reliable delivery service mod apk todo desbloqueado<br />
|
26 |
-
descargar totally reliable delivery service apk todo desbloqueado<br />
|
27 |
-
totally reliable delivery service apk full todo desbloqueado<br />
|
28 |
-
totally reliable delivery service apk hack todo desbloqueado<br />
|
29 |
-
totally reliable delivery service apk gratis todo desbloqueado<br />
|
30 |
-
totally reliable delivery service apk premium todo desbloqueado<br />
|
31 |
-
totally reliable delivery service apk ultima version todo desbloqueado<br />
|
32 |
-
totally reliable delivery service apk mega todo desbloqueado<br />
|
33 |
-
totally reliable delivery service apk mediafire todo desbloqueado<br />
|
34 |
-
totally reliable delivery service apk android todo desbloqueado<br />
|
35 |
-
totally reliable delivery service apk sin internet todo desbloqueado<br />
|
36 |
-
totally reliable delivery service apk sin anuncios todo desbloqueado<br />
|
37 |
-
totally reliable delivery service apk con dinero infinito todo desbloqueado<br />
|
38 |
-
totally reliable delivery service apk con todos los vehiculos todo desbloqueado<br />
|
39 |
-
totally reliable delivery service apk con todos los mapas todo desbloqueado<br />
|
40 |
-
totally reliable delivery service apk con todos los personajes todo desbloqueado<br />
|
41 |
-
totally reliable delivery service apk con todos los trajes todo desbloqueado<br />
|
42 |
-
totally reliable delivery service apk con todos los accesorios todo desbloqueado<br />
|
43 |
-
totally reliable delivery service apk con todos los modos de juego todo desbloqueado<br />
|
44 |
-
totally reliable delivery service apk con todos los niveles todo desbloqueado<br />
|
45 |
-
como descargar totally reliable delivery service apk todo desbloqueado<br />
|
46 |
-
como instalar totally reliable delivery service apk todo desbloqueado<br />
|
47 |
-
como jugar totally reliable delivery service apk todo desbloqueado<br />
|
48 |
-
como actualizar totally reliable delivery service apk todo desbloqueado<br />
|
49 |
-
como conseguir totally reliable delivery service apk todo desbloqueado<br />
|
50 |
-
donde descargar totally reliable delivery service apk todo desbloqueado<br />
|
51 |
-
donde encontrar totally reliable delivery service apk todo desbloqueado<br />
|
52 |
-
donde comprar totally reliable delivery service apk todo desbloqueado<br />
|
53 |
-
que es totally reliable delivery service apk todo desbloqueado<br />
|
54 |
-
que ofrece totally reliable delivery service apk todo desbloqueado<br />
|
55 |
-
que contiene totally reliable delivery service apk todo desbloqueado<br />
|
56 |
-
que necesita totally reliable delivery service apk todo desbloqueado<br />
|
57 |
-
que ventajas tiene totally reliable delivery service apk todo desbloqueado<br />
|
58 |
-
que requisitos tiene totally reliable delivery service apk todo desbloqueado<br />
|
59 |
-
que opiniones tiene totally reliable delivery service apk todo desbloqueado<br />
|
60 |
-
para que sirve totally reliable delivery service apk todo desbloqueado<br />
|
61 |
-
para que se usa totally reliable delivery service apk todo desbloqueado<br />
|
62 |
-
para que es bueno totally reliable delivery service apk todo desbloqueado<br />
|
63 |
-
para quien es totalmente confiable el servicio de entrega de APK Todo Desloquedo (Spanish translation of the original keyword)<br />
|
64 |
-
por que descargar totalmente confiable el servicio de entrega de APK Todo Desloquedo (Spanish translation of the original keyword)</p>
|
65 |
-
<h3>The different modes and challenges of the game</h3>

<p>Totally Reliable Delivery Service APK Todo Desbloqueado has many modes and challenges that you can play and enjoy. You can choose from the following modes:</p>

<ul>
<li>Delivery Mode: This is the main mode of the game, where you have to deliver packages to various locations in the open world. You can choose from different difficulty levels and types of packages, such as fragile, explosive, or heavy. You can also play this mode with your friends online or locally and cooperate or compete with each other.</li>
<li>Sandbox Mode: In this mode you can explore the open world without any objectives or limitations. You can use any vehicle or tool you want and create your own scenarios and challenges, alone or with your friends online or locally.</li>
<li>Minigames Mode: In this mode you can play various minigames based on delivering packages or using vehicles. Some examples are Rocket League, where you score goals with a rocket-powered car; Golf, where you hit a golf ball with a golf club; and Bowling, where you knock down pins with a bowling ball. You can also play this mode with your friends online or locally and compete for the best score.</li>
</ul>

<h3>The tips and tricks to have more fun and avoid frustration</h3>

<p>Totally Reliable Delivery Service APK Todo Desbloqueado is meant to be fun and humorous, but it can also be frustrating at times. Here are some tips and tricks that can help:</p>

<ul>
<li>Experiment with different vehicles and tools: each vehicle and tool has its own advantages and disadvantages, so try them all and see which one suits your style and preference. You might also discover hidden features or secrets that make your delivery job easier or harder.</li>
<li>Use the environment to your advantage: the open world is full of different areas, such as cities, islands, mountains, deserts, and forests, each with its own characteristics and challenges. Look for shortcuts, hiding spots, ramps, bridges, and tunnels, and watch out for hazards such as traffic, animals, weather, water, and fire.</li>
<li>Have fun with your friends: the game is more fun when you play with others, cooperating or competing in deliveries and minigames, chatting along the way, and customizing your characters.</li>
</ul>

<h2>Conclusion</h2>

<p>Totally Reliable Delivery Service APK Todo Desbloqueado is a game that will make you laugh out loud with its colorful and chaotic package-delivery gameplay. You can also play with your friends online or locally and have even more fun together. You can download Totally Reliable Delivery Service APK Todo Desbloqueado for free from [this link] and enjoy playing this hilarious simulator.</p>

<h2>FAQs</h2>

<p>Here are some frequently asked questions about Totally Reliable Delivery Service APK Todo Desbloqueado:</p>

<ol>
<li><b>What are the requirements to play Totally Reliable Delivery Service APK Todo Desbloqueado?</b><br>
You need an Android device running at least Android 5.0 with 1 GB of RAM, plus enough storage space to install the APK file, which is about 300 MB.</li>
<li><b>Is Totally Reliable Delivery Service APK Todo Desbloqueado safe to download and install?</b><br>
Yes. The APK file has been scanned and verified by various antivirus programs and does not contain any malware or viruses. However, you should always download the APK file from a trusted source and enable the installation of apps from unknown sources in your device's settings (see the checksum sketch after this list for an extra precaution).</li>
<li><b>Can I play Totally Reliable Delivery Service APK Todo Desbloqueado offline?</b><br>
Yes. You can play the delivery mode and the sandbox mode without an internet connection; you only need a connection for the online multiplayer mode and the minigames mode.</li>
<li><b>Can I play Totally Reliable Delivery Service APK Todo Desbloqueado on PC?</b><br>
Yes. You will need an Android emulator, such as BlueStacks or NoxPlayer, to run the APK file on your PC, and you can play with a controller or a keyboard and mouse.</li>
<li><b>What are some similar games to Totally Reliable Delivery Service APK Todo Desbloqueado?</b><br>
Some similar games are Human: Fall Flat, Gang Beasts, Goat Simulator, Octodad: Dadliest Catch, and Fall Guys: Ultimate Knockout.</li>
</ol>
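<p>As an extra precaution beyond antivirus scans, you can compare the downloaded file's SHA-256 digest against one published by the source, when available. The snippet below is a minimal sketch, not part of the original guide; the file name is a placeholder for wherever you saved the download.</p>

<pre><code># Hypothetical check: print an APK's SHA-256 digest for comparison with a published one.
import hashlib

def sha256_of(path):
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(8192), b""):
            digest.update(chunk)
    return digest.hexdigest()

print(sha256_of("totally-reliable-delivery-service.apk"))
</code></pre>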
spaces/1yukikaze/img-to-music/app.py
DELETED
@@ -1,333 +0,0 @@
import gradio as gr
import openai
import numpy as np
import time
import base64
import ffmpeg
from sentence_transformers import SentenceTransformer
from audio2numpy import open_audio
import httpx
import json
import os
import requests
import urllib
import pydub
from os import path
from pydub import AudioSegment
import re

# Mubert B2B credentials are read from the environment.
MUBERT_LICENSE = os.environ.get('MUBERT_LICENSE')
MUBERT_TOKEN = os.environ.get('MUBERT_TOKEN')

#img_to_text = gr.Blocks.load(name="spaces/pharma/CLIP-Interrogator")
img_to_text = gr.Blocks.load(name="spaces/fffiloni/CLIP-Interrogator-2")

from share_btn import community_icon_html, loading_icon_html, share_js
from utils import get_tags_for_prompts, get_mubert_tags_embeddings

minilm = SentenceTransformer('all-MiniLM-L6-v2')
mubert_tags_embeddings = get_mubert_tags_embeddings(minilm)


def get_pat_token():
    # Request a PAT (personal access token) from the Mubert B2B API.
    r = httpx.post('https://api-b2b.mubert.com/v2/GetServiceAccess',
                   json={
                       "method": "GetServiceAccess",
                       "params": {
                           "email": "[email protected]",
                           "phone": "+11234567890",
                           "license": MUBERT_LICENSE,
                           "token": MUBERT_TOKEN,
                       }
                   })

    rdata = json.loads(r.text)
    assert rdata['status'] == 1, "probably incorrect e-mail"
    pat = rdata['data']['pat']
    return pat


def get_music(pat, prompt, track_duration, gen_intensity, gen_mode):
    # The TTM endpoint rejects overly long prompts, so truncate to 200 characters.
    if len(prompt) > 200:
        prompt = prompt[:200]

    r = httpx.post('https://api-b2b.mubert.com/v2/TTMRecordTrack',
                   json={
                       "method": "TTMRecordTrack",
                       "params":
                       {
                           "text": prompt,
                           "pat": pat,
                           "mode": gen_mode,
                           "duration": track_duration,
                           "intensity": gen_intensity,
                           "format": "wav"
                       }
                   })

    rdata = json.loads(r.text)
    assert rdata['status'] == 1, rdata['error']['text']
    track = rdata['data']['tasks'][0]['download_link']
    print(track)

    local_file_path = "sample.wav"

    # Download the generated track, retrying a few times while Mubert renders it.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7; rv:93.0) Gecko/20100101 Firefox/93.0'}

    retries = 3
    delay = 5  # in seconds
    while retries > 0:
        response = requests.get(track, headers=headers)
        if response.status_code == 200:
            break
        retries -= 1
        time.sleep(delay)
    print(f"{response}")

    # Save the downloaded content to a local file.
    with open(local_file_path, 'wb') as f:
        f.write(response.content)
    return local_file_path, track


def get_results(text_prompt, track_duration, gen_intensity, gen_mode):
    pat_token = get_pat_token()
    music = get_music(pat_token, text_prompt, track_duration, gen_intensity, gen_mode)
    return pat_token, music[0], music[1]


def get_prompts(uploaded_image, track_duration, gen_intensity, gen_mode, openai_api_key):
    # Step 1: caption the image with CLIP Interrogator.
    print("calling clip interrogator")
    prompt = img_to_text(uploaded_image, 'best', 4, fn_index=1)[0]
    print(prompt)
    clean_prompt = clean_text(prompt)
    print(f"prompt cleaned: {clean_prompt}")

    # Step 2: optionally rewrite the caption into a musical description with OpenAI.
    # An empty textbox yields "", so a truthiness check covers both None and "".
    musical_prompt = 'You did not use any OpenAI API key to pimp your result :)'
    if openai_api_key:
        gpt_adaptation = try_api(prompt, openai_api_key)
        if gpt_adaptation[0] != "oups":
            musical_prompt = gpt_adaptation[0]
            print(f"musical adapt: {musical_prompt}")
            music_result = get_results(musical_prompt, track_duration, gen_intensity, gen_mode)
        else:
            music_result = get_results(clean_prompt, track_duration, gen_intensity, gen_mode)
    else:
        music_result = get_results(clean_prompt, track_duration, gen_intensity, gen_mode)

    show_prompts = f"""
    CLIP Interrogator Caption: '{prompt}'
    —
    OpenAI Musical Adaptation: '{musical_prompt}'
    —
    Audio file link: {music_result[2]}
    """

    time.sleep(1)
    return gr.Textbox.update(value=show_prompts, visible=True), music_result[1], gr.update(visible=True), gr.update(visible=True), gr.update(visible=True)


def try_api(message, openai_api_key):
    # Wrap the OpenAI call so that every documented error type degrades gracefully
    # to the sentinel value "oups" plus an HTML error message.
    try:
        response = call_api(message, openai_api_key)
        return response, "<span class='openai_clear'>no error</span>"
    except openai.error.Timeout as e:
        return "oups", f"<span class='openai_error'>OpenAI API request timed out: <br />{e}</span>"
    except openai.error.APIError as e:
        return "oups", f"<span class='openai_error'>OpenAI API returned an API Error: <br />{e}</span>"
    except openai.error.APIConnectionError as e:
        return "oups", f"<span class='openai_error'>OpenAI API request failed to connect: <br />{e}</span>"
    except openai.error.InvalidRequestError as e:
        return "oups", f"<span class='openai_error'>OpenAI API request was invalid: <br />{e}</span>"
    except openai.error.AuthenticationError as e:
        return "oups", f"<span class='openai_error'>OpenAI API request was not authorized: <br />{e}</span>"
    except openai.error.PermissionError as e:
        return "oups", f"<span class='openai_error'>OpenAI API request was not permitted: <br />{e}</span>"
    except openai.error.RateLimitError as e:
        return "oups", f"<span class='openai_error'>OpenAI API request exceeded rate limit: <br />{e}</span>"


def call_api(message, openai_api_key):
    instruction = "Convert in less than 200 characters this image caption to a very concise musical description with musical terms, as if you wanted to describe a musical ambiance, strictly in English"

    print("starting open ai")
    augmented_prompt = f"{instruction}: '{message}'."
    openai.api_key = openai_api_key

    response = openai.Completion.create(
        model="text-davinci-003",
        prompt=augmented_prompt,
        temperature=0.5,
        max_tokens=2048,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0.6
    )

    return str(response.choices[0].text).lstrip('\n')


def get_track_by_tags(tags, pat, duration, gen_intensity, gen_mode, maxit=20):
    r = httpx.post('https://api-b2b.mubert.com/v2/RecordTrackTTM',
                   json={
                       "method": "RecordTrackTTM",
                       "params": {
                           "pat": pat,
                           "duration": duration,
                           "format": "wav",
                           "intensity": gen_intensity,
                           "tags": tags,
                           "mode": gen_mode
                       }
                   })

    rdata = json.loads(r.text)
    print(rdata)
    trackurl = rdata['data']['tasks'][0]

    # Poll the URL until the track is rendered; returns None if maxit attempts pass.
    print('Generating track ', end='')
    for i in range(maxit):
        r = httpx.get(trackurl)
        if r.status_code == 200:
            return trackurl
        time.sleep(1)
    return None


def generate_track_by_prompt(pat, prompt, duration, gen_intensity, gen_mode):
    try:
        _, tags = get_tags_for_prompts(minilm, mubert_tags_embeddings, prompt)[0]
        result = get_track_by_tags(tags, pat, int(duration), gen_intensity, gen_mode)
        print(result)
        return result, ",".join(tags), "Success"
    except Exception as e:
        return None, "", str(e)


def convert_mp3_to_wav(mp3_filepath):
    wave_file = "file.wav"
    sound = AudioSegment.from_mp3(mp3_filepath)
    sound.export(wave_file, format="wav")
    return wave_file


def remove_emoji(text):
    emoji_pattern = re.compile("["
                               u"\U0001F600-\U0001F64F"  # emoticons
                               u"\U0001F300-\U0001F5FF"  # symbols & pictographs
                               u"\U0001F680-\U0001F6FF"  # transport & map symbols
                               u"\U0001F1E0-\U0001F1FF"  # flags (iOS)
                               "]+", flags=re.UNICODE)
    return emoji_pattern.sub(r'', text)


def remove_nonalphanumeric(text):
    return re.sub(r'[^a-zA-Z0-9\s]', '', text)


def clean_text(text):
    clean_text = remove_nonalphanumeric(text)
    clean_text = remove_emoji(clean_text)
    clean_text = re.sub(r'\d+', '', clean_text)  # remove any number
    return clean_text


article = """

<div class="footer">
    <p>

    Follow <a href="https://twitter.com/fffiloni" target="_blank">Sylvain Filoni</a> for future updates 🤗
    </p>
</div>

<div id="may-like-container" style="display: flex;justify-content: center;flex-direction: column;align-items: center;margin-bottom: 30px;">
    <p style="font-size: 0.8em;margin-bottom: 4px;">You may also like: </p>
    <div id="may-like" style="display: flex;flex-wrap: wrap;align-items: center;height: 20px;">
        <svg height="20" width="122" style="margin-left:4px;margin-bottom: 6px;">
            <a href="https://huggingface.co/spaces/fffiloni/spectrogram-to-music" target="_blank">
                <image href="https://img.shields.io/badge/🤗 Spaces-Riffusion-blue" src="https://img.shields.io/badge/🤗 Spaces-Riffusion-blue.png" height="20"/>
            </a>
        </svg>
    </div>
</div>

"""

with gr.Blocks(css="style.css") as demo:
    with gr.Column(elem_id="col-container"):

        gr.HTML("""<div style="text-align: center; max-width: 700px; margin: 0 auto;">
                <div
                style="
                    display: inline-flex;
                    align-items: center;
                    gap: 0.8rem;
                    font-size: 1.75rem;
                "
                >
                <h1 style="font-weight: 900; margin-bottom: 7px; margin-top: 5px;">
                    Image to Music
                </h1>
                </div>
                <p style="margin-bottom: 10px; font-size: 94%">
                Sends an image in to <a href="https://huggingface.co/spaces/pharma/CLIP-Interrogator" target="_blank">CLIP Interrogator</a>
                to generate a text prompt which is then run through
                <a href="https://huggingface.co/Mubert" target="_blank">Mubert</a> text-to-music to generate music from the input image!
                </p>
            </div>""")

        input_img = gr.Image(type="filepath", elem_id="input-img")
        prompts_out = gr.Textbox(label="Text Captions", visible=False, elem_id="prompts_out", info="If the player does not work, try to copy/paste the link in a new browser window")
        music_output = gr.Audio(label="Result", type="filepath", elem_id="music-output").style(height="5rem")
        with gr.Group(elem_id="share-btn-container"):
            community_icon = gr.HTML(community_icon_html, visible=False)
            loading_icon = gr.HTML(loading_icon_html, visible=False)
            share_button = gr.Button("Share to community", elem_id="share-btn", visible=False)

        with gr.Accordion(label="Music Generation Options", open=False):
            openai_api_key = gr.Textbox(type="password", label="🔐 Your OpenAI API Key (optional)", placeholder="sk-123abc...", info="You can use your OpenAI key to adapt the CLIP Interrogator caption to a musical translation.")
            track_duration = gr.Slider(minimum=20, maximum=120, value=55, step=5, label="Track duration", elem_id="duration-inp")
            with gr.Row():
                gen_intensity = gr.Dropdown(choices=["low", "medium", "high"], value="medium", label="Intensity")
                gen_mode = gr.Radio(label="mode", choices=["track", "loop"], value="loop")

        generate = gr.Button("Generate Music from Image")

        gr.HTML(article)

    generate.click(get_prompts, inputs=[input_img, track_duration, gen_intensity, gen_mode, openai_api_key], outputs=[prompts_out, music_output, share_button, community_icon, loading_icon], api_name="i2m")
    share_button.click(None, [], [], _js=share_js)

demo.queue(max_size=32).launch()
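Since `generate.click(...)` registers `api_name="i2m"`, the Space exposes a named API route that can be called programmatically. Below is a minimal sketch using `gradio_client`, not part of the original file; the Space id and image path are placeholders, and it assumes a client version that accepts plain file paths for image inputs:

```python
# Minimal sketch of calling the Space's named "i2m" endpoint from another machine.
from gradio_client import Client

client = Client("user/img-to-music")  # placeholder Space id
result = client.predict(
    "example.jpg",  # input_img: path to a local image (placeholder)
    55,             # track_duration in seconds
    "medium",       # gen_intensity
    "loop",         # gen_mode
    "",             # openai_api_key (optional)
    api_name="/i2m",
)
print(result)
```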
spaces/232labs/VToonify/vtoonify/train_vtoonify_t.py
DELETED
@@ -1,432 +0,0 @@
import os
#os.environ['CUDA_VISIBLE_DEVICES'] = "0"
import argparse
import math
import random

import numpy as np
import torch
from torch import nn, optim
from torch.nn import functional as F
from torch.utils import data
import torch.distributed as dist
from torchvision import transforms, utils
from tqdm import tqdm
from PIL import Image
from util import *
from model.stylegan import lpips
from model.stylegan.model import Generator, Downsample
from model.vtoonify import VToonify, ConditionalDiscriminator
from model.bisenet.model import BiSeNet
from model.simple_augment import random_apply_affine
from model.stylegan.distributed import (
    get_rank,
    synchronize,
    reduce_loss_dict,
    reduce_sum,
    get_world_size,
)

# In the paper, --weight for each style is set as follows:
# cartoon: default
# caricature: default
# pixar: 1 1 1 1 1 1 1 1 1 0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5
# comic: 0.5 0.5 0.5 0.5 0.5 0.5 0.5 1 1 1 1 1 1 1 1 1 1 1
# arcane: 0.5 0.5 0.5 0.5 0.5 0.5 0.5 1 1 1 1 1 1 1 1 1 1 1

class TrainOptions():
    def __init__(self):

        self.parser = argparse.ArgumentParser(description="Train VToonify-T")
        self.parser.add_argument("--iter", type=int, default=2000, help="total training iterations")
        self.parser.add_argument("--batch", type=int, default=8, help="batch size for each gpu")
        self.parser.add_argument("--lr", type=float, default=0.0001, help="learning rate")
        self.parser.add_argument("--local_rank", type=int, default=0, help="local rank for distributed training")
        self.parser.add_argument("--start_iter", type=int, default=0, help="start iteration")
        self.parser.add_argument("--save_every", type=int, default=30000, help="interval of saving a checkpoint")
        self.parser.add_argument("--save_begin", type=int, default=30000, help="when to start saving a checkpoint")
        self.parser.add_argument("--log_every", type=int, default=200, help="interval of saving an intermediate image result")

        self.parser.add_argument("--adv_loss", type=float, default=0.01, help="the weight of adv loss")
        self.parser.add_argument("--grec_loss", type=float, default=0.1, help="the weight of mse reconstruction loss")
        self.parser.add_argument("--perc_loss", type=float, default=0.01, help="the weight of perceptual loss")
        self.parser.add_argument("--tmp_loss", type=float, default=1.0, help="the weight of temporal consistency loss")

        self.parser.add_argument("--encoder_path", type=str, default=None, help="path to the pretrained encoder model")
        self.parser.add_argument("--direction_path", type=str, default='./checkpoint/directions.npy', help="path to the editing direction latents")
        self.parser.add_argument("--stylegan_path", type=str, default='./checkpoint/stylegan2-ffhq-config-f.pt', help="path to the stylegan model")
        self.parser.add_argument("--finetunegan_path", type=str, default='./checkpoint/cartoon/finetune-000600.pt', help="path to the finetuned stylegan model")
        self.parser.add_argument("--weight", type=float, nargs=18, default=[1]*9+[0]*9, help="the weight for blending two models")
        self.parser.add_argument("--faceparsing_path", type=str, default='./checkpoint/faceparsing.pth', help="path of the face parsing model")
        self.parser.add_argument("--style_encoder_path", type=str, default='./checkpoint/encoder.pt', help="path of the style encoder")

        self.parser.add_argument("--name", type=str, default='vtoonify_t_cartoon', help="saved model name")
        self.parser.add_argument("--pretrain", action="store_true", help="if true, only pretrain the encoder")

    def parse(self):
        self.opt = self.parser.parse_args()
        if self.opt.encoder_path is None:
            self.opt.encoder_path = os.path.join('./checkpoint/', self.opt.name, 'pretrain.pt')
        args = vars(self.opt)
        if self.opt.local_rank == 0:
            print('Load options')
            for name, value in sorted(args.items()):
                print('%s: %s' % (str(name), str(value)))
        return self.opt


# Pretrain E of VToonify.
# We train E so that its last-layer feature matches the original 8-th-layer input feature of G1.
# See "Model initialization" in Sec. 4.1.2 of the paper for the details.
def pretrain(args, generator, g_optim, g_ema, parsingpredictor, down, directions, basemodel, device):
    pbar = range(args.iter)

    if get_rank() == 0:
        pbar = tqdm(pbar, initial=args.start_iter, dynamic_ncols=True, smoothing=0.01)

    recon_loss = torch.tensor(0.0, device=device)
    loss_dict = {}

    if args.distributed:
        g_module = generator.module
    else:
        g_module = generator

    accum = 0.5 ** (32 / (10 * 1000))  # EMA decay factor for g_ema

    requires_grad(g_module.encoder, True)

    for idx in pbar:
        i = idx + args.start_iter

        if i > args.iter:
            print("Done!")
            break

        with torch.no_grad():
            # During pretraining, no geometric transformations are applied.
            noise_sample = torch.randn(args.batch, 512).cuda()
            ws_ = basemodel.style(noise_sample).unsqueeze(1).repeat(1, 18, 1)  # random w
            ws_[:, 3:7] += directions[torch.randint(0, directions.shape[0], (args.batch,)), 3:7]  # w''=w'=w+n
            img_gen, _ = basemodel([ws_], input_is_latent=True, truncation=0.5, truncation_latent=0)  # image part of x'
            img_gen = torch.clamp(img_gen, -1, 1).detach()
            img_gen512 = down(img_gen.detach())
            img_gen256 = down(img_gen512.detach())  # image part of x'_down
            mask512 = parsingpredictor(2 * torch.clamp(img_gen512, -1, 1))[0]
            real_input = torch.cat((img_gen256, down(mask512) / 16.0), dim=1).detach()  # x'_down
            # f_G1^(8)(w'')
            real_feat, real_skip = g_ema.generator([ws_], input_is_latent=True, return_feature_ind=6, truncation=0.5, truncation_latent=0)
            real_feat = real_feat.detach()
            real_skip = real_skip.detach()

        # f_E^(last)(x'_down)
        fake_feat, fake_skip = generator(real_input, style=None, return_feat=True)

        # L_E in Eq.(1)
        recon_loss = F.mse_loss(fake_feat, real_feat) + F.mse_loss(fake_skip, real_skip)

        loss_dict["emse"] = recon_loss

        generator.zero_grad()
        recon_loss.backward()
        g_optim.step()

        accumulate(g_ema.encoder, g_module.encoder, accum)

        loss_reduced = reduce_loss_dict(loss_dict)

        emse_loss_val = loss_reduced["emse"].mean().item()

        if get_rank() == 0:
            pbar.set_description(
                (
                    f"iter: {i:d}; emse: {emse_loss_val:.3f}"
                )
            )

            if ((i + 1) >= args.save_begin and (i + 1) % args.save_every == 0) or (i + 1) == args.iter:
                if (i + 1) == args.iter:
                    savename = f"checkpoint/{args.name}/pretrain.pt"
                else:
                    savename = f"checkpoint/{args.name}/pretrain-{i + 1:05d}.pt"
                torch.save(
                    {
                        "g_ema": g_ema.encoder.state_dict(),
                    },
                    savename,
                )


# Generate paired data and train VToonify; see Sec. 4.1.2 of the paper for the details.
def train(args, generator, discriminator, g_optim, d_optim, g_ema, percept, parsingpredictor, down, pspencoder, directions, basemodel, device):
    pbar = range(args.iter)

    if get_rank() == 0:
        pbar = tqdm(pbar, initial=args.start_iter, smoothing=0.01, ncols=120, dynamic_ncols=False)

    d_loss = torch.tensor(0.0, device=device)
    g_loss = torch.tensor(0.0, device=device)
    grec_loss = torch.tensor(0.0, device=device)
    gfeat_loss = torch.tensor(0.0, device=device)
    temporal_loss = torch.tensor(0.0, device=device)
    loss_dict = {}

    if args.distributed:
        g_module = generator.module
        d_module = discriminator.module
    else:
        g_module = generator
        d_module = discriminator

    accum = 0.5 ** (32 / (10 * 1000))

    for idx in pbar:
        i = idx + args.start_iter

        if i > args.iter:
            print("Done!")
            break

        ###### This part is for data generation. Generate pair (x, y, w'') as in Fig. 5 of the paper.
        with torch.no_grad():
            noise_sample = torch.randn(args.batch, 512).cuda()
            wc = basemodel.style(noise_sample).unsqueeze(1).repeat(1, 18, 1)  # random w
            wc[:, 3:7] += directions[torch.randint(0, directions.shape[0], (args.batch,)), 3:7]  # w'=w+n
            wc = wc.detach()
            xc, _ = basemodel([wc], input_is_latent=True, truncation=0.5, truncation_latent=0)
            xc = torch.clamp(xc, -1, 1).detach()  # x'
            xl = pspencoder(F.adaptive_avg_pool2d(xc, 256))
            xl = basemodel.style(xl.reshape(xl.shape[0] * xl.shape[1], xl.shape[2])).reshape(xl.shape)  # E_s(x'_down)
            xl = torch.cat((wc[:, 0:7] * 0.5, xl[:, 7:18]), dim=1).detach()  # w'' = concatenate w' and E_s(x'_down)
            xs, _ = g_ema.generator([xl], input_is_latent=True)
            xs = torch.clamp(xs, -1, 1).detach()  # y'
            # During training, random geometric transformations are applied.
            imgs, _ = random_apply_affine(torch.cat((xc.detach(), xs), dim=1), 0.2, None)
            real_input1024 = imgs[:, 0:3].detach()  # image part of x
            real_input512 = down(real_input1024).detach()
            real_input256 = down(real_input512).detach()
            mask512 = parsingpredictor(2 * real_input512)[0]
            mask256 = down(mask512).detach()
            mask = F.adaptive_avg_pool2d(mask512, 1024).detach()  # parsing part of x
            real_output = imgs[:, 3:].detach()  # y
            real_input = torch.cat((real_input256, mask256 / 16.0), dim=1)  # x_down
            # For logging, sample a fixed input-output pair (x_down, y, w'').
            if idx == 0 or i == 0:
                samplein = real_input.clone().detach()
                sampleout = real_output.clone().detach()
                samplexl = xl.clone().detach()

        ###### This part is for training the discriminator.

        requires_grad(g_module.encoder, False)
        requires_grad(g_module.fusion_out, False)
        requires_grad(g_module.fusion_skip, False)
        requires_grad(discriminator, True)

        fake_output = generator(real_input, xl)
        fake_pred = discriminator(F.adaptive_avg_pool2d(fake_output, 256))
        real_pred = discriminator(F.adaptive_avg_pool2d(real_output, 256))

        # L_adv in Eq.(3)
        d_loss = d_logistic_loss(real_pred, fake_pred) * args.adv_loss
        loss_dict["d"] = d_loss

        discriminator.zero_grad()
        d_loss.backward()
        d_optim.step()

        ###### This part is for training the generator (encoder and fusion modules).

        requires_grad(g_module.encoder, True)
        requires_grad(g_module.fusion_out, True)
        requires_grad(g_module.fusion_skip, True)
        requires_grad(discriminator, False)

        fake_output = generator(real_input, xl)
        fake_pred = discriminator(F.adaptive_avg_pool2d(fake_output, 256))
        # L_adv in Eq.(3)
        g_loss = g_nonsaturating_loss(fake_pred) * args.adv_loss
        # L_rec in Eq.(2)
        grec_loss = F.mse_loss(fake_output, real_output) * args.grec_loss
        gfeat_loss = percept(F.adaptive_avg_pool2d(fake_output, 512),  # 1024 would run out of memory
                             F.adaptive_avg_pool2d(real_output, 512)).sum() * args.perc_loss  # 256 would give blurry output

        loss_dict["g"] = g_loss
        loss_dict["gr"] = grec_loss
        loss_dict["gf"] = gfeat_loss

        w = random.randint(0, 1024 - 896)
        h = random.randint(0, 1024 - 896)
        crop_input = torch.cat((real_input1024[:, :, w:w + 896, h:h + 896], mask[:, :, w:w + 896, h:h + 896] / 16.0), dim=1).detach()
        crop_input = down(down(crop_input))
        crop_fake_output = fake_output[:, :, w:w + 896, h:h + 896]
        fake_crop_output = generator(crop_input, xl)
        # L_tmp in Eq.(4); gradually increase the weight of L_tmp.
        temporal_loss = ((fake_crop_output - crop_fake_output) ** 2).mean() * max(idx / (args.iter / 2.0) - 1, 0) * args.tmp_loss
        loss_dict["tp"] = temporal_loss

        generator.zero_grad()
        (g_loss + grec_loss + gfeat_loss + temporal_loss).backward()
        g_optim.step()

        accumulate(g_ema.encoder, g_module.encoder, accum)
        accumulate(g_ema.fusion_out, g_module.fusion_out, accum)
        accumulate(g_ema.fusion_skip, g_module.fusion_skip, accum)

        loss_reduced = reduce_loss_dict(loss_dict)

        d_loss_val = loss_reduced["d"].mean().item()
        g_loss_val = loss_reduced["g"].mean().item()
        gr_loss_val = loss_reduced["gr"].mean().item()
        gf_loss_val = loss_reduced["gf"].mean().item()
        tmp_loss_val = loss_reduced["tp"].mean().item()

        if get_rank() == 0:
            pbar.set_description(
                (
                    f"iter: {i:d}; advd: {d_loss_val:.3f}; advg: {g_loss_val:.3f}; mse: {gr_loss_val:.3f}; "
                    f"perc: {gf_loss_val:.3f}; tmp: {tmp_loss_val:.3f}"
                )
            )

            if i % args.log_every == 0 or (i + 1) == args.iter:
                with torch.no_grad():
                    g_ema.eval()
                    sample = g_ema(samplein, samplexl)
                    sample = F.interpolate(torch.cat((sampleout, sample), dim=0), 256)
                    utils.save_image(
                        sample,
                        f"log/{args.name}/{i:05d}.jpg",
                        nrow=int(args.batch),
                        normalize=True,
                        range=(-1, 1),
                    )

            if ((i + 1) >= args.save_begin and (i + 1) % args.save_every == 0) or (i + 1) == args.iter:
                if (i + 1) == args.iter:
                    savename = f"checkpoint/{args.name}/vtoonify.pt"
                else:
                    savename = f"checkpoint/{args.name}/vtoonify_{i + 1:05d}.pt"
                torch.save(
                    {
                        "g_ema": g_ema.state_dict(),
                    },
                    savename,
                )
-
|
321 |
-
|
322 |
-
|
323 |
-
if __name__ == "__main__":
|
324 |
-
|
325 |
-
device = "cuda"
|
326 |
-
parser = TrainOptions()
|
327 |
-
args = parser.parse()
|
328 |
-
if args.local_rank == 0:
|
329 |
-
print('*'*98)
|
330 |
-
if not os.path.exists("log/%s/"%(args.name)):
|
331 |
-
os.makedirs("log/%s/"%(args.name))
|
332 |
-
if not os.path.exists("checkpoint/%s/"%(args.name)):
|
333 |
-
os.makedirs("checkpoint/%s/"%(args.name))
|
334 |
-
|
335 |
-
n_gpu = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
|
336 |
-
args.distributed = n_gpu > 1
|
337 |
-
|
338 |
-
if args.distributed:
|
339 |
-
torch.cuda.set_device(args.local_rank)
|
340 |
-
torch.distributed.init_process_group(backend="nccl", init_method="env://")
|
341 |
-
synchronize()
|
342 |
-
|
343 |
-
generator = VToonify(backbone = 'toonify').to(device)
|
344 |
-
generator.apply(weights_init)
|
345 |
-
g_ema = VToonify(backbone = 'toonify').to(device)
|
346 |
-
g_ema.eval()
|
347 |
-
|
348 |
-
basemodel = Generator(1024, 512, 8, 2).to(device) # G0
|
349 |
-
finetunemodel = Generator(1024, 512, 8, 2).to(device)
|
350 |
-
basemodel.load_state_dict(torch.load(args.stylegan_path, map_location=lambda storage, loc: storage)['g_ema'])
|
351 |
-
finetunemodel.load_state_dict(torch.load(args.finetunegan_path, map_location=lambda storage, loc: storage)['g_ema'])
|
352 |
-
fused_state_dict = blend_models(finetunemodel, basemodel, args.weight) # G1
|
353 |
-
generator.generator.load_state_dict(fused_state_dict) # load G1
|
354 |
-
g_ema.generator.load_state_dict(fused_state_dict)
|
355 |
-
requires_grad(basemodel, False)
|
356 |
-
requires_grad(generator.generator, False)
|
357 |
-
requires_grad(g_ema.generator, False)
|
358 |
-
|
359 |
-
if not args.pretrain:
|
360 |
-
generator.encoder.load_state_dict(torch.load(args.encoder_path, map_location=lambda storage, loc: storage)["g_ema"])
|
361 |
-
# we initialize the fusion modules to map f_G \otimes f_E to f_G.
|
362 |
-
for k in generator.fusion_out:
|
363 |
-
k.weight.data *= 0.01
|
364 |
-
k.weight[:,0:k.weight.shape[0],1,1].data += torch.eye(k.weight.shape[0]).cuda()
|
365 |
-
for k in generator.fusion_skip:
|
366 |
-
k.weight.data *= 0.01
|
367 |
-
k.weight[:,0:k.weight.shape[0],1,1].data += torch.eye(k.weight.shape[0]).cuda()
|
368 |
-
|
369 |
-
accumulate(g_ema.encoder, generator.encoder, 0)
|
370 |
-
accumulate(g_ema.fusion_out, generator.fusion_out, 0)
|
371 |
-
accumulate(g_ema.fusion_skip, generator.fusion_skip, 0)
|
372 |
-
|
373 |
-
g_parameters = list(generator.encoder.parameters())
|
374 |
-
if not args.pretrain:
|
375 |
-
g_parameters = g_parameters + list(generator.fusion_out.parameters()) + list(generator.fusion_skip.parameters())
|
376 |
-
|
377 |
-
g_optim = optim.Adam(
|
378 |
-
g_parameters,
|
379 |
-
lr=args.lr,
|
380 |
-
betas=(0.9, 0.99),
|
381 |
-
)
|
382 |
-
|
383 |
-
if args.distributed:
|
384 |
-
generator = nn.parallel.DistributedDataParallel(
|
385 |
-
generator,
|
386 |
-
device_ids=[args.local_rank],
|
387 |
-
output_device=args.local_rank,
|
388 |
-
broadcast_buffers=False,
|
389 |
-
find_unused_parameters=True,
|
390 |
-
)
|
391 |
-
|
392 |
-
parsingpredictor = BiSeNet(n_classes=19)
|
393 |
-
parsingpredictor.load_state_dict(torch.load(args.faceparsing_path, map_location=lambda storage, loc: storage))
|
394 |
-
parsingpredictor.to(device).eval()
|
395 |
-
requires_grad(parsingpredictor, False)
|
396 |
-
|
397 |
-
# we apply gaussian blur to the images to avoid flickers caused during downsampling
|
398 |
-
down = Downsample(kernel=[1, 3, 3, 1], factor=2).to(device)
|
399 |
-
requires_grad(down, False)
|
400 |
-
|
401 |
-
directions = torch.tensor(np.load(args.direction_path)).to(device)
|
402 |
-
|
403 |
-
if not args.pretrain:
|
404 |
-
discriminator = ConditionalDiscriminator(256).to(device)
|
405 |
-
|
406 |
-
d_optim = optim.Adam(
|
407 |
-
discriminator.parameters(),
|
408 |
-
lr=args.lr,
|
409 |
-
betas=(0.9, 0.99),
|
410 |
-
)
|
411 |
-
|
412 |
-
if args.distributed:
|
413 |
-
discriminator = nn.parallel.DistributedDataParallel(
|
414 |
-
discriminator,
|
415 |
-
device_ids=[args.local_rank],
|
416 |
-
output_device=args.local_rank,
|
417 |
-
broadcast_buffers=False,
|
418 |
-
find_unused_parameters=True,
|
419 |
-
)
|
420 |
-
|
421 |
-
percept = lpips.PerceptualLoss(model="net-lin", net="vgg", use_gpu=device.startswith("cuda"), gpu_ids=[args.local_rank])
|
422 |
-
requires_grad(percept.model.net, False)
|
423 |
-
|
424 |
-
pspencoder = load_psp_standalone(args.style_encoder_path, device)
|
425 |
-
|
426 |
-
if args.local_rank == 0:
|
427 |
-
print('Load models and data successfully loaded!')
|
428 |
-
|
429 |
-
if args.pretrain:
|
430 |
-
pretrain(args, generator, g_optim, g_ema, parsingpredictor, down, directions, basemodel, device)
|
431 |
-
else:
|
432 |
-
train(args, generator, discriminator, g_optim, d_optim, g_ema, percept, parsingpredictor, down, pspencoder, directions, basemodel, device)
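Note: the `accumulate` helper used throughout the script above is defined elsewhere in the repository. For reference, a minimal sketch of the standard exponential-moving-average update it is assumed to perform (following the common stylegan2-pytorch convention; the decay `0.5 ** (32 / (10 * 1000))` works out to roughly 0.9978):

import torch

def accumulate(model1, model2, decay=0.5 ** (32 / (10 * 1000))):
    """Assumed EMA update: model1 <- decay * model1 + (1 - decay) * model2."""
    par1 = dict(model1.named_parameters())
    par2 = dict(model2.named_parameters())
    for k in par1.keys():
        # in-place blend of the EMA weights toward the live training weights
        par1[k].data.mul_(decay).add_(par2[k].data, alpha=1 - decay)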
spaces/3i2irg/first-app/README.md
DELETED
@@ -1,13 +0,0 @@
---
title: First App
emoji: 🔥
colorFrom: yellow
colorTo: gray
sdk: gradio
sdk_version: 3.16.2
app_file: app.py
pinned: false
license: apache-2.0
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/44ov41za8i/FreeVC/speaker_encoder/data_objects/random_cycler.py
DELETED
@@ -1,37 +0,0 @@
import random

class RandomCycler:
    """
    Creates an internal copy of a sequence and allows access to its items in a constrained random
    order. For a source sequence of n items and one or several consecutive queries of a total
    of m items, the following guarantees hold (one implies the other):
        - Each item will be returned between m // n and ((m - 1) // n) + 1 times.
        - Between two appearances of the same item, there may be at most 2 * (n - 1) other items.
    """

    def __init__(self, source):
        if len(source) == 0:
            raise Exception("Can't create RandomCycler from an empty collection")
        self.all_items = list(source)
        self.next_items = []

    def sample(self, count: int):
        shuffle = lambda l: random.sample(l, len(l))

        out = []
        while count > 0:
            if count >= len(self.all_items):
                out.extend(shuffle(list(self.all_items)))
                count -= len(self.all_items)
                continue
            n = min(count, len(self.next_items))
            out.extend(self.next_items[:n])
            count -= n
            self.next_items = self.next_items[n:]
            if len(self.next_items) == 0:
                self.next_items = shuffle(list(self.all_items))
        return out

    def __next__(self):
        return self.sample(1)[0]
spaces/A00001/bingothoo/src/components/ui/codeblock.tsx
DELETED
@@ -1,142 +0,0 @@
'use client'

import { FC, memo } from 'react'
import { Prism as SyntaxHighlighter } from 'react-syntax-highlighter'
import { coldarkDark } from 'react-syntax-highlighter/dist/cjs/styles/prism'

import { useCopyToClipboard } from '@/lib/hooks/use-copy-to-clipboard'
import { IconCheck, IconCopy, IconDownload } from '@/components/ui/icons'
import { Button } from '@/components/ui/button'

interface Props {
  language: string
  value: string
}

interface languageMap {
  [key: string]: string | undefined
}

export const programmingLanguages: languageMap = {
  javascript: '.js',
  python: '.py',
  java: '.java',
  c: '.c',
  cpp: '.cpp',
  'c++': '.cpp',
  'c#': '.cs',
  ruby: '.rb',
  php: '.php',
  swift: '.swift',
  'objective-c': '.m',
  kotlin: '.kt',
  typescript: '.ts',
  go: '.go',
  perl: '.pl',
  rust: '.rs',
  scala: '.scala',
  haskell: '.hs',
  lua: '.lua',
  shell: '.sh',
  sql: '.sql',
  html: '.html',
  css: '.css'
  // add more file extensions here, make sure the key is same as language prop in CodeBlock.tsx component
}

export const generateRandomString = (length: number, lowercase = false) => {
  const chars = 'ABCDEFGHJKLMNPQRSTUVWXY3456789' // excluding similar looking characters like Z, 2, I, 1, O, 0
  let result = ''
  for (let i = 0; i < length; i++) {
    result += chars.charAt(Math.floor(Math.random() * chars.length))
  }
  return lowercase ? result.toLowerCase() : result
}

const CodeBlock: FC<Props> = memo(({ language, value }) => {
  const { isCopied, copyToClipboard } = useCopyToClipboard({ timeout: 2000 })

  const downloadAsFile = () => {
    if (typeof window === 'undefined') {
      return
    }
    const fileExtension = programmingLanguages[language] || '.file'
    const suggestedFileName = `file-${generateRandomString(
      3,
      true
    )}${fileExtension}`
    // note: the original passed `'Enter file name' || ''`, a no-op expression; the literal suffices
    const fileName = window.prompt('Enter file name', suggestedFileName)

    if (!fileName) {
      // User pressed cancel on prompt.
      return
    }

    const blob = new Blob([value], { type: 'text/plain' })
    const url = URL.createObjectURL(blob)
    const link = document.createElement('a')
    link.download = fileName
    link.href = url
    link.style.display = 'none'
    document.body.appendChild(link)
    link.click()
    document.body.removeChild(link)
    URL.revokeObjectURL(url)
  }

  const onCopy = () => {
    if (isCopied) return
    copyToClipboard(value)
  }

  return (
    <div className="codeblock relative w-full bg-zinc-950 font-sans">
      <div className="flex w-full items-center justify-between bg-zinc-800 px-6 py-2 pr-4 text-zinc-100">
        <span className="text-xs lowercase">{language}</span>
        <div className="flex items-center space-x-1">
          <Button
            variant="ghost"
            className="hover:bg-zinc-800 focus-visible:ring-1 focus-visible:ring-slate-700 focus-visible:ring-offset-0"
            onClick={downloadAsFile}
            size="icon"
          >
            <IconDownload />
            <span className="sr-only">Download</span>
          </Button>
          <Button
            variant="ghost"
            size="icon"
            className="text-xs hover:bg-zinc-800 focus-visible:ring-1 focus-visible:ring-slate-700 focus-visible:ring-offset-0"
            onClick={onCopy}
          >
            {isCopied ? <IconCheck /> : <IconCopy />}
            <span className="sr-only">Copy code</span>
          </Button>
        </div>
      </div>
      <SyntaxHighlighter
        language={language}
        style={coldarkDark}
        PreTag="div"
        showLineNumbers
        customStyle={{
          margin: 0,
          width: '100%',
          background: 'transparent',
          padding: '1.5rem 1rem'
        }}
        codeTagProps={{
          style: {
            fontSize: '0.9rem',
            fontFamily: 'var(--font-mono)'
          }
        }}
      >
        {value}
      </SyntaxHighlighter>
    </div>
  )
})
CodeBlock.displayName = 'CodeBlock'

export { CodeBlock }
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/custom_dataset/[A]dataset_split.sh
DELETED
@@ -1,5 +0,0 @@
cd ..
python tools/misc/coco_split.py --json data-df2/deepfashion2-smaller-dataset.json \
                                --out-dir data-df2/annotations/ \
                                --ratios 8 2 \
                                --shuffle --seed 10
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov6/yolov6_s_fast_1xb12-40e_cat.py
DELETED
@@ -1,56 +0,0 @@
_base_ = './yolov6_s_syncbn_fast_8xb32-400e_coco.py'

data_root = './data/cat/'
class_name = ('cat', )
num_classes = len(class_name)
metainfo = dict(classes=class_name, palette=[(20, 220, 60)])

max_epochs = 40
train_batch_size_per_gpu = 12
train_num_workers = 4
num_last_epochs = 5

load_from = 'https://download.openmmlab.com/mmyolo/v0/yolov6/yolov6_s_syncbn_fast_8xb32-400e_coco/yolov6_s_syncbn_fast_8xb32-400e_coco_20221102_203035-932e1d91.pth'  # noqa

model = dict(
    backbone=dict(frozen_stages=4),
    bbox_head=dict(head_module=dict(num_classes=num_classes)),
    train_cfg=dict(
        initial_assigner=dict(num_classes=num_classes),
        assigner=dict(num_classes=num_classes)))

train_dataloader = dict(
    batch_size=train_batch_size_per_gpu,
    num_workers=train_num_workers,
    dataset=dict(
        data_root=data_root,
        metainfo=metainfo,
        ann_file='annotations/trainval.json',
        data_prefix=dict(img='images/')))

val_dataloader = dict(
    dataset=dict(
        metainfo=metainfo,
        data_root=data_root,
        ann_file='annotations/test.json',
        data_prefix=dict(img='images/')))

test_dataloader = val_dataloader

val_evaluator = dict(ann_file=data_root + 'annotations/test.json')
test_evaluator = val_evaluator

_base_.optim_wrapper.optimizer.batch_size_per_gpu = train_batch_size_per_gpu
_base_.custom_hooks[1].switch_epoch = max_epochs - num_last_epochs

default_hooks = dict(
    checkpoint=dict(interval=10, max_keep_ckpts=2, save_best='auto'),
    # The warmup_mim_iter parameter is critical.
    # The default value is 1000, which is not suitable for cat datasets.
    param_scheduler=dict(max_epochs=max_epochs, warmup_mim_iter=10),
    logger=dict(type='LoggerHook', interval=5))
train_cfg = dict(
    max_epochs=max_epochs,
    val_interval=10,
    dynamic_intervals=[(max_epochs - num_last_epochs, 1)])
# visualizer = dict(vis_backends=[dict(type='LocalVisBackend'), dict(type='WandbVisBackend')])  # noqa
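This config relies on MMEngine's `_base_` inheritance: only the fields that differ from the base YOLOv6-s COCO config are redefined. A short sketch of how the merged config could be inspected, assuming a checkout where this file lives under `configs/yolov6/` (the path is illustrative):

from mmengine.config import Config

# hypothetical path inside an mmyolo checkout
cfg = Config.fromfile('configs/yolov6/yolov6_s_fast_1xb12-40e_cat.py')
print(cfg.train_dataloader.batch_size)              # 12, overriding the base config's value
print(cfg.model.bbox_head.head_module.num_classes)  # 1 (only the 'cat' class)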
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td_hm_res50_4xb64-210e_deepfashion2_sling_dress_256x192.py
DELETED
@@ -1,172 +0,0 @@
_base_ = [
    '../../../_base_/default_runtime.py',
    '../../../_base_/datasets/deepfashion2.py'
]

default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater'))

resume = False  # resume from a checkpoint
load_from = None  # load model weights
train_cfg = dict(by_epoch=True, max_epochs=210, val_interval=10)  # number of training epochs and validation interval
param_scheduler = [
    dict(  # warmup schedule
        type='LinearLR',
        begin=0,
        end=500,
        start_factor=0.001,
        by_epoch=False),
    dict(  # main scheduler
        type='MultiStepLR',
        begin=0,
        end=210,
        milestones=[100, 160],
        gamma=0.1,
        by_epoch=True)
]
optim_wrapper = dict(optimizer=dict(type='Adam', lr=0.0005))  # optimizer and learning rate
auto_scale_lr = dict(base_batch_size=512)  # automatically scale the learning rate with the batch size

backend_args = dict(backend='local')  # data-loading backend; loads from the local disk by default
dataset_type = 'DeepFashion2Dataset'  # dataset class name
data_mode = 'topdown'  # algorithm type; determines how annotations are loaded
data_root = 'data/deepfashion2/'  # data root path
# codec that generates training targets and decodes predictions;
# it also holds the input image and output heatmap sizes
codec = dict(
    type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2)

train_pipeline = [
    dict(type='LoadImage'),
    dict(type='GetBBoxCenterScale'),
    dict(type='RandomFlip', direction='horizontal'),
    dict(
        type='RandomBBoxTransform',
        shift_prob=0,
        rotate_factor=60,
        scale_factor=(0.75, 1.25)),
    dict(type='TopdownAffine', input_size=codec['input_size']),
    dict(type='GenerateTarget', encoder=codec),
    dict(type='PackPoseInputs')
]
val_pipeline = [  # test-time pipeline
    dict(type='LoadImage', backend_args=backend_args),  # load the image
    dict(type='GetBBoxCenterScale'),  # compute center and scale from the bbox
    dict(type='TopdownAffine', input_size=codec['input_size']),  # apply the affine transform to the targets
    dict(type='PackPoseInputs')  # pack the targets for the model
]
train_dataloader = dict(  # training data loader
    batch_size=64,  # batch size
    num_workers=6,  # number of data-loading workers
    persistent_workers=True,  # keep workers alive between epochs to avoid restart overhead
    sampler=dict(type='DefaultSampler', shuffle=True),  # sampling strategy: shuffle the data
    dataset=dict(
        type=dataset_type,  # dataset class name
        data_root=data_root,  # dataset path
        data_mode=data_mode,  # algorithm type
        ann_file='train/deepfashion2_sling_dress.json',  # annotation file path
        data_prefix=dict(img='train/image/'),  # image path
        pipeline=train_pipeline  # data pipeline
    ))
val_dataloader = dict(
    batch_size=32,
    num_workers=6,
    persistent_workers=True,  # keep workers alive between epochs to avoid restart overhead
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),  # sampling strategy: no shuffling
    dataset=dict(
        type=dataset_type,  # dataset class name
        data_root=data_root,  # dataset path
        data_mode=data_mode,  # algorithm type
        ann_file='validation/deepfashion2_sling_dress.json',  # annotation file path
        data_prefix=dict(img='validation/image/'),  # image path
        test_mode=True,  # test mode switch
        pipeline=val_pipeline  # data pipeline
    ))
test_dataloader = val_dataloader  # by default the validation and test sets are not distinguished; redefine as needed

channel_cfg = dict(
    num_output_channels=294,
    dataset_joints=294,
    dataset_channel=[
        list(range(294)),  # keypoint indices 0-293, written out explicitly in the original
    ],
    inference_channel=list(range(294)))  # keypoint indices 0-293, written out explicitly in the original

model = dict(
    type='TopdownPoseEstimator',  # the model structure determines the algorithm flow
    data_preprocessor=dict(  # normalization and channel-order conversion, done as part of the model
        type='PoseDataPreprocessor',
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        bgr_to_rgb=True),
    backbone=dict(
        type='ResNet',
        depth=50,
        init_cfg=dict(
            type='Pretrained',  # pretrained weights; only the backbone is loaded, for transfer learning
            checkpoint='torchvision://resnet50')),
    head=dict(  # model head
        type='HeatmapHead',
        in_channels=2048,
        out_channels=channel_cfg['num_output_channels'],
        # deconv_out_channels=None,
        loss=dict(type='KeypointMSELoss', use_target_weight=True),  # loss function
        decoder=codec),  # decoder that converts heatmaps back to coordinates
    test_cfg=dict(
        flip_test=True,  # enable horizontal-flip test-time augmentation
        flip_mode='heatmap',  # flip the heatmaps
        shift_heatmap=True,  # shift the flipped heatmaps to improve accuracy
    ))

val_evaluator = [
    dict(type='PCKAccuracy', thr=0.2),
    dict(type='AUC'),
    dict(type='EPE'),
]
test_evaluator = val_evaluator  # by default the validation and test sets are not distinguished; redefine as needed

visualizer = dict(
    vis_backends=[dict(type='LocalVisBackend'),
                  dict(type='WandbVisBackend')])
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/skew/Factory.d.ts
DELETED
@@ -1,7 +0,0 @@
import Container from '../container/Container';
import Skew from './Skew';

export default function (
    parentContainer: Container,
    config?: Skew.IConfig
): Skew;
spaces/Amrrs/DragGan-Inversion/stylegan_human/pti/pti_configs/__init__.py
DELETED
File without changes
spaces/Amrrs/DragGan-Inversion/torch_utils/ops/filtered_lrelu.h
DELETED
@@ -1,90 +0,0 @@
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.

#include <cuda_runtime.h>

//------------------------------------------------------------------------
// CUDA kernel parameters.

struct filtered_lrelu_kernel_params
{
    // These parameters decide which kernel to use.
    int             up;         // upsampling ratio (1, 2, 4)
    int             down;       // downsampling ratio (1, 2, 4)
    int2            fuShape;    // [size, 1] | [size, size]
    int2            fdShape;    // [size, 1] | [size, size]

    int             _dummy;     // Alignment.

    // Rest of the parameters.
    const void*     x;          // Input tensor.
    void*           y;          // Output tensor.
    const void*     b;          // Bias tensor.
    unsigned char*  s;          // Sign tensor in/out. NULL if unused.
    const float*    fu;         // Upsampling filter.
    const float*    fd;         // Downsampling filter.

    int2            pad0;       // Left/top padding.
    float           gain;       // Additional gain factor.
    float           slope;      // Leaky ReLU slope on negative side.
    float           clamp;      // Clamp after nonlinearity.
    int             flip;       // Filter kernel flip for gradient computation.

    int             tilesXdim;  // Original number of horizontal output tiles.
    int             tilesXrep;  // Number of horizontal tiles per CTA.
    int             blockZofs;  // Block z offset to support large minibatch, channel dimensions.

    int4            xShape;     // [width, height, channel, batch]
    int4            yShape;     // [width, height, channel, batch]
    int2            sShape;     // [width, height] - width is in bytes. Contiguous. Zeros if unused.
    int2            sOfs;       // [ofs_x, ofs_y] - offset between upsampled data and sign tensor.
    int             swLimit;    // Active width of sign tensor in bytes.

    longlong4       xStride;    // Strides of all tensors except signs, same component order as shapes.
    longlong4       yStride;    //
    int64_t         bStride;    //
    longlong3       fuStride;   //
    longlong3       fdStride;   //
};

struct filtered_lrelu_act_kernel_params
{
    void*           x;          // Input/output, modified in-place.
    unsigned char*  s;          // Sign tensor in/out. NULL if unused.

    float           gain;       // Additional gain factor.
    float           slope;      // Leaky ReLU slope on negative side.
    float           clamp;      // Clamp after nonlinearity.

    int4            xShape;     // [width, height, channel, batch]
    longlong4       xStride;    // Input/output tensor strides, same order as in shape.
    int2            sShape;     // [width, height] - width is in elements. Contiguous. Zeros if unused.
    int2            sOfs;       // [ofs_x, ofs_y] - offset between upsampled data and sign tensor.
};

//------------------------------------------------------------------------
// CUDA kernel specialization.

struct filtered_lrelu_kernel_spec
{
    void*   setup;              // Function for filter kernel setup.
    void*   exec;               // Function for main operation.
    int2    tileOut;            // Width/height of launch tile.
    int     numWarps;           // Number of warps per thread block, determines launch block size.
    int     xrep;               // For processing multiple horizontal tiles per thread block.
    int     dynamicSharedKB;    // How much dynamic shared memory the exec kernel wants.
};

//------------------------------------------------------------------------
// CUDA kernel selection.

template <class T, class index_t, bool signWrite, bool signRead> filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB);
template <class T, bool signWrite, bool signRead> void* choose_filtered_lrelu_act_kernel(void);
template <bool signWrite, bool signRead> cudaError_t copy_filters(cudaStream_t stream);

//------------------------------------------------------------------------
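These structs parameterize a fused CUDA kernel that combines FIR-filtered up/downsampling with a biased, clamped leaky ReLU, as used in StyleGAN3-style generators. Purely for intuition, a naive PyTorch sketch of the unfused operation, ignoring the custom FIR filters `fu`/`fd` and substituting bilinear resampling (so this is not numerically equivalent to the kernel):

import torch
import torch.nn.functional as F

def filtered_lrelu_reference(x, b, up=2, down=2, gain=2 ** 0.5, slope=0.2, clamp=None):
    """Unfused sketch: upsample -> add bias -> scaled leaky ReLU -> clamp -> downsample."""
    x = F.interpolate(x, scale_factor=up, mode="bilinear", align_corners=False)
    x = x + b.reshape(1, -1, 1, 1)                      # per-channel bias
    x = F.leaky_relu(x, negative_slope=slope) * gain    # nonlinearity with extra gain
    if clamp is not None:
        x = x.clamp(-clamp, clamp)                      # clamp after the nonlinearity
    return F.interpolate(x, scale_factor=1.0 / down, mode="bilinear", align_corners=False)

# example: y = filtered_lrelu_reference(torch.randn(1, 8, 16, 16), torch.zeros(8))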
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/stable_diffusion/upscale.md
DELETED
@@ -1,37 +0,0 @@
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Super-resolution

The Stable Diffusion upscaler diffusion model was created by the researchers and engineers from [CompVis](https://github.com/CompVis), [Stability AI](https://stability.ai/), and [LAION](https://laion.ai/). It is used to enhance the resolution of input images by a factor of 4.

<Tip>

Make sure to check out the Stable Diffusion [Tips](overview#tips) section to learn how to explore the tradeoff between scheduler speed and quality, and how to reuse pipeline components efficiently!

If you're interested in using one of the official checkpoints for a task, explore the [CompVis](https://huggingface.co/CompVis), [Runway](https://huggingface.co/runwayml), and [Stability AI](https://huggingface.co/stabilityai) Hub organizations!

</Tip>

## StableDiffusionUpscalePipeline

[[autodoc]] StableDiffusionUpscalePipeline
	- all
	- __call__
	- enable_attention_slicing
	- disable_attention_slicing
	- enable_xformers_memory_efficient_attention
	- disable_xformers_memory_efficient_attention

## StableDiffusionPipelineOutput

[[autodoc]] pipelines.stable_diffusion.StableDiffusionPipelineOutput
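For context, a minimal usage sketch of this pipeline based on the documented diffusers API (the file names and prompt are illustrative):

import torch
from diffusers import StableDiffusionUpscalePipeline
from PIL import Image

pipe = StableDiffusionUpscalePipeline.from_pretrained(
    "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
).to("cuda")

low_res = Image.open("low_res_cat.png").convert("RGB").resize((128, 128))
upscaled = pipe(prompt="a white cat", image=low_res).images[0]  # 4x upscaling
upscaled.save("upscaled_cat.png")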
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/experimental/rl/__init__.py
DELETED
@@ -1 +0,0 @@
from .value_guided_sampling import ValueGuidedRLPipeline
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/paint_by_example/__init__.py
DELETED
@@ -1,18 +0,0 @@
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import PIL
from PIL import Image

from ...utils import OptionalDependencyNotAvailable, is_torch_available, is_transformers_available


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
    from .image_encoder import PaintByExampleImageEncoder
    from .pipeline_paint_by_example import PaintByExamplePipeline
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py
DELETED
@@ -1,934 +0,0 @@
|
|
1 |
-
# Copyright 2023 Harutatsu Akiyama and The HuggingFace Team. All rights reserved.
|
2 |
-
#
|
3 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
-
# you may not use this file except in compliance with the License.
|
5 |
-
# You may obtain a copy of the License at
|
6 |
-
#
|
7 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
-
#
|
9 |
-
# Unless required by applicable law or agreed to in writing, software
|
10 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
-
# See the License for the specific language governing permissions and
|
13 |
-
# limitations under the License.
|
14 |
-
|
15 |
-
import inspect
|
16 |
-
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
|
17 |
-
|
18 |
-
import numpy as np
|
19 |
-
import PIL.Image
|
20 |
-
import torch
|
21 |
-
from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
|
22 |
-
|
23 |
-
from ...image_processor import VaeImageProcessor
|
24 |
-
from ...loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
|
25 |
-
from ...models import AutoencoderKL, UNet2DConditionModel
|
26 |
-
from ...models.attention_processor import (
|
27 |
-
AttnProcessor2_0,
|
28 |
-
LoRAAttnProcessor2_0,
|
29 |
-
LoRAXFormersAttnProcessor,
|
30 |
-
XFormersAttnProcessor,
|
31 |
-
)
|
32 |
-
from ...schedulers import KarrasDiffusionSchedulers
|
33 |
-
from ...utils import (
|
34 |
-
deprecate,
|
35 |
-
is_accelerate_available,
|
36 |
-
is_accelerate_version,
|
37 |
-
is_invisible_watermark_available,
|
38 |
-
logging,
|
39 |
-
randn_tensor,
|
40 |
-
)
|
41 |
-
from ..pipeline_utils import DiffusionPipeline
|
42 |
-
from . import StableDiffusionXLPipelineOutput
|
43 |
-
|
44 |
-
|
45 |
-
if is_invisible_watermark_available():
|
46 |
-
from .watermark import StableDiffusionXLWatermarker
|
47 |
-
|
48 |
-
|
49 |
-
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
50 |
-
|
51 |
-
|
52 |
-
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
|
53 |
-
"""
|
54 |
-
Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
|
55 |
-
Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
|
56 |
-
"""
|
57 |
-
std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
|
58 |
-
std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
|
59 |
-
# rescale the results from guidance (fixes overexposure)
|
60 |
-
noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
|
61 |
-
# mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
|
62 |
-
noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
|
63 |
-
return noise_cfg
|
64 |
-
|
65 |
-
|
66 |
-
class StableDiffusionXLInstructPix2PixPipeline(DiffusionPipeline, FromSingleFileMixin, LoraLoaderMixin):
|
67 |
-
r"""
|
68 |
-
Pipeline for pixel-level image editing by following text instructions. Based on Stable Diffusion XL.
|
69 |
-
|
70 |
-
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
|
71 |
-
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
|
72 |
-
|
73 |
-
In addition the pipeline inherits the following loading methods:
|
74 |
-
- *Textual-Inversion*: [`loaders.TextualInversionLoaderMixin.load_textual_inversion`]
|
75 |
-
- *LoRA*: [`loaders.LoraLoaderMixin.load_lora_weights`]
|
76 |
-
|
77 |
-
as well as the following saving methods:
|
78 |
-
- *LoRA*: [`loaders.LoraLoaderMixin.save_lora_weights`]
|
79 |
-
|
80 |
-
Args:
|
81 |
-
vae ([`AutoencoderKL`]):
|
82 |
-
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
|
83 |
-
text_encoder ([`CLIPTextModel`]):
|
84 |
-
Frozen text-encoder. Stable Diffusion XL uses the text portion of
|
85 |
-
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
|
86 |
-
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
|
87 |
-
text_encoder_2 ([` CLIPTextModelWithProjection`]):
|
88 |
-
Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of
|
89 |
-
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection),
|
90 |
-
specifically the
|
91 |
-
[laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)
|
92 |
-
variant.
|
93 |
-
tokenizer (`CLIPTokenizer`):
|
94 |
-
Tokenizer of class
|
95 |
-
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
|
96 |
-
tokenizer_2 (`CLIPTokenizer`):
|
97 |
-
Second Tokenizer of class
|
98 |
-
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
|
99 |
-
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
|
100 |
-
scheduler ([`SchedulerMixin`]):
|
101 |
-
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
|
102 |
-
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
|
103 |
-
"""
|
104 |
-
|
105 |
-
def __init__(
|
106 |
-
self,
|
107 |
-
vae: AutoencoderKL,
|
108 |
-
text_encoder: CLIPTextModel,
|
109 |
-
text_encoder_2: CLIPTextModelWithProjection,
|
110 |
-
tokenizer: CLIPTokenizer,
|
111 |
-
tokenizer_2: CLIPTokenizer,
|
112 |
-
unet: UNet2DConditionModel,
|
113 |
-
scheduler: KarrasDiffusionSchedulers,
|
114 |
-
requires_aesthetics_score: bool = False,
|
115 |
-
force_zeros_for_empty_prompt: bool = True,
|
116 |
-
add_watermarker: Optional[bool] = None,
|
117 |
-
):
|
118 |
-
super().__init__()
|
119 |
-
|
120 |
-
self.register_modules(
|
121 |
-
vae=vae,
|
122 |
-
text_encoder=text_encoder,
|
123 |
-
text_encoder_2=text_encoder_2,
|
124 |
-
tokenizer=tokenizer,
|
125 |
-
tokenizer_2=tokenizer_2,
|
126 |
-
unet=unet,
|
127 |
-
scheduler=scheduler,
|
128 |
-
)
|
129 |
-
self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
|
130 |
-
self.register_to_config(requires_aesthetics_score=requires_aesthetics_score)
|
131 |
-
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
132 |
-
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
133 |
-
|
134 |
-
self.vae.config.force_upcast = True # force the VAE to be in float32 mode, as it overflows in float16
|
135 |
-
|
136 |
-
add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available()
|
137 |
-
|
138 |
-
if add_watermarker:
|
139 |
-
self.watermark = StableDiffusionXLWatermarker()
|
140 |
-
else:
|
141 |
-
self.watermark = None
|
142 |
-
|
143 |
-
def enable_vae_slicing(self):
|
144 |
-
r"""
|
145 |
-
Enable sliced VAE decoding.
|
146 |
-
|
147 |
-
When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several
|
148 |
-
steps. This is useful to save some memory and allow larger batch sizes.
|
149 |
-
"""
|
150 |
-
self.vae.enable_slicing()
|
151 |
-
|
152 |
-
def disable_vae_slicing(self):
|
153 |
-
r"""
|
154 |
-
Disable sliced VAE decoding. If `enable_vae_slicing` was previously invoked, this method will go back to
|
155 |
-
computing decoding in one step.
|
156 |
-
"""
|
157 |
-
self.vae.disable_slicing()
|
158 |
-
|
159 |
-
def enable_vae_tiling(self):
|
160 |
-
r"""
|
161 |
-
Enable tiled VAE decoding.
|
162 |
-
|
163 |
-
When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in
|
164 |
-
several steps. This is useful to save a large amount of memory and to allow the processing of larger images.
|
165 |
-
"""
|
166 |
-
self.vae.enable_tiling()
|
167 |
-
|
168 |
-
def disable_vae_tiling(self):
|
169 |
-
r"""
|
170 |
-
Disable tiled VAE decoding. If `enable_vae_tiling` was previously invoked, this method will go back to
|
171 |
-
computing decoding in one step.
|
172 |
-
"""
|
173 |
-
self.vae.disable_tiling()
|
174 |
-
|
175 |
-
# Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.enable_model_cpu_offload
|
176 |
-
def enable_model_cpu_offload(self, gpu_id=0):
|
177 |
-
r"""
|
178 |
-
Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
|
179 |
-
to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
|
180 |
-
method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
|
181 |
-
`enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
|
182 |
-
"""
|
183 |
-
if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
|
184 |
-
from accelerate import cpu_offload_with_hook
|
185 |
-
else:
|
186 |
-
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
|
187 |
-
|
188 |
-
device = torch.device(f"cuda:{gpu_id}")
|
189 |
-
|
190 |
-
if self.device.type != "cpu":
|
191 |
-
self.to("cpu", silence_dtype_warnings=True)
|
192 |
-
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
|
193 |
-
|
194 |
-
model_sequence = (
|
195 |
-
[self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
|
196 |
-
)
|
197 |
-
model_sequence.extend([self.unet, self.vae])
|
198 |
-
|
199 |
-
hook = None
|
200 |
-
for cpu_offloaded_model in model_sequence:
|
201 |
-
_, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
|
202 |
-
|
203 |
-
# We'll offload the last model manually.
|
204 |
-
self.final_offload_hook = hook
|
205 |
-
|
206 |
-
def encode_prompt(
|
207 |
-
self,
|
208 |
-
prompt,
|
209 |
-
device: Optional[torch.device] = None,
|
210 |
-
num_images_per_prompt: int = 1,
|
211 |
-
do_classifier_free_guidance: bool = True,
|
212 |
-
negative_prompt=None,
|
213 |
-
prompt_embeds: Optional[torch.FloatTensor] = None,
|
214 |
-
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
215 |
-
pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
|
216 |
-
negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
|
217 |
-
lora_scale: Optional[float] = None,
|
218 |
-
):
|
219 |
-
r"""
|
220 |
-
Encodes the prompt into text encoder hidden states.
|
221 |
-
|
222 |
-
Args:
|
223 |
-
prompt (`str` or `List[str]`, *optional*):
|
224 |
-
prompt to be encoded
|
225 |
-
device: (`torch.device`):
|
226 |
-
torch device
|
227 |
-
num_images_per_prompt (`int`):
|
228 |
-
number of images that should be generated per prompt
|
229 |
-
do_classifier_free_guidance (`bool`):
|
230 |
-
whether to use classifier free guidance or not
|
231 |
-
negative_prompt (`str` or `List[str]`, *optional*):
|
232 |
-
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
233 |
-
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
234 |
-
less than `1`).
|
235 |
-
prompt_embeds (`torch.FloatTensor`, *optional*):
|
236 |
-
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
237 |
-
provided, text embeddings will be generated from `prompt` input argument.
|
238 |
-
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
239 |
-
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
240 |
-
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
241 |
-
argument.
|
242 |
-
pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
|
243 |
-
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
|
244 |
-
If not provided, pooled text embeddings will be generated from `prompt` input argument.
|
245 |
-
negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
|
246 |
-
Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
247 |
-
weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
|
248 |
-
input argument.
|
249 |
-
lora_scale (`float`, *optional*):
|
250 |
-
A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
|
251 |
-
"""
|
252 |
-
device = device or self._execution_device
|
253 |
-
|
254 |
-
# set lora scale so that monkey patched LoRA
|
255 |
-
# function of text encoder can correctly access it
|
256 |
-
if lora_scale is not None and isinstance(self, LoraLoaderMixin):
|
257 |
-
self._lora_scale = lora_scale
|
258 |
-
|
259 |
-
if prompt is not None and isinstance(prompt, str):
|
260 |
-
batch_size = 1
|
261 |
-
elif prompt is not None and isinstance(prompt, list):
|
262 |
-
batch_size = len(prompt)
|
263 |
-
else:
|
264 |
-
batch_size = prompt_embeds.shape[0]
|
265 |
-
|
266 |
-
# Define tokenizers and text encoders
|
267 |
-
tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
|
268 |
-
text_encoders = (
|
269 |
-
[self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
|
270 |
-
)
|
271 |
-
|
272 |
-
if prompt_embeds is None:
|
273 |
-
# textual inversion: procecss multi-vector tokens if necessary
|
274 |
-
prompt_embeds_list = []
|
275 |
-
for tokenizer, text_encoder in zip(tokenizers, text_encoders):
|
276 |
-
if isinstance(self, TextualInversionLoaderMixin):
|
277 |
-
prompt = self.maybe_convert_prompt(prompt, tokenizer)
|
278 |
-
|
279 |
-
text_inputs = tokenizer(
|
280 |
-
prompt,
|
281 |
-
padding="max_length",
|
282 |
-
max_length=tokenizer.model_max_length,
|
283 |
-
truncation=True,
|
284 |
-
return_tensors="pt",
|
285 |
-
)
|
286 |
-
text_input_ids = text_inputs.input_ids
|
287 |
-
untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
|
288 |
-
|
289 |
-
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
|
290 |
-
text_input_ids, untruncated_ids
|
291 |
-
):
|
292 |
-
removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
|
293 |
-
logger.warning(
|
294 |
-
"The following part of your input was truncated because CLIP can only handle sequences up to"
|
295 |
-
f" {tokenizer.model_max_length} tokens: {removed_text}"
|
296 |
-
)
-
-            prompt_embeds = text_encoder(
-                text_input_ids.to(device),
-                output_hidden_states=True,
-            )
-
-            # We are only ALWAYS interested in the pooled output of the final text encoder
-            pooled_prompt_embeds = prompt_embeds[0]
-            prompt_embeds = prompt_embeds.hidden_states[-2]
-
-            bs_embed, seq_len, _ = prompt_embeds.shape
-            # duplicate text embeddings for each generation per prompt, using mps friendly method
-            prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
-            prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
-
-            prompt_embeds_list.append(prompt_embeds)
-
-        prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
-
-        # get unconditional embeddings for classifier free guidance
-        zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
-        if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
-            negative_prompt_embeds = torch.zeros_like(prompt_embeds)
-            negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
-        elif do_classifier_free_guidance and negative_prompt_embeds is None:
-            negative_prompt = negative_prompt or ""
-            uncond_tokens: List[str]
-            if prompt is not None and type(prompt) is not type(negative_prompt):
-                raise TypeError(
-                    f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
-                    f" {type(prompt)}."
-                )
-            elif isinstance(negative_prompt, str):
-                uncond_tokens = [negative_prompt]
-            elif batch_size != len(negative_prompt):
-                raise ValueError(
-                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
-                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
-                    " the batch size of `prompt`."
-                )
-            else:
-                uncond_tokens = negative_prompt
-
-            negative_prompt_embeds_list = []
-            for tokenizer, text_encoder in zip(tokenizers, text_encoders):
-                # textual inversion: process multi-vector tokens if necessary
-                if isinstance(self, TextualInversionLoaderMixin):
-                    uncond_tokens = self.maybe_convert_prompt(uncond_tokens, tokenizer)
-
-                max_length = prompt_embeds.shape[1]
-                uncond_input = tokenizer(
-                    uncond_tokens,
-                    padding="max_length",
-                    max_length=max_length,
-                    truncation=True,
-                    return_tensors="pt",
-                )
-
-                negative_prompt_embeds = text_encoder(
-                    uncond_input.input_ids.to(device),
-                    output_hidden_states=True,
-                )
-                # We are only ALWAYS interested in the pooled output of the final text encoder
-                negative_pooled_prompt_embeds = negative_prompt_embeds[0]
-                negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]
-
-                if do_classifier_free_guidance:
-                    # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
-                    seq_len = negative_prompt_embeds.shape[1]
-
-                    negative_prompt_embeds = negative_prompt_embeds.to(dtype=text_encoder.dtype, device=device)
-
-                    negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
-                    negative_prompt_embeds = negative_prompt_embeds.view(
-                        batch_size * num_images_per_prompt, seq_len, -1
-                    )
-
-                    # For classifier free guidance, we need to do two forward passes.
-                    # Here we concatenate the unconditional and text embeddings into a single batch
-                    # to avoid doing two forward passes
-
-                negative_prompt_embeds_list.append(negative_prompt_embeds)
-
-            negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
-
-        bs_embed = pooled_prompt_embeds.shape[0]
-        pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
-            bs_embed * num_images_per_prompt, -1
-        )
-        negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
-            bs_embed * num_images_per_prompt, -1
-        )
-
-        return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
-
-    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
-    def prepare_extra_step_kwargs(self, generator, eta):
-        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
-        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
-        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
-        # and should be between [0, 1]
-
-        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
-        extra_step_kwargs = {}
-        if accepts_eta:
-            extra_step_kwargs["eta"] = eta
-
-        # check if the scheduler accepts generator
-        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
-        if accepts_generator:
-            extra_step_kwargs["generator"] = generator
-        return extra_step_kwargs
-
-    def get_timesteps(self, num_inference_steps, strength, device):
-        # get the original timestep using init_timestep
-        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
-
-        t_start = max(num_inference_steps - init_timestep, 0)
-        timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
-
-        return timesteps, num_inference_steps - t_start
-
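
A quick worked example of the `strength` arithmetic in `get_timesteps` above (illustrative values, not part of the diff):

# Hypothetical inputs; the slice below mirrors the deleted method.
num_inference_steps, strength = 100, 0.3
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)  # 30
t_start = max(num_inference_steps - init_timestep, 0)                          # 70
# With a first-order scheduler, timesteps[70:] is kept: only the last 30 of
# 100 denoising steps run, which keeps the output close to the input image.
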
-    def check_inputs(
-        self, prompt, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None
-    ):
-        if (callback_steps is None) or (
-            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
-        ):
-            raise ValueError(
-                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
-                f" {type(callback_steps)}."
-            )
-
-        if prompt is not None and prompt_embeds is not None:
-            raise ValueError(
-                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
-                " only forward one of the two."
-            )
-        elif prompt is None and prompt_embeds is None:
-            raise ValueError(
-                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
-            )
-        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
-            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
-
-        if negative_prompt is not None and negative_prompt_embeds is not None:
-            raise ValueError(
-                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
-                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
-            )
-
-        if prompt_embeds is not None and negative_prompt_embeds is not None:
-            if prompt_embeds.shape != negative_prompt_embeds.shape:
-                raise ValueError(
-                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
-                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
-                    f" {negative_prompt_embeds.shape}."
-                )
-
-    def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
-        shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
-        if isinstance(generator, list) and len(generator) != batch_size:
-            raise ValueError(
-                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
-                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
-            )
-
-        if latents is None:
-            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
-        else:
-            latents = latents.to(device)
-
-        # scale the initial noise by the standard deviation required by the scheduler
-        latents = latents * self.scheduler.init_noise_sigma
-        return latents
-
-    def prepare_image_latents(
-        self, image, batch_size, num_images_per_prompt, dtype, device, do_classifier_free_guidance, generator=None
-    ):
-        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
-            raise ValueError(
-                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
-            )
-
-        image = image.to(device=device, dtype=dtype)
-
-        batch_size = batch_size * num_images_per_prompt
-
-        if image.shape[1] == 4:
-            image_latents = image
-        else:
-            # make sure the VAE is in float32 mode, as it overflows in float16
-            if self.vae.config.force_upcast:
-                image = image.float()
-                self.vae.to(dtype=torch.float32)
-
-            if isinstance(generator, list) and len(generator) != batch_size:
-                raise ValueError(
-                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
-                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
-                )
-
-            if isinstance(generator, list):
-                image_latents = [self.vae.encode(image[i : i + 1]).latent_dist.mode() for i in range(batch_size)]
-                image_latents = torch.cat(image_latents, dim=0)
-            else:
-                image_latents = self.vae.encode(image).latent_dist.mode()
-
-        if batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] == 0:
-            # expand image_latents for batch_size
-            deprecation_message = (
-                f"You have passed {batch_size} text prompts (`prompt`), but only {image_latents.shape[0]} initial"
-                " images (`image`). Initial images are now duplicated to match the number of text prompts. Note"
-                " that this behavior is deprecated and will be removed in version 1.0.0. Please make sure to update"
-                " your script to pass as many initial images as text prompts to suppress this warning."
-            )
-            deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False)
-            additional_image_per_prompt = batch_size // image_latents.shape[0]
-            image_latents = torch.cat([image_latents] * additional_image_per_prompt, dim=0)
-        elif batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] != 0:
-            raise ValueError(
-                f"Cannot duplicate `image` of batch size {image_latents.shape[0]} to {batch_size} text prompts."
-            )
-        else:
-            image_latents = torch.cat([image_latents], dim=0)
-
-        if do_classifier_free_guidance:
-            uncond_image_latents = torch.zeros_like(image_latents)
-            image_latents = torch.cat([image_latents, image_latents, uncond_image_latents], dim=0)
-
-        return image_latents
-
-    def _get_add_time_ids(
-        self, original_size, crops_coords_top_left, target_size, aesthetic_score, negative_aesthetic_score, dtype
-    ):
-        if self.config.requires_aesthetics_score:
-            add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,))
-            add_neg_time_ids = list(original_size + crops_coords_top_left + (negative_aesthetic_score,))
-        else:
-            add_time_ids = list(original_size + crops_coords_top_left + target_size)
-            add_neg_time_ids = list(original_size + crops_coords_top_left + target_size)
-
-        passed_add_embed_dim = (
-            self.unet.config.addition_time_embed_dim * len(add_time_ids) + self.text_encoder_2.config.projection_dim
-        )
-        expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
-
-        if (
-            expected_add_embed_dim > passed_add_embed_dim
-            and (expected_add_embed_dim - passed_add_embed_dim) == self.unet.config.addition_time_embed_dim
-        ):
-            raise ValueError(
-                f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to enable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=True)` to make sure `aesthetic_score` {aesthetic_score} and `negative_aesthetic_score` {negative_aesthetic_score} are correctly used by the model."
-            )
-        elif (
-            expected_add_embed_dim < passed_add_embed_dim
-            and (passed_add_embed_dim - expected_add_embed_dim) == self.unet.config.addition_time_embed_dim
-        ):
-            raise ValueError(
-                f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to disable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=False)` to make sure `target_size` {target_size} is correctly used by the model."
-            )
-        elif expected_add_embed_dim != passed_add_embed_dim:
-            raise ValueError(
-                f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
-            )
-
-        add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
-        add_neg_time_ids = torch.tensor([add_neg_time_ids], dtype=dtype)
-
-        return add_time_ids, add_neg_time_ids
-
-    def upcast_vae(self):
-        dtype = self.vae.dtype
-        self.vae.to(dtype=torch.float32)
-        use_torch_2_0_or_xformers = isinstance(
-            self.vae.decoder.mid_block.attentions[0].processor,
-            (
-                AttnProcessor2_0,
-                XFormersAttnProcessor,
-                LoRAXFormersAttnProcessor,
-                LoRAAttnProcessor2_0,
-            ),
-        )
-        # if xformers or torch_2_0 is used attention block does not need
-        # to be in float32 which can save lots of memory
-        if use_torch_2_0_or_xformers:
-            self.vae.post_quant_conv.to(dtype)
-            self.vae.decoder.conv_in.to(dtype)
-            self.vae.decoder.mid_block.to(dtype)
-
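
A quick sanity check of the dimension test in `_get_add_time_ids`, using the commonly cited SDXL values (assumed here for illustration: `addition_time_embed_dim=256`, `text_encoder_2` `projection_dim=1280`; not read from the diff):

# Without aesthetics: original_size(2) + crop(2) + target_size(2) = 6 ids.
# With aesthetics:    original_size(2) + crop(2) + aesthetic_score(1) = 5 ids.
addition_time_embed_dim, projection_dim = 256, 1280
print(addition_time_embed_dim * 6 + projection_dim)  # 2816
print(addition_time_embed_dim * 5 + projection_dim)  # 2560
# The two branches differ by exactly one addition_time_embed_dim (256), which
# is the off-by-one-embedding case the requires_aesthetics_score errors key on.
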
-    @torch.no_grad()
-    def __call__(
-        self,
-        prompt: Union[str, List[str]] = None,
-        image: Union[
-            torch.FloatTensor,
-            PIL.Image.Image,
-            np.ndarray,
-            List[torch.FloatTensor],
-            List[PIL.Image.Image],
-            List[np.ndarray],
-        ] = None,
-        num_inference_steps: int = 100,
-        guidance_scale: float = 7.5,
-        image_guidance_scale: float = 1.5,
-        negative_prompt: Optional[Union[str, List[str]]] = None,
-        num_images_per_prompt: Optional[int] = 1,
-        eta: float = 0.0,
-        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
-        latents: Optional[torch.FloatTensor] = None,
-        prompt_embeds: Optional[torch.FloatTensor] = None,
-        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
-        pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
-        negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
-        output_type: Optional[str] = "pil",
-        return_dict: bool = True,
-        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
-        callback_steps: int = 1,
-        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
-        guidance_rescale: float = 0.0,
-        original_size: Tuple[int, int] = None,
-        crops_coords_top_left: Tuple[int, int] = (0, 0),
-        target_size: Tuple[int, int] = None,
-        aesthetic_score: float = 6.0,
-        negative_aesthetic_score: float = 2.5,
-    ):
-        r"""
-        Function invoked when calling the pipeline for generation.
-
-        Args:
-            prompt (`str` or `List[str]`, *optional*):
-                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
-                instead.
-            image (`torch.FloatTensor` or `PIL.Image.Image` or `np.ndarray` or `List[torch.FloatTensor]` or `List[PIL.Image.Image]` or `List[np.ndarray]`):
-                The image(s) to modify with the pipeline.
-            num_inference_steps (`int`, *optional*, defaults to 100):
-                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
-                expense of slower inference.
-            guidance_scale (`float`, *optional*, defaults to 7.5):
-                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
-                `guidance_scale` is defined as `w` of equation 2. of [Imagen
-                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
-                1`. A higher guidance scale encourages images that are closely linked to the text `prompt`,
-                usually at the expense of lower image quality.
-            image_guidance_scale (`float`, *optional*, defaults to 1.5):
-                Image guidance scale pushes the generated image towards the initial image `image`. Image guidance
-                scale is enabled by setting `image_guidance_scale > 1`. A higher image guidance scale encourages
-                images that are closely linked to the source image `image`, usually at the expense of lower
-                image quality. This pipeline requires a value of at least `1`.
-            negative_prompt (`str` or `List[str]`, *optional*):
-                The prompt or prompts not to guide the image generation. If not defined, one has to pass
-                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
-                less than `1`).
-            num_images_per_prompt (`int`, *optional*, defaults to 1):
-                The number of images to generate per prompt.
-            eta (`float`, *optional*, defaults to 0.0):
-                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
-                [`schedulers.DDIMScheduler`], will be ignored for others.
-            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
-                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
-                to make generation deterministic.
-            latents (`torch.FloatTensor`, *optional*):
-                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
-                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will be generated by sampling using the supplied random `generator`.
-            prompt_embeds (`torch.FloatTensor`, *optional*):
-                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
-                provided, text embeddings will be generated from `prompt` input argument.
-            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
-                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
-                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
-                argument.
-            pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
-                Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
-                If not provided, pooled text embeddings will be generated from `prompt` input argument.
-            negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
-                Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
-                weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
-                input argument.
-            output_type (`str`, *optional*, defaults to `"pil"`):
-                The output format of the generated image. Choose between
-                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
-            return_dict (`bool`, *optional*, defaults to `True`):
-                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] instead of a
-                plain tuple.
-            callback (`Callable`, *optional*):
-                A function that will be called every `callback_steps` steps during inference. The function will be
-                called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
-            callback_steps (`int`, *optional*, defaults to 1):
-                The frequency at which the `callback` function will be called. If not specified, the callback will be
-                called at every step.
-            cross_attention_kwargs (`dict`, *optional*):
-                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
-                `self.processor` in
-                [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
-            guidance_rescale (`float`, *optional*, defaults to 0.0):
-                Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
-                Flawed](https://arxiv.org/pdf/2305.08891.pdf). `guidance_rescale` is defined as `φ` in equation 16. of
-                [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).
-                Guidance rescale factor should fix overexposure when using zero terminal SNR.
-            original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
-                TODO
-            crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
-                TODO
-            target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
-                TODO
-            aesthetic_score (`float`, *optional*, defaults to 6.0):
-                TODO
-            negative_aesthetic_score (`float`, *optional*, defaults to 2.5):
-                TODO
-
-        Examples:
-
-        Returns:
-            [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] or `tuple`:
-            [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a
-            `tuple`. When returning a tuple, the first element is a list with the generated images, and the second
-            element is a list of `bool`s denoting whether the corresponding generated image likely represents
-            "not-safe-for-work" (nsfw) content, according to the `safety_checker`.
-        """
-        # 1. Check inputs. Raise error if not correct
-        self.check_inputs(prompt, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds)
-
-        if image is None:
-            raise ValueError("`image` input cannot be undefined.")
-
-        # 2. Define call parameters
-        if prompt is not None and isinstance(prompt, str):
-            batch_size = 1
-        elif prompt is not None and isinstance(prompt, list):
-            batch_size = len(prompt)
-        else:
-            batch_size = prompt_embeds.shape[0]
-
-        device = self._execution_device
-
-        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
-        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
-        # corresponds to doing no classifier free guidance.
-        do_classifier_free_guidance = guidance_scale > 1.0 and image_guidance_scale >= 1.0
-        # check if scheduler is in sigmas space
-        scheduler_is_in_sigma_space = hasattr(self.scheduler, "sigmas")
-
-        # 3. Encode input prompt
-        text_encoder_lora_scale = (
-            cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
-        )
-        (
-            prompt_embeds,
-            negative_prompt_embeds,
-            pooled_prompt_embeds,
-            negative_pooled_prompt_embeds,
-        ) = self.encode_prompt(
-            prompt,
-            device,
-            num_images_per_prompt,
-            do_classifier_free_guidance,
-            negative_prompt,
-            prompt_embeds=prompt_embeds,
-            negative_prompt_embeds=negative_prompt_embeds,
-            pooled_prompt_embeds=pooled_prompt_embeds,
-            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
-            lora_scale=text_encoder_lora_scale,
-        )
-
-        # 4. Preprocess image
-        image = self.image_processor.preprocess(image).to(device)
-
-        # 5. Prepare timesteps
-        self.scheduler.set_timesteps(num_inference_steps, device=device)
-        timesteps = self.scheduler.timesteps
-
-        # 6. Prepare Image latents
-        image_latents = self.prepare_image_latents(
-            image,
-            batch_size,
-            num_images_per_prompt,
-            prompt_embeds.dtype,
-            device,
-            do_classifier_free_guidance,
-            generator,
-        )
-
-        height, width = image_latents.shape[-2:]
-        height = height * self.vae_scale_factor
-        width = width * self.vae_scale_factor
-
-        # 7. Prepare latent variables
-        num_channels_latents = self.vae.config.latent_channels
-        latents = self.prepare_latents(
-            batch_size * num_images_per_prompt,
-            num_channels_latents,
-            height,
-            width,
-            prompt_embeds.dtype,
-            device,
-            generator,
-            latents,
-        )
-
-        # 8. Check that shapes of latents and image match the UNet channels
-        num_channels_image = image_latents.shape[1]
-        if num_channels_latents + num_channels_image != self.unet.config.in_channels:
-            raise ValueError(
-                f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
-                f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
-                f" `num_channels_image`: {num_channels_image} "
-                f" = {num_channels_latents + num_channels_image}. Please verify the config of"
-                " `pipeline.unet` or your `image` input."
-            )
-
-        # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
-        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
-
-        original_size = original_size or (height, width)
-        target_size = target_size or (height, width)
-
-        # 10. Prepare added time ids & embeddings
-        add_text_embeds = pooled_prompt_embeds
-        add_time_ids, add_neg_time_ids = self._get_add_time_ids(
-            original_size,
-            crops_coords_top_left,
-            target_size,
-            aesthetic_score,
-            negative_aesthetic_score,
-            dtype=prompt_embeds.dtype,
-        )
-        add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1)
-
-        original_prompt_embeds_len = len(prompt_embeds)
-        original_add_text_embeds_len = len(add_text_embeds)
-        original_add_time_ids = len(add_time_ids)
-
-        if do_classifier_free_guidance:
-            prompt_embeds = torch.cat([prompt_embeds, negative_prompt_embeds], dim=0)
-            add_text_embeds = torch.cat([add_text_embeds, negative_pooled_prompt_embeds], dim=0)
-            add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1)
-            add_time_ids = torch.cat([add_time_ids, add_neg_time_ids], dim=0)
-
-            # Make dimensions consistent
-            add_text_embeds = torch.concat((add_text_embeds, add_text_embeds[:original_add_text_embeds_len]), dim=0)
-            add_time_ids = torch.concat((add_time_ids, add_time_ids.clone()[:original_add_time_ids]), dim=0)
-            prompt_embeds = torch.concat((prompt_embeds, prompt_embeds.clone()[:original_prompt_embeds_len]), dim=0)
-
-        prompt_embeds = prompt_embeds.to(device).to(torch.float32)
-        add_text_embeds = add_text_embeds.to(device).to(torch.float32)
-        add_time_ids = add_time_ids.to(device)
-
-        # 11. Denoising loop
-        self.unet = self.unet.to(torch.float32)
-        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
-        with self.progress_bar(total=num_inference_steps) as progress_bar:
-            for i, t in enumerate(timesteps):
-                # Expand the latents if we are doing classifier free guidance.
-                # The latents are expanded 3 times because for pix2pix the guidance
-                # is applied for both the text and the input image.
-                latent_model_input = torch.cat([latents] * 3) if do_classifier_free_guidance else latents
-
-                # concat latents, image_latents in the channel dimension
-                scaled_latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
-                scaled_latent_model_input = torch.cat([scaled_latent_model_input, image_latents], dim=1)
-
-                # predict the noise residual
-                added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
-                noise_pred = self.unet(
-                    scaled_latent_model_input,
-                    t,
-                    encoder_hidden_states=prompt_embeds,
-                    cross_attention_kwargs=cross_attention_kwargs,
-                    added_cond_kwargs=added_cond_kwargs,
-                    return_dict=False,
-                )[0]
-
-                # Hack:
-                # For karras style schedulers the model does classifier free guidance using the
-                # predicted_original_sample instead of the noise_pred. So we need to compute the
-                # predicted_original_sample here if we are using a karras style scheduler.
-                if scheduler_is_in_sigma_space:
-                    step_index = (self.scheduler.timesteps == t).nonzero()[0].item()
-                    sigma = self.scheduler.sigmas[step_index]
-                    noise_pred = latent_model_input - sigma * noise_pred
-
-                # perform guidance
-                if do_classifier_free_guidance:
-                    noise_pred_text, noise_pred_image, noise_pred_uncond = noise_pred.chunk(3)
-                    noise_pred = (
-                        noise_pred_uncond
-                        + guidance_scale * (noise_pred_text - noise_pred_image)
-                        + image_guidance_scale * (noise_pred_image - noise_pred_uncond)
-                    )
-
-                if do_classifier_free_guidance and guidance_rescale > 0.0:
-                    # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
-                    noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
-
-                # Hack:
-                # For karras style schedulers the model does classifier free guidance using the
-                # predicted_original_sample instead of the noise_pred. But the scheduler.step function
-                # expects the noise_pred and computes the predicted_original_sample internally. So we
-                # need to overwrite the noise_pred here such that the value of the computed
-                # predicted_original_sample is correct.
-                if scheduler_is_in_sigma_space:
-                    noise_pred = (noise_pred - latents) / (-sigma)
-
-                # compute the previous noisy sample x_t -> x_t-1
-                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
-
-                # call the callback, if provided
-                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
-                    progress_bar.update()
-                    if callback is not None and i % callback_steps == 0:
-                        callback(i, t, latents)
-
-        # make sure the VAE is in float32 mode, as it overflows in float16
-        if self.vae.dtype == torch.float16 and self.vae.config.force_upcast:
-            self.upcast_vae()
-            latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
-
-        if not output_type == "latent":
-            image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
-        else:
-            image = latents
-            return StableDiffusionXLPipelineOutput(images=image)
-
-        # apply watermark if available
-        if self.watermark is not None:
-            image = self.watermark.apply_watermark(image)
-
-        image = self.image_processor.postprocess(image, output_type=output_type)
-
-        # Offload last model to CPU
-        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
-            self.final_offload_hook.offload()
-
-        if not return_dict:
-            return (image,)
-
-        return StableDiffusionXLPipelineOutput(images=image)
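
The distinctive step in the deleted `__call__` above is the three-way classifier-free guidance: the UNet batch is tripled into [text+image, image-only, unconditional] predictions and recombined with two independent weights. A minimal, self-contained sketch of just that combination (shapes and weights assumed for illustration):

import torch

noise_pred = torch.randn(3, 4, 128, 128)          # stacked UNet output (assumed shape)
guidance_scale, image_guidance_scale = 7.5, 1.5   # the pipeline's default weights
noise_pred_text, noise_pred_image, noise_pred_uncond = noise_pred.chunk(3)
guided = (
    noise_pred_uncond
    + guidance_scale * (noise_pred_text - noise_pred_image)
    + image_guidance_scale * (noise_pred_image - noise_pred_uncond)
)
# guidance_scale pulls toward the text edit; image_guidance_scale pulls toward
# faithfulness to the input image, matching the InstructPix2Pix formulation.
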
spaces/Andy1621/uniformer_image_detection/configs/fcos/fcos_center_r50_caffe_fpn_gn-head_1x_coco.py
DELETED
@@ -1,2 +0,0 @@
-_base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py'
-model = dict(bbox_head=dict(center_sampling=True, center_sample_radius=1.5))
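
This two-line config works through mmdetection's `_base_` inheritance: everything comes from the base FCOS config, and only the center-sampling fields are overridden. A sketch of how the merged config could be inspected (assuming an mmdetection checkout where this path exists):

from mmcv import Config

# _base_ files are merged recursively when the config is loaded, so the
# dict above only has to name the fields it overrides.
cfg = Config.fromfile('configs/fcos/fcos_center_r50_caffe_fpn_gn-head_1x_coco.py')
print(cfg.model.bbox_head.center_sampling)       # True
print(cfg.model.bbox_head.center_sample_radius)  # 1.5
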
spaces/Andy1621/uniformer_image_detection/configs/pisa/pisa_mask_rcnn_x101_32x4d_fpn_1x_coco.py
DELETED
@@ -1,30 +0,0 @@
-_base_ = '../mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py'
-
-model = dict(
-    roi_head=dict(
-        type='PISARoIHead',
-        bbox_head=dict(
-            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))),
-    train_cfg=dict(
-        rpn_proposal=dict(
-            nms_pre=2000,
-            max_per_img=2000,
-            nms=dict(type='nms', iou_threshold=0.7),
-            min_bbox_size=0),
-        rcnn=dict(
-            sampler=dict(
-                type='ScoreHLRSampler',
-                num=512,
-                pos_fraction=0.25,
-                neg_pos_ub=-1,
-                add_gt_as_proposals=True,
-                k=0.5,
-                bias=0.),
-            isr=dict(k=2, bias=0),
-            carl=dict(k=1, bias=0.2))),
-    test_cfg=dict(
-        rpn=dict(
-            nms_pre=2000,
-            max_per_img=2000,
-            nms=dict(type='nms', iou_threshold=0.7),
-            min_bbox_size=0)))
spaces/Andy1621/uniformer_image_detection/mmdet/datasets/pipelines/auto_augment.py
DELETED
@@ -1,890 +0,0 @@
|
|
1 |
-
import copy
|
2 |
-
|
3 |
-
import cv2
|
4 |
-
import mmcv
|
5 |
-
import numpy as np
|
6 |
-
|
7 |
-
from ..builder import PIPELINES
|
8 |
-
from .compose import Compose
|
9 |
-
|
10 |
-
_MAX_LEVEL = 10
|
11 |
-
|
12 |
-
|
13 |
-
def level_to_value(level, max_value):
|
14 |
-
"""Map from level to values based on max_value."""
|
15 |
-
return (level / _MAX_LEVEL) * max_value
|
16 |
-
|
17 |
-
|
18 |
-
def enhance_level_to_value(level, a=1.8, b=0.1):
|
19 |
-
"""Map from level to values."""
|
20 |
-
return (level / _MAX_LEVEL) * a + b
|
21 |
-
|
22 |
-
|
23 |
-
def random_negative(value, random_negative_prob):
|
24 |
-
"""Randomly negate value based on random_negative_prob."""
|
25 |
-
return -value if np.random.rand() < random_negative_prob else value
|
26 |
-
|
27 |
-
|
28 |
-
def bbox2fields():
|
29 |
-
"""The key correspondence from bboxes to labels, masks and
|
30 |
-
segmentations."""
|
31 |
-
bbox2label = {
|
32 |
-
'gt_bboxes': 'gt_labels',
|
33 |
-
'gt_bboxes_ignore': 'gt_labels_ignore'
|
34 |
-
}
|
35 |
-
bbox2mask = {
|
36 |
-
'gt_bboxes': 'gt_masks',
|
37 |
-
'gt_bboxes_ignore': 'gt_masks_ignore'
|
38 |
-
}
|
39 |
-
bbox2seg = {
|
40 |
-
'gt_bboxes': 'gt_semantic_seg',
|
41 |
-
}
|
42 |
-
return bbox2label, bbox2mask, bbox2seg
|
43 |
-
|
44 |
-
|
45 |
-
@PIPELINES.register_module()
|
46 |
-
class AutoAugment(object):
|
47 |
-
"""Auto augmentation.
|
48 |
-
|
49 |
-
This data augmentation is proposed in `Learning Data Augmentation
|
50 |
-
Strategies for Object Detection <https://arxiv.org/pdf/1906.11172>`_.
|
51 |
-
|
52 |
-
TODO: Implement 'Shear', 'Sharpness' and 'Rotate' transforms
|
53 |
-
|
54 |
-
Args:
|
55 |
-
policies (list[list[dict]]): The policies of auto augmentation. Each
|
56 |
-
policy in ``policies`` is a specific augmentation policy, and is
|
57 |
-
composed by several augmentations (dict). When AutoAugment is
|
58 |
-
called, a random policy in ``policies`` will be selected to
|
59 |
-
augment images.
|
60 |
-
|
61 |
-
Examples:
|
62 |
-
>>> replace = (104, 116, 124)
|
63 |
-
>>> policies = [
|
64 |
-
>>> [
|
65 |
-
>>> dict(type='Sharpness', prob=0.0, level=8),
|
66 |
-
>>> dict(
|
67 |
-
>>> type='Shear',
|
68 |
-
>>> prob=0.4,
|
69 |
-
>>> level=0,
|
70 |
-
>>> replace=replace,
|
71 |
-
>>> axis='x')
|
72 |
-
>>> ],
|
73 |
-
>>> [
|
74 |
-
>>> dict(
|
75 |
-
>>> type='Rotate',
|
76 |
-
>>> prob=0.6,
|
77 |
-
>>> level=10,
|
78 |
-
>>> replace=replace),
|
79 |
-
>>> dict(type='Color', prob=1.0, level=6)
|
80 |
-
>>> ]
|
81 |
-
>>> ]
|
82 |
-
>>> augmentation = AutoAugment(policies)
|
83 |
-
>>> img = np.ones(100, 100, 3)
|
84 |
-
>>> gt_bboxes = np.ones(10, 4)
|
85 |
-
>>> results = dict(img=img, gt_bboxes=gt_bboxes)
|
86 |
-
>>> results = augmentation(results)
|
87 |
-
"""
|
88 |
-
|
89 |
-
def __init__(self, policies):
|
90 |
-
assert isinstance(policies, list) and len(policies) > 0, \
|
91 |
-
'Policies must be a non-empty list.'
|
92 |
-
for policy in policies:
|
93 |
-
assert isinstance(policy, list) and len(policy) > 0, \
|
94 |
-
'Each policy in policies must be a non-empty list.'
|
95 |
-
for augment in policy:
|
96 |
-
assert isinstance(augment, dict) and 'type' in augment, \
|
97 |
-
'Each specific augmentation must be a dict with key' \
|
98 |
-
' "type".'
|
99 |
-
|
100 |
-
self.policies = copy.deepcopy(policies)
|
101 |
-
self.transforms = [Compose(policy) for policy in self.policies]
|
102 |
-
|
103 |
-
def __call__(self, results):
|
104 |
-
transform = np.random.choice(self.transforms)
|
105 |
-
return transform(results)
|
106 |
-
|
107 |
-
def __repr__(self):
|
108 |
-
return f'{self.__class__.__name__}(policies={self.policies})'
|
109 |
-
|
110 |
-
|
111 |
-
@PIPELINES.register_module()
|
112 |
-
class Shear(object):
|
113 |
-
"""Apply Shear Transformation to image (and its corresponding bbox, mask,
|
114 |
-
segmentation).
|
115 |
-
|
116 |
-
Args:
|
117 |
-
level (int | float): The level should be in range [0,_MAX_LEVEL].
|
118 |
-
img_fill_val (int | float | tuple): The filled values for image border.
|
119 |
-
If float, the same fill value will be used for all the three
|
120 |
-
channels of image. If tuple, the should be 3 elements.
|
121 |
-
seg_ignore_label (int): The fill value used for segmentation map.
|
122 |
-
Note this value must equals ``ignore_label`` in ``semantic_head``
|
123 |
-
of the corresponding config. Default 255.
|
124 |
-
prob (float): The probability for performing Shear and should be in
|
125 |
-
range [0, 1].
|
126 |
-
direction (str): The direction for shear, either "horizontal"
|
127 |
-
or "vertical".
|
128 |
-
max_shear_magnitude (float): The maximum magnitude for Shear
|
129 |
-
transformation.
|
130 |
-
random_negative_prob (float): The probability that turns the
|
131 |
-
offset negative. Should be in range [0,1]
|
132 |
-
interpolation (str): Same as in :func:`mmcv.imshear`.
|
133 |
-
"""
|
134 |
-
|
135 |
-
def __init__(self,
|
136 |
-
level,
|
137 |
-
img_fill_val=128,
|
138 |
-
seg_ignore_label=255,
|
139 |
-
prob=0.5,
|
140 |
-
direction='horizontal',
|
141 |
-
max_shear_magnitude=0.3,
|
142 |
-
random_negative_prob=0.5,
|
143 |
-
interpolation='bilinear'):
|
144 |
-
assert isinstance(level, (int, float)), 'The level must be type ' \
|
145 |
-
f'int or float, got {type(level)}.'
|
146 |
-
assert 0 <= level <= _MAX_LEVEL, 'The level should be in range ' \
|
147 |
-
f'[0,{_MAX_LEVEL}], got {level}.'
|
148 |
-
if isinstance(img_fill_val, (float, int)):
|
149 |
-
img_fill_val = tuple([float(img_fill_val)] * 3)
|
150 |
-
elif isinstance(img_fill_val, tuple):
|
151 |
-
assert len(img_fill_val) == 3, 'img_fill_val as tuple must ' \
|
152 |
-
f'have 3 elements. got {len(img_fill_val)}.'
|
153 |
-
img_fill_val = tuple([float(val) for val in img_fill_val])
|
154 |
-
else:
|
155 |
-
raise ValueError(
|
156 |
-
'img_fill_val must be float or tuple with 3 elements.')
|
157 |
-
assert np.all([0 <= val <= 255 for val in img_fill_val]), 'all ' \
|
158 |
-
'elements of img_fill_val should between range [0,255].' \
|
159 |
-
f'got {img_fill_val}.'
|
160 |
-
assert 0 <= prob <= 1.0, 'The probability of shear should be in ' \
|
161 |
-
f'range [0,1]. got {prob}.'
|
162 |
-
assert direction in ('horizontal', 'vertical'), 'direction must ' \
|
163 |
-
f'in be either "horizontal" or "vertical". got {direction}.'
|
164 |
-
assert isinstance(max_shear_magnitude, float), 'max_shear_magnitude ' \
|
165 |
-
f'should be type float. got {type(max_shear_magnitude)}.'
|
166 |
-
assert 0. <= max_shear_magnitude <= 1., 'Defaultly ' \
|
167 |
-
'max_shear_magnitude should be in range [0,1]. ' \
|
168 |
-
f'got {max_shear_magnitude}.'
|
169 |
-
self.level = level
|
170 |
-
self.magnitude = level_to_value(level, max_shear_magnitude)
|
171 |
-
self.img_fill_val = img_fill_val
|
172 |
-
self.seg_ignore_label = seg_ignore_label
|
173 |
-
self.prob = prob
|
174 |
-
self.direction = direction
|
175 |
-
self.max_shear_magnitude = max_shear_magnitude
|
176 |
-
self.random_negative_prob = random_negative_prob
|
177 |
-
self.interpolation = interpolation
|
178 |
-
|
179 |
-
def _shear_img(self,
|
180 |
-
results,
|
181 |
-
magnitude,
|
182 |
-
direction='horizontal',
|
183 |
-
interpolation='bilinear'):
|
184 |
-
"""Shear the image.
|
185 |
-
|
186 |
-
Args:
|
187 |
-
results (dict): Result dict from loading pipeline.
|
188 |
-
magnitude (int | float): The magnitude used for shear.
|
189 |
-
direction (str): The direction for shear, either "horizontal"
|
190 |
-
or "vertical".
|
191 |
-
interpolation (str): Same as in :func:`mmcv.imshear`.
|
192 |
-
"""
|
193 |
-
for key in results.get('img_fields', ['img']):
|
194 |
-
img = results[key]
|
195 |
-
img_sheared = mmcv.imshear(
|
196 |
-
img,
|
197 |
-
magnitude,
|
198 |
-
direction,
|
199 |
-
border_value=self.img_fill_val,
|
200 |
-
interpolation=interpolation)
|
201 |
-
results[key] = img_sheared.astype(img.dtype)
|
202 |
-
|
203 |
-
def _shear_bboxes(self, results, magnitude):
|
204 |
-
"""Shear the bboxes."""
|
205 |
-
h, w, c = results['img_shape']
|
206 |
-
if self.direction == 'horizontal':
|
207 |
-
shear_matrix = np.stack([[1, magnitude],
|
208 |
-
[0, 1]]).astype(np.float32) # [2, 2]
|
209 |
-
else:
|
210 |
-
shear_matrix = np.stack([[1, 0], [magnitude,
|
211 |
-
1]]).astype(np.float32)
|
212 |
-
for key in results.get('bbox_fields', []):
|
213 |
-
min_x, min_y, max_x, max_y = np.split(
|
214 |
-
results[key], results[key].shape[-1], axis=-1)
|
215 |
-
coordinates = np.stack([[min_x, min_y], [max_x, min_y],
|
216 |
-
[min_x, max_y],
|
217 |
-
[max_x, max_y]]) # [4, 2, nb_box, 1]
|
218 |
-
coordinates = coordinates[..., 0].transpose(
|
219 |
-
(2, 1, 0)).astype(np.float32) # [nb_box, 2, 4]
|
220 |
-
new_coords = np.matmul(shear_matrix[None, :, :],
|
221 |
-
coordinates) # [nb_box, 2, 4]
|
222 |
-
min_x = np.min(new_coords[:, 0, :], axis=-1)
|
223 |
-
min_y = np.min(new_coords[:, 1, :], axis=-1)
|
224 |
-
max_x = np.max(new_coords[:, 0, :], axis=-1)
|
225 |
-
max_y = np.max(new_coords[:, 1, :], axis=-1)
|
226 |
-
min_x = np.clip(min_x, a_min=0, a_max=w)
|
227 |
-
min_y = np.clip(min_y, a_min=0, a_max=h)
|
228 |
-
max_x = np.clip(max_x, a_min=min_x, a_max=w)
|
229 |
-
max_y = np.clip(max_y, a_min=min_y, a_max=h)
|
230 |
-
results[key] = np.stack([min_x, min_y, max_x, max_y],
|
231 |
-
axis=-1).astype(results[key].dtype)
|
232 |
-
|
233 |
-
def _shear_masks(self,
|
234 |
-
results,
|
235 |
-
magnitude,
|
236 |
-
direction='horizontal',
|
237 |
-
fill_val=0,
|
238 |
-
interpolation='bilinear'):
|
239 |
-
"""Shear the masks."""
|
240 |
-
h, w, c = results['img_shape']
|
241 |
-
for key in results.get('mask_fields', []):
|
242 |
-
masks = results[key]
|
243 |
-
results[key] = masks.shear((h, w),
|
244 |
-
magnitude,
|
245 |
-
direction,
|
246 |
-
border_value=fill_val,
|
247 |
-
interpolation=interpolation)
|
248 |
-
|
249 |
-
def _shear_seg(self,
|
250 |
-
results,
|
251 |
-
magnitude,
|
252 |
-
direction='horizontal',
|
253 |
-
fill_val=255,
|
254 |
-
interpolation='bilinear'):
|
255 |
-
"""Shear the segmentation maps."""
|
256 |
-
for key in results.get('seg_fields', []):
|
257 |
-
seg = results[key]
|
258 |
-
results[key] = mmcv.imshear(
|
259 |
-
seg,
|
260 |
-
magnitude,
|
261 |
-
direction,
|
262 |
-
border_value=fill_val,
|
263 |
-
interpolation=interpolation).astype(seg.dtype)
|
264 |
-
|
265 |
-
def _filter_invalid(self, results, min_bbox_size=0):
|
266 |
-
"""Filter bboxes and corresponding masks too small after shear
|
267 |
-
augmentation."""
|
268 |
-
bbox2label, bbox2mask, _ = bbox2fields()
|
269 |
-
for key in results.get('bbox_fields', []):
|
270 |
-
bbox_w = results[key][:, 2] - results[key][:, 0]
|
271 |
-
bbox_h = results[key][:, 3] - results[key][:, 1]
|
272 |
-
valid_inds = (bbox_w > min_bbox_size) & (bbox_h > min_bbox_size)
|
273 |
-
valid_inds = np.nonzero(valid_inds)[0]
|
274 |
-
results[key] = results[key][valid_inds]
|
275 |
-
# label fields. e.g. gt_labels and gt_labels_ignore
|
276 |
-
label_key = bbox2label.get(key)
|
277 |
-
if label_key in results:
|
278 |
-
results[label_key] = results[label_key][valid_inds]
|
279 |
-
# mask fields, e.g. gt_masks and gt_masks_ignore
|
280 |
-
mask_key = bbox2mask.get(key)
|
281 |
-
if mask_key in results:
|
282 |
-
results[mask_key] = results[mask_key][valid_inds]
|
283 |
-
|
284 |
-
def __call__(self, results):
|
285 |
-
"""Call function to shear images, bounding boxes, masks and semantic
|
286 |
-
segmentation maps.
|
287 |
-
|
288 |
-
Args:
|
289 |
-
results (dict): Result dict from loading pipeline.
|
290 |
-
|
291 |
-
Returns:
|
292 |
-
dict: Sheared results.
|
293 |
-
"""
|
294 |
-
if np.random.rand() > self.prob:
|
295 |
-
return results
|
296 |
-
magnitude = random_negative(self.magnitude, self.random_negative_prob)
|
297 |
-
self._shear_img(results, magnitude, self.direction, self.interpolation)
|
298 |
-
self._shear_bboxes(results, magnitude)
|
299 |
-
# fill_val set to 0 for background of mask.
|
300 |
-
self._shear_masks(
|
301 |
-
results,
|
302 |
-
magnitude,
|
303 |
-
self.direction,
|
304 |
-
fill_val=0,
|
305 |
-
interpolation=self.interpolation)
|
306 |
-
self._shear_seg(
|
307 |
-
results,
|
308 |
-
magnitude,
|
309 |
-
self.direction,
|
310 |
-
fill_val=self.seg_ignore_label,
|
311 |
-
interpolation=self.interpolation)
|
312 |
-
self._filter_invalid(results)
|
313 |
-
return results
|
314 |
-
|
315 |
-
def __repr__(self):
|
316 |
-
repr_str = self.__class__.__name__
|
317 |
-
repr_str += f'(level={self.level}, '
|
318 |
-
repr_str += f'img_fill_val={self.img_fill_val}, '
|
319 |
-
repr_str += f'seg_ignore_label={self.seg_ignore_label}, '
|
320 |
-
repr_str += f'prob={self.prob}, '
|
321 |
-
repr_str += f'direction={self.direction}, '
|
322 |
-
repr_str += f'max_shear_magnitude={self.max_shear_magnitude}, '
|
323 |
-
repr_str += f'random_negative_prob={self.random_negative_prob}, '
|
324 |
-
repr_str += f'interpolation={self.interpolation})'
|
325 |
-
return repr_str
|
326 |
-
|
327 |
-
|
328 |
-
@PIPELINES.register_module()
|
329 |
-
class Rotate(object):
|
330 |
-
"""Apply Rotate Transformation to image (and its corresponding bbox, mask,
|
331 |
-
segmentation).
|
332 |
-
|
333 |
-
Args:
|
334 |
-
level (int | float): The level should be in range (0,_MAX_LEVEL].
|
335 |
-
scale (int | float): Isotropic scale factor. Same in
|
336 |
-
``mmcv.imrotate``.
|
337 |
-
center (int | float | tuple[float]): Center point (w, h) of the
|
338 |
-
rotation in the source image. If None, the center of the
|
339 |
-
image will be used. Same in ``mmcv.imrotate``.
|
340 |
-
img_fill_val (int | float | tuple): The fill value for image border.
|
341 |
-
If float, the same value will be used for all the three
|
342 |
-
channels of image. If tuple, the should be 3 elements (e.g.
|
343 |
-
equals the number of channels for image).
|
344 |
-
seg_ignore_label (int): The fill value used for segmentation map.
|
345 |
-
Note this value must equals ``ignore_label`` in ``semantic_head``
|
346 |
-
of the corresponding config. Default 255.
|
347 |
-
prob (float): The probability for perform transformation and
|
348 |
-
should be in range 0 to 1.
|
349 |
-
max_rotate_angle (int | float): The maximum angles for rotate
|
350 |
-
transformation.
|
351 |
-
random_negative_prob (float): The probability that turns the
|
352 |
-
offset negative.
|
353 |
-
"""
|
354 |
-
|
355 |
-
def __init__(self,
|
356 |
-
level,
|
357 |
-
scale=1,
|
358 |
-
center=None,
|
359 |
-
img_fill_val=128,
|
360 |
-
seg_ignore_label=255,
|
361 |
-
prob=0.5,
|
362 |
-
max_rotate_angle=30,
|
363 |
-
random_negative_prob=0.5):
|
364 |
-
assert isinstance(level, (int, float)), \
|
365 |
-
f'The level must be type int or float. got {type(level)}.'
|
366 |
-
assert 0 <= level <= _MAX_LEVEL, \
|
367 |
-
f'The level should be in range (0,{_MAX_LEVEL}]. got {level}.'
|
368 |
-
assert isinstance(scale, (int, float)), \
|
369 |
-
f'The scale must be type int or float. got type {type(scale)}.'
|
370 |
-
if isinstance(center, (int, float)):
|
371 |
-
center = (center, center)
|
372 |
-
elif isinstance(center, tuple):
|
373 |
-
assert len(center) == 2, 'center with type tuple must have '\
|
374 |
-
f'2 elements. got {len(center)} elements.'
|
375 |
-
else:
|
376 |
-
assert center is None, 'center must be None or type int, '\
|
377 |
-
f'float or tuple, got type {type(center)}.'
|
378 |
-
if isinstance(img_fill_val, (float, int)):
|
379 |
-
img_fill_val = tuple([float(img_fill_val)] * 3)
|
380 |
-
elif isinstance(img_fill_val, tuple):
|
381 |
-
assert len(img_fill_val) == 3, 'img_fill_val as tuple must '\
|
382 |
-
f'have 3 elements. got {len(img_fill_val)}.'
|
383 |
-
img_fill_val = tuple([float(val) for val in img_fill_val])
|
384 |
-
else:
|
385 |
-
raise ValueError(
|
386 |
-
'img_fill_val must be float or tuple with 3 elements.')
|
387 |
-
assert np.all([0 <= val <= 255 for val in img_fill_val]), \
|
388 |
-
'all elements of img_fill_val should between range [0,255]. '\
|
389 |
-
f'got {img_fill_val}.'
|
390 |
-
assert 0 <= prob <= 1.0, 'The probability should be in range [0,1]. '\
|
391 |
-
'got {prob}.'
|
392 |
-
assert isinstance(max_rotate_angle, (int, float)), 'max_rotate_angle '\
|
393 |
-
f'should be type int or float. got type {type(max_rotate_angle)}.'
|
394 |
-
self.level = level
|
395 |
-
self.scale = scale
|
396 |
-
# Rotation angle in degrees. Positive values mean
|
397 |
-
# clockwise rotation.
|
398 |
-
self.angle = level_to_value(level, max_rotate_angle)
|
399 |
-
self.center = center
|
400 |
-
self.img_fill_val = img_fill_val
|
401 |
-
self.seg_ignore_label = seg_ignore_label
|
402 |
-
self.prob = prob
|
403 |
-
self.max_rotate_angle = max_rotate_angle
|
404 |
-
self.random_negative_prob = random_negative_prob
|
405 |
-
|
406 |
-
def _rotate_img(self, results, angle, center=None, scale=1.0):
|
407 |
-
"""Rotate the image.
|
408 |
-
|
409 |
-
Args:
|
410 |
-
results (dict): Result dict from loading pipeline.
|
411 |
-
angle (float): Rotation angle in degrees, positive values
|
412 |
-
mean clockwise rotation. Same in ``mmcv.imrotate``.
|
413 |
-
center (tuple[float], optional): Center point (w, h) of the
|
414 |
-
rotation. Same in ``mmcv.imrotate``.
|
415 |
-
scale (int | float): Isotropic scale factor. Same in
|
416 |
-
``mmcv.imrotate``.
|
417 |
-
"""
|
418 |
-
for key in results.get('img_fields', ['img']):
|
419 |
-
img = results[key].copy()
|
420 |
-
img_rotated = mmcv.imrotate(
|
421 |
-
img, angle, center, scale, border_value=self.img_fill_val)
|
422 |
-
results[key] = img_rotated.astype(img.dtype)
|
423 |
-
|
424 |
-
def _rotate_bboxes(self, results, rotate_matrix):
|
425 |
-
"""Rotate the bboxes."""
|
426 |
-
h, w, c = results['img_shape']
|
427 |
-
for key in results.get('bbox_fields', []):
|
428 |
-
min_x, min_y, max_x, max_y = np.split(
|
429 |
-
results[key], results[key].shape[-1], axis=-1)
|
430 |
-
coordinates = np.stack([[min_x, min_y], [max_x, min_y],
|
431 |
-
[min_x, max_y],
                                    [max_x, max_y]])  # [4, 2, nb_bbox, 1]
            # pad 1 to convert from format [x, y] to homogeneous
            # coordinates format [x, y, 1]
            coordinates = np.concatenate(
                (coordinates,
                 np.ones((4, 1, coordinates.shape[2], 1), coordinates.dtype)),
                axis=1)  # [4, 3, nb_bbox, 1]
            coordinates = coordinates.transpose(
                (2, 0, 1, 3))  # [nb_bbox, 4, 3, 1]
            rotated_coords = np.matmul(rotate_matrix,
                                       coordinates)  # [nb_bbox, 4, 2, 1]
            rotated_coords = rotated_coords[..., 0]  # [nb_bbox, 4, 2]
            min_x, min_y = np.min(
                rotated_coords[:, :, 0], axis=1), np.min(
                    rotated_coords[:, :, 1], axis=1)
            max_x, max_y = np.max(
                rotated_coords[:, :, 0], axis=1), np.max(
                    rotated_coords[:, :, 1], axis=1)
            min_x, min_y = np.clip(
                min_x, a_min=0, a_max=w), np.clip(
                    min_y, a_min=0, a_max=h)
            max_x, max_y = np.clip(
                max_x, a_min=min_x, a_max=w), np.clip(
                    max_y, a_min=min_y, a_max=h)
            results[key] = np.stack([min_x, min_y, max_x, max_y],
                                    axis=-1).astype(results[key].dtype)

    def _rotate_masks(self,
                      results,
                      angle,
                      center=None,
                      scale=1.0,
                      fill_val=0):
        """Rotate the masks."""
        h, w, c = results['img_shape']
        for key in results.get('mask_fields', []):
            masks = results[key]
            results[key] = masks.rotate((h, w), angle, center, scale, fill_val)

    def _rotate_seg(self,
                    results,
                    angle,
                    center=None,
                    scale=1.0,
                    fill_val=255):
        """Rotate the segmentation map."""
        for key in results.get('seg_fields', []):
            seg = results[key].copy()
            results[key] = mmcv.imrotate(
                seg, angle, center, scale,
                border_value=fill_val).astype(seg.dtype)

    def _filter_invalid(self, results, min_bbox_size=0):
        """Filter bboxes and corresponding masks too small after rotate
        augmentation."""
        bbox2label, bbox2mask, _ = bbox2fields()
        for key in results.get('bbox_fields', []):
            bbox_w = results[key][:, 2] - results[key][:, 0]
            bbox_h = results[key][:, 3] - results[key][:, 1]
            valid_inds = (bbox_w > min_bbox_size) & (bbox_h > min_bbox_size)
            valid_inds = np.nonzero(valid_inds)[0]
            results[key] = results[key][valid_inds]
            # label fields, e.g. gt_labels and gt_labels_ignore
            label_key = bbox2label.get(key)
            if label_key in results:
                results[label_key] = results[label_key][valid_inds]
            # mask fields, e.g. gt_masks and gt_masks_ignore
            mask_key = bbox2mask.get(key)
            if mask_key in results:
                results[mask_key] = results[mask_key][valid_inds]

    def __call__(self, results):
        """Call function to rotate images, bounding boxes, masks and semantic
        segmentation maps.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Rotated results.
        """
        if np.random.rand() > self.prob:
            return results
        h, w = results['img'].shape[:2]
        center = self.center
        if center is None:
            center = ((w - 1) * 0.5, (h - 1) * 0.5)
        angle = random_negative(self.angle, self.random_negative_prob)
        self._rotate_img(results, angle, center, self.scale)
        rotate_matrix = cv2.getRotationMatrix2D(center, -angle, self.scale)
        self._rotate_bboxes(results, rotate_matrix)
        self._rotate_masks(results, angle, center, self.scale, fill_val=0)
        self._rotate_seg(
            results, angle, center, self.scale, fill_val=self.seg_ignore_label)
        self._filter_invalid(results)
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(level={self.level}, '
        repr_str += f'scale={self.scale}, '
        repr_str += f'center={self.center}, '
        repr_str += f'img_fill_val={self.img_fill_val}, '
        repr_str += f'seg_ignore_label={self.seg_ignore_label}, '
        repr_str += f'prob={self.prob}, '
        repr_str += f'max_rotate_angle={self.max_rotate_angle}, '
        repr_str += f'random_negative_prob={self.random_negative_prob})'
        return repr_str
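
Note: transforms registered this way are instantiated from config dicts via the PIPELINES registry. A minimal sketch of wiring Rotate into a training pipeline; the level/probability values below are illustrative, not taken from this repo:

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    # level in [0, _MAX_LEVEL] is mapped to a rotation angle in
    # [0, max_rotate_angle]; the sign is flipped with random_negative_prob.
    dict(type='Rotate', level=5, prob=0.5, max_rotate_angle=30),
]
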
@PIPELINES.register_module()
class Translate(object):
    """Translate the images, bboxes, masks and segmentation maps horizontally
    or vertically.

    Args:
        level (int | float): The level for Translate and should be in
            range [0,_MAX_LEVEL].
        prob (float): The probability for performing translation and
            should be in range [0, 1].
        img_fill_val (int | float | tuple): The filled value for image
            border. If float, the same fill value will be used for all
            the three channels of image. If tuple, it should have 3
            elements (i.e. equal to the number of channels of the image).
        seg_ignore_label (int): The fill value used for segmentation map.
            Note this value must equal ``ignore_label`` in ``semantic_head``
            of the corresponding config. Default 255.
        direction (str): The translate direction, either "horizontal"
            or "vertical".
        max_translate_offset (int | float): The maximum pixel's offset for
            Translate.
        random_negative_prob (float): The probability that turns the
            offset negative.
        min_size (int | float): The minimum pixel for filtering
            invalid bboxes after the translation.
    """

    def __init__(self,
                 level,
                 prob=0.5,
                 img_fill_val=128,
                 seg_ignore_label=255,
                 direction='horizontal',
                 max_translate_offset=250.,
                 random_negative_prob=0.5,
                 min_size=0):
        assert isinstance(level, (int, float)), \
            'The level must be type int or float.'
        assert 0 <= level <= _MAX_LEVEL, \
            'The level used for calculating Translate\'s offset should be ' \
            'in range [0,_MAX_LEVEL]'
        assert 0 <= prob <= 1.0, \
            'The probability of translation should be in range [0, 1].'
        if isinstance(img_fill_val, (float, int)):
            img_fill_val = tuple([float(img_fill_val)] * 3)
        elif isinstance(img_fill_val, tuple):
            assert len(img_fill_val) == 3, \
                'img_fill_val as tuple must have 3 elements.'
            img_fill_val = tuple([float(val) for val in img_fill_val])
        else:
            raise ValueError('img_fill_val must be type float or tuple.')
        assert np.all([0 <= val <= 255 for val in img_fill_val]), \
            'all elements of img_fill_val should be in range [0, 255].'
        assert direction in ('horizontal', 'vertical'), \
            'direction should be "horizontal" or "vertical".'
        assert isinstance(max_translate_offset, (int, float)), \
            'The max_translate_offset must be type int or float.'
        # the offset used for translation
        self.offset = int(level_to_value(level, max_translate_offset))
        self.level = level
        self.prob = prob
        self.img_fill_val = img_fill_val
        self.seg_ignore_label = seg_ignore_label
        self.direction = direction
        self.max_translate_offset = max_translate_offset
        self.random_negative_prob = random_negative_prob
        self.min_size = min_size

    def _translate_img(self, results, offset, direction='horizontal'):
        """Translate the image.

        Args:
            results (dict): Result dict from loading pipeline.
            offset (int | float): The offset for translate.
            direction (str): The translate direction, either "horizontal"
                or "vertical".
        """
        for key in results.get('img_fields', ['img']):
            img = results[key].copy()
            results[key] = mmcv.imtranslate(
                img, offset, direction, self.img_fill_val).astype(img.dtype)

    def _translate_bboxes(self, results, offset):
        """Shift bboxes horizontally or vertically, according to offset."""
        h, w, c = results['img_shape']
        for key in results.get('bbox_fields', []):
            min_x, min_y, max_x, max_y = np.split(
                results[key], results[key].shape[-1], axis=-1)
            if self.direction == 'horizontal':
                min_x = np.maximum(0, min_x + offset)
                max_x = np.minimum(w, max_x + offset)
            elif self.direction == 'vertical':
                min_y = np.maximum(0, min_y + offset)
                max_y = np.minimum(h, max_y + offset)

            # the boxes translated outside of the image will be filtered
            # along with the corresponding masks, by invoking
            # ``_filter_invalid``.
            results[key] = np.concatenate([min_x, min_y, max_x, max_y],
                                          axis=-1)

    def _translate_masks(self,
                         results,
                         offset,
                         direction='horizontal',
                         fill_val=0):
        """Translate masks horizontally or vertically."""
        h, w, c = results['img_shape']
        for key in results.get('mask_fields', []):
            masks = results[key]
            results[key] = masks.translate((h, w), offset, direction, fill_val)

    def _translate_seg(self,
                       results,
                       offset,
                       direction='horizontal',
                       fill_val=255):
        """Translate segmentation maps horizontally or vertically."""
        for key in results.get('seg_fields', []):
            seg = results[key].copy()
            results[key] = mmcv.imtranslate(seg, offset, direction,
                                            fill_val).astype(seg.dtype)

    def _filter_invalid(self, results, min_size=0):
        """Filter bboxes and masks too small or translated out of image."""
        bbox2label, bbox2mask, _ = bbox2fields()
        for key in results.get('bbox_fields', []):
            bbox_w = results[key][:, 2] - results[key][:, 0]
            bbox_h = results[key][:, 3] - results[key][:, 1]
            valid_inds = (bbox_w > min_size) & (bbox_h > min_size)
            valid_inds = np.nonzero(valid_inds)[0]
            results[key] = results[key][valid_inds]
            # label fields, e.g. gt_labels and gt_labels_ignore
            label_key = bbox2label.get(key)
            if label_key in results:
                results[label_key] = results[label_key][valid_inds]
            # mask fields, e.g. gt_masks and gt_masks_ignore
            mask_key = bbox2mask.get(key)
            if mask_key in results:
                results[mask_key] = results[mask_key][valid_inds]
        return results

    def __call__(self, results):
        """Call function to translate images, bounding boxes, masks and
        semantic segmentation maps.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Translated results.
        """
        if np.random.rand() > self.prob:
            return results
        offset = random_negative(self.offset, self.random_negative_prob)
        self._translate_img(results, offset, self.direction)
        self._translate_bboxes(results, offset)
        # fill_val defaults to 0 for BitmapMasks and None for PolygonMasks.
        self._translate_masks(results, offset, self.direction)
        # fill_val is set to ``seg_ignore_label`` for the ignored value
        # of the segmentation map.
        self._translate_seg(
            results, offset, self.direction, fill_val=self.seg_ignore_label)
        self._filter_invalid(results, min_size=self.min_size)
        return results
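
Note: a hedged usage sketch for Translate; all values are illustrative. The stored offset is int(level_to_value(level, max_translate_offset)), and each call flips its sign with probability random_negative_prob:

translate = Translate(level=8, prob=1.0, direction='horizontal',
                      max_translate_offset=100.)
# results = translate(results)  # 'results' is the dict built by the loading pipeline
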
@PIPELINES.register_module()
class ColorTransform(object):
    """Apply Color transformation to image. The bboxes, masks, and
    segmentations are not modified.

    Args:
        level (int | float): Should be in range [0,_MAX_LEVEL].
        prob (float): The probability for performing Color transformation.
    """

    def __init__(self, level, prob=0.5):
        assert isinstance(level, (int, float)), \
            'The level must be type int or float.'
        assert 0 <= level <= _MAX_LEVEL, \
            'The level should be in range [0,_MAX_LEVEL].'
        assert 0 <= prob <= 1.0, \
            'The probability should be in range [0,1].'
        self.level = level
        self.prob = prob
        self.factor = enhance_level_to_value(level)

    def _adjust_color_img(self, results, factor=1.0):
        """Apply Color transformation to image."""
        for key in results.get('img_fields', ['img']):
            # NOTE by default the image is expected to be in BGR format
            img = results[key]
            results[key] = mmcv.adjust_color(img, factor).astype(img.dtype)

    def __call__(self, results):
        """Call function for Color transformation.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Colored results.
        """
        if np.random.rand() > self.prob:
            return results
        self._adjust_color_img(results, self.factor)
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(level={self.level}, '
        repr_str += f'prob={self.prob})'
        return repr_str
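
Note: the enhance-style transforms in this file (ColorTransform above, BrightnessTransform and ContrastTransform below) all derive their factor from enhance_level_to_value, which is defined earlier in this file and not shown here. A sketch of that mapping, assuming the usual _MAX_LEVEL = 10 definition:

def enhance_level_to_value(level, a=1.8, b=0.1, max_level=10):
    # linear map: level 0 -> 0.1, level 5 -> 1.0 (identity), level 10 -> 1.9
    return (level / max_level) * a + b

print(enhance_level_to_value(5))  # 1.0, i.e. no change at the mid-level
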
@PIPELINES.register_module()
class EqualizeTransform(object):
    """Apply Equalize transformation to image. The bboxes, masks and
    segmentations are not modified.

    Args:
        prob (float): The probability for performing Equalize transformation.
    """

    def __init__(self, prob=0.5):
        assert 0 <= prob <= 1.0, \
            'The probability should be in range [0,1].'
        self.prob = prob

    def _imequalize(self, results):
        """Equalizes the histogram of one image."""
        for key in results.get('img_fields', ['img']):
            img = results[key]
            results[key] = mmcv.imequalize(img).astype(img.dtype)

    def __call__(self, results):
        """Call function for Equalize transformation.

        Args:
            results (dict): Results dict from loading pipeline.

        Returns:
            dict: Results after the transformation.
        """
        if np.random.rand() > self.prob:
            return results
        self._imequalize(results)
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(prob={self.prob})'
        return repr_str


@PIPELINES.register_module()
class BrightnessTransform(object):
    """Apply Brightness transformation to image. The bboxes, masks and
    segmentations are not modified.

    Args:
        level (int | float): Should be in range [0,_MAX_LEVEL].
        prob (float): The probability for performing Brightness transformation.
    """

    def __init__(self, level, prob=0.5):
        assert isinstance(level, (int, float)), \
            'The level must be type int or float.'
        assert 0 <= level <= _MAX_LEVEL, \
            'The level should be in range [0,_MAX_LEVEL].'
        assert 0 <= prob <= 1.0, \
            'The probability should be in range [0,1].'
        self.level = level
        self.prob = prob
        self.factor = enhance_level_to_value(level)

    def _adjust_brightness_img(self, results, factor=1.0):
        """Adjust the brightness of image."""
        for key in results.get('img_fields', ['img']):
            img = results[key]
            results[key] = mmcv.adjust_brightness(img,
                                                  factor).astype(img.dtype)

    def __call__(self, results):
        """Call function for Brightness transformation.

        Args:
            results (dict): Results dict from loading pipeline.

        Returns:
            dict: Results after the transformation.
        """
        if np.random.rand() > self.prob:
            return results
        self._adjust_brightness_img(results, self.factor)
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(level={self.level}, '
        repr_str += f'prob={self.prob})'
        return repr_str


@PIPELINES.register_module()
class ContrastTransform(object):
    """Apply Contrast transformation to image. The bboxes, masks and
    segmentations are not modified.

    Args:
        level (int | float): Should be in range [0,_MAX_LEVEL].
        prob (float): The probability for performing Contrast transformation.
    """

    def __init__(self, level, prob=0.5):
        assert isinstance(level, (int, float)), \
            'The level must be type int or float.'
        assert 0 <= level <= _MAX_LEVEL, \
            'The level should be in range [0,_MAX_LEVEL].'
        assert 0 <= prob <= 1.0, \
            'The probability should be in range [0,1].'
        self.level = level
        self.prob = prob
        self.factor = enhance_level_to_value(level)

    def _adjust_contrast_img(self, results, factor=1.0):
        """Adjust the image contrast."""
        for key in results.get('img_fields', ['img']):
            img = results[key]
            results[key] = mmcv.adjust_contrast(img, factor).astype(img.dtype)

    def __call__(self, results):
        """Call function for Contrast transformation.

        Args:
            results (dict): Results dict from loading pipeline.

        Returns:
            dict: Results after the transformation.
        """
        if np.random.rand() > self.prob:
            return results
        self._adjust_contrast_img(results, self.factor)
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(level={self.level}, '
        repr_str += f'prob={self.prob})'
        return repr_str
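
Note: a hedged sketch of combining the registered transforms above into a single policy-based op. The AutoAugment wrapper is defined earlier in this file, and the specific policies and levels here are illustrative:

autoaug = dict(
    type='AutoAugment',
    policies=[
        [dict(type='EqualizeTransform', prob=0.8),
         dict(type='BrightnessTransform', level=6, prob=0.4)],
        [dict(type='ColorTransform', level=4, prob=0.6),
         dict(type='ContrastTransform', level=7, prob=0.5)],
    ])  # one inner policy is chosen at random per sample
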
spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_d6_r101b-d16_769x769_80k_cityscapes.py
DELETED
@@ -1,4 +0,0 @@
_base_ = './fcn_d6_r50b-d16_769x769_80k_cityscapes.py'
model = dict(
    pretrained='torchvision://resnet101',
    backbone=dict(type='ResNet', depth=101))

spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/presets.py
DELETED
@@ -1,72 +0,0 @@
import functools
from pathlib import Path

import yaml


def default_preset():
    return {
        'do_sample': True,
        'temperature': 1,
        'top_p': 1,
        'top_k': 0,
        'typical_p': 1,
        'epsilon_cutoff': 0,
        'eta_cutoff': 0,
        'tfs': 1,
        'top_a': 0,
        'repetition_penalty': 1,
        'repetition_penalty_range': 0,
        'encoder_repetition_penalty': 1,
        'no_repeat_ngram_size': 0,
        'min_length': 0,
        'guidance_scale': 1,
        'mirostat_mode': 0,
        'mirostat_tau': 5.0,
        'mirostat_eta': 0.1,
        'penalty_alpha': 0,
        'num_beams': 1,
        'length_penalty': 1,
        'early_stopping': False,
        'custom_token_bans': '',
    }


def presets_params():
    return [k for k in default_preset()]


def load_preset(name):
    generate_params = default_preset()
    if name not in ['None', None, '']:
        with open(Path(f'presets/{name}.yaml'), 'r') as infile:
            preset = yaml.safe_load(infile)

        for k in preset:
            generate_params[k] = preset[k]

    generate_params['temperature'] = min(1.99, generate_params['temperature'])
    return generate_params


@functools.cache
def load_preset_memoized(name):
    return load_preset(name)


def load_preset_for_ui(name, state):
    generate_params = load_preset(name)
    state.update(generate_params)
    return state, *[generate_params[k] for k in presets_params()]


def generate_preset_yaml(state):
    defaults = default_preset()
    data = {k: state[k] for k in presets_params()}

    # Remove entries that are identical to the defaults
    for k in list(data.keys()):
        if data[k] == defaults[k]:
            del data[k]

    return yaml.dump(data, sort_keys=False)
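
Note: a short round-trip sketch for the helpers above; the preset name is hypothetical and would live at presets/Debug-deterministic.yaml:

params = load_preset('Debug-deterministic')  # defaults overridden by the YAML, temperature capped at 1.99
params['do_sample'] = False
print(generate_preset_yaml(params))          # dumps only the keys that differ from default_preset()
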
spaces/AnnonSubmission/xai-cl/ssl_models/barlow_twins.py
DELETED
@@ -1,77 +0,0 @@
import torch
import torch.nn as nn
import torchvision

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

"""from https://github.com/facebookresearch/barlowtwins"""

def off_diagonal(x):
    # return a flattened view of the off-diagonal elements of a square matrix
    n, m = x.shape
    assert n == m
    return x.flatten()[:-1].view(n - 1, n + 1)[:, 1:].flatten()

class BarlowTwins(nn.Module):
    def __init__(self):
        super().__init__()

        self.backbone = torchvision.models.resnet50(zero_init_residual=True)
        self.backbone.fc = nn.Identity()

        # projector
        sizes = [2048] + list(map(int, '8192-8192-8192'.split('-')))
        layers = []
        for i in range(len(sizes) - 2):
            layers.append(nn.Linear(sizes[i], sizes[i + 1], bias=False))
            layers.append(nn.BatchNorm1d(sizes[i + 1]))
            layers.append(nn.ReLU(inplace=True))

        layers.append(nn.Linear(sizes[-2], sizes[-1], bias=False))
        self.projector = nn.Sequential(*layers)

        # normalization layer for the representations z1 and z2
        self.bn = nn.BatchNorm1d(sizes[-1], affine=False)

    def forward(self, y1, y2):
        z1 = self.projector(self.backbone(y1))
        z2 = self.projector(self.backbone(y2))

        # empirical cross-correlation matrix
        c = self.bn(z1).T @ self.bn(z2)

        on_diag = torch.diagonal(c).add_(-1).pow_(2).sum()
        off_diag = off_diagonal(c).pow_(2).sum()
        loss = on_diag + 0.0051 * off_diag
        return loss

class ResNet(nn.Module):
    def __init__(self, backbone):
        super().__init__()

        modules = list(backbone.children())[:-2]
        self.net = nn.Sequential(*modules)

    def forward(self, x):
        return self.net(x).mean(dim=[2, 3])

class RestructuredBarlowTwins(nn.Module):
    def __init__(self, model):
        super().__init__()

        self.encoder = ResNet(model.backbone)
        self.contrastive_head = model.projector

    def forward(self, x):
        x = self.encoder(x)
        x = self.contrastive_head(x)
        return x


def get_barlow_twins_model(ckpt_path='barlow_twins.pth'):
    model = BarlowTwins()
    state_dict = torch.load('pretrained_models/barlow_models/' + ckpt_path, map_location='cpu')
    state_dict = state_dict['model']
    state_dict = {k.replace("module.", ""): v for k, v in state_dict.items()}
    model.load_state_dict(state_dict)
    restructured_model = RestructuredBarlowTwins(model)
    return restructured_model.to(device)
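
Note: a minimal smoke-test sketch for the restructured model above (untrained weights, random input, purely illustrative):

model = RestructuredBarlowTwins(BarlowTwins()).to(device).eval()
x = torch.randn(2, 3, 224, 224, device=device)
with torch.no_grad():
    z = model(x)
print(z.shape)  # torch.Size([2, 8192]): the encoder mean-pools to 2048-d, the projector maps to 8192-d
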
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/parallel/distributed.py
DELETED
@@ -1,112 +0,0 @@
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from torch.nn.parallel.distributed import (DistributedDataParallel,
                                           _find_tensors)

from annotator.uniformer.mmcv import print_log
from annotator.uniformer.mmcv.utils import TORCH_VERSION, digit_version
from .scatter_gather import scatter_kwargs


class MMDistributedDataParallel(DistributedDataParallel):
    """The DDP module that supports DataContainer.

    MMDDP has two main differences with PyTorch DDP:

    - It supports a custom type :class:`DataContainer` which allows more
      flexible control of input data.
    - It implements two APIs ``train_step()`` and ``val_step()``.
    """

    def to_kwargs(self, inputs, kwargs, device_id):
        # Use `self.to_kwargs` instead of `self.scatter` in PyTorch 1.8
        # to move all tensors to device_id
        return scatter_kwargs(inputs, kwargs, [device_id], dim=self.dim)

    def scatter(self, inputs, kwargs, device_ids):
        return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)

    def train_step(self, *inputs, **kwargs):
        """train_step() API for module wrapped by DistributedDataParallel.

        This method is basically the same as
        ``DistributedDataParallel.forward()``, while replacing
        ``self.module.forward()`` with ``self.module.train_step()``.
        It is compatible with PyTorch 1.1 - 1.5.
        """

        # In PyTorch >= 1.7, ``reducer._rebuild_buckets()`` is moved from the
        # end of backward to the beginning of forward.
        if ('parrots' not in TORCH_VERSION
                and digit_version(TORCH_VERSION) >= digit_version('1.7')
                and self.reducer._rebuild_buckets()):
            print_log(
                'Reducer buckets have been rebuilt in this iteration.',
                logger='mmcv')

        if getattr(self, 'require_forward_param_sync', True):
            self._sync_params()
        if self.device_ids:
            inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
            if len(self.device_ids) == 1:
                output = self.module.train_step(*inputs[0], **kwargs[0])
            else:
                outputs = self.parallel_apply(
                    self._module_copies[:len(inputs)], inputs, kwargs)
                output = self.gather(outputs, self.output_device)
        else:
            output = self.module.train_step(*inputs, **kwargs)

        if torch.is_grad_enabled() and getattr(
                self, 'require_backward_grad_sync', True):
            if self.find_unused_parameters:
                self.reducer.prepare_for_backward(list(_find_tensors(output)))
            else:
                self.reducer.prepare_for_backward([])
        else:
            if ('parrots' not in TORCH_VERSION
                    and digit_version(TORCH_VERSION) > digit_version('1.2')):
                self.require_forward_param_sync = False
        return output

    def val_step(self, *inputs, **kwargs):
        """val_step() API for module wrapped by DistributedDataParallel.

        This method is basically the same as
        ``DistributedDataParallel.forward()``, while replacing
        ``self.module.forward()`` with ``self.module.val_step()``.
        It is compatible with PyTorch 1.1 - 1.5.
        """
        # In PyTorch >= 1.7, ``reducer._rebuild_buckets()`` is moved from the
        # end of backward to the beginning of forward.
        if ('parrots' not in TORCH_VERSION
                and digit_version(TORCH_VERSION) >= digit_version('1.7')
                and self.reducer._rebuild_buckets()):
            print_log(
                'Reducer buckets have been rebuilt in this iteration.',
                logger='mmcv')

        if getattr(self, 'require_forward_param_sync', True):
            self._sync_params()
        if self.device_ids:
            inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
            if len(self.device_ids) == 1:
                output = self.module.val_step(*inputs[0], **kwargs[0])
            else:
                outputs = self.parallel_apply(
                    self._module_copies[:len(inputs)], inputs, kwargs)
                output = self.gather(outputs, self.output_device)
        else:
            output = self.module.val_step(*inputs, **kwargs)

        if torch.is_grad_enabled() and getattr(
                self, 'require_backward_grad_sync', True):
            if self.find_unused_parameters:
                self.reducer.prepare_for_backward(list(_find_tensors(output)))
            else:
                self.reducer.prepare_for_backward([])
        else:
            if ('parrots' not in TORCH_VERSION
                    and digit_version(TORCH_VERSION) > digit_version('1.2')):
                self.require_forward_param_sync = False
        return output
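
Note: a hedged sketch of how this wrapper is typically constructed in mmcv-style training code; the model and the DDP keyword values are placeholders:

model = MMDistributedDataParallel(
    model.cuda(),
    device_ids=[torch.cuda.current_device()],
    broadcast_buffers=False,
    find_unused_parameters=False)
# The runner then drives optimization through the custom APIs:
# outputs = model.train_step(data_batch, optimizer)
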
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/operations/install/editable_legacy.py
DELETED
@@ -1,46 +0,0 @@
"""Legacy editable installation process, i.e. `setup.py develop`.
"""
import logging
from typing import Optional, Sequence

from pip._internal.build_env import BuildEnvironment
from pip._internal.utils.logging import indent_log
from pip._internal.utils.setuptools_build import make_setuptools_develop_args
from pip._internal.utils.subprocess import call_subprocess

logger = logging.getLogger(__name__)


def install_editable(
    *,
    global_options: Sequence[str],
    prefix: Optional[str],
    home: Optional[str],
    use_user_site: bool,
    name: str,
    setup_py_path: str,
    isolated: bool,
    build_env: BuildEnvironment,
    unpacked_source_directory: str,
) -> None:
    """Install a package in editable mode. Most arguments are pass-through
    to setuptools.
    """
    logger.info("Running setup.py develop for %s", name)

    args = make_setuptools_develop_args(
        setup_py_path,
        global_options=global_options,
        no_user_config=isolated,
        prefix=prefix,
        home=home,
        use_user_site=use_user_site,
    )

    with indent_log():
        with build_env:
            call_subprocess(
                args,
                command_desc="python setup.py develop",
                cwd=unpacked_source_directory,
            )
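
Note: a hedged sketch of a direct call, mainly to make the keyword-only signature concrete. Every value below is a placeholder (pip invokes this internally from its install command), and constructing BuildEnvironment() with no arguments is an assumption about its initializer:

install_editable(
    global_options=[],
    prefix=None,
    home=None,
    use_user_site=False,
    name='mypkg',                          # hypothetical distribution name
    setup_py_path='/src/mypkg/setup.py',   # hypothetical path
    isolated=False,
    build_env=BuildEnvironment(),
    unpacked_source_directory='/src/mypkg',
)
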
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/metadata/__init__.py
DELETED
File without changes
spaces/AvaterClasher/Food_Classifier_Moni/README.md
DELETED
@@ -1,13 +0,0 @@
---
title: Food Classifier Moni
emoji: 🍣
colorFrom: red
colorTo: black
sdk: gradio
sdk_version: 3.1.4
app_file: app.py
pinned: false
license: mit
---

An application to classify images of food into pizza, steak or sushi classes.

spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tools/train_net.py
DELETED
@@ -1,170 +0,0 @@
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
"""
A main training script.

This script reads a given config file and runs the training or evaluation.
It is an entry point that is made to train standard models in detectron2.

In order to let one script support training of many models,
this script contains logic that is specific to these built-in models and
therefore may not be suitable for your own project.
For example, your research project perhaps only needs a single "evaluator".

Therefore, we recommend you to use detectron2 as a library and take
this file as an example of how to use the library.
You may want to write your own script with your datasets and other customizations.
"""

import logging
import os
from collections import OrderedDict
import torch

import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog
from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, hooks, launch
from detectron2.evaluation import (
    CityscapesInstanceEvaluator,
    CityscapesSemSegEvaluator,
    COCOEvaluator,
    COCOPanopticEvaluator,
    DatasetEvaluators,
    LVISEvaluator,
    PascalVOCDetectionEvaluator,
    SemSegEvaluator,
    verify_results,
)
from detectron2.modeling import GeneralizedRCNNWithTTA


def build_evaluator(cfg, dataset_name, output_folder=None):
    """
    Create evaluator(s) for a given dataset.
    This uses the special metadata "evaluator_type" associated with each builtin dataset.
    For your own dataset, you can simply create an evaluator manually in your
    script and do not have to worry about the hacky if-else logic here.
    """
    if output_folder is None:
        output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
    evaluator_list = []
    evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
    if evaluator_type in ["sem_seg", "coco_panoptic_seg"]:
        evaluator_list.append(
            SemSegEvaluator(
                dataset_name,
                distributed=True,
                output_dir=output_folder,
            )
        )
    if evaluator_type in ["coco", "coco_panoptic_seg"]:
        evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder))
    if evaluator_type == "coco_panoptic_seg":
        evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))
    if evaluator_type == "cityscapes_instance":
        assert (
            torch.cuda.device_count() > comm.get_rank()
        ), "CityscapesEvaluator currently does not work with multiple machines."
        return CityscapesInstanceEvaluator(dataset_name)
    if evaluator_type == "cityscapes_sem_seg":
        assert (
            torch.cuda.device_count() > comm.get_rank()
        ), "CityscapesEvaluator currently does not work with multiple machines."
        return CityscapesSemSegEvaluator(dataset_name)
    elif evaluator_type == "pascal_voc":
        return PascalVOCDetectionEvaluator(dataset_name)
    elif evaluator_type == "lvis":
        return LVISEvaluator(dataset_name, output_dir=output_folder)
    if len(evaluator_list) == 0:
        raise NotImplementedError(
            "no Evaluator for the dataset {} with the type {}".format(dataset_name, evaluator_type)
        )
    elif len(evaluator_list) == 1:
        return evaluator_list[0]
    return DatasetEvaluators(evaluator_list)


class Trainer(DefaultTrainer):
    """
    We use the "DefaultTrainer" which contains pre-defined default logic for
    the standard training workflow. It may not work for you, especially if you
    are working on a new research project. In that case you can write your
    own training loop. You can use "tools/plain_train_net.py" as an example.
    """

    @classmethod
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        return build_evaluator(cfg, dataset_name, output_folder)

    @classmethod
    def test_with_TTA(cls, cfg, model):
        logger = logging.getLogger("detectron2.trainer")
        # At the end of training, run an evaluation with TTA
        # Only supports some R-CNN models.
        logger.info("Running inference with test-time augmentation ...")
        model = GeneralizedRCNNWithTTA(cfg, model)
        evaluators = [
            cls.build_evaluator(
                cfg, name, output_folder=os.path.join(cfg.OUTPUT_DIR, "inference_TTA")
            )
            for name in cfg.DATASETS.TEST
        ]
        res = cls.test(cfg, model, evaluators)
        res = OrderedDict({k + "_TTA": v for k, v in res.items()})
        return res


def setup(args):
    """
    Create configs and perform basic setups.
    """
    cfg = get_cfg()
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    default_setup(cfg, args)
    return cfg


def main(args):
    cfg = setup(args)

    if args.eval_only:
        model = Trainer.build_model(cfg)
        DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume
        )
        res = Trainer.test(cfg, model)
        if cfg.TEST.AUG.ENABLED:
            res.update(Trainer.test_with_TTA(cfg, model))
        if comm.is_main_process():
            verify_results(cfg, res)
        return res

    """
    If you'd like to do anything fancier than the standard training logic,
    consider writing your own training loop (see plain_train_net.py) or
    subclassing the trainer.
    """
    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    if cfg.TEST.AUG.ENABLED:
        trainer.register_hooks(
            [hooks.EvalHook(0, lambda: trainer.test_with_TTA(cfg, trainer.model))]
        )
    return trainer.train()


if __name__ == "__main__":
    args = default_argument_parser().parse_args()
    print("Command Line Args:", args)
    launch(
        main,
        args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        args=(args,),
    )
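
Note: a hedged sketch of the standard detectron2 launch pattern this script follows; the config path is a placeholder, and the flags come from default_argument_parser (used in the __main__ block above):

args = default_argument_parser().parse_args(
    ['--config-file', 'configs/Base-RCNN-FPN.yaml', '--num-gpus', '1'])
launch(main, args.num_gpus, num_machines=args.num_machines,
       machine_rank=args.machine_rank, dist_url=args.dist_url, args=(args,))
# For evaluation only, add --eval-only and point MODEL.WEIGHTS at a checkpoint
# via the trailing opts list.
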
spaces/Azai8915/ChubVenusTest/README.md
DELETED
@@ -1,10 +0,0 @@
---
title: ChubVenusTest
emoji: 😻
colorFrom: yellow
colorTo: red
sdk: docker
pinned: false
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference

spaces/BMukhtar/BookRecognitionKz/models/best_norm_ED.py
DELETED
@@ -1,538 +0,0 @@
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


class TPS_SpatialTransformerNetwork(nn.Module):
    """ Rectification Network of RARE, namely TPS based STN """

    def __init__(self, F, I_size, I_r_size, I_channel_num=1):
        """ Based on RARE TPS
        input:
            batch_I: Batch Input Image [batch_size x I_channel_num x I_height x I_width]
            I_size : (height, width) of the input image I
            I_r_size : (height, width) of the rectified image I_r
            I_channel_num : the number of channels of the input image I
        output:
            batch_I_r: rectified image [batch_size x I_channel_num x I_r_height x I_r_width]
        """
        super(TPS_SpatialTransformerNetwork, self).__init__()
        self.F = F
        self.I_size = I_size
        self.I_r_size = I_r_size  # = (I_r_height, I_r_width)
        self.I_channel_num = I_channel_num
        self.LocalizationNetwork = LocalizationNetwork(self.F, self.I_channel_num)
        self.GridGenerator = GridGenerator(self.F, self.I_r_size)

    def forward(self, batch_I):
        batch_C_prime = self.LocalizationNetwork(batch_I)  # batch_size x F x 2
        build_P_prime = self.GridGenerator.build_P_prime(batch_C_prime)  # batch_size x n (= I_r_width x I_r_height) x 2
        build_P_prime_reshape = build_P_prime.reshape([build_P_prime.size(0), self.I_r_size[0], self.I_r_size[1], 2])
        batch_I_r = F.grid_sample(batch_I, build_P_prime_reshape, padding_mode='border')

        return batch_I_r
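
Note: a minimal shape-level sketch of the rectification module above; the sizes (20 fiducial points, 32x100 single-channel crops) are illustrative. It assumes the rest of this module (LocalizationNetwork, GridGenerator below) has been loaded:

tps = TPS_SpatialTransformerNetwork(
    F=20, I_size=(32, 100), I_r_size=(32, 100), I_channel_num=1).to(device)
batch = torch.randn(4, 1, 32, 100, device=device)
rectified = tps(batch)
print(rectified.shape)  # torch.Size([4, 1, 32, 100])
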
class LocalizationNetwork(nn.Module):
    """ Localization Network of RARE, which predicts C' (F x 2) from I (I_width x I_height) """

    def __init__(self, F, I_channel_num):
        super(LocalizationNetwork, self).__init__()
        self.F = F
        self.I_channel_num = I_channel_num
        self.conv = nn.Sequential(
            nn.Conv2d(in_channels=self.I_channel_num, out_channels=64, kernel_size=3, stride=1, padding=1,
                      bias=False), nn.BatchNorm2d(64), nn.ReLU(True),
            nn.MaxPool2d(2, 2),  # batch_size x 64 x I_height/2 x I_width/2
            nn.Conv2d(64, 128, 3, 1, 1, bias=False), nn.BatchNorm2d(128), nn.ReLU(True),
            nn.MaxPool2d(2, 2),  # batch_size x 128 x I_height/4 x I_width/4
            nn.Conv2d(128, 256, 3, 1, 1, bias=False), nn.BatchNorm2d(256), nn.ReLU(True),
            nn.MaxPool2d(2, 2),  # batch_size x 256 x I_height/8 x I_width/8
            nn.Conv2d(256, 512, 3, 1, 1, bias=False), nn.BatchNorm2d(512), nn.ReLU(True),
            nn.AdaptiveAvgPool2d(1)  # batch_size x 512
        )

        self.localization_fc1 = nn.Sequential(nn.Linear(512, 256), nn.ReLU(True))
        self.localization_fc2 = nn.Linear(256, self.F * 2)

        # Init fc2 in LocalizationNetwork
        self.localization_fc2.weight.data.fill_(0)
        """ see RARE paper Fig. 6 (a) """
        ctrl_pts_x = np.linspace(-1.0, 1.0, int(F / 2))
        ctrl_pts_y_top = np.linspace(0.0, -1.0, num=int(F / 2))
        ctrl_pts_y_bottom = np.linspace(1.0, 0.0, num=int(F / 2))
        ctrl_pts_top = np.stack([ctrl_pts_x, ctrl_pts_y_top], axis=1)
        ctrl_pts_bottom = np.stack([ctrl_pts_x, ctrl_pts_y_bottom], axis=1)
        initial_bias = np.concatenate([ctrl_pts_top, ctrl_pts_bottom], axis=0)
        self.localization_fc2.bias.data = torch.from_numpy(initial_bias).float().view(-1)

    def forward(self, batch_I):
        """
        input: batch_I : Batch Input Image [batch_size x I_channel_num x I_height x I_width]
        output: batch_C_prime : Predicted coordinates of fiducial points for input batch [batch_size x F x 2]
        """
        batch_size = batch_I.size(0)
        features = self.conv(batch_I).view(batch_size, -1)
        batch_C_prime = self.localization_fc2(self.localization_fc1(features)).view(batch_size, self.F, 2)
        return batch_C_prime


class GridGenerator(nn.Module):
    """ Grid Generator of RARE, which produces P_prime by multiplying T with P """

    def __init__(self, F, I_r_size):
        """ Generate P_hat and inv_delta_C for later """
        super(GridGenerator, self).__init__()
        self.eps = 1e-6
        self.I_r_height, self.I_r_width = I_r_size
        self.F = F
        self.C = self._build_C(self.F)  # F x 2
        self.P = self._build_P(self.I_r_width, self.I_r_height)
        # for multi-gpu, the tensors need to be registered as buffers
        self.register_buffer("inv_delta_C", torch.tensor(self._build_inv_delta_C(self.F, self.C)).float())  # F+3 x F+3
        self.register_buffer("P_hat", torch.tensor(self._build_P_hat(self.F, self.C, self.P)).float())  # n x F+3
        # for fine-tuning with a different image width, you may use the below
        # instead of self.register_buffer
        # self.inv_delta_C = torch.tensor(self._build_inv_delta_C(self.F, self.C)).float().cuda()  # F+3 x F+3
        # self.P_hat = torch.tensor(self._build_P_hat(self.F, self.C, self.P)).float().cuda()  # n x F+3

    def _build_C(self, F):
        """ Return coordinates of fiducial points in I_r; C """
        ctrl_pts_x = np.linspace(-1.0, 1.0, int(F / 2))
        ctrl_pts_y_top = -1 * np.ones(int(F / 2))
        ctrl_pts_y_bottom = np.ones(int(F / 2))
        ctrl_pts_top = np.stack([ctrl_pts_x, ctrl_pts_y_top], axis=1)
        ctrl_pts_bottom = np.stack([ctrl_pts_x, ctrl_pts_y_bottom], axis=1)
        C = np.concatenate([ctrl_pts_top, ctrl_pts_bottom], axis=0)
        return C  # F x 2

    def _build_inv_delta_C(self, F, C):
        """ Return inv_delta_C which is needed to calculate T """
        hat_C = np.zeros((F, F), dtype=float)  # F x F
        for i in range(0, F):
            for j in range(i, F):
                r = np.linalg.norm(C[i] - C[j])
                hat_C[i, j] = r
                hat_C[j, i] = r
        np.fill_diagonal(hat_C, 1)
        hat_C = (hat_C ** 2) * np.log(hat_C)
        delta_C = np.concatenate(  # F+3 x F+3
            [
                np.concatenate([np.ones((F, 1)), C, hat_C], axis=1),  # F x F+3
                np.concatenate([np.zeros((2, 3)), np.transpose(C)], axis=1),  # 2 x F+3
                np.concatenate([np.zeros((1, 3)), np.ones((1, F))], axis=1)  # 1 x F+3
            ],
            axis=0
        )
        inv_delta_C = np.linalg.inv(delta_C)
        return inv_delta_C  # F+3 x F+3

    def _build_P(self, I_r_width, I_r_height):
        I_r_grid_x = (np.arange(-I_r_width, I_r_width, 2) + 1.0) / I_r_width  # self.I_r_width
        I_r_grid_y = (np.arange(-I_r_height, I_r_height, 2) + 1.0) / I_r_height  # self.I_r_height
        P = np.stack(  # self.I_r_width x self.I_r_height x 2
            np.meshgrid(I_r_grid_x, I_r_grid_y),
            axis=2
        )
        return P.reshape([-1, 2])  # n (= self.I_r_width x self.I_r_height) x 2

    def _build_P_hat(self, F, C, P):
        n = P.shape[0]  # n (= self.I_r_width x self.I_r_height)
        P_tile = np.tile(np.expand_dims(P, axis=1), (1, F, 1))  # n x 2 -> n x 1 x 2 -> n x F x 2
        C_tile = np.expand_dims(C, axis=0)  # 1 x F x 2
        P_diff = P_tile - C_tile  # n x F x 2
        rbf_norm = np.linalg.norm(P_diff, ord=2, axis=2, keepdims=False)  # n x F
        rbf = np.multiply(np.square(rbf_norm), np.log(rbf_norm + self.eps))  # n x F
        P_hat = np.concatenate([np.ones((n, 1)), P, rbf], axis=1)
        return P_hat  # n x F+3

    def build_P_prime(self, batch_C_prime):
        """ Generate Grid from batch_C_prime [batch_size x F x 2] """
        batch_size = batch_C_prime.size(0)
        batch_inv_delta_C = self.inv_delta_C.repeat(batch_size, 1, 1)
        batch_P_hat = self.P_hat.repeat(batch_size, 1, 1)
        batch_C_prime_with_zeros = torch.cat((batch_C_prime, torch.zeros(
            batch_size, 3, 2).float().to(device)), dim=1)  # batch_size x F+3 x 2
        batch_T = torch.bmm(batch_inv_delta_C, batch_C_prime_with_zeros)  # batch_size x F+3 x 2
        batch_P_prime = torch.bmm(batch_P_hat, batch_T)  # batch_size x n x 2
        return batch_P_prime  # batch_size x n x 2
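
Note: the grid generator above implements classic thin-plate-spline interpolation with kernel U(r) = r^2 log r: it solves T = Delta_C^{-1} [C'; 0] from the predicted fiducial points C' and evaluates P' = P_hat T at every rectified-grid point. A shape check, with illustrative sizes:

gg = GridGenerator(F=20, I_r_size=(32, 100)).to(device)
batch_C_prime = torch.rand(4, 20, 2, device=device) * 2 - 1  # fiducial points in [-1, 1]
P_prime = gg.build_P_prime(batch_C_prime)
print(P_prime.shape)  # torch.Size([4, 3200, 2]), n = 32 * 100 grid points
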
164 |
-
class VGG_FeatureExtractor(nn.Module):
|
165 |
-
""" FeatureExtractor of CRNN (https://arxiv.org/pdf/1507.05717.pdf) """
|
166 |
-
|
167 |
-
def __init__(self, input_channel, output_channel=512):
|
168 |
-
super(VGG_FeatureExtractor, self).__init__()
|
169 |
-
self.output_channel = [int(output_channel / 8), int(output_channel / 4),
|
170 |
-
int(output_channel / 2), output_channel] # [64, 128, 256, 512]
|
171 |
-
self.ConvNet = nn.Sequential(
|
172 |
-
nn.Conv2d(input_channel, self.output_channel[0], 3, 1, 1), nn.ReLU(True),
|
173 |
-
nn.MaxPool2d(2, 2), # 64x16x50
|
174 |
-
nn.Conv2d(self.output_channel[0], self.output_channel[1], 3, 1, 1), nn.ReLU(True),
|
175 |
-
nn.MaxPool2d(2, 2), # 128x8x25
|
176 |
-
nn.Conv2d(self.output_channel[1], self.output_channel[2], 3, 1, 1), nn.ReLU(True), # 256x8x25
|
177 |
-
nn.Conv2d(self.output_channel[2], self.output_channel[2], 3, 1, 1), nn.ReLU(True),
|
178 |
-
nn.MaxPool2d((2, 1), (2, 1)), # 256x4x25
|
179 |
-
nn.Conv2d(self.output_channel[2], self.output_channel[3], 3, 1, 1, bias=False),
|
180 |
-
nn.BatchNorm2d(self.output_channel[3]), nn.ReLU(True), # 512x4x25
|
181 |
-
nn.Conv2d(self.output_channel[3], self.output_channel[3], 3, 1, 1, bias=False),
|
182 |
-
nn.BatchNorm2d(self.output_channel[3]), nn.ReLU(True),
|
183 |
-
nn.MaxPool2d((2, 1), (2, 1)), # 512x2x25
|
184 |
-
nn.Conv2d(self.output_channel[3], self.output_channel[3], 2, 1, 0), nn.ReLU(True)) # 512x1x24
|
185 |
-
|
186 |
-
def forward(self, input):
|
187 |
-
return self.ConvNet(input)
|
188 |
-
|
189 |
-
|
190 |
-
class RCNN_FeatureExtractor(nn.Module):
|
191 |
-
""" FeatureExtractor of GRCNN (https://papers.nips.cc/paper/6637-gated-recurrent-convolution-neural-network-for-ocr.pdf) """
|
192 |
-
|
193 |
-
def __init__(self, input_channel, output_channel=512):
|
194 |
-
super(RCNN_FeatureExtractor, self).__init__()
|
195 |
-
self.output_channel = [int(output_channel / 8), int(output_channel / 4),
|
196 |
-
int(output_channel / 2), output_channel] # [64, 128, 256, 512]
|
197 |
-
self.ConvNet = nn.Sequential(
|
198 |
-
nn.Conv2d(input_channel, self.output_channel[0], 3, 1, 1), nn.ReLU(True),
|
199 |
-
nn.MaxPool2d(2, 2), # 64 x 16 x 50
|
200 |
-
GRCL(self.output_channel[0], self.output_channel[0], num_iteration=5, kernel_size=3, pad=1),
|
201 |
-
nn.MaxPool2d(2, 2), # 64 x 8 x 25
|
202 |
-
GRCL(self.output_channel[0], self.output_channel[1], num_iteration=5, kernel_size=3, pad=1),
|
203 |
-
nn.MaxPool2d(2, (2, 1), (0, 1)), # 128 x 4 x 26
|
204 |
-
GRCL(self.output_channel[1], self.output_channel[2], num_iteration=5, kernel_size=3, pad=1),
|
205 |
-
nn.MaxPool2d(2, (2, 1), (0, 1)), # 256 x 2 x 27
|
206 |
-
nn.Conv2d(self.output_channel[2], self.output_channel[3], 2, 1, 0, bias=False),
|
207 |
-
nn.BatchNorm2d(self.output_channel[3]), nn.ReLU(True)) # 512 x 1 x 26
|
208 |
-
|
209 |
-
def forward(self, input):
|
210 |
-
return self.ConvNet(input)
|
211 |
-
|
212 |
-
|
213 |
-
class ResNet_FeatureExtractor(nn.Module):
|
214 |
-
""" FeatureExtractor of FAN (http://openaccess.thecvf.com/content_ICCV_2017/papers/Cheng_Focusing_Attention_Towards_ICCV_2017_paper.pdf) """
|
215 |
-
|
216 |
-
def __init__(self, input_channel, output_channel=512):
|
217 |
-
super(ResNet_FeatureExtractor, self).__init__()
|
218 |
-
self.ConvNet = ResNet(input_channel, output_channel, BasicBlock, [1, 2, 5, 3])
|
219 |
-
|
220 |
-
def forward(self, input):
|
221 |
-
return self.ConvNet(input)
|
222 |
-
|
223 |
-
|
224 |
-
# For Gated RCNN
|
225 |
-
class GRCL(nn.Module):
|
226 |
-
|
227 |
-
def __init__(self, input_channel, output_channel, num_iteration, kernel_size, pad):
|
228 |
-
super(GRCL, self).__init__()
|
229 |
-
self.wgf_u = nn.Conv2d(input_channel, output_channel, 1, 1, 0, bias=False)
|
230 |
-
self.wgr_x = nn.Conv2d(output_channel, output_channel, 1, 1, 0, bias=False)
|
231 |
-
self.wf_u = nn.Conv2d(input_channel, output_channel, kernel_size, 1, pad, bias=False)
|
232 |
-
self.wr_x = nn.Conv2d(output_channel, output_channel, kernel_size, 1, pad, bias=False)
|
233 |
-
|
234 |
-
self.BN_x_init = nn.BatchNorm2d(output_channel)
|
235 |
-
|
236 |
-
self.num_iteration = num_iteration
|
237 |
-
self.GRCL = [GRCL_unit(output_channel) for _ in range(num_iteration)]
|
238 |
-
self.GRCL = nn.Sequential(*self.GRCL)
|
239 |
-
|
240 |
-
def forward(self, input):
|
241 |
-
""" The input of GRCL is consistant over time t, which is denoted by u(0)
|
242 |
-
thus wgf_u / wf_u is also consistant over time t.
|
243 |
-
"""
|
244 |
-
wgf_u = self.wgf_u(input)
|
245 |
-
wf_u = self.wf_u(input)
|
246 |
-
x = F.relu(self.BN_x_init(wf_u))
|
247 |
-
|
248 |
-
for i in range(self.num_iteration):
|
249 |
-
x = self.GRCL[i](wgf_u, self.wgr_x(x), wf_u, self.wr_x(x))
|
250 |
-
|
251 |
-
return x
|
252 |
-
|
253 |
-
|
254 |
-
class GRCL_unit(nn.Module):
|
255 |
-
|
256 |
-
def __init__(self, output_channel):
|
257 |
-
super(GRCL_unit, self).__init__()
|
258 |
-
self.BN_gfu = nn.BatchNorm2d(output_channel)
|
259 |
-
self.BN_grx = nn.BatchNorm2d(output_channel)
|
260 |
-
self.BN_fu = nn.BatchNorm2d(output_channel)
|
261 |
-
self.BN_rx = nn.BatchNorm2d(output_channel)
|
262 |
-
self.BN_Gx = nn.BatchNorm2d(output_channel)
|
263 |
-
|
264 |
-
def forward(self, wgf_u, wgr_x, wf_u, wr_x):
|
265 |
-
G_first_term = self.BN_gfu(wgf_u)
|
266 |
-
G_second_term = self.BN_grx(wgr_x)
|
267 |
-
G = F.sigmoid(G_first_term + G_second_term)
|
268 |
-
|
269 |
-
x_first_term = self.BN_fu(wf_u)
|
270 |
-
x_second_term = self.BN_Gx(self.BN_rx(wr_x) * G)
|
271 |
-
x = F.relu(x_first_term + x_second_term)
|
272 |
-
|
273 |
-
return x
|
274 |
-
|
275 |
-
|
276 |
-
class BasicBlock(nn.Module):
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = self._conv3x3(inplanes, planes)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = self._conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def _conv3x3(self, in_planes, out_planes, stride=1):
        "3x3 convolution with padding"
        return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                         padding=1, bias=False)

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)

        return out


class ResNet(nn.Module):

    def __init__(self, input_channel, output_channel, block, layers):
        super(ResNet, self).__init__()

        self.output_channel_block = [int(output_channel / 4), int(output_channel / 2), output_channel, output_channel]

        self.inplanes = int(output_channel / 8)
        self.conv0_1 = nn.Conv2d(input_channel, int(output_channel / 16),
                                 kernel_size=3, stride=1, padding=1, bias=False)
        self.bn0_1 = nn.BatchNorm2d(int(output_channel / 16))
        self.conv0_2 = nn.Conv2d(int(output_channel / 16), self.inplanes,
                                 kernel_size=3, stride=1, padding=1, bias=False)
        self.bn0_2 = nn.BatchNorm2d(self.inplanes)
        self.relu = nn.ReLU(inplace=True)

        self.maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        self.layer1 = self._make_layer(block, self.output_channel_block[0], layers[0])
        self.conv1 = nn.Conv2d(self.output_channel_block[0], self.output_channel_block[0],
                               kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(self.output_channel_block[0])

        self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        self.layer2 = self._make_layer(block, self.output_channel_block[1], layers[1], stride=1)
        self.conv2 = nn.Conv2d(self.output_channel_block[1], self.output_channel_block[1],
                               kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(self.output_channel_block[1])

        self.maxpool3 = nn.MaxPool2d(kernel_size=2, stride=(2, 1), padding=(0, 1))
        self.layer3 = self._make_layer(block, self.output_channel_block[2], layers[2], stride=1)
        self.conv3 = nn.Conv2d(self.output_channel_block[2], self.output_channel_block[2],
                               kernel_size=3, stride=1, padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.output_channel_block[2])

        self.layer4 = self._make_layer(block, self.output_channel_block[3], layers[3], stride=1)
        self.conv4_1 = nn.Conv2d(self.output_channel_block[3], self.output_channel_block[3],
                                 kernel_size=2, stride=(2, 1), padding=(0, 1), bias=False)
        self.bn4_1 = nn.BatchNorm2d(self.output_channel_block[3])
        self.conv4_2 = nn.Conv2d(self.output_channel_block[3], self.output_channel_block[3],
                                 kernel_size=2, stride=1, padding=0, bias=False)
        self.bn4_2 = nn.BatchNorm2d(self.output_channel_block[3])

    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv0_1(x)
        x = self.bn0_1(x)
        x = self.relu(x)
        x = self.conv0_2(x)
        x = self.bn0_2(x)
        x = self.relu(x)

        x = self.maxpool1(x)
        x = self.layer1(x)
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)

        x = self.maxpool2(x)
        x = self.layer2(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)

        x = self.maxpool3(x)
        x = self.layer3(x)
        x = self.conv3(x)
        x = self.bn3(x)
        x = self.relu(x)

        x = self.layer4(x)
        x = self.conv4_1(x)
        x = self.bn4_1(x)
        x = self.relu(x)
        x = self.conv4_2(x)
        x = self.bn4_2(x)
        x = self.relu(x)

        return x
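A quick way to sanity-check the backbone's output geometry, as a sketch with illustrative sizes; layers=[1, 2, 5, 3] mirrors a common text-recognition configuration but is an assumption here, not taken from this hunk:

import torch

backbone = ResNet(input_channel=1, output_channel=512, block=BasicBlock, layers=[1, 2, 5, 3])
feat = backbone(torch.randn(2, 1, 32, 100))   # grayscale 32x100 text-line crops
print(feat.shape)                             # torch.Size([2, 512, 1, 26]): height collapses to 1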
class BidirectionalLSTM(nn.Module):

    def __init__(self, input_size, hidden_size, output_size):
        super(BidirectionalLSTM, self).__init__()
        self.rnn = nn.LSTM(input_size, hidden_size, bidirectional=True, batch_first=True)
        self.linear = nn.Linear(hidden_size * 2, output_size)

    def forward(self, input):
        """
        input : visual feature [batch_size x T x input_size]
        output : contextual feature [batch_size x T x output_size]
        """
        try:
            self.rnn.flatten_parameters()
        except Exception:  # flatten_parameters() can fail e.g. on DataParallel replicas
            pass
        recurrent, _ = self.rnn(input)  # batch_size x T x input_size -> batch_size x T x (2*hidden_size)
        output = self.linear(recurrent)  # batch_size x T x output_size
        return output
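A shape check for the sequence model above, with illustrative sizes:

import torch

rnn = BidirectionalLSTM(input_size=512, hidden_size=256, output_size=256)
visual = torch.randn(2, 26, 512)      # [batch, T, input_size]
contextual = rnn(visual)
print(contextual.shape)               # torch.Size([2, 26, 256])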
class Attention(nn.Module):

    def __init__(self, input_size, hidden_size, num_classes):
        super(Attention, self).__init__()
        self.attention_cell = AttentionCell(input_size, hidden_size, num_classes)
        self.hidden_size = hidden_size
        self.num_classes = num_classes
        self.generator = nn.Linear(hidden_size, num_classes)

    def _char_to_onehot(self, input_char, onehot_dim=38):
        input_char = input_char.unsqueeze(1)
        batch_size = input_char.size(0)
        one_hot = torch.FloatTensor(batch_size, onehot_dim).zero_().to(device)
        one_hot = one_hot.scatter_(1, input_char, 1)
        return one_hot

    def forward(self, batch_H, text, is_train=True, batch_max_length=25):
        """
        input:
            batch_H : contextual_feature H = hidden state of encoder. [batch_size x num_steps x num_classes]
            text : the text-index of each image. [batch_size x (max_length+1)]. +1 for [GO] token. text[:, 0] = [GO].
        output: probability distribution at each step [batch_size x num_steps x num_classes]
        """
        batch_size = batch_H.size(0)
        num_steps = batch_max_length + 1  # +1 for [s] at end of sentence.

        output_hiddens = torch.FloatTensor(batch_size, num_steps, self.hidden_size).fill_(0).to(device)
        hidden = (torch.FloatTensor(batch_size, self.hidden_size).fill_(0).to(device),
                  torch.FloatTensor(batch_size, self.hidden_size).fill_(0).to(device))

        if is_train:
            for i in range(num_steps):
                # one-hot vectors for the i-th char. in a batch
                char_onehots = self._char_to_onehot(text[:, i], onehot_dim=self.num_classes)
                # hidden : decoder's hidden s_{t-1}, batch_H : encoder's hidden H, char_onehots : one-hot(y_{t-1})
                hidden, alpha = self.attention_cell(hidden, batch_H, char_onehots)
                output_hiddens[:, i, :] = hidden[0]  # LSTM hidden index (0: hidden, 1: Cell)
            probs = self.generator(output_hiddens)

        else:
            targets = torch.LongTensor(batch_size).fill_(0).to(device)  # [GO] token
            probs = torch.FloatTensor(batch_size, num_steps, self.num_classes).fill_(0).to(device)

            for i in range(num_steps):
                char_onehots = self._char_to_onehot(targets, onehot_dim=self.num_classes)
                hidden, alpha = self.attention_cell(hidden, batch_H, char_onehots)
                probs_step = self.generator(hidden[0])
                probs[:, i, :] = probs_step
                _, next_input = probs_step.max(1)
                targets = next_input

        return probs  # batch_size x num_steps x num_classes


class AttentionCell(nn.Module):

    def __init__(self, input_size, hidden_size, num_embeddings):
        super(AttentionCell, self).__init__()
        self.i2h = nn.Linear(input_size, hidden_size, bias=False)
        self.h2h = nn.Linear(hidden_size, hidden_size)  # either i2h or h2h should have bias
        self.score = nn.Linear(hidden_size, 1, bias=False)
        self.rnn = nn.LSTMCell(input_size + num_embeddings, hidden_size)
        self.hidden_size = hidden_size

    def forward(self, prev_hidden, batch_H, char_onehots):
        # [batch_size x num_encoder_step x num_channel] -> [batch_size x num_encoder_step x hidden_size]
        batch_H_proj = self.i2h(batch_H)
        prev_hidden_proj = self.h2h(prev_hidden[0]).unsqueeze(1)
        e = self.score(torch.tanh(batch_H_proj + prev_hidden_proj))  # batch_size x num_encoder_step x 1

        alpha = F.softmax(e, dim=1)
        context = torch.bmm(alpha.permute(0, 2, 1), batch_H).squeeze(1)  # batch_size x num_channel
        concat_context = torch.cat([context, char_onehots], 1)  # batch_size x (num_channel + num_embedding)
        cur_hidden = self.rnn(concat_context, prev_hidden)
        return cur_hidden, alpha
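A greedy-decoding sketch for the attention head above; the file references a module-level `device` (pinned to CPU here for illustration), and 38 classes matches the one-hot default:

import torch

device = torch.device("cpu")
decoder = Attention(input_size=256, hidden_size=256, num_classes=38)
batch_H = torch.randn(2, 26, 256)     # encoder outputs [batch, num_steps, input_size]
probs = decoder(batch_H, text=None, is_train=False, batch_max_length=25)
print(probs.shape)                    # torch.Size([2, 26, 38])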
class Model(nn.Module):

    def __init__(self, input_channel, output_channel, hidden_size, num_class):
        super(Model, self).__init__()

        """ FeatureExtraction """
        self.FeatureExtraction = VGG_FeatureExtractor(input_channel, output_channel)
        self.FeatureExtraction_output = output_channel  # int(imgH/16-1) * 512
        self.AdaptiveAvgPool = nn.AdaptiveAvgPool2d((None, 1))  # Transform final (imgH/16-1) -> 1

        """ Sequence modeling """
        self.SequenceModeling = nn.Sequential(
            BidirectionalLSTM(self.FeatureExtraction_output, hidden_size, hidden_size),
            BidirectionalLSTM(hidden_size, hidden_size, hidden_size))
        self.SequenceModeling_output = hidden_size

        self.Prediction = nn.Linear(self.SequenceModeling_output, num_class)

    def forward(self, input, text):
        """ Feature extraction stage """
        visual_feature = self.FeatureExtraction(input)
        visual_feature = self.AdaptiveAvgPool(visual_feature.permute(0, 3, 1, 2))  # [b, c, h, w] -> [b, w, c, h]
        visual_feature = visual_feature.squeeze(3)

        """ Sequence modeling stage """
        contextual_feature = self.SequenceModeling(visual_feature)

        prediction = self.Prediction(contextual_feature.contiguous())

        return prediction
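A smoke test under assumed sizes, not part of the deleted file; VGG_FeatureExtractor is defined earlier in this file (outside this hunk), and `text` is accepted but unused by this linear prediction head:

import torch

model = Model(input_channel=1, output_channel=256, hidden_size=256, num_class=97)
images = torch.randn(2, 1, 32, 100)   # a batch of grayscale text-line crops
logits = model(images, text=None)     # [batch, T, num_class] per-step scores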
spaces/Bart92/RVC_HF/infer/lib/uvr5_pack/lib_v5/dataset.py
DELETED
@@ -1,183 +0,0 @@
import os
import random

import numpy as np
import torch
import torch.utils.data
from tqdm import tqdm

from . import spec_utils


class VocalRemoverValidationSet(torch.utils.data.Dataset):
    def __init__(self, patch_list):
        self.patch_list = patch_list

    def __len__(self):
        return len(self.patch_list)

    def __getitem__(self, idx):
        path = self.patch_list[idx]
        data = np.load(path)

        X, y = data["X"], data["y"]

        X_mag = np.abs(X)
        y_mag = np.abs(y)

        return X_mag, y_mag


def make_pair(mix_dir, inst_dir):
    input_exts = [".wav", ".m4a", ".mp3", ".mp4", ".flac"]

    X_list = sorted(
        [
            os.path.join(mix_dir, fname)
            for fname in os.listdir(mix_dir)
            if os.path.splitext(fname)[1] in input_exts
        ]
    )
    y_list = sorted(
        [
            os.path.join(inst_dir, fname)
            for fname in os.listdir(inst_dir)
            if os.path.splitext(fname)[1] in input_exts
        ]
    )

    filelist = list(zip(X_list, y_list))

    return filelist


def train_val_split(dataset_dir, split_mode, val_rate, val_filelist):
    if split_mode == "random":
        filelist = make_pair(
            os.path.join(dataset_dir, "mixtures"),
            os.path.join(dataset_dir, "instruments"),
        )

        random.shuffle(filelist)

        if len(val_filelist) == 0:
            val_size = int(len(filelist) * val_rate)
            train_filelist = filelist[:-val_size]
            val_filelist = filelist[-val_size:]
        else:
            train_filelist = [
                pair for pair in filelist if list(pair) not in val_filelist
            ]
    elif split_mode == "subdirs":
        if len(val_filelist) != 0:
            raise ValueError(
                "The `val_filelist` option is not available in `subdirs` mode"
            )

        train_filelist = make_pair(
            os.path.join(dataset_dir, "training/mixtures"),
            os.path.join(dataset_dir, "training/instruments"),
        )

        val_filelist = make_pair(
            os.path.join(dataset_dir, "validation/mixtures"),
            os.path.join(dataset_dir, "validation/instruments"),
        )

    return train_filelist, val_filelist


def augment(X, y, reduction_rate, reduction_mask, mixup_rate, mixup_alpha):
    perm = np.random.permutation(len(X))
    for i, idx in enumerate(tqdm(perm)):
        if np.random.uniform() < reduction_rate:
            y[idx] = spec_utils.reduce_vocal_aggressively(
                X[idx], y[idx], reduction_mask
            )

        if np.random.uniform() < 0.5:
            # swap channel
            X[idx] = X[idx, ::-1]
            y[idx] = y[idx, ::-1]
        if np.random.uniform() < 0.02:
            # mono
            X[idx] = X[idx].mean(axis=0, keepdims=True)
            y[idx] = y[idx].mean(axis=0, keepdims=True)
        if np.random.uniform() < 0.02:
            # inst
            X[idx] = y[idx]

        if np.random.uniform() < mixup_rate and i < len(perm) - 1:
            lam = np.random.beta(mixup_alpha, mixup_alpha)
            X[idx] = lam * X[idx] + (1 - lam) * X[perm[i + 1]]
            y[idx] = lam * y[idx] + (1 - lam) * y[perm[i + 1]]

    return X, y


def make_padding(width, cropsize, offset):
    left = offset
    roi_size = cropsize - left * 2
    if roi_size == 0:
        roi_size = cropsize
    right = roi_size - (width % roi_size) + left

    return left, right, roi_size


def make_training_set(filelist, cropsize, patches, sr, hop_length, n_fft, offset):
    len_dataset = patches * len(filelist)

    X_dataset = np.zeros((len_dataset, 2, n_fft // 2 + 1, cropsize), dtype=np.complex64)
    y_dataset = np.zeros((len_dataset, 2, n_fft // 2 + 1, cropsize), dtype=np.complex64)

    for i, (X_path, y_path) in enumerate(tqdm(filelist)):
        X, y = spec_utils.cache_or_load(X_path, y_path, sr, hop_length, n_fft)
        coef = np.max([np.abs(X).max(), np.abs(y).max()])
        X, y = X / coef, y / coef

        l, r, roi_size = make_padding(X.shape[2], cropsize, offset)
        X_pad = np.pad(X, ((0, 0), (0, 0), (l, r)), mode="constant")
        y_pad = np.pad(y, ((0, 0), (0, 0), (l, r)), mode="constant")

        starts = np.random.randint(0, X_pad.shape[2] - cropsize, patches)
        ends = starts + cropsize
        for j in range(patches):
            idx = i * patches + j
            X_dataset[idx] = X_pad[:, :, starts[j] : ends[j]]
            y_dataset[idx] = y_pad[:, :, starts[j] : ends[j]]

    return X_dataset, y_dataset


def make_validation_set(filelist, cropsize, sr, hop_length, n_fft, offset):
    patch_list = []
    patch_dir = "cs{}_sr{}_hl{}_nf{}_of{}".format(
        cropsize, sr, hop_length, n_fft, offset
    )
    os.makedirs(patch_dir, exist_ok=True)

    for i, (X_path, y_path) in enumerate(tqdm(filelist)):
        basename = os.path.splitext(os.path.basename(X_path))[0]

        X, y = spec_utils.cache_or_load(X_path, y_path, sr, hop_length, n_fft)
        coef = np.max([np.abs(X).max(), np.abs(y).max()])
        X, y = X / coef, y / coef

        l, r, roi_size = make_padding(X.shape[2], cropsize, offset)
        X_pad = np.pad(X, ((0, 0), (0, 0), (l, r)), mode="constant")
        y_pad = np.pad(y, ((0, 0), (0, 0), (l, r)), mode="constant")

        len_dataset = int(np.ceil(X.shape[2] / roi_size))
        for j in range(len_dataset):
            outpath = os.path.join(patch_dir, "{}_p{}.npz".format(basename, j))
            start = j * roi_size
            if not os.path.exists(outpath):
                np.savez(
                    outpath,
                    X=X_pad[:, :, start : start + cropsize],
                    y=y_pad[:, :, start : start + cropsize],
                )
            patch_list.append(outpath)

    return VocalRemoverValidationSet(patch_list)
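A hypothetical call sequence for the helpers above, not part of the deleted file; the hyperparameter values are illustrative, while the "training"/"validation" subdirectory layout is the one `subdirs` mode expects:

train_files, val_files = train_val_split(
    dataset_dir="dataset", split_mode="subdirs", val_rate=0.1, val_filelist=[]
)
X_train, y_train = make_training_set(
    train_files, cropsize=256, patches=16, sr=44100,
    hop_length=1024, n_fft=2048, offset=64,
)
val_set = make_validation_set(
    val_files, cropsize=256, sr=44100, hop_length=1024, n_fft=2048, offset=64
)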
spaces/BartPoint/VoiceChange/infer_pack/transforms.py
DELETED
@@ -1,209 +0,0 @@
import torch
from torch.nn import functional as F

import numpy as np


DEFAULT_MIN_BIN_WIDTH = 1e-3
DEFAULT_MIN_BIN_HEIGHT = 1e-3
DEFAULT_MIN_DERIVATIVE = 1e-3


def piecewise_rational_quadratic_transform(
    inputs,
    unnormalized_widths,
    unnormalized_heights,
    unnormalized_derivatives,
    inverse=False,
    tails=None,
    tail_bound=1.0,
    min_bin_width=DEFAULT_MIN_BIN_WIDTH,
    min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
    min_derivative=DEFAULT_MIN_DERIVATIVE,
):
    if tails is None:
        spline_fn = rational_quadratic_spline
        spline_kwargs = {}
    else:
        spline_fn = unconstrained_rational_quadratic_spline
        spline_kwargs = {"tails": tails, "tail_bound": tail_bound}

    outputs, logabsdet = spline_fn(
        inputs=inputs,
        unnormalized_widths=unnormalized_widths,
        unnormalized_heights=unnormalized_heights,
        unnormalized_derivatives=unnormalized_derivatives,
        inverse=inverse,
        min_bin_width=min_bin_width,
        min_bin_height=min_bin_height,
        min_derivative=min_derivative,
        **spline_kwargs
    )
    return outputs, logabsdet


def searchsorted(bin_locations, inputs, eps=1e-6):
    bin_locations[..., -1] += eps
    return torch.sum(inputs[..., None] >= bin_locations, dim=-1) - 1


def unconstrained_rational_quadratic_spline(
    inputs,
    unnormalized_widths,
    unnormalized_heights,
    unnormalized_derivatives,
    inverse=False,
    tails="linear",
    tail_bound=1.0,
    min_bin_width=DEFAULT_MIN_BIN_WIDTH,
    min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
    min_derivative=DEFAULT_MIN_DERIVATIVE,
):
    inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
    outside_interval_mask = ~inside_interval_mask

    outputs = torch.zeros_like(inputs)
    logabsdet = torch.zeros_like(inputs)

    if tails == "linear":
        unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
        constant = np.log(np.exp(1 - min_derivative) - 1)
        unnormalized_derivatives[..., 0] = constant
        unnormalized_derivatives[..., -1] = constant

        outputs[outside_interval_mask] = inputs[outside_interval_mask]
        logabsdet[outside_interval_mask] = 0
    else:
        raise RuntimeError("{} tails are not implemented.".format(tails))

    (
        outputs[inside_interval_mask],
        logabsdet[inside_interval_mask],
    ) = rational_quadratic_spline(
        inputs=inputs[inside_interval_mask],
        unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
        unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
        unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
        inverse=inverse,
        left=-tail_bound,
        right=tail_bound,
        bottom=-tail_bound,
        top=tail_bound,
        min_bin_width=min_bin_width,
        min_bin_height=min_bin_height,
        min_derivative=min_derivative,
    )

    return outputs, logabsdet


def rational_quadratic_spline(
    inputs,
    unnormalized_widths,
    unnormalized_heights,
    unnormalized_derivatives,
    inverse=False,
    left=0.0,
    right=1.0,
    bottom=0.0,
    top=1.0,
    min_bin_width=DEFAULT_MIN_BIN_WIDTH,
    min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
    min_derivative=DEFAULT_MIN_DERIVATIVE,
):
    if torch.min(inputs) < left or torch.max(inputs) > right:
        raise ValueError("Input to a transform is not within its domain")

    num_bins = unnormalized_widths.shape[-1]

    if min_bin_width * num_bins > 1.0:
        raise ValueError("Minimal bin width too large for the number of bins")
    if min_bin_height * num_bins > 1.0:
        raise ValueError("Minimal bin height too large for the number of bins")

    widths = F.softmax(unnormalized_widths, dim=-1)
    widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
    cumwidths = torch.cumsum(widths, dim=-1)
    cumwidths = F.pad(cumwidths, pad=(1, 0), mode="constant", value=0.0)
    cumwidths = (right - left) * cumwidths + left
    cumwidths[..., 0] = left
    cumwidths[..., -1] = right
    widths = cumwidths[..., 1:] - cumwidths[..., :-1]

    derivatives = min_derivative + F.softplus(unnormalized_derivatives)

    heights = F.softmax(unnormalized_heights, dim=-1)
    heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
    cumheights = torch.cumsum(heights, dim=-1)
    cumheights = F.pad(cumheights, pad=(1, 0), mode="constant", value=0.0)
    cumheights = (top - bottom) * cumheights + bottom
    cumheights[..., 0] = bottom
    cumheights[..., -1] = top
    heights = cumheights[..., 1:] - cumheights[..., :-1]

    if inverse:
        bin_idx = searchsorted(cumheights, inputs)[..., None]
    else:
        bin_idx = searchsorted(cumwidths, inputs)[..., None]

    input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
    input_bin_widths = widths.gather(-1, bin_idx)[..., 0]

    input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
    delta = heights / widths
    input_delta = delta.gather(-1, bin_idx)[..., 0]

    input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
    input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]

    input_heights = heights.gather(-1, bin_idx)[..., 0]

    if inverse:
        a = (inputs - input_cumheights) * (
            input_derivatives + input_derivatives_plus_one - 2 * input_delta
        ) + input_heights * (input_delta - input_derivatives)
        b = input_heights * input_derivatives - (inputs - input_cumheights) * (
            input_derivatives + input_derivatives_plus_one - 2 * input_delta
        )
        c = -input_delta * (inputs - input_cumheights)

        discriminant = b.pow(2) - 4 * a * c
        assert (discriminant >= 0).all()

        root = (2 * c) / (-b - torch.sqrt(discriminant))
        outputs = root * input_bin_widths + input_cumwidths

        theta_one_minus_theta = root * (1 - root)
        denominator = input_delta + (
            (input_derivatives + input_derivatives_plus_one - 2 * input_delta)
            * theta_one_minus_theta
        )
        derivative_numerator = input_delta.pow(2) * (
            input_derivatives_plus_one * root.pow(2)
            + 2 * input_delta * theta_one_minus_theta
            + input_derivatives * (1 - root).pow(2)
        )
        logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)

        return outputs, -logabsdet
    else:
        theta = (inputs - input_cumwidths) / input_bin_widths
        theta_one_minus_theta = theta * (1 - theta)

        numerator = input_heights * (
            input_delta * theta.pow(2) + input_derivatives * theta_one_minus_theta
        )
        denominator = input_delta + (
            (input_derivatives + input_derivatives_plus_one - 2 * input_delta)
            * theta_one_minus_theta
        )
        outputs = input_cumheights + numerator / denominator

        derivative_numerator = input_delta.pow(2) * (
            input_derivatives_plus_one * theta.pow(2)
            + 2 * input_delta * theta_one_minus_theta
            + input_derivatives * (1 - theta).pow(2)
        )
        logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)

        return outputs, logabsdet
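A minimal sketch (not part of the deleted file) of the spline's round-trip property; the tensor shapes follow the function signatures above, and the sizes are illustrative:

import torch

batch, num_bins = 4, 8
inputs = torch.rand(batch) * 2 - 1              # values inside (-1, 1)
uw = torch.randn(batch, num_bins)               # unnormalized widths
uh = torch.randn(batch, num_bins)               # unnormalized heights
ud = torch.randn(batch, num_bins - 1)           # unnormalized derivatives ("linear" tails)

y, logdet = piecewise_rational_quadratic_transform(
    inputs, uw, uh, ud, inverse=False, tails="linear", tail_bound=1.0)
x, inv_logdet = piecewise_rational_quadratic_transform(
    y, uw, uh, ud, inverse=True, tails="linear", tail_bound=1.0)

assert torch.allclose(x, inputs, atol=1e-4)     # forward then inverse recovers the input
assert torch.allclose(logdet + inv_logdet, torch.zeros_like(logdet), atol=1e-4)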
spaces/Benson/text-generation/Examples/12a Hoja De Marcado Descargar 2021.md
DELETED
@@ -1,90 +0,0 @@

<h1>New Ringtone Download 2021: How to Find and Set the Best Ringtones for Your Phone</h1>
<p>Are you bored of hearing the same old ringtone every time someone calls you? Do you want to liven up your phone with some new and exciting sounds? If so, you are in luck. In this article, we will show you how to find and set the best ringtones for your phone in 2021. Whether you have an Android or an iPhone, we have you covered. Read on and discover how to make your phone stand out from the crowd.</p>
<h2>12a hoja de marcado descargar 2021</h2><br /><p><b><b>Download</b> ⇒ <a href="https://bltlly.com/2v6KlI">https://bltlly.com/2v6KlI</a></b></p><br /><br />
<h2>Introduction</h2>
<h3>What are ringtones and why are they important?</h3>
<p>Ringtones are the sounds your phone plays when you receive a call, a text message, or a notification. They can be anything from music to sound effects or voice clips. Ringtones are important because they help you identify who is calling you, and they also reflect your personality and mood. You can choose different ringtones for different contacts, or change them to suit the occasion. Ringtones can also make your phone more fun and enjoyable to use.</p>
<h3>How to find the best ringtones for your phone in 2021?</h3>
<p>There are many ways to find the best ringtones for your phone in 2021. Here are some of the most popular:</p>
<h4>Use online sources</h4>
<p>One of the easiest ways to find new ringtones is to use online sources, such as websites or social media platforms. There are many websites that offer free or paid ringtones to download, such as <a href="( 1 )">Zedge</a>, <a href="( 2 )">Prokerala</a>, or <a href="( 3 )">Mobiles24</a>. You can browse various categories, genres, artists, or themes, and download the ones you like. You can also search for specific keywords, such as "new ringtone download 2021", and see what comes up. Some websites also let you create your own ringtones by uploading your own audio files or by using online tools.</p>
<h4>Use ringtone apps</h4>

<h4>Use your own music files</h4>
<p>A third way to find new ringtones is to use your own music files on your phone. If you have songs you like and want to use as ringtones, you can do so with a file manager app on your phone, such as <a href="">Files by Google</a>, <a href="">ES File Explorer</a>, or <a href="">File Manager</a>. You can then locate the music files you want to use and copy or move them to the ringtones folder on your phone. Then you can set them as ringtones using the default settings or a third-party app on your phone.</p>
<p></p>
<h2>How to set the best ringtones for your phone in 2021?</h2>
<p>Once you have found the best ringtones for your phone, you need to set them as your default or custom ringtones. The process can vary depending on your device and operating system, but here are some general steps:</p>
<h3>For Android phones</h3>
<h4>Use the default settings</h4>
<p>To set a ringtone as your default ringtone, follow these steps:</p>
<ol>
<li>Go to Settings > Sound & vibration > Ringtone.</li>
<li>Select the ringtone you want to use from the list, or tap the plus icon to add a new one from your files.</li>
<li>Tap Save or OK to confirm your choice.</li>
</ol>
<p>To set a custom ringtone for a specific contact, follow these steps:</p>
<ol>
<li>Open the Contacts app and select the contact you want to customize.</li>
<li>Tap the Edit icon and then More fields.</li>
<li>Tap Ringtone and select the ringtone you want to use from the list, or tap the plus icon to add a new one from your files.</li>
<li>Tap Save or OK to confirm your choice.</li>
</ol>
<h4>Use a third-party app</h4>

<ol>
<li>Download and install the app of your choice from the Google Play Store.</li>
<li>Open the app and browse the available ringtones, or create your own using the app's tools.</li>
<li>Select the ringtone you want to use and tap Set as or Apply.</li>
<li>Choose the event you want to assign the ringtone to, such as Default ringtone, Contact ringtone, Notification sound, or Alarm sound.</li>
<li>Tap Save or OK to confirm your choice.</li>
</ol>
<h3>For iPhones</h3>
<h4>Use iTunes or Finder</h4>
<p>To set a ringtone as your default or custom ringtone on an iPhone, you need to use iTunes or Finder on your computer. You also need to make sure the ringtone file is in M4R format and under 40 seconds long. To use iTunes or Finder, follow these steps:</p>
<ol>
<li>Connect your iPhone to your computer using a USB cable.</li>
<li>Open iTunes or Finder and select your iPhone from the sidebar.</li>
<li>Click Tones in the left-hand menu and drag and drop the ringtone file into the Tones section.</li>
<li>Sync your iPhone with your computer by clicking Apply or Sync.</li>
<li>Disconnect your iPhone from your computer and go to Settings > Sounds & Haptics > Ringtone on your phone.</li>
<li>Select the ringtone you want to use from the list under Ringtones.</li>
</ol>
<p>To set a custom ringtone for a specific contact, follow these steps:</p>
<ol>
<li>Open the Phone app and select the contact you want to customize.</li>
<li>Tap Edit and then Ringtone.</li>
<li>Select the ringtone you want to use from the list under Ringtones.</li>
<li>Tap Done to confirm your choice.</li>
</ol>
<h4>Use a third-party app</h4>

<ol>
<li>Download and install the app of your choice from the App Store.</li>
<li>Open the app and browse the available ringtones, or create your own using the app's tools.</li>
<li>Select the ringtone you want to use and tap Export or Share.</li>
<li>Follow the on-screen instructions to sync your ringtone with iTunes or Finder on your computer.</li>
<li>Go to Settings > Sounds & Haptics > Ringtone on your phone and select the ringtone you want to use from the list under Ringtones.</li>
</ol>
<h2>Conclusion</h2>
<h3>Summary of the main points</h3>
<p>In this article, we have shown you how to find and set the best ringtones for your phone in 2021. We have discussed what ringtones are and why they are important, how to find the best ringtones for your phone using online sources, ringtone apps, or your own music files, and how to set the best ringtones for your phone using the default settings or third-party apps. We hope you have found this article helpful and informative, and that you have enjoyed reading it.</p>
<h3>Call to action</h3>
<p>Now that you know how to find and set the best ringtones for your phone in 2021, why not give it a try? You can start by browsing some of the websites or apps mentioned in this article, or by creating your own ringtones from your own audio files. You can then set them as your default or custom ringtones for different events or contacts. You will be surprised how much of a difference a new ringtone can make to your phone experience. So go ahead and download some new ringtones today, and make your phone more fun and personal.</p>
<h2>Frequently asked questions</h2>
<p>Here are some of the most frequently asked questions about new ringtone downloads for 2021:</p>
<ol>
<li><b>What is the best website to download free ringtones?</b></li>

<li><b>What is the best app to download free ringtones?</b></li>
<p>Again, there is no definitive answer to this question, as different apps may offer different features, functions, and user interfaces. However, some of the most popular and reliable apps that offer free ringtones are <a href="">Ringtone Maker</a>, <a href="">Audiko</a>, and <a href="">Zedge</a>. These apps have a large collection of ringtones across various categories, genres, artists, or themes, and they also let you create your own ringtones by recording your own voice or by editing your own music files. They also let you set ringtones for different events, such as calls, messages, alarms, or notifications.</p>
<li><b>How do I convert my music files to ringtones?</b></li>
<p>If you want to use your own music files as ringtones, you need to convert them to M4R format for iPhones, or MP3 format for Android phones. You can do this with a file conversion app on your phone, such as <a href="">MP3 Converter</a>, <a href="">M4R Converter</a>, or <a href="">Audio Converter</a>. You can also do this with a file conversion website on your computer, such as <a href="">Online Audio Converter</a>, <a href="">Online Convert</a>, or <a href="">Zamzar</a>. You can then transfer the converted files to your phone using a USB cable or a cloud service.</p>
<li><b>How do I make my own ringtones?</b></li>
<p>If you want to make your own ringtones from scratch, you need to use a ringtone maker app on your phone or a ringtone maker website on your computer. These tools let you record your own voice or edit your own music files. You can then trim, mix, customize, or add effects to your audio files. Some of the best ringtone maker apps are <a href="">Ringtone Maker</a>, <a href="">Audiko</a>, or <a href="">Zedge</a>. Some of the best ringtone maker websites are <a href="">Online Ringtone Maker</a>, <a href="">Ringtone Creator</a>, or <a href="">Melofania</a>.</p>

<p>If you want to change the volume of your ringtones, you need to use the volume buttons on your phone or the volume settings on your phone. To use the volume buttons, press the up or down button to adjust the volume level. To use the volume settings, go to Settings > Sounds & Haptics > Ringer and Alerts on an iPhone, or Settings > Sound & vibration > Volume on an Android phone. You can then drag the slider to adjust the volume level.</p>
</ol></p> 64aa2da5cf<br />
<br />
<br />
spaces/Benson/text-generation/Examples/College Brawl Apkmody.md
DELETED
@@ -1,102 +0,0 @@
<br />
<h1>College Brawl Apkmody: A Fun and Exciting Beat 'em Up Game for Android</h1>
<p>If you are looking for a fun and exciting beat 'em up game for your Android device, you should check out College Brawl Apkmody. This is a game where you have to fight your way through a college campus full of girls who want to beat you up. Sounds crazy, right? Well, that is what makes this game so entertaining and addictive. In this article, we will tell you everything you need to know about College Brawl Apkmody, including what it is, how to download and install it, how to play it, and why you should play it.</p>
<h2>college brawl apkmody</h2><br /><p><b><b>Download File</b> ✪ <a href="https://bltlly.com/2v6M6B">https://bltlly.com/2v6M6B</a></b></p><br /><br />
<h2>What is College Brawl?</h2>
<p>College Brawl Apkmody is a modified version of College Brawl, a beat 'em up game developed by StandApp Studio. The original game is available on the Google Play Store, but the modified version has some extra features that make it more enjoyable and challenging.</p>
<h3>The premise of the game</h3>
<p>The premise of the game is simple: you are a guy who has to fight his way through a college campus full of girls who want to beat you up. Why? Well, that is not very clear, but it does not really matter. The point is to survive as long as you can and defeat as many girls as you can. Along the way, you will encounter different types of girls, such as cheerleaders, nerds, goths, punks, and more. Each type has its own strengths and weaknesses, so you have to be careful and strategic.</p>
<h3>The features of the game</h3>
<p>The features of the game include:</p>
<p></p>
<ul>
<li>Beautiful graphics and animations</li>
<li>Smooth and responsive gameplay</li>
<li>Easy and intuitive controls</li>
<li>Various levels and environments</li>
<li>Different types of enemies and bosses</li>
<li>Power-ups and weapons to use</li>
<li>Achievements and leaderboards to compete with others</li>
<li>Unlimited coins and gems in the modified version</li>
<li>No ads in the modified version</li>
</ul>

<p>If you want to download and install College Brawl Apkmody on your Android device, you have to follow these steps:</p>
<h3>The requirements for the game</h3>
<p>The requirements for the game are:</p>
<ul>
<li>An Android device running version 4.4 or higher</li>
<li>At least 50 MB of free storage space</li>
<li>A stable internet connection</li>
<li>Permission to install apps from unknown sources (you can enable this in your device settings)</li>
</ul>
<h3>The steps to download and install the game</h3>
<p>The steps to download and install the game are:</p>
<ol>
<li>Go to [this link]( 1 ) or [this link]( 2 ) and download the APK file of College Brawl Apkmody.</li>
<li>Locate the downloaded file on your device and tap on it.</li>
<li>Follow the on-screen instructions to install the game.</li>
<li>Launch the game and enjoy!</li>
</ol>
<h2>How do you play College Brawl Apkmody?</h2>
<p>If you want to play College Brawl Apkmody, you need to know these things:</p>
<h3>The game controls</h3>
<p>The game controls are:</p>
<ul>
<li>A joystick on the left side of the screen to move your character.</li>
<li>A button on the right side of the screen to punch.</li>
<li>A button in the top-right corner of the screen to use a power-up or a weapon.</li>
<li>A button in the bottom-right corner of the screen to pause the game.</li>
</ul>
<h3>Tips and tricks for the game</h3>
<p>The tips and tricks for the game are:</p>
<ul>
<li>Use your coins and gems to upgrade your character's stats, such as health, strength, speed, and stamina.</li>
<li>Use your power-ups and weapons wisely, as they have limited uses and cooldowns.</li>
<li>Avoid being surrounded by enemies, as they can deal a lot of damage to you.</li>
<li>Use the environment to your advantage, such as barrels, crates, trash cans, and cars.</li>
<li>Watch out for the bosses, as they have special abilities and attacks.</li>

<li>Compete with other players on the leaderboards to see who is the best fighter.</li>
</ul>
<h2>Why should you play College Brawl Apkmody?</h2>
<p>If you are still not convinced to play College Brawl Apkmody, here are some reasons why you should:</p>
<h3>The benefits of playing the game</h3>
<p>The benefits of playing the game are:</p>
<ul>
<li>You will have a lot of fun and excitement beating up the girls on a college campus.</li>
<li>You will improve your reflexes and coordination skills as you dodge and hit your enemies.</li>
<li>You will experience a unique and humorous story and characters that will make you laugh.</li>
<li>You will enjoy the beautiful graphics and animations that will make you feel like you are in a cartoon.</li>
<li>You will have access to unlimited coins and gems that will let you upgrade your character and buy new power-ups and weapons.</li>
<li>You will not be bothered by ads interrupting your gameplay.</li>
</ul>
<h3>The drawbacks of playing the game</h3>
<p>The drawbacks of playing the game are:</p>
<ul>
<li>You may get addicted to the game and spend too much time playing.</li>
<li>You may get frustrated by the difficulty and challenge of some levels and enemies.</li>
<li>You may get bored by the repetition and lack of variety in some aspects of the game.</li>
<li>You may offend some people who may find the game sexist or violent.</li>
</ul>
<h2>Conclusion</h2>

<h2>Frequently asked questions</h2>
<p>Here are some frequently asked questions about College Brawl Apkmody:</p>
<h4>Q: Is College Brawl Apkmody safe to download and install?</h4>
<p>A: Yes, College Brawl Apkmody is safe to download and install. The APK file has been scanned by antivirus software and contains no malware or viruses. However, you should always download it from trusted sources such as [this link] or [this link] to avoid risks.</p>
<h4>Q: Is it legal to use College Brawl Apkmody?</h4>
<p>A: Yes, College Brawl Apkmody is legal to use. The modified version does not violate any laws or regulations. However, you should always respect the original developers of College Brawl by not distributing or selling the modified version without their permission.</p>
<h4>Q: How can I contact the developers of College Brawl Apkmody?</h4>
<p>A: You can contact the developers of College Brawl Apkmody by visiting their website at [this link] or by sending them an email at [this address](mailto:[email protected]).</p>
<h4>Q: How can I support the developers of College Brawl Apkmody?</h4>
<p>A: You can support the developers of College Brawl Apkmody by rating and reviewing the game on the Google Play Store, sharing the game with your friends and family, and following them on social media platforms such as Facebook, Twitter, and Instagram.</p>
<h4>Q: What are some other games similar to College Brawl Apkmody?</h4>
<p>A: Some other games similar to College Brawl Apkmody are:</p>
<ul>
<li>Beat Street: A retro-style beat 'em up game where you fight gangs and thugs in the streets of Tokyo.</li>
<li>Dan the Man: A fun and action-packed platformer where you punch, kick, and shoot your way through various enemies and bosses.</li>
<li>Anger of Stick 5: A popular and exciting stickman game where you use various weapons and skills to survive waves of zombies and enemies.</li>
</ul></p> 64aa2da5cf<br />
<br />
<br />
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/cli/progress_bars.py
DELETED
@@ -1,68 +0,0 @@
import functools
from typing import Callable, Generator, Iterable, Iterator, Optional, Tuple

from pip._vendor.rich.progress import (
    BarColumn,
    DownloadColumn,
    FileSizeColumn,
    Progress,
    ProgressColumn,
    SpinnerColumn,
    TextColumn,
    TimeElapsedColumn,
    TimeRemainingColumn,
    TransferSpeedColumn,
)

from pip._internal.utils.logging import get_indentation

DownloadProgressRenderer = Callable[[Iterable[bytes]], Iterator[bytes]]


def _rich_progress_bar(
    iterable: Iterable[bytes],
    *,
    bar_type: str,
    size: int,
) -> Generator[bytes, None, None]:
    assert bar_type == "on", "This should only be used in the default mode."

    if not size:
        total = float("inf")
        columns: Tuple[ProgressColumn, ...] = (
            TextColumn("[progress.description]{task.description}"),
            SpinnerColumn("line", speed=1.5),
            FileSizeColumn(),
            TransferSpeedColumn(),
            TimeElapsedColumn(),
        )
    else:
        total = size
        columns = (
            TextColumn("[progress.description]{task.description}"),
            BarColumn(),
            DownloadColumn(),
            TransferSpeedColumn(),
            TextColumn("eta"),
            TimeRemainingColumn(),
        )

    progress = Progress(*columns, refresh_per_second=30)
    task_id = progress.add_task(" " * (get_indentation() + 2), total=total)
    with progress:
        for chunk in iterable:
            yield chunk
            progress.update(task_id, advance=len(chunk))


def get_download_progress_renderer(
    *, bar_type: str, size: Optional[int] = None
) -> DownloadProgressRenderer:
    """Get an object that can be used to render the download progress.

    Returns a callable, that takes an iterable to "wrap".
    """
    if bar_type == "on":
        return functools.partial(_rich_progress_bar, bar_type=bar_type, size=size)
    else:
        return iter  # no-op, when passed an iterator
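A hypothetical wrapping of a chunk iterator, not part of the deleted file; with bar_type="on" the rich bar renders while the chunks are consumed, and with any other value the iterator passes through unchanged:

chunks = (b"x" * 1024 for _ in range(100))
renderer = get_download_progress_renderer(bar_type="on", size=100 * 1024)
payload = b"".join(renderer(chunks))   # drains the iterator, updating the bar per chunk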
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/resolution/__init__.py
DELETED
File without changes
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/cachecontrol/serialize.py
DELETED
@@ -1,190 +0,0 @@
# SPDX-FileCopyrightText: 2015 Eric Larson
#
# SPDX-License-Identifier: Apache-2.0

import base64
import io
import json
import zlib

from pip._vendor import msgpack
from pip._vendor.requests.structures import CaseInsensitiveDict

from .compat import HTTPResponse, pickle, text_type


def _b64_decode_bytes(b):
    return base64.b64decode(b.encode("ascii"))


def _b64_decode_str(s):
    return _b64_decode_bytes(s).decode("utf8")


_default_body_read = object()


class Serializer(object):
    def dumps(self, request, response, body=None):
        response_headers = CaseInsensitiveDict(response.headers)

        if body is None:
            # When a body isn't passed in, we'll read the response. We
            # also update the response with a new file handler to be
            # sure it acts as though it was never read.
            body = response.read(decode_content=False)
            response._fp = io.BytesIO(body)

        # NOTE: This is all a bit weird, but it's really important that on
        #       Python 2.x these objects are unicode and not str, even when
        #       they contain only ascii. The problem here is that msgpack
        #       understands the difference between unicode and bytes and we
        #       have it set to differentiate between them, however Python 2
        #       doesn't know the difference. Forcing these to unicode will be
        #       enough to have msgpack know the difference.
        data = {
            u"response": {
                u"body": body,  # Empty bytestring if body is stored separately
                u"headers": dict(
                    (text_type(k), text_type(v)) for k, v in response.headers.items()
                ),
                u"status": response.status,
                u"version": response.version,
                u"reason": text_type(response.reason),
                u"strict": response.strict,
                u"decode_content": response.decode_content,
            }
        }

        # Construct our vary headers
        data[u"vary"] = {}
        if u"vary" in response_headers:
            varied_headers = response_headers[u"vary"].split(",")
            for header in varied_headers:
                header = text_type(header).strip()
                header_value = request.headers.get(header, None)
                if header_value is not None:
                    header_value = text_type(header_value)
                data[u"vary"][header] = header_value

        return b",".join([b"cc=4", msgpack.dumps(data, use_bin_type=True)])

    def loads(self, request, data, body_file=None):
        # Short circuit if we've been given an empty set of data
        if not data:
            return

        # Determine what version of the serializer the data was serialized
        # with
        try:
            ver, data = data.split(b",", 1)
        except ValueError:
            ver = b"cc=0"

        # Make sure that our "ver" is actually a version and isn't a false
        # positive from a , being in the data stream.
        if ver[:3] != b"cc=":
            data = ver + data
            ver = b"cc=0"

        # Get the version number out of the cc=N
        ver = ver.split(b"=", 1)[-1].decode("ascii")

        # Dispatch to the actual load method for the given version
        try:
            return getattr(self, "_loads_v{}".format(ver))(request, data, body_file)

        except AttributeError:
            # This is a version we don't have a loads function for, so we'll
            # just treat it as a miss and return None
            return

    def prepare_response(self, request, cached, body_file=None):
        """Verify our vary headers match and construct a real urllib3
        HTTPResponse object.
        """
        # Special case the '*' Vary value as it means we cannot actually
        # determine if the cached response is suitable for this request.
        # This case is also handled in the controller code when creating
        # a cache entry, but is left here for backwards compatibility.
        if "*" in cached.get("vary", {}):
            return

        # Ensure that the Vary headers for the cached response match our
        # request
        for header, value in cached.get("vary", {}).items():
            if request.headers.get(header, None) != value:
                return

        body_raw = cached["response"].pop("body")

        headers = CaseInsensitiveDict(data=cached["response"]["headers"])
        if headers.get("transfer-encoding", "") == "chunked":
            headers.pop("transfer-encoding")

        cached["response"]["headers"] = headers

        try:
            if body_file is None:
                body = io.BytesIO(body_raw)
            else:
                body = body_file
        except TypeError:
            # This can happen if cachecontrol serialized to v1 format (pickle)
            # using Python 2. A Python 2 str(byte string) will be unpickled as
            # a Python 3 str (unicode string), which will cause the above to
            # fail with:
            #
            #     TypeError: 'str' does not support the buffer interface
            body = io.BytesIO(body_raw.encode("utf8"))

        return HTTPResponse(body=body, preload_content=False, **cached["response"])

    def _loads_v0(self, request, data, body_file=None):
        # The original legacy cache data. This doesn't contain enough
        # information to construct everything we need, so we'll treat this as
        # a miss.
        return

    def _loads_v1(self, request, data, body_file=None):
        try:
            cached = pickle.loads(data)
        except ValueError:
            return

        return self.prepare_response(request, cached, body_file)

    def _loads_v2(self, request, data, body_file=None):
        assert body_file is None
        try:
            cached = json.loads(zlib.decompress(data).decode("utf8"))
        except (ValueError, zlib.error):
            return

        # We need to decode the items that we've base64 encoded
        cached["response"]["body"] = _b64_decode_bytes(cached["response"]["body"])
        cached["response"]["headers"] = dict(
            (_b64_decode_str(k), _b64_decode_str(v))
            for k, v in cached["response"]["headers"].items()
        )
        cached["response"]["reason"] = _b64_decode_str(cached["response"]["reason"])
        cached["vary"] = dict(
            (_b64_decode_str(k), _b64_decode_str(v) if v is not None else v)
            for k, v in cached["vary"].items()
        )

        return self.prepare_response(request, cached, body_file)

    def _loads_v3(self, request, data, body_file):
        # Due to Python 2 encoding issues, it's impossible to know for sure
        # exactly how to load v3 entries, thus we'll treat these as a miss so
        # that they get rewritten out as v4 entries.
        return

    def _loads_v4(self, request, data, body_file=None):
        try:
            cached = msgpack.loads(data, raw=False)
        except ValueError:
            return

        return self.prepare_response(request, cached, body_file)
|
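For context on the deletion above: Serializer.dumps frames every cache entry as an ASCII version tag ("cc=4") joined to a msgpack payload with a comma, and loads strips the tag to dispatch to a matching _loads_vN method. A minimal standalone sketch of just that framing logic, with an opaque byte string standing in for the msgpack body (frame/unframe are illustrative names, not cachecontrol API):

def frame(version, payload):
    # dumps() joins an ASCII version tag and the serialized body with a comma
    return b",".join([b"cc=%d" % version, payload])

def unframe(data):
    # loads() splits on the first comma; anything without a leading b"cc="
    # is treated as legacy v0 data
    try:
        ver, rest = data.split(b",", 1)
    except ValueError:
        return "0", data          # no comma at all: legacy v0
    if ver[:3] != b"cc=":
        return "0", data          # false positive comma inside v0 data
    return ver.split(b"=", 1)[-1].decode("ascii"), rest

assert unframe(frame(4, b"\x82payload")) == ("4", b"\x82payload")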
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/theme.py
DELETED
@@ -1,115 +0,0 @@
-import configparser
-from typing import Dict, List, IO, Mapping, Optional
-
-from .default_styles import DEFAULT_STYLES
-from .style import Style, StyleType
-
-
-class Theme:
-    """A container for style information, used by :class:`~rich.console.Console`.
-
-    Args:
-        styles (Dict[str, Style], optional): A mapping of style names on to styles. Defaults to None for a theme with no styles.
-        inherit (bool, optional): Inherit default styles. Defaults to True.
-    """
-
-    styles: Dict[str, Style]
-
-    def __init__(
-        self, styles: Optional[Mapping[str, StyleType]] = None, inherit: bool = True
-    ):
-        self.styles = DEFAULT_STYLES.copy() if inherit else {}
-        if styles is not None:
-            self.styles.update(
-                {
-                    name: style if isinstance(style, Style) else Style.parse(style)
-                    for name, style in styles.items()
-                }
-            )
-
-    @property
-    def config(self) -> str:
-        """Get contents of a config file for this theme."""
-        config = "[styles]\n" + "\n".join(
-            f"{name} = {style}" for name, style in sorted(self.styles.items())
-        )
-        return config
-
-    @classmethod
-    def from_file(
-        cls, config_file: IO[str], source: Optional[str] = None, inherit: bool = True
-    ) -> "Theme":
-        """Load a theme from a text mode file.
-
-        Args:
-            config_file (IO[str]): An open conf file.
-            source (str, optional): The filename of the open file. Defaults to None.
-            inherit (bool, optional): Inherit default styles. Defaults to True.
-
-        Returns:
-            Theme: A New theme instance.
-        """
-        config = configparser.ConfigParser()
-        config.read_file(config_file, source=source)
-        styles = {name: Style.parse(value) for name, value in config.items("styles")}
-        theme = Theme(styles, inherit=inherit)
-        return theme
-
-    @classmethod
-    def read(
-        cls, path: str, inherit: bool = True, encoding: Optional[str] = None
-    ) -> "Theme":
-        """Read a theme from a path.
-
-        Args:
-            path (str): Path to a config file readable by Python configparser module.
-            inherit (bool, optional): Inherit default styles. Defaults to True.
-            encoding (str, optional): Encoding of the config file. Defaults to None.
-
-        Returns:
-            Theme: A new theme instance.
-        """
-        with open(path, "rt", encoding=encoding) as config_file:
-            return cls.from_file(config_file, source=path, inherit=inherit)
-
-
-class ThemeStackError(Exception):
-    """Base exception for errors related to the theme stack."""
-
-
-class ThemeStack:
-    """A stack of themes.
-
-    Args:
-        theme (Theme): A theme instance
-    """
-
-    def __init__(self, theme: Theme) -> None:
-        self._entries: List[Dict[str, Style]] = [theme.styles]
-        self.get = self._entries[-1].get
-
-    def push_theme(self, theme: Theme, inherit: bool = True) -> None:
-        """Push a theme on the top of the stack.
-
-        Args:
-            theme (Theme): A Theme instance.
-            inherit (boolean, optional): Inherit styles from current top of stack.
-        """
-        styles: Dict[str, Style]
-        styles = (
-            {**self._entries[-1], **theme.styles} if inherit else theme.styles.copy()
-        )
-        self._entries.append(styles)
-        self.get = self._entries[-1].get
-
-    def pop_theme(self) -> None:
-        """Pop (and discard) the top-most theme."""
-        if len(self._entries) == 1:
-            raise ThemeStackError("Unable to pop base theme")
-        self._entries.pop()
-        self.get = self._entries[-1].get
-
-
-if __name__ == "__main__":  # pragma: no cover
-    theme = Theme()
-    print(theme.config)
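A quick orientation on the stack semantics being removed: ThemeStack keeps a list of style mappings and rebinds its get method to the top entry on every push and pop, with inherit=True merging new styles over the current top. A minimal sketch of that merge-on-push behaviour, with plain dicts standing in for rich Style objects:

base = {"info": "dim cyan", "warning": "magenta"}
entries = [base]

# push_theme(theme, inherit=True): merge new styles over the current top
entries.append({**entries[-1], "warning": "bold red"})
assert entries[-1]["info"] == "dim cyan"       # inherited from below
assert entries[-1]["warning"] == "bold red"    # overridden by the push

# pop_theme(): drop the top entry; the base entry can never be popped
entries.pop()
assert entries[-1]["warning"] == "magenta"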
spaces/Boadiwaa/Recipes/openai/api_resources/customer.py
DELETED
@@ -1,12 +0,0 @@
-from openai.openai_object import OpenAIObject
-
-
-class Customer(OpenAIObject):
-    @classmethod
-    def get_url(self, customer, endpoint):
-        return f"/customer/{customer}/{endpoint}"
-
-    @classmethod
-    def create(cls, customer, endpoint, **params):
-        instance = cls()
-        return instance.request("post", cls.get_url(customer, endpoint), params)
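The wrapper above is thin: get_url builds a per-customer subresource path and create POSTs params to it. The path construction in isolation ("cus_123" and "usage" are made-up example values):

def get_url(customer, endpoint):
    return f"/customer/{customer}/{endpoint}"

assert get_url("cus_123", "usage") == "/customer/cus_123/usage"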
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/data/dataset_mapper.py
DELETED
@@ -1,149 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-import copy
-import logging
-import numpy as np
-import torch
-from fvcore.common.file_io import PathManager
-from PIL import Image
-
-from . import detection_utils as utils
-from . import transforms as T
-
-"""
-This file contains the default mapping that's applied to "dataset dicts".
-"""
-
-__all__ = ["DatasetMapper"]
-
-
-class DatasetMapper:
-    """
-    A callable which takes a dataset dict in Detectron2 Dataset format,
-    and map it into a format used by the model.
-
-    This is the default callable to be used to map your dataset dict into training data.
-    You may need to follow it to implement your own one for customized logic,
-    such as a different way to read or transform images.
-    See :doc:`/tutorials/data_loading` for details.
-
-    The callable currently does the following:
-
-    1. Read the image from "file_name"
-    2. Applies cropping/geometric transforms to the image and annotations
-    3. Prepare data and annotations to Tensor and :class:`Instances`
-    """
-
-    def __init__(self, cfg, is_train=True):
-        if cfg.INPUT.CROP.ENABLED and is_train:
-            self.crop_gen = T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE)
-            logging.getLogger(__name__).info("CropGen used in training: " + str(self.crop_gen))
-        else:
-            self.crop_gen = None
-
-        self.tfm_gens = utils.build_transform_gen(cfg, is_train)
-
-        # fmt: off
-        self.img_format     = cfg.INPUT.FORMAT
-        self.mask_on        = cfg.MODEL.MASK_ON
-        self.mask_format    = cfg.INPUT.MASK_FORMAT
-        self.keypoint_on    = cfg.MODEL.KEYPOINT_ON
-        self.load_proposals = cfg.MODEL.LOAD_PROPOSALS
-        # fmt: on
-        if self.keypoint_on and is_train:
-            # Flip only makes sense in training
-            self.keypoint_hflip_indices = utils.create_keypoint_hflip_indices(cfg.DATASETS.TRAIN)
-        else:
-            self.keypoint_hflip_indices = None
-
-        if self.load_proposals:
-            self.min_box_side_len = cfg.MODEL.PROPOSAL_GENERATOR.MIN_SIZE
-            self.proposal_topk = (
-                cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN
-                if is_train
-                else cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST
-            )
-        self.is_train = is_train
-
-    def __call__(self, dataset_dict):
-        """
-        Args:
-            dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
-
-        Returns:
-            dict: a format that builtin models in detectron2 accept
-        """
-        dataset_dict = copy.deepcopy(dataset_dict)  # it will be modified by code below
-        # USER: Write your own image loading if it's not from a file
-        image = utils.read_image(dataset_dict["file_name"], format=self.img_format)
-        utils.check_image_size(dataset_dict, image)
-
-        if "annotations" not in dataset_dict:
-            image, transforms = T.apply_transform_gens(
-                ([self.crop_gen] if self.crop_gen else []) + self.tfm_gens, image
-            )
-        else:
-            # Crop around an instance if there are instances in the image.
-            # USER: Remove if you don't use cropping
-            if self.crop_gen:
-                crop_tfm = utils.gen_crop_transform_with_instance(
-                    self.crop_gen.get_crop_size(image.shape[:2]),
-                    image.shape[:2],
-                    np.random.choice(dataset_dict["annotations"]),
-                )
-                image = crop_tfm.apply_image(image)
-            image, transforms = T.apply_transform_gens(self.tfm_gens, image)
-            if self.crop_gen:
-                transforms = crop_tfm + transforms
-
-        image_shape = image.shape[:2]  # h, w
-
-        # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
-        # but not efficient on large generic data structures due to the use of pickle & mp.Queue.
-        # Therefore it's important to use torch.Tensor.
-        dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
-
-        # USER: Remove if you don't use pre-computed proposals.
-        if self.load_proposals:
-            utils.transform_proposals(
-                dataset_dict, image_shape, transforms, self.min_box_side_len, self.proposal_topk
-            )
-
-        if not self.is_train:
-            # USER: Modify this if you want to keep them for some reason.
-            dataset_dict.pop("annotations", None)
-            dataset_dict.pop("sem_seg_file_name", None)
-            return dataset_dict
-
-        if "annotations" in dataset_dict:
-            # USER: Modify this if you want to keep them for some reason.
-            for anno in dataset_dict["annotations"]:
-                if not self.mask_on:
-                    anno.pop("segmentation", None)
-                if not self.keypoint_on:
-                    anno.pop("keypoints", None)
-
-            # USER: Implement additional transformations if you have other types of data
-            annos = [
-                utils.transform_instance_annotations(
-                    obj, transforms, image_shape, keypoint_hflip_indices=self.keypoint_hflip_indices
-                )
-                for obj in dataset_dict.pop("annotations")
-                if obj.get("iscrowd", 0) == 0
-            ]
-            instances = utils.annotations_to_instances(
-                annos, image_shape, mask_format=self.mask_format
-            )
-            # Create a tight bounding box from masks, useful when image is cropped
-            if self.crop_gen and instances.has("gt_masks"):
-                instances.gt_boxes = instances.gt_masks.get_bounding_boxes()
-            dataset_dict["instances"] = utils.filter_empty_instances(instances)
-
-        # USER: Remove if you don't do semantic/panoptic segmentation.
-        if "sem_seg_file_name" in dataset_dict:
-            with PathManager.open(dataset_dict.pop("sem_seg_file_name"), "rb") as f:
-                sem_seg_gt = Image.open(f)
-                sem_seg_gt = np.asarray(sem_seg_gt, dtype="uint8")
-            sem_seg_gt = transforms.apply_segmentation(sem_seg_gt)
-            sem_seg_gt = torch.as_tensor(sem_seg_gt.astype("long"))
-            dataset_dict["sem_seg"] = sem_seg_gt
-        return dataset_dict
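The contract being deleted is simple to state: a DatasetMapper is a picklable callable that takes one dataset dict and returns a dict with an "image" CHW tensor (plus instances during training). A stripped-down sketch of that contract without any detectron2 imports (ToyMapper is illustrative; the real mapper also runs the transform gens elided here):

import copy

import numpy as np
import torch


class ToyMapper:
    """Minimal stand-in for the DatasetMapper contract: dict in, dict out."""

    def __init__(self, is_train=True):
        self.is_train = is_train

    def __call__(self, dataset_dict):
        dataset_dict = copy.deepcopy(dataset_dict)  # never mutate the source dict
        # Stand-in for utils.read_image + apply_transform_gens: a blank HWC image.
        image = np.zeros((dataset_dict["height"], dataset_dict["width"], 3), np.uint8)
        # HWC uint8 -> contiguous CHW tensor, as the builtin models expect.
        dataset_dict["image"] = torch.as_tensor(
            np.ascontiguousarray(image.transpose(2, 0, 1))
        )
        if not self.is_train:
            dataset_dict.pop("annotations", None)
        return dataset_dict


sample = {"file_name": "img.jpg", "height": 4, "width": 6, "annotations": []}
out = ToyMapper(is_train=False)(sample)
assert out["image"].shape == (3, 4, 6) and "annotations" not in out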
spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/extrema.h
DELETED
@@ -1,67 +0,0 @@
-/*
- *  Copyright 2008-2013 NVIDIA Corporation
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-#include <thrust/system/omp/detail/execution_policy.h>
-#include <thrust/system/detail/generic/extrema.h>
-
-namespace thrust
-{
-namespace system
-{
-namespace omp
-{
-namespace detail
-{
-
-template <typename DerivedPolicy, typename ForwardIterator, typename BinaryPredicate>
-ForwardIterator max_element(execution_policy<DerivedPolicy> &exec,
-                            ForwardIterator first,
-                            ForwardIterator last,
-                            BinaryPredicate comp)
-{
-  // omp prefers generic::max_element to cpp::max_element
-  return thrust::system::detail::generic::max_element(exec, first, last, comp);
-} // end max_element()
-
-template <typename DerivedPolicy, typename ForwardIterator, typename BinaryPredicate>
-ForwardIterator min_element(execution_policy<DerivedPolicy> &exec,
-                            ForwardIterator first,
-                            ForwardIterator last,
-                            BinaryPredicate comp)
-{
-  // omp prefers generic::min_element to cpp::min_element
-  return thrust::system::detail::generic::min_element(exec, first, last, comp);
-} // end min_element()
-
-template <typename DerivedPolicy, typename ForwardIterator, typename BinaryPredicate>
-thrust::pair<ForwardIterator,ForwardIterator> minmax_element(execution_policy<DerivedPolicy> &exec,
-                                                             ForwardIterator first,
-                                                             ForwardIterator last,
-                                                             BinaryPredicate comp)
-{
-  // omp prefers generic::minmax_element to cpp::minmax_element
-  return thrust::system::detail::generic::minmax_element(exec, first, last, comp);
-} // end minmax_element()
-
-} // end detail
-} // end omp
-} // end system
-} // end thrust
-
-
spaces/CVPR/WALT/mmdet/core/anchor/__init__.py
DELETED
@@ -1,11 +0,0 @@
-from .anchor_generator import (AnchorGenerator, LegacyAnchorGenerator,
-                               YOLOAnchorGenerator)
-from .builder import ANCHOR_GENERATORS, build_anchor_generator
-from .point_generator import PointGenerator
-from .utils import anchor_inside_flags, calc_region, images_to_levels
-
-__all__ = [
-    'AnchorGenerator', 'LegacyAnchorGenerator', 'anchor_inside_flags',
-    'PointGenerator', 'images_to_levels', 'calc_region',
-    'build_anchor_generator', 'ANCHOR_GENERATORS', 'YOLOAnchorGenerator'
-]
spaces/CVPR/WALT/walt/datasets/cocoeval.py
DELETED
@@ -1,612 +0,0 @@
-__author__ = 'tsungyi'
-
-import numpy as np
-import datetime
-import time
-from collections import defaultdict
-import pycocotools.mask as maskUtils
-import copy
-
-
-
-def xywh_to_xyxy(xywh):
-    """Convert [x1 y1 w h] box format to [x1 y1 x2 y2] format."""
-    if isinstance(xywh, (list, tuple)):
-        # Single box given as a list of coordinates
-        assert len(xywh) == 4
-        x1, y1 = xywh[0], xywh[1]
-        x2 = x1 + np.maximum(0., xywh[2] - 1.)
-        y2 = y1 + np.maximum(0., xywh[3] - 1.)
-        return (x1, y1, x2, y2)
-    elif isinstance(xywh, np.ndarray):
-        # Multiple boxes given as a 2D ndarray
-        return np.hstack(
-            (xywh[:, 0:2], xywh[:, 0:2] + np.maximum(0, xywh[:, 2:4] - 1))
-        )
-    else:
-        raise TypeError('Argument xywh must be a list, tuple, or numpy array.')
-
-def get_iou(pred_box, gt_box):
-    """
-    pred_box : the coordinate for predict bounding box
-    gt_box : the coordinate for ground truth bounding box
-    return : the iou score
-    the left-down coordinate of pred_box:(pred_box[0], pred_box[1])
-    the right-up coordinate of pred_box:(pred_box[2], pred_box[3])
-    """
-    pred_box = xywh_to_xyxy(pred_box)
-    gt_box = xywh_to_xyxy(gt_box)
-    # 1.get the coordinate of inters
-    ixmin = max(pred_box[0], gt_box[0])
-    ixmax = min(pred_box[2], gt_box[2])
-    iymin = max(pred_box[1], gt_box[1])
-    iymax = min(pred_box[3], gt_box[3])
-
-    iw = np.maximum(ixmax-ixmin+1., 0.)
-    ih = np.maximum(iymax-iymin+1., 0.)
-
-    # 2. calculate the area of inters
-    inters = iw*ih
-
-    # 3. calculate the area of union
-    uni = ((pred_box[2]-pred_box[0]+1.) * (pred_box[3]-pred_box[1]+1.) +
-           (gt_box[2] - gt_box[0] + 1.) * (gt_box[3] - gt_box[1] + 1.) -
-           inters)
-
-    # 4. calculate the overlaps between pred_box and gt_box
-    iou = inters / uni
-
-    return iou
-
-
-class COCOeval:
-    # Interface for evaluating detection on the Microsoft COCO dataset.
-    #
-    # The usage for CocoEval is as follows:
-    #  cocoGt=..., cocoDt=...       # load dataset and results
-    #  E = CocoEval(cocoGt,cocoDt); # initialize CocoEval object
-    #  E.params.recThrs = ...;      # set parameters as desired
-    #  E.evaluate();                # run per image evaluation
-    #  E.accumulate();              # accumulate per image results
-    #  E.summarize();               # display summary metrics of results
-    # For example usage see evalDemo.m and http://mscoco.org/.
-    #
-    # The evaluation parameters are as follows (defaults in brackets):
-    #  imgIds     - [all] N img ids to use for evaluation
-    #  catIds     - [all] K cat ids to use for evaluation
-    #  iouThrs    - [.5:.05:.95] T=10 IoU thresholds for evaluation
-    #  recThrs    - [0:.01:1] R=101 recall thresholds for evaluation
-    #  areaRng    - [...] A=4 object area ranges for evaluation
-    #  maxDets    - [1 10 100] M=3 thresholds on max detections per image
-    #  iouType    - ['segm'] set iouType to 'segm', 'bbox' or 'keypoints'
-    #  iouType replaced the now DEPRECATED useSegm parameter.
-    #  useCats    - [1] if true use category labels for evaluation
-    # Note: if useCats=0 category labels are ignored as in proposal scoring.
-    # Note: multiple areaRngs [Ax2] and maxDets [Mx1] can be specified.
-    #
-    # evaluate(): evaluates detections on every image and every category and
-    # concats the results into the "evalImgs" with fields:
-    #  dtIds      - [1xD] id for each of the D detections (dt)
-    #  gtIds      - [1xG] id for each of the G ground truths (gt)
-    #  dtMatches  - [TxD] matching gt id at each IoU or 0
-    #  gtMatches  - [TxG] matching dt id at each IoU or 0
-    #  dtScores   - [1xD] confidence of each dt
-    #  gtIgnore   - [1xG] ignore flag for each gt
-    #  dtIgnore   - [TxD] ignore flag for each dt at each IoU
-    #
-    # accumulate(): accumulates the per-image, per-category evaluation
-    # results in "evalImgs" into the dictionary "eval" with fields:
-    #  params     - parameters used for evaluation
-    #  date       - date evaluation was performed
-    #  counts     - [T,R,K,A,M] parameter dimensions (see above)
-    #  precision  - [TxRxKxAxM] precision for every evaluation setting
-    #  recall     - [TxKxAxM] max recall for every evaluation setting
-    # Note: precision and recall==-1 for settings with no gt objects.
-    #
-    # See also coco, mask, pycocoDemo, pycocoEvalDemo
-    #
-    # Microsoft COCO Toolbox.      version 2.0
-    # Data, paper, and tutorials available at: http://mscoco.org/
-    # Code written by Piotr Dollar and Tsung-Yi Lin, 2015.
-    # Licensed under the Simplified BSD License [see coco/license.txt]
-    def __init__(self, cocoGt=None, cocoDt=None, iouType='segm'):
-        '''
-        Initialize CocoEval using coco APIs for gt and dt
-        :param cocoGt: coco object with ground truth annotations
-        :param cocoDt: coco object with detection results
-        :return: None
-        '''
-        if not iouType:
-            print('iouType not specified. use default iouType segm')
-        self.cocoGt   = cocoGt              # ground truth COCO API
-        self.cocoDt   = cocoDt              # detections COCO API
-        self.evalImgs = defaultdict(list)   # per-image per-category evaluation results [KxAxI] elements
-        self.eval     = {}                  # accumulated evaluation results
-        self._gts = defaultdict(list)       # gt for evaluation
-        self._dts = defaultdict(list)       # dt for evaluation
-        self.params = Params(iouType=iouType) # parameters
-        self._paramsEval = {}               # parameters for evaluation
-        self.stats = []                     # result summarization
-        self.ious = {}                      # ious between all gts and dts
-        self.percentage_occ = 0
-        if not cocoGt is None:
-            self.params.imgIds = sorted(cocoGt.getImgIds())
-            self.params.catIds = sorted(cocoGt.getCatIds())
-
-
-    def _prepare(self):
-        '''
-        Prepare ._gts and ._dts for evaluation based on params
-        :return: None
-        '''
-        def _toMask(anns, coco):
-            # modify ann['segmentation'] by reference
-            for ann in anns:
-                rle = coco.annToRLE(ann)
-                ann['segmentation'] = rle
-        p = self.params
-        if p.useCats:
-            gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))
-            dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))
-        else:
-            gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds))
-            dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds))
-
-        if self.percentage_occ >= 0:
-            gts_new = []
-            indices = []
-            for gt in gts:
-                #print(gt['occ_percentage'], self.percentage_occ)
-                if gt['occ_percentage'] >= self.percentage_occ*10 and gt['occ_percentage'] <(self.percentage_occ+1)*10:
-                    for ind, dt in enumerate(dts):
-                        if ind in indices or dt['image_id'] != gt['image_id']:
-                            continue
-                        #print(dt['image_id'], gt['image_id'])
-                        if get_iou(gt['bbox'], dt['bbox']) >0.4:
-                            indices.append(ind)
-                    gts_new.append(gt)
-
-            dts_new = []
-            for i in np.unique(indices):
-                dts_new.append(dts[i])
-
-            #print(len(gts_new), len(gts), len(dts), len(dts_new), len(indices))
-            dts = dts_new
-            gts = gts_new
-        '''
-        '''
-
-        # convert ground truth to mask if iouType == 'segm'
-        if p.iouType == 'segm':
-            _toMask(gts, self.cocoGt)
-            _toMask(dts, self.cocoDt)
-        # set ignore flag
-        for gt in gts:
-            gt['ignore'] = gt['ignore'] if 'ignore' in gt else 0
-            gt['ignore'] = 'iscrowd' in gt and gt['iscrowd']
-            if p.iouType == 'keypoints':
-                gt['ignore'] = (gt['num_keypoints'] == 0) or gt['ignore']
-        self._gts = defaultdict(list)       # gt for evaluation
-        self._dts = defaultdict(list)       # dt for evaluation
-        for gt in gts:
-            self._gts[gt['image_id'], gt['category_id']].append(gt)
-        for dt in dts:
-            self._dts[dt['image_id'], dt['category_id']].append(dt)
-        self.evalImgs = defaultdict(list)   # per-image per-category evaluation results
-        self.eval     = {}                  # accumulated evaluation results
-
-    def evaluate(self):
-        '''
-        Run per image evaluation on given images and store results (a list of dict) in self.evalImgs
-        :return: None
-        '''
-        tic = time.time()
-        print('Running per image evaluation...')
-        p = self.params
-        # add backward compatibility if useSegm is specified in params
-        if not p.useSegm is None:
-            p.iouType = 'segm' if p.useSegm == 1 else 'bbox'
-            print('useSegm (deprecated) is not None. Running {} evaluation'.format(p.iouType))
-        print('Evaluate annotation type *{}*'.format(p.iouType))
-        p.imgIds = list(np.unique(p.imgIds))
-        if p.useCats:
-            p.catIds = list(np.unique(p.catIds))
-        p.maxDets = sorted(p.maxDets)
-        self.params=p
-
-        self._prepare()
-        # loop through images, area range, max detection number
-        catIds = p.catIds if p.useCats else [-1]
-
-        if p.iouType == 'segm' or p.iouType == 'bbox':
-            computeIoU = self.computeIoU
-        elif p.iouType == 'keypoints':
-            computeIoU = self.computeOks
-        self.ious = {(imgId, catId): computeIoU(imgId, catId) \
-                        for imgId in p.imgIds
-                        for catId in catIds}
-
-        evaluateImg = self.evaluateImg
-        maxDet = p.maxDets[-1]
-        self.evalImgs = [evaluateImg(imgId, catId, areaRng, maxDet)
-                 for catId in catIds
-                 for areaRng in p.areaRng
-                 for imgId in p.imgIds
-             ]
-        self._paramsEval = copy.deepcopy(self.params)
-        toc = time.time()
-        print('DONE (t={:0.2f}s).'.format(toc-tic))
-
-    def computeIoU(self, imgId, catId):
-        # dts_new.append(dt)
-        p = self.params
-        if p.useCats:
-            gt = self._gts[imgId,catId]
-            dt = self._dts[imgId,catId]
-        else:
-            gt = [_ for cId in p.catIds for _ in self._gts[imgId,cId]]
-            dt = [_ for cId in p.catIds for _ in self._dts[imgId,cId]]
-        if len(gt) == 0 and len(dt) ==0:
-            return []
-        inds = np.argsort([-d['score'] for d in dt], kind='mergesort')
-        dt = [dt[i] for i in inds]
-        if len(dt) > p.maxDets[-1]:
-            dt=dt[0:p.maxDets[-1]]
-
-        if p.iouType == 'segm':
-            g = [g['segmentation'] for g in gt]
-            d = [d['segmentation'] for d in dt]
-        elif p.iouType == 'bbox':
-            g = [g['bbox'] for g in gt]
-            d = [d['bbox'] for d in dt]
-        else:
-            raise Exception('unknown iouType for iou computation')
-
-        # compute iou between each dt and gt region
-        iscrowd = [int(o['iscrowd']) for o in gt]
-        ious = maskUtils.iou(d,g,iscrowd)
-        return ious
-
-    def computeOks(self, imgId, catId):
-        p = self.params
-        # dimention here should be Nxm
-        gts = self._gts[imgId, catId]
-        dts = self._dts[imgId, catId]
-        inds = np.argsort([-d['score'] for d in dts], kind='mergesort')
-        dts = [dts[i] for i in inds]
-        if len(dts) > p.maxDets[-1]:
-            dts = dts[0:p.maxDets[-1]]
-        # if len(gts) == 0 and len(dts) == 0:
-        if len(gts) == 0 or len(dts) == 0:
-            return []
-        ious = np.zeros((len(dts), len(gts)))
-        sigmas = p.kpt_oks_sigmas
-        vars = (sigmas * 2)**2
-        k = len(sigmas)
-        # compute oks between each detection and ground truth object
-        for j, gt in enumerate(gts):
-            # create bounds for ignore regions(double the gt bbox)
-            g = np.array(gt['keypoints'])
-            xg = g[0::3]; yg = g[1::3]; vg = g[2::3]
-            k1 = np.count_nonzero(vg > 0)
-            bb = gt['bbox']
-            x0 = bb[0] - bb[2]; x1 = bb[0] + bb[2] * 2
-            y0 = bb[1] - bb[3]; y1 = bb[1] + bb[3] * 2
-            for i, dt in enumerate(dts):
-                d = np.array(dt['keypoints'])
-                xd = d[0::3]; yd = d[1::3]
-                if k1>0:
-                    # measure the per-keypoint distance if keypoints visible
-                    dx = xd - xg
-                    dy = yd - yg
-                else:
-                    # measure minimum distance to keypoints in (x0,y0) & (x1,y1)
-                    z = np.zeros((k))
-                    dx = np.max((z, x0-xd),axis=0)+np.max((z, xd-x1),axis=0)
-                    dy = np.max((z, y0-yd),axis=0)+np.max((z, yd-y1),axis=0)
-                e = (dx**2 + dy**2) / vars / (gt['area']+np.spacing(1)) / 2
-                if k1 > 0:
-                    e=e[vg > 0]
-                ious[i, j] = np.sum(np.exp(-e)) / e.shape[0]
-        return ious
-
-    def evaluateImg(self, imgId, catId, aRng, maxDet):
-        '''
-        perform evaluation for single category and image
-        :return: dict (single image results)
-        '''
-        p = self.params
-        if p.useCats:
-            gt = self._gts[imgId,catId]
-            dt = self._dts[imgId,catId]
-        else:
-            gt = [_ for cId in p.catIds for _ in self._gts[imgId,cId]]
-            dt = [_ for cId in p.catIds for _ in self._dts[imgId,cId]]
-        if len(gt) == 0 and len(dt) ==0:
-            return None
-
-        for g in gt:
-            if g['ignore'] or (g['area']<aRng[0] or g['area']>aRng[1]):
-                g['_ignore'] = 1
-            else:
-                g['_ignore'] = 0
-
-        # sort dt highest score first, sort gt ignore last
-        gtind = np.argsort([g['_ignore'] for g in gt], kind='mergesort')
-        gt = [gt[i] for i in gtind]
-        dtind = np.argsort([-d['score'] for d in dt], kind='mergesort')
-        dt = [dt[i] for i in dtind[0:maxDet]]
-        iscrowd = [int(o['iscrowd']) for o in gt]
-        # load computed ious
-        ious = self.ious[imgId, catId][:, gtind] if len(self.ious[imgId, catId]) > 0 else self.ious[imgId, catId]
-
-        T = len(p.iouThrs)
-        G = len(gt)
-        D = len(dt)
-        gtm  = np.zeros((T,G))
-        dtm  = np.zeros((T,D))
-        gtIg = np.array([g['_ignore'] for g in gt])
-        dtIg = np.zeros((T,D))
-        if not len(ious)==0:
-            for tind, t in enumerate(p.iouThrs):
-                for dind, d in enumerate(dt):
-                    # information about best match so far (m=-1 -> unmatched)
-                    iou = min([t,1-1e-10])
-                    m   = -1
-                    for gind, g in enumerate(gt):
-                        # if this gt already matched, and not a crowd, continue
-                        if gtm[tind,gind]>0 and not iscrowd[gind]:
-                            continue
-                        # if dt matched to reg gt, and on ignore gt, stop
-                        if m>-1 and gtIg[m]==0 and gtIg[gind]==1:
-                            break
-                        # continue to next gt unless better match made
-                        if ious[dind,gind] < iou:
-                            continue
-                        # if match successful and best so far, store appropriately
-                        iou=ious[dind,gind]
-                        m=gind
-                    # if match made store id of match for both dt and gt
-                    if m ==-1:
-                        continue
-                    dtIg[tind,dind] = gtIg[m]
-                    dtm[tind,dind]  = gt[m]['id']
-                    gtm[tind,m]     = d['id']
-        # set unmatched detections outside of area range to ignore
-        a = np.array([d['area']<aRng[0] or d['area']>aRng[1] for d in dt]).reshape((1, len(dt)))
-        dtIg = np.logical_or(dtIg, np.logical_and(dtm==0, np.repeat(a,T,0)))
-        # store results for given image and category
-        return {
-                'image_id':     imgId,
-                'category_id':  catId,
-                'aRng':         aRng,
-                'maxDet':       maxDet,
-                'dtIds':        [d['id'] for d in dt],
-                'gtIds':        [g['id'] for g in gt],
-                'dtMatches':    dtm,
-                'gtMatches':    gtm,
-                'dtScores':     [d['score'] for d in dt],
-                'gtIgnore':     gtIg,
-                'dtIgnore':     dtIg,
-            }
-
-    def accumulate(self, p = None):
-        '''
-        Accumulate per image evaluation results and store the result in self.eval
-        :param p: input params for evaluation
-        :return: None
-        '''
-        print('Accumulating evaluation results...')
-        tic = time.time()
-        if not self.evalImgs:
-            print('Please run evaluate() first')
-        # allows input customized parameters
-        if p is None:
-            p = self.params
-        p.catIds = p.catIds if p.useCats == 1 else [-1]
-        T           = len(p.iouThrs)
-        R           = len(p.recThrs)
-        K           = len(p.catIds) if p.useCats else 1
-        A           = len(p.areaRng)
-        M           = len(p.maxDets)
-        precision   = -np.ones((T,R,K,A,M)) # -1 for the precision of absent categories
-        recall      = -np.ones((T,K,A,M))
-        scores      = -np.ones((T,R,K,A,M))
-
-        # create dictionary for future indexing
-        _pe = self._paramsEval
-        catIds = _pe.catIds if _pe.useCats else [-1]
-        setK = set(catIds)
-        setA = set(map(tuple, _pe.areaRng))
-        setM = set(_pe.maxDets)
-        setI = set(_pe.imgIds)
-        # get inds to evaluate
-        k_list = [n for n, k in enumerate(p.catIds)  if k in setK]
-        m_list = [m for n, m in enumerate(p.maxDets) if m in setM]
-        a_list = [n for n, a in enumerate(map(lambda x: tuple(x), p.areaRng)) if a in setA]
-        i_list = [n for n, i in enumerate(p.imgIds)  if i in setI]
-        I0 = len(_pe.imgIds)
-        A0 = len(_pe.areaRng)
-        # retrieve E at each category, area range, and max number of detections
-        for k, k0 in enumerate(k_list):
-            Nk = k0*A0*I0
-            for a, a0 in enumerate(a_list):
-                Na = a0*I0
-                for m, maxDet in enumerate(m_list):
-                    E = [self.evalImgs[Nk + Na + i] for i in i_list]
-                    E = [e for e in E if not e is None]
-                    if len(E) == 0:
-                        continue
-                    dtScores = np.concatenate([e['dtScores'][0:maxDet] for e in E])
-
-                    # different sorting method generates slightly different results.
-                    # mergesort is used to be consistent as Matlab implementation.
-                    inds = np.argsort(-dtScores, kind='mergesort')
-                    dtScoresSorted = dtScores[inds]
-
-                    dtm  = np.concatenate([e['dtMatches'][:,0:maxDet] for e in E], axis=1)[:,inds]
-                    dtIg = np.concatenate([e['dtIgnore'][:,0:maxDet]  for e in E], axis=1)[:,inds]
-                    gtIg = np.concatenate([e['gtIgnore'] for e in E])
-                    npig = np.count_nonzero(gtIg==0 )
-                    if npig == 0:
-                        continue
-                    tps = np.logical_and(               dtm,  np.logical_not(dtIg) )
-                    fps = np.logical_and(np.logical_not(dtm), np.logical_not(dtIg) )
-
-                    tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float)
-                    fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float)
-                    for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)):
-                        tp = np.array(tp)
-                        fp = np.array(fp)
-                        nd = len(tp)
-                        rc = tp / npig
-                        pr = tp / (fp+tp+np.spacing(1))
-                        q  = np.zeros((R,))
-                        ss = np.zeros((R,))
-
-                        if nd:
-                            recall[t,k,a,m] = rc[-1]
-                        else:
-                            recall[t,k,a,m] = 0
-
-                        # numpy is slow without cython optimization for accessing elements
-                        # use python array gets significant speed improvement
-                        pr = pr.tolist(); q = q.tolist()
-
-                        for i in range(nd-1, 0, -1):
-                            if pr[i] > pr[i-1]:
-                                pr[i-1] = pr[i]
-
-                        inds = np.searchsorted(rc, p.recThrs, side='left')
-                        try:
-                            for ri, pi in enumerate(inds):
-                                q[ri] = pr[pi]
-                                ss[ri] = dtScoresSorted[pi]
-                        except:
-                            pass
-                        precision[t,:,k,a,m] = np.array(q)
-                        scores[t,:,k,a,m] = np.array(ss)
-        self.eval = {
-            'params': p,
-            'counts': [T, R, K, A, M],
-            'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
-            'precision': precision,
-            'recall':   recall,
-            'scores': scores,
-        }
-        toc = time.time()
-        print('DONE (t={:0.2f}s).'.format( toc-tic))
-
-    def summarize(self):
-        '''
-        Compute and display summary metrics for evaluation results.
-        Note this functin can *only* be applied on the default parameter setting
-        '''
-        def _summarize( ap=1, iouThr=None, areaRng='all', maxDets=100 ):
-            p = self.params
-            iStr = ' {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}'
-            titleStr = 'Average Precision' if ap == 1 else 'Average Recall'
-            typeStr = '(AP)' if ap==1 else '(AR)'
-            iouStr = '{:0.2f}:{:0.2f}'.format(p.iouThrs[0], p.iouThrs[-1]) \
-                if iouThr is None else '{:0.2f}'.format(iouThr)
-
-            aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]
-            mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]
-            if ap == 1:
-                # dimension of precision: [TxRxKxAxM]
-                s = self.eval['precision']
-                # IoU
-                if iouThr is not None:
-                    t = np.where(iouThr == p.iouThrs)[0]
-                    s = s[t]
-                s = s[:,:,:,aind,mind]
-            else:
-                # dimension of recall: [TxKxAxM]
-                s = self.eval['recall']
-                if iouThr is not None:
-                    t = np.where(iouThr == p.iouThrs)[0]
-                    s = s[t]
-                s = s[:,:,aind,mind]
-            if len(s[s>-1])==0:
-                mean_s = -1
-            else:
-                mean_s = np.mean(s[s>-1])
-            print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s))
-            return mean_s
-        def _summarizeDets():
-            stats = np.zeros((12,))
-            stats[0] = _summarize(1)
-            stats[1] = _summarize(1, iouThr=.5, maxDets=self.params.maxDets[2])
-            stats[2] = _summarize(1, iouThr=.75, maxDets=self.params.maxDets[2])
-            stats[3] = _summarize(1, areaRng='small', maxDets=self.params.maxDets[2])
-            stats[4] = _summarize(1, areaRng='medium', maxDets=self.params.maxDets[2])
-            stats[5] = _summarize(1, areaRng='large', maxDets=self.params.maxDets[2])
-            stats[6] = _summarize(0, maxDets=self.params.maxDets[0])
-            stats[7] = _summarize(0, maxDets=self.params.maxDets[1])
-            stats[8] = _summarize(0, maxDets=self.params.maxDets[2])
-            stats[9] = _summarize(0, areaRng='small', maxDets=self.params.maxDets[2])
-            stats[10] = _summarize(0, areaRng='medium', maxDets=self.params.maxDets[2])
-            stats[11] = _summarize(0, areaRng='large', maxDets=self.params.maxDets[2])
-            return stats
-        def _summarizeKps():
-            stats = np.zeros((10,))
-            stats[0] = _summarize(1, maxDets=20)
-            stats[1] = _summarize(1, maxDets=20, iouThr=.5)
-            stats[2] = _summarize(1, maxDets=20, iouThr=.75)
-            stats[3] = _summarize(1, maxDets=20, areaRng='medium')
-            stats[4] = _summarize(1, maxDets=20, areaRng='large')
-            stats[5] = _summarize(0, maxDets=20)
-            stats[6] = _summarize(0, maxDets=20, iouThr=.5)
-            stats[7] = _summarize(0, maxDets=20, iouThr=.75)
-            stats[8] = _summarize(0, maxDets=20, areaRng='medium')
-            stats[9] = _summarize(0, maxDets=20, areaRng='large')
-            return stats
-        if not self.eval:
-            raise Exception('Please run accumulate() first')
-        iouType = self.params.iouType
-        if iouType == 'segm' or iouType == 'bbox':
-            summarize = _summarizeDets
-        elif iouType == 'keypoints':
-            summarize = _summarizeKps
-        self.stats = summarize()
-
-    def __str__(self):
-        self.summarize()
-
-class Params:
-    '''
-    Params for coco evaluation api
-    '''
-    def setDetParams(self):
-        self.imgIds = []
-        self.catIds = []
-        # np.arange causes trouble.  the data point on arange is slightly larger than the true value
-        self.iouThrs = np.linspace(.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
-        self.recThrs = np.linspace(.0, 1.00, int(np.round((1.00 - .0) / .01)) + 1, endpoint=True)
-        self.maxDets = [1, 10, 100]
-        self.areaRng = [[0 ** 2, 1e5 ** 2], [0 ** 2, 32 ** 2], [32 ** 2, 96 ** 2], [96 ** 2, 1e5 ** 2]]
-        self.areaRngLbl = ['all', 'small', 'medium', 'large']
-        self.useCats = 1
-
-    def setKpParams(self):
-        self.imgIds = []
-        self.catIds = []
-        # np.arange causes trouble.  the data point on arange is slightly larger than the true value
-        self.iouThrs = np.linspace(.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
-        self.recThrs = np.linspace(.0, 1.00, int(np.round((1.00 - .0) / .01)) + 1, endpoint=True)
-        self.maxDets = [20]
-        self.areaRng = [[0 ** 2, 1e5 ** 2], [32 ** 2, 96 ** 2], [96 ** 2, 1e5 ** 2]]
-        self.areaRngLbl = ['all', 'medium', 'large']
-        self.useCats = 1
-        self.kpt_oks_sigmas = np.array([.26, .25, .25, .35, .35, .79, .79, .72, .72, .62,.62, 1.07, 1.07, .87, .87, .89, .89])/10.0
-
-    def __init__(self, iouType='segm'):
-        if iouType == 'segm' or iouType == 'bbox':
-            self.setDetParams()
-        elif iouType == 'keypoints':
-            self.setKpParams()
-        else:
-            raise Exception('iouType not supported')
-        self.iouType = iouType
-        # useSegm is deprecated
-        self.useSegm = None
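One concrete check of the get_iou helper above: with the +1-pixel endpoint convention used throughout (COCO-style [x, y, w, h] boxes), identical boxes score exactly 1.0 and a pair of 10x10 boxes offset by half a width scores 1/3. A self-contained re-derivation of the same arithmetic (iou_xywh is an illustrative inline version, not the deleted function itself):

def iou_xywh(a, b):
    # xywh -> xyxy with the same "-1" endpoint convention as xywh_to_xyxy
    ax2, ay2 = a[0] + a[2] - 1.0, a[1] + a[3] - 1.0
    bx2, by2 = b[0] + b[2] - 1.0, b[1] + b[3] - 1.0
    iw = max(min(ax2, bx2) - max(a[0], b[0]) + 1.0, 0.0)
    ih = max(min(ay2, by2) - max(a[1], b[1]) + 1.0, 0.0)
    inter = iw * ih
    union = a[2] * a[3] + b[2] * b[3] - inter
    return inter / union

assert iou_xywh([0, 0, 10, 10], [0, 0, 10, 10]) == 1.0
# 10x10 boxes offset by 5 in x: inter = 5*10 = 50, union = 150 -> IoU = 1/3
assert abs(iou_xywh([0, 0, 10, 10], [5, 0, 10, 10]) - 1.0 / 3.0) < 1e-9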
spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/util/slconfig.py
DELETED
@@ -1,424 +0,0 @@
|
|
1 |
-
# ==========================================================
|
2 |
-
# Modified from mmcv
|
3 |
-
# ==========================================================
|
4 |
-
import ast
|
5 |
-
import os.path as osp
|
6 |
-
import shutil
|
7 |
-
import sys
|
8 |
-
import tempfile
|
9 |
-
from argparse import Action
|
10 |
-
from importlib import import_module
|
11 |
-
|
12 |
-
from addict import Dict
|
13 |
-
from yapf.yapflib.yapf_api import FormatCode
|
14 |
-
|
15 |
-
BASE_KEY = "_base_"
|
16 |
-
DELETE_KEY = "_delete_"
|
17 |
-
RESERVED_KEYS = ["filename", "text", "pretty_text", "get", "dump", "merge_from_dict"]
|
18 |
-
|
19 |
-
|
20 |
-
def check_file_exist(filename, msg_tmpl='file "{}" does not exist'):
|
21 |
-
if not osp.isfile(filename):
|
22 |
-
raise FileNotFoundError(msg_tmpl.format(filename))
|
23 |
-
|
24 |
-
|
25 |
-
class ConfigDict(Dict):
|
26 |
-
def __missing__(self, name):
|
27 |
-
raise KeyError(name)
|
28 |
-
|
29 |
-
def __getattr__(self, name):
|
30 |
-
try:
|
31 |
-
value = super(ConfigDict, self).__getattr__(name)
|
32 |
-
except KeyError:
|
33 |
-
ex = AttributeError(f"'{self.__class__.__name__}' object has no " f"attribute '{name}'")
|
34 |
-
except Exception as e:
|
35 |
-
ex = e
|
36 |
-
else:
|
37 |
-
return value
|
38 |
-
raise ex
|
39 |
-
|
40 |
-
|
41 |
-
class SLConfig(object):
|
42 |
-
"""
|
43 |
-
config files.
|
44 |
-
only support .py file as config now.
|
45 |
-
|
46 |
-
ref: mmcv.utils.config
|
47 |
-
|
48 |
-
Example:
|
49 |
-
>>> cfg = Config(dict(a=1, b=dict(b1=[0, 1])))
|
50 |
-
>>> cfg.a
|
51 |
-
1
|
52 |
-
>>> cfg.b
|
53 |
-
{'b1': [0, 1]}
|
54 |
-
>>> cfg.b.b1
|
55 |
-
[0, 1]
|
56 |
-
>>> cfg = Config.fromfile('tests/data/config/a.py')
|
57 |
-
>>> cfg.filename
|
58 |
-
"/home/kchen/projects/mmcv/tests/data/config/a.py"
|
59 |
-
>>> cfg.item4
|
60 |
-
'test'
|
61 |
-
>>> cfg
|
62 |
-
"Config [path: /home/kchen/projects/mmcv/tests/data/config/a.py]: "
|
63 |
-
"{'item1': [1, 2], 'item2': {'a': 0}, 'item3': True, 'item4': 'test'}"
|
64 |
-
"""
|
65 |
-
|
66 |
-
@staticmethod
|
67 |
-
def _validate_py_syntax(filename):
|
68 |
-
with open(filename) as f:
|
69 |
-
content = f.read()
|
70 |
-
try:
|
71 |
-
ast.parse(content)
|
72 |
-
except SyntaxError:
|
73 |
-
raise SyntaxError("There are syntax errors in config " f"file {filename}")
|
74 |
-
|
75 |
-
@staticmethod
|
76 |
-
def _file2dict(filename):
|
77 |
-
filename = osp.abspath(osp.expanduser(filename))
|
78 |
-
check_file_exist(filename)
|
79 |
-
if filename.lower().endswith(".py"):
|
80 |
-
with tempfile.TemporaryDirectory() as temp_config_dir:
|
81 |
-
temp_config_file = tempfile.NamedTemporaryFile(dir=temp_config_dir, suffix=".py")
|
82 |
-
temp_config_name = osp.basename(temp_config_file.name)
|
83 |
-
shutil.copyfile(filename, osp.join(temp_config_dir, temp_config_name))
|
84 |
-
temp_module_name = osp.splitext(temp_config_name)[0]
|
85 |
-
sys.path.insert(0, temp_config_dir)
|
86 |
-
SLConfig._validate_py_syntax(filename)
|
87 |
-
mod = import_module(temp_module_name)
|
88 |
-
sys.path.pop(0)
|
89 |
-
cfg_dict = {
|
90 |
-
name: value for name, value in mod.__dict__.items() if not name.startswith("__")
|
91 |
-
}
|
92 |
-
# delete imported module
|
93 |
-
del sys.modules[temp_module_name]
|
94 |
-
# close temp file
|
95 |
-
temp_config_file.close()
|
96 |
-
elif filename.lower().endswith((".yml", ".yaml", ".json")):
|
97 |
-
from .slio import slload
|
98 |
-
|
99 |
-
cfg_dict = slload(filename)
|
100 |
-
else:
|
101 |
-
raise IOError("Only py/yml/yaml/json type are supported now!")
|
102 |
-
|
103 |
-
cfg_text = filename + "\n"
|
104 |
-
with open(filename, "r") as f:
|
105 |
-
cfg_text += f.read()
|
106 |
-
|
107 |
-
# parse the base file
|
108 |
-
if BASE_KEY in cfg_dict:
|
109 |
-
cfg_dir = osp.dirname(filename)
|
110 |
-
base_filename = cfg_dict.pop(BASE_KEY)
|
111 |
-
base_filename = base_filename if isinstance(base_filename, list) else [base_filename]
|
112 |
-
|
113 |
-
cfg_dict_list = list()
|
114 |
-
cfg_text_list = list()
|
115 |
-
for f in base_filename:
|
116 |
-
_cfg_dict, _cfg_text = SLConfig._file2dict(osp.join(cfg_dir, f))
|
117 |
-
cfg_dict_list.append(_cfg_dict)
|
118 |
-
cfg_text_list.append(_cfg_text)
|
119 |
-
|
120 |
-
base_cfg_dict = dict()
|
121 |
-
for c in cfg_dict_list:
|
122 |
-
if len(base_cfg_dict.keys() & c.keys()) > 0:
|
123 |
-
raise KeyError("Duplicate key is not allowed among bases")
|
124 |
-
# TODO Allow the duplicate key while warnning user
|
125 |
-
base_cfg_dict.update(c)
|
126 |
-
|
127 |
-
base_cfg_dict = SLConfig._merge_a_into_b(cfg_dict, base_cfg_dict)
|
128 |
-
cfg_dict = base_cfg_dict
|
129 |
-
|
130 |
-
# merge cfg_text
|
131 |
-
cfg_text_list.append(cfg_text)
|
132 |
-
cfg_text = "\n".join(cfg_text_list)
|
133 |
-
|
134 |
-
return cfg_dict, cfg_text
|
135 |
-
|
136 |
-
@staticmethod
|
137 |
-
def _merge_a_into_b(a, b):
|
138 |
-
"""merge dict `a` into dict `b` (non-inplace).
|
139 |
-
values in `a` will overwrite `b`.
|
140 |
-
copy first to avoid inplace modification
|
141 |
-
|
142 |
-
Args:
|
143 |
-
a ([type]): [description]
|
144 |
-
b ([type]): [description]
|
145 |
-
|
146 |
-
Returns:
|
147 |
-
[dict]: [description]
|
148 |
-
"""
|
149 |
-
# import ipdb; ipdb.set_trace()
|
150 |
-
if not isinstance(a, dict):
|
151 |
-
return a
|
152 |
-
|
153 |
-
b = b.copy()
|
154 |
-
for k, v in a.items():
|
155 |
-
if isinstance(v, dict) and k in b and not v.pop(DELETE_KEY, False):
|
156 |
-
|
157 |
-
if not isinstance(b[k], dict) and not isinstance(b[k], list):
|
158 |
-
# if :
|
159 |
-
# import ipdb; ipdb.set_trace()
|
160 |
-
raise TypeError(
|
161 |
-
f"{k}={v} in child config cannot inherit from base "
|
162 |
-
f"because {k} is a dict in the child config but is of "
|
163 |
-
f"type {type(b[k])} in base config. You may set "
|
164 |
-
f"`{DELETE_KEY}=True` to ignore the base config"
|
165 |
-
)
|
166 |
-
b[k] = SLConfig._merge_a_into_b(v, b[k])
|
167 |
-
elif isinstance(b, list):
|
168 |
-
try:
|
169 |
-
_ = int(k)
|
170 |
-
except:
|
171 |
-
raise TypeError(
|
172 |
-
f"b is a list, " f"index {k} should be an int when input but {type(k)}"
|
173 |
-
)
|
174 |
-
b[int(k)] = SLConfig._merge_a_into_b(v, b[int(k)])
|
175 |
-
else:
|
176 |
-
b[k] = v
|
177 |
-
|
178 |
-
return b
|
179 |
-
|
180 |
-
@staticmethod
|
181 |
-
def fromfile(filename):
|
182 |
-
cfg_dict, cfg_text = SLConfig._file2dict(filename)
|
183 |
-
return SLConfig(cfg_dict, cfg_text=cfg_text, filename=filename)
|
184 |
-
|
185 |
-
def __init__(self, cfg_dict=None, cfg_text=None, filename=None):
|
186 |
-
if cfg_dict is None:
|
187 |
-
cfg_dict = dict()
|
188 |
-
elif not isinstance(cfg_dict, dict):
|
189 |
-
raise TypeError("cfg_dict must be a dict, but " f"got {type(cfg_dict)}")
|
190 |
-
for key in cfg_dict:
|
191 |
-
if key in RESERVED_KEYS:
|
192 |
-
raise KeyError(f"{key} is reserved for config file")
|
193 |
-
|
194 |
-
super(SLConfig, self).__setattr__("_cfg_dict", ConfigDict(cfg_dict))
|
195 |
-
super(SLConfig, self).__setattr__("_filename", filename)
|
196 |
-
if cfg_text:
|
197 |
-
text = cfg_text
|
198 |
-
elif filename:
|
199 |
-
with open(filename, "r") as f:
|
200 |
-
text = f.read()
|
201 |
-
else:
|
202 |
-
text = ""
|
203 |
-
super(SLConfig, self).__setattr__("_text", text)
|
204 |
-
|
205 |
-
    @property
    def filename(self):
        return self._filename

    @property
    def text(self):
        return self._text

    @property
    def pretty_text(self):
        indent = 4

        def _indent(s_, num_spaces):
            s = s_.split("\n")
            if len(s) == 1:
                return s_
            first = s.pop(0)
            s = [(num_spaces * " ") + line for line in s]
            s = "\n".join(s)
            s = first + "\n" + s
            return s

        def _format_basic_types(k, v, use_mapping=False):
            if isinstance(v, str):
                v_str = f"'{v}'"
            else:
                v_str = str(v)

            if use_mapping:
                k_str = f"'{k}'" if isinstance(k, str) else str(k)
                attr_str = f"{k_str}: {v_str}"
            else:
                attr_str = f"{str(k)}={v_str}"
            attr_str = _indent(attr_str, indent)

            return attr_str

        def _format_list(k, v, use_mapping=False):
            # use the expanded dict() form only if all items in the list are dicts
            if all(isinstance(_, dict) for _ in v):
                v_str = "[\n"
                v_str += "\n".join(
                    f"dict({_indent(_format_dict(v_), indent)})," for v_ in v
                ).rstrip(",")
                if use_mapping:
                    k_str = f"'{k}'" if isinstance(k, str) else str(k)
                    attr_str = f"{k_str}: {v_str}"
                else:
                    attr_str = f"{str(k)}={v_str}"
                attr_str = _indent(attr_str, indent) + "]"
            else:
                attr_str = _format_basic_types(k, v, use_mapping)
            return attr_str

        def _contain_invalid_identifier(dict_str):
            contain_invalid_identifier = False
            for key_name in dict_str:
                contain_invalid_identifier |= not str(key_name).isidentifier()
            return contain_invalid_identifier

        def _format_dict(input_dict, outest_level=False):
            r = ""
            s = []

            use_mapping = _contain_invalid_identifier(input_dict)
            if use_mapping:
                r += "{"
            for idx, (k, v) in enumerate(input_dict.items()):
                is_last = idx >= len(input_dict) - 1
                end = "" if outest_level or is_last else ","
                if isinstance(v, dict):
                    v_str = "\n" + _format_dict(v)
                    if use_mapping:
                        k_str = f"'{k}'" if isinstance(k, str) else str(k)
                        attr_str = f"{k_str}: dict({v_str}"
                    else:
                        attr_str = f"{str(k)}=dict({v_str}"
                    attr_str = _indent(attr_str, indent) + ")" + end
                elif isinstance(v, list):
                    attr_str = _format_list(k, v, use_mapping) + end
                else:
                    attr_str = _format_basic_types(k, v, use_mapping) + end

                s.append(attr_str)
            r += "\n".join(s)
            if use_mapping:
                r += "}"
            return r

        cfg_dict = self._cfg_dict.to_dict()
        text = _format_dict(cfg_dict, outest_level=True)
        # yapf style options copied from setup.cfg
        yapf_style = dict(
            based_on_style="pep8",
            blank_line_before_nested_class_or_def=True,
            split_before_expression_after_opening_paren=True,
        )
        text, _ = FormatCode(text, style_config=yapf_style, verify=True)

        return text

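    # Rough sketch of the expected output (hypothetical config; exact
    # whitespace depends on yapf):
    #
    #   SLConfig(dict(model=dict(depth=50))).pretty_text
    #   # -> "model = dict(depth=50)\n"
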
    def __repr__(self):
        return f"Config (path: {self.filename}): {self._cfg_dict.__repr__()}"

    def __len__(self):
        return len(self._cfg_dict)

    def __getattr__(self, name):
        return getattr(self._cfg_dict, name)

    def __getitem__(self, name):
        return self._cfg_dict.__getitem__(name)

    def __setattr__(self, name, value):
        if isinstance(value, dict):
            value = ConfigDict(value)
        self._cfg_dict.__setattr__(name, value)

    def __setitem__(self, name, value):
        if isinstance(value, dict):
            value = ConfigDict(value)
        self._cfg_dict.__setitem__(name, value)

    def __iter__(self):
        return iter(self._cfg_dict)

    def dump(self, file=None):
        if file is None:
            return self.pretty_text
        else:
            with open(file, "w") as f:
                f.write(self.pretty_text)

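    # Usage sketch (hypothetical names, not part of the original code):
    # attribute and item access are both delegated to the inner ConfigDict.
    #
    #   cfg.lr = 0.01            # equivalent to cfg["lr"] = 0.01
    #   cfg.dump("cfg_dump.py")  # writes pretty_text to disk
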
    def merge_from_dict(self, options):
        """Merge a dict of options into cfg_dict.

        Merge the dict parsed by MultipleKVAction into this cfg.

        Examples:
            >>> options = {'model.backbone.depth': 50,
            ...            'model.backbone.with_cp': True}
            >>> cfg = Config(dict(model=dict(backbone=dict(type='ResNet'))))
            >>> cfg.merge_from_dict(options)
            >>> cfg_dict = super(Config, self).__getattribute__('_cfg_dict')
            >>> assert cfg_dict == dict(
            ...     model=dict(backbone=dict(type='ResNet', depth=50, with_cp=True)))

        Args:
            options (dict): dict of configs to merge from.
        """
        option_cfg_dict = {}
        for full_key, v in options.items():
            d = option_cfg_dict
            key_list = full_key.split(".")
            for subkey in key_list[:-1]:
                d.setdefault(subkey, ConfigDict())
                d = d[subkey]
            subkey = key_list[-1]
            d[subkey] = v

        cfg_dict = super(SLConfig, self).__getattribute__("_cfg_dict")
        super(SLConfig, self).__setattr__(
            "_cfg_dict", SLConfig._merge_a_into_b(option_cfg_dict, cfg_dict)
        )

    # for multiprocessing: rebuild the config from the pickled state
    def __setstate__(self, state):
        self.__init__(state)

    def copy(self):
        return SLConfig(self._cfg_dict.copy())

    def deepcopy(self):
        return SLConfig(self._cfg_dict.deepcopy())


class DictAction(Action):
    """
    argparse action that splits each argument into KEY=VALUE form on the
    first "=" and appends it to a dictionary. List options should be passed
    as comma-separated values, e.g. KEY=V1,V2,V3.
    """

    @staticmethod
    def _parse_int_float_bool(val):
        # try int, then float; fall back to bool/None keywords, else keep the string
        try:
            return int(val)
        except ValueError:
            pass
        try:
            return float(val)
        except ValueError:
            pass
        if val.lower() in ["true", "false"]:
            return val.lower() == "true"
        if val.lower() in ["none", "null"]:
            return None
        return val

    def __call__(self, parser, namespace, values, option_string=None):
        options = {}
        for kv in values:
            key, val = kv.split("=", maxsplit=1)
            val = [self._parse_int_float_bool(v) for v in val.split(",")]
            if len(val) == 1:
                val = val[0]
            options[key] = val
        setattr(namespace, self.dest, options)