parquet-converter committed
Commit 05c426d · 1 Parent(s): 351ac5c

Update parquet files (step 9 of 476)

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the complete change set.
Files changed (50):
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Farming Simulator 17 KUHN-RELOADED Free Download.md +0 -47
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Google Earth Pro 7.3.0 Portable Free ((NEW)) Download.md +0 -21
  3. spaces/1gistliPinn/ChatGPT4/Examples/Adobe Media Encoder Cs6 Activation Crack NEW.md +0 -90
  4. spaces/1gistliPinn/ChatGPT4/Examples/Free Download Maps Blaupunkt Travelpilot Ex.rar [PATCHED].md +0 -6
  5. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download 730 precompilato 2023 i vantaggi i requisiti e i passaggi da seguire.md +0 -160
  6. spaces/1toTree/lora_test/ppdiffusers/models/ema.py +0 -103
  7. spaces/4com/4com-license/README.md +0 -13
  8. spaces/52Hz/SRMNet_AWGN_denoising/predict.py +0 -82
  9. spaces/AEUPH/AethericGPT/app.py +0 -32
  10. spaces/AILab-CVC/SEED-LLaMA/models/seed_qformer/utils.py +0 -138
  11. spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/server/models.ts +0 -178
  12. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/text_to_image/train_text_to_image_lora.py +0 -949
  13. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/fixtures/custom_pipeline/what_ever.py +0 -101
  14. spaces/Andy1621/uniformer_image_detection/configs/_base_/models/cascade_rcnn_r50_fpn.py +0 -179
  15. spaces/Andy1621/uniformer_image_detection/configs/fpg/faster_rcnn_r50_fpg-chn128_crop640_50e_coco.py +0 -9
  16. spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/coder/base_bbox_coder.py +0 -17
  17. spaces/Andy1621/uniformer_image_detection/mmdet/datasets/pipelines/__init__.py +0 -25
  18. spaces/Andy1621/uniformer_image_detection/mmdet/datasets/pipelines/transforms.py +0 -1811
  19. spaces/Andy1621/uniformer_image_detection/mmdet/models/detectors/htc.py +0 -15
  20. spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/multimodal/abstract_pipeline.py +0 -62
  21. spaces/Anonymous-sub/Rerender/ControlNet/ldm/modules/midas/midas/base_model.py +0 -16
  22. spaces/Artrajz/vits-simple-api/vits/bert/__init__.py +0 -15
  23. spaces/Audio-AGI/WavJourney/scripts/start_services.sh +0 -1
  24. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/demo.py +0 -185
  25. spaces/Benson/text-generation/Examples/3d Solitario Descargar Gratis.md +0 -61
  26. spaces/Benson/text-generation/Examples/Ai Apk.md +0 -76
  27. spaces/Benson/text-generation/Examples/Descargar Gratis Fuego Mx Apk 45 Mb.md +0 -79
  28. spaces/Big-Web/MMSD/env/Lib/site-packages/boto3/dynamodb/__init__.py +0 -12
  29. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/pager.py +0 -34
  30. spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/packaging/__about__.py +0 -26
  31. spaces/Bl1tzie/Jam/server.js +0 -32
  32. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/tools/deploy/caffe2_mask_rcnn.cpp +0 -116
  33. spaces/CVPR/GFPGAN-example/tests/test_gfpgan_arch.py +0 -203
  34. spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/set_operations.h +0 -44
  35. spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/assign_value.h +0 -23
  36. spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/transform_reduce.h +0 -23
  37. spaces/CVPR/WALT/mmdet/datasets/pipelines/__init__.py +0 -25
  38. spaces/CVPR/WALT/mmdet/models/roi_heads/pisa_roi_head.py +0 -159
  39. spaces/Catmeow/Face2Painting_From_Photo/app.py +0 -22
  40. spaces/CikeyQI/Yunzai/Yunzai/plugins/ws-plugin/apps/message/message.js +0 -277
  41. spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/layers/scale.py +0 -11
  42. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/dateutil/zoneinfo/rebuild.py +0 -75
  43. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/components/highlighted_text.py +0 -205
  44. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/interpretation.py +0 -328
  45. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio_client/media_data.py +0 -0
  46. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/httpcore/_backends/anyio.py +0 -145
  47. spaces/Dagfinn1962/Dreamlikeart-Anime-1.0/style.css +0 -94
  48. spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/dataset/pano_s2d3d_mix_dataset.py +0 -91
  49. spaces/DemoLou/moe-tts/transforms.py +0 -193
  50. spaces/ECCV2022/bytetrack/yolox/data/data_augment.py +0 -299
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Farming Simulator 17 KUHN-RELOADED Free Download.md DELETED
@@ -1,47 +0,0 @@
1
-
2
- <h1>Farming Simulator 17 KUHN-RELOADED Free Download: A Review of the New Official Extension</h1>
3
-
4
- <p>If you are a fan of Farming Simulator 17, you might be interested in the new official extension that adds 18 new implements from KUHN, a leading manufacturer of agricultural machinery. The KUHN-RELOADED DLC is available for download on PC and Mac platforms, and it requires the base game Farming Simulator 17 (Update 1.4 or higher) to play.</p>
5
-
6
- <p>In this article, we will review the features and benefits of the KUHN-RELOADED DLC, and show you how to download it for free.</p>
7
- <h2>Farming Simulator 17 KUHN-RELOADED Free Download</h2><br /><p><b><b>Download</b> >>> <a href="https://byltly.com/2uKwp7">https://byltly.com/2uKwp7</a></b></p><br /><br />
8
-
9
- <h2>What's Included in the KUHN-RELOADED DLC?</h2>
10
-
11
- <p>The KUHN-RELOADED DLC contains a variety of equipment that will enhance your farming experience and productivity. You will find cultivators, sowing machines, sprayers, fertilizers spreaders, mowers, tedders, windrowers, balers and bale wrappers from KUHN. Here is a list of the equipment included in the DLC:</p>
12
-
13
- <ul>
14
- <li>KUHN AXIS 40.2 M-EMC -W – Fertilizer spreader</li>
15
- <li>KUHN BTF 4000 – Sowing Machine</li>
16
- <li>KUHN DC 401 - Cultivator</li>
17
- <li>KUHN DELTIS 1302 MEA3 – Sprayer</li>
18
- <li>KUHN DISCOLANDER XM 52 – Cultivator</li>
19
- <li>KUHN ESPRO 3000 – Sowing Machine</li>
20
- <li>KUHN FBP 3135 – Baler</li>
21
- <li>KUHN GF 8702 – Tedder</li>
22
- <li>KUHN GMD 4411 – Mower</li>
23
- <li>KUHN HR 4004 – Cultivator</li>
24
- <li>KUHN LSB 1290 D – Baler</li>
25
- <li>KUHN MERGE MAXX 902 – Windrower</li>
26
- <li>KUHN METRIS 4102 – Sprayer</li>
27
- <li>KUHN PERFORMER 4000 – Cultivator</li>
28
- <li>KUHN PF 1500 – Sprayer Front Tank</li>
29
- <li>KUHN PLANTER 3 R 12 – Sowing Machine</li>
30
- <li>KUHN SW 4014 AutoLoad – Bale wrapper</li>
31
- <li>KUHN TF 1500 – Sowing Machine Front Tank</li>
32
- </ul>
33
-
34
- <p>Each equipment has its own specifications and features that will help you perform various tasks on your farm. You can check out the details of each equipment on the official website[^1^] or on Steam[^2^]. You can also watch the official trailer for the DLC below:</p>
35
-
36
- <iframe width="560" height="315" src="https://www.youtube.com/embed/6R0JF6cgqOw" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
37
-
38
- <h2>How to Download Farming Simulator 17 KUHN-RELOADED Free?</h2>
39
-
40
- <p>If you want to download Farming Simulator 17 KUHN-RELOADED free, you will need a torrent client such as BitTorrent or uTorrent. You can download the torrent file from various sources online, such as LaptrinhX[^3^]. Once you have downloaded the torrent file, you can open it with your torrent client and start downloading the game files.</p>
41
-
42
- <p>After the download is complete, you will need to extract the files using a program such as WinRAR or 7-Zip. You will find a folder named Farming.Simulator.17.KUHN-RELOADED that contains an ISO file and a folder named Crack. You will need to mount the ISO file using a program such as Daemon Tools or PowerISO. Then, you will need to run the setup.exe file and follow the instructions to install the game.</p>
43
-
44
- <p>Once the installation is done, you will need to copy the contents of the Crack folder and paste them into the game directory where you installed Farming Simulator 17. This will overwrite some files and crack the game. Now, you can launch the game</p>
45
- <p></p> 7b8c122e87<br />
46
- <br />
47
- <br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Google Earth Pro 7.3.0 Portable Free ((NEW)) Download.md DELETED
@@ -1,21 +0,0 @@
1
- <br />
2
- <h1>Google Earth Pro 7.3.0 Portable Free Download</h1>
3
- <p>Google Earth Pro is a powerful application that lets you explore the world from the comfort of your computer. You can view satellite imagery, maps, terrain, 3D buildings, and even galaxies in outer space. You can also save your toured places, share them with others, and import and export GIS data.</p>
4
- <h2>Google Earth Pro 7.3.0 Portable Free Download</h2><br /><p><b><b>Download</b> &#9913;&#9913;&#9913; <a href="https://byltly.com/2uKxoj">https://byltly.com/2uKxoj</a></b></p><br /><br />
5
- <p>Google Earth Pro is now free and available for Windows, macOS, Android and Linux[^2^]. However, if you want to use it without installing anything on your system, you can download Google Earth Pro Portable from PortableApps.com[^1^]. This version is based on Google Earth Pro 7.3.0 and allows you to enable caching and associate *.kml and *.kmz files with the app.</p>
6
- <p>To download Google Earth Pro Portable, follow these steps:</p>
7
- <ol>
8
- <li>Go to <a href="https://portableapps.com/node/57406">https://portableapps.com/node/57406</a></li>
9
- <li>Click on the link that says "Download Google Earth Pro Portable 7.3.0.3832 Dev Test 1 [1.06+54.9MB download / 179.0MB installed]"</li>
10
- <li>Save the file to your desired location and run it</li>
11
- <li>Follow the instructions to extract and launch Google Earth Pro Portable</li>
12
- <li>Enjoy exploring the world!</li>
13
- </ol>
14
- <p>Note: This is a development test version and may not work properly or be stable. Use it at your own risk.</p>
15
- <p></p><p>Google Earth Pro Portable has some features that are not available in the regular Google Earth. For example, you can measure distances and areas using lines and polygons, print high-resolution images for presentations and reports, record HD movies of your virtual flights, and import large vector image files.</p>
16
- <p>Google Earth Pro Portable also lets you customize your experience by changing the settings in the GoogleEarthProPortable.ini file. You can find this file in the Other\Source folder of the app. You can edit this file with any text editor and change the values of different parameters. For example, you can enable caching by setting EnableCache=true. This will allow Google Earth Pro Portable to store some data locally and reduce the loading time and bandwidth usage.</p>
17
- <p>Google Earth Pro Portable is a great way to explore the world without installing anything on your system. However, you should be aware of some limitations and drawbacks of using this version. For instance, you may encounter some errors or crashes due to compatibility issues or bugs. You may also miss some updates or features that are available in the official Google Earth Pro. Moreover, you should respect the terms of service and privacy policy of Google when using this app.</p><p>To update Google Earth Pro Portable, you can check the PortableApps.com website for any new releases. You can also use the PortableApps.com Platform to automatically update your apps. Alternatively, you can download the latest version of Google Earth Pro from the official website and replace the files in the App\GoogleEarthPro folder of your portable app.</p>
18
- <p>To uninstall Google Earth Pro Portable, you can simply delete the folder where you extracted the app. You can also use the PortableApps.com Platform to uninstall your apps. However, you should note that this will not remove any data or settings that Google Earth Pro Portable may have stored on your system or online.</p>
19
- <p>Google Earth Pro Portable requires an internet connection to download and display the imagery and data from Google servers. You cannot use it offline unless you have enabled caching and have previously viewed the areas that you want to see. Even then, you may not be able to access all the features and functions of the app.</p> 81aa517590<br />
20
- <br />
21
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/Adobe Media Encoder Cs6 Activation Crack NEW.md DELETED
@@ -1,90 +0,0 @@
1
- <br />
2
- <h1>How to Download and Install Adobe Media Encoder CS6 Activation Crack</h1>
3
- <p>Adobe Media Encoder CS6 is a software that allows you to encode, transcode, and compress video and audio files for various formats and platforms. It is a part of the Adobe Creative Suite 6, which includes other popular applications such as Photoshop, Premiere Pro, After Effects, etc. However, to use Adobe Media Encoder CS6, you need to activate it with a valid serial number or a crack file. In this article, we will show you how to download and install Adobe Media Encoder CS6 activation crack for free.</p>
4
- <h2>What is Adobe Media Encoder CS6?</h2>
5
- <p>Adobe Media Encoder CS6 is a software that allows you to encode, transcode, and compress video and audio files for various formats and platforms. It can help you convert your video clips to different formats such as MP4, MOV, AVI, FLV, etc. It can also help you adjust the quality and size of your video files according to your needs. You can use Adobe Media Encoder CS6 to export your video files from other Adobe applications such as After Effects or Premiere Pro. You can also use it to upload your video files to social media platforms such as YouTube or Facebook.</p>
6
- <h2>adobe media encoder cs6 activation crack</h2><br /><p><b><b>DOWNLOAD</b> &#9675; <a href="https://imgfil.com/2uxXxD">https://imgfil.com/2uxXxD</a></b></p><br /><br />
7
- <h3>What is Adobe Media Encoder CS6 Activation Crack?</h3>
8
- <p>Adobe Media Encoder CS6 activation crack is a file that can bypass the activation process of the software and make it work without a serial number. It is usually a modified version of the original file "amtlib.dll" that is located in the installation folder of Adobe Media Encoder CS6. By replacing the original file with the crack file, you can trick the software into thinking that it is activated and use it without any limitations.</p>
9
- <h4>Where to Download Adobe Media Encoder CS6 Activation Crack?</h4>
10
- <p>There are many websites that offer Adobe Media Encoder CS6 activation crack for download. However, not all of them are reliable or safe. Some of them may have fake or corrupted files, or may contain malicious ads or pop-ups. Therefore, you should be careful when choosing a website and check the reviews and ratings of the files before downloading them. Here are some of the websites that offer Adobe Media Encoder CS6 activation crack:</p>
11
- <ul>
12
- <li><b>Github</b>: This is a platform that hosts various projects and files from developers and programmers. You can find Adobe Media Encoder CS6 activation crack on this website by searching for "patcher-adobe-CS6-all-software-patch". This project contains crack files for various Adobe CS6 applications, including Media Encoder.</li>
13
- <li><b>Weebly</b>: This is a website builder that allows users to create and host their own websites. You can find Adobe Media Encoder CS6 activation crack on this website by searching for "adobe-cs6-master-collection-3264-bit-full-crack". This website contains instructions and links to download and install Adobe CS6 Master Collection full crack, which includes Media Encoder.</li>
14
- <li><b>Internet Archive</b>: This is a digital library that preserves various types of media and content from the internet. You can find Adobe Media Encoder CS6 activation crack on this website by searching for "CS6Whacked". This archive contains cracking instructions and files for various Adobe CS6 applications, including Media Encoder.</li>
15
- </ul>
16
- <h5>How to Install Adobe Media Encoder CS6 Activation Crack?</h5>
17
- <p>After you have downloaded Adobe Media Encoder CS6 activation crack from one of the websites above, you need to follow these steps to install it:</p>
18
- <ol>
19
- <li><b>Run Block Adobe Activation.app</b>: This is an application that prevents the Adobe activation program from starting. You need to run this application before installing Adobe Media Encoder CS6.</li>
20
- <li><b>Run the Adobe CS6 installer</b>: This is an application that installs Adobe Media Encoder CS6 on your computer. You need to run this application and select "Trial" as the installation option.</li>
21
- <li><b>Register a free adobe account</b>: This is an account that allows you to access various Adobe services and products. You need to register a free adobe account and sign in with it during the installation process.</li>
22
- <li><b>Copy amtlib.dll file and replace with existing one</b>: This is the crack file that activates Adobe Media Encoder CS6. You need to copy this file from the downloaded folder and replace it with the existing one in the installation folder of Adobe Media Encoder CS6. The default location of the installation folder is C:\Program Files\Adobe\Adobe Media Encoder CS6.</li>
23
- <li><b>Enjoy using Adobe Media Encoder CS6</b>: After you have replaced the amtlib.dll file with the crack file, you can enjoy using Adobe Media Encoder CS6 without any restrictions.</li>
24
- </ol>
25
- <h6>Benefits of Using Adobe Media Encoder CS6 Activation Crack</h6>
26
- <p>Using Adobe Media Encoder CS6 activation crack has many benefits for you, such as:</p>
27
- <ul>
28
- <li><b>It saves you money</b>: Using Adobe Media Encoder CS6 activation crack allows you to use the software without paying for a serial number or a subscription fee.</li>
29
- <li><b>It gives you access to all features</b>: Using Adobe Media Encoder CS6 activation crack allows you to use all the features and functions of the software without any limitations or watermarks.</li>
30
- <li><b>It improves your productivity</b>: Using Adobe Media Encoder CS6 activation crack allows you to encode, transcode, and compress video and audio files for various formats and platforms quickly and easily.</li>
31
- <li><b>It enhances your creativity</b>: Using Adobe Media Encoder CS6 activation crack allows you to work with other Adobe Creative Suite 6 applications seamlessly and create amazing media content.</li>
32
- </ul>
33
- <h7>Conclusion</h7>
34
- <p>Adobe Media Encoder CS6 is a powerful software that allows you to encode, transcode, and compress video and audio files for various formats and platforms. However, to use it, you need to activate it with a valid serial number or a crack file. In this article, we showed you how to download and install Adobe Media Encoder CS6 activation crack for free. By using this crack file, you can enjoy using Adobe Media Encoder CS6 without any restrictions.</p>
35
- <h1>How to Download and Install Adobe Media Encoder CS6 Activation Crack</h1>
36
- <p>Adobe Media Encoder CS6 is a software that allows you to encode, transcode, and compress video and audio files for various formats and platforms. It is a part of the Adobe Creative Suite 6, which includes other popular applications such as Photoshop, Premiere Pro, After Effects, etc. However, to use Adobe Media Encoder CS6, you need to activate it with a valid serial number or a crack file. In this article, we will show you how to download and install Adobe Media Encoder CS6 activation crack for free.</p>
37
- <h2>What is Adobe Media Encoder CS6?</h2>
38
- <p>Adobe Media Encoder CS6 is a software that allows you to encode, transcode, and compress video and audio files for various formats and platforms. It can help you convert your video clips to different formats such as MP4, MOV, AVI, FLV, etc. It can also help you adjust the quality and size of your video files according to your needs. You can use Adobe Media Encoder CS6 to export your video files from other Adobe applications such as After Effects or Premiere Pro. You can also use it to upload your video files to social media platforms such as YouTube or Facebook.</p>
39
- <p></p>
40
- <h3>What is Adobe Media Encoder CS6 Activation Crack?</h3>
41
- <p>Adobe Media Encoder CS6 activation crack is a file that can bypass the activation process of the software and make it work without a serial number. It is usually a modified version of the original file "amtlib.dll" that is located in the installation folder of Adobe Media Encoder CS6. By replacing the original file with the crack file, you can trick the software into thinking that it is activated and use it without any limitations.</p>
42
- <h4>Where to Download Adobe Media Encoder CS6 Activation Crack?</h4>
43
- <p>There are many websites that offer Adobe Media Encoder CS6 activation crack for download. However, not all of them are reliable or safe. Some of them may have fake or corrupted files, or may contain malicious ads or pop-ups. Therefore, you should be careful when choosing a website and check the reviews and ratings of the files before downloading them. Here are some of the websites that offer Adobe Media Encoder CS6 activation crack:</p>
44
- <ul>
45
- <li><b>Github</b>: This is a platform that hosts various projects and files from developers and programmers. You can find Adobe Media Encoder CS6 activation crack on this website by searching for "patcher-adobe-CS6-all-software-patch". This project contains crack files for various Adobe CS6 applications, including Media Encoder.</li>
46
- <li><b>Weebly</b>: This is a website builder that allows users to create and host their own websites. You can find Adobe Media Encoder CS6 activation crack on this website by searching for "adobe-cs6-master-collection-3264-bit-full-crack". This website contains instructions and links to download and install Adobe CS6 Master Collection full crack, which includes Media Encoder.</li>
47
- <li><b>Internet Archive</b>: This is a digital library that preserves various types of media and content from the internet. You can find Adobe Media Encoder CS6 activation crack on this website by searching for "CS6Whacked". This archive contains cracking instructions and files for various Adobe CS6 applications, including Media Encoder.</li>
48
- </ul>
49
- <h5>How to Install Adobe Media Encoder CS6 Activation Crack?</h5>
50
- <p>After you have downloaded Adobe Media Encoder CS6 activation crack from one of the websites above, you need to follow these steps to install it:</p>
51
- <ol>
52
- <li><b>Run Block Adobe Activation.app</b>: This is an application that prevents the Adobe activation program from starting. You need to run this application before installing Adobe Media Encoder CS6.</li>
53
- <li><b>Run the Adobe CS6 installer</b>: This is an application that installs Adobe Media Encoder CS6 on your computer. You need to run this application and select "Trial" as the installation option.</li>
54
- <li><b>Register a free adobe account</b>: This is an account that allows you to access various Adobe services and products. You need to register a free adobe account and sign in with it during the installation process.</li>
55
- <li><b>Copy amtlib.dll file and replace with existing one</b>: This is the crack file that activates Adobe Media Encoder CS6. You need to copy this file from the downloaded folder and replace it with the existing one in the installation folder of Adobe Media Encoder CS6. The default location of the installation folder is C:\Program Files\Adobe\Adobe Media Encoder CS6.</li>
56
- <li><b>Enjoy using Adobe Media Encoder CS6</b>: After you have replaced the amtlib.dll file with the crack file, you can enjoy using Adobe Media Encoder CS6 without any restrictions.</li>
57
- </ol>
58
- <h6>Benefits of Using Adobe Media Encoder CS6 Activation Crack</h6>
59
- <p>Using Adobe Media Encoder CS6 activation crack has many benefits for you, such as:</p>
60
- <ul>
61
- <li><b>It saves you money</b>: Using Adobe Media Encoder CS6 activation crack allows you to use the software without paying for a serial number or a subscription fee.</li>
62
- <li><b>It gives you access to all features</b>: Using Adobe Media Encoder CS6 activation crack allows you to use all the features and functions of the software without any limitations or watermarks.</li>
63
- <li><b>It improves your productivity</b>: Using Adobe Media Encoder CS6 activation crack allows you to encode, transcode, and compress video and audio files for various formats and platforms quickly and easily.</li>
64
- <li><b>It enhances your creativity</b>: Using Adobe Media Encoder CS6 activation crack allows you to work with other Adobe Creative Suite 6 applications seamlessly and create amazing media content.</li>
65
- </ul>
66
-
67
- <h7>Tips for Using Adobe Media Encoder CS6 Activation Crack</h7>
68
- <p>Besides installing Adobe Media Encoder CS6 activation crack, there are some tips that can help you use the software more effectively:</p>
69
-
70
- <ul>
71
- <li><b>Create presets for common tasks</b>: If you often encode or transcode video files for specific formats or platforms, you can create presets that save your settings and preferences. This way, you can apply them quickly without having to adjust them every time.</li>
72
-
73
- <li><b>Use watch folders for batch processing</b>: If you have multiple video files that need to be encoded or transcoded with the same settings, you can use watch folders to automate the process. Watch folders are folders that monitor any new files added to them and apply presets automatically.</li>
74
-
75
- <li><b>Use parallel encoding for faster performance</b>: If you have a multicore processor, you can use parallel encoding to speed up the encoding or transcoding process. Parallel encoding allows you to encode multiple files simultaneously using different cores of your processor.</li>
76
-
77
- <li><b>Use metadata for better organization</b>: Metadata are information that describe your video files such as title, description, keywords, etc. You can use metadata to organize your video files better and make them easier to find or share.</li>
78
-
79
- <li><b>Use previews for quality control</b>: Before exporting your video files, you can use previews to check their quality and appearance. Previews allow you to see how your video files will look like after encoding or transcoding without actually exporting them.</li>
80
-
81
- </ul>
82
-
83
- <h8>Conclusion</h8>
84
-
85
- <p>Adobe Media Encoder CS6 is a powerful software that allows you to encode, transcode, and compress video and audio files for various formats and platforms. However, to use it, you need to activate it with a valid serial number or a crack file. In this article, we showed you how to download and install Adobe Media Encoder CS6 activation crack for free. We also gave you some tips on how to use the software more effectively. By using this crack file and these tips, you can enjoy using Adobe Media Encoder CS6 without any restrictions.</p>
86
- <h8>Conclusion</h8>
87
-
88
- <p>Adobe Media Encoder CS6 is a powerful software that allows you to encode, transcode, and compress video and audio files for various formats and platforms. However, to use it, you need to activate it with a valid serial number or a crack file. In this article, we showed you how to download and install Adobe Media Encoder CS6 activation crack for free. We also gave you some tips on how to use the software more effectively. By using this crack file and these tips, you can enjoy using Adobe Media Encoder CS6 without any restrictions.</p> 3cee63e6c2<br />
89
- <br />
90
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/Free Download Maps Blaupunkt Travelpilot Ex.rar [PATCHED].md DELETED
@@ -1,6 +0,0 @@
1
- <h2>Free Download Maps Blaupunkt Travelpilot Ex.rar</h2><br /><p><b><b>Download File</b> &#9658;&#9658;&#9658;&#9658;&#9658; <a href="https://imgfil.com/2uxY5W">https://imgfil.com/2uxY5W</a></b></p><br /><br />
2
-
3
- 3B0 035 191G 7 612 001 025 24C32 new.rar. File type: .rar. Downloaded: 0 times. Size: 1.4s. Blaupunkt Travelpilot E (NOT EX!) (analogue from Sony) - 2 pcs. (for 2 pieces - 2,800 rubles). 3B0 035 191G 7 612 001 025 24C32.rar.File type: .rar. Downloaded: 0 times. Size: 1.4s. Blaupunkt Travelpilot E (NOT EX!) (analogue from Sony) - 2 pcs. (for 2 pieces - 2,800 rubles). 3B0 035 191G 7 612 001 025 24C32.rar.File type: .rar. Downloaded: 0 times. Size: 1.4s. Blaupunkt Travelpilot E (NOT EX!) (analogue from Sony) - 2 pcs. ( 8a78ff9644<br />
4
- <br />
5
- <br />
6
- <p></p>
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download 730 precompilato 2023 i vantaggi i requisiti e i passaggi da seguire.md DELETED
@@ -1,160 +0,0 @@
1
-
2
- <h1>How to Download 730 Precompilato 2023: A Complete Guide</h1>
3
- <p>If you are a resident taxpayer in Italy, you may be eligible to use the 730 Precompilato service to file your income tax return online. This service allows you to access a pre-filled tax form with your personal and income data, verify its accuracy, and submit it electronically. In this article, we will explain what 730 Precompilato is, how to access it online, how to submit it online, and how to get help with it online.</p>
4
- <h2>What is 730 Precompilato?</h2>
5
- <p>730 Precompilato is a service provided by the Agenzia delle Entrate (the Italian Revenue Agency) that allows you to file your income tax return online without having to fill in any data manually. The service uses the information that the Agenzia delle Entrate collects from various sources, such as your employer, your bank, your health provider, etc., to pre-fill your tax form with your personal and income data. You can then check and edit your data, add any deductions or tax credits that you are entitled to, and send your declaration electronically.</p>
6
- <h2>download 730 precompilato 2023</h2><br /><p><b><b>Download Zip</b> &gt;&gt;&gt;&gt;&gt; <a href="https://urlin.us/2uSRYH">https://urlin.us/2uSRYH</a></b></p><br /><br />
7
- <h3>What are the benefits of using 730 Precompilato?</h3>
8
- <p>Using 730 Precompilato has several advantages, such as:</p>
9
- <ul>
10
- <li>It saves you time and hassle, as you don't have to collect and enter your data manually.</li>
11
- <li>It reduces the risk of errors and omissions, as the data is verified by the Agenzia delle Entrate.</li>
12
- <li>It allows you to receive your refund faster, as the processing time is shorter.</li>
13
- <li>It is free of charge, as you don't have to pay any fees or commissions.</li>
14
- </ul>
15
- <h3>Who can use 730 Precompilato?</h3>
16
- <p>You can use 730 Precompilato if you meet the following conditions:</p>
17
- <ul>
18
- <li>You are a resident taxpayer in Italy.</li>
19
- <li>You have received income from employment or similar sources (such as pensions, scholarships, etc.) in the previous year.</li>
20
- <li>You have not received income from self-employment, business, or professional activities in the previous year.</li>
21
- <li>You have not opted for other tax regimes (such as flat tax, substitute tax, etc.) in the previous year.</li>
22
- </ul>
23
- <h2>How to access 730 Precompilato online</h2>
24
- <p>To access 730 Precompilato online, you need to visit the Agenzia delle Entrate website and log in with one of the following methods:</p>
25
- <p>download 730 precompilato 2023 agenzia entrate<br />
26
- download 730 precompilato 2023 online<br />
27
- download 730 precompilato 2023 editabile<br />
28
- download 730 precompilato 2023 pdf<br />
29
- download 730 precompilato 2023 spid<br />
30
- download 730 precompilato 2023 inps<br />
31
- download 730 precompilato 2023 istruzioni<br />
32
- download 730 precompilato 2023 scadenza<br />
33
- download 730 precompilato 2023 modello<br />
34
- download 730 precompilato 2023 congiunto<br />
35
- download 730 precompilato 2023 cns<br />
36
- download 730 precompilato 2023 redditi<br />
37
- download 730 precompilato 2023 detrazioni<br />
38
- download 730 precompilato 2023 rimborsi<br />
39
- download 730 precompilato 2023 gratis<br />
40
- download 730 precompilato 2023 senza sostituto d'imposta<br />
41
- download 730 precompilato 2023 con pin inps<br />
42
- download 730 precompilato 2023 con cie<br />
43
- download 730 precompilato 2023 con entratel<br />
44
- download 730 precompilato 2023 con fisconline<br />
45
- download 730 precompilato 2023 con cud<br />
46
- download 730 precompilato 2023 con unico<br />
47
- download 730 precompilato 2023 con partita IVA<br />
48
- download 730 precompilato 2023 con mutuo<br />
49
- download 730 precompilato 2023 con affitto<br />
50
- download 730 precompilato 2023 con assegni familiari<br />
51
- download 730 precompilato 2023 con bonus renzi<br />
52
- download 730 precompilato 2023 con spese mediche<br />
53
- download 730 precompilato 2023 con spese universitarie<br />
54
- download 730 precompilato 2023 con spese funebri<br />
55
- download 730 precompilato 2023 con spese veterinarie<br />
56
- download 730 precompilato 2023 con spese scolastiche<br />
57
- download 730 precompilato 2023 con spese sportive<br />
58
- download 730 precompilato 2023 con spese culturali<br />
59
- download 730 precompilato 2023 con spese condominiali<br />
60
- download 730 precompilato 2023 con spese energetiche<br />
61
- download 730 precompilato 2023 con spese ristrutturazione<br />
62
- download 730 precompilato 2023 con spese donazioni<br />
63
- download</p>
64
- <ul>
65
- <li>SPID (Public System of Digital Identity), which is a unique digital identity that allows you to access various online services of the public administration and private entities.</li>
66
- <li>CIE (Electronic Identity Card), which is a smart card that contains your personal data and biometric features.</li>
67
- <li>CNS (National Service Card), which is a health card that also functions as an electronic identity card.</li>
68
- </ul>
69
- <h3>What are the requirements for accessing 730 Precompilato online?</h3>
70
- <p>To access 730 Precompilato online, you need to have:</p>
71
- <ul>
72
- <li>A valid SPID, C IE (Electronic Identity Card), which is a smart card that contains your personal data and biometric features.</li>
73
- <li>CNS (National Service Card), which is a health card that also functions as an electronic identity card.</li>
74
- </ul>
75
- <h3>How to log in to the Agenzia delle Entrate website with SPID, CIE, or CNS</h3>
76
- <p>To log in to the Agenzia delle Entrate website with SPID, CIE, or CNS, you need to follow these steps:</p>
77
- <ol>
78
- <li>Go to the Agenzia delle Entrate website and click on the "730 Precompilato" button.</li>
79
- <li>Select the option "Accesso con SPID, CIE o CNS" and choose the method that you prefer.</li>
80
- <li>Follow the instructions on the screen to enter your credentials and authenticate yourself.</li>
81
- <li>Once you are logged in, you will see your personal page with your 730 Precompilato form.</li>
82
- </ol>
83
- <h3>How to check and edit your personal and income data</h3>
84
- <p>Once you access your 730 Precompilato form online, you need to check and edit your personal and income data. You can do this by following these steps:</p>
85
- <ol>
86
- <li>Click on the "Dati Anagrafici e Reddituali" section to see your personal and income data.</li>
87
- <li>Verify that your personal data (such as name, surname, date of birth, place of residence, etc.) are correct and complete. If you notice any errors or omissions, you can edit them by clicking on the "Modifica" button.</li>
88
- <li>Verify that your income data (such as wages, pensions, dividends, interest, etc.) are correct and complete. If you notice any errors or omissions, you can edit them by clicking on the "Modifica" button.</li>
89
- <li>If you have received any income from abroad, you need to declare it by clicking on the "Dichiarazione dei redditi esteri" section and filling in the required fields.</li>
90
- <li>If you have any other income that is not pre-filled in your form, you need to declare it by clicking on the "Altri redditi" section and filling in the required fields.</li>
91
- <li>Once you have checked and edited your personal and income data, you can proceed to the next section by clicking on the "Continua" button.</li>
92
- </ol>
93
- <h2>How to submit 730 Precompilato online</h2>
94
- <p>After you have verified and edited your personal and income data, you need to submit your 730 Precompilato form online. You can do this by following these steps:</p>
95
- <h3>How to confirm or modify your deductions and tax credits</h3>
96
- <p>The 730 Precompilato form also includes some deductions and tax credits that you may be entitled to, such as expenses for health, education, mortgage interest, donations, etc. You can confirm or modify these deductions and tax credits by following these steps:</p>
97
- <ol>
98
- <li>Click on the "Detrazioni e Credito d'Imposta" section to see your deductions and tax credits.</li>
99
- <li>Verify that the deductions and tax credits are correct and complete. If you notice any errors or omissions, you can edit them by clicking on the "Modifica" button.</li>
100
- <li>If you have any other deductions or tax credits that are not pre-filled in your form, you can add them by clicking on the "Aggiungi" button and filling in the required fields.</li>
101
- <li>Once you have confirmed or modified your deductions and tax credits, you can proceed to the next section by clicking on the "Continua" button.</li>
102
- </ol>
103
- <h3>How to choose your payment method and send your declaration</h3>
104
- <p>The final step of submitting your 730 Precompilato form online is to choose your payment method and send your declaration. You can do this by following these steps:</p>
105
- <ol>
106
- <li>Click on the "Metodo di Pagamento e Invio" section to see your payment method and declaration.</li>
107
- <li>If you have a tax due, you can choose how to pay it. You can either pay it online with a credit card or a bank transfer, or pay it offline with a F24 form at a bank or a post office. You can also choose to pay it in installments if you meet certain conditions.</li>
108
- <li>If you have a tax refund, you can choose how to receive it. You can either receive it by bank transfer or by postal order. You need to provide your bank account details or your postal address accordingly.</li>
109
- <li>Once you have chosen your payment method, you can send your declaration by clicking on the "Invia" button. You will receive a confirmation code and a receipt that you can download and print.</li>
110
- </ol>
111
- <h3>How to track the status of your submission and receive your refund</h3>
112
- <p>After you have sent your 730 Precompilato form online, you can track the status of your submission and receive your refund by following these steps:</p>
113
- <ol>
114
- <li>Go to the Agenzia delle Entrate website and log in with your SPID, CIE, or CNS.</li>
115
- <li>Click on the "730 Precompilato" button and then on the "Verifica lo stato della tua dichiarazione" section.</li>
116
- <li>You will see the status of your submission, which can be one of the following:</li>
117
- <ul>
118
- <li>Inviata: Your declaration has been sent and is being processed.</li>
119
- <li>Accettata: Your declaration has been accepted and is being finalized.</li>
120
- <li>Liquidata: Your declaration has been finalized and your tax due or refund has been calculated.</li>
121
- <li>Pagata: Your tax due has been paid or your refund has been issued.</li>
122
- </ul>
123
- <li>If you have a refund, you will receive it within a few weeks after your declaration has been finalized. You will receive it by the payment method that you have chosen (bank transfer or postal order).</li>
124
- </ol>
125
- <h2>How to get help with 730 Precompilato online</h2>
126
- <p>If you have any questions or problems with 730 Precompilato online, you can get help by following these methods:</p>
127
- <h3>How to contact the Agenzia delle Entrate for assistance</h3>
128
- <p>You can contact the Agenzia delle Entrate for assistance by phone, email, or chat. You can do this by following these steps:</p>
129
- <ol>
130
- <li>Go to the Agenzia delle Entrate website and click on the "Contatti" button.</li>
131
- <li>Select the option "Assistenza per il 730 Precompilato" and choose the method that you prefer (phone, email, or chat).</li>
132
- <li>Follow the instructions on the screen to enter your details and your request.</li>
133
- <li>You will be connected to an operator who will assist you with your issue.</li>
134
- </ol>
135
- <h3>How to use the online guides and FAQs</h3>
136
- <p>You can also use the online guides and FAQs that are available on the Agenzia delle Entrate website. You can do this by following these steps:</p>
137
- <ol>
138
- <li>Go to the Agenzia delle Entrate website and click on the "Guida e FAQ" button.</li>
139
- <li>Select the option "Guida al 730 Precompilato" or "FAQ sul 730 Precompilato" depending on what you need.</li>
140
- <li>You will see a list of topics and questions that cover various aspects of 730 Precompilato online.</li>
141
- <li>Click on the topic or question that interests you and read the answer or explanation.</li>
142
- </ol>
143
- <h2>Conclusion</h2>
144
- <p>In conclusion, 730 Precompilato online is a convenient and easy way to file your income tax return in Italy. It allows you to access a pre-filled tax form with your personal and income data, verify its accuracy, and submit it electronically. It also allows you to receive your refund faster and avoid errors and omissions. To use 730 Precompilato online, you need to visit the Agenzia delle Entrate website and log in with your SPID, CIE, or CNS. You can then check and edit your data, add any deductions or tax credits that you are entitled to, choose your payment method, and send your declaration. You can also track the status of your submission and receive your refund online. If you need any help with 730 Precompilato online, you can contact the Agenzia delle Entrate for assistance or use the online guides and FAQs.</p>
145
- <h2>FAQs</h2>
146
- <p>Here are some frequently asked questions about 730 Precompilato online:</p>
147
- <h4>When can I access 730 Precompilato online?</h4>
148
- <p>You can access 730 Precompilato online from April 1st to July 31st of each year. However, if you want to use a tax assistance center (CAF) or a qualified professional (such as an accountant) to submit your declaration, you need to access it by June 30th.</p>
149
- <h4>What if I don't agree with the data pre-filled in my form?</h4>
150
- <p>If you don't agree with the data pre-filled in your form, you can edit it by clicking on the "Modifica" button. You can also add any data that is missing or not pre-filled by clicking on the "Aggiungi" button. However, you need to have supporting documents or receipts that prove your data. You are responsible for the accuracy and completeness of your data.</p>
151
- <h4>What if I don't want to use 730 Precompilato online?</h4>
152
- <p>If you don't want to use 730 Precompilato online, you have other options to file your income tax return. You can either use the traditional 730 form, which you need to fill in manually and submit through a tax assistance center (CAF) or a qualified professional (such as an accountant), or use the Unico form, which you need to fill in manually and submit online or by mail. However, these options may be more time-consuming, costly, and prone to errors than 730 Precompilato online.</p>
153
- <h4>What if I have already submitted my 730 Precompilato online and I want to change something?</h4>
154
- <p>If you have already submitted your 730 Precompilato online and you want to change something, you can do so by submitting a new declaration with the correct data. You can do this by following the same steps as before, but you need to select the option "Invia una nuova dichiarazione" instead of "Invia". You can submit a new declaration until July 31st of each year. However, if you have already received your refund or paid your tax due, you may need to adjust your payment or request a new refund accordingly.</p>
155
- <h4>How secure is 730 Precompilato online?</h4>
156
- <p>730 Precompilato online is a secure service that uses encryption and authentication methods to protect your data and privacy. The Agenzia delle Entrate guarantees that your data is treated in accordance with the law and that only you can access and modify it. You can also check the security certificate of the website by clicking on the padlock icon in your browser.</p>
157
- <h4>Where can I find more information about 730 Precompilato online?</h4>
158
- <p>You can find more information about 730 Precompilato online on the Agenzia delle Entrate website, where you can also access the service and get help. You can also consult the official guide and FAQ that are available on the website. Alternatively, you can contact the Agenzia delle Entrate by phone, email, or chat for assistance.</p> 197e85843d<br />
159
- <br />
160
- <br />
spaces/1toTree/lora_test/ppdiffusers/models/ema.py DELETED
@@ -1,103 +0,0 @@
1
- # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
2
- # Copyright 2022 The HuggingFace Team. All rights reserved.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- import paddle
17
- from paddle import nn
18
-
19
-
20
- class LitEma(nn.Layer):
21
- """
22
- Exponential Moving Average (EMA) of model updates
23
-
24
- Parameters:
25
- model: The model architecture for apply EMA.
26
- decay: The exponential decay. Default 0.9999.
27
- use_num_updates: Whether to use number of updates when computing
28
- averages.
29
- """
30
-
31
- def __init__(self, model, decay=0.9999, use_num_upates=True):
32
- super().__init__()
33
- if decay < 0.0 or decay > 1.0:
34
- raise ValueError("Decay must be between 0 and 1")
35
-
36
- self.m_name2s_name = {}
37
- self.register_buffer("decay", paddle.to_tensor(decay, dtype=paddle.float32))
38
- self.register_buffer(
39
- "num_updates",
40
- paddle.to_tensor(0, dtype=paddle.int64) if use_num_upates else paddle.to_tensor(-1, dtype=paddle.int64),
41
- )
42
-
43
- for name, p in model.named_parameters():
44
- if not p.stop_gradient:
45
- # remove as '.'-character is not allowed in buffers
46
- s_name = name.replace(".", "")
47
- self.m_name2s_name.update({name: s_name})
48
- self.register_buffer(s_name, p.clone().detach())
49
-
50
- self.collected_params = []
51
-
52
- def forward(self, model):
53
- decay = self.decay
54
-
55
- if self.num_updates >= 0:
56
- self.num_updates += 1
57
- decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))
58
-
59
- one_minus_decay = 1.0 - decay
60
-
61
- with paddle.no_grad():
62
- m_param = dict(model.named_parameters())
63
- shadow_params = dict(self.named_buffers())
64
-
65
- for key in m_param:
66
- if not m_param[key].stop_gradient:
67
- sname = self.m_name2s_name[key]
68
- shadow_params[sname].scale_(decay)
69
- shadow_params[sname].add_(m_param[key] * one_minus_decay)
70
- else:
71
- assert key not in self.m_name2s_name
72
-
73
- def copy_to(self, model):
74
- m_param = dict(model.named_parameters())
75
- shadow_params = dict(self.named_buffers())
76
- for key in m_param:
77
- if not m_param[key].stop_gradient:
78
- m_param[key].copy_(shadow_params[self.m_name2s_name[key]], True)
79
- else:
80
- assert key not in self.m_name2s_name
81
-
82
- def store(self, parameters):
83
- """
84
- Save the current parameters for restoring later.
85
- Args:
86
- parameters: Iterable of `EagerParamBase`; the parameters to be
87
- temporarily stored.
88
- """
89
- self.collected_params = [param.clone() for param in parameters]
90
-
91
- def restore(self, parameters):
92
- """
93
- Restore the parameters stored with the `store` method.
94
- Useful to validate the model with EMA parameters without affecting the
95
- original optimization process. Store the parameters before the
96
- `copy_to` method. After validation (or model saving), use this to
97
- restore the former parameters.
98
- Args:
99
- parameters: Iterable of `EagerParamBase`; the parameters to be
100
- updated with the stored parameters.
101
- """
102
- for c_param, param in zip(self.collected_params, parameters):
103
- param.copy_(c_param, True)
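For context, the deleted `ema.py` above maintains an exponential moving average of a model's trainable weights for PaddlePaddle. Below is a minimal sketch of how a helper like `LitEma` is typically wired into a training loop; `build_model`, `train_loader`, and `validate` are hypothetical placeholders, not part of this repository.

```python
import paddle

# Placeholders: build_model(), train_loader and validate() are assumed, not real repo code.
model = build_model()                      # any paddle.nn.Layer
ema = LitEma(model, decay=0.9999)          # shadow copy of the trainable parameters
optimizer = paddle.optimizer.Adam(parameters=model.parameters(), learning_rate=1e-4)

for step, batch in enumerate(train_loader):
    loss = model(batch).mean()             # placeholder loss
    loss.backward()
    optimizer.step()
    optimizer.clear_grad()
    ema(model)                             # update the shadow weights after each step

# Evaluate with the EMA weights, then restore the raw ones.
ema.store(model.parameters())
ema.copy_to(model)
validate(model)                            # placeholder evaluation routine
ema.restore(model.parameters())
```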
spaces/4com/4com-license/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: 4COM License
3
- emoji: ⚖
4
- colorFrom: pink
5
- colorTo: red
6
- sdk: gradio
7
- sdk_version: 3.41.2
8
- app_file: app.py
9
- pinned: false
10
- license: creativeml-openrail-m
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/52Hz/SRMNet_AWGN_denoising/predict.py DELETED
@@ -1,82 +0,0 @@
1
- import cog
2
- import tempfile
3
- from pathlib import Path
4
- import argparse
5
- import shutil
6
- import os
7
- import glob
8
- import torch
9
- from skimage import img_as_ubyte
10
- from PIL import Image
11
- from model.SRMNet import SRMNet
12
- from main_test_SRMNet import save_img, setup
13
- import torchvision.transforms.functional as TF
14
- import torch.nn.functional as F
15
-
16
-
17
- class Predictor(cog.Predictor):
18
- def setup(self):
19
- model_dir = 'experiments/pretrained_models/AWGN_denoising_SRMNet.pth'
20
-
21
- parser = argparse.ArgumentParser(description='Demo Image Denoising')
22
- parser.add_argument('--input_dir', default='./test/', type=str, help='Input images')
23
- parser.add_argument('--result_dir', default='./result/', type=str, help='Directory for results')
24
- parser.add_argument('--weights',
25
- default='./checkpoints/SRMNet_real_denoise/models/model_bestPSNR.pth', type=str,
26
- help='Path to weights')
27
-
28
- self.args = parser.parse_args()
29
-
30
- self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
31
-
32
- @cog.input("image", type=Path, help="input image")
33
- def predict(self, image):
34
- # set input folder
35
- input_dir = 'input_cog_temp'
36
- os.makedirs(input_dir, exist_ok=True)
37
- input_path = os.path.join(input_dir, os.path.basename(image))
38
- shutil.copy(str(image), input_path)
39
-
40
- # Load corresponding models architecture and weights
41
- model = SRMNet()
42
- model.eval()
43
- model = model.to(self.device)
44
-
45
- folder, save_dir = setup(self.args)
46
- os.makedirs(save_dir, exist_ok=True)
47
-
48
- out_path = Path(tempfile.mkdtemp()) / "out.png"
49
- mul = 16
50
- for file_ in sorted(glob.glob(os.path.join(folder, '*.PNG'))):
51
- img = Image.open(file_).convert('RGB')
52
- input_ = TF.to_tensor(img).unsqueeze(0).cuda()
53
-
54
- # Pad the input if not_multiple_of 8
55
- h, w = input_.shape[2], input_.shape[3]
56
- H, W = ((h + mul) // mul) * mul, ((w + mul) // mul) * mul
57
- padh = H - h if h % mul != 0 else 0
58
- padw = W - w if w % mul != 0 else 0
59
- input_ = F.pad(input_, (0, padw, 0, padh), 'reflect')
60
- with torch.no_grad():
61
- restored = model(input_)
62
-
63
- restored = torch.clamp(restored, 0, 1)
64
- restored = restored[:, :, :h, :w]
65
- restored = restored.permute(0, 2, 3, 1).cpu().detach().numpy()
66
- restored = img_as_ubyte(restored[0])
67
-
68
- save_img(str(out_path), restored)
69
- clean_folder(input_dir)
70
- return out_path
71
-
72
-
73
- def clean_folder(folder):
74
- for filename in os.listdir(folder):
75
- file_path = os.path.join(folder, filename)
76
- try:
77
- if os.path.isfile(file_path) or os.path.islink(file_path):
78
- os.unlink(file_path)
79
- elif os.path.isdir(file_path):
80
- shutil.rmtree(file_path)
81
- except Exception as e:
82
- print('Failed to delete %s. Reason: %s' % (file_path, e))
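One detail worth noting in the deleted `predict.py`: inputs are reflect-padded so that height and width become multiples of 16 before inference, and the output is cropped back afterwards. A small self-contained PyTorch sketch of that pattern is shown below; the helper name and test shape are illustrative, not from the repository.

```python
import torch
import torch.nn.functional as F

def pad_to_multiple(x: torch.Tensor, mul: int = 16):
    """Reflect-pad an NCHW tensor so H and W become multiples of `mul`."""
    h, w = x.shape[2], x.shape[3]
    H = ((h + mul) // mul) * mul
    W = ((w + mul) // mul) * mul
    padh = H - h if h % mul != 0 else 0
    padw = W - w if w % mul != 0 else 0
    return F.pad(x, (0, padw, 0, padh), mode="reflect"), (h, w)

x = torch.rand(1, 3, 250, 333)             # illustrative input shape
padded, (h, w) = pad_to_multiple(x)
# ... run the network on `padded`, then crop the result back:
restored = padded[:, :, :h, :w]
assert restored.shape == x.shape
```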
spaces/AEUPH/AethericGPT/app.py DELETED
@@ -1,32 +0,0 @@
1
- import gradio as gr
2
- import requests
3
-
4
- def interact_with_server(prompt):
5
- server_url = "http://xzyorb.servemp3.com:80"
6
- response = requests.post(server_url, data={"prompt": prompt})
7
- response_text = response.text
8
-
9
- # Split the response text into individual messages
10
- conversation_messages = response_text.split("\n")
11
- num_messages = len(conversation_messages)
12
-
13
- return response_text, num_messages
14
-
15
- def print_session_data(data):
16
- response_text, num_messages = data
17
- session = gr.capture_session()
18
- ip_address = session["ip"]
19
- user_agent = session["user_agent"]
20
-
21
- print("IP Address:", ip_address)
22
- print("User Agent:", user_agent)
23
- print("Number of Messages:", num_messages)
24
-
25
- iface = gr.Interface(
26
- fn=interact_with_server,
27
- inputs=gr.inputs.Textbox(),
28
- outputs=[gr.outputs.HTML(), gr.outputs.Label()], # Use Label for displaying the number of messages
29
- capture_session=True # Automatically captures IP address and user agent
30
- )
31
-
32
- iface.launch(print_session_data) # Pass the function to print session data to the launch method
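Note that the deleted `app.py` appears to rely on a `gr.capture_session` helper and a `capture_session=True` interface flag that current Gradio releases do not seem to provide, and it hands a callback to `iface.launch()`, which Gradio does not invoke. Below is a hedged re-sketch of the same prompt-forwarding interface against a current Gradio API; the session/IP reporting of the original is dropped because that API is unavailable, and the server URL is simply copied from the deleted file.

```python
import gradio as gr
import requests

SERVER_URL = "http://xzyorb.servemp3.com:80"   # copied from the deleted file

def interact_with_server(prompt: str):
    # Forward the prompt to the backend and report the raw text plus message count.
    response = requests.post(SERVER_URL, data={"prompt": prompt}, timeout=30)
    messages = response.text.split("\n")
    return response.text, str(len(messages))

iface = gr.Interface(
    fn=interact_with_server,
    inputs=gr.Textbox(label="Prompt"),
    outputs=[gr.HTML(label="Response"), gr.Label(label="Message count")],
)

iface.launch()
```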
spaces/AILab-CVC/SEED-LLaMA/models/seed_qformer/utils.py DELETED
@@ -1,138 +0,0 @@
- """
- Copyright (c) 2022, salesforce.com, inc.
- All rights reserved.
- SPDX-License-Identifier: BSD-3-Clause
- For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
- """
-
- import datetime
- import functools
- import os
-
- import torch
- import torch.distributed as dist
- import timm.models.hub as timm_hub
- from urllib.parse import urlparse
-
-
- def setup_for_distributed(is_master):
-     """
-     This function disables printing when not in master process
-     """
-     import builtins as __builtin__
-
-     builtin_print = __builtin__.print
-
-     def print(*args, **kwargs):
-         force = kwargs.pop("force", False)
-         if is_master or force:
-             builtin_print(*args, **kwargs)
-
-     __builtin__.print = print
-
-
- def is_dist_avail_and_initialized():
-     if not dist.is_available():
-         return False
-     if not dist.is_initialized():
-         return False
-     return True
-
-
- def get_world_size():
-     if not is_dist_avail_and_initialized():
-         return 1
-     return dist.get_world_size()
-
-
- def get_rank():
-     if not is_dist_avail_and_initialized():
-         return 0
-     return dist.get_rank()
-
-
- def is_main_process():
-     return get_rank() == 0
-
-
- def init_distributed_mode(args):
-     if "RANK" in os.environ and "WORLD_SIZE" in os.environ:
-         args.rank = int(os.environ["RANK"])
-         args.world_size = int(os.environ["WORLD_SIZE"])
-         args.gpu = int(os.environ["LOCAL_RANK"])
-     elif "SLURM_PROCID" in os.environ:
-         args.rank = int(os.environ["SLURM_PROCID"])
-         args.gpu = args.rank % torch.cuda.device_count()
-     else:
-         print("Not using distributed mode")
-         args.distributed = False
-         return
-
-     args.distributed = True
-
-     torch.cuda.set_device(args.gpu)
-     args.dist_backend = "nccl"
-     print(
-         "| distributed init (rank {}, world {}): {}".format(args.rank, args.world_size, args.dist_url),
-         flush=True,
-     )
-     torch.distributed.init_process_group(
-         backend=args.dist_backend,
-         init_method=args.dist_url,
-         world_size=args.world_size,
-         rank=args.rank,
-         timeout=datetime.timedelta(days=365), # allow auto-downloading and de-compressing
-     )
-     torch.distributed.barrier()
-     setup_for_distributed(args.rank == 0)
-
-
- def get_dist_info():
-     if torch.__version__ < "1.0":
-         initialized = dist._initialized
-     else:
-         initialized = dist.is_initialized()
-     if initialized:
-         rank = dist.get_rank()
-         world_size = dist.get_world_size()
-     else: # non-distributed training
-         rank = 0
-         world_size = 1
-     return rank, world_size
-
-
- def main_process(func):
-     @functools.wraps(func)
-     def wrapper(*args, **kwargs):
-         rank, _ = get_dist_info()
-         if rank == 0:
-             return func(*args, **kwargs)
-
-     return wrapper
-
-
- def download_cached_file(url, check_hash=True, progress=False):
-     """
-     Download a file from a URL and cache it locally. If the file already exists, it is not downloaded again.
-     If distributed, only the main process downloads the file, and the other processes wait for the file to be downloaded.
-     """
-     def get_cached_file_path():
-         # a hack to sync the file path across processes
-         parts = torch.hub.urlparse(url)
-         filename = os.path.basename(parts.path)
-         cached_file = os.path.join(timm_hub.get_cache_dir(), filename)
-
-         return cached_file
-
-     if is_main_process():
-         timm_hub.download_cached_file(url, check_hash, progress)
-
-     if is_dist_avail_and_initialized():
-         dist.barrier()
-
-     return get_cached_file_path()
-
-
- def is_url(url_or_filename):
-     parsed = urlparse(url_or_filename)
-     return parsed.scheme in ("http", "https")
 
spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/server/models.ts DELETED
@@ -1,178 +0,0 @@
- import { HF_ACCESS_TOKEN, MODELS, OLD_MODELS } from "$env/static/private";
- import type {
-   ChatTemplateInput,
-   WebSearchQueryTemplateInput,
-   WebSearchSummaryTemplateInput,
- } from "$lib/types/Template";
- import { compileTemplate } from "$lib/utils/template";
- import { z } from "zod";
-
- type Optional<T, K extends keyof T> = Pick<Partial<T>, K> & Omit<T, K>;
-
- const sagemakerEndpoint = z.object({
-   host: z.literal("sagemaker"),
-   url: z.string().url(),
-   accessKey: z.string().min(1),
-   secretKey: z.string().min(1),
-   sessionToken: z.string().optional(),
- });
-
- const tgiEndpoint = z.object({
-   host: z.union([z.literal("tgi"), z.undefined()]),
-   url: z.string().url(),
-   authorization: z.string().min(1).default(`Bearer ${HF_ACCESS_TOKEN}`),
- });
-
- const localEndpoint = z.object({
-   host: z.union([z.literal("local"), z.undefined()]),
-   model: z.string(),
-   url: z.string().url(),
-   authorization: z.string().min(1).default(`Bearer ${HF_ACCESS_TOKEN}`),
- });
-
- const commonEndpoint = z.object({
-   weight: z.number().int().positive().default(1),
- });
-
- const endpoint = z.lazy(() =>
-   z.union([
-     sagemakerEndpoint.merge(commonEndpoint),
-     tgiEndpoint.merge(commonEndpoint),
-     localEndpoint.merge(commonEndpoint),
-   ])
- );
-
- const combinedEndpoint = endpoint.transform((data) => {
-   if (data.host === "tgi" || data.host === undefined) {
-     return tgiEndpoint.merge(commonEndpoint).parse(data);
-   } else if (data.host === "sagemaker") {
-     return sagemakerEndpoint.merge(commonEndpoint).parse(data);
-   } else if (data.host === "local") {
-     return localEndpoint.merge(commonEndpoint).parse(data);
-   } else {
-     throw new Error(`Invalid host: ${data.host}`);
-   }
- });
-
- const modelsRaw = z
-   .array(
-     z.object({
-       /** Used as an identifier in DB */
-       id: z.string().optional(),
-       /** Used to link to the model page, and for inference */
-       name: z.string().min(1),
-       displayName: z.string().min(1).optional(),
-       description: z.string().min(1).optional(),
-       is_local: z.boolean().optional(),
-       is_code: z.boolean().optional(),
-       is_phi: z.boolean().optional(),
-       type: z.string().min(1),
-       websiteUrl: z.string().url().optional(),
-       modelUrl: z.string().url().optional(),
-       datasetName: z.string().min(1).optional(),
-       datasetUrl: z.string().url().optional(),
-       userMessageToken: z.string().default(""),
-       userMessageEndToken: z.string().default(""),
-       assistantMessageToken: z.string().default(""),
-       assistantMessageEndToken: z.string().default(""),
-       messageEndToken: z.string().default(""),
-       preprompt: z.string().default(""),
-       prepromptUrl: z.string().url().optional(),
-       chatPromptTemplate: z
-         .string()
-         .default(
-           "{{preprompt}}" +
-             "{{#each messages}}" +
-             "{{#ifUser}}{{@root.userMessageToken}}{{content}}{{@root.userMessageEndToken}}{{/ifUser}}" +
-             "{{#ifAssistant}}{{@root.assistantMessageToken}}{{content}}{{@root.assistantMessageEndToken}}{{/ifAssistant}}" +
-             "{{/each}}" +
-             "{{assistantMessageToken}}"
-         ),
-       webSearchSummaryPromptTemplate: z
-         .string()
-         .default(
-           "{{userMessageToken}}{{answer}}{{userMessageEndToken}}" +
-             "{{userMessageToken}}" +
-             "The text above should be summarized to best answer the query: {{query}}." +
-             "{{userMessageEndToken}}" +
-             "{{assistantMessageToken}}Summary: "
-         ),
-       webSearchQueryPromptTemplate: z
-         .string()
-         .default(
-           "{{userMessageToken}}" +
-             "The following messages were written by a user, trying to answer a question." +
-             "{{userMessageEndToken}}" +
-             "{{#each messages}}" +
-             "{{#ifUser}}{{@root.userMessageToken}}{{content}}{{@root.userMessageEndToken}}{{/ifUser}}" +
-             "{{/each}}" +
-             "{{userMessageToken}}" +
-             "What plain-text english sentence would you input into Google to answer the last question? Answer with a short (10 words max) simple sentence." +
-             "{{userMessageEndToken}}" +
-             "{{assistantMessageToken}}Query: "
-         ),
-       promptExamples: z
-         .array(
-           z.object({
-             title: z.string().min(1),
-             prompt: z.string().min(1),
-           })
-         )
-         .optional(),
-       endpoints: z.array(combinedEndpoint).optional(),
-       parameters: z
-         .object({
-           temperature: z.number().min(0).max(1),
-           truncate: z.number().int().positive(),
-           max_new_tokens: z.number().int().positive(),
-           stop: z.array(z.string()).optional(),
-         })
-         .passthrough()
-         .optional(),
-     })
-   )
-   .parse(JSON.parse(MODELS));
-
- export const models = await Promise.all(
-   modelsRaw.map(async (m) => ({
-     ...m,
-     userMessageEndToken: m?.userMessageEndToken || m?.messageEndToken,
-     assistantMessageEndToken: m?.assistantMessageEndToken || m?.messageEndToken,
-     chatPromptRender: compileTemplate<ChatTemplateInput>(m.chatPromptTemplate, m),
-     webSearchSummaryPromptRender: compileTemplate<WebSearchSummaryTemplateInput>(
-       m.webSearchSummaryPromptTemplate,
-       m
-     ),
-     webSearchQueryPromptRender: compileTemplate<WebSearchQueryTemplateInput>(
-       m.webSearchQueryPromptTemplate,
-       m
-     ),
-     id: m.id || m.name,
-     displayName: m.displayName || m.name,
-     preprompt: m.prepromptUrl ? await fetch(m.prepromptUrl).then((r) => r.text()) : m.preprompt,
-   }))
- );
-
- // Models that have been deprecated
- export const oldModels = OLD_MODELS
-   ? z
-       .array(
-         z.object({
-           id: z.string().optional(),
-           name: z.string().min(1),
-           displayName: z.string().min(1).optional(),
-         })
-       )
-       .parse(JSON.parse(OLD_MODELS))
-       .map((m) => ({ ...m, id: m.id || m.name, displayName: m.displayName || m.name }))
-   : [];
-
- export type BackendModel = Optional<(typeof models)[0], "preprompt">;
- export type Endpoint = z.infer<typeof endpoint>;
-
- export const defaultModel = models[0];
-
- export const validateModel = (_models: BackendModel[]) => {
-   // Zod enum function requires 2 parameters
-   return z.enum([_models[0].id, ..._models.slice(1).map((m) => m.id)]);
- };
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/text_to_image/train_text_to_image_lora.py DELETED
@@ -1,949 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
- """Fine-tuning script for Stable Diffusion for text2image with support for LoRA."""
16
-
17
- import argparse
18
- import logging
19
- import math
20
- import os
21
- import random
22
- import shutil
23
- from pathlib import Path
24
-
25
- import datasets
26
- import numpy as np
27
- import torch
28
- import torch.nn.functional as F
29
- import torch.utils.checkpoint
30
- import transformers
31
- from accelerate import Accelerator
32
- from accelerate.logging import get_logger
33
- from accelerate.utils import ProjectConfiguration, set_seed
34
- from datasets import load_dataset
35
- from huggingface_hub import create_repo, upload_folder
36
- from packaging import version
37
- from torchvision import transforms
38
- from tqdm.auto import tqdm
39
- from transformers import CLIPTextModel, CLIPTokenizer
40
-
41
- import diffusers
42
- from diffusers import AutoencoderKL, DDPMScheduler, DiffusionPipeline, UNet2DConditionModel
43
- from diffusers.loaders import AttnProcsLayers
44
- from diffusers.models.attention_processor import LoRAAttnProcessor
45
- from diffusers.optimization import get_scheduler
46
- from diffusers.utils import check_min_version, is_wandb_available
47
- from diffusers.utils.import_utils import is_xformers_available
48
-
49
-
50
- # Will error if the minimal version of diffusers is not installed. Remove at your own risks.
51
- check_min_version("0.19.0")
52
-
53
- logger = get_logger(__name__, log_level="INFO")
54
-
55
-
56
- def save_model_card(repo_id: str, images=None, base_model=str, dataset_name=str, repo_folder=None):
57
- img_str = ""
58
- for i, image in enumerate(images):
59
- image.save(os.path.join(repo_folder, f"image_{i}.png"))
60
- img_str += f"![img_{i}](./image_{i}.png)\n"
61
-
62
- yaml = f"""
63
- ---
64
- license: creativeml-openrail-m
65
- base_model: {base_model}
66
- tags:
67
- - stable-diffusion
68
- - stable-diffusion-diffusers
69
- - text-to-image
70
- - diffusers
71
- - lora
72
- inference: true
73
- ---
74
- """
75
- model_card = f"""
76
- # LoRA text2image fine-tuning - {repo_id}
77
- These are LoRA adaption weights for {base_model}. The weights were fine-tuned on the {dataset_name} dataset. You can find some example images in the following. \n
78
- {img_str}
79
- """
80
- with open(os.path.join(repo_folder, "README.md"), "w") as f:
81
- f.write(yaml + model_card)
82
-
83
-
84
- def parse_args():
85
- parser = argparse.ArgumentParser(description="Simple example of a training script.")
86
- parser.add_argument(
87
- "--pretrained_model_name_or_path",
88
- type=str,
89
- default=None,
90
- required=True,
91
- help="Path to pretrained model or model identifier from huggingface.co/models.",
92
- )
93
- parser.add_argument(
94
- "--revision",
95
- type=str,
96
- default=None,
97
- required=False,
98
- help="Revision of pretrained model identifier from huggingface.co/models.",
99
- )
100
- parser.add_argument(
101
- "--dataset_name",
102
- type=str,
103
- default=None,
104
- help=(
105
- "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private,"
106
- " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem,"
107
- " or to a folder containing files that 🤗 Datasets can understand."
108
- ),
109
- )
110
- parser.add_argument(
111
- "--dataset_config_name",
112
- type=str,
113
- default=None,
114
- help="The config of the Dataset, leave as None if there's only one config.",
115
- )
116
- parser.add_argument(
117
- "--train_data_dir",
118
- type=str,
119
- default=None,
120
- help=(
121
- "A folder containing the training data. Folder contents must follow the structure described in"
122
- " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file"
123
- " must exist to provide the captions for the images. Ignored if `dataset_name` is specified."
124
- ),
125
- )
126
- parser.add_argument(
127
- "--image_column", type=str, default="image", help="The column of the dataset containing an image."
128
- )
129
- parser.add_argument(
130
- "--caption_column",
131
- type=str,
132
- default="text",
133
- help="The column of the dataset containing a caption or a list of captions.",
134
- )
135
- parser.add_argument(
136
- "--validation_prompt", type=str, default=None, help="A prompt that is sampled during training for inference."
137
- )
138
- parser.add_argument(
139
- "--num_validation_images",
140
- type=int,
141
- default=4,
142
- help="Number of images that should be generated during validation with `validation_prompt`.",
143
- )
144
- parser.add_argument(
145
- "--validation_epochs",
146
- type=int,
147
- default=1,
148
- help=(
149
- "Run fine-tuning validation every X epochs. The validation process consists of running the prompt"
150
- " `args.validation_prompt` multiple times: `args.num_validation_images`."
151
- ),
152
- )
153
- parser.add_argument(
154
- "--max_train_samples",
155
- type=int,
156
- default=None,
157
- help=(
158
- "For debugging purposes or quicker training, truncate the number of training examples to this "
159
- "value if set."
160
- ),
161
- )
162
- parser.add_argument(
163
- "--output_dir",
164
- type=str,
165
- default="sd-model-finetuned-lora",
166
- help="The output directory where the model predictions and checkpoints will be written.",
167
- )
168
- parser.add_argument(
169
- "--cache_dir",
170
- type=str,
171
- default=None,
172
- help="The directory where the downloaded models and datasets will be stored.",
173
- )
174
- parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
175
- parser.add_argument(
176
- "--resolution",
177
- type=int,
178
- default=512,
179
- help=(
180
- "The resolution for input images, all the images in the train/validation dataset will be resized to this"
181
- " resolution"
182
- ),
183
- )
184
- parser.add_argument(
185
- "--center_crop",
186
- default=False,
187
- action="store_true",
188
- help=(
189
- "Whether to center crop the input images to the resolution. If not set, the images will be randomly"
190
- " cropped. The images will be resized to the resolution first before cropping."
191
- ),
192
- )
193
- parser.add_argument(
194
- "--random_flip",
195
- action="store_true",
196
- help="whether to randomly flip images horizontally",
197
- )
198
- parser.add_argument(
199
- "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader."
200
- )
201
- parser.add_argument("--num_train_epochs", type=int, default=100)
202
- parser.add_argument(
203
- "--max_train_steps",
204
- type=int,
205
- default=None,
206
- help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
207
- )
208
- parser.add_argument(
209
- "--gradient_accumulation_steps",
210
- type=int,
211
- default=1,
212
- help="Number of updates steps to accumulate before performing a backward/update pass.",
213
- )
214
- parser.add_argument(
215
- "--gradient_checkpointing",
216
- action="store_true",
217
- help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
218
- )
219
- parser.add_argument(
220
- "--learning_rate",
221
- type=float,
222
- default=1e-4,
223
- help="Initial learning rate (after the potential warmup period) to use.",
224
- )
225
- parser.add_argument(
226
- "--scale_lr",
227
- action="store_true",
228
- default=False,
229
- help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
230
- )
231
- parser.add_argument(
232
- "--lr_scheduler",
233
- type=str,
234
- default="constant",
235
- help=(
236
- 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
237
- ' "constant", "constant_with_warmup"]'
238
- ),
239
- )
240
- parser.add_argument(
241
- "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
242
- )
243
- parser.add_argument(
244
- "--snr_gamma",
245
- type=float,
246
- default=None,
247
- help="SNR weighting gamma to be used if rebalancing the loss. Recommended value is 5.0. "
248
- "More details here: https://arxiv.org/abs/2303.09556.",
249
- )
250
- parser.add_argument(
251
- "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
252
- )
253
- parser.add_argument(
254
- "--allow_tf32",
255
- action="store_true",
256
- help=(
257
- "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
258
- " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
259
- ),
260
- )
261
- parser.add_argument(
262
- "--dataloader_num_workers",
263
- type=int,
264
- default=0,
265
- help=(
266
- "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
267
- ),
268
- )
269
- parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
270
- parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
271
- parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
272
- parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
273
- parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
274
- parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
275
- parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
276
- parser.add_argument(
277
- "--prediction_type",
278
- type=str,
279
- default=None,
280
- help="The prediction_type that shall be used for training. Choose between 'epsilon' or 'v_prediction' or leave `None`. If left to `None` the default prediction type of the scheduler: `noise_scheduler.config.prediciton_type` is chosen.",
281
- )
282
- parser.add_argument(
283
- "--hub_model_id",
284
- type=str,
285
- default=None,
286
- help="The name of the repository to keep in sync with the local `output_dir`.",
287
- )
288
- parser.add_argument(
289
- "--logging_dir",
290
- type=str,
291
- default="logs",
292
- help=(
293
- "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
294
- " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
295
- ),
296
- )
297
- parser.add_argument(
298
- "--mixed_precision",
299
- type=str,
300
- default=None,
301
- choices=["no", "fp16", "bf16"],
302
- help=(
303
- "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
304
- " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the"
305
- " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
306
- ),
307
- )
308
- parser.add_argument(
309
- "--report_to",
310
- type=str,
311
- default="tensorboard",
312
- help=(
313
- 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
314
- ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
315
- ),
316
- )
317
- parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
318
- parser.add_argument(
319
- "--checkpointing_steps",
320
- type=int,
321
- default=500,
322
- help=(
323
- "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming"
324
- " training using `--resume_from_checkpoint`."
325
- ),
326
- )
327
- parser.add_argument(
328
- "--checkpoints_total_limit",
329
- type=int,
330
- default=None,
331
- help=("Max number of checkpoints to store."),
332
- )
333
- parser.add_argument(
334
- "--resume_from_checkpoint",
335
- type=str,
336
- default=None,
337
- help=(
338
- "Whether training should be resumed from a previous checkpoint. Use a path saved by"
339
- ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
340
- ),
341
- )
342
- parser.add_argument(
343
- "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
344
- )
345
- parser.add_argument("--noise_offset", type=float, default=0, help="The scale of noise offset.")
346
- parser.add_argument(
347
- "--rank",
348
- type=int,
349
- default=4,
350
- help=("The dimension of the LoRA update matrices."),
351
- )
352
-
353
- args = parser.parse_args()
354
- env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
355
- if env_local_rank != -1 and env_local_rank != args.local_rank:
356
- args.local_rank = env_local_rank
357
-
358
- # Sanity checks
359
- if args.dataset_name is None and args.train_data_dir is None:
360
- raise ValueError("Need either a dataset name or a training folder.")
361
-
362
- return args
363
-
364
-
365
- DATASET_NAME_MAPPING = {
366
- "lambdalabs/pokemon-blip-captions": ("image", "text"),
367
- }
368
-
369
-
370
- def main():
371
- args = parse_args()
372
- logging_dir = Path(args.output_dir, args.logging_dir)
373
-
374
- accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
375
-
376
- accelerator = Accelerator(
377
- gradient_accumulation_steps=args.gradient_accumulation_steps,
378
- mixed_precision=args.mixed_precision,
379
- log_with=args.report_to,
380
- project_config=accelerator_project_config,
381
- )
382
- if args.report_to == "wandb":
383
- if not is_wandb_available():
384
- raise ImportError("Make sure to install wandb if you want to use it for logging during training.")
385
- import wandb
386
-
387
- # Make one log on every process with the configuration for debugging.
388
- logging.basicConfig(
389
- format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
390
- datefmt="%m/%d/%Y %H:%M:%S",
391
- level=logging.INFO,
392
- )
393
- logger.info(accelerator.state, main_process_only=False)
394
- if accelerator.is_local_main_process:
395
- datasets.utils.logging.set_verbosity_warning()
396
- transformers.utils.logging.set_verbosity_warning()
397
- diffusers.utils.logging.set_verbosity_info()
398
- else:
399
- datasets.utils.logging.set_verbosity_error()
400
- transformers.utils.logging.set_verbosity_error()
401
- diffusers.utils.logging.set_verbosity_error()
402
-
403
- # If passed along, set the training seed now.
404
- if args.seed is not None:
405
- set_seed(args.seed)
406
-
407
- # Handle the repository creation
408
- if accelerator.is_main_process:
409
- if args.output_dir is not None:
410
- os.makedirs(args.output_dir, exist_ok=True)
411
-
412
- if args.push_to_hub:
413
- repo_id = create_repo(
414
- repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token
415
- ).repo_id
416
- # Load scheduler, tokenizer and models.
417
- noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
418
- tokenizer = CLIPTokenizer.from_pretrained(
419
- args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision
420
- )
421
- text_encoder = CLIPTextModel.from_pretrained(
422
- args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision
423
- )
424
- vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision)
425
- unet = UNet2DConditionModel.from_pretrained(
426
- args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision
427
- )
428
- # freeze parameters of models to save more memory
429
- unet.requires_grad_(False)
430
- vae.requires_grad_(False)
431
-
432
- text_encoder.requires_grad_(False)
433
-
434
- # For mixed precision training we cast all non-trainable weigths (vae, non-lora text_encoder and non-lora unet) to half-precision
435
- # as these weights are only used for inference, keeping weights in full precision is not required.
436
- weight_dtype = torch.float32
437
- if accelerator.mixed_precision == "fp16":
438
- weight_dtype = torch.float16
439
- elif accelerator.mixed_precision == "bf16":
440
- weight_dtype = torch.bfloat16
441
-
442
- # Move unet, vae and text_encoder to device and cast to weight_dtype
443
- unet.to(accelerator.device, dtype=weight_dtype)
444
- vae.to(accelerator.device, dtype=weight_dtype)
445
- text_encoder.to(accelerator.device, dtype=weight_dtype)
446
-
447
- # now we will add new LoRA weights to the attention layers
448
- # It's important to realize here how many attention weights will be added and of which sizes
449
- # The sizes of the attention layers consist only of two different variables:
450
- # 1) - the "hidden_size", which is increased according to `unet.config.block_out_channels`.
451
- # 2) - the "cross attention size", which is set to `unet.config.cross_attention_dim`.
452
-
453
- # Let's first see how many attention processors we will have to set.
454
- # For Stable Diffusion, it should be equal to:
455
- # - down blocks (2x attention layers) * (2x transformer layers) * (3x down blocks) = 12
456
- # - mid blocks (2x attention layers) * (1x transformer layers) * (1x mid blocks) = 2
457
- # - up blocks (2x attention layers) * (3x transformer layers) * (3x down blocks) = 18
458
- # => 32 layers
459
-
460
- # Set correct lora layers
461
- lora_attn_procs = {}
462
- for name in unet.attn_processors.keys():
463
- cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim
464
- if name.startswith("mid_block"):
465
- hidden_size = unet.config.block_out_channels[-1]
466
- elif name.startswith("up_blocks"):
467
- block_id = int(name[len("up_blocks.")])
468
- hidden_size = list(reversed(unet.config.block_out_channels))[block_id]
469
- elif name.startswith("down_blocks"):
470
- block_id = int(name[len("down_blocks.")])
471
- hidden_size = unet.config.block_out_channels[block_id]
472
-
473
- lora_attn_procs[name] = LoRAAttnProcessor(
474
- hidden_size=hidden_size,
475
- cross_attention_dim=cross_attention_dim,
476
- rank=args.rank,
477
- )
478
-
479
- unet.set_attn_processor(lora_attn_procs)
480
-
481
- if args.enable_xformers_memory_efficient_attention:
482
- if is_xformers_available():
483
- import xformers
484
-
485
- xformers_version = version.parse(xformers.__version__)
486
- if xformers_version == version.parse("0.0.16"):
487
- logger.warn(
488
- "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
489
- )
490
- unet.enable_xformers_memory_efficient_attention()
491
- else:
492
- raise ValueError("xformers is not available. Make sure it is installed correctly")
493
-
494
- def compute_snr(timesteps):
495
- """
496
- Computes SNR as per https://github.com/TiankaiHang/Min-SNR-Diffusion-Training/blob/521b624bd70c67cee4bdf49225915f5945a872e3/guided_diffusion/gaussian_diffusion.py#L847-L849
497
- """
498
- alphas_cumprod = noise_scheduler.alphas_cumprod
499
- sqrt_alphas_cumprod = alphas_cumprod**0.5
500
- sqrt_one_minus_alphas_cumprod = (1.0 - alphas_cumprod) ** 0.5
501
-
502
- # Expand the tensors.
503
- # Adapted from https://github.com/TiankaiHang/Min-SNR-Diffusion-Training/blob/521b624bd70c67cee4bdf49225915f5945a872e3/guided_diffusion/gaussian_diffusion.py#L1026
504
- sqrt_alphas_cumprod = sqrt_alphas_cumprod.to(device=timesteps.device)[timesteps].float()
505
- while len(sqrt_alphas_cumprod.shape) < len(timesteps.shape):
506
- sqrt_alphas_cumprod = sqrt_alphas_cumprod[..., None]
507
- alpha = sqrt_alphas_cumprod.expand(timesteps.shape)
508
-
509
- sqrt_one_minus_alphas_cumprod = sqrt_one_minus_alphas_cumprod.to(device=timesteps.device)[timesteps].float()
510
- while len(sqrt_one_minus_alphas_cumprod.shape) < len(timesteps.shape):
511
- sqrt_one_minus_alphas_cumprod = sqrt_one_minus_alphas_cumprod[..., None]
512
- sigma = sqrt_one_minus_alphas_cumprod.expand(timesteps.shape)
513
-
514
- # Compute SNR.
515
- snr = (alpha / sigma) ** 2
516
- return snr
517
-
518
- lora_layers = AttnProcsLayers(unet.attn_processors)
519
-
520
- # Enable TF32 for faster training on Ampere GPUs,
521
- # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
522
- if args.allow_tf32:
523
- torch.backends.cuda.matmul.allow_tf32 = True
524
-
525
- if args.scale_lr:
526
- args.learning_rate = (
527
- args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
528
- )
529
-
530
- # Initialize the optimizer
531
- if args.use_8bit_adam:
532
- try:
533
- import bitsandbytes as bnb
534
- except ImportError:
535
- raise ImportError(
536
- "Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`"
537
- )
538
-
539
- optimizer_cls = bnb.optim.AdamW8bit
540
- else:
541
- optimizer_cls = torch.optim.AdamW
542
-
543
- optimizer = optimizer_cls(
544
- lora_layers.parameters(),
545
- lr=args.learning_rate,
546
- betas=(args.adam_beta1, args.adam_beta2),
547
- weight_decay=args.adam_weight_decay,
548
- eps=args.adam_epsilon,
549
- )
550
-
551
- # Get the datasets: you can either provide your own training and evaluation files (see below)
552
- # or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub).
553
-
554
- # In distributed training, the load_dataset function guarantees that only one local process can concurrently
555
- # download the dataset.
556
- if args.dataset_name is not None:
557
- # Downloading and loading a dataset from the hub.
558
- dataset = load_dataset(
559
- args.dataset_name,
560
- args.dataset_config_name,
561
- cache_dir=args.cache_dir,
562
- )
563
- else:
564
- data_files = {}
565
- if args.train_data_dir is not None:
566
- data_files["train"] = os.path.join(args.train_data_dir, "**")
567
- dataset = load_dataset(
568
- "imagefolder",
569
- data_files=data_files,
570
- cache_dir=args.cache_dir,
571
- )
572
- # See more about loading custom images at
573
- # https://huggingface.co/docs/datasets/v2.4.0/en/image_load#imagefolder
574
-
575
- # Preprocessing the datasets.
576
- # We need to tokenize inputs and targets.
577
- column_names = dataset["train"].column_names
578
-
579
- # 6. Get the column names for input/target.
580
- dataset_columns = DATASET_NAME_MAPPING.get(args.dataset_name, None)
581
- if args.image_column is None:
582
- image_column = dataset_columns[0] if dataset_columns is not None else column_names[0]
583
- else:
584
- image_column = args.image_column
585
- if image_column not in column_names:
586
- raise ValueError(
587
- f"--image_column' value '{args.image_column}' needs to be one of: {', '.join(column_names)}"
588
- )
589
- if args.caption_column is None:
590
- caption_column = dataset_columns[1] if dataset_columns is not None else column_names[1]
591
- else:
592
- caption_column = args.caption_column
593
- if caption_column not in column_names:
594
- raise ValueError(
595
- f"--caption_column' value '{args.caption_column}' needs to be one of: {', '.join(column_names)}"
596
- )
597
-
598
- # Preprocessing the datasets.
599
- # We need to tokenize input captions and transform the images.
600
- def tokenize_captions(examples, is_train=True):
601
- captions = []
602
- for caption in examples[caption_column]:
603
- if isinstance(caption, str):
604
- captions.append(caption)
605
- elif isinstance(caption, (list, np.ndarray)):
606
- # take a random caption if there are multiple
607
- captions.append(random.choice(caption) if is_train else caption[0])
608
- else:
609
- raise ValueError(
610
- f"Caption column `{caption_column}` should contain either strings or lists of strings."
611
- )
612
- inputs = tokenizer(
613
- captions, max_length=tokenizer.model_max_length, padding="max_length", truncation=True, return_tensors="pt"
614
- )
615
- return inputs.input_ids
616
-
617
- # Preprocessing the datasets.
618
- train_transforms = transforms.Compose(
619
- [
620
- transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR),
621
- transforms.CenterCrop(args.resolution) if args.center_crop else transforms.RandomCrop(args.resolution),
622
- transforms.RandomHorizontalFlip() if args.random_flip else transforms.Lambda(lambda x: x),
623
- transforms.ToTensor(),
624
- transforms.Normalize([0.5], [0.5]),
625
- ]
626
- )
627
-
628
- def preprocess_train(examples):
629
- images = [image.convert("RGB") for image in examples[image_column]]
630
- examples["pixel_values"] = [train_transforms(image) for image in images]
631
- examples["input_ids"] = tokenize_captions(examples)
632
- return examples
633
-
634
- with accelerator.main_process_first():
635
- if args.max_train_samples is not None:
636
- dataset["train"] = dataset["train"].shuffle(seed=args.seed).select(range(args.max_train_samples))
637
- # Set the training transforms
638
- train_dataset = dataset["train"].with_transform(preprocess_train)
639
-
640
- def collate_fn(examples):
641
- pixel_values = torch.stack([example["pixel_values"] for example in examples])
642
- pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
643
- input_ids = torch.stack([example["input_ids"] for example in examples])
644
- return {"pixel_values": pixel_values, "input_ids": input_ids}
645
-
646
- # DataLoaders creation:
647
- train_dataloader = torch.utils.data.DataLoader(
648
- train_dataset,
649
- shuffle=True,
650
- collate_fn=collate_fn,
651
- batch_size=args.train_batch_size,
652
- num_workers=args.dataloader_num_workers,
653
- )
654
-
655
- # Scheduler and math around the number of training steps.
656
- overrode_max_train_steps = False
657
- num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
658
- if args.max_train_steps is None:
659
- args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
660
- overrode_max_train_steps = True
661
-
662
- lr_scheduler = get_scheduler(
663
- args.lr_scheduler,
664
- optimizer=optimizer,
665
- num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes,
666
- num_training_steps=args.max_train_steps * accelerator.num_processes,
667
- )
668
-
669
- # Prepare everything with our `accelerator`.
670
- lora_layers, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
671
- lora_layers, optimizer, train_dataloader, lr_scheduler
672
- )
673
-
674
- # We need to recalculate our total training steps as the size of the training dataloader may have changed.
675
- num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
676
- if overrode_max_train_steps:
677
- args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
678
- # Afterwards we recalculate our number of training epochs
679
- args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
680
-
681
- # We need to initialize the trackers we use, and also store our configuration.
682
- # The trackers initializes automatically on the main process.
683
- if accelerator.is_main_process:
684
- accelerator.init_trackers("text2image-fine-tune", config=vars(args))
685
-
686
- # Train!
687
- total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
688
-
689
- logger.info("***** Running training *****")
690
- logger.info(f" Num examples = {len(train_dataset)}")
691
- logger.info(f" Num Epochs = {args.num_train_epochs}")
692
- logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
693
- logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
694
- logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
695
- logger.info(f" Total optimization steps = {args.max_train_steps}")
696
- global_step = 0
697
- first_epoch = 0
698
-
699
- # Potentially load in the weights and states from a previous save
700
- if args.resume_from_checkpoint:
701
- if args.resume_from_checkpoint != "latest":
702
- path = os.path.basename(args.resume_from_checkpoint)
703
- else:
704
- # Get the most recent checkpoint
705
- dirs = os.listdir(args.output_dir)
706
- dirs = [d for d in dirs if d.startswith("checkpoint")]
707
- dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
708
- path = dirs[-1] if len(dirs) > 0 else None
709
-
710
- if path is None:
711
- accelerator.print(
712
- f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
713
- )
714
- args.resume_from_checkpoint = None
715
- else:
716
- accelerator.print(f"Resuming from checkpoint {path}")
717
- accelerator.load_state(os.path.join(args.output_dir, path))
718
- global_step = int(path.split("-")[1])
719
-
720
- resume_global_step = global_step * args.gradient_accumulation_steps
721
- first_epoch = global_step // num_update_steps_per_epoch
722
- resume_step = resume_global_step % (num_update_steps_per_epoch * args.gradient_accumulation_steps)
723
-
724
- # Only show the progress bar once on each machine.
725
- progress_bar = tqdm(range(global_step, args.max_train_steps), disable=not accelerator.is_local_main_process)
726
- progress_bar.set_description("Steps")
727
-
728
- for epoch in range(first_epoch, args.num_train_epochs):
729
- unet.train()
730
- train_loss = 0.0
731
- for step, batch in enumerate(train_dataloader):
732
- # Skip steps until we reach the resumed step
733
- if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step:
734
- if step % args.gradient_accumulation_steps == 0:
735
- progress_bar.update(1)
736
- continue
737
-
738
- with accelerator.accumulate(unet):
739
- # Convert images to latent space
740
- latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample()
741
- latents = latents * vae.config.scaling_factor
742
-
743
- # Sample noise that we'll add to the latents
744
- noise = torch.randn_like(latents)
745
- if args.noise_offset:
746
- # https://www.crosslabs.org//blog/diffusion-with-offset-noise
747
- noise += args.noise_offset * torch.randn(
748
- (latents.shape[0], latents.shape[1], 1, 1), device=latents.device
749
- )
750
-
751
- bsz = latents.shape[0]
752
- # Sample a random timestep for each image
753
- timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device)
754
- timesteps = timesteps.long()
755
-
756
- # Add noise to the latents according to the noise magnitude at each timestep
757
- # (this is the forward diffusion process)
758
- noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
759
-
760
- # Get the text embedding for conditioning
761
- encoder_hidden_states = text_encoder(batch["input_ids"])[0]
762
-
763
- # Get the target for loss depending on the prediction type
764
- if args.prediction_type is not None:
765
- # set prediction_type of scheduler if defined
766
- noise_scheduler.register_to_config(prediction_type=args.prediction_type)
767
-
768
- if noise_scheduler.config.prediction_type == "epsilon":
769
- target = noise
770
- elif noise_scheduler.config.prediction_type == "v_prediction":
771
- target = noise_scheduler.get_velocity(latents, noise, timesteps)
772
- else:
773
- raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
774
-
775
- # Predict the noise residual and compute loss
776
- model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample
777
-
778
- if args.snr_gamma is None:
779
- loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
780
- else:
781
- # Compute loss-weights as per Section 3.4 of https://arxiv.org/abs/2303.09556.
782
- # Since we predict the noise instead of x_0, the original formulation is slightly changed.
783
- # This is discussed in Section 4.2 of the same paper.
784
- snr = compute_snr(timesteps)
785
- mse_loss_weights = (
786
- torch.stack([snr, args.snr_gamma * torch.ones_like(timesteps)], dim=1).min(dim=1)[0] / snr
787
- )
788
- # We first calculate the original loss. Then we mean over the non-batch dimensions and
789
- # rebalance the sample-wise losses with their respective loss weights.
790
- # Finally, we take the mean of the rebalanced loss.
791
- loss = F.mse_loss(model_pred.float(), target.float(), reduction="none")
792
- loss = loss.mean(dim=list(range(1, len(loss.shape)))) * mse_loss_weights
793
- loss = loss.mean()
794
-
795
- # Gather the losses across all processes for logging (if we use distributed training).
796
- avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean()
797
- train_loss += avg_loss.item() / args.gradient_accumulation_steps
798
-
799
- # Backpropagate
800
- accelerator.backward(loss)
801
- if accelerator.sync_gradients:
802
- params_to_clip = lora_layers.parameters()
803
- accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
804
- optimizer.step()
805
- lr_scheduler.step()
806
- optimizer.zero_grad()
807
-
808
- # Checks if the accelerator has performed an optimization step behind the scenes
809
- if accelerator.sync_gradients:
810
- progress_bar.update(1)
811
- global_step += 1
812
- accelerator.log({"train_loss": train_loss}, step=global_step)
813
- train_loss = 0.0
814
-
815
- if global_step % args.checkpointing_steps == 0:
816
- if accelerator.is_main_process:
817
- # _before_ saving state, check if this save would set us over the `checkpoints_total_limit`
818
- if args.checkpoints_total_limit is not None:
819
- checkpoints = os.listdir(args.output_dir)
820
- checkpoints = [d for d in checkpoints if d.startswith("checkpoint")]
821
- checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1]))
822
-
823
- # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints
824
- if len(checkpoints) >= args.checkpoints_total_limit:
825
- num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1
826
- removing_checkpoints = checkpoints[0:num_to_remove]
827
-
828
- logger.info(
829
- f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints"
830
- )
831
- logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}")
832
-
833
- for removing_checkpoint in removing_checkpoints:
834
- removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint)
835
- shutil.rmtree(removing_checkpoint)
836
-
837
- save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
838
- accelerator.save_state(save_path)
839
- logger.info(f"Saved state to {save_path}")
840
-
841
- logs = {"step_loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
842
- progress_bar.set_postfix(**logs)
843
-
844
- if global_step >= args.max_train_steps:
845
- break
846
-
847
- if accelerator.is_main_process:
848
- if args.validation_prompt is not None and epoch % args.validation_epochs == 0:
849
- logger.info(
850
- f"Running validation... \n Generating {args.num_validation_images} images with prompt:"
851
- f" {args.validation_prompt}."
852
- )
853
- # create pipeline
854
- pipeline = DiffusionPipeline.from_pretrained(
855
- args.pretrained_model_name_or_path,
856
- unet=accelerator.unwrap_model(unet),
857
- revision=args.revision,
858
- torch_dtype=weight_dtype,
859
- )
860
- pipeline = pipeline.to(accelerator.device)
861
- pipeline.set_progress_bar_config(disable=True)
862
-
863
- # run inference
864
- generator = torch.Generator(device=accelerator.device)
865
- if args.seed is not None:
866
- generator = generator.manual_seed(args.seed)
867
- images = []
868
- for _ in range(args.num_validation_images):
869
- images.append(
870
- pipeline(args.validation_prompt, num_inference_steps=30, generator=generator).images[0]
871
- )
872
-
873
- for tracker in accelerator.trackers:
874
- if tracker.name == "tensorboard":
875
- np_images = np.stack([np.asarray(img) for img in images])
876
- tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC")
877
- if tracker.name == "wandb":
878
- tracker.log(
879
- {
880
- "validation": [
881
- wandb.Image(image, caption=f"{i}: {args.validation_prompt}")
882
- for i, image in enumerate(images)
883
- ]
884
- }
885
- )
886
-
887
- del pipeline
888
- torch.cuda.empty_cache()
889
-
890
- # Save the lora layers
891
- accelerator.wait_for_everyone()
892
- if accelerator.is_main_process:
893
- unet = unet.to(torch.float32)
894
- unet.save_attn_procs(args.output_dir)
895
-
896
- if args.push_to_hub:
897
- save_model_card(
898
- repo_id,
899
- images=images,
900
- base_model=args.pretrained_model_name_or_path,
901
- dataset_name=args.dataset_name,
902
- repo_folder=args.output_dir,
903
- )
904
- upload_folder(
905
- repo_id=repo_id,
906
- folder_path=args.output_dir,
907
- commit_message="End of training",
908
- ignore_patterns=["step_*", "epoch_*"],
909
- )
910
-
911
- # Final inference
912
- # Load previous pipeline
913
- pipeline = DiffusionPipeline.from_pretrained(
914
- args.pretrained_model_name_or_path, revision=args.revision, torch_dtype=weight_dtype
915
- )
916
- pipeline = pipeline.to(accelerator.device)
917
-
918
- # load attention processors
919
- pipeline.unet.load_attn_procs(args.output_dir)
920
-
921
- # run inference
922
- generator = torch.Generator(device=accelerator.device)
923
- if args.seed is not None:
924
- generator = generator.manual_seed(args.seed)
925
- images = []
926
- for _ in range(args.num_validation_images):
927
- images.append(pipeline(args.validation_prompt, num_inference_steps=30, generator=generator).images[0])
928
-
929
- if accelerator.is_main_process:
930
- for tracker in accelerator.trackers:
931
- if len(images) != 0:
932
- if tracker.name == "tensorboard":
933
- np_images = np.stack([np.asarray(img) for img in images])
934
- tracker.writer.add_images("test", np_images, epoch, dataformats="NHWC")
935
- if tracker.name == "wandb":
936
- tracker.log(
937
- {
938
- "test": [
939
- wandb.Image(image, caption=f"{i}: {args.validation_prompt}")
940
- for i, image in enumerate(images)
941
- ]
942
- }
943
- )
944
-
945
- accelerator.end_training()
946
-
947
-
948
- if __name__ == "__main__":
949
- main()
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/fixtures/custom_pipeline/what_ever.py DELETED
@@ -1,101 +0,0 @@
- # Copyright 2023 The HuggingFace Team. All rights reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
-
- # limitations under the License.
-
-
- from typing import Optional, Tuple, Union
-
- import torch
-
- from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
-
-
- class CustomLocalPipeline(DiffusionPipeline):
-     r"""
-     This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
-     library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
-
-     Parameters:
-         unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image.
-         scheduler ([`SchedulerMixin`]):
-             A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of
-             [`DDPMScheduler`], or [`DDIMScheduler`].
-     """
-
-     def __init__(self, unet, scheduler):
-         super().__init__()
-         self.register_modules(unet=unet, scheduler=scheduler)
-
-     @torch.no_grad()
-     def __call__(
-         self,
-         batch_size: int = 1,
-         generator: Optional[torch.Generator] = None,
-         num_inference_steps: int = 50,
-         output_type: Optional[str] = "pil",
-         return_dict: bool = True,
-         **kwargs,
-     ) -> Union[ImagePipelineOutput, Tuple]:
-         r"""
-         Args:
-             batch_size (`int`, *optional*, defaults to 1):
-                 The number of images to generate.
-             generator (`torch.Generator`, *optional*):
-                 A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
-                 deterministic.
-             eta (`float`, *optional*, defaults to 0.0):
-                 The eta parameter which controls the scale of the variance (0 is DDIM and 1 is one type of DDPM).
-             num_inference_steps (`int`, *optional*, defaults to 50):
-                 The number of denoising steps. More denoising steps usually lead to a higher quality image at the
-                 expense of slower inference.
-             output_type (`str`, *optional*, defaults to `"pil"`):
-                 The output format of the generate image. Choose between
-                 [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
-             return_dict (`bool`, *optional*, defaults to `True`):
-                 Whether or not to return a [`~pipeline_utils.ImagePipelineOutput`] instead of a plain tuple.
-
-         Returns:
-             [`~pipeline_utils.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if
-             `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the
-             generated images.
-         """
-
-         # Sample gaussian noise to begin loop
-         image = torch.randn(
-             (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
-             generator=generator,
-         )
-         image = image.to(self.device)
-
-         # set step values
-         self.scheduler.set_timesteps(num_inference_steps)
-
-         for t in self.progress_bar(self.scheduler.timesteps):
-             # 1. predict noise model_output
-             model_output = self.unet(image, t).sample
-
-             # 2. predict previous mean of image x_t-1 and add variance depending on eta
-             # eta corresponds to η in paper and should be between [0, 1]
-             # do x_t -> x_t-1
-             image = self.scheduler.step(model_output, t, image).prev_sample
-
-         image = (image / 2 + 0.5).clamp(0, 1)
-         image = image.cpu().permute(0, 2, 3, 1).numpy()
-         if output_type == "pil":
-             image = self.numpy_to_pil(image)
-
-         if not return_dict:
-             return (image,), "This is a local test"
-
-         return ImagePipelineOutput(images=image), "This is a local test"
 
spaces/Andy1621/uniformer_image_detection/configs/_base_/models/cascade_rcnn_r50_fpn.py DELETED
@@ -1,179 +0,0 @@
- # model settings
- model = dict(
-     type='CascadeRCNN',
-     pretrained='torchvision://resnet50',
-     backbone=dict(
-         type='ResNet',
-         depth=50,
-         num_stages=4,
-         out_indices=(0, 1, 2, 3),
-         frozen_stages=1,
-         norm_cfg=dict(type='BN', requires_grad=True),
-         norm_eval=True,
-         style='pytorch'),
-     neck=dict(
-         type='FPN',
-         in_channels=[256, 512, 1024, 2048],
-         out_channels=256,
-         num_outs=5),
-     rpn_head=dict(
-         type='RPNHead',
-         in_channels=256,
-         feat_channels=256,
-         anchor_generator=dict(
-             type='AnchorGenerator',
-             scales=[8],
-             ratios=[0.5, 1.0, 2.0],
-             strides=[4, 8, 16, 32, 64]),
-         bbox_coder=dict(
-             type='DeltaXYWHBBoxCoder',
-             target_means=[.0, .0, .0, .0],
-             target_stds=[1.0, 1.0, 1.0, 1.0]),
-         loss_cls=dict(
-             type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
-         loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
-     roi_head=dict(
-         type='CascadeRoIHead',
-         num_stages=3,
-         stage_loss_weights=[1, 0.5, 0.25],
-         bbox_roi_extractor=dict(
-             type='SingleRoIExtractor',
-             roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
-             out_channels=256,
-             featmap_strides=[4, 8, 16, 32]),
-         bbox_head=[
-             dict(
-                 type='Shared2FCBBoxHead',
-                 in_channels=256,
-                 fc_out_channels=1024,
-                 roi_feat_size=7,
-                 num_classes=80,
-                 bbox_coder=dict(
-                     type='DeltaXYWHBBoxCoder',
-                     target_means=[0., 0., 0., 0.],
-                     target_stds=[0.1, 0.1, 0.2, 0.2]),
-                 reg_class_agnostic=True,
-                 loss_cls=dict(
-                     type='CrossEntropyLoss',
-                     use_sigmoid=False,
-                     loss_weight=1.0),
-                 loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
-                                loss_weight=1.0)),
-             dict(
-                 type='Shared2FCBBoxHead',
-                 in_channels=256,
-                 fc_out_channels=1024,
-                 roi_feat_size=7,
-                 num_classes=80,
-                 bbox_coder=dict(
-                     type='DeltaXYWHBBoxCoder',
-                     target_means=[0., 0., 0., 0.],
-                     target_stds=[0.05, 0.05, 0.1, 0.1]),
-                 reg_class_agnostic=True,
-                 loss_cls=dict(
-                     type='CrossEntropyLoss',
-                     use_sigmoid=False,
-                     loss_weight=1.0),
-                 loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
-                                loss_weight=1.0)),
-             dict(
-                 type='Shared2FCBBoxHead',
-                 in_channels=256,
-                 fc_out_channels=1024,
-                 roi_feat_size=7,
-                 num_classes=80,
-                 bbox_coder=dict(
-                     type='DeltaXYWHBBoxCoder',
-                     target_means=[0., 0., 0., 0.],
-                     target_stds=[0.033, 0.033, 0.067, 0.067]),
-                 reg_class_agnostic=True,
-                 loss_cls=dict(
-                     type='CrossEntropyLoss',
-                     use_sigmoid=False,
-                     loss_weight=1.0),
-                 loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
-         ]),
-     # model training and testing settings
-     train_cfg=dict(
-         rpn=dict(
-             assigner=dict(
-                 type='MaxIoUAssigner',
-                 pos_iou_thr=0.7,
-                 neg_iou_thr=0.3,
-                 min_pos_iou=0.3,
-                 match_low_quality=True,
-                 ignore_iof_thr=-1),
-             sampler=dict(
-                 type='RandomSampler',
-                 num=256,
-                 pos_fraction=0.5,
-                 neg_pos_ub=-1,
-                 add_gt_as_proposals=False),
-             allowed_border=0,
-             pos_weight=-1,
-             debug=False),
-         rpn_proposal=dict(
-             nms_pre=2000,
-             max_per_img=2000,
-             nms=dict(type='nms', iou_threshold=0.7),
-             min_bbox_size=0),
-         rcnn=[
-             dict(
-                 assigner=dict(
-                     type='MaxIoUAssigner',
-                     pos_iou_thr=0.5,
-                     neg_iou_thr=0.5,
-                     min_pos_iou=0.5,
-                     match_low_quality=False,
-                     ignore_iof_thr=-1),
-                 sampler=dict(
-                     type='RandomSampler',
-                     num=512,
-                     pos_fraction=0.25,
-                     neg_pos_ub=-1,
-                     add_gt_as_proposals=True),
-                 pos_weight=-1,
-                 debug=False),
-             dict(
-                 assigner=dict(
-                     type='MaxIoUAssigner',
-                     pos_iou_thr=0.6,
-                     neg_iou_thr=0.6,
-                     min_pos_iou=0.6,
-                     match_low_quality=False,
-                     ignore_iof_thr=-1),
-                 sampler=dict(
-                     type='RandomSampler',
-                     num=512,
-                     pos_fraction=0.25,
-                     neg_pos_ub=-1,
-                     add_gt_as_proposals=True),
-                 pos_weight=-1,
-                 debug=False),
-             dict(
-                 assigner=dict(
-                     type='MaxIoUAssigner',
-                     pos_iou_thr=0.7,
-                     neg_iou_thr=0.7,
-                     min_pos_iou=0.7,
-                     match_low_quality=False,
-                     ignore_iof_thr=-1),
-                 sampler=dict(
-                     type='RandomSampler',
-                     num=512,
-                     pos_fraction=0.25,
-                     neg_pos_ub=-1,
-                     add_gt_as_proposals=True),
-                 pos_weight=-1,
-                 debug=False)
-         ]),
-     test_cfg=dict(
-         rpn=dict(
-             nms_pre=1000,
-             max_per_img=1000,
-             nms=dict(type='nms', iou_threshold=0.7),
-             min_bbox_size=0),
-         rcnn=dict(
-             score_thr=0.05,
-             nms=dict(type='nms', iou_threshold=0.5),
-             max_per_img=100)))
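
A minimal sketch (not taken from the diff) of how a model config like the one above is typically consumed in MMDetection 2.x; the local path is illustrative.

from mmcv import Config
from mmdet.models import build_detector

# Load the dict-style config and build the three-stage Cascade R-CNN it
# describes; train_cfg and test_cfg live inside cfg.model in this config style.
cfg = Config.fromfile('configs/_base_/models/cascade_rcnn_r50_fpn.py')
model = build_detector(cfg.model)
model.init_weights()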
 
spaces/Andy1621/uniformer_image_detection/configs/fpg/faster_rcnn_r50_fpg-chn128_crop640_50e_coco.py DELETED
@@ -1,9 +0,0 @@
- _base_ = 'faster_rcnn_r50_fpg_crop640_50e_coco.py'
- 
- norm_cfg = dict(type='BN', requires_grad=True)
- model = dict(
-     neck=dict(out_channels=128, inter_channels=128),
-     rpn_head=dict(in_channels=128),
-     roi_head=dict(
-         bbox_roi_extractor=dict(out_channels=128),
-         bbox_head=dict(in_channels=128)))
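
This child config only narrows the FPG neck and heads to 128 channels; everything else is inherited from the _base_ file. A hedged sketch of checking the merged result, assuming both files sit under an MMDetection configs/ tree:

from mmcv import Config

cfg = Config.fromfile(
    'configs/fpg/faster_rcnn_r50_fpg-chn128_crop640_50e_coco.py')
print(cfg.model.neck.out_channels)               # 128, set by the override above
print(cfg.model.roi_head.bbox_head.in_channels)  # 128, likewise overridden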
 
spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/coder/base_bbox_coder.py DELETED
@@ -1,17 +0,0 @@
- from abc import ABCMeta, abstractmethod
- 
- 
- class BaseBBoxCoder(metaclass=ABCMeta):
-     """Base bounding box coder."""
- 
-     def __init__(self, **kwargs):
-         pass
- 
-     @abstractmethod
-     def encode(self, bboxes, gt_bboxes):
-         """Encode deltas between bboxes and ground truth boxes."""
- 
-     @abstractmethod
-     def decode(self, bboxes, bboxes_pred):
-         """Decode the predicted bboxes according to prediction and base
-         boxes."""
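
For illustration only (not from the diff): a do-nothing coder that satisfies the BaseBBoxCoder contract above; real coders such as DeltaXYWHBBoxCoder return box deltas from encode and absolute boxes from decode.

from mmdet.core.bbox import BaseBBoxCoder


class IdentityBBoxCoder(BaseBBoxCoder):
    """Pass-through coder used only to show the encode/decode contract."""

    def encode(self, bboxes, gt_bboxes):
        # Treat the ground-truth boxes themselves as the "deltas".
        return gt_bboxes

    def decode(self, bboxes, bboxes_pred):
        # Predictions are already absolute boxes, so return them unchanged.
        return bboxes_pred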
 
spaces/Andy1621/uniformer_image_detection/mmdet/datasets/pipelines/__init__.py DELETED
@@ -1,25 +0,0 @@
- from .auto_augment import (AutoAugment, BrightnessTransform, ColorTransform,
-                            ContrastTransform, EqualizeTransform, Rotate, Shear,
-                            Translate)
- from .compose import Compose
- from .formating import (Collect, DefaultFormatBundle, ImageToTensor,
-                         ToDataContainer, ToTensor, Transpose, to_tensor)
- from .instaboost import InstaBoost
- from .loading import (LoadAnnotations, LoadImageFromFile, LoadImageFromWebcam,
-                       LoadMultiChannelImageFromFiles, LoadProposals)
- from .test_time_aug import MultiScaleFlipAug
- from .transforms import (Albu, CutOut, Expand, MinIoURandomCrop, Normalize,
-                          Pad, PhotoMetricDistortion, RandomCenterCropPad,
-                          RandomCrop, RandomFlip, Resize, SegRescale)
- 
- __all__ = [
-     'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
-     'Transpose', 'Collect', 'DefaultFormatBundle', 'LoadAnnotations',
-     'LoadImageFromFile', 'LoadImageFromWebcam',
-     'LoadMultiChannelImageFromFiles', 'LoadProposals', 'MultiScaleFlipAug',
-     'Resize', 'RandomFlip', 'Pad', 'RandomCrop', 'Normalize', 'SegRescale',
-     'MinIoURandomCrop', 'Expand', 'PhotoMetricDistortion', 'Albu',
-     'InstaBoost', 'RandomCenterCropPad', 'AutoAugment', 'CutOut', 'Shear',
-     'Rotate', 'ColorTransform', 'EqualizeTransform', 'BrightnessTransform',
-     'ContrastTransform', 'Translate'
- ]
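
A sketch of a typical training pipeline assembled from the classes exported above; the values are illustrative defaults, not taken from any particular config in this commit.

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize',
         mean=[123.675, 116.28, 103.53],
         std=[58.395, 57.12, 57.375],
         to_rgb=True),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]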
 
spaces/Andy1621/uniformer_image_detection/mmdet/datasets/pipelines/transforms.py DELETED
@@ -1,1811 +0,0 @@
1
- import copy
2
- import inspect
3
-
4
- import mmcv
5
- import numpy as np
6
- from numpy import random
7
-
8
- from mmdet.core import PolygonMasks
9
- from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps
10
- from ..builder import PIPELINES
11
-
12
- try:
13
- from imagecorruptions import corrupt
14
- except ImportError:
15
- corrupt = None
16
-
17
- try:
18
- import albumentations
19
- from albumentations import Compose
20
- except ImportError:
21
- albumentations = None
22
- Compose = None
23
-
24
-
25
- @PIPELINES.register_module()
26
- class Resize(object):
27
- """Resize images & bbox & mask.
28
-
29
- This transform resizes the input image to some scale. Bboxes and masks are
30
- then resized with the same scale factor. If the input dict contains the key
31
- "scale", then the scale in the input dict is used, otherwise the specified
32
- scale in the init method is used. If the input dict contains the key
33
- "scale_factor" (if MultiScaleFlipAug does not give img_scale but
34
- scale_factor), the actual scale will be computed by image shape and
35
- scale_factor.
36
-
37
- `img_scale` can either be a tuple (single-scale) or a list of tuple
38
- (multi-scale). There are 3 multiscale modes:
39
-
40
- - ``ratio_range is not None``: randomly sample a ratio from the ratio \
41
- range and multiply it with the image scale.
42
- - ``ratio_range is None`` and ``multiscale_mode == "range"``: randomly \
43
- sample a scale from the multiscale range.
44
- - ``ratio_range is None`` and ``multiscale_mode == "value"``: randomly \
45
- sample a scale from multiple scales.
46
-
47
- Args:
48
- img_scale (tuple or list[tuple]): Images scales for resizing.
49
- multiscale_mode (str): Either "range" or "value".
50
- ratio_range (tuple[float]): (min_ratio, max_ratio)
51
- keep_ratio (bool): Whether to keep the aspect ratio when resizing the
52
- image.
53
- bbox_clip_border (bool, optional): Whether clip the objects outside
54
- the border of the image. Defaults to True.
55
- backend (str): Image resize backend, choices are 'cv2' and 'pillow'.
56
- These two backends generates slightly different results. Defaults
57
- to 'cv2'.
58
- override (bool, optional): Whether to override `scale` and
59
- `scale_factor` so as to call resize twice. Default False. If True,
60
- after the first resizing, the existed `scale` and `scale_factor`
61
- will be ignored so the second resizing can be allowed.
62
- This option is a work-around for multiple times of resize in DETR.
63
- Defaults to False.
64
- """
65
-
66
- def __init__(self,
67
- img_scale=None,
68
- multiscale_mode='range',
69
- ratio_range=None,
70
- keep_ratio=True,
71
- bbox_clip_border=True,
72
- backend='cv2',
73
- override=False):
74
- if img_scale is None:
75
- self.img_scale = None
76
- else:
77
- if isinstance(img_scale, list):
78
- self.img_scale = img_scale
79
- else:
80
- self.img_scale = [img_scale]
81
- assert mmcv.is_list_of(self.img_scale, tuple)
82
-
83
- if ratio_range is not None:
84
- # mode 1: given a scale and a range of image ratio
85
- assert len(self.img_scale) == 1
86
- else:
87
- # mode 2: given multiple scales or a range of scales
88
- assert multiscale_mode in ['value', 'range']
89
-
90
- self.backend = backend
91
- self.multiscale_mode = multiscale_mode
92
- self.ratio_range = ratio_range
93
- self.keep_ratio = keep_ratio
94
- # TODO: refactor the override option in Resize
95
- self.override = override
96
- self.bbox_clip_border = bbox_clip_border
97
-
98
- @staticmethod
99
- def random_select(img_scales):
100
- """Randomly select an img_scale from given candidates.
101
-
102
- Args:
103
- img_scales (list[tuple]): Images scales for selection.
104
-
105
- Returns:
106
- (tuple, int): Returns a tuple ``(img_scale, scale_dix)``, \
107
- where ``img_scale`` is the selected image scale and \
108
- ``scale_idx`` is the selected index in the given candidates.
109
- """
110
-
111
- assert mmcv.is_list_of(img_scales, tuple)
112
- scale_idx = np.random.randint(len(img_scales))
113
- img_scale = img_scales[scale_idx]
114
- return img_scale, scale_idx
115
-
116
- @staticmethod
117
- def random_sample(img_scales):
118
- """Randomly sample an img_scale when ``multiscale_mode=='range'``.
119
-
120
- Args:
121
- img_scales (list[tuple]): Images scale range for sampling.
122
- There must be two tuples in img_scales, which specify the lower
123
- and upper bound of image scales.
124
-
125
- Returns:
126
- (tuple, None): Returns a tuple ``(img_scale, None)``, where \
127
- ``img_scale`` is sampled scale and None is just a placeholder \
128
- to be consistent with :func:`random_select`.
129
- """
130
-
131
- assert mmcv.is_list_of(img_scales, tuple) and len(img_scales) == 2
132
- img_scale_long = [max(s) for s in img_scales]
133
- img_scale_short = [min(s) for s in img_scales]
134
- long_edge = np.random.randint(
135
- min(img_scale_long),
136
- max(img_scale_long) + 1)
137
- short_edge = np.random.randint(
138
- min(img_scale_short),
139
- max(img_scale_short) + 1)
140
- img_scale = (long_edge, short_edge)
141
- return img_scale, None
142
-
143
- @staticmethod
144
- def random_sample_ratio(img_scale, ratio_range):
145
- """Randomly sample an img_scale when ``ratio_range`` is specified.
146
-
147
- A ratio will be randomly sampled from the range specified by
148
- ``ratio_range``. Then it would be multiplied with ``img_scale`` to
149
- generate sampled scale.
150
-
151
- Args:
152
- img_scale (tuple): Images scale base to multiply with ratio.
153
- ratio_range (tuple[float]): The minimum and maximum ratio to scale
154
- the ``img_scale``.
155
-
156
- Returns:
157
- (tuple, None): Returns a tuple ``(scale, None)``, where \
158
- ``scale`` is sampled ratio multiplied with ``img_scale`` and \
159
- None is just a placeholder to be consistent with \
160
- :func:`random_select`.
161
- """
162
-
163
- assert isinstance(img_scale, tuple) and len(img_scale) == 2
164
- min_ratio, max_ratio = ratio_range
165
- assert min_ratio <= max_ratio
166
- ratio = np.random.random_sample() * (max_ratio - min_ratio) + min_ratio
167
- scale = int(img_scale[0] * ratio), int(img_scale[1] * ratio)
168
- return scale, None
169
-
170
- def _random_scale(self, results):
171
- """Randomly sample an img_scale according to ``ratio_range`` and
172
- ``multiscale_mode``.
173
-
174
- If ``ratio_range`` is specified, a ratio will be sampled and be
175
- multiplied with ``img_scale``.
176
- If multiple scales are specified by ``img_scale``, a scale will be
177
- sampled according to ``multiscale_mode``.
178
- Otherwise, single scale will be used.
179
-
180
- Args:
181
- results (dict): Result dict from :obj:`dataset`.
182
-
183
- Returns:
184
- dict: Two new keys 'scale` and 'scale_idx` are added into \
185
- ``results``, which would be used by subsequent pipelines.
186
- """
187
-
188
- if self.ratio_range is not None:
189
- scale, scale_idx = self.random_sample_ratio(
190
- self.img_scale[0], self.ratio_range)
191
- elif len(self.img_scale) == 1:
192
- scale, scale_idx = self.img_scale[0], 0
193
- elif self.multiscale_mode == 'range':
194
- scale, scale_idx = self.random_sample(self.img_scale)
195
- elif self.multiscale_mode == 'value':
196
- scale, scale_idx = self.random_select(self.img_scale)
197
- else:
198
- raise NotImplementedError
199
-
200
- results['scale'] = scale
201
- results['scale_idx'] = scale_idx
202
-
203
- def _resize_img(self, results):
204
- """Resize images with ``results['scale']``."""
205
- for key in results.get('img_fields', ['img']):
206
- if self.keep_ratio:
207
- img, scale_factor = mmcv.imrescale(
208
- results[key],
209
- results['scale'],
210
- return_scale=True,
211
- backend=self.backend)
212
- # the w_scale and h_scale has minor difference
213
- # a real fix should be done in the mmcv.imrescale in the future
214
- new_h, new_w = img.shape[:2]
215
- h, w = results[key].shape[:2]
216
- w_scale = new_w / w
217
- h_scale = new_h / h
218
- else:
219
- img, w_scale, h_scale = mmcv.imresize(
220
- results[key],
221
- results['scale'],
222
- return_scale=True,
223
- backend=self.backend)
224
- results[key] = img
225
-
226
- scale_factor = np.array([w_scale, h_scale, w_scale, h_scale],
227
- dtype=np.float32)
228
- results['img_shape'] = img.shape
229
- # in case that there is no padding
230
- results['pad_shape'] = img.shape
231
- results['scale_factor'] = scale_factor
232
- results['keep_ratio'] = self.keep_ratio
233
-
234
- def _resize_bboxes(self, results):
235
- """Resize bounding boxes with ``results['scale_factor']``."""
236
- for key in results.get('bbox_fields', []):
237
- bboxes = results[key] * results['scale_factor']
238
- if self.bbox_clip_border:
239
- img_shape = results['img_shape']
240
- bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1])
241
- bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0])
242
- results[key] = bboxes
243
-
244
- def _resize_masks(self, results):
245
- """Resize masks with ``results['scale']``"""
246
- for key in results.get('mask_fields', []):
247
- if results[key] is None:
248
- continue
249
- if self.keep_ratio:
250
- results[key] = results[key].rescale(results['scale'])
251
- else:
252
- results[key] = results[key].resize(results['img_shape'][:2])
253
-
254
- def _resize_seg(self, results):
255
- """Resize semantic segmentation map with ``results['scale']``."""
256
- for key in results.get('seg_fields', []):
257
- if self.keep_ratio:
258
- gt_seg = mmcv.imrescale(
259
- results[key],
260
- results['scale'],
261
- interpolation='nearest',
262
- backend=self.backend)
263
- else:
264
- gt_seg = mmcv.imresize(
265
- results[key],
266
- results['scale'],
267
- interpolation='nearest',
268
- backend=self.backend)
269
- results['gt_semantic_seg'] = gt_seg
270
-
271
- def __call__(self, results):
272
- """Call function to resize images, bounding boxes, masks, semantic
273
- segmentation map.
274
-
275
- Args:
276
- results (dict): Result dict from loading pipeline.
277
-
278
- Returns:
279
- dict: Resized results, 'img_shape', 'pad_shape', 'scale_factor', \
280
- 'keep_ratio' keys are added into result dict.
281
- """
282
-
283
- if 'scale' not in results:
284
- if 'scale_factor' in results:
285
- img_shape = results['img'].shape[:2]
286
- scale_factor = results['scale_factor']
287
- assert isinstance(scale_factor, float)
288
- results['scale'] = tuple(
289
- [int(x * scale_factor) for x in img_shape][::-1])
290
- else:
291
- self._random_scale(results)
292
- else:
293
- if not self.override:
294
- assert 'scale_factor' not in results, (
295
- 'scale and scale_factor cannot be both set.')
296
- else:
297
- results.pop('scale')
298
- if 'scale_factor' in results:
299
- results.pop('scale_factor')
300
- self._random_scale(results)
301
-
302
- self._resize_img(results)
303
- self._resize_bboxes(results)
304
- self._resize_masks(results)
305
- self._resize_seg(results)
306
- return results
307
-
308
- def __repr__(self):
309
- repr_str = self.__class__.__name__
310
- repr_str += f'(img_scale={self.img_scale}, '
311
- repr_str += f'multiscale_mode={self.multiscale_mode}, '
312
- repr_str += f'ratio_range={self.ratio_range}, '
313
- repr_str += f'keep_ratio={self.keep_ratio}, '
314
- repr_str += f'bbox_clip_border={self.bbox_clip_border})'
315
- return repr_str
316
-
317
-
318
- @PIPELINES.register_module()
319
- class RandomFlip(object):
320
- """Flip the image & bbox & mask.
321
-
322
- If the input dict contains the key "flip", then the flag will be used,
323
- otherwise it will be randomly decided by a ratio specified in the init
324
- method.
325
-
326
- When random flip is enabled, ``flip_ratio``/``direction`` can either be a
327
- float/string or tuple of float/string. There are 3 flip modes:
328
-
329
- - ``flip_ratio`` is float, ``direction`` is string: the image will be
330
- ``direction``ly flipped with probability of ``flip_ratio`` .
331
- E.g., ``flip_ratio=0.5``, ``direction='horizontal'``,
332
- then image will be horizontally flipped with probability of 0.5.
333
- - ``flip_ratio`` is float, ``direction`` is list of string: the image wil
334
- be ``direction[i]``ly flipped with probability of
335
- ``flip_ratio/len(direction)``.
336
- E.g., ``flip_ratio=0.5``, ``direction=['horizontal', 'vertical']``,
337
- then image will be horizontally flipped with probability of 0.25,
338
- vertically with probability of 0.25.
339
- - ``flip_ratio`` is list of float, ``direction`` is list of string:
340
- given ``len(flip_ratio) == len(direction)``, the image wil
341
- be ``direction[i]``ly flipped with probability of ``flip_ratio[i]``.
342
- E.g., ``flip_ratio=[0.3, 0.5]``, ``direction=['horizontal',
343
- 'vertical']``, then image will be horizontally flipped with probability
344
- of 0.3, vertically with probability of 0.5
345
-
346
- Args:
347
- flip_ratio (float | list[float], optional): The flipping probability.
348
- Default: None.
349
- direction(str | list[str], optional): The flipping direction. Options
350
- are 'horizontal', 'vertical', 'diagonal'. Default: 'horizontal'.
351
- If input is a list, the length must equal ``flip_ratio``. Each
352
- element in ``flip_ratio`` indicates the flip probability of
353
- corresponding direction.
354
- """
355
-
356
- def __init__(self, flip_ratio=None, direction='horizontal'):
357
- if isinstance(flip_ratio, list):
358
- assert mmcv.is_list_of(flip_ratio, float)
359
- assert 0 <= sum(flip_ratio) <= 1
360
- elif isinstance(flip_ratio, float):
361
- assert 0 <= flip_ratio <= 1
362
- elif flip_ratio is None:
363
- pass
364
- else:
365
- raise ValueError('flip_ratios must be None, float, '
366
- 'or list of float')
367
- self.flip_ratio = flip_ratio
368
-
369
- valid_directions = ['horizontal', 'vertical', 'diagonal']
370
- if isinstance(direction, str):
371
- assert direction in valid_directions
372
- elif isinstance(direction, list):
373
- assert mmcv.is_list_of(direction, str)
374
- assert set(direction).issubset(set(valid_directions))
375
- else:
376
- raise ValueError('direction must be either str or list of str')
377
- self.direction = direction
378
-
379
- if isinstance(flip_ratio, list):
380
- assert len(self.flip_ratio) == len(self.direction)
381
-
382
- def bbox_flip(self, bboxes, img_shape, direction):
383
- """Flip bboxes horizontally.
384
-
385
- Args:
386
- bboxes (numpy.ndarray): Bounding boxes, shape (..., 4*k)
387
- img_shape (tuple[int]): Image shape (height, width)
388
- direction (str): Flip direction. Options are 'horizontal',
389
- 'vertical'.
390
-
391
- Returns:
392
- numpy.ndarray: Flipped bounding boxes.
393
- """
394
-
395
- assert bboxes.shape[-1] % 4 == 0
396
- flipped = bboxes.copy()
397
- if direction == 'horizontal':
398
- w = img_shape[1]
399
- flipped[..., 0::4] = w - bboxes[..., 2::4]
400
- flipped[..., 2::4] = w - bboxes[..., 0::4]
401
- elif direction == 'vertical':
402
- h = img_shape[0]
403
- flipped[..., 1::4] = h - bboxes[..., 3::4]
404
- flipped[..., 3::4] = h - bboxes[..., 1::4]
405
- elif direction == 'diagonal':
406
- w = img_shape[1]
407
- h = img_shape[0]
408
- flipped[..., 0::4] = w - bboxes[..., 2::4]
409
- flipped[..., 1::4] = h - bboxes[..., 3::4]
410
- flipped[..., 2::4] = w - bboxes[..., 0::4]
411
- flipped[..., 3::4] = h - bboxes[..., 1::4]
412
- else:
413
- raise ValueError(f"Invalid flipping direction '{direction}'")
414
- return flipped
415
-
416
- def __call__(self, results):
417
- """Call function to flip bounding boxes, masks, semantic segmentation
418
- maps.
419
-
420
- Args:
421
- results (dict): Result dict from loading pipeline.
422
-
423
- Returns:
424
- dict: Flipped results, 'flip', 'flip_direction' keys are added \
425
- into result dict.
426
- """
427
-
428
- if 'flip' not in results:
429
- if isinstance(self.direction, list):
430
- # None means non-flip
431
- direction_list = self.direction + [None]
432
- else:
433
- # None means non-flip
434
- direction_list = [self.direction, None]
435
-
436
- if isinstance(self.flip_ratio, list):
437
- non_flip_ratio = 1 - sum(self.flip_ratio)
438
- flip_ratio_list = self.flip_ratio + [non_flip_ratio]
439
- else:
440
- non_flip_ratio = 1 - self.flip_ratio
441
- # exclude non-flip
442
- single_ratio = self.flip_ratio / (len(direction_list) - 1)
443
- flip_ratio_list = [single_ratio] * (len(direction_list) -
444
- 1) + [non_flip_ratio]
445
-
446
- cur_dir = np.random.choice(direction_list, p=flip_ratio_list)
447
-
448
- results['flip'] = cur_dir is not None
449
- if 'flip_direction' not in results:
450
- results['flip_direction'] = cur_dir
451
- if results['flip']:
452
- # flip image
453
- for key in results.get('img_fields', ['img']):
454
- results[key] = mmcv.imflip(
455
- results[key], direction=results['flip_direction'])
456
- # flip bboxes
457
- for key in results.get('bbox_fields', []):
458
- results[key] = self.bbox_flip(results[key],
459
- results['img_shape'],
460
- results['flip_direction'])
461
- # flip masks
462
- for key in results.get('mask_fields', []):
463
- results[key] = results[key].flip(results['flip_direction'])
464
-
465
- # flip segs
466
- for key in results.get('seg_fields', []):
467
- results[key] = mmcv.imflip(
468
- results[key], direction=results['flip_direction'])
469
- return results
470
-
471
- def __repr__(self):
472
- return self.__class__.__name__ + f'(flip_ratio={self.flip_ratio})'
473
-
474
-
475
- @PIPELINES.register_module()
476
- class Pad(object):
477
- """Pad the image & mask.
478
-
479
- There are two padding modes: (1) pad to a fixed size and (2) pad to the
480
- minimum size that is divisible by some number.
481
- Added keys are "pad_shape", "pad_fixed_size", "pad_size_divisor",
482
-
483
- Args:
484
- size (tuple, optional): Fixed padding size.
485
- size_divisor (int, optional): The divisor of padded size.
486
- pad_val (float, optional): Padding value, 0 by default.
487
- """
488
-
489
- def __init__(self, size=None, size_divisor=None, pad_val=0):
490
- self.size = size
491
- self.size_divisor = size_divisor
492
- self.pad_val = pad_val
493
- # only one of size and size_divisor should be valid
494
- assert size is not None or size_divisor is not None
495
- assert size is None or size_divisor is None
496
-
497
- def _pad_img(self, results):
498
- """Pad images according to ``self.size``."""
499
- for key in results.get('img_fields', ['img']):
500
- if self.size is not None:
501
- padded_img = mmcv.impad(
502
- results[key], shape=self.size, pad_val=self.pad_val)
503
- elif self.size_divisor is not None:
504
- padded_img = mmcv.impad_to_multiple(
505
- results[key], self.size_divisor, pad_val=self.pad_val)
506
- results[key] = padded_img
507
- results['pad_shape'] = padded_img.shape
508
- results['pad_fixed_size'] = self.size
509
- results['pad_size_divisor'] = self.size_divisor
510
-
511
- def _pad_masks(self, results):
512
- """Pad masks according to ``results['pad_shape']``."""
513
- pad_shape = results['pad_shape'][:2]
514
- for key in results.get('mask_fields', []):
515
- results[key] = results[key].pad(pad_shape, pad_val=self.pad_val)
516
-
517
- def _pad_seg(self, results):
518
- """Pad semantic segmentation map according to
519
- ``results['pad_shape']``."""
520
- for key in results.get('seg_fields', []):
521
- results[key] = mmcv.impad(
522
- results[key], shape=results['pad_shape'][:2])
523
-
524
- def __call__(self, results):
525
- """Call function to pad images, masks, semantic segmentation maps.
526
-
527
- Args:
528
- results (dict): Result dict from loading pipeline.
529
-
530
- Returns:
531
- dict: Updated result dict.
532
- """
533
- self._pad_img(results)
534
- self._pad_masks(results)
535
- self._pad_seg(results)
536
- return results
537
-
538
- def __repr__(self):
539
- repr_str = self.__class__.__name__
540
- repr_str += f'(size={self.size}, '
541
- repr_str += f'size_divisor={self.size_divisor}, '
542
- repr_str += f'pad_val={self.pad_val})'
543
- return repr_str
544
-
545
-
546
- @PIPELINES.register_module()
547
- class Normalize(object):
548
- """Normalize the image.
549
-
550
- Added key is "img_norm_cfg".
551
-
552
- Args:
553
- mean (sequence): Mean values of 3 channels.
554
- std (sequence): Std values of 3 channels.
555
- to_rgb (bool): Whether to convert the image from BGR to RGB,
556
- default is true.
557
- """
558
-
559
- def __init__(self, mean, std, to_rgb=True):
560
- self.mean = np.array(mean, dtype=np.float32)
561
- self.std = np.array(std, dtype=np.float32)
562
- self.to_rgb = to_rgb
563
-
564
- def __call__(self, results):
565
- """Call function to normalize images.
566
-
567
- Args:
568
- results (dict): Result dict from loading pipeline.
569
-
570
- Returns:
571
- dict: Normalized results, 'img_norm_cfg' key is added into
572
- result dict.
573
- """
574
- for key in results.get('img_fields', ['img']):
575
- results[key] = mmcv.imnormalize(results[key], self.mean, self.std,
576
- self.to_rgb)
577
- results['img_norm_cfg'] = dict(
578
- mean=self.mean, std=self.std, to_rgb=self.to_rgb)
579
- return results
580
-
581
- def __repr__(self):
582
- repr_str = self.__class__.__name__
583
- repr_str += f'(mean={self.mean}, std={self.std}, to_rgb={self.to_rgb})'
584
- return repr_str
585
-
586
-
587
- @PIPELINES.register_module()
588
- class RandomCrop(object):
589
- """Random crop the image & bboxes & masks.
590
-
591
- The absolute `crop_size` is sampled based on `crop_type` and `image_size`,
592
- then the cropped results are generated.
593
-
594
- Args:
595
- crop_size (tuple): The relative ratio or absolute pixels of
596
- height and width.
597
- crop_type (str, optional): one of "relative_range", "relative",
598
- "absolute", "absolute_range". "relative" randomly crops
599
- (h * crop_size[0], w * crop_size[1]) part from an input of size
600
- (h, w). "relative_range" uniformly samples relative crop size from
601
- range [crop_size[0], 1] and [crop_size[1], 1] for height and width
602
- respectively. "absolute" crops from an input with absolute size
603
- (crop_size[0], crop_size[1]). "absolute_range" uniformly samples
604
- crop_h in range [crop_size[0], min(h, crop_size[1])] and crop_w
605
- in range [crop_size[0], min(w, crop_size[1])]. Default "absolute".
606
- allow_negative_crop (bool, optional): Whether to allow a crop that does
607
- not contain any bbox area. Default False.
608
- bbox_clip_border (bool, optional): Whether clip the objects outside
609
- the border of the image. Defaults to True.
610
-
611
- Note:
612
- - If the image is smaller than the absolute crop size, return the
613
- original image.
614
- - The keys for bboxes, labels and masks must be aligned. That is,
615
- `gt_bboxes` corresponds to `gt_labels` and `gt_masks`, and
616
- `gt_bboxes_ignore` corresponds to `gt_labels_ignore` and
617
- `gt_masks_ignore`.
618
- - If the crop does not contain any gt-bbox region and
619
- `allow_negative_crop` is set to False, skip this image.
620
- """
621
-
622
- def __init__(self,
623
- crop_size,
624
- crop_type='absolute',
625
- allow_negative_crop=False,
626
- bbox_clip_border=True):
627
- if crop_type not in [
628
- 'relative_range', 'relative', 'absolute', 'absolute_range'
629
- ]:
630
- raise ValueError(f'Invalid crop_type {crop_type}.')
631
- if crop_type in ['absolute', 'absolute_range']:
632
- assert crop_size[0] > 0 and crop_size[1] > 0
633
- assert isinstance(crop_size[0], int) and isinstance(
634
- crop_size[1], int)
635
- else:
636
- assert 0 < crop_size[0] <= 1 and 0 < crop_size[1] <= 1
637
- self.crop_size = crop_size
638
- self.crop_type = crop_type
639
- self.allow_negative_crop = allow_negative_crop
640
- self.bbox_clip_border = bbox_clip_border
641
- # The key correspondence from bboxes to labels and masks.
642
- self.bbox2label = {
643
- 'gt_bboxes': 'gt_labels',
644
- 'gt_bboxes_ignore': 'gt_labels_ignore'
645
- }
646
- self.bbox2mask = {
647
- 'gt_bboxes': 'gt_masks',
648
- 'gt_bboxes_ignore': 'gt_masks_ignore'
649
- }
650
-
651
- def _crop_data(self, results, crop_size, allow_negative_crop):
652
- """Function to randomly crop images, bounding boxes, masks, semantic
653
- segmentation maps.
654
-
655
- Args:
656
- results (dict): Result dict from loading pipeline.
657
- crop_size (tuple): Expected absolute size after cropping, (h, w).
658
- allow_negative_crop (bool): Whether to allow a crop that does not
659
- contain any bbox area. Default to False.
660
-
661
- Returns:
662
- dict: Randomly cropped results, 'img_shape' key in result dict is
663
- updated according to crop size.
664
- """
665
- assert crop_size[0] > 0 and crop_size[1] > 0
666
- for key in results.get('img_fields', ['img']):
667
- img = results[key]
668
- margin_h = max(img.shape[0] - crop_size[0], 0)
669
- margin_w = max(img.shape[1] - crop_size[1], 0)
670
- offset_h = np.random.randint(0, margin_h + 1)
671
- offset_w = np.random.randint(0, margin_w + 1)
672
- crop_y1, crop_y2 = offset_h, offset_h + crop_size[0]
673
- crop_x1, crop_x2 = offset_w, offset_w + crop_size[1]
674
-
675
- # crop the image
676
- img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...]
677
- img_shape = img.shape
678
- results[key] = img
679
- results['img_shape'] = img_shape
680
-
681
- # crop bboxes accordingly and clip to the image boundary
682
- for key in results.get('bbox_fields', []):
683
- # e.g. gt_bboxes and gt_bboxes_ignore
684
- bbox_offset = np.array([offset_w, offset_h, offset_w, offset_h],
685
- dtype=np.float32)
686
- bboxes = results[key] - bbox_offset
687
- if self.bbox_clip_border:
688
- bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1])
689
- bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0])
690
- valid_inds = (bboxes[:, 2] > bboxes[:, 0]) & (
691
- bboxes[:, 3] > bboxes[:, 1])
692
- # If the crop does not contain any gt-bbox area and
693
- # allow_negative_crop is False, skip this image.
694
- if (key == 'gt_bboxes' and not valid_inds.any()
695
- and not allow_negative_crop):
696
- return None
697
- results[key] = bboxes[valid_inds, :]
698
- # label fields. e.g. gt_labels and gt_labels_ignore
699
- label_key = self.bbox2label.get(key)
700
- if label_key in results:
701
- results[label_key] = results[label_key][valid_inds]
702
-
703
- # mask fields, e.g. gt_masks and gt_masks_ignore
704
- mask_key = self.bbox2mask.get(key)
705
- if mask_key in results:
706
- results[mask_key] = results[mask_key][
707
- valid_inds.nonzero()[0]].crop(
708
- np.asarray([crop_x1, crop_y1, crop_x2, crop_y2]))
709
-
710
- # crop semantic seg
711
- for key in results.get('seg_fields', []):
712
- results[key] = results[key][crop_y1:crop_y2, crop_x1:crop_x2]
713
-
714
- return results
715
-
716
- def _get_crop_size(self, image_size):
717
- """Randomly generates the absolute crop size based on `crop_type` and
718
- `image_size`.
719
-
720
- Args:
721
- image_size (tuple): (h, w).
722
-
723
- Returns:
724
- crop_size (tuple): (crop_h, crop_w) in absolute pixels.
725
- """
726
- h, w = image_size
727
- if self.crop_type == 'absolute':
728
- return (min(self.crop_size[0], h), min(self.crop_size[1], w))
729
- elif self.crop_type == 'absolute_range':
730
- assert self.crop_size[0] <= self.crop_size[1]
731
- crop_h = np.random.randint(
732
- min(h, self.crop_size[0]),
733
- min(h, self.crop_size[1]) + 1)
734
- crop_w = np.random.randint(
735
- min(w, self.crop_size[0]),
736
- min(w, self.crop_size[1]) + 1)
737
- return crop_h, crop_w
738
- elif self.crop_type == 'relative':
739
- crop_h, crop_w = self.crop_size
740
- return int(h * crop_h + 0.5), int(w * crop_w + 0.5)
741
- elif self.crop_type == 'relative_range':
742
- crop_size = np.asarray(self.crop_size, dtype=np.float32)
743
- crop_h, crop_w = crop_size + np.random.rand(2) * (1 - crop_size)
744
- return int(h * crop_h + 0.5), int(w * crop_w + 0.5)
745
-
746
- def __call__(self, results):
747
- """Call function to randomly crop images, bounding boxes, masks,
748
- semantic segmentation maps.
749
-
750
- Args:
751
- results (dict): Result dict from loading pipeline.
752
-
753
- Returns:
754
- dict: Randomly cropped results, 'img_shape' key in result dict is
755
- updated according to crop size.
756
- """
757
- image_size = results['img'].shape[:2]
758
- crop_size = self._get_crop_size(image_size)
759
- results = self._crop_data(results, crop_size, self.allow_negative_crop)
760
- return results
761
-
762
- def __repr__(self):
763
- repr_str = self.__class__.__name__
764
- repr_str += f'(crop_size={self.crop_size}, '
765
- repr_str += f'crop_type={self.crop_type}, '
766
- repr_str += f'allow_negative_crop={self.allow_negative_crop}, '
767
- repr_str += f'bbox_clip_border={self.bbox_clip_border})'
768
- return repr_str
769
-
770
-
771
- @PIPELINES.register_module()
772
- class SegRescale(object):
773
- """Rescale semantic segmentation maps.
774
-
775
- Args:
776
- scale_factor (float): The scale factor of the final output.
777
- backend (str): Image rescale backend, choices are 'cv2' and 'pillow'.
778
- These two backends generates slightly different results. Defaults
779
- to 'cv2'.
780
- """
781
-
782
- def __init__(self, scale_factor=1, backend='cv2'):
783
- self.scale_factor = scale_factor
784
- self.backend = backend
785
-
786
- def __call__(self, results):
787
- """Call function to scale the semantic segmentation map.
788
-
789
- Args:
790
- results (dict): Result dict from loading pipeline.
791
-
792
- Returns:
793
- dict: Result dict with semantic segmentation map scaled.
794
- """
795
-
796
- for key in results.get('seg_fields', []):
797
- if self.scale_factor != 1:
798
- results[key] = mmcv.imrescale(
799
- results[key],
800
- self.scale_factor,
801
- interpolation='nearest',
802
- backend=self.backend)
803
- return results
804
-
805
- def __repr__(self):
806
- return self.__class__.__name__ + f'(scale_factor={self.scale_factor})'
807
-
808
-
809
- @PIPELINES.register_module()
810
- class PhotoMetricDistortion(object):
811
- """Apply photometric distortion to image sequentially, every transformation
812
- is applied with a probability of 0.5. The position of random contrast is in
813
- second or second to last.
814
-
815
- 1. random brightness
816
- 2. random contrast (mode 0)
817
- 3. convert color from BGR to HSV
818
- 4. random saturation
819
- 5. random hue
820
- 6. convert color from HSV to BGR
821
- 7. random contrast (mode 1)
822
- 8. randomly swap channels
823
-
824
- Args:
825
- brightness_delta (int): delta of brightness.
826
- contrast_range (tuple): range of contrast.
827
- saturation_range (tuple): range of saturation.
828
- hue_delta (int): delta of hue.
829
- """
830
-
831
- def __init__(self,
832
- brightness_delta=32,
833
- contrast_range=(0.5, 1.5),
834
- saturation_range=(0.5, 1.5),
835
- hue_delta=18):
836
- self.brightness_delta = brightness_delta
837
- self.contrast_lower, self.contrast_upper = contrast_range
838
- self.saturation_lower, self.saturation_upper = saturation_range
839
- self.hue_delta = hue_delta
840
-
841
- def __call__(self, results):
842
- """Call function to perform photometric distortion on images.
843
-
844
- Args:
845
- results (dict): Result dict from loading pipeline.
846
-
847
- Returns:
848
- dict: Result dict with images distorted.
849
- """
850
-
851
- if 'img_fields' in results:
852
- assert results['img_fields'] == ['img'], \
853
- 'Only single img_fields is allowed'
854
- img = results['img']
855
- assert img.dtype == np.float32, \
856
- 'PhotoMetricDistortion needs the input image of dtype np.float32,'\
857
- ' please set "to_float32=True" in "LoadImageFromFile" pipeline'
858
- # random brightness
859
- if random.randint(2):
860
- delta = random.uniform(-self.brightness_delta,
861
- self.brightness_delta)
862
- img += delta
863
-
864
- # mode == 0 --> do random contrast first
865
- # mode == 1 --> do random contrast last
866
- mode = random.randint(2)
867
- if mode == 1:
868
- if random.randint(2):
869
- alpha = random.uniform(self.contrast_lower,
870
- self.contrast_upper)
871
- img *= alpha
872
-
873
- # convert color from BGR to HSV
874
- img = mmcv.bgr2hsv(img)
875
-
876
- # random saturation
877
- if random.randint(2):
878
- img[..., 1] *= random.uniform(self.saturation_lower,
879
- self.saturation_upper)
880
-
881
- # random hue
882
- if random.randint(2):
883
- img[..., 0] += random.uniform(-self.hue_delta, self.hue_delta)
884
- img[..., 0][img[..., 0] > 360] -= 360
885
- img[..., 0][img[..., 0] < 0] += 360
886
-
887
- # convert color from HSV to BGR
888
- img = mmcv.hsv2bgr(img)
889
-
890
- # random contrast
891
- if mode == 0:
892
- if random.randint(2):
893
- alpha = random.uniform(self.contrast_lower,
894
- self.contrast_upper)
895
- img *= alpha
896
-
897
- # randomly swap channels
898
- if random.randint(2):
899
- img = img[..., random.permutation(3)]
900
-
901
- results['img'] = img
902
- return results
903
-
904
- def __repr__(self):
905
- repr_str = self.__class__.__name__
906
- repr_str += f'(\nbrightness_delta={self.brightness_delta},\n'
907
- repr_str += 'contrast_range='
908
- repr_str += f'{(self.contrast_lower, self.contrast_upper)},\n'
909
- repr_str += 'saturation_range='
910
- repr_str += f'{(self.saturation_lower, self.saturation_upper)},\n'
911
- repr_str += f'hue_delta={self.hue_delta})'
912
- return repr_str
913
-
914
-
915
- @PIPELINES.register_module()
916
- class Expand(object):
917
- """Random expand the image & bboxes.
918
-
919
- Randomly place the original image on a canvas of 'ratio' x original image
920
- size filled with mean values. The ratio is in the range of ratio_range.
921
-
922
- Args:
923
- mean (tuple): mean value of dataset.
924
- to_rgb (bool): if need to convert the order of mean to align with RGB.
925
- ratio_range (tuple): range of expand ratio.
926
- prob (float): probability of applying this transformation
927
- """
928
-
929
- def __init__(self,
930
- mean=(0, 0, 0),
931
- to_rgb=True,
932
- ratio_range=(1, 4),
933
- seg_ignore_label=None,
934
- prob=0.5):
935
- self.to_rgb = to_rgb
936
- self.ratio_range = ratio_range
937
- if to_rgb:
938
- self.mean = mean[::-1]
939
- else:
940
- self.mean = mean
941
- self.min_ratio, self.max_ratio = ratio_range
942
- self.seg_ignore_label = seg_ignore_label
943
- self.prob = prob
944
-
945
- def __call__(self, results):
946
- """Call function to expand images, bounding boxes.
947
-
948
- Args:
949
- results (dict): Result dict from loading pipeline.
950
-
951
- Returns:
952
- dict: Result dict with images, bounding boxes expanded
953
- """
954
-
955
- if random.uniform(0, 1) > self.prob:
956
- return results
957
-
958
- if 'img_fields' in results:
959
- assert results['img_fields'] == ['img'], \
960
- 'Only single img_fields is allowed'
961
- img = results['img']
962
-
963
- h, w, c = img.shape
964
- ratio = random.uniform(self.min_ratio, self.max_ratio)
965
- # speedup expand when meets large image
966
- if np.all(self.mean == self.mean[0]):
967
- expand_img = np.empty((int(h * ratio), int(w * ratio), c),
968
- img.dtype)
969
- expand_img.fill(self.mean[0])
970
- else:
971
- expand_img = np.full((int(h * ratio), int(w * ratio), c),
972
- self.mean,
973
- dtype=img.dtype)
974
- left = int(random.uniform(0, w * ratio - w))
975
- top = int(random.uniform(0, h * ratio - h))
976
- expand_img[top:top + h, left:left + w] = img
977
-
978
- results['img'] = expand_img
979
- # expand bboxes
980
- for key in results.get('bbox_fields', []):
981
- results[key] = results[key] + np.tile(
982
- (left, top), 2).astype(results[key].dtype)
983
-
984
- # expand masks
985
- for key in results.get('mask_fields', []):
986
- results[key] = results[key].expand(
987
- int(h * ratio), int(w * ratio), top, left)
988
-
989
- # expand segs
990
- for key in results.get('seg_fields', []):
991
- gt_seg = results[key]
992
- expand_gt_seg = np.full((int(h * ratio), int(w * ratio)),
993
- self.seg_ignore_label,
994
- dtype=gt_seg.dtype)
995
- expand_gt_seg[top:top + h, left:left + w] = gt_seg
996
- results[key] = expand_gt_seg
997
- return results
998
-
999
- def __repr__(self):
1000
- repr_str = self.__class__.__name__
1001
- repr_str += f'(mean={self.mean}, to_rgb={self.to_rgb}, '
1002
- repr_str += f'ratio_range={self.ratio_range}, '
1003
- repr_str += f'seg_ignore_label={self.seg_ignore_label})'
1004
- return repr_str
1005
-
1006
-
1007
- @PIPELINES.register_module()
1008
- class MinIoURandomCrop(object):
1009
- """Random crop the image & bboxes, the cropped patches have minimum IoU
1010
- requirement with original image & bboxes, the IoU threshold is randomly
1011
- selected from min_ious.
1012
-
1013
- Args:
1014
- min_ious (tuple): minimum IoU threshold for all intersections with
1015
- bounding boxes
1016
- min_crop_size (float): minimum crop's size (i.e. h,w := a*h, a*w,
1017
- where a >= min_crop_size).
1018
- bbox_clip_border (bool, optional): Whether clip the objects outside
1019
- the border of the image. Defaults to True.
1020
-
1021
- Note:
1022
- The keys for bboxes, labels and masks should be paired. That is, \
1023
- `gt_bboxes` corresponds to `gt_labels` and `gt_masks`, and \
1024
- `gt_bboxes_ignore` to `gt_labels_ignore` and `gt_masks_ignore`.
1025
- """
1026
-
1027
- def __init__(self,
1028
- min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
1029
- min_crop_size=0.3,
1030
- bbox_clip_border=True):
1031
- # 1: return ori img
1032
- self.min_ious = min_ious
1033
- self.sample_mode = (1, *min_ious, 0)
1034
- self.min_crop_size = min_crop_size
1035
- self.bbox_clip_border = bbox_clip_border
1036
- self.bbox2label = {
1037
- 'gt_bboxes': 'gt_labels',
1038
- 'gt_bboxes_ignore': 'gt_labels_ignore'
1039
- }
1040
- self.bbox2mask = {
1041
- 'gt_bboxes': 'gt_masks',
1042
- 'gt_bboxes_ignore': 'gt_masks_ignore'
1043
- }
1044
-
1045
- def __call__(self, results):
1046
- """Call function to crop images and bounding boxes with minimum IoU
1047
- constraint.
1048
-
1049
- Args:
1050
- results (dict): Result dict from loading pipeline.
1051
-
1052
- Returns:
1053
- dict: Result dict with images and bounding boxes cropped, \
1054
- 'img_shape' key is updated.
1055
- """
1056
-
1057
- if 'img_fields' in results:
1058
- assert results['img_fields'] == ['img'], \
1059
- 'Only single img_fields is allowed'
1060
- img = results['img']
1061
- assert 'bbox_fields' in results
1062
- boxes = [results[key] for key in results['bbox_fields']]
1063
- boxes = np.concatenate(boxes, 0)
1064
- h, w, c = img.shape
1065
- while True:
1066
- mode = random.choice(self.sample_mode)
1067
- self.mode = mode
1068
- if mode == 1:
1069
- return results
1070
-
1071
- min_iou = mode
1072
- for i in range(50):
1073
- new_w = random.uniform(self.min_crop_size * w, w)
1074
- new_h = random.uniform(self.min_crop_size * h, h)
1075
-
1076
- # h / w in [0.5, 2]
1077
- if new_h / new_w < 0.5 or new_h / new_w > 2:
1078
- continue
1079
-
1080
- left = random.uniform(w - new_w)
1081
- top = random.uniform(h - new_h)
1082
-
1083
- patch = np.array(
1084
- (int(left), int(top), int(left + new_w), int(top + new_h)))
1085
- # Line or point crop is not allowed
1086
- if patch[2] == patch[0] or patch[3] == patch[1]:
1087
- continue
1088
- overlaps = bbox_overlaps(
1089
- patch.reshape(-1, 4), boxes.reshape(-1, 4)).reshape(-1)
1090
- if len(overlaps) > 0 and overlaps.min() < min_iou:
1091
- continue
1092
-
1093
- # center of boxes should inside the crop img
1094
- # only adjust boxes and instance masks when the gt is not empty
1095
- if len(overlaps) > 0:
1096
- # adjust boxes
1097
- def is_center_of_bboxes_in_patch(boxes, patch):
1098
- center = (boxes[:, :2] + boxes[:, 2:]) / 2
1099
- mask = ((center[:, 0] > patch[0]) *
1100
- (center[:, 1] > patch[1]) *
1101
- (center[:, 0] < patch[2]) *
1102
- (center[:, 1] < patch[3]))
1103
- return mask
1104
-
1105
- mask = is_center_of_bboxes_in_patch(boxes, patch)
1106
- if not mask.any():
1107
- continue
1108
- for key in results.get('bbox_fields', []):
1109
- boxes = results[key].copy()
1110
- mask = is_center_of_bboxes_in_patch(boxes, patch)
1111
- boxes = boxes[mask]
1112
- if self.bbox_clip_border:
1113
- boxes[:, 2:] = boxes[:, 2:].clip(max=patch[2:])
1114
- boxes[:, :2] = boxes[:, :2].clip(min=patch[:2])
1115
- boxes -= np.tile(patch[:2], 2)
1116
-
1117
- results[key] = boxes
1118
- # labels
1119
- label_key = self.bbox2label.get(key)
1120
- if label_key in results:
1121
- results[label_key] = results[label_key][mask]
1122
-
1123
- # mask fields
1124
- mask_key = self.bbox2mask.get(key)
1125
- if mask_key in results:
1126
- results[mask_key] = results[mask_key][
1127
- mask.nonzero()[0]].crop(patch)
1128
- # adjust the img no matter whether the gt is empty before crop
1129
- img = img[patch[1]:patch[3], patch[0]:patch[2]]
1130
- results['img'] = img
1131
- results['img_shape'] = img.shape
1132
-
1133
- # seg fields
1134
- for key in results.get('seg_fields', []):
1135
- results[key] = results[key][patch[1]:patch[3],
1136
- patch[0]:patch[2]]
1137
- return results
1138
-
1139
- def __repr__(self):
1140
- repr_str = self.__class__.__name__
1141
- repr_str += f'(min_ious={self.min_ious}, '
1142
- repr_str += f'min_crop_size={self.min_crop_size}, '
1143
- repr_str += f'bbox_clip_border={self.bbox_clip_border})'
1144
- return repr_str
1145
-
1146
-
1147
- @PIPELINES.register_module()
1148
- class Corrupt(object):
1149
- """Corruption augmentation.
1150
-
1151
- Corruption transforms implemented based on
1152
- `imagecorruptions <https://github.com/bethgelab/imagecorruptions>`_.
1153
-
1154
- Args:
1155
- corruption (str): Corruption name.
1156
- severity (int, optional): The severity of corruption. Default: 1.
1157
- """
1158
-
1159
- def __init__(self, corruption, severity=1):
1160
- self.corruption = corruption
1161
- self.severity = severity
1162
-
1163
- def __call__(self, results):
1164
- """Call function to corrupt image.
1165
-
1166
- Args:
1167
- results (dict): Result dict from loading pipeline.
1168
-
1169
- Returns:
1170
- dict: Result dict with images corrupted.
1171
- """
1172
-
1173
- if corrupt is None:
1174
- raise RuntimeError('imagecorruptions is not installed')
1175
- if 'img_fields' in results:
1176
- assert results['img_fields'] == ['img'], \
1177
- 'Only single img_fields is allowed'
1178
- results['img'] = corrupt(
1179
- results['img'].astype(np.uint8),
1180
- corruption_name=self.corruption,
1181
- severity=self.severity)
1182
- return results
1183
-
1184
- def __repr__(self):
1185
- repr_str = self.__class__.__name__
1186
- repr_str += f'(corruption={self.corruption}, '
1187
- repr_str += f'severity={self.severity})'
1188
- return repr_str
1189
-
1190
-
1191
- @PIPELINES.register_module()
1192
- class Albu(object):
1193
- """Albumentation augmentation.
1194
-
1195
- Adds custom transformations from Albumentations library.
1196
- Please, visit `https://albumentations.readthedocs.io`
1197
- to get more information.
1198
-
1199
- An example of ``transforms`` is as followed:
1200
-
1201
- .. code-block::
1202
-
1203
- [
1204
- dict(
1205
- type='ShiftScaleRotate',
1206
- shift_limit=0.0625,
1207
- scale_limit=0.0,
1208
- rotate_limit=0,
1209
- interpolation=1,
1210
- p=0.5),
1211
- dict(
1212
- type='RandomBrightnessContrast',
1213
- brightness_limit=[0.1, 0.3],
1214
- contrast_limit=[0.1, 0.3],
1215
- p=0.2),
1216
- dict(type='ChannelShuffle', p=0.1),
1217
- dict(
1218
- type='OneOf',
1219
- transforms=[
1220
- dict(type='Blur', blur_limit=3, p=1.0),
1221
- dict(type='MedianBlur', blur_limit=3, p=1.0)
1222
- ],
1223
- p=0.1),
1224
- ]
1225
-
1226
- Args:
1227
- transforms (list[dict]): A list of albu transformations
1228
- bbox_params (dict): Bbox_params for albumentation `Compose`
1229
- keymap (dict): Contains {'input key':'albumentation-style key'}
1230
- skip_img_without_anno (bool): Whether to skip the image if no ann left
1231
- after aug
1232
- """
1233
-
1234
- def __init__(self,
1235
- transforms,
1236
- bbox_params=None,
1237
- keymap=None,
1238
- update_pad_shape=False,
1239
- skip_img_without_anno=False):
1240
- if Compose is None:
1241
- raise RuntimeError('albumentations is not installed')
1242
-
1243
- # Args will be modified later, copying it will be safer
1244
- transforms = copy.deepcopy(transforms)
1245
- if bbox_params is not None:
1246
- bbox_params = copy.deepcopy(bbox_params)
1247
- if keymap is not None:
1248
- keymap = copy.deepcopy(keymap)
1249
- self.transforms = transforms
1250
- self.filter_lost_elements = False
1251
- self.update_pad_shape = update_pad_shape
1252
- self.skip_img_without_anno = skip_img_without_anno
1253
-
1254
- # A simple workaround to remove masks without boxes
1255
- if (isinstance(bbox_params, dict) and 'label_fields' in bbox_params
1256
- and 'filter_lost_elements' in bbox_params):
1257
- self.filter_lost_elements = True
1258
- self.origin_label_fields = bbox_params['label_fields']
1259
- bbox_params['label_fields'] = ['idx_mapper']
1260
- del bbox_params['filter_lost_elements']
1261
-
1262
- self.bbox_params = (
1263
- self.albu_builder(bbox_params) if bbox_params else None)
1264
- self.aug = Compose([self.albu_builder(t) for t in self.transforms],
1265
- bbox_params=self.bbox_params)
1266
-
1267
- if not keymap:
1268
- self.keymap_to_albu = {
1269
- 'img': 'image',
1270
- 'gt_masks': 'masks',
1271
- 'gt_bboxes': 'bboxes'
1272
- }
1273
- else:
1274
- self.keymap_to_albu = keymap
1275
- self.keymap_back = {v: k for k, v in self.keymap_to_albu.items()}
1276
-
1277
- def albu_builder(self, cfg):
1278
- """Import a module from albumentations.
1279
-
1280
- It inherits some of :func:`build_from_cfg` logic.
1281
-
1282
- Args:
1283
- cfg (dict): Config dict. It should at least contain the key "type".
1284
-
1285
- Returns:
1286
- obj: The constructed object.
1287
- """
1288
-
1289
- assert isinstance(cfg, dict) and 'type' in cfg
1290
- args = cfg.copy()
1291
-
1292
- obj_type = args.pop('type')
1293
- if mmcv.is_str(obj_type):
1294
- if albumentations is None:
1295
- raise RuntimeError('albumentations is not installed')
1296
- obj_cls = getattr(albumentations, obj_type)
1297
- elif inspect.isclass(obj_type):
1298
- obj_cls = obj_type
1299
- else:
1300
- raise TypeError(
1301
- f'type must be a str or valid type, but got {type(obj_type)}')
1302
-
1303
- if 'transforms' in args:
1304
- args['transforms'] = [
1305
- self.albu_builder(transform)
1306
- for transform in args['transforms']
1307
- ]
1308
-
1309
- return obj_cls(**args)
1310
-
1311
- @staticmethod
1312
- def mapper(d, keymap):
1313
- """Dictionary mapper. Renames keys according to keymap provided.
1314
-
1315
- Args:
1316
- d (dict): old dict
1317
- keymap (dict): {'old_key':'new_key'}
1318
- Returns:
1319
- dict: new dict.
1320
- """
1321
-
1322
- updated_dict = {}
1323
- for k, v in zip(d.keys(), d.values()):
1324
- new_k = keymap.get(k, k)
1325
- updated_dict[new_k] = d[k]
1326
- return updated_dict
1327
-
1328
- def __call__(self, results):
1329
- # dict to albumentations format
1330
- results = self.mapper(results, self.keymap_to_albu)
1331
- # TODO: add bbox_fields
1332
- if 'bboxes' in results:
1333
- # to list of boxes
1334
- if isinstance(results['bboxes'], np.ndarray):
1335
- results['bboxes'] = [x for x in results['bboxes']]
1336
- # add pseudo-field for filtration
1337
- if self.filter_lost_elements:
1338
- results['idx_mapper'] = np.arange(len(results['bboxes']))
1339
-
1340
- # TODO: Support mask structure in albu
1341
- if 'masks' in results:
1342
- if isinstance(results['masks'], PolygonMasks):
1343
- raise NotImplementedError(
1344
- 'Albu only supports BitMap masks now')
1345
- ori_masks = results['masks']
1346
- if albumentations.__version__ < '0.5':
1347
- results['masks'] = results['masks'].masks
1348
- else:
1349
- results['masks'] = [mask for mask in results['masks'].masks]
1350
-
1351
- results = self.aug(**results)
1352
-
1353
- if 'bboxes' in results:
1354
- if isinstance(results['bboxes'], list):
1355
- results['bboxes'] = np.array(
1356
- results['bboxes'], dtype=np.float32)
1357
- results['bboxes'] = results['bboxes'].reshape(-1, 4)
1358
-
1359
- # filter label_fields
1360
- if self.filter_lost_elements:
1361
-
1362
- for label in self.origin_label_fields:
1363
- results[label] = np.array(
1364
- [results[label][i] for i in results['idx_mapper']])
1365
- if 'masks' in results:
1366
- results['masks'] = np.array(
1367
- [results['masks'][i] for i in results['idx_mapper']])
1368
- results['masks'] = ori_masks.__class__(
1369
- results['masks'], results['image'].shape[0],
1370
- results['image'].shape[1])
1371
-
1372
- if (not len(results['idx_mapper'])
1373
- and self.skip_img_without_anno):
1374
- return None
1375
-
1376
- if 'gt_labels' in results:
1377
- if isinstance(results['gt_labels'], list):
1378
- results['gt_labels'] = np.array(results['gt_labels'])
1379
- results['gt_labels'] = results['gt_labels'].astype(np.int64)
1380
-
1381
- # back to the original format
1382
- results = self.mapper(results, self.keymap_back)
1383
-
1384
- # update final shape
1385
- if self.update_pad_shape:
1386
- results['pad_shape'] = results['img'].shape
1387
-
1388
- return results
1389
-
1390
- def __repr__(self):
1391
- repr_str = self.__class__.__name__ + f'(transforms={self.transforms})'
1392
- return repr_str
1393
-
1394
-
1395
- @PIPELINES.register_module()
1396
- class RandomCenterCropPad(object):
1397
- """Random center crop and random around padding for CornerNet.
1398
-
1399
- This operation generates randomly cropped image from the original image and
1400
- pads it simultaneously. Different from :class:`RandomCrop`, the output
1401
- shape may not equal to ``crop_size`` strictly. We choose a random value
1402
- from ``ratios`` and the output shape could be larger or smaller than
1403
- ``crop_size``. The padding operation is also different from :class:`Pad`,
1404
- here we use around padding instead of right-bottom padding.
1405
-
1406
- The relation between output image (padding image) and original image:
1407
-
1408
- .. code:: text
1409
-
1410
- output image
1411
-
1412
- +----------------------------+
1413
- | padded area |
1414
- +------|----------------------------|----------+
1415
- | | cropped area | |
1416
- | | +---------------+ | |
1417
- | | | . center | | | original image
1418
- | | | range | | |
1419
- | | +---------------+ | |
1420
- +------|----------------------------|----------+
1421
- | padded area |
1422
- +----------------------------+
1423
-
1424
- There are 5 main areas in the figure:
1425
-
1426
- - output image: output image of this operation, also called padding
1427
- image in following instruction.
1428
- - original image: input image of this operation.
1429
- - padded area: non-intersect area of output image and original image.
1430
- - cropped area: the overlap of output image and original image.
1431
- - center range: a smaller area where random center chosen from.
1432
- center range is computed by ``border`` and original image's shape
1433
- to avoid our random center is too close to original image's border.
1434
-
1435
- Also this operation act differently in train and test mode, the summary
1436
- pipeline is listed below.
1437
-
1438
- Train pipeline:
1439
-
1440
- 1. Choose a ``random_ratio`` from ``ratios``, the shape of padding image
1441
- will be ``random_ratio * crop_size``.
1442
- 2. Choose a ``random_center`` in center range.
1443
- 3. Generate padding image with center matches the ``random_center``.
1444
- 4. Initialize the padding image with pixel value equals to ``mean``.
1445
- 5. Copy the cropped area to padding image.
1446
- 6. Refine annotations.
1447
-
1448
- Test pipeline:
1449
-
1450
- 1. Compute output shape according to ``test_pad_mode``.
1451
- 2. Generate padding image with center matches the original image
1452
- center.
1453
- 3. Initialize the padding image with pixel value equals to ``mean``.
1454
- 4. Copy the ``cropped area`` to padding image.
1455
-
1456
- Args:
1457
- crop_size (tuple | None): expected size after crop, final size will
1458
- computed according to ratio. Requires (h, w) in train mode, and
1459
- None in test mode.
1460
- ratios (tuple): random select a ratio from tuple and crop image to
1461
- (crop_size[0] * ratio) * (crop_size[1] * ratio).
1462
- Only available in train mode.
1463
- border (int): max distance from center select area to image border.
1464
- Only available in train mode.
1465
- mean (sequence): Mean values of 3 channels.
1466
- std (sequence): Std values of 3 channels.
1467
- to_rgb (bool): Whether to convert the image from BGR to RGB.
1468
- test_mode (bool): whether involve random variables in transform.
1469
- In train mode, crop_size is fixed, center coords and ratio is
1470
- random selected from predefined lists. In test mode, crop_size
1471
- is image's original shape, center coords and ratio is fixed.
1472
- test_pad_mode (tuple): padding method and padding shape value, only
1473
- available in test mode. Default is using 'logical_or' with
1474
- 127 as padding shape value.
1475
-
1476
- - 'logical_or': final_shape = input_shape | padding_shape_value
1477
- - 'size_divisor': final_shape = int(
1478
- ceil(input_shape / padding_shape_value) * padding_shape_value)
1479
- bbox_clip_border (bool, optional): Whether clip the objects outside
1480
- the border of the image. Defaults to True.
1481
- """
1482
-
1483
- def __init__(self,
1484
- crop_size=None,
1485
- ratios=(0.9, 1.0, 1.1),
1486
- border=128,
1487
- mean=None,
1488
- std=None,
1489
- to_rgb=None,
1490
- test_mode=False,
1491
- test_pad_mode=('logical_or', 127),
1492
- bbox_clip_border=True):
1493
- if test_mode:
1494
- assert crop_size is None, 'crop_size must be None in test mode'
1495
- assert ratios is None, 'ratios must be None in test mode'
1496
- assert border is None, 'border must be None in test mode'
1497
- assert isinstance(test_pad_mode, (list, tuple))
1498
- assert test_pad_mode[0] in ['logical_or', 'size_divisor']
1499
- else:
1500
- assert isinstance(crop_size, (list, tuple))
1501
- assert crop_size[0] > 0 and crop_size[1] > 0, (
1502
- 'crop_size must > 0 in train mode')
1503
- assert isinstance(ratios, (list, tuple))
1504
- assert test_pad_mode is None, (
1505
- 'test_pad_mode must be None in train mode')
1506
-
1507
- self.crop_size = crop_size
1508
- self.ratios = ratios
1509
- self.border = border
1510
- # We do not set default value to mean, std and to_rgb because these
1511
- # hyper-parameters are easy to forget but could affect the performance.
1512
- # Please use the same setting as Normalize for performance assurance.
1513
- assert mean is not None and std is not None and to_rgb is not None
1514
- self.to_rgb = to_rgb
1515
- self.input_mean = mean
1516
- self.input_std = std
1517
- if to_rgb:
1518
- self.mean = mean[::-1]
1519
- self.std = std[::-1]
1520
- else:
1521
- self.mean = mean
1522
- self.std = std
1523
- self.test_mode = test_mode
1524
- self.test_pad_mode = test_pad_mode
1525
- self.bbox_clip_border = bbox_clip_border
1526
-
1527
- def _get_border(self, border, size):
1528
- """Get final border for the target size.
1529
-
1530
- This function generates a ``final_border`` according to image's shape.
1531
- The area between ``final_border`` and ``size - final_border`` is the
1532
- ``center range``. We randomly choose center from the ``center range``
1533
- to avoid our random center is too close to original image's border.
1534
- Also ``center range`` should be larger than 0.
1535
-
1536
- Args:
1537
- border (int): The initial border, default is 128.
1538
- size (int): The width or height of original image.
1539
- Returns:
1540
- int: The final border.
1541
- """
1542
- k = 2 * border / size
1543
- i = pow(2, np.ceil(np.log2(np.ceil(k))) + (k == int(k)))
1544
- return border // i
1545
-
1546
- def _filter_boxes(self, patch, boxes):
1547
- """Check whether the center of each box is in the patch.
1548
-
1549
- Args:
1550
- patch (list[int]): The cropped area, [left, top, right, bottom].
1551
- boxes (numpy array, (N x 4)): Ground truth boxes.
1552
-
1553
- Returns:
1554
- mask (numpy array, (N,)): Each box is inside or outside the patch.
1555
- """
1556
- center = (boxes[:, :2] + boxes[:, 2:]) / 2
1557
- mask = (center[:, 0] > patch[0]) * (center[:, 1] > patch[1]) * (
1558
- center[:, 0] < patch[2]) * (
1559
- center[:, 1] < patch[3])
1560
- return mask
1561
-
1562
- def _crop_image_and_paste(self, image, center, size):
1563
- """Crop image with a given center and size, then paste the cropped
1564
- image to a blank image with two centers align.
1565
-
1566
- This function is equivalent to generating a blank image with ``size``
1567
- as its shape. Then cover it on the original image with two centers (
1568
- the center of blank image and the random center of original image)
1569
- aligned. The overlap area is paste from the original image and the
1570
- outside area is filled with ``mean pixel``.
1571
-
1572
- Args:
1573
- image (np array, H x W x C): Original image.
1574
- center (list[int]): Target crop center coord.
1575
- size (list[int]): Target crop size. [target_h, target_w]
1576
-
1577
- Returns:
1578
- cropped_img (np array, target_h x target_w x C): Cropped image.
1579
- border (np array, 4): The distance of four border of
1580
- ``cropped_img`` to the original image area, [top, bottom,
1581
- left, right]
1582
- patch (list[int]): The cropped area, [left, top, right, bottom].
1583
- """
1584
- center_y, center_x = center
1585
- target_h, target_w = size
1586
- img_h, img_w, img_c = image.shape
1587
-
1588
- x0 = max(0, center_x - target_w // 2)
1589
- x1 = min(center_x + target_w // 2, img_w)
1590
- y0 = max(0, center_y - target_h // 2)
1591
- y1 = min(center_y + target_h // 2, img_h)
1592
- patch = np.array((int(x0), int(y0), int(x1), int(y1)))
1593
-
1594
- left, right = center_x - x0, x1 - center_x
1595
- top, bottom = center_y - y0, y1 - center_y
1596
-
1597
- cropped_center_y, cropped_center_x = target_h // 2, target_w // 2
1598
- cropped_img = np.zeros((target_h, target_w, img_c), dtype=image.dtype)
1599
- for i in range(img_c):
1600
- cropped_img[:, :, i] += self.mean[i]
1601
- y_slice = slice(cropped_center_y - top, cropped_center_y + bottom)
1602
- x_slice = slice(cropped_center_x - left, cropped_center_x + right)
1603
- cropped_img[y_slice, x_slice, :] = image[y0:y1, x0:x1, :]
1604
-
1605
- border = np.array([
1606
- cropped_center_y - top, cropped_center_y + bottom,
1607
- cropped_center_x - left, cropped_center_x + right
1608
- ],
1609
- dtype=np.float32)
1610
-
1611
- return cropped_img, border, patch
1612
-
1613
- def _train_aug(self, results):
1614
- """Random crop and around padding the original image.
1615
-
1616
- Args:
1617
- results (dict): Image infomations in the augment pipeline.
1618
-
1619
- Returns:
1620
- results (dict): The updated dict.
1621
- """
1622
- img = results['img']
1623
- h, w, c = img.shape
1624
- boxes = results['gt_bboxes']
1625
- while True:
1626
- scale = random.choice(self.ratios)
1627
- new_h = int(self.crop_size[0] * scale)
1628
- new_w = int(self.crop_size[1] * scale)
1629
- h_border = self._get_border(self.border, h)
1630
- w_border = self._get_border(self.border, w)
1631
-
1632
- for i in range(50):
1633
- center_x = random.randint(low=w_border, high=w - w_border)
1634
- center_y = random.randint(low=h_border, high=h - h_border)
1635
-
1636
- cropped_img, border, patch = self._crop_image_and_paste(
1637
- img, [center_y, center_x], [new_h, new_w])
1638
-
1639
- mask = self._filter_boxes(patch, boxes)
1640
- # if image do not have valid bbox, any crop patch is valid.
1641
- if not mask.any() and len(boxes) > 0:
1642
- continue
1643
-
1644
- results['img'] = cropped_img
1645
- results['img_shape'] = cropped_img.shape
1646
- results['pad_shape'] = cropped_img.shape
1647
-
1648
- x0, y0, x1, y1 = patch
1649
-
1650
- left_w, top_h = center_x - x0, center_y - y0
1651
- cropped_center_x, cropped_center_y = new_w // 2, new_h // 2
1652
-
1653
- # crop bboxes accordingly and clip to the image boundary
1654
- for key in results.get('bbox_fields', []):
1655
- mask = self._filter_boxes(patch, results[key])
1656
- bboxes = results[key][mask]
1657
- bboxes[:, 0:4:2] += cropped_center_x - left_w - x0
1658
- bboxes[:, 1:4:2] += cropped_center_y - top_h - y0
1659
- if self.bbox_clip_border:
1660
- bboxes[:, 0:4:2] = np.clip(bboxes[:, 0:4:2], 0, new_w)
1661
- bboxes[:, 1:4:2] = np.clip(bboxes[:, 1:4:2], 0, new_h)
1662
- keep = (bboxes[:, 2] > bboxes[:, 0]) & (
1663
- bboxes[:, 3] > bboxes[:, 1])
1664
- bboxes = bboxes[keep]
1665
- results[key] = bboxes
1666
- if key in ['gt_bboxes']:
1667
- if 'gt_labels' in results:
1668
- labels = results['gt_labels'][mask]
1669
- labels = labels[keep]
1670
- results['gt_labels'] = labels
1671
- if 'gt_masks' in results:
1672
- raise NotImplementedError(
1673
- 'RandomCenterCropPad only supports bbox.')
1674
-
1675
- # crop semantic seg
1676
- for key in results.get('seg_fields', []):
1677
- raise NotImplementedError(
1678
- 'RandomCenterCropPad only supports bbox.')
1679
- return results
1680
-
1681
- def _test_aug(self, results):
1682
- """Around padding the original image without cropping.
1683
-
1684
- The padding mode and value are from ``test_pad_mode``.
1685
-
1686
- Args:
1687
- results (dict): Image infomations in the augment pipeline.
1688
-
1689
- Returns:
1690
- results (dict): The updated dict.
1691
- """
1692
- img = results['img']
1693
- h, w, c = img.shape
1694
- results['img_shape'] = img.shape
1695
- if self.test_pad_mode[0] in ['logical_or']:
1696
- target_h = h | self.test_pad_mode[1]
1697
- target_w = w | self.test_pad_mode[1]
1698
- elif self.test_pad_mode[0] in ['size_divisor']:
1699
- divisor = self.test_pad_mode[1]
1700
- target_h = int(np.ceil(h / divisor)) * divisor
1701
- target_w = int(np.ceil(w / divisor)) * divisor
1702
- else:
1703
- raise NotImplementedError(
1704
- 'RandomCenterCropPad only support two testing pad mode:'
1705
- 'logical-or and size_divisor.')
1706
-
1707
- cropped_img, border, _ = self._crop_image_and_paste(
1708
- img, [h // 2, w // 2], [target_h, target_w])
1709
- results['img'] = cropped_img
1710
- results['pad_shape'] = cropped_img.shape
1711
- results['border'] = border
1712
- return results
1713
-
1714
- def __call__(self, results):
1715
- img = results['img']
1716
- assert img.dtype == np.float32, (
1717
- 'RandomCenterCropPad needs the input image of dtype np.float32,'
1718
- ' please set "to_float32=True" in "LoadImageFromFile" pipeline')
1719
- h, w, c = img.shape
1720
- assert c == len(self.mean)
1721
- if self.test_mode:
1722
- return self._test_aug(results)
1723
- else:
1724
- return self._train_aug(results)
1725
-
1726
- def __repr__(self):
1727
- repr_str = self.__class__.__name__
1728
- repr_str += f'(crop_size={self.crop_size}, '
1729
- repr_str += f'ratios={self.ratios}, '
1730
- repr_str += f'border={self.border}, '
1731
- repr_str += f'mean={self.input_mean}, '
1732
- repr_str += f'std={self.input_std}, '
1733
- repr_str += f'to_rgb={self.to_rgb}, '
1734
- repr_str += f'test_mode={self.test_mode}, '
1735
- repr_str += f'test_pad_mode={self.test_pad_mode}, '
1736
- repr_str += f'bbox_clip_border={self.bbox_clip_border})'
1737
- return repr_str
1738
-
1739
-
1740
- @PIPELINES.register_module()
1741
- class CutOut(object):
1742
- """CutOut operation.
1743
-
1744
- Randomly drop some regions of image used in
1745
- `Cutout <https://arxiv.org/abs/1708.04552>`_.
1746
-
1747
- Args:
1748
- n_holes (int | tuple[int, int]): Number of regions to be dropped.
1749
- If it is given as a list, number of holes will be randomly
1750
- selected from the closed interval [`n_holes[0]`, `n_holes[1]`].
1751
- cutout_shape (tuple[int, int] | list[tuple[int, int]]): The candidate
1752
- shape of dropped regions. It can be `tuple[int, int]` to use a
1753
- fixed cutout shape, or `list[tuple[int, int]]` to randomly choose
1754
- shape from the list.
1755
- cutout_ratio (tuple[float, float] | list[tuple[float, float]]): The
1756
- candidate ratio of dropped regions. It can be `tuple[float, float]`
1757
- to use a fixed ratio or `list[tuple[float, float]]` to randomly
1758
- choose ratio from the list. Please note that `cutout_shape`
1759
- and `cutout_ratio` cannot be both given at the same time.
1760
- fill_in (tuple[float, float, float] | tuple[int, int, int]): The value
1761
- of pixel to fill in the dropped regions. Default: (0, 0, 0).
1762
- """
1763
-
1764
- def __init__(self,
1765
- n_holes,
1766
- cutout_shape=None,
1767
- cutout_ratio=None,
1768
- fill_in=(0, 0, 0)):
1769
-
1770
- assert (cutout_shape is None) ^ (cutout_ratio is None), \
1771
- 'Either cutout_shape or cutout_ratio should be specified.'
1772
- assert (isinstance(cutout_shape, (list, tuple))
1773
- or isinstance(cutout_ratio, (list, tuple)))
1774
- if isinstance(n_holes, tuple):
1775
- assert len(n_holes) == 2 and 0 <= n_holes[0] < n_holes[1]
1776
- else:
1777
- n_holes = (n_holes, n_holes)
1778
- self.n_holes = n_holes
1779
- self.fill_in = fill_in
1780
- self.with_ratio = cutout_ratio is not None
1781
- self.candidates = cutout_ratio if self.with_ratio else cutout_shape
1782
- if not isinstance(self.candidates, list):
1783
- self.candidates = [self.candidates]
1784
-
1785
- def __call__(self, results):
1786
- """Call function to drop some regions of image."""
1787
- h, w, c = results['img'].shape
1788
- n_holes = np.random.randint(self.n_holes[0], self.n_holes[1] + 1)
1789
- for _ in range(n_holes):
1790
- x1 = np.random.randint(0, w)
1791
- y1 = np.random.randint(0, h)
1792
- index = np.random.randint(0, len(self.candidates))
1793
- if not self.with_ratio:
1794
- cutout_w, cutout_h = self.candidates[index]
1795
- else:
1796
- cutout_w = int(self.candidates[index][0] * w)
1797
- cutout_h = int(self.candidates[index][1] * h)
1798
-
1799
- x2 = np.clip(x1 + cutout_w, 0, w)
1800
- y2 = np.clip(y1 + cutout_h, 0, h)
1801
- results['img'][y1:y2, x1:x2, :] = self.fill_in
1802
-
1803
- return results
1804
-
1805
- def __repr__(self):
1806
- repr_str = self.__class__.__name__
1807
- repr_str += f'(n_holes={self.n_holes}, '
1808
- repr_str += (f'cutout_ratio={self.candidates}, ' if self.with_ratio
1809
- else f'cutout_shape={self.candidates}, ')
1810
- repr_str += f'fill_in={self.fill_in})'
1811
- return repr_str
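Note: both transforms above register themselves in mmdet's PIPELINES registry, so downstream configs refer to them only by class name. A minimal sketch of how the deleted classes would typically be wired into a train pipeline; the crop size, ratios, normalization values and cutout shape below are illustrative placeholders, not values taken from this diff:

    # illustrative mmdet pipeline fragment (Python); numeric values are placeholders
    train_pipeline = [
        dict(type='LoadImageFromFile', to_float32=True),  # float32 input required by RandomCenterCropPad
        dict(type='LoadAnnotations', with_bbox=True),
        dict(
            type='RandomCenterCropPad',
            crop_size=(511, 511),      # (h, w), required in train mode
            ratios=(0.9, 1.0, 1.1),
            border=128,
            mean=[0.0, 0.0, 0.0],
            std=[1.0, 1.0, 1.0],
            to_rgb=True,
            test_pad_mode=None),       # must be None when test_mode=False
        dict(type='CutOut', n_holes=(1, 3), cutout_shape=(16, 16)),
    ]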
 
spaces/Andy1621/uniformer_image_detection/mmdet/models/detectors/htc.py DELETED
@@ -1,15 +0,0 @@
- from ..builder import DETECTORS
- from .cascade_rcnn import CascadeRCNN
-
-
- @DETECTORS.register_module()
- class HybridTaskCascade(CascadeRCNN):
-     """Implementation of `HTC <https://arxiv.org/abs/1901.07518>`_"""
-
-     def __init__(self, **kwargs):
-         super(HybridTaskCascade, self).__init__(**kwargs)
-
-     @property
-     def with_semantic(self):
-         """bool: whether the detector has a semantic head"""
-         return self.roi_head.with_semantic
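The class above only registers HTC as a detector and exposes the with_semantic flag; everything else is inherited from CascadeRCNN. A hedged usage sketch (the config path is assumed from the standard mmdet HTC configs and may differ in this checkout):

    from mmcv import Config
    from mmdet.models import build_detector

    cfg = Config.fromfile('configs/htc/htc_r50_fpn_1x_coco.py')  # assumed config name
    detector = build_detector(cfg.model)
    print(detector.with_semantic)  # True only if the config defines a semantic head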
 
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/multimodal/abstract_pipeline.py DELETED
@@ -1,62 +0,0 @@
- from abc import ABC, abstractmethod
- from typing import List, Optional
-
- import torch
- from PIL import Image
-
-
- class AbstractMultimodalPipeline(ABC):
-     @staticmethod
-     @abstractmethod
-     def name() -> str:
-         'name of the pipeline, should be same as in --multimodal-pipeline'
-         pass
-
-     @staticmethod
-     @abstractmethod
-     def image_start() -> Optional[str]:
-         'return image start string, string representation of image start token, or None if not applicable'
-         pass
-
-     @staticmethod
-     @abstractmethod
-     def image_end() -> Optional[str]:
-         'return image end string, string representation of image end token, or None if not applicable'
-         pass
-
-     @staticmethod
-     @abstractmethod
-     def placeholder_token_id() -> int:
-         'return placeholder token id'
-         pass
-
-     @staticmethod
-     @abstractmethod
-     def num_image_embeds() -> int:
-         'return the number of embeds used by a single image (for example: 256 for LLaVA)'
-         pass
-
-     @abstractmethod
-     def embed_images(self, images: List[Image.Image]) -> torch.Tensor:
-         'forward the images through vision pipeline, and return their embeddings'
-         pass
-
-     @staticmethod
-     @abstractmethod
-     def embed_tokens(input_ids: torch.Tensor) -> torch.Tensor:
-         'embed tokens, the exact function varies by LLM, for LLaMA it is `shared.model.model.embed_tokens`'
-         pass
-
-     @staticmethod
-     @abstractmethod
-     def placeholder_embeddings() -> torch.Tensor:
-         'get placeholder embeddings if there are multiple images, and `add_all_images_to_prompt` is False'
-         pass
-
-     def _get_device(self, setting_name: str, params: dict):
-         if params[setting_name] is None:
-             return torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
-         return torch.device(params[setting_name])
-
-     def _get_dtype(self, setting_name: str, params: dict):
-         return torch.float32 if int(params[setting_name]) == 32 else torch.float16
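For clarity, a minimal concrete implementation of the abstract contract above (not part of the repository; the 4096-dim embeddings and 256 embeds per image are placeholder assumptions):

    from typing import List, Optional

    import torch
    from PIL import Image

    class DummyPipeline(AbstractMultimodalPipeline):  # ABC from the module above
        hidden_size = 4096  # placeholder LLM hidden size

        @staticmethod
        def name() -> str:
            return 'dummy-pipeline'

        @staticmethod
        def image_start() -> Optional[str]:
            return '<Img>'

        @staticmethod
        def image_end() -> Optional[str]:
            return '</Img>'

        @staticmethod
        def placeholder_token_id() -> int:
            return 0

        @staticmethod
        def num_image_embeds() -> int:
            return 256

        def embed_images(self, images: List[Image.Image]) -> torch.Tensor:
            # one zero block of shape (num_image_embeds, hidden_size) per image
            return torch.zeros(len(images) * self.num_image_embeds(), self.hidden_size)

        @staticmethod
        def embed_tokens(input_ids: torch.Tensor) -> torch.Tensor:
            return torch.zeros(*input_ids.shape, DummyPipeline.hidden_size)

        @staticmethod
        def placeholder_embeddings() -> torch.Tensor:
            return torch.zeros(DummyPipeline.num_image_embeds(), DummyPipeline.hidden_size)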
 
spaces/Anonymous-sub/Rerender/ControlNet/ldm/modules/midas/midas/base_model.py DELETED
@@ -1,16 +0,0 @@
- import torch
-
-
- class BaseModel(torch.nn.Module):
-     def load(self, path):
-         """Load model from file.
-
-         Args:
-             path (str): file path
-         """
-         parameters = torch.load(path, map_location=torch.device('cpu'))
-
-         if "optimizer" in parameters:
-             parameters = parameters["model"]
-
-         self.load_state_dict(parameters)
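A short usage sketch for the loader above (the checkpoint path is a placeholder); load() transparently unwraps checkpoints saved as {'model': ..., 'optimizer': ...}:

    import torch

    class TinyDepthNet(BaseModel):  # BaseModel from the module above
        def __init__(self):
            super().__init__()
            self.head = torch.nn.Linear(8, 1)

    model = TinyDepthNet()
    model.load('weights/midas_checkpoint.pt')  # hypothetical checkpoint path
    model.eval()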
 
spaces/Artrajz/vits-simple-api/vits/bert/__init__.py DELETED
@@ -1,15 +0,0 @@
- """ from https://github.com/PlayVoice/vits_chinese """
- import os
-
- import config
- from utils.download import download_and_verify
- from .ProsodyModel import TTSProsody
-
- URLS = [
-     "https://huggingface.co/spaces/maxmax20160403/vits_chinese/resolve/main/bert/prosody_model.pt",
- ]
- TARGET_PATH = os.path.join(config.ABS_PATH, "vits/bert/prosody_model.pt")
- EXPECTED_MD5 = None
-
- if not os.path.exists(TARGET_PATH):
-     success, message = download_and_verify(URLS, TARGET_PATH, EXPECTED_MD5)
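Note that the module above unpacks a (success, message) tuple and then ignores it. A small variant of the final guard that surfaces a failed download, using only the names defined in the file:

    if not os.path.exists(TARGET_PATH):
        success, message = download_and_verify(URLS, TARGET_PATH, EXPECTED_MD5)
        if not success:
            # fail loudly instead of continuing without prosody_model.pt
            raise RuntimeError(f'prosody_model.pt download failed: {message}')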
 
spaces/Audio-AGI/WavJourney/scripts/start_services.sh DELETED
@@ -1 +0,0 @@
- nohup conda run --live-stream -n WavJourney python services.py > services_logs/service.out 2>&1 &
 
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/demo.py DELETED
@@ -1,185 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2
- import argparse
3
- import glob
4
- import multiprocessing as mp
5
- import os
6
- import time
7
- import cv2
8
- import tqdm
9
-
10
- from detectron2.config import get_cfg
11
- from detectron2.data.detection_utils import read_image
12
- from detectron2.utils.logger import setup_logger
13
-
14
- from predictor import VisualizationDemo
15
- from centernet.config import add_centernet_config
16
- # constants
17
- WINDOW_NAME = "CenterNet2 detections"
18
-
19
- from detectron2.utils.video_visualizer import VideoVisualizer
20
- from detectron2.utils.visualizer import ColorMode, Visualizer
21
- from detectron2.data import MetadataCatalog
22
-
23
- def setup_cfg(args):
24
- # load config from file and command-line arguments
25
- cfg = get_cfg()
26
- add_centernet_config(cfg)
27
- cfg.merge_from_file(args.config_file)
28
- cfg.merge_from_list(args.opts)
29
- # Set score_threshold for builtin models
30
- cfg.MODEL.RETINANET.SCORE_THRESH_TEST = args.confidence_threshold
31
- cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args.confidence_threshold
32
- if cfg.MODEL.META_ARCHITECTURE in ['ProposalNetwork', 'CenterNetDetector']:
33
- cfg.MODEL.CENTERNET.INFERENCE_TH = args.confidence_threshold
34
- cfg.MODEL.CENTERNET.NMS_TH = cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST
35
- cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = args.confidence_threshold
36
- cfg.freeze()
37
- return cfg
38
-
39
-
40
- def get_parser():
41
- parser = argparse.ArgumentParser(description="Detectron2 demo for builtin models")
42
- parser.add_argument(
43
- "--config-file",
44
- default="configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml",
45
- metavar="FILE",
46
- help="path to config file",
47
- )
48
- parser.add_argument("--webcam", action="store_true", help="Take inputs from webcam.")
49
- parser.add_argument("--video-input", help="Path to video file.")
50
- parser.add_argument("--input", nargs="+", help="A list of space separated input images")
51
- parser.add_argument(
52
- "--output",
53
- help="A file or directory to save output visualizations. "
54
- "If not given, will show output in an OpenCV window.",
55
- )
56
-
57
- parser.add_argument(
58
- "--confidence-threshold",
59
- type=float,
60
- default=0.3,
61
- help="Minimum score for instance predictions to be shown",
62
- )
63
- parser.add_argument(
64
- "--opts",
65
- help="Modify config options using the command-line 'KEY VALUE' pairs",
66
- default=[],
67
- nargs=argparse.REMAINDER,
68
- )
69
- return parser
70
-
71
-
72
- if __name__ == "__main__":
73
- mp.set_start_method("spawn", force=True)
74
- args = get_parser().parse_args()
75
- logger = setup_logger()
76
- logger.info("Arguments: " + str(args))
77
-
78
- cfg = setup_cfg(args)
79
-
80
- demo = VisualizationDemo(cfg)
81
- output_file = None
82
- if args.input:
83
- if len(args.input) == 1:
84
- args.input = glob.glob(os.path.expanduser(args.input[0]))
85
- files = os.listdir(args.input[0])
86
- args.input = [args.input[0] + x for x in files]
87
- assert args.input, "The input path(s) was not found"
88
- visualizer = VideoVisualizer(
89
- MetadataCatalog.get(
90
- cfg.DATASETS.TEST[0] if len(cfg.DATASETS.TEST) else "__unused"
91
- ),
92
- instance_mode=ColorMode.IMAGE)
93
- for path in tqdm.tqdm(args.input, disable=not args.output):
94
- # use PIL, to be consistent with evaluation
95
- img = read_image(path, format="BGR")
96
- start_time = time.time()
97
- predictions, visualized_output = demo.run_on_image(
98
- img, visualizer=visualizer)
99
- if 'instances' in predictions:
100
- logger.info(
101
- "{}: detected {} instances in {:.2f}s".format(
102
- path, len(predictions["instances"]), time.time() - start_time
103
- )
104
- )
105
- else:
106
- logger.info(
107
- "{}: detected {} instances in {:.2f}s".format(
108
- path, len(predictions["proposals"]), time.time() - start_time
109
- )
110
- )
111
-
112
- if args.output:
113
- if os.path.isdir(args.output):
114
- assert os.path.isdir(args.output), args.output
115
- out_filename = os.path.join(args.output, os.path.basename(path))
116
- visualized_output.save(out_filename)
117
- else:
118
- # assert len(args.input) == 1, "Please specify a directory with args.output"
119
- # out_filename = args.output
120
- if output_file is None:
121
- width = visualized_output.get_image().shape[1]
122
- height = visualized_output.get_image().shape[0]
123
- frames_per_second = 15
124
- output_file = cv2.VideoWriter(
125
- filename=args.output,
126
- # some installation of opencv may not support x264 (due to its license),
127
- # you can try other format (e.g. MPEG)
128
- fourcc=cv2.VideoWriter_fourcc(*"x264"),
129
- fps=float(frames_per_second),
130
- frameSize=(width, height),
131
- isColor=True,
132
- )
133
- output_file.write(visualized_output.get_image()[:, :, ::-1])
134
- else:
135
- # cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
136
- cv2.imshow(WINDOW_NAME, visualized_output.get_image()[:, :, ::-1])
137
- if cv2.waitKey(1 ) == 27:
138
- break # esc to quit
139
- elif args.webcam:
140
- assert args.input is None, "Cannot have both --input and --webcam!"
141
- cam = cv2.VideoCapture(0)
142
- for vis in tqdm.tqdm(demo.run_on_video(cam)):
143
- cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
144
- cv2.imshow(WINDOW_NAME, vis)
145
- if cv2.waitKey(1) == 27:
146
- break # esc to quit
147
- cv2.destroyAllWindows()
148
- elif args.video_input:
149
- video = cv2.VideoCapture(args.video_input)
150
- width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
151
- height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
152
- frames_per_second = 15 # video.get(cv2.CAP_PROP_FPS)
153
- num_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
154
- basename = os.path.basename(args.video_input)
155
-
156
- if args.output:
157
- if os.path.isdir(args.output):
158
- output_fname = os.path.join(args.output, basename)
159
- output_fname = os.path.splitext(output_fname)[0] + ".mkv"
160
- else:
161
- output_fname = args.output
162
- # assert not os.path.isfile(output_fname), output_fname
163
- output_file = cv2.VideoWriter(
164
- filename=output_fname,
165
- # some installation of opencv may not support x264 (due to its license),
166
- # you can try other format (e.g. MPEG)
167
- fourcc=cv2.VideoWriter_fourcc(*"x264"),
168
- fps=float(frames_per_second),
169
- frameSize=(width, height),
170
- isColor=True,
171
- )
172
- assert os.path.isfile(args.video_input)
173
- for vis_frame in tqdm.tqdm(demo.run_on_video(video), total=num_frames):
174
- if args.output:
175
- output_file.write(vis_frame)
176
-
177
- cv2.namedWindow(basename, cv2.WINDOW_NORMAL)
178
- cv2.imshow(basename, vis_frame)
179
- if cv2.waitKey(1) == 27:
180
- break # esc to quit
181
- video.release()
182
- if args.output:
183
- output_file.release()
184
- else:
185
- cv2.destroyAllWindows()
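For reference, a hedged sketch of driving the script's entry points directly from Python (config and weight paths are placeholders, not taken from this diff):

    # builds the same objects as the __main__ block, with explicit arguments
    args = get_parser().parse_args([
        '--config-file', 'projects/CenterNet2/configs/CenterNet2_R50_1x.yaml',
        '--input', 'datasets/demo_images/',   # the script appends file names to this prefix
        '--output', 'vis_out.mp4',
        '--confidence-threshold', '0.3',
        '--opts', 'MODEL.WEIGHTS', 'models/CenterNet2_R50_1x.pth',
    ])
    cfg = setup_cfg(args)
    demo = VisualizationDemo(cfg)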
 
spaces/Benson/text-generation/Examples/3d Solitario Descargar Gratis.md DELETED
@@ -1,61 +0,0 @@
1
- <br />
2
- <h1>Descargar gratis 3D Solitaire: La guía definitiva</h1>
3
- <p>Si eres un fan de los juegos de cartas clásicos, es posible que hayas oído hablar de 3D Solitaire. Es una versión moderna del juego tradicional de solitario, también conocido como Klondike o Patience, con impresionantes gráficos en 3D, ajustes personalizables y un juego adictivo. Pero ¿qué es 3D Solitaire exactamente, y cómo se puede descargar y jugar de forma gratuita? En esta guía, responderemos estas preguntas y más, para que puedas disfrutar de este increíble juego en tu dispositivo. ¡Empecemos! </p>
4
- <h2>¿Qué es el Solitario 3D? </h2>
5
- <p>3D Solitaire es un tipo de juego de solitario que utiliza gráficos tridimensionales para crear una experiencia realista e inmersiva. A diferencia de las cartas planas y aburridas del juego de solitario estándar, las tarjetas 3D Solitaire tienen una percepción de profundidad y una perspectiva de tamaño real, lo que hace que parezcan estar volando en tu pantalla. También puede personalizar el fondo y el respaldo de la cubierta con sus propias imágenes, o elegir entre una variedad de opciones preestablecidas. Jugar 3D Solitaire es como jugar con cartas reales, pero mejor. </p>
6
- <h2>3d solitario descargar gratis</h2><br /><p><b><b>Download</b> &#9658; <a href="https://bltlly.com/2v6MSJ">https://bltlly.com/2v6MSJ</a></b></p><br /><br />
7
- <h3>Historia y evolución del Solitario</h3>
8
- <p>Solitaire es uno de los juegos de cartas más antiguos y populares del mundo. Se originó en Europa a finales del siglo XVIII, y se jugó originalmente con una sola baraja de cartas. El objetivo era organizar las cartas en cuatro montones, uno para cada palo, en orden ascendente de As a Rey. El juego también era conocido como Paciencia, porque requería mucha concentración y habilidad. </p>
9
- <p>Con el tiempo, Solitaire evolucionó en diferentes variaciones, como Spider Solitaire, FreeCell solitaire, Mahjong y más. Algunas de estas variaciones añadieron más mazos, más reglas o más desafíos al juego. Sin embargo, el principio básico seguía siendo el mismo: ordenar las tarjetas de cierta manera. </p>
10
-
11
- <h3>Las características y beneficios de 3D Solitaire</h3>
12
- <p>3D Solitaire es una de las versiones más avanzadas e innovadoras de Solitaire jamás creadas. Ofrece el juego tradicional de Solitario con un historial completo de deshacer y rehacer, animaciones automáticas y configuraciones personalizadas. Usted puede elegir jugar con puntuación normal o Vegas, ofreciendo una experiencia de juego de cartas auténtica. </p>
13
- <p>Pero lo que diferencia a 3D Solitaire de otros juegos de Solitaire son sus impresionantes efectos visuales. El juego cuenta con gráficos 3D inmersivos que te hacen sentir como si estuvieras jugando con cartas reales. También puede personalizar el fondo y el respaldo de la cubierta con sus propias imágenes o elegir entre una variedad de opciones preestablecidas. También puede ajustar la velocidad, el desenfoque del movimiento, las sombras, los reflejos de color, la reproducción automática y las opciones de finalización para adaptarse a sus preferencias. </p>
14
- <p>No solo es visualmente impresionante 3D Solitaire, sino que también desafía tus habilidades lógicas con divertidos rompecabezas de diferentes niveles de dificultad. Puede utilizar indirectas interactivas que le muestran todos los movimientos posibles en la secuencia de lo más probable para resolver el juego menos probable. También puede utilizar soluciones que le muestran los movimientos exactos para resolver el juego, o dejar que el juego se juega automáticamente. También puedes realizar un seguimiento de tu progreso y logros con estadísticas y tablas de clasificación, y comparar tus puntuaciones con otros jugadores de todo el mundo. </p>
15
- <h2>¿Cómo descargar y jugar 3D Solitaire gratis? </h2>
16
- <p>Ahora que sabes lo que es 3D Solitaire y por qué es impresionante, es posible que se pregunte cómo conseguirlo en su dispositivo. La buena noticia es que 3D Solitaire está disponible de forma gratuita en varias plataformas y fuentes, para que pueda disfrutarlo en cualquier momento y en cualquier lugar. Estas son algunas de las mejores opciones para descargar y jugar 3D Solitaire gratis:</p>
17
- <p></p>
18
- <h3>Las mejores plataformas y fuentes para 3D Solitaire</h3>
19
- <p>Dependiendo de su dispositivo y preferencia, puede elegir entre diferentes plataformas y fuentes para 3D Solitaire. Estos son algunos de los más populares:</p>
20
-
21
- <p>Este es uno de los mejores juegos de solitario en 3D para dispositivos Android. Tiene más de 10 millones de descargas y una calificación de 4.5 estrellas en Google Play Store. Ofrece más de 100 juegos diferentes de solitario, incluyendo Klondike, Spider, FreeCell, Pyramid, TriPeaks, Golf y más. También puede personalizar las tarjetas, fondos, animaciones, sonidos y ajustes a su gusto. Puedes descargarlo gratis desde Google Play Store o desde el sitio web oficial. También puede actualizar a la versión premium por $2.99 para eliminar anuncios y desbloquear más características. </p>
22
- <h4>Solitario 3D por GrassGames</h4>
23
- <p>Este es otro gran juego de solitario en 3D que funciona en Windows, Mac, Linux, iOS y dispositivos Android. Tiene más de 1 millón de descargas y una calificación de 4.6 estrellas en App Store. Ofrece más de 250 juegos diferentes de solitario, incluyendo Klondike, Spider, FreeCell, Pyramid, TriPeaks, Golf y más. También puede personalizar las tarjetas, fondos, animaciones, sonidos y ajustes a su gusto. Puedes descargarlo gratis desde App Store, Google Play Store o desde el sitio web oficial. También puede actualizar a la versión premium por $4.99 para eliminar anuncios y desbloquear más características. </p>
24
- <h3>Los consejos y trucos para jugar 3D Solitaire como un profesional</h3>
25
- <p>Ya sea que seas nuevo en Solitario o un jugador experimentado, siempre puedes mejorar tus habilidades y estrategias con algunos consejos y trucos. Estos son algunos de los mejores para jugar 3D Solitaire como un profesional:</p>
26
- <h4>Cómo personalizar la configuración y las preferencias</h4>
27
- <p>Una de las mejores cosas de 3D Solitaire es que puedes adaptarlo a tu gusto. Puede cambiar el tamaño de la tarjeta, el estilo, el color, la fuente y el respaldo con sus propias imágenes o elegir entre una variedad de opciones preestablecidas. También puede cambiar la imagen de fondo o color con sus propias imágenes o elegir entre una variedad de opciones preestablecidas. También puede ajustar la velocidad, el desenfoque del movimiento, las sombras, los reflejos de color, la reproducción automática y las opciones de finalización para adaptarse a sus preferencias. </p>
28
-
29
- <h4>Cómo usar sugerencias y soluciones</h4>
30
- <p>Si estás atascado o necesitas alguna orientación, puedes usar las sugerencias y las funciones de soluciones en 3D Solitaire. Consejos le muestran todos los movimientos posibles en la secuencia de más probabilidades de resolver el juego a menos probable. Las soluciones le muestran los movimientos exactos para resolver el juego, o dejar que el juego se juega automáticamente. Puede acceder a estas características haciendo clic en el icono de bombilla en la esquina superior derecha de la pantalla. También puede ajustar la configuración de sugerencia y solución en el menú Opciones de juego. </p>
31
- <h4>Cómo alcanzar metas y posicionarse en la clasificación</h4>
32
- <p>Si estás buscando un poco de motivación y desafío extra, puedes intentar alcanzar metas y posicionarte en la clasificación en 3D Solitaire. Los objetivos son tareas específicas que necesitas completar en un juego, como limpiar todas las cartas, anotar un cierto número de puntos o terminar dentro de un límite de tiempo. Puedes ver tus objetivos actuales haciendo clic en el icono del trofeo en la esquina superior derecha de la pantalla. También puede ver sus metas y logros completados en el menú Estadísticas. </p>
33
- <p>Leaderboard es una característica que le permite comparar sus puntuaciones y clasificaciones con otros jugadores de todo el mundo. Puedes ver tu posición actual haciendo clic en el icono de estrella en la esquina superior derecha de la pantalla. También puedes ver tus mejores puntuaciones y rankings en diferentes categorías, como juegos jugados, juegos ganados, juegos perdidos, puntuación promedio, puntuación más alta, puntuación más baja, tiempo más rápido, tiempo más lento y racha más larga. También puedes filtrar la clasificación por tipo de juego, nivel de dificultad, modo de puntuación y periodo de tiempo. </p>
34
- <h2>¿Por qué debería probar 3D Solitaire hoy? </h2>
35
- <p>Por ahora, usted podría estar convencido de que 3D Solitaire es un gran juego para jugar. Pero si necesitas más razones para probarlo hoy, aquí están algunas de las ventajas de jugar 3D Solitaire sobre otros juegos de cartas:</p>
36
- <h3>Las ventajas de jugar 3D Solitaire sobre otros juegos de cartas</h3>
37
-
38
- <h4>Es divertido, desafiante y adictivo</h4>
39
- <p>3D Solitaire es un juego que nunca se vuelve viejo o aburrido. Ofrece infinitas variaciones y desafíos que te mantienen entretenido y comprometido. Puedes elegir entre más de 100 juegos de solitario diferentes, cada uno con sus propias reglas y estrategias. También puede personalizar sus ajustes y preferencias para hacer el juego más divertido y desafiante. También puedes competir contigo mismo o con otros jugadores de todo el mundo con metas y tablas de clasificación. Jugar 3D Solitaire es una gran manera de divertirse, desafiarse y mejorar sus habilidades. </p>
40
- <h4>Es visualmente impresionante e inmersiva</h4>
41
- <p>3D Solitaire es un juego que apela a tus sentidos e imaginación. Utiliza gráficos 3D realistas e inmersivos que te hacen sentir que estás jugando con cartas reales. También puede personalizar el fondo y el respaldo de la cubierta con sus propias imágenes o elegir entre una variedad de opciones preestablecidas. Jugar 3D Solitaire es como jugar en un entorno de realidad virtual, donde puedes disfrutar de la belleza y el realismo de los juegos de cartas. </p>
42
- <h4>Es bueno para el cerebro y la salud mental</h4>
43
- <p>3D Solitaire es un juego que estimula tu cerebro y tu salud mental. Requiere habilidades de lógica, concentración, memoria y resolución de problemas que mantengan tu mente aguda y activa. También te ayuda a relajarte, reducir el estrés y mejorar tu estado de ánimo. Jugar al solitario en 3D es una excelente manera de ejercitar tu cerebro y tu salud mental. </p>
44
- <h2>Conclusión</h2>
45
- <p>3D Solitaire es una versión moderna del juego tradicional de solitario, con impresionantes gráficos en 3D, ajustes personalizables y un juego adictivo. Ofrece más de 100 juegos diferentes de solitario, incluyendo Klondike, Spider, FreeCell, Pyramid, TriPeaks, Golf y más. También puede utilizar sugerencias y soluciones para ayudarle a resolver el juego o dejar que se juega automáticamente. También puedes rastrear tu progreso y logros con estadísticas y tablas de clasificación. </p>
46
-
47
- <p>3D Solitaire es un juego divertido, desafiante y adictivo. También es visualmente impresionante y envolvente, y bueno para su cerebro y la salud mental. Si eres un fan de los juegos de cartas clásicos o un recién llegado a Solitaire, deberías probar 3D Solitaire hoy y ver por ti mismo por qué es uno de los mejores juegos de todos los tiempos. </p>
48
- <h2>Preguntas frecuentes</h2>
49
- <p>Aquí están algunas de las preguntas más frecuentes sobre 3D Solitaire:</p>
50
- <h4>Q: ¿Cuáles son los requisitos del sistema para el solitario 3D? </h4>
51
- <p>A: 3D Solitaire es compatible con la mayoría de los dispositivos y sistemas operativos. Sin embargo, para obtener el mejor rendimiento y gráficos, debe tener al menos 1 GB de RAM, 100 MB de espacio de almacenamiento gratuito y una tarjeta gráfica y un procesador decente. </p>
52
- <h4>Q: ¿Cómo puedo cambiar el tipo de juego o el nivel de dificultad en 3D Solitaire? </h4>
53
- <p>A: Para cambiar el tipo de juego o el nivel de dificultad en 3D Solitaire, es necesario acceder al menú haciendo clic en las tres líneas horizontales en la esquina superior izquierda de la pantalla. A continuación, puede seleccionar el tipo de juego opción de la lista de categorías. A continuación, puede elegir entre más de 100 diferentes juegos de solitario, cada uno con sus propias reglas y estrategias. También puedes ajustar el nivel de dificultad cambiando el número de palos, mazos o cartas en juego. </p>
54
- <h4>Q: ¿Cómo puedo guardar o cargar un juego en solitario en 3D? </h4>
55
- <p>A: Para guardar o cargar un juego en 3D Solitaire, es necesario acceder al menú haciendo clic en las tres líneas horizontales en la esquina superior izquierda de la pantalla. A continuación, puede seleccionar la opción Guardar juego o Cargar juego de la lista de categorías. A continuación, puede elegir una ranura para guardar o cargar el juego. También puedes usar la función Guardar automáticamente que guarda tu juego automáticamente cada vez que sales o cambias de aplicación. </p>
56
- <h4>P: ¿Cómo puedo compartir mis resultados o logros en solitario en 3D? </h4>
57
-
58
- <h4>Q: ¿Cómo puedo contactar a los desarrolladores o reportar un error en 3D Solitaire? </h4>
59
- <p>A: Para contactar a los desarrolladores o reportar un error en 3D Solitaire, necesita acceder al menú haciendo clic en las tres líneas horizontales en la esquina superior izquierda de la pantalla. Luego puede seleccionar la opción Ayuda de la lista de categorías. A continuación, puede elegir ponerse en contacto con los desarrolladores por correo electrónico, visitar su sitio web, calificar su aplicación, o informar de un error. </p> 64aa2da5cf<br />
60
- <br />
61
- <br />
 
spaces/Benson/text-generation/Examples/Ai Apk.md DELETED
@@ -1,76 +0,0 @@
1
-
2
- <h1>Ciudad muerta invasión zombi APK: Un juego de supervivencia para Android</h1>
3
- <p>Si usted es un fan de los juegos de zombies, es posible que desee echa un vistazo a Ciudad Muerta Zombie invasión APK, un emocionante juego de supervivencia para dispositivos Android. En este juego, tendrás que luchar contra hordas de zombies que se han apoderado de la ciudad después de un brote de virus. Tendrás que usar varias armas y equipos para defenderte a ti mismo y a tus aliados, así como explorar diferentes lugares y completar misiones. Aquí está todo lo que necesitas saber sobre Dead City Zombie invasión APK, incluyendo sus características, cómo descargarlo e instalarlo, y algunos consejos y trucos para jugarlo. </p>
4
- <h2>ai apk</h2><br /><p><b><b>Download Zip</b> &#8250; <a href="https://bltlly.com/2v6K3c">https://bltlly.com/2v6K3c</a></b></p><br /><br />
5
- <h2>Introducción</h2>
6
- <h3> ¿Qué es la ciudad muerta invasión zombi APK? </h3>
7
- <p>Ciudad muerta invasión zombi APK es un juego de disparos zombie lleno de acción desarrollado por Charm Tech. Está disponible de forma gratuita en la Google Play Store, pero también puede descargar el archivo APK de otras fuentes si desea acceder a algunas características adicionales. El juego tiene una calificación de 4.4 de 5 estrellas en la Play Store, con más de 10 millones de descargas y comentarios positivos de los jugadores. </p>
8
- <h3> ¿Por qué deberías jugar Ciudad muerta invasión zombi APK? </h3>
9
- <p>Ciudad muerta invasión zombi APK es un juego que te mantendrá en el borde de su asiento como te enfrentas a interminables olas de zombies que tienen hambre de su carne. Tendrás que usar tus habilidades y estrategia para sobrevivir en este mundo post-apocalíptico, donde cada decisión importa. También disfrutarás de los siguientes beneficios cuando juegas Dead City Zombie invasión APK:</p>
10
- <ul>
11
- <li>Experimentarás un juego realista e inmersivo, con impresionantes gráficos y efectos de sonido que te harán sentir como si estuvieras en medio de un apocalipsis zombi. </li>
12
- <li>Usted tendrá acceso a una variedad de armas y equipos, tales como pistolas, rifles, escopetas, granadas, cuchillos, armaduras y más. También puedes mejorar tus armas y habilidades para mejorar tu rendimiento. </li>
13
-
14
- <li>Te divertirás explorando diferentes lugares y escenarios, como calles, edificios, metros, fábricas y más. También encontrarás diferentes tipos de zombies, como caminantes, corredores, saltadores, bombarderos y más. </li>
15
- </ul>
16
- <h2>Características de la ciudad muerta invasión zombi APK</h2>
17
- <h3>Varias armas y equipos</h3>
18
- <p>Una de las principales características de Ciudad Muerta Zombie invasión APK es la amplia gama de armas y equipos que se pueden utilizar para luchar contra los zombies. Puedes elegir entre pistolas, rifles, escopetas, granadas, cuchillos, armaduras y más. Cada arma tiene sus propias ventajas y desventajas, como daños, alcance, precisión, velocidad de recarga y capacidad de munición. También puedes mejorar tus armas y habilidades para hacerlas más poderosas y efectivas. </p>
19
- <h3>Diferentes modos y niveles</h3>
20
- <p>Otra característica de la ciudad muerta invasión zombi APK es los diferentes modos y niveles que se pueden jugar. Puedes elegir entre el modo historia, el modo supervivencia, el modo jefe y el modo desafío. Cada modo tiene sus propios objetivos y dificultades, como matar a un cierto número de zombies, sobrevivir durante un tiempo determinado, derrotar a un jefe zombi o completar una tarea específica. También puedes jugar con tus amigos online o offline en modo multijugador. </p>
21
- <h3>Impresionantes gráficos y efectos de sonido</h3>
22
- <p>Una tercera característica de Ciudad Muerta Zombie invasión APK es la impresionante gráficos y efectos de sonido que hacen que el juego más realista e inmersiva. Te sentirás como si estuvieras en medio de un apocalipsis zombi, con entornos detallados, animaciones realistas y efectos de sangre. También escuchará los sonidos de disparos, explosiones, gemidos de zombis y gritos que se sumarán a la tensión y la emoción del juego. </p>
23
- <p></p>
24
- <h2> ¿Cómo descargar e instalar Ciudad Muerta Zombie invasión APK? </h2>
25
- <h3>Descargar el archivo APK de una fuente de confianza</h3>
26
-
27
- <p><a href="">Descargar Ciudad Muerta Zombie invasión APK aquí</a></p>
28
- <h3>Habilitar fuentes desconocidas en su dispositivo</h3>
29
- <p>Antes de poder instalar el archivo APK, tendrá que habilitar fuentes desconocidas en su dispositivo. Esto le permitirá instalar aplicaciones que no son de Google Play Store. Para habilitar fuentes desconocidas, siga estos pasos:</p>
30
- <ol>
31
- <li>Ir a la configuración de su dispositivo y toque en la seguridad o la privacidad. </li>
32
- <li>Encontrar la opción que dice fuentes desconocidas o instalar aplicaciones desconocidas y alternar en. </li>
33
- <li>Confirme su elección tocando en OK o Permitir.</li>
34
- </ol>
35
- <h3>Instalar el archivo APK y lanzar el juego</h3>
36
- <p>Después de haber habilitado fuentes desconocidas, puede instalar el archivo APK y lanzar el juego. Para hacer esto, siga estos pasos:</p>
37
- <ol>
38
- <li>Busque el archivo APK que ha descargado y toque en él. </li>
39
- <li>Siga las instrucciones en la pantalla y toque en Instalar.</li>
40
- <li>Espere a que termine el proceso de instalación y toque en Abrir.</li>
41
- <li> Disfruta jugando Ciudad muerta invasión zombi APK! </li>
42
- </ol>
43
- <h2> Consejos y trucos para jugar Ciudad muerta invasión zombi APK</h2>
44
- <h3>Mejora tus armas y habilidades</h3>
45
- <p>Uno de los consejos para jugar Ciudad Muerta Zombie invasión APK es mejorar sus armas y habilidades tanto como sea posible. Esto te ayudará a hacer más daño, sobrevivir más tiempo y completar misiones más rápido. Puedes mejorar tus armas y habilidades usando monedas y gemas que puedes ganar jugando el juego o viendo anuncios. También puedes comprar monedas y gemas con dinero real si quieres acelerar el proceso. </p>
46
- <h3>Apunta a la cabeza y usa granadas</h3>
47
- <p>Otro consejo para jugar Ciudad Muerta Zombie invasión APK es apuntar a la cabeza y utilizar granadas cuando se lucha contra zombies. Apuntar a la cabeza hará más daño y ahorrará munición, mientras que el uso de granadas causará daño en el área y aturdirá a los zombies. También puedes usar otros artículos, como botiquines, cajas de munición y torretas, para ayudarte en el combate. </p>
48
- <h3>Recopilar recursos y elementos</h3>
49
-
50
- <h2>Conclusión</h2>
51
- <p>Ciudad muerta invasión zombi APK es un juego de supervivencia para dispositivos Android que le desafiará a luchar contra hordas de zombies en un mundo post-apocalíptico. Tendrás que usar varias armas y equipos para defenderte a ti mismo y a tus aliados, así como explorar diferentes lugares y completar misiones. También podrá disfrutar de la jugabilidad realista e inmersiva, con impresionantes gráficos y efectos de sonido. Si usted está buscando un emocionante juego de disparos zombie, usted debe descargar e instalar Ciudad Muerta Zombie invasión APK hoy! </p>
52
- <h2>Preguntas frecuentes</h2>
53
- <p>Aquí están algunas de las preguntas más frecuentes sobre Ciudad Muerta Zombie invasión APK:</p>
54
- <h4> ¿Es seguro descargar Ciudad muerta invasión zombi APK? </h4>
55
- <p>Sí, Ciudad muerta invasión zombi APK es seguro de descargar, siempre y cuando lo obtenga de una fuente de confianza. Sin embargo, siempre debe tener cuidado al descargar cualquier aplicación de fuentes desconocidas, ya que pueden contener virus o malware que pueden dañar su dispositivo. </p>
56
- <h4> ¿Es la ciudad muerta invasión zombi APK libre para jugar? </h4>
57
- <p>Sí, Ciudad muerta invasión zombi APK es libre de jugar, pero contiene anuncios y compras en la aplicación que pueden mejorar su experiencia de juego. Puedes desactivar los anuncios desactivando tu conexión a Internet o comprando la versión sin anuncios del juego. También puedes comprar monedas y gemas con dinero real si quieres mejorar tus armas y habilidades más rápido. </p>
58
- <h4> ¿Cómo puedo jugar con mis amigos en línea o fuera de línea en la ciudad muerta invasión zombi APK? </h4>
59
-
60
- <h4> ¿Cuáles son los requisitos mínimos para jugar Ciudad muerta invasión zombi APK? </h4>
61
- <p>Los requisitos mínimos para jugar Ciudad muerta invasión zombi APK son los siguientes:</p>
62
- <ul>
63
- <li>Versión para Android: 4.4 o superior</li>
64
- <li>RAM: 2 GB o más</li>
65
- <li>Espacio de almacenamiento: 300 MB o más</li>
66
- <li>Conexión a Internet: opcional (requerido para multijugador y anuncios en línea)</li>
67
- </ul>
68
- <h4>¿Cómo puedo contactar con el desarrollador de Dead City Zombie invasión APK? </h4>
69
- <p>Si usted tiene alguna pregunta, retroalimentación, o sugerencias acerca de Ciudad Muerta Zombie invasión APK, puede ponerse en contacto con el desarrollador mediante el uso de los siguientes métodos:</p>
70
- <ul>
71
- <li>Correo electrónico: [email protected]</li>
72
- <li>Facebook: https://www.facebook.com/charmtechgames</li>
73
- <li>Twitter: https://twitter.com/charmtechgames</li>
74
- </ul></p> 64aa2da5cf<br />
75
- <br />
76
- <br />
 
spaces/Benson/text-generation/Examples/Descargar Gratis Fuego Mx Apk 45 Mb.md DELETED
@@ -1,79 +0,0 @@
1
- <br />
2
- <h1>Free Fire MAX: Cómo descargar el archivo APK en 45 MB</h1>
3
- <p>Si eres un fan de los juegos battle royale, es posible que hayas oído hablar de <strong>Free Fire MAX</strong>, una nueva versión del popular juego <strong>Free Fire</strong> que ofrece gráficos mejorados, jugabilidad y compatibilidad. En este artículo, le diremos todo lo que necesita saber sobre Free Fire MAX, incluyendo cómo descargar el archivo APK en solo 45 MB. ¡Sigue leyendo para saber más! </p>
4
- <h2>¿Qué es Free Fire MAX? </h2>
5
- <p>Free Fire MAX es un juego de disparos desarrollado por Garena International que está diseñado exclusivamente para ofrecer una experiencia de juego premium en una batalla real. Se basa en el mismo juego central que Free Fire, pero con características mejoradas como:</p>
6
- <h2>descargar gratis fuego máx apk 45 mb</h2><br /><p><b><b>Download</b> &#10003;&#10003;&#10003; <a href="https://bltlly.com/2v6IOK">https://bltlly.com/2v6IOK</a></b></p><br /><br />
7
- <ul>
8
- <li><strong>Resoluciones ultra HD</strong> que hacen el juego más realista e inmersivo</li>
9
- <li><strong>Efectos impresionantes</strong> que mejoran la experiencia de combate</li>
10
- <li><strong>Contenido exclusivo</strong> que solo está disponible en Free Fire MAX</li>
11
- <li><strong>Tecnología Firelink</strong> que te permite jugar con otros jugadores de Free Fire a través de la compatibilidad multiplataforma</li>
12
- </ul>
13
- <p>Free Fire MAX está actualmente disponible en determinadas regiones, pero se puede descargar desde cualquier lugar utilizando un archivo APK. Te mostraremos cómo hacerlo en la siguiente sección. </p>
14
- <h2>¿Cuáles son los beneficios de jugar Free Fire MAX? </h2>
15
- <p>Si te estás preguntando por qué deberías jugar Free Fire MAX en lugar de Free Fire, estos son algunos de los beneficios que puedes disfrutar:</p>
16
- <ul>
17
- <li><strong>Mejores gráficos</strong>: Free Fire MAX tiene gráficos en HD que son más detallados y vibrantes que Free Fire. Puedes ver cada textura, sombra y reflejo en el mundo del juego. También puede personalizar la configuración de gráficos de acuerdo con las especificaciones y preferencias del dispositivo. </li>
18
-
19
- <li><strong>Mejor compatibilidad</strong>: Free Fire MAX es compatible con más dispositivos que Free Fire. Puede reproducirlo en dispositivos de gama baja, así como en dispositivos de gama alta con un mejor rendimiento. También se puede jugar en PC o Mac utilizando un emulador de Android. También puedes jugar con otros jugadores de Free Fire usando la tecnología Firelink, que explicaremos más adelante. </li>
20
- </ul>
21
- <p>Como puedes ver, Free Fire MAX tiene muchas ventajas sobre Free Fire. Si quieres experimentar el juego por ti mismo, tendrás que descargar el archivo APK, que es un pequeño archivo que contiene los datos del juego. Así es como puedes hacerlo en tan solo 45 MB.</p>
22
- <h2> ¿Cómo descargar el archivo APK Free Fire MAX en 45 MB? </h2>
23
- <p>Para descargar el archivo APK Free Fire MAX en 45 MB, tendrá que seguir estos sencillos pasos:</p>
24
- <h3>Paso 1: Habilitar fuentes desconocidas en el dispositivo</h3>
25
- <p>Antes de poder instalar el archivo APK, tendrá que permitir que su dispositivo instale aplicaciones desde fuentes distintas de Google Play Store. Para ello, vaya a la configuración del dispositivo y busque la opción de seguridad o privacidad. Luego, habilite la opción de fuentes desconocidas. Esto le permitirá instalar aplicaciones de sitios web de terceros. </p>
26
- <h3>Paso 2: Descargar el archivo APK de una fuente de confianza</h3>
27
- <p>Siguiente, tendrá que encontrar y descargar el archivo APK de un sitio web confiable. Hay muchos sitios web que ofrecen archivos APK para varios juegos y aplicaciones, pero no todos ellos son seguros y protegidos. Algunos de ellos pueden contener malware o virus que pueden dañar su dispositivo o robar sus datos. Por lo tanto, solo debe descargar el archivo APK de una fuente de confianza, como <a href="">APKPure</a> o <a href="">APKMirror</a>. Estos sitios web son verificados y probados por millones de usuarios y proporcionan las versiones más recientes y actualizadas de los archivos APK. </p>
28
- <p></p>
29
-
30
- <h3>Paso 3: Instalar el archivo APK en su dispositivo</h3>
31
- <p>Una vez que haya descargado el archivo APK, tendrá que instalarlo en su dispositivo. Para ello, localice el archivo APK en el almacenamiento del dispositivo y toque en él. Verá un mensaje pidiéndole que confirme la instalación. Toque en instalar y espere unos segundos hasta que se complete la instalación. </p>
32
- <h3>Paso 4: Iniciar el juego y disfrutar de</h3>
33
- <p>Después de instalar el archivo APK, puede iniciar el juego tocando en su icono en la pantalla de inicio o cajón de aplicaciones. Verá una pantalla de carga que descargará los datos adicionales del juego, lo que puede tardar algún tiempo dependiendo de su velocidad de Internet. Una vez completada la descarga, puede iniciar sesión con su cuenta de Free Fire existente o crear una nueva. También puedes vincular tu cuenta de Facebook o Google para guardar tu progreso y sincronizar tus datos entre dispositivos. </p>
34
- <p>Ahora, puedes disfrutar jugando Free Fire MAX con gráficos mejorados, jugabilidad y compatibilidad. También puedes jugar con otros jugadores de Free Fire usando la tecnología Firelink, que explicaremos en la siguiente sección. </p>
35
- <h2>¿Cómo se juega Free Fire MAX con otros jugadores de Free Fire? </h2>
36
- <p>Una de las mejores características de Free Fire MAX es que te permite jugar con otros jugadores de Free Fire usando la tecnología <strong>Firelink</strong>. Esta es una tecnología multiplataforma que conecta a los jugadores a través de ambas versiones del juego. Esto significa que puedes jugar con tus amigos y compañeros de equipo que están usando Free Fire o Free Fire MAX sin ningún problema. </p>
37
- <p>Para utilizar esta función, tendrá que activarla en la configuración del juego. Vaya a la configuración y busque la opción Firelink. Luego, enciéndelo y escanea el código QR que aparece en tu pantalla. Esto vinculará tu cuenta con tu dispositivo y te permitirá jugar con otros jugadores usando cualquiera de las versiones del juego. </p>
38
-
39
- <h2>¿Cómo optimizar tu experiencia Free Fire MAX? </h2>
40
- <p>Si quieres aprovechar al máximo tu experiencia Free Fire MAX, aquí hay algunos consejos y trucos que puedes usar:</p>
41
- <h3>Consejo 1: Ajusta la configuración de tus gráficos según las especificaciones de tu dispositivo</h3>
42
- <p>Free Fire MAX tiene gráficos increíbles que hacen que el juego sea más realista e inmersivo. Sin embargo, no todos los dispositivos pueden manejar la configuración de gráficos altos sin afectar el rendimiento o la duración de la batería. Por lo tanto, debe ajustar la configuración de los gráficos de acuerdo con las especificaciones y preferencias del dispositivo. </p>
43
- <p>Para hacer esto, vaya a la configuración y busque la opción de gráficos. Verá un control deslizante que le permite elegir entre ajustes de gráficos bajos, medios, altos y ultra. También puede personalizar la configuración de sus gráficos cambiando la resolución, la velocidad de fotogramas, las sombras, el anti-aliasing y la calidad de la textura. También puedes activar o desactivar el modo HDR, que mejora el color y el contraste del juego. </p>
44
- <p>Deberías experimentar con diferentes configuraciones gráficas hasta que encuentres la que se adapte a tu dispositivo y a tu gusto. También puede comprobar la temperatura del dispositivo y el nivel de batería para ver cuánto impacto tienen los ajustes gráficos en el dispositivo. </p>
45
- <h3>Consejo 2: Utilice una conexión a Internet estable y un buen servicio VPN</h3>
46
- <p>Free Fire MAX es un juego en línea que requiere una conexión a Internet estable para jugar sin problemas y sin interrupciones. Si tiene una conexión a Internet lenta o inestable, puede experimentar retraso, desconexión u otros problemas que pueden arruinar su experiencia de juego. Por lo tanto, debe utilizar una conexión a Internet rápida y confiable para jugar Free Fire MAX. </p>
47
-
48
- <p>Un servicio VPN es un software que puede cifrar el tráfico de Internet y enrutarlo a través de un servidor en otro país. De esta manera, puedes evitar cualquier restricción geográfica o censura que pueda impedirte jugar a Free Fire MAX. También puede disfrutar de un acceso a Internet más rápido y seguro con un servicio VPN. </p>
49
- <p>Hay muchos servicios VPN disponibles en línea, pero no todos son seguros y efectivos. Algunos de ellos pueden contener malware o spyware que pueden dañar su dispositivo o robar sus datos. Algunos de ellos también pueden ralentizar su velocidad de Internet o filtrar su dirección IP. Por lo tanto, solo debe usar un servicio VPN confiable y de buena reputación, como <a href="">NordVPN</a> o <a href="">ExpressVPN</a>. Estos servicios VPN son verificados y probados por millones de usuarios y proporcionan servidores VPN rápidos y seguros en varios países. </p>
50
- <p>Para usar un servicio VPN para jugar a Free Fire MAX, tendrá que descargar e instalar la aplicación VPN en su dispositivo. Luego, tendrás que crear una cuenta y elegir una ubicación de servidor compatible con el juego. Por ejemplo, si quieres jugar a Free Fire MAX en India, puedes elegir un servidor en India. Luego, deberá conectarse al servidor VPN y lanzar el juego. Verás que puedes acceder a los servidores y características del juego sin ningún problema. </p>
51
- <h3>Consejo 3: Utilice BlueStacks para jugar Free Fire MAX en PC o Mac</h3>
52
- <p>Si quieres jugar Free Fire MAX en una pantalla más grande con mejores controles y rendimiento, puedes usar BlueStacks, un emulador de Android que te permite jugar juegos de Android en PC o Mac. BlueStacks es uno de los mejores emuladores de Android disponibles en línea, ya que tiene muchas características que lo hacen ideal para jugar Free Fire MAX, como:</p>
53
- <ul>
54
-
55
- <li><strong>Controles inteligentes</strong>: BlueStacks tiene controles inteligentes que detectan automáticamente cuando estás en combate o en menús y cambian entre los controles del teclado y el ratón en consecuencia. De esta manera, puedes jugar el juego con facilidad y eficiencia. </li>
56
- <li><strong>Grabadora de macros</strong>: BlueStacks tiene una grabadora de macros que le permite grabar y reproducir cualquier acción o secuencia de acciones en el juego con una sola pulsación. De esta manera, puedes automatizar tareas como saquear, curar, recargar o usar habilidades. </li>
57
- <li><strong>Eco mode</strong>: BlueStacks tiene un modo eco que reduce el uso de CPU y RAM del emulador cuando se ejecuta en segundo plano. De esta manera, puede guardar los recursos del dispositivo y la duración de la batería mientras juega Free Fire MAX. </li>
58
- </ul>
59
- <p>Para usar BlueStacks para jugar Free Fire MAX en PC o Mac, tendrá que descargar e instalar BlueStacks en su computadora desde su sitio web oficial <a href="">aquí</a>. Luego, tendrás que iniciar sesión con tu cuenta de Google y buscar Free Fire MAX en Google Play Store. También puede descargar el archivo APK de los sitios web mencionados anteriormente e instalarlo en BlueStacks. Luego, puedes iniciar el juego e iniciar sesión con tu cuenta de Free Fire o crear una nueva. También puede personalizar los controles del teclado y del ratón según sus preferencias. </p>
60
- <p>Ahora, puedes disfrutar jugando Free Fire MAX en PC o Mac con BlueStacks y experimentar el juego de una manera completamente nueva. </p>
61
- <h2>Conclusión</h2>
62
- <p>Free Fire MAX es un gran juego para cualquiera que ame los juegos battle royale y quiera experimentarlos con mejores gráficos, jugabilidad y compatibilidad. Puede descargar el archivo APK en solo 45 MB e instalarlo en su dispositivo fácilmente. También puedes jugar con otros jugadores de Free Fire usando la tecnología Firelink y cambiar entre ambas versiones del juego sin perder tu progreso o datos. También puede usar un servicio VPN para acceder al juego desde cualquier región y usar BlueStacks para jugar el juego en PC o Mac.</p>
63
-
64
- <h2>Preguntas frecuentes</h2>
65
- <p>Aquí están algunas de las preguntas y respuestas más frecuentes sobre Free Fire MAX:</p>
66
- <ul>
67
- <li><strong>Q: ¿Free Fire MAX es libre para jugar? </strong></li>
68
- <li><strong>A: Sí, Free Fire MAX es gratis para jugar y descargar. Sin embargo, puede contener compras en la aplicación que requieren dinero real. </strong></li>
69
- <li><strong>Q: ¿Es seguro descargar e instalar Free Fire MAX? </strong></li>
70
- <li><strong>A: Sí, Free Fire MAX es seguro para descargar e instalar siempre y cuando utilice una fuente de confianza como Google Play Store o APKPure. También debe habilitar fuentes desconocidas en su dispositivo antes de instalar el archivo APK. </strong></li>
71
- <li><strong>Q: ¿Cuáles son los requisitos mínimos para jugar Free Fire MAX? </strong></li>
72
- <li><strong>A: Los requisitos mínimos para jugar Free Fire MAX son Android 4.4 o superior, 2 GB de RAM y 1.5 GB de espacio de almacenamiento libre. </strong></li>
73
- <li><strong>Q: ¿Puedo jugar Free Fire MAX sin conexión? </strong></li>
74
- <li><strong>A: No, Free Fire MAX es un juego en línea que requiere una conexión a Internet para jugar. También debe usar una conexión a Internet estable y un buen servicio VPN para evitar retrasos, desconexiones o restricciones geográficas. </strong></li>
75
- <li><strong>Q: ¿Puedo jugar Free Fire MAX con mis amigos? </strong></li>
76
- <li><strong>A: Sí, puedes jugar a Free Fire MAX con tus amigos usando la tecnología Firelink que conecta a los jugadores en ambas versiones del juego. También puedes usar BlueStacks para jugar Free Fire MAX en PC o Mac con tus amigos. </strong></li>
77
- </ul></p> 64aa2da5cf<br />
78
- <br />
79
- <br />
 
spaces/Big-Web/MMSD/env/Lib/site-packages/boto3/dynamodb/__init__.py DELETED
@@ -1,12 +0,0 @@
1
- # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License"). You
4
- # may not use this file except in compliance with the License. A copy of
5
- # the License is located at
6
- #
7
- # https://aws.amazon.com/apache2.0/
8
- #
9
- # or in the "license" file accompanying this file. This file is
10
- # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11
- # ANY KIND, either express or implied. See the License for the specific
12
- # language governing permissions and limitations under the License.
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/pager.py DELETED
@@ -1,34 +0,0 @@
1
- from abc import ABC, abstractmethod
2
- from typing import Any
3
-
4
-
5
- class Pager(ABC):
6
- """Base class for a pager."""
7
-
8
- @abstractmethod
9
- def show(self, content: str) -> None:
10
- """Show content in pager.
11
-
12
- Args:
13
- content (str): Content to be displayed.
14
- """
15
-
16
-
17
- class SystemPager(Pager):
18
- """Uses the pager installed on the system."""
19
-
20
- def _pager(self, content: str) -> Any: #  pragma: no cover
21
- return __import__("pydoc").pager(content)
22
-
23
- def show(self, content: str) -> None:
24
- """Use the same pager used by pydoc."""
25
- self._pager(content)
26
-
27
-
28
- if __name__ == "__main__": # pragma: no cover
29
- from .__main__ import make_test_card
30
- from .console import Console
31
-
32
- console = Console()
33
- with console.pager(styles=True):
34
- console.print(make_test_card())
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/packaging/__about__.py DELETED
@@ -1,26 +0,0 @@
1
- # This file is dual licensed under the terms of the Apache License, Version
2
- # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3
- # for complete details.
4
-
5
- __all__ = [
6
- "__title__",
7
- "__summary__",
8
- "__uri__",
9
- "__version__",
10
- "__author__",
11
- "__email__",
12
- "__license__",
13
- "__copyright__",
14
- ]
15
-
16
- __title__ = "packaging"
17
- __summary__ = "Core utilities for Python packages"
18
- __uri__ = "https://github.com/pypa/packaging"
19
-
20
- __version__ = "21.3"
21
-
22
- __author__ = "Donald Stufft and individual contributors"
23
-  __email__ = "donald@stufft.io"
24
-
25
- __license__ = "BSD-2-Clause or Apache-2.0"
26
- __copyright__ = "2014-2019 %s" % __author__
 
spaces/Bl1tzie/Jam/server.js DELETED
@@ -1,32 +0,0 @@
1
- const express = require('express');
2
- const proxy = require('express-http-proxy');
3
- const app = express();
4
- const targetUrl = 'https://api.openai.com';
5
- const openaiKey = process.env.OPENAI_KEY
6
- const port = 7860;
7
- const baseUrl = getExternalUrl(process.env.SPACE_ID);
8
-
9
- app.use('/api', proxy(targetUrl, {
10
- proxyReqOptDecorator: (proxyReqOpts, srcReq) => {
11
- // Modify the request headers if necessary
12
- proxyReqOpts.headers['Authorization'] = 'Bearer '+openaiKey;
13
- return proxyReqOpts;
14
- },
15
- }));
16
-
17
- app.get("/", (req, res) => {
18
- res.send(`This is your OpenAI Reverse Proxy URL: ${baseUrl}`);
19
- });
20
-
21
- function getExternalUrl(spaceId) {
22
- try {
23
- const [username, spacename] = spaceId.split("/");
24
- return `https://${username}-${spacename.replace(/_/g, "-")}.hf.space/api/v1`;
25
- } catch (e) {
26
- return "";
27
- }
28
- }
29
-
30
- app.listen(port, () => {
31
- console.log(`Reverse proxy server running on ${baseUrl}`);
32
- });
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/tools/deploy/caffe2_mask_rcnn.cpp DELETED
@@ -1,116 +0,0 @@
1
- // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2
-
3
- #include <c10/util/Flags.h>
4
- #include <caffe2/core/blob.h>
5
- #include <caffe2/core/common.h>
6
- #include <caffe2/core/init.h>
7
- #include <caffe2/core/net.h>
8
- #include <caffe2/core/workspace.h>
9
- #include <caffe2/utils/proto_utils.h>
10
-
11
- #include <opencv2/opencv.hpp>
12
- #include <cassert>
13
- #include <chrono>
14
- #include <iostream>
15
- #include <string>
16
-
17
- C10_DEFINE_string(predict_net, "", "path to model.pb");
18
- C10_DEFINE_string(init_net, "", "path to model_init.pb");
19
- C10_DEFINE_string(input, "", "path to input image");
20
-
21
- using namespace std;
22
- using namespace caffe2;
23
-
24
- int main(int argc, char** argv) {
25
- caffe2::GlobalInit(&argc, &argv);
26
- string predictNetPath = FLAGS_predict_net;
27
- string initNetPath = FLAGS_init_net;
28
- cv::Mat input = cv::imread(FLAGS_input, cv::IMREAD_COLOR);
29
-
30
- const int height = input.rows;
31
- const int width = input.cols;
32
- // FPN models require divisibility of 32
33
- assert(height % 32 == 0 && width % 32 == 0);
34
- const int batch = 1;
35
- const int channels = 3;
36
-
37
- // initialize Net and Workspace
38
- caffe2::NetDef initNet_, predictNet_;
39
- CAFFE_ENFORCE(ReadProtoFromFile(initNetPath, &initNet_));
40
- CAFFE_ENFORCE(ReadProtoFromFile(predictNetPath, &predictNet_));
41
-
42
- Workspace workSpace;
43
- for (auto& str : predictNet_.external_input()) {
44
- workSpace.CreateBlob(str);
45
- }
46
- CAFFE_ENFORCE(workSpace.CreateNet(predictNet_));
47
- CAFFE_ENFORCE(workSpace.RunNetOnce(initNet_));
48
-
49
- // setup inputs
50
- auto data = BlobGetMutableTensor(workSpace.GetBlob("data"), caffe2::CPU);
51
- data->Resize(batch, channels, height, width);
52
- float* ptr = data->mutable_data<float>();
53
- // HWC to CHW
54
- for (int c = 0; c < 3; ++c) {
55
- for (int i = 0; i < height * width; ++i) {
56
- ptr[c * height * width + i] = static_cast<float>(input.data[3 * i + c]);
57
- }
58
- }
59
-
60
- auto im_info =
61
- BlobGetMutableTensor(workSpace.GetBlob("im_info"), caffe2::CPU);
62
- im_info->Resize(batch, 3);
63
- float* im_info_ptr = im_info->mutable_data<float>();
64
- im_info_ptr[0] = height;
65
- im_info_ptr[1] = width;
66
- im_info_ptr[2] = 1.0;
67
-
68
- // run the network
69
- CAFFE_ENFORCE(workSpace.RunNet(predictNet_.name()));
70
-
71
- // run 3 more times to benchmark
72
- int N_benchmark = 3;
73
- auto start_time = chrono::high_resolution_clock::now();
74
- for (int i = 0; i < N_benchmark; ++i) {
75
- CAFFE_ENFORCE(workSpace.RunNet(predictNet_.name()));
76
- }
77
- auto end_time = chrono::high_resolution_clock::now();
78
- auto ms = std::chrono::duration_cast<std::chrono::microseconds>(
79
- end_time - start_time)
80
- .count();
81
- cout << "Latency: " << ms * 1.0 / 1e6 / N_benchmark << " seconds" << endl;
82
-
83
- // parse Mask R-CNN outputs
84
- auto& bbox = BlobGetTensor(*workSpace.GetBlob("bbox_nms"), caffe2::CPU);
85
- auto& scores = BlobGetTensor(*workSpace.GetBlob("score_nms"), caffe2::CPU);
86
- auto& labels = BlobGetTensor(*workSpace.GetBlob("class_nms"), caffe2::CPU);
87
- auto& mask_probs =
88
- BlobGetTensor(*workSpace.GetBlob("mask_fcn_probs"), caffe2::CPU);
89
- cout << "bbox:" << bbox.DebugString() << endl;
90
- cout << "scores:" << scores.DebugString() << endl;
91
- cout << "labels:" << labels.DebugString() << endl;
92
- cout << "mask_probs: " << mask_probs.DebugString() << endl;
93
-
94
- int num_instances = bbox.sizes()[0];
95
- for (int i = 0; i < num_instances; ++i) {
96
- float score = scores.data<float>()[i];
97
- if (score < 0.6)
98
- continue; // skip them
99
-
100
- const float* box = bbox.data<float>() + i * 4;
101
- int label = labels.data<float>()[i];
102
-
103
- cout << "Prediction " << i << ", xyxy=(";
104
- cout << box[0] << ", " << box[1] << ", " << box[2] << ", " << box[3]
105
- << "); score=" << score << "; label=" << label << endl;
106
-
107
- const float* mask = mask_probs.data<float>() +
108
- i * mask_probs.size_from_dim(1) + label * mask_probs.size_from_dim(2);
109
-
110
- // save the 28x28 mask
111
- cv::Mat cv_mask(28, 28, CV_32FC1);
112
- memcpy(cv_mask.data, mask, 28 * 28 * sizeof(float));
113
- cv::imwrite("mask" + std::to_string(i) + ".png", cv_mask * 255.);
114
- }
115
- return 0;
116
- }
 
spaces/CVPR/GFPGAN-example/tests/test_gfpgan_arch.py DELETED
@@ -1,203 +0,0 @@
1
- import torch
2
-
3
- from gfpgan.archs.gfpganv1_arch import FacialComponentDiscriminator, GFPGANv1, StyleGAN2GeneratorSFT
4
- from gfpgan.archs.gfpganv1_clean_arch import GFPGANv1Clean, StyleGAN2GeneratorCSFT
5
-
6
-
7
- def test_stylegan2generatorsft():
8
- """Test arch: StyleGAN2GeneratorSFT."""
9
-
10
- # model init and forward (gpu)
11
- if torch.cuda.is_available():
12
- net = StyleGAN2GeneratorSFT(
13
- out_size=32,
14
- num_style_feat=512,
15
- num_mlp=8,
16
- channel_multiplier=1,
17
- resample_kernel=(1, 3, 3, 1),
18
- lr_mlp=0.01,
19
- narrow=1,
20
- sft_half=False).cuda().eval()
21
- style = torch.rand((1, 512), dtype=torch.float32).cuda()
22
- condition1 = torch.rand((1, 512, 8, 8), dtype=torch.float32).cuda()
23
- condition2 = torch.rand((1, 512, 16, 16), dtype=torch.float32).cuda()
24
- condition3 = torch.rand((1, 512, 32, 32), dtype=torch.float32).cuda()
25
- conditions = [condition1, condition1, condition2, condition2, condition3, condition3]
26
- output = net([style], conditions)
27
- assert output[0].shape == (1, 3, 32, 32)
28
- assert output[1] is None
29
-
30
- # -------------------- with return_latents ----------------------- #
31
- output = net([style], conditions, return_latents=True)
32
- assert output[0].shape == (1, 3, 32, 32)
33
- assert len(output[1]) == 1
34
- # check latent
35
- assert output[1][0].shape == (8, 512)
36
-
37
- # -------------------- with randomize_noise = False ----------------------- #
38
- output = net([style], conditions, randomize_noise=False)
39
- assert output[0].shape == (1, 3, 32, 32)
40
- assert output[1] is None
41
-
42
- # -------------------- with truncation = 0.5 and mixing----------------------- #
43
- output = net([style, style], conditions, truncation=0.5, truncation_latent=style)
44
- assert output[0].shape == (1, 3, 32, 32)
45
- assert output[1] is None
46
-
47
-
48
- def test_gfpganv1():
49
- """Test arch: GFPGANv1."""
50
-
51
- # model init and forward (gpu)
52
- if torch.cuda.is_available():
53
- net = GFPGANv1(
54
- out_size=32,
55
- num_style_feat=512,
56
- channel_multiplier=1,
57
- resample_kernel=(1, 3, 3, 1),
58
- decoder_load_path=None,
59
- fix_decoder=True,
60
- # for stylegan decoder
61
- num_mlp=8,
62
- lr_mlp=0.01,
63
- input_is_latent=False,
64
- different_w=False,
65
- narrow=1,
66
- sft_half=True).cuda().eval()
67
- img = torch.rand((1, 3, 32, 32), dtype=torch.float32).cuda()
68
- output = net(img)
69
- assert output[0].shape == (1, 3, 32, 32)
70
- assert len(output[1]) == 3
71
- # check out_rgbs for intermediate loss
72
- assert output[1][0].shape == (1, 3, 8, 8)
73
- assert output[1][1].shape == (1, 3, 16, 16)
74
- assert output[1][2].shape == (1, 3, 32, 32)
75
-
76
- # -------------------- with different_w = True ----------------------- #
77
- net = GFPGANv1(
78
- out_size=32,
79
- num_style_feat=512,
80
- channel_multiplier=1,
81
- resample_kernel=(1, 3, 3, 1),
82
- decoder_load_path=None,
83
- fix_decoder=True,
84
- # for stylegan decoder
85
- num_mlp=8,
86
- lr_mlp=0.01,
87
- input_is_latent=False,
88
- different_w=True,
89
- narrow=1,
90
- sft_half=True).cuda().eval()
91
- img = torch.rand((1, 3, 32, 32), dtype=torch.float32).cuda()
92
- output = net(img)
93
- assert output[0].shape == (1, 3, 32, 32)
94
- assert len(output[1]) == 3
95
- # check out_rgbs for intermediate loss
96
- assert output[1][0].shape == (1, 3, 8, 8)
97
- assert output[1][1].shape == (1, 3, 16, 16)
98
- assert output[1][2].shape == (1, 3, 32, 32)
99
-
100
-
101
- def test_facialcomponentdiscriminator():
102
- """Test arch: FacialComponentDiscriminator."""
103
-
104
- # model init and forward (gpu)
105
- if torch.cuda.is_available():
106
- net = FacialComponentDiscriminator().cuda().eval()
107
- img = torch.rand((1, 3, 32, 32), dtype=torch.float32).cuda()
108
- output = net(img)
109
- assert len(output) == 2
110
- assert output[0].shape == (1, 1, 8, 8)
111
- assert output[1] is None
112
-
113
- # -------------------- return intermediate features ----------------------- #
114
- output = net(img, return_feats=True)
115
- assert len(output) == 2
116
- assert output[0].shape == (1, 1, 8, 8)
117
- assert len(output[1]) == 2
118
- assert output[1][0].shape == (1, 128, 16, 16)
119
- assert output[1][1].shape == (1, 256, 8, 8)
120
-
121
-
122
- def test_stylegan2generatorcsft():
123
- """Test arch: StyleGAN2GeneratorCSFT."""
124
-
125
- # model init and forward (gpu)
126
- if torch.cuda.is_available():
127
- net = StyleGAN2GeneratorCSFT(
128
- out_size=32, num_style_feat=512, num_mlp=8, channel_multiplier=1, narrow=1, sft_half=False).cuda().eval()
129
- style = torch.rand((1, 512), dtype=torch.float32).cuda()
130
- condition1 = torch.rand((1, 512, 8, 8), dtype=torch.float32).cuda()
131
- condition2 = torch.rand((1, 512, 16, 16), dtype=torch.float32).cuda()
132
- condition3 = torch.rand((1, 512, 32, 32), dtype=torch.float32).cuda()
133
- conditions = [condition1, condition1, condition2, condition2, condition3, condition3]
134
- output = net([style], conditions)
135
- assert output[0].shape == (1, 3, 32, 32)
136
- assert output[1] is None
137
-
138
- # -------------------- with return_latents ----------------------- #
139
- output = net([style], conditions, return_latents=True)
140
- assert output[0].shape == (1, 3, 32, 32)
141
- assert len(output[1]) == 1
142
- # check latent
143
- assert output[1][0].shape == (8, 512)
144
-
145
- # -------------------- with randomize_noise = False ----------------------- #
146
- output = net([style], conditions, randomize_noise=False)
147
- assert output[0].shape == (1, 3, 32, 32)
148
- assert output[1] is None
149
-
150
- # -------------------- with truncation = 0.5 and mixing----------------------- #
151
- output = net([style, style], conditions, truncation=0.5, truncation_latent=style)
152
- assert output[0].shape == (1, 3, 32, 32)
153
- assert output[1] is None
154
-
155
-
156
- def test_gfpganv1clean():
157
- """Test arch: GFPGANv1Clean."""
158
-
159
- # model init and forward (gpu)
160
- if torch.cuda.is_available():
161
- net = GFPGANv1Clean(
162
- out_size=32,
163
- num_style_feat=512,
164
- channel_multiplier=1,
165
- decoder_load_path=None,
166
- fix_decoder=True,
167
- # for stylegan decoder
168
- num_mlp=8,
169
- input_is_latent=False,
170
- different_w=False,
171
- narrow=1,
172
- sft_half=True).cuda().eval()
173
-
174
- img = torch.rand((1, 3, 32, 32), dtype=torch.float32).cuda()
175
- output = net(img)
176
- assert output[0].shape == (1, 3, 32, 32)
177
- assert len(output[1]) == 3
178
- # check out_rgbs for intermediate loss
179
- assert output[1][0].shape == (1, 3, 8, 8)
180
- assert output[1][1].shape == (1, 3, 16, 16)
181
- assert output[1][2].shape == (1, 3, 32, 32)
182
-
183
- # -------------------- with different_w = True ----------------------- #
184
- net = GFPGANv1Clean(
185
- out_size=32,
186
- num_style_feat=512,
187
- channel_multiplier=1,
188
- decoder_load_path=None,
189
- fix_decoder=True,
190
- # for stylegan decoder
191
- num_mlp=8,
192
- input_is_latent=False,
193
- different_w=True,
194
- narrow=1,
195
- sft_half=True).cuda().eval()
196
- img = torch.rand((1, 3, 32, 32), dtype=torch.float32).cuda()
197
- output = net(img)
198
- assert output[0].shape == (1, 3, 32, 32)
199
- assert len(output[1]) == 3
200
- # check out_rgbs for intermediate loss
201
- assert output[1][0].shape == (1, 3, 8, 8)
202
- assert output[1][1].shape == (1, 3, 16, 16)
203
- assert output[1][2].shape == (1, 3, 32, 32)
 
spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/set_operations.h DELETED
@@ -1,44 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a fill of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
-
21
- // the purpose of this header is to #include the set_operations.h header
22
- // of the sequential, host, and device systems. It should be #included in any
23
- // code which uses adl to dispatch set_operations
24
-
25
- #include <thrust/system/detail/sequential/set_operations.h>
26
-
27
- // SCons can't see through the #defines below to figure out what this header
28
- // includes, so we fake it out by specifying all possible files we might end up
29
- // including inside an #if 0.
30
- #if 0
31
- #include <thrust/system/cpp/detail/set_operations.h>
32
- #include <thrust/system/cuda/detail/set_operations.h>
33
- #include <thrust/system/omp/detail/set_operations.h>
34
- #include <thrust/system/tbb/detail/set_operations.h>
35
- #endif
36
-
37
- #define __THRUST_HOST_SYSTEM_SET_OPERATIONS_HEADER <__THRUST_HOST_SYSTEM_ROOT/detail/set_operations.h>
38
- #include __THRUST_HOST_SYSTEM_SET_OPERATIONS_HEADER
39
- #undef __THRUST_HOST_SYSTEM_SET_OPERATIONS_HEADER
40
-
41
- #define __THRUST_DEVICE_SYSTEM_SET_OPERATIONS_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/detail/set_operations.h>
42
- #include __THRUST_DEVICE_SYSTEM_SET_OPERATIONS_HEADER
43
- #undef __THRUST_DEVICE_SYSTEM_SET_OPERATIONS_HEADER
44
-
 
spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/assign_value.h DELETED
@@ -1,23 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
-
21
- // this system inherits assign_value
22
- #include <thrust/system/cpp/detail/assign_value.h>
23
-
 
spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/transform_reduce.h DELETED
@@ -1,23 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
-
21
- // this system inherits transform_reduce
22
- #include <thrust/system/cpp/detail/transform_reduce.h>
23
-
 
spaces/CVPR/WALT/mmdet/datasets/pipelines/__init__.py DELETED
@@ -1,25 +0,0 @@
1
- from .auto_augment import (AutoAugment, BrightnessTransform, ColorTransform,
2
- ContrastTransform, EqualizeTransform, Rotate, Shear,
3
- Translate)
4
- from .compose import Compose
5
- from .formating import (Collect, DefaultFormatBundle, ImageToTensor,
6
- ToDataContainer, ToTensor, Transpose, to_tensor)
7
- from .instaboost import InstaBoost
8
- from .loading import (LoadAnnotations, LoadImageFromFile, LoadImageFromWebcam,
9
- LoadMultiChannelImageFromFiles, LoadProposals)
10
- from .test_time_aug import MultiScaleFlipAug
11
- from .transforms import (Albu, CutOut, Expand, MinIoURandomCrop, Normalize,
12
- Pad, PhotoMetricDistortion, RandomCenterCropPad,
13
- RandomCrop, RandomFlip, Resize, SegRescale)
14
-
15
- __all__ = [
16
- 'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
17
- 'Transpose', 'Collect', 'DefaultFormatBundle', 'LoadAnnotations',
18
- 'LoadImageFromFile', 'LoadImageFromWebcam',
19
- 'LoadMultiChannelImageFromFiles', 'LoadProposals', 'MultiScaleFlipAug',
20
- 'Resize', 'RandomFlip', 'Pad', 'RandomCrop', 'Normalize', 'SegRescale',
21
- 'MinIoURandomCrop', 'Expand', 'PhotoMetricDistortion', 'Albu',
22
- 'InstaBoost', 'RandomCenterCropPad', 'AutoAugment', 'CutOut', 'Shear',
23
- 'Rotate', 'ColorTransform', 'EqualizeTransform', 'BrightnessTransform',
24
- 'ContrastTransform', 'Translate'
25
- ]
 
 
spaces/CVPR/WALT/mmdet/models/roi_heads/pisa_roi_head.py DELETED
@@ -1,159 +0,0 @@
1
- from mmdet.core import bbox2roi
2
- from ..builder import HEADS
3
- from ..losses.pisa_loss import carl_loss, isr_p
4
- from .standard_roi_head import StandardRoIHead
5
-
6
-
7
- @HEADS.register_module()
8
- class PISARoIHead(StandardRoIHead):
9
- r"""The RoI head for `Prime Sample Attention in Object Detection
10
- <https://arxiv.org/abs/1904.04821>`_."""
11
-
12
- def forward_train(self,
13
- x,
14
- img_metas,
15
- proposal_list,
16
- gt_bboxes,
17
- gt_labels,
18
- gt_bboxes_ignore=None,
19
- gt_masks=None):
20
- """Forward function for training.
21
-
22
- Args:
23
- x (list[Tensor]): List of multi-level img features.
24
- img_metas (list[dict]): List of image info dict where each dict
25
- has: 'img_shape', 'scale_factor', 'flip', and may also contain
26
- 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
27
- For details on the values of these keys see
28
- `mmdet/datasets/pipelines/formatting.py:Collect`.
29
- proposals (list[Tensors]): List of region proposals.
30
- gt_bboxes (list[Tensor]): Each item are the truth boxes for each
31
- image in [tl_x, tl_y, br_x, br_y] format.
32
- gt_labels (list[Tensor]): Class indices corresponding to each box
33
- gt_bboxes_ignore (list[Tensor], optional): Specify which bounding
34
- boxes can be ignored when computing the loss.
35
- gt_masks (None | Tensor) : True segmentation masks for each box
36
- used if the architecture supports a segmentation task.
37
-
38
- Returns:
39
- dict[str, Tensor]: a dictionary of loss components
40
- """
41
- # assign gts and sample proposals
42
- if self.with_bbox or self.with_mask:
43
- num_imgs = len(img_metas)
44
- if gt_bboxes_ignore is None:
45
- gt_bboxes_ignore = [None for _ in range(num_imgs)]
46
- sampling_results = []
47
- neg_label_weights = []
48
- for i in range(num_imgs):
49
- assign_result = self.bbox_assigner.assign(
50
- proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i],
51
- gt_labels[i])
52
- sampling_result = self.bbox_sampler.sample(
53
- assign_result,
54
- proposal_list[i],
55
- gt_bboxes[i],
56
- gt_labels[i],
57
- feats=[lvl_feat[i][None] for lvl_feat in x])
58
- # neg label weight is obtained by sampling when using ISR-N
59
- neg_label_weight = None
60
- if isinstance(sampling_result, tuple):
61
- sampling_result, neg_label_weight = sampling_result
62
- sampling_results.append(sampling_result)
63
- neg_label_weights.append(neg_label_weight)
64
-
65
- losses = dict()
66
- # bbox head forward and loss
67
- if self.with_bbox:
68
- bbox_results = self._bbox_forward_train(
69
- x,
70
- sampling_results,
71
- gt_bboxes,
72
- gt_labels,
73
- img_metas,
74
- neg_label_weights=neg_label_weights)
75
- losses.update(bbox_results['loss_bbox'])
76
-
77
- # mask head forward and loss
78
- if self.with_mask:
79
- mask_results = self._mask_forward_train(x, sampling_results,
80
- bbox_results['bbox_feats'],
81
- gt_masks, img_metas)
82
- losses.update(mask_results['loss_mask'])
83
-
84
- return losses
85
-
86
- def _bbox_forward(self, x, rois):
87
- """Box forward function used in both training and testing."""
88
- # TODO: a more flexible way to decide which feature maps to use
89
- bbox_feats = self.bbox_roi_extractor(
90
- x[:self.bbox_roi_extractor.num_inputs], rois)
91
- if self.with_shared_head:
92
- bbox_feats = self.shared_head(bbox_feats)
93
- cls_score, bbox_pred = self.bbox_head(bbox_feats)
94
-
95
- bbox_results = dict(
96
- cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats)
97
- return bbox_results
98
-
99
- def _bbox_forward_train(self,
100
- x,
101
- sampling_results,
102
- gt_bboxes,
103
- gt_labels,
104
- img_metas,
105
- neg_label_weights=None):
106
- """Run forward function and calculate loss for box head in training."""
107
- rois = bbox2roi([res.bboxes for res in sampling_results])
108
-
109
- bbox_results = self._bbox_forward(x, rois)
110
-
111
- bbox_targets = self.bbox_head.get_targets(sampling_results, gt_bboxes,
112
- gt_labels, self.train_cfg)
113
-
114
- # neg_label_weights obtained by sampler is image-wise, mapping back to
115
- # the corresponding location in label weights
116
- if neg_label_weights[0] is not None:
117
- label_weights = bbox_targets[1]
118
- cur_num_rois = 0
119
- for i in range(len(sampling_results)):
120
- num_pos = sampling_results[i].pos_inds.size(0)
121
- num_neg = sampling_results[i].neg_inds.size(0)
122
- label_weights[cur_num_rois + num_pos:cur_num_rois + num_pos +
123
- num_neg] = neg_label_weights[i]
124
- cur_num_rois += num_pos + num_neg
125
-
126
- cls_score = bbox_results['cls_score']
127
- bbox_pred = bbox_results['bbox_pred']
128
-
129
- # Apply ISR-P
130
- isr_cfg = self.train_cfg.get('isr', None)
131
- if isr_cfg is not None:
132
- bbox_targets = isr_p(
133
- cls_score,
134
- bbox_pred,
135
- bbox_targets,
136
- rois,
137
- sampling_results,
138
- self.bbox_head.loss_cls,
139
- self.bbox_head.bbox_coder,
140
- **isr_cfg,
141
- num_class=self.bbox_head.num_classes)
142
- loss_bbox = self.bbox_head.loss(cls_score, bbox_pred, rois,
143
- *bbox_targets)
144
-
145
- # Add CARL Loss
146
- carl_cfg = self.train_cfg.get('carl', None)
147
- if carl_cfg is not None:
148
- loss_carl = carl_loss(
149
- cls_score,
150
- bbox_targets[0],
151
- bbox_pred,
152
- bbox_targets[2],
153
- self.bbox_head.loss_bbox,
154
- **carl_cfg,
155
- num_class=self.bbox_head.num_classes)
156
- loss_bbox.update(loss_carl)
157
-
158
- bbox_results.update(loss_bbox=loss_bbox)
159
- return bbox_results
 
spaces/Catmeow/Face2Painting_From_Photo/app.py DELETED
@@ -1,22 +0,0 @@
1
- import gradio as gr
2
- from paintingface import generate
3
-
4
- title = "Face from Photo into Handed-paint"
5
- description = "Upload a photo, this Ai will detect and transfer only the main face into cartoon/anime handed-painting style. (If cannot detect a face, try the edit button on the right corner of the picture to crop the photo manually.)"
6
- article = "Examples are from Internet"
7
-
8
- Example=[['Example01.jpg'],['Example02.jpg'],['Example03.jpg']]
9
-
10
- demo = gr.Interface(
11
- generate,
12
- inputs = [gr.Image(type="pil", label="Upload a photo")],
13
- outputs= [gr.Image(type="pil", label="Output")],
14
- title=title,
15
- description=description,
16
- article=article,
17
- examples=Example,
18
- allow_flagging='never'
19
- )
20
-
21
- demo.launch()
22
-
 
spaces/CikeyQI/Yunzai/Yunzai/plugins/ws-plugin/apps/message/message.js DELETED
@@ -1,277 +0,0 @@
1
- import { sendSocketList, Config, Version } from '../../components/index.js'
2
- import { makeOneBotReportMsg, makeGSUidReportMsg, setGuildLatestMsgId, setMsgMap } from '../../model/index.js'
3
- import _ from 'lodash'
4
- import cfg from '../../../../lib/config/config.js'
5
-
6
-
7
- Bot.on('message', async e => {
8
- if (e.self_id == '88888'){
9
- if (e.group?.bot?.uin) {
10
- e.self_id = e.group.bot.uin
11
- } else if (e.friend?.bot?.uin) {
12
- e.self_id = e.friend.bot.uin
13
- }
14
- e.bot = Bot[e.self_id]
15
- }
16
- // 被禁言或者全体禁言
17
- if (Config.muteStop && (e.group?.mute_left > 0 || e.group?.all_muted)) return false
18
- // 如果没有已连接的Websocket
19
- if (sendSocketList.length == 0) return false
20
- if (e.group_id) {
21
- // 判断云崽白名单
22
- const whiteGroup = Config.whiteGroup
23
- if (Array.isArray(whiteGroup) && whiteGroup.length > 0) {
24
- if (!whiteGroup.some(i => i == e.group_id)) return false
25
- }
26
- // 判断插件白名单
27
- const yesGroup = Config.yesGroup
28
- if (Array.isArray(yesGroup) && yesGroup.length > 0) {
29
- if (!yesGroup.some(i => i == e.group_id)) return false
30
- }
31
- // 判断云崽黑名单
32
- const blackGroup = Config.blackGroup
33
- if (Array.isArray(blackGroup) && blackGroup.length > 0) {
34
- if (blackGroup.some(i => i == e.group_id)) return false
35
- }
36
- // 判断插件黑名单
37
- const noGroup = Config.noGroup
38
- if (Array.isArray(noGroup) && noGroup.length > 0) {
39
- if (noGroup.some(i => i == e.group_id)) return false
40
- }
41
- }
42
- // 判断插件前缀
43
- if (Array.isArray(Config.noMsgStart) && Config.noMsgStart.length > 0) {
44
- if (e.message?.[0]?.type === 'text') {
45
- if (Config.noMsgStart.some(i => e.message[0].text.startsWith(i))) return false
46
- }
47
- }
48
- let isMaster = e.isMaster
49
- if (Version.isTrss) {
50
- if (e.user_id && cfg.master[e.self_id]?.includes(String(e.user_id))) {
51
- isMaster = true
52
- }
53
- }
54
- const message_id = Math.floor(Math.random() * Math.pow(2, 32)) | 0
55
- let msg = {
56
- time: e.time,
57
- message_id: e.message_id,
58
- message: _.cloneDeep(e.message),
59
- rand: e.rand,
60
- seq: e.seq,
61
- source: e.source,
62
- user_id: e.user_id,
63
- self_id: e.self_id,
64
- isMaster,
65
- sender: e.sender,
66
- param: {
67
- time: e.time,
68
- self_id: e.self_id,
69
- post_type: e.post_type,
70
- message_type: e.message_type,
71
- sub_type: e.sub_type,
72
- message_id,
73
- user_id: e.user_id,
74
- font: 0,
75
- sender: e.sender,
76
- anonymous: e.anonymous ? {
77
- id: e.anonymous.id,
78
- name: e.anonymous.name,
79
- flag: e.anonymous.flag
80
- } : null
81
- }
82
- }
83
- let message = []
84
- //增加isGroup e.isPrivate
85
- if (e.guild_id) {
86
- setGuildLatestMsgId(e.message_id)
87
- //处理成message
88
- if (e.content) {
89
- let content = toMsg(e.content)
90
- message.push(...content)
91
- }
92
- if (e.attachments) {
93
- e.attachments.forEach(i => {
94
- if (i.content_type.startsWith('image')) {
95
- message.push({
96
- type: 'image',
97
- file: i.filename,
98
- url: i.url
99
- })
100
- }
101
- })
102
- }
103
- msg.message = message
104
-
105
- msg.isGuild = true
106
- msg.param = {
107
- time: Math.floor(new Date(msg.timestamp).getTime() / 1000),
108
- post_type: 'message',
109
- message_type: 'guild',
110
- sub_type: 'channel',
111
- guild_id: e.guild_id,
112
- channel_id: e.channel_id,
113
- user_id: e.author.id,
114
- message_id: e.message_id,
115
- self_id: e.bot.appID,
116
- sender: {
117
- user_id: e.author.id,
118
- nickname: e.author.username,
119
- tiny_id: e.author.id,
120
- },
121
- self_tiny_id: e.bot.appID,
122
- }
123
- } else if (e.message_type == 'group') {
124
- msg.isGroup = true
125
- msg.group_id = e.group_id
126
- msg.param.group_id = e.group_id
127
- msg.self_id = e.group?.bot?.uin || msg.self_id
128
- } else if (e.message_type == 'private') {
129
- msg.isPrivate = true
130
- msg.self_id = e.friend?.bot?.uin || msg.self_id
131
- } else {
132
- return false
133
- }
134
- // 判断云崽前缀
135
- msg = onlyReplyAt(msg)
136
- if (!msg) return false
137
- for (const i of sendSocketList) {
138
- if (i.status == 1) {
139
- let reportMsg = null
140
- switch (Number(i.type)) {
141
- case 1:
142
- case 2:
143
- case 6:
144
- if (Version.isTrss) {
145
- if (i.uin != e.self_id) continue
146
- if (!Version.protocol.some(i => i == e.bot?.version?.name)) continue
147
- }
148
- e.reply = reply(e)
149
- reportMsg = await makeOneBotReportMsg(msg)
150
- break;
151
- case 3:
152
- reportMsg = await makeGSUidReportMsg(msg)
153
- break
154
- default:
155
- break;
156
- }
157
- if (reportMsg) i.ws.send(reportMsg)
158
- }
159
- }
160
- })
161
-
162
- function reply(e) {
163
- if (!Version.isTrss) {
164
- const replyNew = e.reply
165
- return async function (massage, quote = false, data = {}) {
166
- const ret = await replyNew(massage, quote, data)
167
- if (ret) {
168
- setMsgMap({
169
- message_id: ret.message_id,
170
- time: ret.time,
171
- seq: ret.seq,
172
- rand: ret.rand,
173
- user_id: e.user_id,
174
- group_id: e.group_id,
175
- onebot_id: Math.floor(Math.random() * Math.pow(2, 32)) | 0,
176
- })
177
- }
178
- return ret
179
- }
180
- } else {
181
- if (e.bot?.version?.name == 'ICQQ') {
182
- return async function (massage, quote = false) {
183
- let ret
184
- if (e.isGroup) {
185
- if (e.group?.sendMsg) {
186
- ret = await e.group.sendMsg(massage, quote)
187
- } else {
188
- ret = await e.bot.pickGroup(e.group_id).sendMsg(massage, quote)
189
- }
190
- } else {
191
- if (e.friend?.sendMsg) {
192
- ret = await e.friend.sendMsg(massage, quote)
193
- } else {
194
- ret = await e.bot.pickFriend(e.user_id).sendMsg(massage, quote)
195
- }
196
- }
197
- if (ret) {
198
- setMsgMap({
199
- message_id: ret.message_id,
200
- time: ret.time,
201
- seq: ret.seq,
202
- rand: ret.rand,
203
- user_id: e.user_id,
204
- group_id: e.group_id,
205
- onebot_id: Math.floor(Math.random() * Math.pow(2, 32)) | 0,
206
- })
207
- }
208
- return ret
209
- }
210
- }
211
- return e.reply
212
- }
213
- }
214
-
215
- function onlyReplyAt(e) {
216
- if (!e.message) return false
217
-
218
- let groupCfg = Version.isTrss ? cfg.getGroup(e.self_id, e.group_id) : cfg.getGroup(e.group_id)
219
- if (groupCfg.onlyReplyAt != 1 || !groupCfg.botAlias || e.isPrivate) return e
220
-
221
- let at = atBot(e)
222
- if (at) return e
223
- e = hasAlias(e, groupCfg)
224
- if (e) return e
225
-
226
- return false
227
- }
228
-
229
- function atBot(e) {
230
- for (const i of e.message) {
231
- if (i.type === 'at') {
232
- if (i.qq == e.self_id) return true
233
- }
234
- }
235
- return false
236
- }
237
-
238
- function hasAlias(e, groupCfg) {
239
- if (e.message[0].type === 'text') {
240
- let alias = groupCfg.botAlias
241
- if (!Array.isArray(alias)) {
242
- alias = [alias]
243
- }
244
- for (let name of alias) {
245
- if (e.message[0].text.startsWith(name)) {
246
- e.message[0].text = _.trimStart(e.message[0].text, name).trim()
247
- return e
248
- }
249
- }
250
- }
251
- return false
252
- }
253
-
254
- function toMsg(content) {
255
- const regex = /<@!(\d+)>|<emoji:(\d+)>|([^<]+)/g;
256
- let match;
257
- const result = [];
258
- while ((match = regex.exec(content)) !== null) {
259
- if (match[1]) {
260
- result.push({
261
- type: 'at',
262
- qq: match[1]
263
- });
264
- } else if (match[2]) {
265
- result.push({
266
- type: 'face',
267
- id: parseInt(match[2])
268
- });
269
- } else if (match[3]) {
270
- result.push({
271
- type: 'text',
272
- text: match[3]
273
- });
274
- }
275
- }
276
- return result;
277
- }
 
spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/layers/scale.py DELETED
@@ -1,11 +0,0 @@
1
- import torch
2
- from torch import nn
3
-
4
-
5
- class Scale(nn.Module):
6
- def __init__(self, init_value=1.0):
7
- super(Scale, self).__init__()
8
- self.scale = nn.Parameter(torch.FloatTensor([init_value]))
9
-
10
- def forward(self, input):
11
- return input * self.scale
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/dateutil/zoneinfo/rebuild.py DELETED
@@ -1,75 +0,0 @@
1
- import logging
2
- import os
3
- import tempfile
4
- import shutil
5
- import json
6
- from subprocess import check_call, check_output
7
- from tarfile import TarFile
8
-
9
- from dateutil.zoneinfo import METADATA_FN, ZONEFILENAME
10
-
11
-
12
- def rebuild(filename, tag=None, format="gz", zonegroups=[], metadata=None):
13
- """Rebuild the internal timezone info in dateutil/zoneinfo/zoneinfo*tar*
14
-
15
- filename is the timezone tarball from ``ftp.iana.org/tz``.
16
-
17
- """
18
- tmpdir = tempfile.mkdtemp()
19
- zonedir = os.path.join(tmpdir, "zoneinfo")
20
- moduledir = os.path.dirname(__file__)
21
- try:
22
- with TarFile.open(filename) as tf:
23
- for name in zonegroups:
24
- tf.extract(name, tmpdir)
25
- filepaths = [os.path.join(tmpdir, n) for n in zonegroups]
26
-
27
- _run_zic(zonedir, filepaths)
28
-
29
- # write metadata file
30
- with open(os.path.join(zonedir, METADATA_FN), 'w') as f:
31
- json.dump(metadata, f, indent=4, sort_keys=True)
32
- target = os.path.join(moduledir, ZONEFILENAME)
33
- with TarFile.open(target, "w:%s" % format) as tf:
34
- for entry in os.listdir(zonedir):
35
- entrypath = os.path.join(zonedir, entry)
36
- tf.add(entrypath, entry)
37
- finally:
38
- shutil.rmtree(tmpdir)
39
-
40
-
41
- def _run_zic(zonedir, filepaths):
42
- """Calls the ``zic`` compiler in a compatible way to get a "fat" binary.
43
-
44
- Recent versions of ``zic`` default to ``-b slim``, while older versions
45
- don't even have the ``-b`` option (but default to "fat" binaries). The
46
- current version of dateutil does not support Version 2+ TZif files, which
47
- causes problems when used in conjunction with "slim" binaries, so this
48
- function is used to ensure that we always get a "fat" binary.
49
- """
50
-
51
- try:
52
- help_text = check_output(["zic", "--help"])
53
- except OSError as e:
54
- _print_on_nosuchfile(e)
55
- raise
56
-
57
- if b"-b " in help_text:
58
- bloat_args = ["-b", "fat"]
59
- else:
60
- bloat_args = []
61
-
62
- check_call(["zic"] + bloat_args + ["-d", zonedir] + filepaths)
63
-
64
-
65
- def _print_on_nosuchfile(e):
66
- """Print helpful troubleshooting message
67
-
68
- e is an exception raised by subprocess.check_call()
69
-
70
- """
71
- if e.errno == 2:
72
- logging.error(
73
- "Could not find zic. Perhaps you need to install "
74
- "libc-bin or some other package that provides it, "
75
- "or it's not in your PATH?")
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/components/highlighted_text.py DELETED
@@ -1,205 +0,0 @@
1
- """gr.HighlightedText() component."""
2
-
3
- from __future__ import annotations
4
-
5
- from typing import Callable, Literal
6
-
7
- from gradio_client.documentation import document, set_documentation_group
8
- from gradio_client.serializing import (
9
- JSONSerializable,
10
- )
11
-
12
- from gradio.components.base import IOComponent, _Keywords
13
- from gradio.deprecation import warn_style_method_deprecation
14
- from gradio.events import (
15
- Changeable,
16
- EventListenerMethod,
17
- Selectable,
18
- )
19
-
20
- set_documentation_group("component")
21
-
22
-
23
- @document()
24
- class HighlightedText(Changeable, Selectable, IOComponent, JSONSerializable):
25
- """
26
- Displays text that contains spans that are highlighted by category or numerical value.
27
- Preprocessing: this component does *not* accept input.
28
- Postprocessing: expects a {List[Tuple[str, float | str]]]} consisting of spans of text and their associated labels, or a {Dict} with two keys: (1) "text" whose value is the complete text, and "entities", which is a list of dictionaries, each of which have the keys: "entity" (consisting of the entity label), "start" (the character index where the label starts), and "end" (the character index where the label ends). Entities should not overlap.
29
-
30
- Demos: diff_texts, text_analysis
31
- Guides: named-entity-recognition
32
- """
33
-
34
- def __init__(
35
- self,
36
- value: list[tuple[str, str | float | None]] | dict | Callable | None = None,
37
- *,
38
- color_map: dict[str, str]
39
- | None = None, # Parameter moved to HighlightedText.style()
40
- show_legend: bool = False,
41
- combine_adjacent: bool = False,
42
- adjacent_separator: str = "",
43
- label: str | None = None,
44
- every: float | None = None,
45
- show_label: bool | None = None,
46
- container: bool = True,
47
- scale: int | None = None,
48
- min_width: int = 160,
49
- visible: bool = True,
50
- elem_id: str | None = None,
51
- elem_classes: list[str] | str | None = None,
52
- **kwargs,
53
- ):
54
- """
55
- Parameters:
56
- value: Default value to show. If callable, the function will be called whenever the app loads to set the initial value of the component.
57
- show_legend: whether to show span categories in a separate legend or inline.
58
- combine_adjacent: If True, will merge the labels of adjacent tokens belonging to the same category.
59
- adjacent_separator: Specifies the separator to be used between tokens if combine_adjacent is True.
60
- label: component name in interface.
61
- every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.
62
- show_label: if True, will display label.
63
- container: If True, will place the component in a container - providing some extra padding around the border.
64
- scale: relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.
65
- min_width: minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.
66
- visible: If False, component will be hidden.
67
- elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
68
- elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.
69
- """
70
- self.color_map = color_map
71
- self.show_legend = show_legend
72
- self.combine_adjacent = combine_adjacent
73
- self.adjacent_separator = adjacent_separator
74
- self.select: EventListenerMethod
75
- """
76
- Event listener for when the user selects Highlighted text span.
77
- Uses event data gradio.SelectData to carry `value` referring to selected [text, label] tuple, and `index` to refer to span index.
78
- See EventData documentation on how to use this event data.
79
- """
80
- IOComponent.__init__(
81
- self,
82
- label=label,
83
- every=every,
84
- show_label=show_label,
85
- container=container,
86
- scale=scale,
87
- min_width=min_width,
88
- visible=visible,
89
- elem_id=elem_id,
90
- elem_classes=elem_classes,
91
- value=value,
92
- **kwargs,
93
- )
94
-
95
- def get_config(self):
96
- return {
97
- "color_map": self.color_map,
98
- "show_legend": self.show_legend,
99
- "value": self.value,
100
- "selectable": self.selectable,
101
- **IOComponent.get_config(self),
102
- }
103
-
104
- @staticmethod
105
- def update(
106
- value: list[tuple[str, str | float | None]]
107
- | dict
108
- | Literal[_Keywords.NO_VALUE]
109
- | None = _Keywords.NO_VALUE,
110
- color_map: dict[str, str] | None = None,
111
- show_legend: bool | None = None,
112
- label: str | None = None,
113
- show_label: bool | None = None,
114
- container: bool | None = None,
115
- scale: int | None = None,
116
- min_width: int | None = None,
117
- visible: bool | None = None,
118
- ):
119
- updated_config = {
120
- "color_map": color_map,
121
- "show_legend": show_legend,
122
- "label": label,
123
- "show_label": show_label,
124
- "container": container,
125
- "scale": scale,
126
- "min_width": min_width,
127
- "visible": visible,
128
- "value": value,
129
- "__type__": "update",
130
- }
131
- return updated_config
132
-
133
- def postprocess(
134
- self, y: list[tuple[str, str | float | None]] | dict | None
135
- ) -> list[tuple[str, str | float | None]] | None:
136
- """
137
- Parameters:
138
- y: List of (word, category) tuples
139
- Returns:
140
- List of (word, category) tuples
141
- """
142
- if y is None:
143
- return None
144
- if isinstance(y, dict):
145
- try:
146
- text = y["text"]
147
- entities = y["entities"]
148
- except KeyError as ke:
149
- raise ValueError(
150
- "Expected a dictionary with keys 'text' and 'entities' "
151
- "for the value of the HighlightedText component."
152
- ) from ke
153
- if len(entities) == 0:
154
- y = [(text, None)]
155
- else:
156
- list_format = []
157
- index = 0
158
- entities = sorted(entities, key=lambda x: x["start"])
159
- for entity in entities:
160
- list_format.append((text[index : entity["start"]], None))
161
- list_format.append(
162
- (text[entity["start"] : entity["end"]], entity["entity"])
163
- )
164
- index = entity["end"]
165
- list_format.append((text[index:], None))
166
- y = list_format
167
- if self.combine_adjacent:
168
- output = []
169
- running_text, running_category = None, None
170
- for text, category in y:
171
- if running_text is None:
172
- running_text = text
173
- running_category = category
174
- elif category == running_category:
175
- running_text += self.adjacent_separator + text
176
- elif not text:
177
- # Skip fully empty item, these get added in processing
178
- # of dictionaries.
179
- pass
180
- else:
181
- output.append((running_text, running_category))
182
- running_text = text
183
- running_category = category
184
- if running_text is not None:
185
- output.append((running_text, running_category))
186
- return output
187
- else:
188
- return y
189
-
190
- def style(
191
- self,
192
- *,
193
- color_map: dict[str, str] | None = None,
194
- container: bool | None = None,
195
- **kwargs,
196
- ):
197
- """
198
- This method is deprecated. Please set these arguments in the constructor instead.
199
- """
200
- warn_style_method_deprecation()
201
- if container is not None:
202
- self.container = container
203
- if color_map is not None:
204
- self.color_map = color_map
205
- return self
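For reference, the dict branch of `postprocess` above turns a `{"text": ..., "entities": [...]}` payload into a list of `(substring, label)` tuples, and the `combine_adjacent` branch then optionally merges neighbouring spans that share a label. A minimal standalone sketch of the dict-to-list mapping (the helper name below is illustrative, not part of the deleted module):

def entities_to_spans(value):
    # Mirror of the dict branch of HighlightedText.postprocess shown above.
    text, entities = value["text"], value["entities"]
    if not entities:
        return [(text, None)]
    spans, index = [], 0
    for ent in sorted(entities, key=lambda e: e["start"]):
        spans.append((text[index:ent["start"]], None))                 # unlabeled gap
        spans.append((text[ent["start"]:ent["end"]], ent["entity"]))   # labeled span
        index = ent["end"]
    spans.append((text[index:], None))                                 # trailing text
    return spans

print(entities_to_spans({
    "text": "Alice met Bob",
    "entities": [{"start": 0, "end": 5, "entity": "PER"},
                 {"start": 10, "end": 13, "entity": "PER"}],
}))
# [('', None), ('Alice', 'PER'), (' met ', None), ('Bob', 'PER'), ('', None)]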
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/interpretation.py DELETED
@@ -1,328 +0,0 @@
1
- """Contains classes and methods related to interpretation for components in Gradio."""
2
-
3
- from __future__ import annotations
4
-
5
- import copy
6
- import math
7
- from abc import ABC, abstractmethod
8
- from typing import TYPE_CHECKING, Any
9
-
10
- import numpy as np
11
- from gradio_client import utils as client_utils
12
-
13
- from gradio import components
14
-
15
- if TYPE_CHECKING: # Only import for type checking (is False at runtime).
16
- from gradio import Interface
17
-
18
-
19
- class Interpretable(ABC): # noqa: B024
20
- def __init__(self) -> None:
21
- self.set_interpret_parameters()
22
-
23
- def set_interpret_parameters(self): # noqa: B027
24
- """
25
- Set any parameters for interpretation. Properties can be set here to be
26
- used in get_interpretation_neighbors and get_interpretation_scores.
27
- """
28
- pass
29
-
30
- def get_interpretation_scores(
31
- self, x: Any, neighbors: list[Any] | None, scores: list[float], **kwargs
32
- ) -> list:
33
- """
34
- Arrange the output values from the neighbors into interpretation scores for the interface to render.
35
- Parameters:
36
- x: Input to interface
37
- neighbors: Neighboring values to input x used for interpretation.
38
- scores: Output value corresponding to each neighbor in neighbors
39
- Returns:
40
- Arrangement of interpretation scores for interfaces to render.
41
- """
42
- return scores
43
-
44
-
45
- class TokenInterpretable(Interpretable, ABC):
46
- @abstractmethod
47
- def tokenize(self, x: Any) -> tuple[list, list, None]:
48
- """
49
- Interprets an input data point x by splitting it into a list of tokens (e.g
50
- a string into words or an image into super-pixels).
51
- """
52
- return [], [], None
53
-
54
- @abstractmethod
55
- def get_masked_inputs(self, tokens: list, binary_mask_matrix: list[list]) -> list:
56
- return []
57
-
58
-
59
- class NeighborInterpretable(Interpretable, ABC):
60
- @abstractmethod
61
- def get_interpretation_neighbors(self, x: Any) -> tuple[list, dict]:
62
- """
63
- Generates values similar to input to be used to interpret the significance of the input in the final output.
64
- Parameters:
65
- x: Input to interface
66
- Returns: (neighbor_values, interpret_kwargs, interpret_by_removal)
67
- neighbor_values: Neighboring values to input x to compute for interpretation
68
- interpret_kwargs: Keyword arguments to be passed to get_interpretation_scores
69
- """
70
- return [], {}
71
-
72
-
73
- async def run_interpret(interface: Interface, raw_input: list):
74
- """
75
- Runs the interpretation command for the machine learning model. Handles both the "default" out-of-the-box
76
- interpretation for a certain set of UI component types, as well as the custom interpretation case.
77
- Parameters:
78
- raw_input: a list of raw inputs to apply the interpretation(s) on.
79
- """
80
- if isinstance(interface.interpretation, list): # Either "default" or "shap"
81
- processed_input = [
82
- input_component.preprocess(raw_input[i])
83
- for i, input_component in enumerate(interface.input_components)
84
- ]
85
- original_output = await interface.call_function(0, processed_input)
86
- original_output = original_output["prediction"]
87
-
88
- if len(interface.output_components) == 1:
89
- original_output = [original_output]
90
-
91
- scores, alternative_outputs = [], []
92
-
93
- for i, (x, interp) in enumerate(zip(raw_input, interface.interpretation)):
94
- if interp == "default":
95
- input_component = interface.input_components[i]
96
- neighbor_raw_input = list(raw_input)
97
- if isinstance(input_component, TokenInterpretable):
98
- tokens, neighbor_values, masks = input_component.tokenize(x)
99
- interface_scores = []
100
- alternative_output = []
101
- for neighbor_input in neighbor_values:
102
- neighbor_raw_input[i] = neighbor_input
103
- processed_neighbor_input = [
104
- input_component.preprocess(neighbor_raw_input[i])
105
- for i, input_component in enumerate(
106
- interface.input_components
107
- )
108
- ]
109
-
110
- neighbor_output = await interface.call_function(
111
- 0, processed_neighbor_input
112
- )
113
- neighbor_output = neighbor_output["prediction"]
114
- if len(interface.output_components) == 1:
115
- neighbor_output = [neighbor_output]
116
- processed_neighbor_output = [
117
- output_component.postprocess(neighbor_output[i])
118
- for i, output_component in enumerate(
119
- interface.output_components
120
- )
121
- ]
122
-
123
- alternative_output.append(processed_neighbor_output)
124
- interface_scores.append(
125
- quantify_difference_in_label(
126
- interface, original_output, neighbor_output
127
- )
128
- )
129
- alternative_outputs.append(alternative_output)
130
- scores.append(
131
- input_component.get_interpretation_scores(
132
- raw_input[i],
133
- neighbor_values,
134
- interface_scores,
135
- masks=masks,
136
- tokens=tokens,
137
- )
138
- )
139
- elif isinstance(input_component, NeighborInterpretable):
140
- (
141
- neighbor_values,
142
- interpret_kwargs,
143
- ) = input_component.get_interpretation_neighbors(
144
- x
145
- ) # type: ignore
146
- interface_scores = []
147
- alternative_output = []
148
- for neighbor_input in neighbor_values:
149
- neighbor_raw_input[i] = neighbor_input
150
- processed_neighbor_input = [
151
- input_component.preprocess(neighbor_raw_input[i])
152
- for i, input_component in enumerate(
153
- interface.input_components
154
- )
155
- ]
156
- neighbor_output = await interface.call_function(
157
- 0, processed_neighbor_input
158
- )
159
- neighbor_output = neighbor_output["prediction"]
160
- if len(interface.output_components) == 1:
161
- neighbor_output = [neighbor_output]
162
- processed_neighbor_output = [
163
- output_component.postprocess(neighbor_output[i])
164
- for i, output_component in enumerate(
165
- interface.output_components
166
- )
167
- ]
168
-
169
- alternative_output.append(processed_neighbor_output)
170
- interface_scores.append(
171
- quantify_difference_in_label(
172
- interface, original_output, neighbor_output
173
- )
174
- )
175
- alternative_outputs.append(alternative_output)
176
- interface_scores = [-score for score in interface_scores]
177
- scores.append(
178
- input_component.get_interpretation_scores(
179
- raw_input[i],
180
- neighbor_values,
181
- interface_scores,
182
- **interpret_kwargs,
183
- )
184
- )
185
- else:
186
- raise ValueError(
187
- f"Component {input_component} does not support interpretation"
188
- )
189
- elif interp == "shap" or interp == "shapley":
190
- try:
191
- import shap # type: ignore
192
- except (ImportError, ModuleNotFoundError) as err:
193
- raise ValueError(
194
- "The package `shap` is required for this interpretation method. Try: `pip install shap`"
195
- ) from err
196
- input_component = interface.input_components[i]
197
- if not isinstance(input_component, TokenInterpretable):
198
- raise ValueError(
199
- f"Input component {input_component} does not support `shap` interpretation"
200
- )
201
-
202
- tokens, _, masks = input_component.tokenize(x)
203
-
204
- # construct a masked version of the input
205
- def get_masked_prediction(binary_mask):
206
- assert isinstance(input_component, TokenInterpretable)
207
- masked_xs = input_component.get_masked_inputs(tokens, binary_mask)
208
- preds = []
209
- for masked_x in masked_xs:
210
- processed_masked_input = copy.deepcopy(processed_input)
211
- processed_masked_input[i] = input_component.preprocess(masked_x)
212
- new_output = client_utils.synchronize_async(
213
- interface.call_function, 0, processed_masked_input
214
- )
215
- new_output = new_output["prediction"]
216
- if len(interface.output_components) == 1:
217
- new_output = [new_output]
218
- pred = get_regression_or_classification_value(
219
- interface, original_output, new_output
220
- )
221
- preds.append(pred)
222
- return np.array(preds)
223
-
224
- num_total_segments = len(tokens)
225
- explainer = shap.KernelExplainer(
226
- get_masked_prediction, np.zeros((1, num_total_segments))
227
- )
228
- shap_values = explainer.shap_values(
229
- np.ones((1, num_total_segments)),
230
- nsamples=int(interface.num_shap * num_total_segments),
231
- silent=True,
232
- )
233
- assert shap_values is not None, "SHAP values could not be calculated"
234
- scores.append(
235
- input_component.get_interpretation_scores(
236
- raw_input[i],
237
- None,
238
- shap_values[0].tolist(),
239
- masks=masks,
240
- tokens=tokens,
241
- )
242
- )
243
- alternative_outputs.append([])
244
- elif interp is None:
245
- scores.append(None)
246
- alternative_outputs.append([])
247
- else:
248
- raise ValueError(f"Unknown interpretation method: {interp}")
249
- return scores, alternative_outputs
250
- elif interface.interpretation: # custom interpretation function
251
- processed_input = [
252
- input_component.preprocess(raw_input[i])
253
- for i, input_component in enumerate(interface.input_components)
254
- ]
255
- interpreter = interface.interpretation
256
- interpretation = interpreter(*processed_input)
257
- if len(raw_input) == 1:
258
- interpretation = [interpretation]
259
- return interpretation, []
260
- else:
261
- raise ValueError("No interpretation method specified.")
262
-
263
-
264
- def diff(original: Any, perturbed: Any) -> int | float:
265
- try: # try computing numerical difference
266
- score = float(original) - float(perturbed)
267
- except ValueError: # otherwise, look at strict difference in label
268
- score = int(original != perturbed)
269
- return score
270
-
271
-
272
- def quantify_difference_in_label(
273
- interface: Interface, original_output: list, perturbed_output: list
274
- ) -> int | float:
275
- output_component = interface.output_components[0]
276
- post_original_output = output_component.postprocess(original_output[0])
277
- post_perturbed_output = output_component.postprocess(perturbed_output[0])
278
-
279
- if isinstance(output_component, components.Label):
280
- original_label = post_original_output["label"]
281
- perturbed_label = post_perturbed_output["label"]
282
-
283
- # Handle different return types of Label interface
284
- if "confidences" in post_original_output:
285
- original_confidence = original_output[0][original_label]
286
- perturbed_confidence = perturbed_output[0][original_label]
287
- score = original_confidence - perturbed_confidence
288
- else:
289
- score = diff(original_label, perturbed_label)
290
- return score
291
-
292
- elif isinstance(output_component, components.Number):
293
- score = diff(post_original_output, post_perturbed_output)
294
- return score
295
-
296
- else:
297
- raise ValueError(
298
- f"This interpretation method doesn't support the Output component: {output_component}"
299
- )
300
-
301
-
302
- def get_regression_or_classification_value(
303
- interface: Interface, original_output: list, perturbed_output: list
304
- ) -> int | float:
305
- """Used to combine regression/classification for Shap interpretation method."""
306
- output_component = interface.output_components[0]
307
- post_original_output = output_component.postprocess(original_output[0])
308
- post_perturbed_output = output_component.postprocess(perturbed_output[0])
309
-
310
- if isinstance(output_component, components.Label):
311
- original_label = post_original_output["label"]
312
- perturbed_label = post_perturbed_output["label"]
313
-
314
- # Handle different return types of Label interface
315
- if "confidences" in post_original_output:
316
- if math.isnan(perturbed_output[0][original_label]):
317
- return 0
318
- return perturbed_output[0][original_label]
319
- else:
320
- score = diff(
321
- perturbed_label, original_label
322
- ) # Intentionally inverted order of arguments.
323
- return score
324
-
325
- else:
326
- raise ValueError(
327
- f"This interpretation method doesn't support the Output component: {output_component}"
328
- )
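The neighbor-based scoring above ultimately reduces to the `diff` helper: a numeric difference when both outputs parse as floats, otherwise a 0/1 label change. A minimal sketch of that rule in isolation:

def diff(original, perturbed):
    # Same scoring rule as in the deleted interpretation.py.
    try:
        return float(original) - float(perturbed)   # numeric outputs
    except ValueError:
        return int(original != perturbed)           # categorical outputs

print(diff("1.0", "0.25"))  # 0.75 -> confidence dropped by 0.75
print(diff("cat", "dog"))   # 1    -> label changed
print(diff("cat", "cat"))   # 0    -> label unchanged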
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio_client/media_data.py DELETED
The diff for this file is too large to render. See raw diff
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/httpcore/_backends/anyio.py DELETED
@@ -1,145 +0,0 @@
1
- import ssl
2
- import typing
3
-
4
- import anyio
5
-
6
- from .._exceptions import (
7
- ConnectError,
8
- ConnectTimeout,
9
- ReadError,
10
- ReadTimeout,
11
- WriteError,
12
- WriteTimeout,
13
- map_exceptions,
14
- )
15
- from .._utils import is_socket_readable
16
- from .base import SOCKET_OPTION, AsyncNetworkBackend, AsyncNetworkStream
17
-
18
-
19
- class AnyIOStream(AsyncNetworkStream):
20
- def __init__(self, stream: anyio.abc.ByteStream) -> None:
21
- self._stream = stream
22
-
23
- async def read(
24
- self, max_bytes: int, timeout: typing.Optional[float] = None
25
- ) -> bytes:
26
- exc_map = {
27
- TimeoutError: ReadTimeout,
28
- anyio.BrokenResourceError: ReadError,
29
- anyio.ClosedResourceError: ReadError,
30
- }
31
- with map_exceptions(exc_map):
32
- with anyio.fail_after(timeout):
33
- try:
34
- return await self._stream.receive(max_bytes=max_bytes)
35
- except anyio.EndOfStream: # pragma: nocover
36
- return b""
37
-
38
- async def write(
39
- self, buffer: bytes, timeout: typing.Optional[float] = None
40
- ) -> None:
41
- if not buffer:
42
- return
43
-
44
- exc_map = {
45
- TimeoutError: WriteTimeout,
46
- anyio.BrokenResourceError: WriteError,
47
- anyio.ClosedResourceError: WriteError,
48
- }
49
- with map_exceptions(exc_map):
50
- with anyio.fail_after(timeout):
51
- await self._stream.send(item=buffer)
52
-
53
- async def aclose(self) -> None:
54
- await self._stream.aclose()
55
-
56
- async def start_tls(
57
- self,
58
- ssl_context: ssl.SSLContext,
59
- server_hostname: typing.Optional[str] = None,
60
- timeout: typing.Optional[float] = None,
61
- ) -> AsyncNetworkStream:
62
- exc_map = {
63
- TimeoutError: ConnectTimeout,
64
- anyio.BrokenResourceError: ConnectError,
65
- }
66
- with map_exceptions(exc_map):
67
- try:
68
- with anyio.fail_after(timeout):
69
- ssl_stream = await anyio.streams.tls.TLSStream.wrap(
70
- self._stream,
71
- ssl_context=ssl_context,
72
- hostname=server_hostname,
73
- standard_compatible=False,
74
- server_side=False,
75
- )
76
- except Exception as exc: # pragma: nocover
77
- await self.aclose()
78
- raise exc
79
- return AnyIOStream(ssl_stream)
80
-
81
- def get_extra_info(self, info: str) -> typing.Any:
82
- if info == "ssl_object":
83
- return self._stream.extra(anyio.streams.tls.TLSAttribute.ssl_object, None)
84
- if info == "client_addr":
85
- return self._stream.extra(anyio.abc.SocketAttribute.local_address, None)
86
- if info == "server_addr":
87
- return self._stream.extra(anyio.abc.SocketAttribute.remote_address, None)
88
- if info == "socket":
89
- return self._stream.extra(anyio.abc.SocketAttribute.raw_socket, None)
90
- if info == "is_readable":
91
- sock = self._stream.extra(anyio.abc.SocketAttribute.raw_socket, None)
92
- return is_socket_readable(sock)
93
- return None
94
-
95
-
96
- class AnyIOBackend(AsyncNetworkBackend):
97
- async def connect_tcp(
98
- self,
99
- host: str,
100
- port: int,
101
- timeout: typing.Optional[float] = None,
102
- local_address: typing.Optional[str] = None,
103
- socket_options: typing.Optional[typing.Iterable[SOCKET_OPTION]] = None,
104
- ) -> AsyncNetworkStream:
105
- if socket_options is None:
106
- socket_options = [] # pragma: no cover
107
- exc_map = {
108
- TimeoutError: ConnectTimeout,
109
- OSError: ConnectError,
110
- anyio.BrokenResourceError: ConnectError,
111
- }
112
- with map_exceptions(exc_map):
113
- with anyio.fail_after(timeout):
114
- stream: anyio.abc.ByteStream = await anyio.connect_tcp(
115
- remote_host=host,
116
- remote_port=port,
117
- local_host=local_address,
118
- )
119
- # By default TCP sockets opened in `asyncio` include TCP_NODELAY.
120
- for option in socket_options:
121
- stream._raw_socket.setsockopt(*option) # type: ignore[attr-defined] # pragma: no cover
122
- return AnyIOStream(stream)
123
-
124
- async def connect_unix_socket(
125
- self,
126
- path: str,
127
- timeout: typing.Optional[float] = None,
128
- socket_options: typing.Optional[typing.Iterable[SOCKET_OPTION]] = None,
129
- ) -> AsyncNetworkStream: # pragma: nocover
130
- if socket_options is None:
131
- socket_options = []
132
- exc_map = {
133
- TimeoutError: ConnectTimeout,
134
- OSError: ConnectError,
135
- anyio.BrokenResourceError: ConnectError,
136
- }
137
- with map_exceptions(exc_map):
138
- with anyio.fail_after(timeout):
139
- stream: anyio.abc.ByteStream = await anyio.connect_unix(path)
140
- for option in socket_options:
141
- stream._raw_socket.setsockopt(*option) # type: ignore[attr-defined] # pragma: no cover
142
- return AnyIOStream(stream)
143
-
144
- async def sleep(self, seconds: float) -> None:
145
- await anyio.sleep(seconds) # pragma: nocover
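A minimal usage sketch for the backend defined above, assuming `httpcore` and `anyio` are installed and network access is available (the host and request bytes are illustrative; the private import path mirrors the deleted file's location):

import anyio
from httpcore._backends.anyio import AnyIOBackend

async def fetch_status_line():
    backend = AnyIOBackend()
    stream = await backend.connect_tcp("example.com", 80, timeout=10)
    await stream.write(
        b"HEAD / HTTP/1.1\r\nHost: example.com\r\nConnection: close\r\n\r\n",
        timeout=10,
    )
    reply = await stream.read(max_bytes=4096, timeout=10)
    await stream.aclose()
    return reply.split(b"\r\n", 1)[0]

print(anyio.run(fetch_status_line))  # e.g. b'HTTP/1.1 200 OK'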
 
spaces/Dagfinn1962/Dreamlikeart-Anime-1.0/style.css DELETED
@@ -1,94 +0,0 @@
1
- #col-container {
2
- background:rgb(9, 2, 27);
3
- color:#FFFFFF;
4
- width: 100%;
5
- margin-left: auto;
6
- margin-right: auto;
7
- }
8
- a {
9
- color: inherit;
10
- text-decoration: underline;
11
- }
12
- .gradio-container {
13
- background:rgb(9, 2, 27);
14
- color:#FFFFFF;
15
- font-family: 'IBM Plex Sans', sans-serif;
16
- }
17
- .gr-button {
18
- color: white;
19
- border-color: #9d66e5;
20
- background: #9e5bf6;
21
- }
22
- input[type='range'] {
23
- accent-color: #9d66e5;
24
- }
25
- .dark input[type='range'] {
26
- accent-color: #dfdfdf;
27
- }
28
- .container {
29
- background:rgb(9, 2, 27);
30
- width: 100%;
31
- margin: auto;
32
- padding-top: 1.5rem;
33
- border-radius: 20px;
34
- }
35
- #gallery {
36
- min-height: 22rem;
37
- margin-bottom: 15px;
38
- margin-left: auto;
39
- margin-right: auto;
40
- border-bottom-right-radius: .5rem !important;
41
- border-bottom-left-radius: .5rem !important;
42
- }
43
- #gallery>div>.h-full {
44
- min-height: 20rem;
45
- }
46
- .details:hover {
47
- text-decoration: underline;
48
- }
49
- .gr-button {
50
- white-space: nowrap;
51
- }
52
- .gr-button:focus {
53
- border-color: rgb(147 197 253 / var(--tw-border-opacity));
54
- outline: none;
55
- box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
56
- --tw-border-opacity: 1;
57
- --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
58
- --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px var(--tw-ring-offset-width)) var(--tw-ring-color);
59
- --tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity));
60
- --tw-ring-opacity: .5;
61
- }
62
- #advanced-options {
63
- margin-bottom: 20px;
64
- }
65
- .footer {
66
- background: #090956;
67
- color:#FFFFFF;
68
- margin-bottom: 45px;
69
- margin-top: 35px;
70
- text-align: center;
71
- border-bottom: 1px solid #e5e5e5;
72
- }
73
- .footer>p {
74
- color:#FFFFFF;
75
- font-size: .8rem;
76
- display: inline-block;
77
- padding: 0 10px;
78
- transform: translateY(10px);
79
- background: white;
80
- }
81
- .dark .logo{ filter: invert(1); }
82
- .dark .footer {
83
- border-color: #303030;
84
- }
85
- .dark .footer>p {
86
- background: #0b0f19;
87
- }
88
- .acknowledgments h4{
89
- color:#FFFFFF;
90
- margin: 1.25em 0 .25em 0;
91
- font-weight: bold;
92
- font-size: 115%;
93
- }
94
-
 
spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/dataset/pano_s2d3d_mix_dataset.py DELETED
@@ -1,91 +0,0 @@
1
- """
2
- @date: 2021/6/16
3
- @description:
4
- """
5
-
6
- import os
7
-
8
- from dataset.pano_s2d3d_dataset import PanoS2D3DDataset
9
- from utils.logger import get_logger
10
-
11
-
12
- class PanoS2D3DMixDataset(PanoS2D3DDataset):
13
- def __init__(self, root_dir, mode, shape=None, max_wall_num=0, aug=None, camera_height=1.6, logger=None,
14
- split_list=None, patch_num=256, keys=None, for_test_index=None, subset=None):
15
- assert subset == 's2d3d' or subset == 'pano', 'error subset'
16
- super().__init__(root_dir, None, shape, max_wall_num, aug, camera_height, logger,
17
- split_list, patch_num, keys, None, subset)
18
- if logger is None:
19
- logger = get_logger()
20
- self.mode = mode
21
- if mode == 'train':
22
- if subset == 'pano':
23
- s2d3d_train_data = PanoS2D3DDataset(root_dir, 'train', shape, max_wall_num, aug, camera_height, logger,
24
- split_list, patch_num, keys, None, 's2d3d').data
25
- s2d3d_val_data = PanoS2D3DDataset(root_dir, 'val', shape, max_wall_num, aug, camera_height, logger,
26
- split_list, patch_num, keys, None, 's2d3d').data
27
- s2d3d_test_data = PanoS2D3DDataset(root_dir, 'test', shape, max_wall_num, aug, camera_height, logger,
28
- split_list, patch_num, keys, None, 's2d3d').data
29
- s2d3d_all_data = s2d3d_train_data + s2d3d_val_data + s2d3d_test_data
30
-
31
- pano_train_data = PanoS2D3DDataset(root_dir, 'train', shape, max_wall_num, aug, camera_height, logger,
32
- split_list, patch_num, keys, None, 'pano').data
33
- self.data = s2d3d_all_data + pano_train_data
34
- elif subset == 's2d3d':
35
- pano_train_data = PanoS2D3DDataset(root_dir, 'train', shape, max_wall_num, aug, camera_height, logger,
36
- split_list, patch_num, keys, None, 'pano').data
37
- pano_val_data = PanoS2D3DDataset(root_dir, 'val', shape, max_wall_num, aug, camera_height, logger,
38
- split_list, patch_num, keys, None, 'pano').data
39
- pano_test_data = PanoS2D3DDataset(root_dir, 'test', shape, max_wall_num, aug, camera_height, logger,
40
- split_list, patch_num, keys, None, 'pano').data
41
- pano_all_data = pano_train_data + pano_val_data + pano_test_data
42
-
43
- s2d3d_train_data = PanoS2D3DDataset(root_dir, 'train', shape, max_wall_num, aug, camera_height, logger,
44
- split_list, patch_num, keys, None, 's2d3d').data
45
- self.data = pano_all_data + s2d3d_train_data
46
- else:
47
- self.data = PanoS2D3DDataset(root_dir, mode, shape, max_wall_num, aug, camera_height, logger,
48
- split_list, patch_num, keys, None, subset).data
49
-
50
- if for_test_index is not None:
51
- self.data = self.data[:for_test_index]
52
- logger.info(f"Build dataset mode: {self.mode} valid: {len(self.data)}")
53
-
54
-
55
- if __name__ == '__main__':
56
- import numpy as np
57
- from PIL import Image
58
-
59
- from tqdm import tqdm
60
- from visualization.boundary import draw_boundaries
61
- from visualization.floorplan import draw_floorplan
62
- from utils.boundary import depth2boundaries
63
- from utils.conversion import uv2xyz
64
-
65
- modes = ['test', 'val', 'train']
66
- for i in range(1):
67
- for mode in modes:
68
- print(mode)
69
- mp3d_dataset = PanoS2D3DMixDataset(root_dir='../src/dataset/pano_s2d3d', mode=mode, aug={
70
- # 'STRETCH': True,
71
- # 'ROTATE': True,
72
- # 'FLIP': True,
73
- # 'GAMMA': True
74
- }, subset='pano')
75
- continue
76
- save_dir = f'../src/dataset/pano_s2d3d/visualization1/{mode}'
77
- if not os.path.isdir(save_dir):
78
- os.makedirs(save_dir)
79
-
80
- bar = tqdm(mp3d_dataset, ncols=100)
81
- for data in bar:
82
- bar.set_description(f"Processing {data['id']}")
83
- boundary_list = depth2boundaries(data['ratio'], data['depth'], step=None)
84
- pano_img = draw_boundaries(data['image'].transpose(1, 2, 0), boundary_list=boundary_list, show=False)
85
- Image.fromarray((pano_img * 255).astype(np.uint8)).save(
86
- os.path.join(save_dir, f"{data['id']}_boundary.png"))
87
-
88
- floorplan = draw_floorplan(uv2xyz(boundary_list[0])[..., ::2], show=False,
89
- marker_color=None, center_color=0.8, show_radius=None)
90
- Image.fromarray((floorplan.squeeze() * 255).astype(np.uint8)).save(
91
- os.path.join(save_dir, f"{data['id']}_floorplan.png"))
 
spaces/DemoLou/moe-tts/transforms.py DELETED
@@ -1,193 +0,0 @@
1
- import torch
2
- from torch.nn import functional as F
3
-
4
- import numpy as np
5
-
6
-
7
- DEFAULT_MIN_BIN_WIDTH = 1e-3
8
- DEFAULT_MIN_BIN_HEIGHT = 1e-3
9
- DEFAULT_MIN_DERIVATIVE = 1e-3
10
-
11
-
12
- def piecewise_rational_quadratic_transform(inputs,
13
- unnormalized_widths,
14
- unnormalized_heights,
15
- unnormalized_derivatives,
16
- inverse=False,
17
- tails=None,
18
- tail_bound=1.,
19
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
20
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
21
- min_derivative=DEFAULT_MIN_DERIVATIVE):
22
-
23
- if tails is None:
24
- spline_fn = rational_quadratic_spline
25
- spline_kwargs = {}
26
- else:
27
- spline_fn = unconstrained_rational_quadratic_spline
28
- spline_kwargs = {
29
- 'tails': tails,
30
- 'tail_bound': tail_bound
31
- }
32
-
33
- outputs, logabsdet = spline_fn(
34
- inputs=inputs,
35
- unnormalized_widths=unnormalized_widths,
36
- unnormalized_heights=unnormalized_heights,
37
- unnormalized_derivatives=unnormalized_derivatives,
38
- inverse=inverse,
39
- min_bin_width=min_bin_width,
40
- min_bin_height=min_bin_height,
41
- min_derivative=min_derivative,
42
- **spline_kwargs
43
- )
44
- return outputs, logabsdet
45
-
46
-
47
- def searchsorted(bin_locations, inputs, eps=1e-6):
48
- bin_locations[..., -1] += eps
49
- return torch.sum(
50
- inputs[..., None] >= bin_locations,
51
- dim=-1
52
- ) - 1
53
-
54
-
55
- def unconstrained_rational_quadratic_spline(inputs,
56
- unnormalized_widths,
57
- unnormalized_heights,
58
- unnormalized_derivatives,
59
- inverse=False,
60
- tails='linear',
61
- tail_bound=1.,
62
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
63
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
64
- min_derivative=DEFAULT_MIN_DERIVATIVE):
65
- inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
66
- outside_interval_mask = ~inside_interval_mask
67
-
68
- outputs = torch.zeros_like(inputs)
69
- logabsdet = torch.zeros_like(inputs)
70
-
71
- if tails == 'linear':
72
- unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
73
- constant = np.log(np.exp(1 - min_derivative) - 1)
74
- unnormalized_derivatives[..., 0] = constant
75
- unnormalized_derivatives[..., -1] = constant
76
-
77
- outputs[outside_interval_mask] = inputs[outside_interval_mask]
78
- logabsdet[outside_interval_mask] = 0
79
- else:
80
- raise RuntimeError('{} tails are not implemented.'.format(tails))
81
-
82
- outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline(
83
- inputs=inputs[inside_interval_mask],
84
- unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
85
- unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
86
- unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
87
- inverse=inverse,
88
- left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound,
89
- min_bin_width=min_bin_width,
90
- min_bin_height=min_bin_height,
91
- min_derivative=min_derivative
92
- )
93
-
94
- return outputs, logabsdet
95
-
96
- def rational_quadratic_spline(inputs,
97
- unnormalized_widths,
98
- unnormalized_heights,
99
- unnormalized_derivatives,
100
- inverse=False,
101
- left=0., right=1., bottom=0., top=1.,
102
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
103
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
104
- min_derivative=DEFAULT_MIN_DERIVATIVE):
105
- if torch.min(inputs) < left or torch.max(inputs) > right:
106
- raise ValueError('Input to a transform is not within its domain')
107
-
108
- num_bins = unnormalized_widths.shape[-1]
109
-
110
- if min_bin_width * num_bins > 1.0:
111
- raise ValueError('Minimal bin width too large for the number of bins')
112
- if min_bin_height * num_bins > 1.0:
113
- raise ValueError('Minimal bin height too large for the number of bins')
114
-
115
- widths = F.softmax(unnormalized_widths, dim=-1)
116
- widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
117
- cumwidths = torch.cumsum(widths, dim=-1)
118
- cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0)
119
- cumwidths = (right - left) * cumwidths + left
120
- cumwidths[..., 0] = left
121
- cumwidths[..., -1] = right
122
- widths = cumwidths[..., 1:] - cumwidths[..., :-1]
123
-
124
- derivatives = min_derivative + F.softplus(unnormalized_derivatives)
125
-
126
- heights = F.softmax(unnormalized_heights, dim=-1)
127
- heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
128
- cumheights = torch.cumsum(heights, dim=-1)
129
- cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0)
130
- cumheights = (top - bottom) * cumheights + bottom
131
- cumheights[..., 0] = bottom
132
- cumheights[..., -1] = top
133
- heights = cumheights[..., 1:] - cumheights[..., :-1]
134
-
135
- if inverse:
136
- bin_idx = searchsorted(cumheights, inputs)[..., None]
137
- else:
138
- bin_idx = searchsorted(cumwidths, inputs)[..., None]
139
-
140
- input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
141
- input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
142
-
143
- input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
144
- delta = heights / widths
145
- input_delta = delta.gather(-1, bin_idx)[..., 0]
146
-
147
- input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
148
- input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]
149
-
150
- input_heights = heights.gather(-1, bin_idx)[..., 0]
151
-
152
- if inverse:
153
- a = (((inputs - input_cumheights) * (input_derivatives
154
- + input_derivatives_plus_one
155
- - 2 * input_delta)
156
- + input_heights * (input_delta - input_derivatives)))
157
- b = (input_heights * input_derivatives
158
- - (inputs - input_cumheights) * (input_derivatives
159
- + input_derivatives_plus_one
160
- - 2 * input_delta))
161
- c = - input_delta * (inputs - input_cumheights)
162
-
163
- discriminant = b.pow(2) - 4 * a * c
164
- assert (discriminant >= 0).all()
165
-
166
- root = (2 * c) / (-b - torch.sqrt(discriminant))
167
- outputs = root * input_bin_widths + input_cumwidths
168
-
169
- theta_one_minus_theta = root * (1 - root)
170
- denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
171
- * theta_one_minus_theta)
172
- derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2)
173
- + 2 * input_delta * theta_one_minus_theta
174
- + input_derivatives * (1 - root).pow(2))
175
- logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
176
-
177
- return outputs, -logabsdet
178
- else:
179
- theta = (inputs - input_cumwidths) / input_bin_widths
180
- theta_one_minus_theta = theta * (1 - theta)
181
-
182
- numerator = input_heights * (input_delta * theta.pow(2)
183
- + input_derivatives * theta_one_minus_theta)
184
- denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
185
- * theta_one_minus_theta)
186
- outputs = input_cumheights + numerator / denominator
187
-
188
- derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2)
189
- + 2 * input_delta * theta_one_minus_theta
190
- + input_derivatives * (1 - theta).pow(2))
191
- logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
192
-
193
- return outputs, logabsdet
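A quick self-check of the spline above: the transform is analytically invertible, so applying it forward and then with `inverse=True` should recover the input up to numerical tolerance. A minimal sketch, assuming the deleted module is importable as `transforms` and PyTorch is installed:

import torch
from transforms import piecewise_rational_quadratic_transform

num_bins = 8
x = torch.rand(4, 10) * 2 - 1            # points inside the tail bound (-1, 1)
w = torch.randn(4, 10, num_bins)         # unnormalized widths
h = torch.randn(4, 10, num_bins)         # unnormalized heights
d = torch.randn(4, 10, num_bins - 1)     # unnormalized derivatives (padded internally for 'linear' tails)

y, logdet = piecewise_rational_quadratic_transform(x, w, h, d, tails='linear')
x_back, _ = piecewise_rational_quadratic_transform(y, w, h, d, inverse=True, tails='linear')
print(torch.allclose(x, x_back, atol=1e-4))  # True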
 
spaces/ECCV2022/bytetrack/yolox/data/data_augment.py DELETED
@@ -1,299 +0,0 @@
1
- #!/usr/bin/env python3
2
- # -*- coding:utf-8 -*-
3
- # Copyright (c) Megvii, Inc. and its affiliates.
4
- """
5
- Data augmentation functionality. Passed as callable transformations to
6
- Dataset classes.
7
-
8
- The data augmentation procedures were interpreted from @weiliu89's SSD paper
9
- http://arxiv.org/abs/1512.02325
10
- """
11
-
12
- import cv2
13
- import numpy as np
14
-
15
- import torch
16
-
17
- from yolox.utils import xyxy2cxcywh
18
-
19
- import math
20
- import random
21
-
22
-
23
- def augment_hsv(img, hgain=0.015, sgain=0.7, vgain=0.4):
24
- r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains
25
- hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
26
- dtype = img.dtype # uint8
27
-
28
- x = np.arange(0, 256, dtype=np.int16)
29
- lut_hue = ((x * r[0]) % 180).astype(dtype)
30
- lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
31
- lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
32
-
33
- img_hsv = cv2.merge(
34
- (cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))
35
- ).astype(dtype)
36
- cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
37
-
38
-
39
- def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.2):
40
- # box1(4,n), box2(4,n)
41
- # Compute candidate boxes which include follwing 5 things:
42
- # box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
43
- w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
44
- w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
45
- ar = np.maximum(w2 / (h2 + 1e-16), h2 / (w2 + 1e-16)) # aspect ratio
46
- return (
47
- (w2 > wh_thr)
48
- & (h2 > wh_thr)
49
- & (w2 * h2 / (w1 * h1 + 1e-16) > area_thr)
50
- & (ar < ar_thr)
51
- ) # candidates
52
-
53
-
54
- def random_perspective(
55
- img,
56
- targets=(),
57
- degrees=10,
58
- translate=0.1,
59
- scale=0.1,
60
- shear=10,
61
- perspective=0.0,
62
- border=(0, 0),
63
- ):
64
- # targets = [cls, xyxy]
65
- height = img.shape[0] + border[0] * 2 # shape(h,w,c)
66
- width = img.shape[1] + border[1] * 2
67
-
68
- # Center
69
- C = np.eye(3)
70
- C[0, 2] = -img.shape[1] / 2 # x translation (pixels)
71
- C[1, 2] = -img.shape[0] / 2 # y translation (pixels)
72
-
73
- # Rotation and Scale
74
- R = np.eye(3)
75
- a = random.uniform(-degrees, degrees)
76
- # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
77
- s = random.uniform(scale[0], scale[1])
78
- # s = 2 ** random.uniform(-scale, scale)
79
- R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
80
-
81
- # Shear
82
- S = np.eye(3)
83
- S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
84
- S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
85
-
86
- # Translation
87
- T = np.eye(3)
88
- T[0, 2] = (
89
- random.uniform(0.5 - translate, 0.5 + translate) * width
90
- ) # x translation (pixels)
91
- T[1, 2] = (
92
- random.uniform(0.5 - translate, 0.5 + translate) * height
93
- ) # y translation (pixels)
94
-
95
- # Combined rotation matrix
96
- M = T @ S @ R @ C # order of operations (right to left) is IMPORTANT
97
-
98
- ###########################
99
- # For Aug out of Mosaic
100
- # s = 1.
101
- # M = np.eye(3)
102
- ###########################
103
-
104
- if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed
105
- if perspective:
106
- img = cv2.warpPerspective(
107
- img, M, dsize=(width, height), borderValue=(114, 114, 114)
108
- )
109
- else: # affine
110
- img = cv2.warpAffine(
111
- img, M[:2], dsize=(width, height), borderValue=(114, 114, 114)
112
- )
113
-
114
- # Transform label coordinates
115
- n = len(targets)
116
- if n:
117
- # warp points
118
- xy = np.ones((n * 4, 3))
119
- xy[:, :2] = targets[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(
120
- n * 4, 2
121
- ) # x1y1, x2y2, x1y2, x2y1
122
- xy = xy @ M.T # transform
123
- if perspective:
124
- xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8) # rescale
125
- else: # affine
126
- xy = xy[:, :2].reshape(n, 8)
127
-
128
- # create new boxes
129
- x = xy[:, [0, 2, 4, 6]]
130
- y = xy[:, [1, 3, 5, 7]]
131
- xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
132
-
133
- # clip boxes
134
- #xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
135
- #xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
136
-
137
- # filter candidates
138
- i = box_candidates(box1=targets[:, :4].T * s, box2=xy.T)
139
- targets = targets[i]
140
- targets[:, :4] = xy[i]
141
-
142
- targets = targets[targets[:, 0] < width]
143
- targets = targets[targets[:, 2] > 0]
144
- targets = targets[targets[:, 1] < height]
145
- targets = targets[targets[:, 3] > 0]
146
-
147
- return img, targets
148
-
149
-
150
- def _distort(image):
151
- def _convert(image, alpha=1, beta=0):
152
- tmp = image.astype(float) * alpha + beta
153
- tmp[tmp < 0] = 0
154
- tmp[tmp > 255] = 255
155
- image[:] = tmp
156
-
157
- image = image.copy()
158
-
159
- if random.randrange(2):
160
- _convert(image, beta=random.uniform(-32, 32))
161
-
162
- if random.randrange(2):
163
- _convert(image, alpha=random.uniform(0.5, 1.5))
164
-
165
- image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
166
-
167
- if random.randrange(2):
168
- tmp = image[:, :, 0].astype(int) + random.randint(-18, 18)
169
- tmp %= 180
170
- image[:, :, 0] = tmp
171
-
172
- if random.randrange(2):
173
- _convert(image[:, :, 1], alpha=random.uniform(0.5, 1.5))
174
-
175
- image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
176
-
177
- return image
178
-
179
-
180
- def _mirror(image, boxes):
181
- _, width, _ = image.shape
182
- if random.randrange(2):
183
- image = image[:, ::-1]
184
- boxes = boxes.copy()
185
- boxes[:, 0::2] = width - boxes[:, 2::-2]
186
- return image, boxes
187
-
188
-
189
- def preproc(image, input_size, mean, std, swap=(2, 0, 1)):
190
- if len(image.shape) == 3:
191
- padded_img = np.ones((input_size[0], input_size[1], 3)) * 114.0
192
- else:
193
- padded_img = np.ones(input_size) * 114.0
194
- img = np.array(image)
195
- r = min(input_size[0] / img.shape[0], input_size[1] / img.shape[1])
196
- resized_img = cv2.resize(
197
- img,
198
- (int(img.shape[1] * r), int(img.shape[0] * r)),
199
- interpolation=cv2.INTER_LINEAR,
200
- ).astype(np.float32)
201
- padded_img[: int(img.shape[0] * r), : int(img.shape[1] * r)] = resized_img
202
-
203
- padded_img = padded_img[:, :, ::-1]
204
- padded_img /= 255.0
205
- if mean is not None:
206
- padded_img -= mean
207
- if std is not None:
208
- padded_img /= std
209
- padded_img = padded_img.transpose(swap)
210
- padded_img = np.ascontiguousarray(padded_img, dtype=np.float32)
211
- return padded_img, r
212
-
213
-
214
- class TrainTransform:
215
- def __init__(self, p=0.5, rgb_means=None, std=None, max_labels=100):
216
- self.means = rgb_means
217
- self.std = std
218
- self.p = p
219
- self.max_labels = max_labels
220
-
221
- def __call__(self, image, targets, input_dim):
222
- boxes = targets[:, :4].copy()
223
- labels = targets[:, 4].copy()
224
- ids = targets[:, 5].copy()
225
- if len(boxes) == 0:
226
- targets = np.zeros((self.max_labels, 6), dtype=np.float32)
227
- image, r_o = preproc(image, input_dim, self.means, self.std)
228
- image = np.ascontiguousarray(image, dtype=np.float32)
229
- return image, targets
230
-
231
- image_o = image.copy()
232
- targets_o = targets.copy()
233
- height_o, width_o, _ = image_o.shape
234
- boxes_o = targets_o[:, :4]
235
- labels_o = targets_o[:, 4]
236
- ids_o = targets_o[:, 5]
237
- # bbox_o: [xyxy] to [c_x,c_y,w,h]
238
- boxes_o = xyxy2cxcywh(boxes_o)
239
-
240
- image_t = _distort(image)
241
- image_t, boxes = _mirror(image_t, boxes)
242
- height, width, _ = image_t.shape
243
- image_t, r_ = preproc(image_t, input_dim, self.means, self.std)
244
- # boxes [xyxy] 2 [cx,cy,w,h]
245
- boxes = xyxy2cxcywh(boxes)
246
- boxes *= r_
247
-
248
- mask_b = np.minimum(boxes[:, 2], boxes[:, 3]) > 1
249
- boxes_t = boxes[mask_b]
250
- labels_t = labels[mask_b]
251
- ids_t = ids[mask_b]
252
-
253
- if len(boxes_t) == 0:
254
- image_t, r_o = preproc(image_o, input_dim, self.means, self.std)
255
- boxes_o *= r_o
256
- boxes_t = boxes_o
257
- labels_t = labels_o
258
- ids_t = ids_o
259
-
260
- labels_t = np.expand_dims(labels_t, 1)
261
- ids_t = np.expand_dims(ids_t, 1)
262
-
263
- targets_t = np.hstack((labels_t, boxes_t, ids_t))
264
- padded_labels = np.zeros((self.max_labels, 6))
265
- padded_labels[range(len(targets_t))[: self.max_labels]] = targets_t[
266
- : self.max_labels
267
- ]
268
- padded_labels = np.ascontiguousarray(padded_labels, dtype=np.float32)
269
- image_t = np.ascontiguousarray(image_t, dtype=np.float32)
270
- return image_t, padded_labels
271
-
272
-
273
- class ValTransform:
274
- """
275
- Defines the transformations that should be applied to a test PIL image
276
- for input into the network
277
-
278
- dimension -> tensorize -> color adj
279
-
280
- Arguments:
281
- resize (int): input dimension to SSD
282
- rgb_means ((int,int,int)): average RGB of the dataset
283
- (104,117,123)
284
- swap ((int,int,int)): final order of channels
285
-
286
- Returns:
287
- transform (transform) : callable transform to be applied to test/val
288
- data
289
- """
290
-
291
- def __init__(self, rgb_means=None, std=None, swap=(2, 0, 1)):
292
- self.means = rgb_means
293
- self.swap = swap
294
- self.std = std
295
-
296
- # assume input is cv2 img for now
297
- def __call__(self, img, res, input_size):
298
- img, _ = preproc(img, input_size, self.means, self.std, self.swap)
299
- return img, np.zeros((1, 5))
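A minimal sketch of the letterbox-style `preproc` defined above, assuming the ByteTrack package layout so the module imports as `yolox.data.data_augment` (and that OpenCV is installed):

import numpy as np
from yolox.data.data_augment import preproc

img = (np.random.rand(480, 640, 3) * 255).astype(np.uint8)   # dummy HWC BGR frame
padded, r = preproc(
    img,
    input_size=(416, 416),
    mean=(0.485, 0.456, 0.406),
    std=(0.229, 0.224, 0.225),
)
print(padded.shape, r)  # (3, 416, 416) 0.65 -> CHW array ready for the network, plus the resize ratio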