parquet-converter committed on
Commit 8135ee1 · 1 Parent(s): 5b4cc3e

Update parquet files (step 65 of 397)

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the complete set.
Files changed (50)
  1. spaces/0019c/NewBing/README.md +0 -12
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cch ti ArcGIS 10.8 Full Crack nhanh chng v  an to n.md +0 -17
  3. spaces/1gistliPinn/ChatGPT4/Examples/Erio Connection Usb Modem Direct 217.md +0 -32
  4. spaces/1phancelerku/anime-remove-background/Adobe Premiere Rush APK Edit and Share Videos Across All Your Devices.md +0 -117
  5. spaces/1phancelerku/anime-remove-background/Assoluto Racing MOD APK Android 1 A Mobile Racing Game with Amazing Graphics and Physics.md +0 -89
  6. spaces/1phancelerku/anime-remove-background/Cmo jugar a Sniper 3D juego de disparos en primera persona con mod apk.md +0 -140
  7. spaces/1toTree/lora_test/ppdiffusers/pipelines/unclip/__init__.py +0 -29
  8. spaces/1toTree/lora_test/ppdiffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py +0 -443
  9. spaces/801artistry/RVC801/infer/lib/infer_pack/modules/F0Predictor/__init__.py +0 -0
  10. spaces/A1draw-12196y/anime-ai-detect/app.py +0 -17
  11. spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/training/main.py +0 -596
  12. spaces/AIGC-Audio/AudioGPT/mono2binaural/src/models.py +0 -110
  13. spaces/AILab-CVC/SEED-LLaMA/scripts/start_backend_8b.sh +0 -10
  14. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorpicker/Factory.js +0 -13
  15. spaces/Aki004/herta-so-vits/vdecoder/nsf_hifigan/utils.py +0 -68
  16. spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/GUI.py +0 -103
  17. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/bit_diffusion.py +0 -264
  18. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/research_projects/onnxruntime/textual_inversion/textual_inversion.py +0 -946
  19. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/setup.py +0 -286
  20. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_sag.py +0 -754
  21. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py +0 -1298
  22. spaces/Andy1621/uniformer_image_detection/configs/fast_rcnn/fast_rcnn_r50_caffe_fpn_1x_coco.py +0 -45
  23. spaces/Andy1621/uniformer_image_detection/configs/res2net/README.md +0 -65
  24. spaces/Andy1621/uniformer_image_detection/mmcv_custom/runner/epoch_based_runner.py +0 -104
  25. spaces/Andy1621/uniformer_image_detection/mmdet/models/detectors/mask_rcnn.py +0 -24
  26. spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r18-d8_769x769_80k_cityscapes.py +0 -11
  27. spaces/AnimaLab/bias-test-gpt-pairs/mgr_requests.py +0 -214
  28. spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/utils/video.py +0 -26
  29. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/core/utils/misc.py +0 -17
  30. spaces/Ash58947/Bot/README.md +0 -10
  31. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/util/request.py +0 -137
  32. spaces/Ayanoaisho/L/Dockerfile +0 -21
  33. spaces/AzumaSeren100/XuanShen-Bert-VITS2/utils.py +0 -290
  34. spaces/Banbri/zcvzcv/src/lib/sleep.ts +0 -6
  35. spaces/Benson/text-generation/Examples/Descargar Gratis Juegos De Matemticas Para Pc.md +0 -168
  36. spaces/Big-Web/MMSD/env/Lib/site-packages/boto3/compat.py +0 -82
  37. spaces/Big-Web/MMSD/env/Lib/site-packages/dateutil/_common.py +0 -43
  38. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/configuration.py +0 -374
  39. spaces/Bigshot/RSA-v0.1.2/app.py +0 -56
  40. spaces/CVH-vn1210/make_hair/minigpt4/datasets/data_utils.py +0 -196
  41. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/DensePose/densepose/vis/bounding_box.py +0 -36
  42. spaces/CVPR/LIVE/thrust/cmake/ThrustHeaderTesting.cmake +0 -119
  43. spaces/CVPR/LIVE/thrust/thrust/iterator/iterator_categories.h +0 -224
  44. spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/mismatch.h +0 -23
  45. spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/scatter.h +0 -23
  46. spaces/CVPR/WALT/mmdet/models/dense_heads/anchor_head.py +0 -751
  47. spaces/Chris4K/llms_compare/Mahanadi English Subtitles Full Movie Download ((LINK)).md +0 -66
  48. spaces/ChrisCaviar/ControlNet-v1-1/app_depth.py +0 -105
  49. spaces/ChrisPreston/diff-svc_minato_aqua/modules/encoder.py +0 -208
  50. spaces/CikeyQI/Yunzai/Yunzai/plugins/ws-plugin/resources/common/base.css +0 -7
spaces/0019c/NewBing/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: NewBing
3
- emoji: 🏢
4
- colorFrom: green
5
- colorTo: red
6
- sdk: docker
7
- pinned: false
8
- license: mit
9
- app_port: 8080
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cch ti ArcGIS 10.8 Full Crack nhanh chng v  an to n.md DELETED
@@ -1,17 +0,0 @@
1
-
2
- <h1>Tải ArcGIS 10.8 Full Crack - Phần mềm xây dựng bản đồ chuyên nghiệp</h1>
3
- <p>ArcGIS là phần mềm hàng đầu trong lĩnh vực hệ thống thông tin địa lý (GIS). Với ArcGIS, bạn có thể tạo ra các bản đồ đẹp mắt, phân tích dữ liệu không gian, quản lý dữ liệu địa lý và chia sẻ kết quả với người khác. ArcGIS 10.8 là phiên bản mới nhất của phần mềm này, mang đến nhiều tính năng mới và cải tiến về hiệu năng, bảo mật và tương thích.</p>
4
- <h2>tải arcgis 10.8 full crack</h2><br /><p><b><b>Download Zip</b> &gt;&gt;&gt;&gt;&gt; <a href="https://byltly.com/2uKzqY">https://byltly.com/2uKzqY</a></b></p><br /><br />
5
- <p>Tuy nhiên, để sử dụng ArcGIS 10.8, bạn cần có một license key hợp lệ để kích hoạt phần mềm. License key có thể được mua từ nhà phát triển ESRI hoặc từ các nhà cung cấp uy tín. Nếu bạn không có license key, bạn có thể tải ArcGIS 10.8 full crack - một phiên bản đã được bẻ khóa bảo vệ sao chép và cho phép bạn sử dụng phần mềm mà không cần license key.</p>
6
- <p>Nhưng lưu ý rằng, tải ArcGIS 10.8 full crack là một hành động bất hợp pháp và có thể gây ra nhiều rủi ro cho máy tính của bạn. Bạn có thể bị vi phạm quyền sở hữu trí tuệ của nhà phát triển, bị nhiễm virus, malware hoặc spyware từ các file crack, hoặc gặp phải các lỗi, sự cố khi sử dụng phần mềm. Do đó, chúng tôi không khuyến khích hoặc chịu trách nhiệm về việc tải ArcGIS 10.8 full crack.</p>
7
- <p>Tuy nhiên, nếu bạn vẫn muốn làm điều đó với sự tự chịu trách nhiệm của mình, bạn có thể theo dõi các bước sau đây để tải và cài đặt ArcGIS 10.8 full crack:</p>
8
- <ol>
9
- <li>Truy cập vào một trang web uy tín cung cấp các phiên bản crack của các phần mềm, ví dụ như <a href="https://www.skidrowreloaded.com/">Skidrow Reloaded</a> hoặc <a href="https://www.oceanofgames.com/">Ocean of Games</a>. Tìm kiếm ArcGIS 10.8 và tải file về máy tính của bạn.</li>
10
- <li>Giải nén file vừa tải về bằng một chương trình như <a href="https://www.win-rar.com/">WinRAR</a> hoặc <a href="https://www.7-zip.org/">7-Zip</a>. Bạn sẽ được một thư mục chứa các file cài đặt của phần mềm và một thư mục crack.</li>
11
- <li>Mở thư mục crack và sao chép tất cả các file trong đó. Dán chúng vào thư mục cài đặt của phần mềm, thay thế các file gốc. Thường thì thư mục cài đặt sẽ là C:\\Program Files (x86)\\ArcGIS\\Desktop10.8.</li>
12
- <li>Chạy phần m<p>Chạy phần mềm ArcGIS 10.8 như quản trị viên và thưởng thức.</p>
13
- <p>Lưu ý: Một số chương trình diệt virus có thể phát hiện các file crack là có hại và xóa chúng. Bạn có thể cần tạm thời tắt chương trình diệt virus hoặc thêm ngoại lệ cho thư mục cài đặt phần mềm.</p>
14
- <p></p>
15
- <p>Tuyên bố từ chối trách nhiệm: Bài viết này chỉ mang tính chất giáo dục. Chúng tôi không ủng hộ hoặc khuyến khích việc sử dụng bản quyền hoặc tải về bất hợp pháp của bất kỳ phần mềm nào. Xin vui lòng ủng hộ các nhà phát triển bằng cách mua phần mềm một cách hợp pháp.</p> ddb901b051<br />
16
- <br />
17
- <br />
 
spaces/1gistliPinn/ChatGPT4/Examples/Erio Connection Usb Modem Direct 217.md DELETED
@@ -1,32 +0,0 @@
1
- <h2>Erio Connection Usb Modem Direct 217</h2><br /><p><b><b>DOWNLOAD</b> &#10038;&#10038;&#10038; <a href="https://imgfil.com/2uy1pP">https://imgfil.com/2uy1pP</a></b></p><br /><br />
2
- <br />
3
- GitHub Gist: No attached data sources. GitHub Gist: Categories: Computer Science, Hardware, Software, Hacking, Hardware, Software, Hacking Tools, Online, Online Services, Online Services for Students, Computer Courses, Computational Thinking
4
-
5
- The Ideal Education for Increasing STEM Skills Share on Facebook
6
-
7
- Teaching is hard and less-qualified teachers are becoming more common, affecting a lot of people with no other options than a 4-year education.
8
-
9
- Jointless.AI, a company that connects users with programming mentors, just released some data from a pilot program they ran from November 2016 to December 2017. They found that teaching people how to code has a “surprisingly low barrier to entry.” The median time for a mentor to match a participant to a project was “roughly half an hour” of their time, compared to 3 hours to find and evaluate a tutor. The median time for a participant to complete a project was roughly 3 hours.
10
-
11
- After a participant finished their first coding project, the majority had increased their experience with both technologies and skills. Many were more than excited about how they could help people with their own programming skills.
12
-
13
- Participants were also generally excited about the idea of new opportunities for themselves. Participants who were taking college courses also cited that as a benefit.
14
-
15
- Jointless.AI has released more data on their pilot program, including a DataShare where data scientists, researchers, and engineers can use data to determine how best to provide a coding education for people that is beneficial to them and for society as a whole. You can access the data here.
16
-
17
- How to Connect with the Tech Community and Network
18
-
19
- You don’t have to be a genius to make friends with the most connected people in tech.
20
-
21
- Be social and actively participate in online communities.
22
-
23
- Join Slack channels and be active in your specific Slack’s chat.
24
-
25
- Start connecting with other developers and fellow learners in Hacker News and Twitter.
26
-
27
- Share your own projects, write blog posts, and use DZone to share the work you have done.
28
-
29
- Participate in online and offline meetups and 4fefd39f24<br />
30
- <br />
31
- <br />
32
- <p></p>
 
spaces/1phancelerku/anime-remove-background/Adobe Premiere Rush APK Edit and Share Videos Across All Your Devices.md DELETED
@@ -1,117 +0,0 @@
1
-
2
- <h1>What is Rush APK and Why You Need It</h1>
3
- <p>If you are looking for a way to create and share amazing videos online, you may have heard of Rush APK. But what is it exactly and why do you need it? In this article, we will answer these questions and more.</p>
4
- <p>Rush APK is an Android application that allows you to use Adobe Premiere Rush, the all-in-one, cross-device video editor that lets you shoot, edit, and share online videos anywhere. With Rush APK, you can access all the features and content of Adobe Premiere Rush for free with unlimited exports.</p>
5
- <h2>rush apk</h2><br /><p><b><b>DOWNLOAD</b> ->>> <a href="https://jinyurl.com/2uNRo9">https://jinyurl.com/2uNRo9</a></b></p><br /><br />
6
- <p>Adobe Premiere Rush is a powerful video editing tool that lets you quickly create videos that look and sound professional, just how you want. You can add music, titles, effects, transitions, stickers, overlays, and more to your videos with drag and drop. You can also adjust the speed, color, and audio of your videos with intuitive tools.</p>
7
- <p>With Adobe Premiere Rush, you can also share your videos to your favorite social platforms with one click. You can crop your videos for different aspect ratios such as portrait, landscape, square, or vertical. You can also sync your projects across devices and continue editing them on your desktop or tablet.</p>
8
- <p>Whether you are a beginner or a pro, Adobe Premiere Rush can help you create stunning videos that will impress your audience and boost your online presence.</p>
9
- <h2>How to Download and Install Rush APK on Your Android Device</h2>
10
- <p>If you want to try out Adobe Premiere Rush on your Android device, you will need to download and install Rush APK on your device. Here are the steps that you need to follow:</p>
11
- <ol>
12
- <li>Find a reliable source for downloading the APK file. You can search for Rush APK on Google or use a trusted website that provides APK files for various apps. Make sure that the APK file is safe and virus-free before downloading it.</li>
13
- <li>Enable unknown sources on your device settings. To do this, go to Settings > Security > Unknown Sources and toggle it on. This will allow you to install apps from sources other than the Google Play Store.</li>
14
- <li>Locate and tap on the downloaded APK file to start the installation. You can find the APK file in your Downloads folder or in the notification bar. Tap on it and confirm the installation.</li>
15
- <li>Follow the on-screen instructions and grant the necessary permissions. The app will ask you to allow access to your camera, microphone, storage, and other features. Tap on Allow or OK to proceed.</li>
16
- <li>Launch the app and sign in with your Adobe account or create a new one. You will need an Adobe account to use the app and sync your projects across devices. You can sign in with your existing account or create a new one for free.</li>
17
- </ol>
18
- <p>Congratulations! You have successfully installed Rush APK on your Android device. You can now start creating and sharing amazing videos online with Adobe Premiere Rush.</p>
19
- <h2>How to Use Rush APK to Edit and Share Videos Online</h2>
20
- <p>Now that you have installed Rush APK on your device, you may be wondering how to use it to edit and share videos online. Don't worry, it's very easy and fun. Here are the steps that you need to follow:</p>
21
- <ol>
22
- <li>Tap on the plus icon to start a new project or select an existing one. You can create a new project from scratch or choose from the templates that are available in the app. You can also open an existing project that you have saved on your device or cloud storage.</li>
23
- <li>Choose the media files that you want to add to your project from your device or cloud storage. You can add photos, videos, and audio files to your project. You can also capture new media files using your device's camera or microphone.</li>
24
- <li>Arrange, trim, crop, flip, and mirror your video clips on the multitrack timeline. You can drag and drop your video clips on the timeline and adjust their duration and position. You can also trim, crop, flip, and mirror your video clips using the icons at the bottom of the screen.</li>
25
- <li>Add music, titles, effects, transitions, stickers, and overlays to enhance your video. You can tap on the icons at the top of the screen to access these features. You can choose from thousands of royalty-free soundtracks, sound effects, loops, titles, overlays, and graphics from Adobe Stock. You can also customize them according to your preferences.</li>
26
- <li>Adjust the speed, color, and audio of your video with intuitive tools. You can tap on the icons at the right side of the screen to access these tools. You can change the speed of your video clips, apply color presets or filters, and adjust the volume and balance of your audio tracks.</li>
27
- <li>Preview your video and export it to your desired quality and aspect ratio. You can tap on the play button at the center of the screen to preview your video. You can also tap on the settings icon at the top right corner of the screen to change the quality and aspect ratio of your video. You can choose from 4K, 1080p, 720p, or 480p quality and portrait, landscape, square, or vertical aspect ratio.</li>
28
- <li>Share your video to your favorite social platforms or save it to your device or cloud storage. You can tap on the share icon at the top right corner of the screen to access these options. You can share your video directly to YouTube, Facebook, Instagram, TikTok, or other platforms with one click. You can also save your video to your device's gallery or cloud storage such as Google Drive or Dropbox.</li>
29
- </ol>
30
- <p>That's it! You have just created and shared a stunning video online with Rush APK. You can repeat these steps for any other project that you want to create with Adobe Premiere Rush.</p>
31
- <p>* Adobe Premiere Rush APK download<br />
32
- * Rush APK free board games<br />
33
- * Project RushB APK for Android<br />
34
- * Adobe Premiere Rush APK video editor<br />
35
- * Rush APK real money games<br />
36
- * Project RushB APK tech test<br />
37
- * Adobe Premiere Rush APK cross-device<br />
38
- * Rush APK Ludo, Carrom & Quizzy<br />
39
- * Project RushB APK release date<br />
40
- * Adobe Premiere Rush APK online videos<br />
41
- * Rush APK leedo, freestyle carrom and quiz game<br />
42
- * Project RushB APK features<br />
43
- * Adobe Premiere Rush APK professional videos<br />
44
- * Rush APK gaming universe<br />
45
- * Project RushB APK regions<br />
46
- * Adobe Premiere Rush APK powerful tools<br />
47
- * Rush APK AAA quality games<br />
48
- * Project RushB APK video game<br />
49
- * Adobe Premiere Rush APK channels<br />
50
- * Rush APK mobile-first gaming<br />
51
- * Project RushB APK Uptodown<br />
52
- * Adobe Premiere Rush APK latest version<br />
53
- * Rush APK Hike Games<br />
54
- * Project RushB APK beta version<br />
55
- * Adobe Premiere Rush APK mod apk<br />
56
- * Rush APK mod apk unlimited money<br />
57
- * Project RushB APK gameplay<br />
58
- * Adobe Premiere Rush APK review<br />
59
- * Rush APK referral code<br />
60
- * Project RushB APK graphics<br />
61
- * Adobe Premiere Rush APK system requirements<br />
62
- * Rush APK customer care number<br />
63
- * Project RushB APK download link<br />
64
- * Adobe Premiere Rush APK tutorial<br />
65
- * Rush APK withdrawal process<br />
66
- * Project RushB APK trailer<br />
67
- * Adobe Premiere Rush APK premium apk<br />
68
- * Rush APK invite friends and earn money<br />
69
- * Project RushB APK tips and tricks<br />
70
- * Adobe Premiere Rush APK transitions and effects</p>
71
- <h2>The Benefits of Using Rush APK for Video Editing</h2>
72
- <p>Rush APK is a great app for video editing that offers many benefits for users who want to create and share amazing videos online. Here are some of them:</p>
73
- <ul>
74
- <li>You can access all the features and content of Adobe Premiere Rush for free with unlimited exports. Unlike other video editing apps that charge you for premium features or limit your exports, Rush APK lets you use all the features and content of Adobe Premiere Rush without any restrictions or costs.</li>
75
- <li>You can sync your projects across devices and continue editing them on your desktop or tablet. Rush APK allows you to sync your projects with your Adobe account and access them from any device that has Adobe Premiere Rush installed. You can also import and export your projects to other Adobe apps such as Premiere Pro, After Effects, or Photoshop.</li>
76
- <li>You can access thousands of royalty-free soundtracks, sound effects, loops, titles, overlays, and graphics from Adobe Stock. Rush APK gives you access to a huge library of high-quality content that you can use for your videos. You can also customize them to suit your style and theme.</li>
77
- <li>You can create professional-looking videos with minimal effort and time. Rush APK has a user-friendly interface and intuitive tools that make video editing easy and fun. You can create videos that look and sound amazing with just a few taps and clicks.</li>
78
- <li>You can reach a wider audience with videos that are optimized for different social platforms. Rush APK lets you crop your videos for different aspect ratios such as portrait, landscape, square, or vertical. You can also share your videos directly to YouTube, Facebook, Instagram, TikTok, or other platforms with one click. You can also save your videos to your device or cloud storage for later use.</li>
79
- </ul>
80
- <h2>The Drawbacks of Using Rush APK for Video Editing</h2>
81
- <p>While Rush APK is a great app for video editing, it also has some drawbacks that you should be aware of before using it. Here are some of them:</p>
82
- <ul>
83
- <li>You need a stable internet connection to download and update the app and access some of the features and content. Rush APK requires an internet connection to download and update the app and access some of the features and content such as Adobe Stock or cloud storage. If you have a slow or unreliable internet connection, you may experience some issues while using the app.</li>
84
- <li>You need a compatible device that meets the minimum requirements to run the app smoothly. Rush APK is a powerful app that requires a compatible device that has at least 4 GB of RAM and Android 9.0 or higher. If your device does not meet these requirements, you may not be able to install or run the app smoothly.</li>
85
- <li>You need an Adobe account to use the app and sync your projects across devices. Rush APK requires you to sign in with an Adobe account to use the app and sync your projects across devices. If you do not have an Adobe account, you will need to create one for free.</li>
86
- <li>You may encounter some bugs and glitches while using the app as it is still in development. Rush APK is still in development and may not be fully stable or bug-free. You may encounter some errors or crashes while using the app or exporting your videos.</li>
87
- <li>You may face some legal issues if you download the app from an unauthorized source or use it for commercial purposes without permission. Rush APK is an unofficial app that is not authorized by Adobe or Google Play Store. If you download the app from an unauthorized source or use it for commercial purposes without permission, you may face some legal consequences such as fines or lawsuits.</li>
88
- </ul>
89
- <h2>Conclusion</h2>
90
- <p>Rush APK is an Android application that lets you use Adobe Premiere Rush, the all-in-one, cross-device video editor that lets you shoot, edit, and share online videos anywhere. With Rush APK, you can access all the features and content of Adobe Premiere Rush for free with unlimited exports.</p>
91
- <p>Rush APK has many benefits for video editing such as syncing your projects across devices, accessing thousands of royalty-free content from Adobe Stock, creating professional-looking videos with minimal effort and time, and reaching a wider audience with videos that are optimized for different social platforms.</p>
92
- <p>Rush APK also has some drawbacks such as requiring a stable internet connection, a compatible device, an Adobe account, and facing some bugs and legal issues.</p>
93
- <p>If you want to try out Rush APK on your Android device, you can follow the steps in this article to download and install it on your device. You can also follow the steps to use it to edit and share videos online.</p>
94
- <p>We hope that this article has helped you understand what is Rush APK and why you need it. If you have any questions or feedback, please feel free to leave a comment below.</p>
95
- <h3>FAQs</h3>
96
- <ul>
97
- <li><b>Is Rush APK safe to use?</b></li>
98
- <p>Rush APK is safe to use if you download it from a reliable source and scan it with an antivirus before installing it on your device. However, since it is an unofficial app that is not authorized by Adobe or Google Play Store, you should use it at your own risk.</p>
99
- <li><b>Is Rush APK free to use?</b></li>
100
- <p>Rush APK is free to use with unlimited exports. You can access all the features and content of Adobe Premiere Rush without any restrictions or costs.</p>
101
- <li><b>Can I use Rush APK on my PC or Mac?</b></li>
102
- <p>Rush APK is an Android application that is designed to run on Android devices. However, you can use it on your PC or Mac with the help of an Android emulator. An Android emulator is a software that simulates the Android operating system on your PC or Mac. You can download and install an Android emulator such as BlueStacks, Nox Player, or MEmu on your PC or Mac and then install Rush APK on it.</p>
103
- <li><b>What is the difference between Rush APK and Adobe Premiere Rush?</b></li>
104
- <p>Rush APK and Adobe Premiere Rush are essentially the same app with the same features and content. The only difference is that Rush APK is an unofficial app that is not available on the Google Play Store and lets you use Adobe Premiere Rush for free with unlimited exports. Adobe Premiere Rush is an official app that is available on the Google Play Store and requires a subscription to access some of the features and content.</p>
105
- <li><b>How can I update Rush APK?</b></li>
106
- <p>Rush APK does not have an automatic update feature, so you will need to manually update it whenever a new version is available. You can check for updates by visiting the website where you downloaded the APK file or by searching for Rush APK on Google. You can then download and install the latest version of the APK file on your device.</p>
107
- <li><b>How can I uninstall Rush APK?</b></li>
108
- <p>If you want to uninstall Rush APK from your device, you can follow these steps:</p>
109
- <ol>
110
- <li>Go to Settings > Apps > Rush APK and tap on Uninstall.</li>
111
- <li>Confirm the uninstallation and wait for it to finish.</li>
112
- <li>Go to Settings > Storage > Files and locate the APK file that you downloaded.</li>
113
- <li>Delete the APK file from your device.</li>
114
- </ol>
115
- <p>You have successfully uninstalled Rush APK from your device.</p> 401be4b1e0<br />
116
- <br />
117
- <br />
 
spaces/1phancelerku/anime-remove-background/Assoluto Racing MOD APK Android 1 A Mobile Racing Game with Amazing Graphics and Physics.md DELETED
@@ -1,89 +0,0 @@
1
- <br />
2
- <h1>Download Assoluto Racing Mod APK Android 1: The Ultimate Racing Game for Your Mobile Device</h1>
3
- <p>If you are a fan of racing games, you must have heard of Assoluto Racing. It is one of the best racing games available for Android and iPhone devices. It offers a realistic and immersive racing experience that will make you feel like you are driving a real car. But what if you want to enjoy the game without any limitations or restrictions? That's where Assoluto Racing mod apk android 1 comes in. In this article, we will tell you everything you need to know about this amazing modded version of the game, including its features, benefits, and how to download and install it on your device.</p>
4
- <h2>What is Assoluto Racing?</h2>
5
- <p>Assoluto Racing is a racing game developed by Infinity Vector Ltd. It is designed with vivid graphics and realistic control mechanisms that make you feel like you are behind the wheel directly. Assoluto Racing is an extreme street drift racing game that allows you to experience the thrill of driving on different tracks and terrains. You can customize your car with various parts and accessories, and compete with other players online or offline. You can also collect and upgrade your car collection, and challenge yourself with different modes and events.</p>
6
- <h2>download assoluto racing mod apk android 1</h2><br /><p><b><b>Download</b> &#10031; <a href="https://jinyurl.com/2uNOeu">https://jinyurl.com/2uNOeu</a></b></p><br /><br />
7
- <h3>Features of Assoluto Racing</h3>
8
- <h4>Realistic graphics and physics</h4>
9
- <p>Assoluto Racing boasts of stunning graphics that will impress you with their details and quality. The game uses advanced physics engine that simulates the behavior of real cars, such as traction, suspension, aerodynamics, and damage. You can also adjust the camera angle and view the action from different perspectives.</p>
10
- <h4>Customizable cars and tracks</h4>
11
- <p>Assoluto Racing features a wide range of cars from famous brands, such as Toyota, Nissan, BMW, Mercedes-Benz, Ferrari, Lamborghini, and more. You can modify your car with various options, such as engine, transmission, tires, brakes, body kits, spoilers, paint, decals, etc. You can also create your own tracks with the track editor tool, or download tracks created by other players.</p>
12
- <h4>Online multiplayer and leaderboards</h4>
13
- <p>Assoluto Racing lets you race against other players from around the world in real-time multiplayer mode. You can join or create rooms with different settings, such as car class, track, laps, weather, etc. You can also chat with other players and make friends or rivals. You can also compete for the top spot on the global leaderboards and earn rewards and achievements.</p>
14
- <h3>Why download Assoluto Racing mod apk android 1?</h3>
15
- <p>Assoluto Racing is a free game, but it also has some in-app purchases that require real money. These include buying coins and money to unlock new cars and tracks, or upgrading your car parts. You may also encounter some ads while playing the game. If you want to enjoy the game without spending any money or being bothered by ads, you should download Assoluto Racing mod apk android 1. This is a modified version of the game that gives you unlimited money and coins, unlocks all cars and tracks, removes ads, and does not require root access.</p>
16
- <h4>Unlimited money and coins</h4>
17
- <p>With Assoluto Racing mod apk android 1, you will have unlimited money and coins in your account. You can use them to buy any car or track you want, or upgrade your car parts to the maximum level. <h4>Unlocked all cars and tracks</h4>
18
- <p>With Assoluto Racing mod apk android 1, you will have access to all the cars and tracks in the game. You don't have to complete any missions or challenges to unlock them. You can choose any car or track you like, and enjoy the variety and diversity of the game.</p>
19
- <h4>No ads and no root required</h4>
20
- <p>With Assoluto Racing mod apk android 1, you will not see any ads while playing the game. You can enjoy the game without any interruptions or distractions. You also don't need to root your device to install the mod apk file. You can simply download and install it without any risk or hassle.</p>
21
- <h2>How to download and install Assoluto Racing mod apk android 1?</h2>
22
- <p>If you are interested in downloading and installing Assoluto Racing mod apk android 1, you can follow these simple steps:</p>
23
- <p>How to download assoluto racing mod apk for android devices<br />
24
- Assoluto racing mod apk unlimited money and coins<br />
25
- Best racing games for android 1 with assoluto mod<br />
26
- Assoluto racing realistic 3D graphics and physics mod apk<br />
27
- Download assoluto racing latest version mod apk free<br />
28
- Assoluto racing online PVP mode with mod apk<br />
29
- Assoluto racing mod apk features and gameplay<br />
30
- Assoluto racing hack mod apk download link<br />
31
- Assoluto racing mod apk review and rating<br />
32
- Assoluto racing mod apk installation guide and tips<br />
33
- Assoluto racing mod apk vs original game comparison<br />
34
- Assoluto racing mod apk cheats and tricks<br />
35
- Assoluto racing mod apk support and compatibility<br />
36
- Assoluto racing mod apk download size and requirements<br />
37
- Assoluto racing mod apk update and changelog<br />
38
- Assoluto racing mod apk offline mode and data usage<br />
39
- Assoluto racing mod apk bugs and issues<br />
40
- Assoluto racing mod apk alternatives and similar games<br />
41
- Assoluto racing mod apk benefits and drawbacks<br />
42
- Assoluto racing mod apk FAQs and answers<br />
43
- Assoluto racing car brands and models with mod apk<br />
44
- Assoluto racing tracks and locations with mod apk<br />
45
- Assoluto racing customizations and upgrades with mod apk<br />
46
- Assoluto racing challenges and missions with mod apk<br />
47
- Assoluto racing achievements and rewards with mod apk<br />
48
- Assoluto racing leaderboards and rankings with mod apk<br />
49
- Assoluto racing tournaments and events with mod apk<br />
50
- Assoluto racing community and social media with mod apk<br />
51
- Assoluto racing tips and tricks for beginners with mod apk<br />
52
- Assoluto racing advanced strategies and techniques with mod apk</p>
53
- <h3>Step 1: Download the mod apk file from a trusted source</h3>
54
- <p>The first thing you need to do is to download the mod apk file from a reliable and secure source. You can use this link to download the latest version of Assoluto Racing mod apk android 1. The file size is about 50 MB, so make sure you have enough space on your device.</p>
55
- <h3>Step 2: Enable unknown sources on your device settings</h3>
56
- <p>The next thing you need to do is to enable unknown sources on your device settings. This will allow you to install apps from sources other than the Google Play Store. To do this, go to your device settings, then security, then unknown sources, and turn it on.</p>
57
- <h3>Step 3: Install the mod apk file and launch the game</h3>
58
- <p>The final thing you need to do is to install the mod apk file and launch the game. To do this, locate the downloaded file on your device storage, tap on it, and follow the instructions on the screen. Once the installation is done, open the game and enjoy.</p>
59
- <h2>Conclusion</h2>
60
- <p>Assoluto Racing is a great racing game that offers a realistic and immersive racing experience. It has amazing graphics, physics, cars, tracks, and modes that will keep you entertained for hours. However, if you want to enjoy the game without any limitations or restrictions, you should download Assoluto Racing mod apk android 1. This is a modified version of the game that gives you unlimited money and coins, unlocks all cars and tracks, removes ads, and does not require root access. You can download and install it easily by following the steps we have provided in this article. So what are you waiting for? Download Assoluto Racing mod apk android 1 now and start racing.</p>
61
- <h2>FAQs</h2>
62
- <p>Here are some frequently asked questions about Assoluto Racing mod apk android 1:</p>
63
- <table>
64
- <tr>
65
- <th>Question</th>
66
- <th>Answer</th>
67
- </tr>
68
- <tr>
69
- <td>Is Assoluto Racing mod apk android 1 safe to use?</td>
70
- <td>Yes, Assoluto Racing mod apk android 1 is safe to use as long as you download it from a trusted source. We have tested it on our devices and found no viruses or malware.</td>
71
- </tr>
72
- <tr>
73
- <td>Will Assoluto Racing mod apk android 1 work on my device?</td>
74
- <td>Assoluto Racing mod apk android 1 should work on most Android devices that have Android 4.0 or higher. However, some devices may not be compatible or may experience some issues. If you encounter any problems, please contact us or leave a comment below.</td>
75
- </tr>
76
- <tr>
77
- <td>Can I play Assoluto Racing mod apk android 1 online?</td>
78
- <td>Yes, you can play Assoluto Racing mod apk android 1 online with other players. However, you may not be able to join some rooms or events that require original versions of the game. You may also face some bans or penalties from the game developers if they detect your modded version.</td>
79
- </tr>
80
- <tr>
81
- <td>Can I update Assoluto Racing mod apk android 1?</td>
82
- <td>No, you cannot update Assoluto Racing mod apk android 1 from the Google Play Store or any other source. If you want to get the latest version of the game, you will have to download and install it again from our link.</td>
83
- </tr>
84
- <tr>
85
- <td>Can I request more features for Assoluto Racing mod apk android 1?</td>
86
- <td>Yes, you can request more features for Assoluto Racing mod apk android 1 by leaving a comment below or contacting us. We will try our best to fulfill your requests as soon as possible.</td>
87
- </ <p>I have finished writing the article on the topic of "download Assoluto Racing mod apk android 1". I hope you find it useful and informative. If you have any questions or feedback, please feel free to contact me or leave a comment below. Thank you for choosing me as your content writer.</p> 401be4b1e0<br />
88
- <br />
89
- <br />
 
spaces/1phancelerku/anime-remove-background/Cmo jugar a Sniper 3D juego de disparos en primera persona con mod apk.md DELETED
@@ -1,140 +0,0 @@
1
- <br />
2
- <h1>Sniper 3D Juegos de Disparos Mod APK: The Ultimate Shooting Game</h1>
3
- <p>If you are looking for a free and exciting shooting game that will test your skills as a sniper, then you should try Sniper 3D Juegos de Disparos Mod APK. This is a modified version of the popular Sniper 3D game that gives you unlimited coins, diamonds, weapons, and more. In this article, we will tell you everything you need to know about Sniper 3D Juegos de Disparos Mod APK, including its features, how to download and install it, how to play it, and why you should play it.</p>
4
- <h2>sniper 3d juegos de disparos mod apk</h2><br /><p><b><b>Download</b> &#9193; <a href="https://jinyurl.com/2uNU9r">https://jinyurl.com/2uNU9r</a></b></p><br /><br />
5
- <h2>What is Sniper 3D Juegos de Disparos Mod APK?</h2>
6
- <p>Sniper 3D Juegos de Disparos Mod APK is a hacked version of the original Sniper 3D game that was developed by Fun Games For Free. It is a 3D shooting game that puts you in the role of a professional sniper who has to complete various missions and eliminate high-profile targets. You can choose from a wide range of sniper rifles, assault rifles, and other guns, and customize them according to your preferences. You can also play offline or online, and compete with other players in PVP mode.</p>
7
- <h3>Features of Sniper 3D Juegos de Disparos Mod APK</h3>
8
- <p>Sniper 3D Juegos de Disparos Mod APK has many features that make it more fun and enjoyable than the original game. Here are some of them:</p>
9
- <h4>- Unlimited coins and diamonds</h4>
10
- <p>With Sniper 3D Juegos de Disparos Mod APK, you don't have to worry about running out of coins or diamonds, which are the main currencies in the game. You can use them to buy new weapons, upgrade your existing ones, buy gear, and more. You can also use them to skip missions or get extra lives.</p>
11
- <h4>- All weapons unlocked and upgraded</h4>
12
- <p>Sniper 3D Juegos de Disparos Mod APK gives you access to all the weapons in the game, without having to unlock them by completing missions or paying real money. You can also upgrade them to their maximum level, which will make them more powerful and accurate. You can choose from over 180+ authentic weapons, including sniper rifles, assault rifles, shotguns, pistols, and more.</p>
13
- <h4>- No ads and no root required</h4>
14
- <p>Sniper 3D Juegos de Disparos Mod APK removes all the annoying ads that interrupt your gameplay and ruin your immersion. You can enjoy the game without any distractions or interruptions. Moreover, you don't need to root your device to install or play Sniper 3D Juegos de Disparos Mod APK. It is compatible with most Android devices and versions. You can download and install it easily and safely.</p>
15
- <h2>How to download and install Sniper 3D Juegos de Disparos Mod APK?</h2>
16
- <p>If you want to download and install Sniper 3D Juegos de Disparos Mod APK, you need to follow these simple steps:</p>
17
- <h3>Step by step guide</h3>
18
- <h4>- Download the mod apk file from a trusted source</h4>
19
- <p>The first thing you need to do is to download the mod apk file from a reliable and secure source. You can use the link below to get the latest version of Sniper 3D Juegos de Disparos Mod APK. Make sure you have enough storage space on your device before downloading the file.</p>
20
- <p>Sniper 3D Assassin: juego de disparos gratis mod apk<br />
21
- Sniper 3D Strike Assassin Ops: juego de disparos hack apk<br />
22
- Sniper 3D Gun Shooter: juego de disparos online mod apk<br />
23
- Sniper 3D Shooter: juego de disparos en primera persona mod apk<br />
24
- Sniper 3D Fury: juego de disparos de francotirador mod apk<br />
25
- Sniper 3D Silent Assassin: juego de disparos de sigilo mod apk<br />
26
- Sniper 3D Elite: juego de disparos de élite mod apk<br />
27
- Sniper 3D Zombie: juego de disparos de zombies mod apk<br />
28
- Sniper 3D City: juego de disparos en la ciudad mod apk<br />
29
- Sniper 3D War: juego de disparos de guerra mod apk<br />
30
- Sniper 3D Mission: juego de disparos de misiones mod apk<br />
31
- Sniper 3D Arena: juego de disparos multijugador mod apk<br />
32
- Sniper 3D Action: juego de disparos de acción mod apk<br />
33
- Sniper 3D Adventure: juego de disparos de aventura mod apk<br />
34
- Sniper 3D Survival: juego de disparos de supervivencia mod apk<br />
35
- Sniper 3D Crime: juego de disparos de crimen mod apk<br />
36
- Sniper 3D Army: juego de disparos de ejército mod apk<br />
37
- Sniper 3D Police: juego de disparos de policía mod apk<br />
38
- Sniper 3D SWAT: juego de disparos de SWAT mod apk<br />
39
- Sniper 3D Spy: juego de disparos de espía mod apk<br />
40
- Sniper 3D Hero: juego de disparos de héroe mod apk<br />
41
- Sniper 3D Villain: juego de disparos de villano mod apk<br />
42
- Sniper 3D Wild: juego de disparos en la naturaleza mod apk<br />
43
- Sniper 3D Jungle: juego de disparos en la selva mod apk<br />
44
- Sniper 3D Desert: juego de disparos en el desierto mod apk<br />
45
- Sniper 3D Mountain: juego de disparos en la montaña mod apk<br />
46
- Sniper 3D Snow: juego de disparos en la nieve mod apk<br />
47
- Sniper 3D Night: juego de disparos nocturno mod apk<br />
48
- Sniper 3D Day: juego de disparos diurno mod apk<br />
49
- Sniper 3D Halloween: juego de disparos temático mod apk<br />
50
- Sniper 3D Christmas: juego de disparos festivo mod apk<br />
51
- Sniper 3D Valentine: juego de disparos romántico mod apk<br />
52
- Sniper 3D Horror: juego de disparos terrorífico mod apk<br />
53
- Sniper 3D Fantasy: juego de disparos fantástico mod apk<br />
54
- Sniper 3D Sci-Fi: juego de disparos ciencia ficción mod apk<br />
55
- Sniper 3D Anime: juego de disparos anime mod apk<br />
56
- Sniper 3D Cartoon: juego de disparos dibujos animados mod apk<br />
57
- Sniper 3D Realistic: juego de disparos realista mod apk<br />
58
- Sniper 3D Funny: juego de disparos divertido mod apk<br />
59
- Sniper 3D Educational: juego de disparos educativo mod apk<br />
60
- Descargar sniper 3d juegos de disparos gratis para android con mod apk <br />
61
- Como instalar sniper 3d juegos de disparos en tu dispositivo android con el archivo mod apk <br />
62
- Reseña y análisis del sniper 3d juegos de disparos con el modo hackeado en el archivo apk <br />
63
- Trucos y consejos para jugar al sniper 3d juegos de disparos con el beneficio del archivo modificado en formato apk <br />
64
- Comparación entre el sniper 3d juegos de disparos original y el que tiene el archivo alterado en extensión .apk</p>
65
- <p>[Download Sniper 3D Juegos de Disparos Mod APK]</p>
66
- <h4>- Enable unknown sources on your device settings</h4>
67
- <p>The next thing you need to do is to enable unknown sources on your device settings. This will allow you to install apps that are not from the Google Play Store. To do this, go to your device settings, then security, then unknown sources, and toggle it on. You may see a warning message, but don't worry, it is safe to proceed.</p>
68
- <h4>- Install the mod apk file and launch the game</h4>
69
- <p>The final thing you need to do is to install the mod apk file and launch the game. To do this, locate the downloaded file on your device, tap on it, and follow the instructions on the screen. Once the installation is done, you can open the game and enjoy Sniper 3D Juegos de Disparos Mod APK.</p>
70
- <h2>How to play Sniper 3D Juegos de Disparos Mod APK?</h2>
71
- <p>Sniper 3D Juegos de Disparos Mod APK is easy to play, but challenging to master. Here are some tips and tricks for beginners:</p>
72
- <h3>Tips and tricks for beginners</h3>
73
- <h4>- Choose the right weapon for each mission</h4>
74
- <p>One of the most important things in Sniper 3D Juegos de Disparos Mod APK is to choose the right weapon for each mission. Different weapons have different stats, such as damage, range, stability, zoom, and reload time. You should consider these factors when selecting your weapon, as well as the type of target and the environment. For example, if you are shooting at a long distance, you should use a sniper rifle with a high zoom and range. If you are shooting at a moving target, you should use a weapon with a high stability and reload time.</p>
75
- <h4>- Aim for the head and use the zoom feature</h4>
76
- <p>Another important thing in Sniper 3D Juegos de Disparos Mod APK is to aim for the head and use the zoom feature. Aiming for the head will give you more damage and bonus points, as well as save you ammo. You can also use the zoom feature to get a better view of your target and adjust your aim accordingly. To use the zoom feature, just tap on the screen and slide your finger up or down.</p> <h4>- Upgrade your weapons and gear regularly</h4>
77
- <p>A third important thing in Sniper 3D Juegos de Disparos Mod APK is to upgrade your weapons and gear regularly. Upgrading your weapons and gear will improve their stats and performance, as well as unlock new features and abilities. You can use the coins and diamonds you get from Sniper 3D Juegos de Disparos Mod APK to upgrade your weapons and gear. You can also use the table below to see the different types of upgrades and their effects.</p>
78
- <table>
79
- <tr>
80
- <th>Type of upgrade</th>
81
- <th>Effect</th>
82
- </tr>
83
- <tr>
84
- <td>Muzzle</td>
85
- <td>Increases damage and stability</td>
86
- </tr>
87
- <tr>
88
- <td>Ammo</td>
89
- <td>Increases damage and pierce</td>
90
- </tr>
91
- <tr>
92
- <td>Body</td>
93
- <td>Increases range and zoom</td>
94
- </tr>
95
- <tr>
96
- <td>Grip</td>
97
- <td>Increases stability and reload time</td>
98
- </tr>
99
- <tr>
100
- <td>Scope</td>
101
- <td>Increases zoom and critical chance</td>
102
- </tr>
103
- <tr>
104
- <td>Clip</td>
105
- <td>Increases ammo capacity and reload time</td>
106
- </tr>
107
- <tr>
108
- <td>Gear</td>
109
- <td>Increases health, energy, and defense</td>
110
- </tr>
111
- </table>
112
- <h4>- Use the environment and cover to your advantage</h4>
113
- <p>A fourth important thing in Sniper 3D Juegos de Disparos Mod APK is to use the environment and cover to your advantage. The environment and cover can help you hide from your enemies, avoid their fire, and find better angles to shoot. You can also use the environment and cover to create distractions, such as shooting at explosive barrels, cars, or other objects. This will cause chaos and confusion among your enemies, giving you more opportunities to take them out.</p>
114
- <h2>Why should you play Sniper 3D Juegos de Disparos Mod APK?</h2>
115
- <p>Sniper 3D Juegos de Disparos Mod APK is not only a fun and exciting shooting game, but also a game that has many benefits for you. Here are some of them:</p>
116
- <h3>Benefits of playing Sniper 3D Juegos de Disparos Mod APK</h3>
117
- <h4>- Enjoy realistic graphics and sound effects</h4>
118
- <p>Sniper 3D Juegos de Disparos Mod APK has realistic graphics and sound effects that will make you feel like you are in the middle of a real battlefield. You will see detailed environments, realistic animations, and stunning visual effects. You will also hear realistic sounds, such as gunshots, explosions, screams, and more. You will be immersed in the game and feel the adrenaline rush of being a sniper.</p>
119
- <h4>- Experience thrilling and varied missions in different locations</h4>
120
- <p>Sniper 3D Juegos de Disparos Mod APK has thrilling and varied missions that will keep you entertained for hours. You will have to complete different objectives, such as assassinating targets, rescuing hostages, protecting allies, destroying vehicles, and more. You will also have to face different challenges, such as time limits, moving targets, multiple enemies, and more. You will travel to different locations around the world, such as cities, deserts, islands, mountains, and more. You will never get bored with Sniper 3D Juegos de Disparos Mod APK.</p>
121
- <h4>- Challenge yourself and other players in PVP mode</h4>
122
- <p>Sniper 3D Juegos de Disparos Mod APK has a PVP mode that will let you challenge yourself and other players in online battles. You can join or create a squad with your friends or other players, and compete against other squads in team deathmatch or domination modes. You can also play solo or duo in free for all or battle royale modes. You can show off your skills, rank up on the leaderboard, earn rewards, and have fun with Sniper 3D Juegos de Disparos Mod APK.</p>
123
- <h4>- Have fun with a free and addictive shooting game</h4>
124
- <p>Sniper 3D Juegos de Disparos Mod APK is a free and addictive shooting game that will make you want to play more and more. You can play it anytime and anywhere, without any internet connection or subscription required. You can also enjoy it without any ads or limitations, thanks to Sniper 3D Juegos de Disparos Mod APK. You can have fun with a shooting game that has everything you need: action, adventure, strategy, skill, and more.</p>
125
- <h2>Conclusion</h2>
126
- <p>Sniper 3D Juegos de Disparos Mod APK is the ultimate shooting game that you should try if you love sniping games. It has unlimited coins, diamonds, weapons, and more features that will make your gameplay more fun and enjoyable. It has realistic graphics and sound effects, thrilling and varied missions, PVP mode, and a free and addictive gameplay. You can download and install it easily and safely, and play it anytime and anywhere. You can also follow our tips and tricks to improve your skills and performance as a sniper. Sniper 3D Juegos de Disparos Mod APK is the ultimate shooting game that you should not miss.</p>
127
- <h2>FAQs</h2>
128
- <p>Here are some frequently asked questions about Sniper 3D Juegos de Disparos Mod APK:</p>
129
- <h4>Q: Is Sniper 3D Juegos de Disparos Mod APK safe to use?</h4>
130
- <p>A: Yes, Sniper 3D Juegos de Disparos Mod APK is safe to use, as long as you download it from a trusted source. We have tested the mod apk file and found no viruses or malware. However, you should always be careful when downloading and installing any mod apk file, and use it at your own risk.</p>
131
- <h4>Q: Is Sniper 3D Juegos de Disparos Mod APK legal to use?</h4>
132
- <p>A: No, Sniper 3D Juegos de Disparos Mod APK is not legal to use, as it violates the terms and conditions of the original game. It also infringes the intellectual property rights of the developers and publishers of the game. Therefore, we do not recommend or endorse the use of Sniper 3D Juegos de Disparos Mod APK, and we are not responsible for any consequences that may arise from using it.</p>
133
- <h4>Q: Can I play Sniper 3D Juegos de Disparos Mod APK with my friends?</h4>
134
- <p>A: Yes, you can play Sniper 3D Juegos de Disparos Mod APK with your friends, either offline or online. You can join or create a squad with your friends or other players, and compete against other squads in PVP mode. You can also play solo or duo in free for all or battle royale modes.</p>
135
- <h4>Q: Can I update Sniper 3D Juegos de Disparos Mod APK?</h4>
136
- <p>A: No, you cannot update Sniper 3D Juegos de Disparos Mod APK, as it is a modified version of the original game. If you update it, you will lose all the mod features and revert back to the original game. Therefore, you should avoid updating Sniper 3D Juegos de Disparos Mod APK, and wait for a new mod apk file to be released.</p>
137
- <h4>Q: Can I get banned for using Sniper 3D Juegos de Disparos Mod APK?</h4>
138
- <p>A: Yes, you can get banned for using Sniper 3D Juegos de Disparos Mod APK, as it is against the rules of the game. The game has an anti-cheat system that can detect if you are using a mod apk file, and ban you from playing online or accessing your account. Therefore, you should use Sniper 3D Juegos de Disparos Mod APK at your own risk, and be prepared for the possibility of getting banned.</p> 197e85843d<br />
139
- <br />
140
- <br />
 
spaces/1toTree/lora_test/ppdiffusers/pipelines/unclip/__init__.py DELETED
@@ -1,29 +0,0 @@
1
- # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
-
16
- from ...utils import (
17
- OptionalDependencyNotAvailable,
18
- is_paddle_available,
19
- is_paddlenlp_available,
20
- )
21
-
22
- try:
23
- if not (is_paddlenlp_available() and is_paddle_available()):
24
- raise OptionalDependencyNotAvailable()
25
- except OptionalDependencyNotAvailable:
26
- from ...utils.dummy_paddle_and_paddlenlp_objects import UnCLIPPipeline
27
- else:
28
- from .pipeline_unclip import UnCLIPPipeline
29
- from .text_proj import UnCLIPTextProjModel
 
spaces/1toTree/lora_test/ppdiffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py DELETED
@@ -1,443 +0,0 @@
1
- # Copyright 2022 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- import inspect
16
- from typing import Callable, List, Optional, Union
17
-
18
- import paddle
19
-
20
- from paddlenlp.transformers import CLIPTextModelWithProjection, CLIPTokenizer
21
-
22
- from ...models import AutoencoderKL, UNet2DConditionModel
23
- from ...models.attention import Transformer2DModel
24
- from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput
25
- from ...schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
26
- from ...utils import logging
27
- from .modeling_text_unet import UNetFlatConditionModel
28
-
29
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
30
-
31
-
32
- class VersatileDiffusionTextToImagePipeline(DiffusionPipeline):
33
- r"""
34
- Pipeline for text-to-image generation using Versatile Diffusion.
35
-
36
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
37
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
38
-
39
- Args:
40
- vae ([`AutoencoderKL`]):
41
- Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
42
- text_encoder ([`CLIPTextModelWithProjection`]):
43
- Frozen text-encoder. Versatile Diffusion uses the text portion of
44
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), specifically
45
- the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
46
- image_encoder ([`CLIPVisionModelWithProjection`]):
47
- Frozen vision-encoder. Versatile Diffusion uses the vision portion of
48
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPVisionModelWithProjection), specifically
49
- the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
50
- tokenizer (`CLIPTokenizer`):
51
- Tokenizer of class
52
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
53
- image_unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
54
- text_unet ([`UNetFlatConditionModel`]): Flat conditional U-Net for the text latents; its `Transformer2DModel` attention blocks are swapped with those of `image_unet` at load time.
55
- scheduler ([`SchedulerMixin`]):
56
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
57
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
58
- """
59
- tokenizer: CLIPTokenizer
60
- text_encoder: CLIPTextModelWithProjection
61
- image_unet: UNet2DConditionModel
62
- text_unet: UNetFlatConditionModel
63
- vae: AutoencoderKL
64
- scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler]
65
- _optional_components = ["text_unet"]
66
-
67
- def __init__(
68
- self,
69
- tokenizer: CLIPTokenizer,
70
- text_encoder: CLIPTextModelWithProjection,
71
- image_unet: UNet2DConditionModel,
72
- text_unet: UNetFlatConditionModel,
73
- vae: AutoencoderKL,
74
- scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
75
- ):
76
- super().__init__()
77
- self.register_modules(
78
- tokenizer=tokenizer,
79
- text_encoder=text_encoder,
80
- image_unet=image_unet,
81
- text_unet=text_unet,
82
- vae=vae,
83
- scheduler=scheduler,
84
- )
85
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
86
- if self.text_unet is not None:
87
- self._swap_unet_attention_blocks()
88
-
89
- def _swap_unet_attention_blocks(self):
90
- """
91
- Swap the `Transformer2DModel` blocks between the image and text UNets
92
- """
93
- for name, module in self.image_unet.named_sublayers(include_self=True):
94
- if isinstance(module, Transformer2DModel):
95
- parent_name, index = name.rsplit(".", 1)
96
- index = int(index)
97
- self.image_unet.get_sublayer(parent_name)[index], self.text_unet.get_sublayer(parent_name)[index] = (
98
- self.text_unet.get_sublayer(parent_name)[index],
99
- self.image_unet.get_sublayer(parent_name)[index],
100
- )
101
-
102
- def remove_unused_weights(self):
103
- self.register_modules(text_unet=None)
104
-
105
- def _encode_text_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance, negative_prompt):
106
- r"""
107
- Encodes the prompt into text encoder hidden states.
108
-
109
- Args:
110
- prompt (`str` or `list(int)`):
111
- prompt to be encoded
112
- num_images_per_prompt (`int`):
113
- number of images that should be generated per prompt
114
- do_classifier_free_guidance (`bool`):
115
- whether to use classifier free guidance or not
116
- negative_prompt (`str` or `List[str]`):
117
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
118
- if `guidance_scale` is less than `1`).
119
- """
120
-
121
- def normalize_embeddings(encoder_output):
122
- embeds = paddle.matmul(encoder_output.last_hidden_state, self.text_encoder.text_projection)
123
- embeds_pooled = encoder_output.text_embeds
124
- embeds = embeds / paddle.norm(embeds_pooled.unsqueeze(1), axis=-1, keepdim=True)
125
- return embeds
126
-
127
- batch_size = len(prompt) if isinstance(prompt, list) else 1
128
-
129
- text_inputs = self.tokenizer(
130
- prompt,
131
- padding="max_length",
132
- max_length=self.tokenizer.model_max_length,
133
- truncation=True,
134
- return_tensors="pd",
135
- )
136
- text_input_ids = text_inputs.input_ids
137
- untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pd").input_ids
138
-
139
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not paddle.equal_all(
140
- text_input_ids, untruncated_ids
141
- ):
142
- removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
143
- logger.warning(
144
- "The following part of your input was truncated because CLIP can only handle sequences up to"
145
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
146
- )
147
-
148
- config = (
149
- self.text_encoder.config
150
- if isinstance(self.text_encoder.config, dict)
151
- else self.text_encoder.config.to_dict()
152
- )
153
- if config.get("use_attention_mask", None) is not None and config["use_attention_mask"]:
154
- attention_mask = text_inputs.attention_mask
155
- else:
156
- attention_mask = None
157
-
158
- text_embeddings = self.text_encoder(text_input_ids, attention_mask=attention_mask)
159
- text_embeddings = normalize_embeddings(text_embeddings)
160
-
161
- # duplicate text embeddings for each generation per prompt, using mps friendly method
162
- bs_embed, seq_len, _ = text_embeddings.shape
163
- text_embeddings = text_embeddings.tile([1, num_images_per_prompt, 1])
164
- text_embeddings = text_embeddings.reshape([bs_embed * num_images_per_prompt, seq_len, -1])
165
-
166
- # get unconditional embeddings for classifier free guidance
167
- if do_classifier_free_guidance:
168
- uncond_tokens: List[str]
169
- if negative_prompt is None:
170
- uncond_tokens = [""] * batch_size
171
- elif type(prompt) is not type(negative_prompt):
172
- raise TypeError(
173
- f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
174
- f" {type(prompt)}."
175
- )
176
- elif isinstance(negative_prompt, str):
177
- uncond_tokens = [negative_prompt]
178
- elif batch_size != len(negative_prompt):
179
- raise ValueError(
180
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
181
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
182
- " the batch size of `prompt`."
183
- )
184
- else:
185
- uncond_tokens = negative_prompt
186
-
187
- max_length = text_input_ids.shape[-1]
188
- uncond_input = self.tokenizer(
189
- uncond_tokens,
190
- padding="max_length",
191
- max_length=max_length,
192
- truncation=True,
193
- return_tensors="pd",
194
- )
195
-
196
- if config.get("use_attention_mask", None) is not None and config["use_attention_mask"]:
197
- attention_mask = uncond_input.attention_mask
198
- else:
199
- attention_mask = None
200
-
201
- uncond_embeddings = self.text_encoder(uncond_input.input_ids, attention_mask=attention_mask)
202
- uncond_embeddings = normalize_embeddings(uncond_embeddings)
203
-
204
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
205
- seq_len = uncond_embeddings.shape[1]
206
- uncond_embeddings = uncond_embeddings.tile([1, num_images_per_prompt, 1])
207
- uncond_embeddings = uncond_embeddings.reshape([batch_size * num_images_per_prompt, seq_len, -1])
208
-
209
- # For classifier free guidance, we need to do two forward passes.
210
- # Here we concatenate the unconditional and text embeddings into a single batch
211
- # to avoid doing two forward passes
212
- text_embeddings = paddle.concat([uncond_embeddings, text_embeddings])
213
-
214
- return text_embeddings
215
-
216
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
217
- def decode_latents(self, latents):
218
- latents = 1 / 0.18215 * latents
219
- image = self.vae.decode(latents).sample
220
- image = (image / 2 + 0.5).clip(0, 1)
221
- # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
222
- image = image.transpose([0, 2, 3, 1]).cast("float32").numpy()
223
- return image
224
-
225
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
226
- def prepare_extra_step_kwargs(self, generator, eta):
227
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
228
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
229
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
230
- # and should be between [0, 1]
231
-
232
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
233
- extra_step_kwargs = {}
234
- if accepts_eta:
235
- extra_step_kwargs["eta"] = eta
236
-
237
- # check if the scheduler accepts generator
238
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
239
- if accepts_generator:
240
- extra_step_kwargs["generator"] = generator
241
- return extra_step_kwargs
242
-
243
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.check_inputs
244
- def check_inputs(self, prompt, height, width, callback_steps):
245
- if not isinstance(prompt, str) and not isinstance(prompt, list):
246
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
247
-
248
- if height % 8 != 0 or width % 8 != 0:
249
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
250
-
251
- if (callback_steps is None) or (
252
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
253
- ):
254
- raise ValueError(
255
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
256
- f" {type(callback_steps)}."
257
- )
258
-
259
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
260
- def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, generator, latents=None):
261
- shape = [batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor]
262
- if isinstance(generator, list) and len(generator) != batch_size:
263
- raise ValueError(
264
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
265
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
266
- )
267
-
268
- if latents is None:
269
- if isinstance(generator, list):
270
- shape = [
271
- 1,
272
- ] + shape[1:]
273
- latents = [paddle.randn(shape, generator=generator[i], dtype=dtype) for i in range(batch_size)]
274
- latents = paddle.concat(latents, axis=0)
275
- else:
276
- latents = paddle.randn(shape, generator=generator, dtype=dtype)
277
- else:
278
- if latents.shape != shape:
279
- raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
280
-
281
- # scale the initial noise by the standard deviation required by the scheduler
282
- latents = latents * self.scheduler.init_noise_sigma
283
- return latents
284
-
285
- @paddle.no_grad()
286
- def __call__(
287
- self,
288
- prompt: Union[str, List[str]],
289
- height: Optional[int] = None,
290
- width: Optional[int] = None,
291
- num_inference_steps: int = 50,
292
- guidance_scale: float = 7.5,
293
- negative_prompt: Optional[Union[str, List[str]]] = None,
294
- num_images_per_prompt: Optional[int] = 1,
295
- eta: float = 0.0,
296
- generator: Optional[Union[paddle.Generator, List[paddle.Generator]]] = None,
297
- latents: Optional[paddle.Tensor] = None,
298
- output_type: Optional[str] = "pil",
299
- return_dict: bool = True,
300
- callback: Optional[Callable[[int, int, paddle.Tensor], None]] = None,
301
- callback_steps: Optional[int] = 1,
302
- **kwargs,
303
- ):
304
- r"""
305
- Function invoked when calling the pipeline for generation.
306
-
307
- Args:
308
- prompt (`str` or `List[str]`):
309
- The prompt or prompts to guide the image generation.
310
- height (`int`, *optional*, defaults to self.image_unet.config.sample_size * self.vae_scale_factor):
311
- The height in pixels of the generated image.
312
- width (`int`, *optional*, defaults to self.image_unet.config.sample_size * self.vae_scale_factor):
313
- The width in pixels of the generated image.
314
- num_inference_steps (`int`, *optional*, defaults to 50):
315
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
316
- expense of slower inference.
317
- guidance_scale (`float`, *optional*, defaults to 7.5):
318
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
319
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
320
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
321
- 1`. Higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
322
- usually at the expense of lower image quality.
323
- negative_prompt (`str` or `List[str]`, *optional*):
324
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
325
- if `guidance_scale` is less than `1`).
326
- num_images_per_prompt (`int`, *optional*, defaults to 1):
327
- The number of images to generate per prompt.
328
- eta (`float`, *optional*, defaults to 0.0):
329
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
330
- [`schedulers.DDIMScheduler`], will be ignored for others.
331
- generator (`paddle.Generator`, *optional*):
332
- A paddle `Generator` object to make generation
333
- deterministic.
334
- latents (`paddle.Tensor`, *optional*):
335
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
336
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
337
- tensor will be generated by sampling using the supplied random `generator`.
338
- output_type (`str`, *optional*, defaults to `"pil"`):
339
- The output format of the generate image. Choose between
340
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
341
- return_dict (`bool`, *optional*, defaults to `True`):
342
- Whether or not to return a [`~pipeline_utils.ImagePipelineOutput`] instead of a
343
- plain tuple.
344
- callback (`Callable`, *optional*):
345
- A function that will be called every `callback_steps` steps during inference. The function will be
346
- called with the following arguments: `callback(step: int, timestep: int, latents: paddle.Tensor)`.
347
- callback_steps (`int`, *optional*, defaults to 1):
348
- The frequency at which the `callback` function will be called. If not specified, the callback will be
349
- called at every step.
350
-
351
- Examples:
352
-
353
- ```py
354
- >>> from ppdiffusers import VersatileDiffusionTextToImagePipeline
355
- >>> import paddle
356
-
357
- >>> pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
358
- ... "shi-labs/versatile-diffusion"
359
- ... )
360
- >>> pipe.remove_unused_weights()
361
-
362
- >>> generator = paddle.Generator().manual_seed(0)
363
- >>> image = pipe("an astronaut riding on a horse on mars", generator=generator).images[0]
364
- >>> image.save("./astronaut.png")
365
- ```
366
-
367
- Returns:
368
- [`~pipeline_utils.ImagePipelineOutput`] or `tuple`:
369
- [`~pipeline_utils.ImagePipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
370
- When returning a tuple, the only element is a list with the generated images. Unlike the Stable Diffusion
371
- pipelines, this pipeline does not run a safety checker, so no "not-safe-for-work" flags are
372
- returned.
373
- """
374
- # 0. Default height and width to unet
375
- height = height or self.image_unet.config.sample_size * self.vae_scale_factor
376
- width = width or self.image_unet.config.sample_size * self.vae_scale_factor
377
-
378
- # 1. Check inputs. Raise error if not correct
379
- self.check_inputs(prompt, height, width, callback_steps)
380
-
381
- # 2. Define call parameters
382
- batch_size = 1 if isinstance(prompt, str) else len(prompt)
383
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
384
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
385
- # corresponds to doing no classifier free guidance.
386
- do_classifier_free_guidance = guidance_scale > 1.0
387
-
388
- # 3. Encode input prompt
389
- text_embeddings = self._encode_text_prompt(
390
- prompt, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
391
- )
392
-
393
- # 4. Prepare timesteps
394
- self.scheduler.set_timesteps(num_inference_steps)
395
- timesteps = self.scheduler.timesteps
396
-
397
- # 5. Prepare latent variables
398
- num_channels_latents = self.image_unet.in_channels
399
- latents = self.prepare_latents(
400
- batch_size * num_images_per_prompt,
401
- num_channels_latents,
402
- height,
403
- width,
404
- text_embeddings.dtype,
405
- generator,
406
- latents,
407
- )
408
-
409
- # 6. Prepare extra step kwargs.
410
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
411
-
412
- # 7. Denoising loop
413
- for i, t in enumerate(self.progress_bar(timesteps)):
414
- # expand the latents if we are doing classifier free guidance
415
- latent_model_input = paddle.concat([latents] * 2) if do_classifier_free_guidance else latents
416
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
417
-
418
- # predict the noise residual
419
- noise_pred = self.image_unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
420
-
421
- # perform guidance
422
- if do_classifier_free_guidance:
423
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
424
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
425
-
426
- # compute the previous noisy sample x_t -> x_t-1
427
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
428
-
429
- # call the callback, if provided
430
- if callback is not None and i % callback_steps == 0:
431
- callback(i, t, latents)
432
-
433
- # 9. Post-processing
434
- image = self.decode_latents(latents)
435
-
436
- # 10. Convert to PIL
437
- if output_type == "pil":
438
- image = self.numpy_to_pil(image)
439
-
440
- if not return_dict:
441
- return (image,)
442
-
443
- return ImagePipelineOutput(images=image)
spaces/801artistry/RVC801/infer/lib/infer_pack/modules/F0Predictor/__init__.py DELETED
File without changes
spaces/A1draw-12196y/anime-ai-detect/app.py DELETED
@@ -1,17 +0,0 @@
1
- import gradio as gr
2
- from transformers import pipeline
3
-
4
- detection_pipeline = pipeline("image-classification", "saltacc/anime-ai-detect")
5
-
6
-
7
- def detect(img):
8
- print(img)
9
- output = detection_pipeline(img, top_k=2)
10
- final = {}
11
- for d in output:
12
- final[d["label"]] = d["score"]
13
- return final
14
-
15
-
16
- iface = gr.Interface(fn=detect, inputs=gr.Image(type="pil"), outputs=gr.Label(label="result"))
17
- iface.launch()
spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/training/main.py DELETED
@@ -1,596 +0,0 @@
1
- from inspect import getargs
2
- import logging
3
- import os
4
- import random
5
- from datetime import datetime
6
- import bisect
7
- import copy
8
- import numpy as np
9
- import torch
10
- import torch.backends.cudnn as cudnn
11
- from torch import optim
12
- from torch.cuda.amp import GradScaler
13
- import faulthandler
14
- import pathlib
15
-
16
- try:
17
- import wandb
18
- except ImportError:
19
- wandb = None
20
-
21
- try:
22
- import torch.utils.tensorboard as tensorboard
23
- except ImportError:
24
- tensorboard = None
25
-
26
- try:
27
- import horovod.torch as hvd
28
- except ImportError:
29
- hvd = None
30
-
31
- from open_clip import create_model_and_transforms, trace_model, create_model
32
- from training.data import get_data
33
- from training.distributed import is_master, init_distributed_device, world_info_from_env
34
- from training.logger import setup_logging
35
- from training.params import parse_args
36
- from training.scheduler import cosine_lr
37
- from training.train import train_one_epoch, evaluate
38
- from open_clip.utils import dataset_split, get_optimizer
39
-
40
-
41
- def maintain_ckpts(args, startidx, all_idx_len):
42
- for i in reversed(range(startidx, all_idx_len)):
43
- if os.path.exists(os.path.join(args.checkpoint_path, f"epoch_top_{i}.pt")):
44
- os.rename(
45
- os.path.join(args.checkpoint_path, f"epoch_top_{i}.pt"),
46
- os.path.join(args.checkpoint_path, f"epoch_top_{i+1}.pt"),
47
- )
48
- if os.path.exists(
49
- os.path.join(args.checkpoint_path, f"epoch_top_{all_idx_len}.pt")
50
- ):
51
- os.remove(os.path.join(args.checkpoint_path, f"epoch_top_{all_idx_len}.pt"))
52
- return
53
-
54
-
55
- def update_top_k_performance(
56
- new_metrics_inputs, current_top_k_ckpt_metrics, args, ckpt, bignumbetter=True
57
- ):
58
- """
59
- Record the top-k performance of the current epoch.
60
- current_top_k_metrics is a dictionary of the form: {1: top_1_ckpt_measure, 2: top_2_ckpt_measure, ...}
61
- """
62
- if isinstance(new_metrics_inputs, (list, tuple)):
63
- new_metrics_inputs = np.mean(new_metrics_inputs)
64
- return update_top_k_performance(
65
- new_metrics_inputs,
66
- current_top_k_ckpt_metrics,
67
- args=args,
68
- ckpt=ckpt,
69
- bignumbetter=bignumbetter,
70
- )
71
- elif isinstance(new_metrics_inputs, dict):
72
- new_metrics_inputs = np.mean(list(new_metrics_inputs.values()))
73
- return update_top_k_performance(
74
- new_metrics_inputs,
75
- current_top_k_ckpt_metrics,
76
- args=args,
77
- ckpt=ckpt,
78
- bignumbetter=bignumbetter,
79
- )
80
- elif isinstance(new_metrics_inputs, (float, int)):
81
- update_flag = {k: False for k in current_top_k_ckpt_metrics.keys()}
82
- sorted_keys = sorted(current_top_k_ckpt_metrics.keys())
83
- sorted_values = sorted(
84
- current_top_k_ckpt_metrics.values(), reverse=bignumbetter
85
- )
86
- sorted_values_ = copy.deepcopy(sorted_values)
87
- sorted_values.append(new_metrics_inputs)
88
- sorted_values = sorted(sorted_values, reverse=bignumbetter)
89
- sorted_values = sorted_values[:-1]
90
-
91
- if sorted_values == sorted_values_:
92
- return current_top_k_ckpt_metrics, new_metrics_inputs
93
- else:
94
- for i in range(len(sorted_keys)):
95
- if current_top_k_ckpt_metrics[sorted_keys[i]] != sorted_values[i]:
96
- current_top_k_ckpt_metrics[sorted_keys[i]] = sorted_values[i]
97
- update_flag[sorted_keys[i]] = True
98
- for i in range(len(update_flag)):
99
- if update_flag[i]:
100
- maintain_ckpts(args, i, len(sorted_keys))
101
- torch.save(
102
- ckpt,
103
- os.path.join(args.checkpoint_path, f"epoch_top_{i}.pt"),
104
- )
105
- break
106
- return current_top_k_ckpt_metrics, new_metrics_inputs
107
-
108
-
109
- # def updateifNone(a, b):
110
- # a = b if None else a
111
- # return a
112
-
113
-
114
- def is_pretrained_params(n):
115
- return (
116
- n.startswith("transformer")
117
- or n in ["positional_embedding", "text_projection"]
118
- or n.startswith("token_embedding")
119
- or n.startswith("ln_final")
120
- or n.startswith("logit_scale_t")
121
- )
122
-
123
-
124
- def random_seed(seed=42, rank=0):
125
- torch.manual_seed(seed + rank)
126
- np.random.seed(seed + rank)
127
- random.seed(seed + rank)
128
-
129
-
130
- def main():
131
- args = parse_args()
132
- # sanitize model name for filesystem / uri use, easier if we don't use / in name as a rule?
133
- args.amodel = args.amodel.replace("/", "-")
134
- # download sizes.json file
135
-
136
- # (yusong): the below two lines are for debug
137
- # print("setting up faulthandler")
138
- # faulthandler.register(10)
139
-
140
- random.seed(args.seed)
141
- torch.manual_seed(args.seed)
142
- torch.cuda.manual_seed(args.seed)
143
- torch.cuda.manual_seed_all(args.seed)
144
- np.random.seed(args.seed)
145
- if args.tmodel == "bert" or args.tmodel == "roberta" or args.tmodel == "bart":
146
- assert (
147
- args.pretrained == "" or args.pretrained is None
148
- ), "bert/roberta/bart text encoder does not support pretrained models."
149
-
150
- # get the name of the experiments
151
- if args.name is None:
152
- args.name = "-".join(
153
- [
154
- datetime.now().strftime("%Y_%m_%d-%H_%M_%S"),
155
- f"model_{args.amodel}",
156
- f"lr_{args.lr}",
157
- f"b_{args.batch_size}",
158
- f"j_{args.workers}",
159
- f"p_{args.precision}",
160
- ]
161
- )
162
-
163
- # discover initial world args early so we can log properly
164
- args.distributed = False
165
- args.local_rank, args.rank, args.world_size = world_info_from_env()
166
-
167
- if args.remotedata and is_master(args):
168
- for dataset_name in args.datasetnames:
169
- for split in dataset_split[dataset_name]:
170
- if not os.path.exists(f"./json_files/{dataset_name}/{split}"):
171
- os.makedirs(f"./json_files/{dataset_name}/{split}")
172
- os.system(
173
- f"aws s3 cp s3://s-laion-audio/webdataset_tar/{dataset_name}/{split}/sizes.json ./json_files/{dataset_name}/{split}/sizes.json"
174
- )
175
-
176
- args.log_path = None
177
- if is_master(args, local=args.log_local):
178
- log_base_path = os.path.join(args.logs, args.name)
179
- os.makedirs(log_base_path, exist_ok=True)
180
- log_filename = f"out-{args.rank}" if args.log_local else "out.log"
181
- args.log_path = os.path.join(log_base_path, log_filename)
182
- if os.path.exists(args.log_path):
183
- print(
184
- "Error. Experiment already exists. Use --name {} to specify a new experiment."
185
- )
186
- return -1
187
-
188
- # Set logger
189
- args.log_level = logging.DEBUG if args.debug else logging.INFO
190
- setup_logging(args.log_path, args.log_level)
191
-
192
- # fully initialize distributed device environment
193
- device = init_distributed_device(args)
194
-
195
- args.wandb = "wandb" in args.report_to or "all" in args.report_to
196
- args.tensorboard = "tensorboard" in args.report_to or "all" in args.report_to
197
- if is_master(args):
198
- args.tensorboard_path = (
199
- os.path.join(args.logs, args.name, "tensorboard")
200
- if args.tensorboard
201
- else ""
202
- )
203
- args.checkpoint_path = os.path.join(args.logs, args.name, "checkpoints")
204
- for dirname in [args.tensorboard_path, args.checkpoint_path]:
205
- if dirname:
206
- os.makedirs(dirname, exist_ok=True)
207
- else:
208
- args.tensorboard_path = ""
209
- args.checkpoint_path = ""
210
-
211
- if args.copy_codebase:
212
- copy_codebase(args)
213
-
214
- assert args.precision in ["amp", "fp16", "fp32"]
215
- if args.precision == "fp16":
216
- logging.warning(
217
- "It is recommended to use AMP mixed-precision instead of FP16. "
218
- "FP16 support needs further verification and tuning, especially for train."
219
- )
220
-
221
- if args.horovod:
222
- logging.info(
223
- f"Running in horovod mode with multiple processes / nodes. Device: {args.device}."
224
- f"Process (global: {args.rank}, local {args.local_rank}), total {args.world_size}."
225
- )
226
- elif args.distributed:
227
- logging.info(
228
- f"Running in distributed mode with multiple processes. Device: {args.device}."
229
- f"Process (global: {args.rank}, local {args.local_rank}), total {args.world_size}."
230
- )
231
- else:
232
- logging.info(f"Running with a single process. Device {args.device}.")
233
-
234
- logging.info(f"openai cache dir: {os.path.expanduser(args.openai_model_cache_dir)}")
235
-
236
- model, model_cfg = create_model(
237
- args.amodel,
238
- args.tmodel,
239
- args.pretrained,
240
- precision=args.precision,
241
- device=device,
242
- jit=args.torchscript,
243
- force_quick_gelu=args.force_quick_gelu,
244
- openai_model_cache_dir=os.path.expanduser(args.openai_model_cache_dir),
245
- skip_params=True,
246
- pretrained_audio=args.pretrained_audio,
247
- pretrained_text=args.pretrained_text,
248
- enable_fusion=args.enable_fusion,
249
- fusion_type=args.fusion_type,
250
- )
251
-
252
- if args.horovod:
253
- with torch.no_grad():
254
- for param in model.parameters():
255
- param.set_(param.contiguous())
256
-
257
- if args.trace:
258
- model = trace_model(model, batch_size=args.batch_size, device=device)
259
-
260
- if is_master(args):
261
- logging.info("Model:")
262
- logging.info(f"{str(model)}")
263
- logging.info("Params:")
264
- params_file = os.path.join(args.logs, args.name, "params.txt")
265
- with open(params_file, "w") as f:
266
- for name in sorted(vars(args)):
267
- val = getattr(args, name)
268
- logging.info(f" {name}: {val}")
269
- f.write(f"{name}: {val}\n")
270
-
271
- if args.distributed and not args.horovod:
272
- if args.use_bn_sync:
273
- model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
274
- ddp_args = {}
275
- if args.ddp_static_graph:
276
- # this doesn't exist in older PyTorch, arg only added if enabled
277
- ddp_args["static_graph"] = True
278
- model = torch.nn.parallel.DistributedDataParallel(
279
- model, device_ids=[device], find_unused_parameters=True, **ddp_args
280
- )
281
-
282
- data = get_data(args, model_cfg)
283
- assert len(data), "At least one train or eval dataset must be specified."
284
- if args.trace:
285
- assert "train" not in data, "Cannot train with traced model"
286
-
287
- exclude = (
288
- lambda n, p: p.ndim < 2
289
- or "bn" in n
290
- or "ln" in n
291
- or "bias" in n
292
- or "logit_scale" in n
293
- )
294
- include = lambda n, p: not exclude(n, p)
295
-
296
- named_parameters = list(model.named_parameters())
297
-
298
- # freeze text encoder
299
- text_freeze_parameters = [p for n, p in named_parameters if "text_branch" in n]
300
-
301
- if args.freeze_text:
302
- print("Freeze Text!!!!")
303
- for k in text_freeze_parameters:
304
- k.requires_grad = False
305
-
306
- gain_or_bias_params = [
307
- p for n, p in named_parameters if exclude(n, p) and p.requires_grad
308
- ]
309
- rest_params = [p for n, p in named_parameters if include(n, p) and p.requires_grad]
310
-
311
- # set wd-related params to 0 if use adam optimizer
312
- if args.optimizer == "adam":
313
- args.wd = 0
314
- args.wd_pretrained = 0
315
- args.wd_new = 0
316
-
317
- if args.train_data is None:
318
- optimizer = None
319
- scheduler = None
320
- else:
321
- total_steps = data["train"].dataloader.num_batches * args.epochs
322
-
323
- if args.split_opt:
324
- for x in ["lr", "beta1", "beta2", "eps", "wd"]:
325
- for y in ["_new", "_pretrained"]:
326
- if getattr(args, x + y) is None:
327
- setattr(args, x + y, getattr(args, x))
328
-
329
- gain_or_bias_pretrained_params = [
330
- p
331
- for n, p in named_parameters
332
- if (exclude(n, p) and p.requires_grad) and is_pretrained_params(n)
333
- ]
334
- rest_pretrained_params = [
335
- p
336
- for n, p in named_parameters
337
- if (include(n, p) and p.requires_grad) and is_pretrained_params(n)
338
- ]
339
- gain_or_bias_new_params = [
340
- p
341
- for n, p in named_parameters
342
- if (exclude(n, p) and p.requires_grad) and (not is_pretrained_params(n))
343
- ]
344
- rest_new_params = [
345
- p
346
- for n, p in named_parameters
347
- if (include(n, p) and p.requires_grad) and (not is_pretrained_params(n))
348
- ]
349
- pretrained_params_optimizer = get_optimizer(
350
- [
351
- {"params": gain_or_bias_pretrained_params, "weight_decay": 0.0},
352
- {
353
- "params": rest_pretrained_params,
354
- "weight_decay": args.wd_pretrained,
355
- },
356
- ],
357
- lr=args.lr_pretrained,
358
- betas=(args.beta1_pretrained, args.beta2_pretrained),
359
- eps=args.eps_pretrained,
360
- momentum=args.momentum_pretrained,
361
- optimizer_name=args.optimizer,
362
- )
363
- pretrained_params_scheduler = cosine_lr(
364
- pretrained_params_optimizer,
365
- args.lr_pretrained,
366
- args.warmup,
367
- total_steps,
368
- )
369
- new_params_optimizer = get_optimizer(
370
- [
371
- {"params": gain_or_bias_new_params, "weight_decay": 0.0},
372
- {"params": rest_new_params, "weight_decay": args.wd_new},
373
- ],
374
- lr=args.lr_new,
375
- betas=(args.beta1_new, args.beta2_new),
376
- eps=args.eps_new,
377
- momentum=args.momentum_new,
378
- optimizer_name=args.optimizer,
379
- )
380
-
381
- new_params_scheduler = cosine_lr(
382
- new_params_optimizer, args.lr_new, args.warmup, total_steps
383
- )
384
-
385
- optimizer = {
386
- "pretrained": pretrained_params_optimizer,
387
- "new": new_params_optimizer,
388
- }
389
- scheduler = {
390
- "pretrained": pretrained_params_scheduler,
391
- "new": new_params_scheduler,
392
- }
393
-
394
- if args.horovod:
395
- pretrained_params_optimizer = hvd.DistributedOptimizer(
396
- pretrained_params_optimizer,
397
- named_parameters=model.named_parameters(),
398
- )
399
- new_params_optimizer = hvd.DistributedOptimizer(
400
- new_params_optimizer, named_parameters=model.named_parameters()
401
- )
402
- hvd.broadcast_parameters(model.state_dict(), root_rank=0)
403
- hvd.broadcast_optimizer_state(pretrained_params_optimizer, root_rank=0)
404
- hvd.broadcast_optimizer_state(new_params_optimizer, root_rank=0)
405
- else:
406
- optimizer = get_optimizer(
407
- [
408
- {"params": gain_or_bias_params, "weight_decay": 0.0},
409
- {"params": rest_params, "weight_decay": args.wd},
410
- ],
411
- lr=args.lr,
412
- betas=(args.beta1, args.beta2),
413
- eps=args.eps,
414
- momentum=args.momentum,
415
- optimizer_name=args.optimizer,
416
- )
417
-
418
- scheduler = cosine_lr(optimizer, args.lr, args.warmup, total_steps)
419
-
420
- if args.horovod:
421
- optimizer = hvd.DistributedOptimizer(
422
- optimizer, named_parameters=model.named_parameters()
423
- )
424
- hvd.broadcast_parameters(model.state_dict(), root_rank=0)
425
- hvd.broadcast_optimizer_state(optimizer, root_rank=0)
426
-
427
- scaler = GradScaler() if args.precision == "amp" else None
428
-
429
- # optionally resume from a checkpoint
430
- start_epoch = 0
431
- if args.resume is not None:
432
- if os.path.isfile(args.resume):
433
- checkpoint = torch.load(args.resume, map_location=device)
434
- if "epoch" in checkpoint:
435
- # resuming a train checkpoint w/ epoch and optimizer state
436
- start_epoch = checkpoint["epoch"]
437
- sd = checkpoint["state_dict"]
438
- if not args.distributed and next(iter(sd.items()))[0].startswith(
439
- "module"
440
- ):
441
- sd = {k[len("module.") :]: v for k, v in sd.items()}
442
- model.load_state_dict(sd)
443
- if args.split_opt:
444
- if optimizer is not None:
445
- for k, o_ in optimizer.items():
446
- o_.load_state_dict(checkpoint[k + "_" + "optimizer"])
447
- if optimizer is not None:
448
- optimizer.load_state_dict(checkpoint["optimizer"])
449
- if scaler is not None and "scaler" in checkpoint:
450
- scaler.load_state_dict(checkpoint["scaler"])
451
- logging.info(
452
- f"=> resuming checkpoint '{args.resume}' (epoch {start_epoch})"
453
- )
454
- else:
455
- # loading a bare (model only) checkpoint for fine-tune or evaluation
456
- model.load_state_dict(checkpoint)
457
- logging.info(
458
- f"=> loaded checkpoint '{args.resume}' (epoch {start_epoch})"
459
- )
460
- if args.freeze_text:
461
- print("Freeze Text!!!!")
462
- for k in text_freeze_parameters:
463
- k.requires_grad = False
464
- else:
465
- logging.info("=> no checkpoint found at '{}'".format(args.resume))
466
-
467
- cudnn.benchmark = True
468
- cudnn.deterministic = False
469
-
470
- # determine if this worker should save logs and checkpoints. only do so if it is rank == 0
471
- args.save_logs = args.logs and args.logs.lower() != "none" and is_master(args)
472
- writer = None
473
- if args.save_logs and args.tensorboard:
474
- assert tensorboard is not None, "Please install tensorboard."
475
- writer = tensorboard.SummaryWriter(args.tensorboard_path)
476
-
477
- if args.wandb and is_master(args):
478
- assert wandb is not None, "Please install wandb."
479
- logging.debug("Starting wandb.")
480
- args.train_sz = data["train"].dataloader.num_samples
481
- if args.val_data is not None:
482
- args.val_sz = data["val"].dataloader.num_samples
483
- # you will have to configure this for your project!
484
- wandb.init(
485
- project="clap",
486
- notes=args.wandb_notes,
487
- name=args.wandb_notes,
488
- tags=[],
489
- config=vars(args),
490
- )
491
- if args.debug:
492
- wandb.watch(model, log="all")
493
- wandb.save(params_file)
494
- logging.debug("Finished loading wandb.")
495
-
496
- if "train" not in data:
497
- evaluate(model, data, start_epoch, args, writer)
498
- return
499
- elif start_epoch == 0 and "val" in data and not args.no_eval:
500
- evaluate(model, data, 0, args, writer)
501
- # print(f'rank {args.rank}, Start First Evaluation')# (yusong): for debug
502
- if args.save_top_performance:
503
- current_top_k_ckpt_metrics = {
504
- i: 0 for i in range(args.save_top_performance)
505
- } # initialize the top-k metric for ckpts to 0
506
-
507
- # print(f'rank {args.rank}, Start Training') # (yusong): for debug
508
- for epoch in range(start_epoch, args.epochs):
509
- # freeze the text param after (include) args.freeze_text_after, this is -1 by default
510
- if epoch == args.freeze_text_after:
511
- print("Text pretrained parameters are freezed since this epoch.")
512
- for k in text_freeze_parameters:
513
- k.requires_grad = False
514
- if is_master(args):
515
- logging.info(f"Start epoch {epoch}")
516
-
517
- train_one_epoch(model, data, epoch, optimizer, scaler, scheduler, args, writer)
518
- completed_epoch = epoch + 1
519
-
520
- if (
521
- any(v in data for v in ("val", "imagenet-val", "imagenet-v2"))
522
- and not args.no_eval
523
- ):
524
- metrics = evaluate(model, data, completed_epoch, args, writer)
525
- if args.save_top_performance:
526
- top_k_dataset = args.top_k_checkpoint_select_dataset
527
- top_k_metric = args.top_k_checkpoint_select_metric
528
- filtered_metrics = [
529
- v
530
- for k, v in metrics.items()
531
- if top_k_metric in k and top_k_dataset in k
532
- ] # check all R@10 metrics (all dataset) and use it to update the ckpt
533
- # Saving checkpoints.
534
- if args.save_logs:
535
- if args.split_opt:
536
- opt_dict = {
537
- k + "_" + "optimizer": v.state_dict() for k, v in optimizer.items()
538
- }
539
- else:
540
- opt_dict = {"optimizer": optimizer.state_dict()}
541
- checkpoint_dict = {
542
- "epoch": completed_epoch,
543
- "name": args.name,
544
- "state_dict": model.state_dict(),
545
- }
546
- checkpoint_dict.update(opt_dict)
547
- if scaler is not None:
548
- checkpoint_dict["scaler"] = scaler.state_dict()
549
-
550
- if completed_epoch == args.epochs or (
551
- args.save_frequency > 0 and (completed_epoch % args.save_frequency) == 0
552
- ):
553
- torch.save(
554
- checkpoint_dict,
555
- os.path.join(args.checkpoint_path, f"epoch_{completed_epoch}.pt"),
556
- )
557
- if args.save_most_recent:
558
- torch.save(
559
- checkpoint_dict,
560
- os.path.join(args.checkpoint_path, f"epoch_latest.pt"),
561
- )
562
- if args.save_top_performance and not args.no_eval:
563
- update_top_k_performance(
564
- filtered_metrics,
565
- current_top_k_ckpt_metrics,
566
- args,
567
- checkpoint_dict,
568
- bignumbetter=True,
569
- )
570
-
571
- if args.wandb and is_master(args):
572
- wandb.finish()
573
-
574
-
575
- def copy_codebase(args):
576
- from shutil import copytree, ignore_patterns
577
-
578
- new_code_path = os.path.join(args.logs, args.name, "code")
579
- if os.path.exists(new_code_path):
580
- print(
581
- f"Error. Experiment already exists at {new_code_path}. Use --name to specify a new experiment."
582
- )
583
- return -1
584
- print(f"Copying codebase to {new_code_path}")
585
- current_code_path = os.path.realpath(__file__)
586
- for _ in range(3):
587
- current_code_path = os.path.dirname(current_code_path)
588
- copytree(
589
- current_code_path, new_code_path, ignore=ignore_patterns("log", "logs", "wandb")
590
- )
591
- print("Done copying code.")
592
- return 1
593
-
594
-
595
- if __name__ == "__main__":
596
- main()
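The training entry point above keeps a dictionary of the k best validation metrics and rewrites `epoch_top_{i}.pt` files through `update_top_k_performance` / `maintain_ckpts`. A minimal sketch of that bookkeeping in isolation, assuming the CLAP `training` package is importable (i.e. the `clap` directory is the working directory) and using a throwaway checkpoint directory; the metric values are made up for illustration:

```py
# Hypothetical, self-contained illustration of the top-k checkpoint bookkeeping above.
import argparse
import tempfile

from training.main import update_top_k_performance  # assumes clap/ is on the path

args = argparse.Namespace(checkpoint_path=tempfile.mkdtemp())
top_k = {i: 0 for i in range(3)}  # same initialisation as in main() when args.save_top_performance == 3
for epoch, metric in enumerate([0.12, 0.31, 0.25, 0.40]):
    ckpt = {"epoch": epoch, "state_dict": {}}  # stand-in for the real checkpoint dict
    top_k, _ = update_top_k_performance(metric, top_k, args, ckpt, bignumbetter=True)
print(top_k)  # the three best metrics seen so far, e.g. {0: 0.4, 1: 0.31, 2: 0.25}
```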
spaces/AIGC-Audio/AudioGPT/mono2binaural/src/models.py DELETED
@@ -1,110 +0,0 @@
1
- import numpy as np
2
- import scipy.linalg
3
- from scipy.spatial.transform import Rotation as R
4
- import torch as th
5
- import torch.nn as nn
6
- import torch.nn.functional as F
7
- from src.warping import GeometricTimeWarper, MonotoneTimeWarper
8
- from src.utils import Net
9
-
10
-
11
- class GeometricWarper(nn.Module):
12
- def __init__(self, sampling_rate=48000):
13
- super().__init__()
14
- self.warper = GeometricTimeWarper(sampling_rate=sampling_rate)
15
-
16
- def _transmitter_mouth(self, view):
17
- # offset between tracking markers and real mouth position in the dataset
18
- mouth_offset = np.array([0.09, 0, -0.20])
19
- quat = view[:, 3:, :].transpose(2, 1).contiguous().detach().cpu().view(-1, 4).numpy()
20
- # make sure zero-padded values are set to non-zero values (else scipy raises an exception)
21
- norms = scipy.linalg.norm(quat, axis=1)
22
- eps_val = (norms == 0).astype(np.float32)
23
- quat = quat + eps_val[:, None]
24
- transmitter_rot_mat = R.from_quat(quat)
25
- transmitter_mouth = transmitter_rot_mat.apply(mouth_offset, inverse=True)
26
- transmitter_mouth = th.Tensor(transmitter_mouth).view(view.shape[0], -1, 3).transpose(2, 1).contiguous()
27
- if view.is_cuda:
28
- transmitter_mouth = transmitter_mouth.cuda()
29
- return transmitter_mouth
30
-
31
- def _3d_displacements(self, view):
32
- transmitter_mouth = self._transmitter_mouth(view)
33
- # offset between tracking markers and ears in the dataset
34
- left_ear_offset = th.Tensor([0, -0.08, -0.22]).cuda() if view.is_cuda else th.Tensor([0, -0.08, -0.22])
35
- right_ear_offset = th.Tensor([0, 0.08, -0.22]).cuda() if view.is_cuda else th.Tensor([0, 0.08, -0.22])
36
- # compute displacements between transmitter mouth and receiver left/right ear
37
- displacement_left = view[:, 0:3, :] + transmitter_mouth - left_ear_offset[None, :, None]
38
- displacement_right = view[:, 0:3, :] + transmitter_mouth - right_ear_offset[None, :, None]
39
- displacement = th.stack([displacement_left, displacement_right], dim=1)
40
- return displacement
41
-
42
- def _warpfield(self, view, seq_length):
43
- return self.warper.displacements2warpfield(self._3d_displacements(view), seq_length)
44
-
45
- def forward(self, mono, view):
46
- '''
47
- :param mono: input signal as tensor of shape B x 1 x T
48
- :param view: rx/tx position/orientation as tensor of shape B x 7 x K (K = T / 400)
49
- :return: warped: warped left/right ear signal as tensor of shape B x 2 x T
50
- '''
51
- return self.warper(th.cat([mono, mono], dim=1), self._3d_displacements(view))
52
-
53
-
54
- class Warpnet(nn.Module):
55
- def __init__(self, layers=4, channels=64, view_dim=7):
56
- super().__init__()
57
- self.layers = [nn.Conv1d(view_dim if l == 0 else channels, channels, kernel_size=2) for l in range(layers)]
58
- self.layers = nn.ModuleList(self.layers)
59
- self.linear = nn.Conv1d(channels, 2, kernel_size=1)
60
- self.neural_warper = MonotoneTimeWarper()
61
- self.geometric_warper = GeometricWarper()
62
-
63
- def neural_warpfield(self, view, seq_length):
64
- warpfield = view
65
- for layer in self.layers:
66
- warpfield = F.pad(warpfield, pad=[1, 0])
67
- warpfield = F.relu(layer(warpfield))
68
- warpfield = self.linear(warpfield)
69
- warpfield = F.interpolate(warpfield, size=seq_length)
70
- return warpfield
71
-
72
- def forward(self, mono, view):
73
- '''
74
- :param mono: input signal as tensor of shape B x 1 x T
75
- :param view: rx/tx position/orientation as tensor of shape B x 7 x K (K = T / 400)
76
- :return: warped: warped left/right ear signal as tensor of shape B x 2 x T
77
- '''
78
- geometric_warpfield = self.geometric_warper._warpfield(view, mono.shape[-1])
79
- neural_warpfield = self.neural_warpfield(view, mono.shape[-1])
80
- warpfield = geometric_warpfield + neural_warpfield
81
- # ensure causality
82
- warpfield = -F.relu(-warpfield) # the predicted warp
83
- warped = self.neural_warper(th.cat([mono, mono], dim=1), warpfield)
84
- return warped
85
-
86
- class BinauralNetwork(Net):
87
- def __init__(self,
88
- view_dim=7,
89
- warpnet_layers=4,
90
- warpnet_channels=64,
91
- model_name='binaural_network',
92
- use_cuda=True):
93
- super().__init__(model_name, use_cuda)
94
- self.warper = Warpnet(warpnet_layers, warpnet_channels)
95
- if self.use_cuda:
96
- self.cuda()
97
-
98
- def forward(self, mono, view):
99
- '''
100
- :param mono: the input signal as a B x 1 x T tensor
101
- :param view: the receiver/transmitter position as a B x 7 x T tensor
102
- :return: out: the binaural output produced by the network
103
- intermediate: a two-channel audio signal obtained from the output of each intermediate layer
104
- as a list of B x 2 x T tensors
105
- '''
106
- # print('mono ', mono.shape)
107
- # print('view ', view.shape)
108
- warped = self.warper(mono, view)
109
- # print('warped ', warped.shape)
110
- return warped
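The docstrings above pin down the tensor contract: `mono` is B x 1 x T, `view` is B x 7 x K with K = T / 400 (3-D position plus a quaternion), and the output is a B x 2 x T binaural signal. A minimal CPU smoke-test sketch under those assumptions; it presumes this Space's `src` package is importable and that the warper internals accept the documented shapes:

```py
# Hypothetical smoke test for the deleted src/models.py, using only shapes from its docstrings.
import torch as th

from src.models import BinauralNetwork  # assumes the mono2binaural folder is on PYTHONPATH

net = BinauralNetwork(view_dim=7, warpnet_layers=4, warpnet_channels=64, use_cuda=False)
T = 4800                          # 0.1 s of 48 kHz audio
mono = th.randn(1, 1, T)          # B x 1 x T source signal
view = th.zeros(1, 7, T // 400)   # B x 7 x K pose track: xyz position + quaternion
view[:, 6, :] = 1.0               # identity quaternion (x, y, z, w) = (0, 0, 0, 1)
with th.no_grad():
    binaural = net(mono, view)    # expected shape: B x 2 x T
print(binaural.shape)
```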
spaces/AILab-CVC/SEED-LLaMA/scripts/start_backend_8b.sh DELETED
@@ -1,10 +0,0 @@
1
-
2
- python3 gradio_demo/seed_llama_flask.py \
3
- --image_transform configs/transform/clip_transform.yaml \
4
- --tokenizer configs/tokenizer/seed_llama_tokenizer.yaml \
5
- --model configs/llm/seed_llama_8b_8bit.yaml \
6
- --port 7890 \
7
- --llm_device cuda:0 \
8
- --tokenizer_device cuda:0 \
9
- --offload_encoder \
10
- --offload_decoder
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorpicker/Factory.js DELETED
@@ -1,13 +0,0 @@
1
- import ColorPicker from './ColorPicker.js';
2
- import ObjectFactory from '../../ObjectFactory.js';
3
- import SetValue from '../../../../plugins/utils/object/SetValue.js';
4
-
5
- ObjectFactory.register('colorPicker', function (config) {
6
- var gameObject = new ColorPicker(this.scene, config);
7
- this.scene.add.existing(gameObject);
8
- return gameObject;
9
- });
10
-
11
- SetValue(window, 'RexPlugins.UI.ColorPicker', ColorPicker);
12
-
13
- export default ColorPicker;
spaces/Aki004/herta-so-vits/vdecoder/nsf_hifigan/utils.py DELETED
@@ -1,68 +0,0 @@
1
- import glob
2
- import os
3
- import matplotlib
4
- import torch
5
- from torch.nn.utils import weight_norm
6
- matplotlib.use("Agg")
7
- import matplotlib.pylab as plt
8
-
9
-
10
- def plot_spectrogram(spectrogram):
11
- fig, ax = plt.subplots(figsize=(10, 2))
12
- im = ax.imshow(spectrogram, aspect="auto", origin="lower",
13
- interpolation='none')
14
- plt.colorbar(im, ax=ax)
15
-
16
- fig.canvas.draw()
17
- plt.close()
18
-
19
- return fig
20
-
21
-
22
- def init_weights(m, mean=0.0, std=0.01):
23
- classname = m.__class__.__name__
24
- if classname.find("Conv") != -1:
25
- m.weight.data.normal_(mean, std)
26
-
27
-
28
- def apply_weight_norm(m):
29
- classname = m.__class__.__name__
30
- if classname.find("Conv") != -1:
31
- weight_norm(m)
32
-
33
-
34
- def get_padding(kernel_size, dilation=1):
35
- return int((kernel_size*dilation - dilation)/2)
36
-
37
-
38
- def load_checkpoint(filepath, device):
39
- assert os.path.isfile(filepath)
40
- print("Loading '{}'".format(filepath))
41
- checkpoint_dict = torch.load(filepath, map_location=device)
42
- print("Complete.")
43
- return checkpoint_dict
44
-
45
-
46
- def save_checkpoint(filepath, obj):
47
- print("Saving checkpoint to {}".format(filepath))
48
- torch.save(obj, filepath)
49
- print("Complete.")
50
-
51
-
52
- def del_old_checkpoints(cp_dir, prefix, n_models=2):
53
- pattern = os.path.join(cp_dir, prefix + '????????')
54
- cp_list = glob.glob(pattern) # get checkpoint paths
55
- cp_list = sorted(cp_list)# sort by iter
56
- if len(cp_list) > n_models: # if more than n_models models are found
57
- for cp in cp_list[:-n_models]: # delete the oldest models other than the latest n_models
58
- open(cp, 'w').close()# empty file contents
59
- os.unlink(cp)# delete file (move to trash when using Colab)
60
-
61
-
62
- def scan_checkpoint(cp_dir, prefix):
63
- pattern = os.path.join(cp_dir, prefix + '????????')
64
- cp_list = glob.glob(pattern)
65
- if len(cp_list) == 0:
66
- return None
67
- return sorted(cp_list)[-1]
68
-
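The checkpoint helpers above are normally chained as scan -> load: `scan_checkpoint` picks the newest file matching `<prefix>????????` and `load_checkpoint` deserialises it. A short resume sketch under those semantics; the directory, the `g_` prefix, and the `generator` key are assumptions for illustration, not guaranteed by this file:

```py
# Hypothetical resume flow built only on the helpers defined above.
import torch

from vdecoder.nsf_hifigan.utils import load_checkpoint, scan_checkpoint

cp_dir = "logs/nsf_hifigan"              # assumed checkpoint directory
latest = scan_checkpoint(cp_dir, "g_")   # newest file named g_????????, or None if none exist
if latest is not None:
    state = load_checkpoint(latest, torch.device("cpu"))
    # HiFi-GAN-style checkpoints conventionally store the model under "generator" (assumption)
    generator_state = state.get("generator", state)
```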
spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/GUI.py DELETED
@@ -1,103 +0,0 @@
1
-
2
-
3
- from tkinter import Tk,Frame ,Label,Button,messagebox,Canvas,Text,Scale
4
- from tkinter import HORIZONTAL
5
-
6
- class View():
7
- def __init__(self,master):
8
-
9
- self.width=600
10
- self.height=600
11
-
12
-
13
- self.root=master
14
- self.root.geometry("600x600")
15
-
16
- self.left_frame=Frame(self.root,width=600)
17
- self.left_frame.pack_propagate(0)
18
- self.left_frame.pack(fill='both', side='left', expand='True')
19
-
20
- self.retrieval_frame=Frame(self.root,bg='snow3')
21
- self.retrieval_frame.pack_propagate(0)
22
- self.retrieval_frame.pack(fill='both', side='right', expand='True')
23
-
24
- self.bg_frame=Frame(self.left_frame,bg='snow3',height=600,width=600)
25
- self.bg_frame.pack_propagate(0)
26
- self.bg_frame.pack(fill='both', side='top', expand='True')
27
-
28
- self.command_frame=Frame(self.left_frame,bg='snow3')
29
- self.command_frame.pack_propagate(0)
30
- self.command_frame.pack(fill='both', side='bottom', expand='True')
31
- # self.command_frame.grid(row=1, column=0,padx=0, pady=0)
32
-
33
- self.bg=Canvas(self.bg_frame,width=self.width,height=self.height, bg='gray')
34
- self.bg.place(relx=0.5, rely=0.5, anchor='center')
35
-
36
- self.mani=Canvas(self.retrieval_frame,width=1024,height=1024, bg='gray')
37
- self.mani.grid(row=0, column=0,padx=0, pady=42)
38
-
39
- self.SetCommand()
40
-
41
-
42
-
43
-
44
- def run(self):
45
- self.root.mainloop()
46
-
47
- def helloCallBack(self):
48
- category=self.set_category.get()
49
- messagebox.showinfo( "Hello Python",category)
50
-
51
- def SetCommand(self):
52
-
53
- tmp = Label(self.command_frame, text="neutral", width=10 ,bg='snow3')
54
- tmp.grid(row=1, column=0,padx=10, pady=10)
55
-
56
- tmp = Label(self.command_frame, text="a photo of a", width=10 ,bg='snow3')
57
- tmp.grid(row=1, column=1,padx=10, pady=10)
58
-
59
- self.neutral = Text ( self.command_frame, height=2, width=30)
60
- self.neutral.grid(row=1, column=2,padx=10, pady=10)
61
-
62
-
63
- tmp = Label(self.command_frame, text="target", width=10 ,bg='snow3')
64
- tmp.grid(row=2, column=0,padx=10, pady=10)
65
-
66
- tmp = Label(self.command_frame, text="a photo of a", width=10 ,bg='snow3')
67
- tmp.grid(row=2, column=1,padx=10, pady=10)
68
-
69
- self.target = Text ( self.command_frame, height=2, width=30)
70
- self.target.grid(row=2, column=2,padx=10, pady=10)
71
-
72
- tmp = Label(self.command_frame, text="strength", width=10 ,bg='snow3')
73
- tmp.grid(row=3, column=0,padx=10, pady=10)
74
-
75
- self.alpha = Scale(self.command_frame, from_=-15, to=25, orient=HORIZONTAL,bg='snow3', length=250,resolution=0.01)
76
- self.alpha.grid(row=3, column=2,padx=10, pady=10)
77
-
78
-
79
- tmp = Label(self.command_frame, text="disentangle", width=10 ,bg='snow3')
80
- tmp.grid(row=4, column=0,padx=10, pady=10)
81
-
82
- self.beta = Scale(self.command_frame, from_=0.08, to=0.4, orient=HORIZONTAL,bg='snow3', length=250,resolution=0.001)
83
- self.beta.grid(row=4, column=2,padx=10, pady=10)
84
-
85
- self.reset = Button(self.command_frame, text='Reset')
86
- self.reset.grid(row=5, column=1,padx=10, pady=10)
87
-
88
-
89
- self.set_init = Button(self.command_frame, text='Accept')
90
- self.set_init.grid(row=5, column=2,padx=10, pady=10)
91
-
92
- #%%
93
- if __name__ == "__main__":
94
- master=Tk()
95
- self=View(master)
96
- self.run()
97
-
98
-
99
-
100
-
101
-
102
-
103
-
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/bit_diffusion.py DELETED
@@ -1,264 +0,0 @@
1
- from typing import Optional, Tuple, Union
2
-
3
- import torch
4
- from einops import rearrange, reduce
5
-
6
- from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
7
- from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
8
- from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
9
-
10
-
11
- BITS = 8
12
-
13
-
14
- # convert to bit representations and back taken from https://github.com/lucidrains/bit-diffusion/blob/main/bit_diffusion/bit_diffusion.py
15
- def decimal_to_bits(x, bits=BITS):
16
- """expects image tensor ranging from 0 to 1, outputs bit tensor ranging from -1 to 1"""
17
- device = x.device
18
-
19
- x = (x * 255).int().clamp(0, 255)
20
-
21
- mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
22
- mask = rearrange(mask, "d -> d 1 1")
23
- x = rearrange(x, "b c h w -> b c 1 h w")
24
-
25
- bits = ((x & mask) != 0).float()
26
- bits = rearrange(bits, "b c d h w -> b (c d) h w")
27
- bits = bits * 2 - 1
28
- return bits
29
-
30
-
31
- def bits_to_decimal(x, bits=BITS):
32
- """expects bits from -1 to 1, outputs image tensor from 0 to 1"""
33
- device = x.device
34
-
35
- x = (x > 0).int()
36
- mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)
37
-
38
- mask = rearrange(mask, "d -> d 1 1")
39
- x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
40
- dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
41
- return (dec / 255).clamp(0.0, 1.0)
42
-
43
-
44
- # modified scheduler step functions for clamping the predicted x_0 between -bit_scale and +bit_scale
45
- def ddim_bit_scheduler_step(
46
- self,
47
- model_output: torch.FloatTensor,
48
- timestep: int,
49
- sample: torch.FloatTensor,
50
- eta: float = 0.0,
51
- use_clipped_model_output: bool = True,
52
- generator=None,
53
- return_dict: bool = True,
54
- ) -> Union[DDIMSchedulerOutput, Tuple]:
55
- """
56
- Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
57
- process from the learned model outputs (most often the predicted noise).
58
- Args:
59
- model_output (`torch.FloatTensor`): direct output from learned diffusion model.
60
- timestep (`int`): current discrete timestep in the diffusion chain.
61
- sample (`torch.FloatTensor`):
62
- current instance of sample being created by diffusion process.
63
- eta (`float`): weight of noise for added noise in diffusion step.
64
- use_clipped_model_output (`bool`): TODO
65
- generator: random number generator.
66
- return_dict (`bool`): option for returning tuple rather than DDIMSchedulerOutput class
67
- Returns:
68
- [`~schedulers.scheduling_utils.DDIMSchedulerOutput`] or `tuple`:
69
- [`~schedulers.scheduling_utils.DDIMSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When
70
- returning a tuple, the first element is the sample tensor.
71
- """
72
- if self.num_inference_steps is None:
73
- raise ValueError(
74
- "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
75
- )
76
-
77
- # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
78
- # Ideally, read DDIM paper in-detail understanding
79
-
80
- # Notation (<variable name> -> <name in paper>
81
- # - pred_noise_t -> e_theta(x_t, t)
82
- # - pred_original_sample -> f_theta(x_t, t) or x_0
83
- # - std_dev_t -> sigma_t
84
- # - eta -> η
85
- # - pred_sample_direction -> "direction pointing to x_t"
86
- # - pred_prev_sample -> "x_t-1"
87
-
88
- # 1. get previous step value (=t-1)
89
- prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps
90
-
91
- # 2. compute alphas, betas
92
- alpha_prod_t = self.alphas_cumprod[timestep]
93
- alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
94
-
95
- beta_prod_t = 1 - alpha_prod_t
96
-
97
- # 3. compute predicted original sample from predicted noise also called
98
- # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
99
- pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)
100
-
101
- # 4. Clip "predicted x_0"
102
- scale = self.bit_scale
103
- if self.config.clip_sample:
104
- pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)
105
-
106
- # 5. compute variance: "sigma_t(η)" -> see formula (16)
107
- # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
108
- variance = self._get_variance(timestep, prev_timestep)
109
- std_dev_t = eta * variance ** (0.5)
110
-
111
- if use_clipped_model_output:
112
- # the model_output is always re-derived from the clipped x_0 in Glide
113
- model_output = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5)
114
-
115
- # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
116
- pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * model_output
117
-
118
- # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
119
- prev_sample = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction
120
-
121
- if eta > 0:
122
- # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
123
- device = model_output.device if torch.is_tensor(model_output) else "cpu"
124
- noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
125
- variance = self._get_variance(timestep, prev_timestep) ** (0.5) * eta * noise
126
-
127
- prev_sample = prev_sample + variance
128
-
129
- if not return_dict:
130
- return (prev_sample,)
131
-
132
- return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
133
-
134
-
135
- def ddpm_bit_scheduler_step(
136
- self,
137
- model_output: torch.FloatTensor,
138
- timestep: int,
139
- sample: torch.FloatTensor,
140
- prediction_type="epsilon",
141
- generator=None,
142
- return_dict: bool = True,
143
- ) -> Union[DDPMSchedulerOutput, Tuple]:
144
- """
145
- Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
146
- process from the learned model outputs (most often the predicted noise).
147
- Args:
148
- model_output (`torch.FloatTensor`): direct output from learned diffusion model.
149
- timestep (`int`): current discrete timestep in the diffusion chain.
150
- sample (`torch.FloatTensor`):
151
- current instance of sample being created by diffusion process.
152
- prediction_type (`str`, default `epsilon`):
153
- indicates whether the model predicts the noise (epsilon), or the samples (`sample`).
154
- generator: random number generator.
155
- return_dict (`bool`): option for returning tuple rather than DDPMSchedulerOutput class
156
- Returns:
157
- [`~schedulers.scheduling_utils.DDPMSchedulerOutput`] or `tuple`:
158
- [`~schedulers.scheduling_utils.DDPMSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When
159
- returning a tuple, the first element is the sample tensor.
160
- """
161
- t = timestep
162
-
163
- if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
164
- model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
165
- else:
166
- predicted_variance = None
167
-
168
- # 1. compute alphas, betas
169
- alpha_prod_t = self.alphas_cumprod[t]
170
- alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
171
- beta_prod_t = 1 - alpha_prod_t
172
- beta_prod_t_prev = 1 - alpha_prod_t_prev
173
-
174
- # 2. compute predicted original sample from predicted noise also called
175
- # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
176
- if prediction_type == "epsilon":
177
- pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)
178
- elif prediction_type == "sample":
179
- pred_original_sample = model_output
180
- else:
181
- raise ValueError(f"Unsupported prediction_type {prediction_type}.")
182
-
183
- # 3. Clip "predicted x_0"
184
- scale = self.bit_scale
185
- if self.config.clip_sample:
186
- pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)
187
-
188
- # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
189
- # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
190
- pred_original_sample_coeff = (alpha_prod_t_prev ** (0.5) * self.betas[t]) / beta_prod_t
191
- current_sample_coeff = self.alphas[t] ** (0.5) * beta_prod_t_prev / beta_prod_t
192
-
193
- # 5. Compute predicted previous sample µ_t
194
- # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
195
- pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
196
-
197
- # 6. Add noise
198
- variance = 0
199
- if t > 0:
200
- noise = torch.randn(
201
- model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
202
- ).to(model_output.device)
203
- variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise
204
-
205
- pred_prev_sample = pred_prev_sample + variance
206
-
207
- if not return_dict:
208
- return (pred_prev_sample,)
209
-
210
- return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
211
-
212
-
213
- class BitDiffusion(DiffusionPipeline):
214
- def __init__(
215
- self,
216
- unet: UNet2DConditionModel,
217
- scheduler: Union[DDIMScheduler, DDPMScheduler],
218
- bit_scale: Optional[float] = 1.0,
219
- ):
220
- super().__init__()
221
- self.bit_scale = bit_scale
222
- self.scheduler.step = (
223
- ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
224
- )
225
-
226
- self.register_modules(unet=unet, scheduler=scheduler)
227
-
228
- @torch.no_grad()
229
- def __call__(
230
- self,
231
- height: Optional[int] = 256,
232
- width: Optional[int] = 256,
233
- num_inference_steps: Optional[int] = 50,
234
- generator: Optional[torch.Generator] = None,
235
- batch_size: Optional[int] = 1,
236
- output_type: Optional[str] = "pil",
237
- return_dict: bool = True,
238
- **kwargs,
239
- ) -> Union[Tuple, ImagePipelineOutput]:
240
- latents = torch.randn(
241
- (batch_size, self.unet.config.in_channels, height, width),
242
- generator=generator,
243
- )
244
- latents = decimal_to_bits(latents) * self.bit_scale
245
- latents = latents.to(self.device)
246
-
247
- self.scheduler.set_timesteps(num_inference_steps)
248
-
249
- for t in self.progress_bar(self.scheduler.timesteps):
250
- # predict the noise residual
251
- noise_pred = self.unet(latents, t).sample
252
-
253
- # compute the previous noisy sample x_t -> x_t-1
254
- latents = self.scheduler.step(noise_pred, t, latents).prev_sample
255
-
256
- image = bits_to_decimal(latents)
257
-
258
- if output_type == "pil":
259
- image = self.numpy_to_pil(image)
260
-
261
- if not return_dict:
262
- return (image,)
263
-
264
- return ImagePipelineOutput(images=image)
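
The bit-conversion helpers in the deleted pipeline above are self-contained, so their behaviour is easy to sanity-check. Below is a minimal round-trip sketch, not from the original repository; it assumes torch and einops are installed and that decimal_to_bits and bits_to_decimal from the file above are in scope. Because images are quantized to 8 bits per channel, the round trip is exact up to that quantization.

import torch

x = torch.rand(1, 3, 8, 8)             # image tensor in [0, 1]
bits = decimal_to_bits(x)              # shape (1, 3 * 8, 8, 8), values in {-1, +1}
recovered = bits_to_decimal(bits)      # back to [0, 1]
expected = (x * 255).int().clamp(0, 255) / 255   # 8-bit quantized reference
assert torch.allclose(recovered, expected)

The pipeline class itself then samples in this bit space: the monkey-patched scheduler steps clamp the predicted x_0 to ±bit_scale (when clip_sample is enabled), and bits_to_decimal converts the final latents back to an image.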
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/research_projects/onnxruntime/textual_inversion/textual_inversion.py DELETED
@@ -1,946 +0,0 @@
1
- #!/usr/bin/env python
2
- # coding=utf-8
3
- # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
4
- #
5
- # Licensed under the Apache License, Version 2.0 (the "License");
6
- # you may not use this file except in compliance with the License.
7
- # You may obtain a copy of the License at
8
- #
9
- # http://www.apache.org/licenses/LICENSE-2.0
10
- #
11
- # Unless required by applicable law or agreed to in writing, software
12
- # distributed under the License is distributed on an "AS IS" BASIS,
13
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
- # See the License for the specific language governing permissions and
15
-
16
- import argparse
17
- import logging
18
- import math
19
- import os
20
- import random
21
- import warnings
22
- from pathlib import Path
23
-
24
- import numpy as np
25
- import PIL
26
- import torch
27
- import torch.nn.functional as F
28
- import torch.utils.checkpoint
29
- import transformers
30
- from accelerate import Accelerator
31
- from accelerate.logging import get_logger
32
- from accelerate.utils import ProjectConfiguration, set_seed
33
- from huggingface_hub import create_repo, upload_folder
34
- from onnxruntime.training.optim.fp16_optimizer import FP16_Optimizer as ORT_FP16_Optimizer
35
- from onnxruntime.training.ortmodule import ORTModule
36
-
37
- # TODO: remove and import from diffusers.utils when the new version of diffusers is released
38
- from packaging import version
39
- from PIL import Image
40
- from torch.utils.data import Dataset
41
- from torchvision import transforms
42
- from tqdm.auto import tqdm
43
- from transformers import CLIPTextModel, CLIPTokenizer
44
-
45
- import diffusers
46
- from diffusers import (
47
- AutoencoderKL,
48
- DDPMScheduler,
49
- DiffusionPipeline,
50
- DPMSolverMultistepScheduler,
51
- StableDiffusionPipeline,
52
- UNet2DConditionModel,
53
- )
54
- from diffusers.optimization import get_scheduler
55
- from diffusers.utils import check_min_version, is_wandb_available
56
- from diffusers.utils.import_utils import is_xformers_available
57
-
58
-
59
- if is_wandb_available():
60
- import wandb
61
-
62
- if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
63
- PIL_INTERPOLATION = {
64
- "linear": PIL.Image.Resampling.BILINEAR,
65
- "bilinear": PIL.Image.Resampling.BILINEAR,
66
- "bicubic": PIL.Image.Resampling.BICUBIC,
67
- "lanczos": PIL.Image.Resampling.LANCZOS,
68
- "nearest": PIL.Image.Resampling.NEAREST,
69
- }
70
- else:
71
- PIL_INTERPOLATION = {
72
- "linear": PIL.Image.LINEAR,
73
- "bilinear": PIL.Image.BILINEAR,
74
- "bicubic": PIL.Image.BICUBIC,
75
- "lanczos": PIL.Image.LANCZOS,
76
- "nearest": PIL.Image.NEAREST,
77
- }
78
- # ------------------------------------------------------------------------------
79
-
80
-
81
- # Will error if the minimal version of diffusers is not installed. Remove at your own risks.
82
- check_min_version("0.17.0.dev0")
83
-
84
- logger = get_logger(__name__)
85
-
86
-
87
- def save_model_card(repo_id: str, images=None, base_model=str, repo_folder=None):
88
- img_str = ""
89
- for i, image in enumerate(images):
90
- image.save(os.path.join(repo_folder, f"image_{i}.png"))
91
- img_str += f"![img_{i}](./image_{i}.png)\n"
92
-
93
- yaml = f"""
94
- ---
95
- license: creativeml-openrail-m
96
- base_model: {base_model}
97
- tags:
98
- - stable-diffusion
99
- - stable-diffusion-diffusers
100
- - text-to-image
101
- - diffusers
102
- - textual_inversion
103
- inference: true
104
- ---
105
- """
106
- model_card = f"""
107
- # Textual inversion text2image fine-tuning - {repo_id}
108
- These are textual inversion adaption weights for {base_model}. You can find some example images in the following. \n
109
- {img_str}
110
- """
111
- with open(os.path.join(repo_folder, "README.md"), "w") as f:
112
- f.write(yaml + model_card)
113
-
114
-
115
- def log_validation(text_encoder, tokenizer, unet, vae, args, accelerator, weight_dtype, epoch):
116
- logger.info(
117
- f"Running validation... \n Generating {args.num_validation_images} images with prompt:"
118
- f" {args.validation_prompt}."
119
- )
120
- # create pipeline (note: unet and vae are loaded again in float32)
121
- pipeline = DiffusionPipeline.from_pretrained(
122
- args.pretrained_model_name_or_path,
123
- text_encoder=accelerator.unwrap_model(text_encoder),
124
- tokenizer=tokenizer,
125
- unet=unet,
126
- vae=vae,
127
- safety_checker=None,
128
- revision=args.revision,
129
- torch_dtype=weight_dtype,
130
- )
131
- pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
132
- pipeline = pipeline.to(accelerator.device)
133
- pipeline.set_progress_bar_config(disable=True)
134
-
135
- # run inference
136
- generator = None if args.seed is None else torch.Generator(device=accelerator.device).manual_seed(args.seed)
137
- images = []
138
- for _ in range(args.num_validation_images):
139
- with torch.autocast("cuda"):
140
- image = pipeline(args.validation_prompt, num_inference_steps=25, generator=generator).images[0]
141
- images.append(image)
142
-
143
- for tracker in accelerator.trackers:
144
- if tracker.name == "tensorboard":
145
- np_images = np.stack([np.asarray(img) for img in images])
146
- tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC")
147
- if tracker.name == "wandb":
148
- tracker.log(
149
- {
150
- "validation": [
151
- wandb.Image(image, caption=f"{i}: {args.validation_prompt}") for i, image in enumerate(images)
152
- ]
153
- }
154
- )
155
-
156
- del pipeline
157
- torch.cuda.empty_cache()
158
- return images
159
-
160
-
161
- def save_progress(text_encoder, placeholder_token_ids, accelerator, args, save_path):
162
- logger.info("Saving embeddings")
163
- learned_embeds = (
164
- accelerator.unwrap_model(text_encoder)
165
- .get_input_embeddings()
166
- .weight[min(placeholder_token_ids) : max(placeholder_token_ids) + 1]
167
- )
168
- learned_embeds_dict = {args.placeholder_token: learned_embeds.detach().cpu()}
169
- torch.save(learned_embeds_dict, save_path)
170
-
171
-
172
- def parse_args():
173
- parser = argparse.ArgumentParser(description="Simple example of a training script.")
174
- parser.add_argument(
175
- "--save_steps",
176
- type=int,
177
- default=500,
178
- help="Save learned_embeds.bin every X updates steps.",
179
- )
180
- parser.add_argument(
181
- "--save_as_full_pipeline",
182
- action="store_true",
183
- help="Save the complete stable diffusion pipeline.",
184
- )
185
- parser.add_argument(
186
- "--num_vectors",
187
- type=int,
188
- default=1,
189
- help="How many textual inversion vectors shall be used to learn the concept.",
190
- )
191
- parser.add_argument(
192
- "--pretrained_model_name_or_path",
193
- type=str,
194
- default=None,
195
- required=True,
196
- help="Path to pretrained model or model identifier from huggingface.co/models.",
197
- )
198
- parser.add_argument(
199
- "--revision",
200
- type=str,
201
- default=None,
202
- required=False,
203
- help="Revision of pretrained model identifier from huggingface.co/models.",
204
- )
205
- parser.add_argument(
206
- "--tokenizer_name",
207
- type=str,
208
- default=None,
209
- help="Pretrained tokenizer name or path if not the same as model_name",
210
- )
211
- parser.add_argument(
212
- "--train_data_dir", type=str, default=None, required=True, help="A folder containing the training data."
213
- )
214
- parser.add_argument(
215
- "--placeholder_token",
216
- type=str,
217
- default=None,
218
- required=True,
219
- help="A token to use as a placeholder for the concept.",
220
- )
221
- parser.add_argument(
222
- "--initializer_token", type=str, default=None, required=True, help="A token to use as initializer word."
223
- )
224
- parser.add_argument("--learnable_property", type=str, default="object", help="Choose between 'object' and 'style'")
225
- parser.add_argument("--repeats", type=int, default=100, help="How many times to repeat the training data.")
226
- parser.add_argument(
227
- "--output_dir",
228
- type=str,
229
- default="text-inversion-model",
230
- help="The output directory where the model predictions and checkpoints will be written.",
231
- )
232
- parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
233
- parser.add_argument(
234
- "--resolution",
235
- type=int,
236
- default=512,
237
- help=(
238
- "The resolution for input images, all the images in the train/validation dataset will be resized to this"
239
- " resolution"
240
- ),
241
- )
242
- parser.add_argument(
243
- "--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution."
244
- )
245
- parser.add_argument(
246
- "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader."
247
- )
248
- parser.add_argument("--num_train_epochs", type=int, default=100)
249
- parser.add_argument(
250
- "--max_train_steps",
251
- type=int,
252
- default=5000,
253
- help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
254
- )
255
- parser.add_argument(
256
- "--gradient_accumulation_steps",
257
- type=int,
258
- default=1,
259
- help="Number of updates steps to accumulate before performing a backward/update pass.",
260
- )
261
- parser.add_argument(
262
- "--gradient_checkpointing",
263
- action="store_true",
264
- help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
265
- )
266
- parser.add_argument(
267
- "--learning_rate",
268
- type=float,
269
- default=1e-4,
270
- help="Initial learning rate (after the potential warmup period) to use.",
271
- )
272
- parser.add_argument(
273
- "--scale_lr",
274
- action="store_true",
275
- default=False,
276
- help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
277
- )
278
- parser.add_argument(
279
- "--lr_scheduler",
280
- type=str,
281
- default="constant",
282
- help=(
283
- 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
284
- ' "constant", "constant_with_warmup"]'
285
- ),
286
- )
287
- parser.add_argument(
288
- "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
289
- )
290
- parser.add_argument(
291
- "--dataloader_num_workers",
292
- type=int,
293
- default=0,
294
- help=(
295
- "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
296
- ),
297
- )
298
- parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
299
- parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
300
- parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
301
- parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
302
- parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
303
- parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
304
- parser.add_argument(
305
- "--hub_model_id",
306
- type=str,
307
- default=None,
308
- help="The name of the repository to keep in sync with the local `output_dir`.",
309
- )
310
- parser.add_argument(
311
- "--logging_dir",
312
- type=str,
313
- default="logs",
314
- help=(
315
- "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
316
- " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
317
- ),
318
- )
319
- parser.add_argument(
320
- "--mixed_precision",
321
- type=str,
322
- default="no",
323
- choices=["no", "fp16", "bf16"],
324
- help=(
325
- "Whether to use mixed precision. Choose"
326
- "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
327
- "and an Nvidia Ampere GPU."
328
- ),
329
- )
330
- parser.add_argument(
331
- "--allow_tf32",
332
- action="store_true",
333
- help=(
334
- "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
335
- " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
336
- ),
337
- )
338
- parser.add_argument(
339
- "--report_to",
340
- type=str,
341
- default="tensorboard",
342
- help=(
343
- 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
344
- ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
345
- ),
346
- )
347
- parser.add_argument(
348
- "--validation_prompt",
349
- type=str,
350
- default=None,
351
- help="A prompt that is used during validation to verify that the model is learning.",
352
- )
353
- parser.add_argument(
354
- "--num_validation_images",
355
- type=int,
356
- default=4,
357
- help="Number of images that should be generated during validation with `validation_prompt`.",
358
- )
359
- parser.add_argument(
360
- "--validation_steps",
361
- type=int,
362
- default=100,
363
- help=(
364
- "Run validation every X steps. Validation consists of running the prompt"
365
- " `args.validation_prompt` multiple times: `args.num_validation_images`"
366
- " and logging the images."
367
- ),
368
- )
369
- parser.add_argument(
370
- "--validation_epochs",
371
- type=int,
372
- default=None,
373
- help=(
374
- "Deprecated in favor of validation_steps. Run validation every X epochs. Validation consists of running the prompt"
375
- " `args.validation_prompt` multiple times: `args.num_validation_images`"
376
- " and logging the images."
377
- ),
378
- )
379
- parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
380
- parser.add_argument(
381
- "--checkpointing_steps",
382
- type=int,
383
- default=500,
384
- help=(
385
- "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming"
386
- " training using `--resume_from_checkpoint`."
387
- ),
388
- )
389
- parser.add_argument(
390
- "--checkpoints_total_limit",
391
- type=int,
392
- default=None,
393
- help=(
394
- "Max number of checkpoints to store. Passed as `total_limit` to the `Accelerator` `ProjectConfiguration`."
395
- " See Accelerator::save_state https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.save_state"
396
- " for more docs"
397
- ),
398
- )
399
- parser.add_argument(
400
- "--resume_from_checkpoint",
401
- type=str,
402
- default=None,
403
- help=(
404
- "Whether training should be resumed from a previous checkpoint. Use a path saved by"
405
- ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
406
- ),
407
- )
408
- parser.add_argument(
409
- "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
410
- )
411
-
412
- args = parser.parse_args()
413
- env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
414
- if env_local_rank != -1 and env_local_rank != args.local_rank:
415
- args.local_rank = env_local_rank
416
-
417
- if args.train_data_dir is None:
418
- raise ValueError("You must specify a train data directory.")
419
-
420
- return args
421
-
422
-
423
- imagenet_templates_small = [
424
- "a photo of a {}",
425
- "a rendering of a {}",
426
- "a cropped photo of the {}",
427
- "the photo of a {}",
428
- "a photo of a clean {}",
429
- "a photo of a dirty {}",
430
- "a dark photo of the {}",
431
- "a photo of my {}",
432
- "a photo of the cool {}",
433
- "a close-up photo of a {}",
434
- "a bright photo of the {}",
435
- "a cropped photo of a {}",
436
- "a photo of the {}",
437
- "a good photo of the {}",
438
- "a photo of one {}",
439
- "a close-up photo of the {}",
440
- "a rendition of the {}",
441
- "a photo of the clean {}",
442
- "a rendition of a {}",
443
- "a photo of a nice {}",
444
- "a good photo of a {}",
445
- "a photo of the nice {}",
446
- "a photo of the small {}",
447
- "a photo of the weird {}",
448
- "a photo of the large {}",
449
- "a photo of a cool {}",
450
- "a photo of a small {}",
451
- ]
452
-
453
- imagenet_style_templates_small = [
454
- "a painting in the style of {}",
455
- "a rendering in the style of {}",
456
- "a cropped painting in the style of {}",
457
- "the painting in the style of {}",
458
- "a clean painting in the style of {}",
459
- "a dirty painting in the style of {}",
460
- "a dark painting in the style of {}",
461
- "a picture in the style of {}",
462
- "a cool painting in the style of {}",
463
- "a close-up painting in the style of {}",
464
- "a bright painting in the style of {}",
465
- "a cropped painting in the style of {}",
466
- "a good painting in the style of {}",
467
- "a close-up painting in the style of {}",
468
- "a rendition in the style of {}",
469
- "a nice painting in the style of {}",
470
- "a small painting in the style of {}",
471
- "a weird painting in the style of {}",
472
- "a large painting in the style of {}",
473
- ]
474
-
475
-
476
- class TextualInversionDataset(Dataset):
477
- def __init__(
478
- self,
479
- data_root,
480
- tokenizer,
481
- learnable_property="object", # [object, style]
482
- size=512,
483
- repeats=100,
484
- interpolation="bicubic",
485
- flip_p=0.5,
486
- set="train",
487
- placeholder_token="*",
488
- center_crop=False,
489
- ):
490
- self.data_root = data_root
491
- self.tokenizer = tokenizer
492
- self.learnable_property = learnable_property
493
- self.size = size
494
- self.placeholder_token = placeholder_token
495
- self.center_crop = center_crop
496
- self.flip_p = flip_p
497
-
498
- self.image_paths = [os.path.join(self.data_root, file_path) for file_path in os.listdir(self.data_root)]
499
-
500
- self.num_images = len(self.image_paths)
501
- self._length = self.num_images
502
-
503
- if set == "train":
504
- self._length = self.num_images * repeats
505
-
506
- self.interpolation = {
507
- "linear": PIL_INTERPOLATION["linear"],
508
- "bilinear": PIL_INTERPOLATION["bilinear"],
509
- "bicubic": PIL_INTERPOLATION["bicubic"],
510
- "lanczos": PIL_INTERPOLATION["lanczos"],
511
- }[interpolation]
512
-
513
- self.templates = imagenet_style_templates_small if learnable_property == "style" else imagenet_templates_small
514
- self.flip_transform = transforms.RandomHorizontalFlip(p=self.flip_p)
515
-
516
- def __len__(self):
517
- return self._length
518
-
519
- def __getitem__(self, i):
520
- example = {}
521
- image = Image.open(self.image_paths[i % self.num_images])
522
-
523
- if not image.mode == "RGB":
524
- image = image.convert("RGB")
525
-
526
- placeholder_string = self.placeholder_token
527
- text = random.choice(self.templates).format(placeholder_string)
528
-
529
- example["input_ids"] = self.tokenizer(
530
- text,
531
- padding="max_length",
532
- truncation=True,
533
- max_length=self.tokenizer.model_max_length,
534
- return_tensors="pt",
535
- ).input_ids[0]
536
-
537
- # default to score-sde preprocessing
538
- img = np.array(image).astype(np.uint8)
539
-
540
- if self.center_crop:
541
- crop = min(img.shape[0], img.shape[1])
542
- (
543
- h,
544
- w,
545
- ) = (
546
- img.shape[0],
547
- img.shape[1],
548
- )
549
- img = img[(h - crop) // 2 : (h + crop) // 2, (w - crop) // 2 : (w + crop) // 2]
550
-
551
- image = Image.fromarray(img)
552
- image = image.resize((self.size, self.size), resample=self.interpolation)
553
-
554
- image = self.flip_transform(image)
555
- image = np.array(image).astype(np.uint8)
556
- image = (image / 127.5 - 1.0).astype(np.float32)
557
-
558
- example["pixel_values"] = torch.from_numpy(image).permute(2, 0, 1)
559
- return example
560
-
561
-
562
- def main():
563
- args = parse_args()
564
- logging_dir = os.path.join(args.output_dir, args.logging_dir)
565
- accelerator_project_config = ProjectConfiguration(
566
- total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
567
- )
568
-
569
- accelerator = Accelerator(
570
- gradient_accumulation_steps=args.gradient_accumulation_steps,
571
- mixed_precision=args.mixed_precision,
572
- log_with=args.report_to,
573
- project_config=accelerator_project_config,
574
- )
575
-
576
- if args.report_to == "wandb":
577
- if not is_wandb_available():
578
- raise ImportError("Make sure to install wandb if you want to use it for logging during training.")
579
-
580
- # Make one log on every process with the configuration for debugging.
581
- logging.basicConfig(
582
- format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
583
- datefmt="%m/%d/%Y %H:%M:%S",
584
- level=logging.INFO,
585
- )
586
- logger.info(accelerator.state, main_process_only=False)
587
- if accelerator.is_local_main_process:
588
- transformers.utils.logging.set_verbosity_warning()
589
- diffusers.utils.logging.set_verbosity_info()
590
- else:
591
- transformers.utils.logging.set_verbosity_error()
592
- diffusers.utils.logging.set_verbosity_error()
593
-
594
- # If passed along, set the training seed now.
595
- if args.seed is not None:
596
- set_seed(args.seed)
597
-
598
- # Handle the repository creation
599
- if accelerator.is_main_process:
600
- if args.output_dir is not None:
601
- os.makedirs(args.output_dir, exist_ok=True)
602
-
603
- if args.push_to_hub:
604
- repo_id = create_repo(
605
- repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token
606
- ).repo_id
607
-
608
- # Load tokenizer
609
- if args.tokenizer_name:
610
- tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name)
611
- elif args.pretrained_model_name_or_path:
612
- tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
613
-
614
- # Load scheduler and models
615
- noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
616
- text_encoder = CLIPTextModel.from_pretrained(
617
- args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision
618
- )
619
- vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision)
620
- unet = UNet2DConditionModel.from_pretrained(
621
- args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision
622
- )
623
-
624
- # Add the placeholder token in tokenizer
625
- placeholder_tokens = [args.placeholder_token]
626
-
627
- if args.num_vectors < 1:
628
- raise ValueError(f"--num_vectors has to be larger or equal to 1, but is {args.num_vectors}")
629
-
630
- # add dummy tokens for multi-vector
631
- additional_tokens = []
632
- for i in range(1, args.num_vectors):
633
- additional_tokens.append(f"{args.placeholder_token}_{i}")
634
- placeholder_tokens += additional_tokens
635
-
636
- num_added_tokens = tokenizer.add_tokens(placeholder_tokens)
637
- if num_added_tokens != args.num_vectors:
638
- raise ValueError(
639
- f"The tokenizer already contains the token {args.placeholder_token}. Please pass a different"
640
- " `placeholder_token` that is not already in the tokenizer."
641
- )
642
-
643
- # Convert the initializer_token, placeholder_token to ids
644
- token_ids = tokenizer.encode(args.initializer_token, add_special_tokens=False)
645
- # Check if initializer_token is a single token or a sequence of tokens
646
- if len(token_ids) > 1:
647
- raise ValueError("The initializer token must be a single token.")
648
-
649
- initializer_token_id = token_ids[0]
650
- placeholder_token_ids = tokenizer.convert_tokens_to_ids(placeholder_tokens)
651
-
652
- # Resize the token embeddings as we are adding new special tokens to the tokenizer
653
- text_encoder.resize_token_embeddings(len(tokenizer))
654
-
655
- # Initialise the newly added placeholder token with the embeddings of the initializer token
656
- token_embeds = text_encoder.get_input_embeddings().weight.data
657
- with torch.no_grad():
658
- for token_id in placeholder_token_ids:
659
- token_embeds[token_id] = token_embeds[initializer_token_id].clone()
660
-
661
- # Freeze vae and unet
662
- vae.requires_grad_(False)
663
- unet.requires_grad_(False)
664
- # Freeze all parameters except for the token embeddings in text encoder
665
- text_encoder.text_model.encoder.requires_grad_(False)
666
- text_encoder.text_model.final_layer_norm.requires_grad_(False)
667
- text_encoder.text_model.embeddings.position_embedding.requires_grad_(False)
668
-
669
- if args.gradient_checkpointing:
670
- # Keep unet in train mode if we are using gradient checkpointing to save memory.
671
- # The dropout cannot be != 0 so it doesn't matter if we are in eval or train mode.
672
- unet.train()
673
- text_encoder.gradient_checkpointing_enable()
674
- unet.enable_gradient_checkpointing()
675
-
676
- if args.enable_xformers_memory_efficient_attention:
677
- if is_xformers_available():
678
- import xformers
679
-
680
- xformers_version = version.parse(xformers.__version__)
681
- if xformers_version == version.parse("0.0.16"):
682
- logger.warn(
683
- "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
684
- )
685
- unet.enable_xformers_memory_efficient_attention()
686
- else:
687
- raise ValueError("xformers is not available. Make sure it is installed correctly")
688
-
689
- # Enable TF32 for faster training on Ampere GPUs,
690
- # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
691
- if args.allow_tf32:
692
- torch.backends.cuda.matmul.allow_tf32 = True
693
-
694
- if args.scale_lr:
695
- args.learning_rate = (
696
- args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
697
- )
698
-
699
- # Initialize the optimizer
700
- optimizer = torch.optim.AdamW(
701
- text_encoder.get_input_embeddings().parameters(), # only optimize the embeddings
702
- lr=args.learning_rate,
703
- betas=(args.adam_beta1, args.adam_beta2),
704
- weight_decay=args.adam_weight_decay,
705
- eps=args.adam_epsilon,
706
- )
707
-
708
- optimizer = ORT_FP16_Optimizer(optimizer)
709
-
710
- # Dataset and DataLoaders creation:
711
- train_dataset = TextualInversionDataset(
712
- data_root=args.train_data_dir,
713
- tokenizer=tokenizer,
714
- size=args.resolution,
715
- placeholder_token=args.placeholder_token,
716
- repeats=args.repeats,
717
- learnable_property=args.learnable_property,
718
- center_crop=args.center_crop,
719
- set="train",
720
- )
721
- train_dataloader = torch.utils.data.DataLoader(
722
- train_dataset, batch_size=args.train_batch_size, shuffle=True, num_workers=args.dataloader_num_workers
723
- )
724
- if args.validation_epochs is not None:
725
- warnings.warn(
726
- f"FutureWarning: You are doing logging with validation_epochs={args.validation_epochs}."
727
- " Deprecated validation_epochs in favor of `validation_steps`"
728
- f"Setting `args.validation_steps` to {args.validation_epochs * len(train_dataset)}",
729
- FutureWarning,
730
- stacklevel=2,
731
- )
732
- args.validation_steps = args.validation_epochs * len(train_dataset)
733
-
734
- # Scheduler and math around the number of training steps.
735
- overrode_max_train_steps = False
736
- num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
737
- if args.max_train_steps is None:
738
- args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
739
- overrode_max_train_steps = True
740
-
741
- lr_scheduler = get_scheduler(
742
- args.lr_scheduler,
743
- optimizer=optimizer,
744
- num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes,
745
- num_training_steps=args.max_train_steps * accelerator.num_processes,
746
- )
747
-
748
- # Prepare everything with our `accelerator`.
749
- text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
750
- text_encoder, optimizer, train_dataloader, lr_scheduler
751
- )
752
-
753
- text_encoder = ORTModule(text_encoder)
754
- unet = ORTModule(unet)
755
- vae = ORTModule(vae)
756
-
757
- # For mixed precision training we cast the unet and vae weights to half-precision
758
- # as these models are only used for inference, keeping weights in full precision is not required.
759
- weight_dtype = torch.float32
760
- if accelerator.mixed_precision == "fp16":
761
- weight_dtype = torch.float16
762
- elif accelerator.mixed_precision == "bf16":
763
- weight_dtype = torch.bfloat16
764
-
765
- # Move vae and unet to device and cast to weight_dtype
766
- unet.to(accelerator.device, dtype=weight_dtype)
767
- vae.to(accelerator.device, dtype=weight_dtype)
768
-
769
- # We need to recalculate our total training steps as the size of the training dataloader may have changed.
770
- num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
771
- if overrode_max_train_steps:
772
- args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
773
- # Afterwards we recalculate our number of training epochs
774
- args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
775
-
776
- # We need to initialize the trackers we use, and also store our configuration.
777
- # The trackers initializes automatically on the main process.
778
- if accelerator.is_main_process:
779
- accelerator.init_trackers("textual_inversion", config=vars(args))
780
-
781
- # Train!
782
- total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
783
-
784
- logger.info("***** Running training *****")
785
- logger.info(f" Num examples = {len(train_dataset)}")
786
- logger.info(f" Num Epochs = {args.num_train_epochs}")
787
- logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
788
- logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
789
- logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
790
- logger.info(f" Total optimization steps = {args.max_train_steps}")
791
- global_step = 0
792
- first_epoch = 0
793
- # Potentially load in the weights and states from a previous save
794
- if args.resume_from_checkpoint:
795
- if args.resume_from_checkpoint != "latest":
796
- path = os.path.basename(args.resume_from_checkpoint)
797
- else:
798
- # Get the most recent checkpoint
799
- dirs = os.listdir(args.output_dir)
800
- dirs = [d for d in dirs if d.startswith("checkpoint")]
801
- dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
802
- path = dirs[-1] if len(dirs) > 0 else None
803
-
804
- if path is None:
805
- accelerator.print(
806
- f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
807
- )
808
- args.resume_from_checkpoint = None
809
- else:
810
- accelerator.print(f"Resuming from checkpoint {path}")
811
- accelerator.load_state(os.path.join(args.output_dir, path))
812
- global_step = int(path.split("-")[1])
813
-
814
- resume_global_step = global_step * args.gradient_accumulation_steps
815
- first_epoch = global_step // num_update_steps_per_epoch
816
- resume_step = resume_global_step % (num_update_steps_per_epoch * args.gradient_accumulation_steps)
817
-
818
- # Only show the progress bar once on each machine.
819
- progress_bar = tqdm(range(global_step, args.max_train_steps), disable=not accelerator.is_local_main_process)
820
- progress_bar.set_description("Steps")
821
-
822
- # keep original embeddings as reference
823
- orig_embeds_params = accelerator.unwrap_model(text_encoder).get_input_embeddings().weight.data.clone()
824
-
825
- for epoch in range(first_epoch, args.num_train_epochs):
826
- text_encoder.train()
827
- for step, batch in enumerate(train_dataloader):
828
- # Skip steps until we reach the resumed step
829
- if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step:
830
- if step % args.gradient_accumulation_steps == 0:
831
- progress_bar.update(1)
832
- continue
833
-
834
- with accelerator.accumulate(text_encoder):
835
- # Convert images to latent space
836
- latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample().detach()
837
- latents = latents * vae.config.scaling_factor
838
-
839
- # Sample noise that we'll add to the latents
840
- noise = torch.randn_like(latents)
841
- bsz = latents.shape[0]
842
- # Sample a random timestep for each image
843
- timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device)
844
- timesteps = timesteps.long()
845
-
846
- # Add noise to the latents according to the noise magnitude at each timestep
847
- # (this is the forward diffusion process)
848
- noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
849
-
850
- # Get the text embedding for conditioning
851
- encoder_hidden_states = text_encoder(batch["input_ids"])[0].to(dtype=weight_dtype)
852
-
853
- # Predict the noise residual
854
- model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample
855
-
856
- # Get the target for loss depending on the prediction type
857
- if noise_scheduler.config.prediction_type == "epsilon":
858
- target = noise
859
- elif noise_scheduler.config.prediction_type == "v_prediction":
860
- target = noise_scheduler.get_velocity(latents, noise, timesteps)
861
- else:
862
- raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
863
-
864
- loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
865
-
866
- accelerator.backward(loss)
867
-
868
- optimizer.step()
869
- lr_scheduler.step()
870
- optimizer.zero_grad()
871
-
872
- # Let's make sure we don't update any embedding weights besides the newly added token
873
- index_no_updates = torch.ones((len(tokenizer),), dtype=torch.bool)
874
- index_no_updates[min(placeholder_token_ids) : max(placeholder_token_ids) + 1] = False
875
-
876
- with torch.no_grad():
877
- accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[
878
- index_no_updates
879
- ] = orig_embeds_params[index_no_updates]
880
-
881
- # Checks if the accelerator has performed an optimization step behind the scenes
882
- if accelerator.sync_gradients:
883
- images = []
884
- progress_bar.update(1)
885
- global_step += 1
886
- if global_step % args.save_steps == 0:
887
- save_path = os.path.join(args.output_dir, f"learned_embeds-steps-{global_step}.bin")
888
- save_progress(text_encoder, placeholder_token_ids, accelerator, args, save_path)
889
-
890
- if accelerator.is_main_process:
891
- if global_step % args.checkpointing_steps == 0:
892
- save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
893
- accelerator.save_state(save_path)
894
- logger.info(f"Saved state to {save_path}")
895
-
896
- if args.validation_prompt is not None and global_step % args.validation_steps == 0:
897
- images = log_validation(
898
- text_encoder, tokenizer, unet, vae, args, accelerator, weight_dtype, epoch
899
- )
900
-
901
- logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
902
- progress_bar.set_postfix(**logs)
903
- accelerator.log(logs, step=global_step)
904
-
905
- if global_step >= args.max_train_steps:
906
- break
907
- # Create the pipeline using the trained modules and save it.
908
- accelerator.wait_for_everyone()
909
- if accelerator.is_main_process:
910
- if args.push_to_hub and not args.save_as_full_pipeline:
911
- logger.warn("Enabling full model saving because --push_to_hub=True was specified.")
912
- save_full_model = True
913
- else:
914
- save_full_model = args.save_as_full_pipeline
915
- if save_full_model:
916
- pipeline = StableDiffusionPipeline.from_pretrained(
917
- args.pretrained_model_name_or_path,
918
- text_encoder=accelerator.unwrap_model(text_encoder),
919
- vae=vae,
920
- unet=unet,
921
- tokenizer=tokenizer,
922
- )
923
- pipeline.save_pretrained(args.output_dir)
924
- # Save the newly trained embeddings
925
- save_path = os.path.join(args.output_dir, "learned_embeds.bin")
926
- save_progress(text_encoder, placeholder_token_ids, accelerator, args, save_path)
927
-
928
- if args.push_to_hub:
929
- save_model_card(
930
- repo_id,
931
- images=images,
932
- base_model=args.pretrained_model_name_or_path,
933
- repo_folder=args.output_dir,
934
- )
935
- upload_folder(
936
- repo_id=repo_id,
937
- folder_path=args.output_dir,
938
- commit_message="End of training",
939
- ignore_patterns=["step_*", "epoch_*"],
940
- )
941
-
942
- accelerator.end_training()
943
-
944
-
945
- if __name__ == "__main__":
946
- main()
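
The training script above stores the learned placeholder embedding with torch.save as a {placeholder_token: tensor} dictionary (see save_progress). A hedged sketch of consuming that file afterwards is shown below; the base model id, output path, and single-vector assumption are illustrative placeholders, not values taken from this repository.

import torch
from diffusers import StableDiffusionPipeline

# Hypothetical paths/ids; the .bin layout mirrors save_progress() above.
learned = torch.load("text-inversion-model/learned_embeds.bin")
placeholder_token, embeds = next(iter(learned.items()))   # embeds: (num_vectors, hidden_dim)

pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
pipe.tokenizer.add_tokens(placeholder_token)
pipe.text_encoder.resize_token_embeddings(len(pipe.tokenizer))
token_id = pipe.tokenizer.convert_tokens_to_ids(placeholder_token)
with torch.no_grad():
    # Copy in the first learned vector; with --num_vectors > 1 the saved tensor holds one row per vector.
    pipe.text_encoder.get_input_embeddings().weight[token_id] = embeds[0]

image = pipe(f"a photo of a {placeholder_token}").images[0]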
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/setup.py DELETED
@@ -1,286 +0,0 @@
1
- # Copyright 2023 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- """
16
- Simple check list from AllenNLP repo: https://github.com/allenai/allennlp/blob/main/setup.py
17
-
18
- To create the package for pypi.
19
-
20
- 1. Run `make pre-release` (or `make pre-patch` for a patch release) then run `make fix-copies` to fix the index of the
21
- documentation.
22
-
23
- If releasing on a special branch, copy the updated README.md on the main branch for your the commit you will make
24
- for the post-release and run `make fix-copies` on the main branch as well.
25
-
26
- 2. Run Tests for Amazon Sagemaker. The documentation is located in `./tests/sagemaker/README.md`, otherwise @philschmid.
27
-
28
- 3. Unpin specific versions from setup.py that use a git install.
29
-
30
- 4. Checkout the release branch (v<RELEASE>-release, for example v4.19-release), and commit these changes with the
31
- message: "Release: <RELEASE>" and push.
32
-
33
- 5. Wait for the tests on main to be completed and be green (otherwise revert and fix bugs)
34
-
35
- 6. Add a tag in git to mark the release: "git tag v<RELEASE> -m 'Adds tag v<RELEASE> for pypi' "
36
- Push the tag to git: git push --tags origin v<RELEASE>-release
37
-
38
- 7. Build both the sources and the wheel. Do not change anything in setup.py between
39
- creating the wheel and the source distribution (obviously).
40
-
41
- For the wheel, run: "python setup.py bdist_wheel" in the top level directory.
42
- (this will build a wheel for the python version you use to build it).
43
-
44
- For the sources, run: "python setup.py sdist"
45
- You should now have a /dist directory with both .whl and .tar.gz source versions.
46
-
47
- 8. Check that everything looks correct by uploading the package to the pypi test server:
48
-
49
- twine upload dist/* -r pypitest
50
- (pypi suggest using twine as other methods upload files via plaintext.)
51
- You may have to specify the repository url, use the following command then:
52
- twine upload dist/* -r pypitest --repository-url=https://test.pypi.org/legacy/
53
-
54
- Check that you can install it in a virtualenv by running:
55
- pip install -i https://testpypi.python.org/pypi diffusers
56
-
57
- Check you can run the following commands:
58
- python -c "from diffusers import pipeline; classifier = pipeline('text-classification'); print(classifier('What a nice release'))"
59
- python -c "from diffusers import *"
60
-
61
- 9. Upload the final version to actual pypi:
62
- twine upload dist/* -r pypi
63
-
64
- 10. Copy the release notes from RELEASE.md to the tag in github once everything is looking hunky-dory.
65
-
66
- 11. Run `make post-release` (or, for a patch release, `make post-patch`). If you were on a branch for the release,
67
- you need to go back to main before executing this.
68
- """
69
-
70
- import os
71
- import re
72
- from distutils.core import Command
73
-
74
- from setuptools import find_packages, setup
75
-
76
-
77
- # IMPORTANT:
78
- # 1. all dependencies should be listed here with their version requirements if any
79
- # 2. once modified, run: `make deps_table_update` to update src/diffusers/dependency_versions_table.py
80
- _deps = [
81
- "Pillow", # keep the PIL.Image.Resampling deprecation away
82
- "accelerate>=0.11.0",
83
- "compel==0.1.8",
84
- "black~=23.1",
85
- "datasets",
86
- "filelock",
87
- "flax>=0.4.1",
88
- "hf-doc-builder>=0.3.0",
89
- "huggingface-hub>=0.13.2",
90
- "requests-mock==1.10.0",
91
- "importlib_metadata",
92
- "invisible-watermark>=0.2.0",
93
- "isort>=5.5.4",
94
- "jax>=0.2.8,!=0.3.2",
95
- "jaxlib>=0.1.65",
96
- "Jinja2",
97
- "k-diffusion>=0.0.12",
98
- "torchsde",
99
- "note_seq",
100
- "librosa",
101
- "numpy",
102
- "omegaconf",
103
- "parameterized",
104
- "protobuf>=3.20.3,<4",
105
- "pytest",
106
- "pytest-timeout",
107
- "pytest-xdist",
108
- "ruff>=0.0.241",
109
- "safetensors>=0.3.1",
110
- "sentencepiece>=0.1.91,!=0.1.92",
111
- "scipy",
112
- "onnx",
113
- "regex!=2019.12.17",
114
- "requests",
115
- "tensorboard",
116
- "torch>=1.4",
117
- "torchvision",
118
- "transformers>=4.25.1",
119
- "urllib3<=2.0.0",
120
- ]
121
-
122
- # this is a lookup table with items like:
123
- #
124
- # tokenizers: "huggingface-hub==0.8.0"
125
- # packaging: "packaging"
126
- #
127
- # some of the values are versioned whereas others aren't.
128
- deps = {b: a for a, b in (re.findall(r"^(([^!=<>~]+)(?:[!=<>~].*)?$)", x)[0] for x in _deps)}
129
-
130
- # since we save this data in src/diffusers/dependency_versions_table.py it can be easily accessed from
131
- # anywhere. If you need to quickly access the data from this table in a shell, you can do so easily with:
132
- #
133
- # python -c 'import sys; from diffusers.dependency_versions_table import deps; \
134
- # print(" ".join([ deps[x] for x in sys.argv[1:]]))' tokenizers datasets
135
- #
136
- # Just pass the desired package names to that script as it's shown with 2 packages above.
137
- #
138
- # If diffusers is not yet installed and the work is done from the cloned repo remember to add `PYTHONPATH=src` to the script above
139
- #
140
- # You can then feed this for example to `pip`:
141
- #
142
- # pip install -U $(python -c 'import sys; from diffusers.dependency_versions_table import deps; \
143
- # print(" ".join([ deps[x] for x in sys.argv[1:]]))' tokenizers datasets)
144
- #
145
-
146
-
147
- def deps_list(*pkgs):
148
- return [deps[pkg] for pkg in pkgs]
149
-
150
-
151
- class DepsTableUpdateCommand(Command):
152
- """
153
- A custom distutils command that updates the dependency table.
154
- usage: python setup.py deps_table_update
155
- """
156
-
157
- description = "build runtime dependency table"
158
- user_options = [
159
- # format: (long option, short option, description).
160
- ("dep-table-update", None, "updates src/diffusers/dependency_versions_table.py"),
161
- ]
162
-
163
- def initialize_options(self):
164
- pass
165
-
166
- def finalize_options(self):
167
- pass
168
-
169
- def run(self):
170
- entries = "\n".join([f' "{k}": "{v}",' for k, v in deps.items()])
171
- content = [
172
- "# THIS FILE HAS BEEN AUTOGENERATED. To update:",
173
- "# 1. modify the `_deps` dict in setup.py",
174
- "# 2. run `make deps_table_update``",
175
- "deps = {",
176
- entries,
177
- "}",
178
- "",
179
- ]
180
- target = "src/diffusers/dependency_versions_table.py"
181
- print(f"updating {target}")
182
- with open(target, "w", encoding="utf-8", newline="\n") as f:
183
- f.write("\n".join(content))
184
-
185
-
186
- extras = {}
187
-
188
-
189
- extras = {}
190
- extras["quality"] = deps_list("urllib3", "black", "isort", "ruff", "hf-doc-builder")
191
- extras["docs"] = deps_list("hf-doc-builder")
192
- extras["training"] = deps_list("accelerate", "datasets", "protobuf", "tensorboard", "Jinja2")
193
- extras["test"] = deps_list(
194
- "compel",
195
- "datasets",
196
- "Jinja2",
197
- "invisible-watermark",
198
- "k-diffusion",
199
- "librosa",
200
- "omegaconf",
201
- "parameterized",
202
- "pytest",
203
- "pytest-timeout",
204
- "pytest-xdist",
205
- "requests-mock",
206
- "safetensors",
207
- "sentencepiece",
208
- "scipy",
209
- "torchvision",
210
- "transformers",
211
- )
212
- extras["torch"] = deps_list("torch", "accelerate")
213
-
214
- if os.name == "nt": # windows
215
- extras["flax"] = [] # jax is not supported on windows
216
- else:
217
- extras["flax"] = deps_list("jax", "jaxlib", "flax")
218
-
219
- extras["dev"] = (
220
- extras["quality"] + extras["test"] + extras["training"] + extras["docs"] + extras["torch"] + extras["flax"]
221
- )
222
-
223
- install_requires = [
224
- deps["importlib_metadata"],
225
- deps["filelock"],
226
- deps["huggingface-hub"],
227
- deps["numpy"],
228
- deps["regex"],
229
- deps["requests"],
230
- deps["safetensors"],
231
- deps["Pillow"],
232
- ]
233
-
234
- setup(
235
- name="diffusers",
236
- version="0.19.3", # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots)
237
- description="Diffusers",
238
- long_description=open("README.md", "r", encoding="utf-8").read(),
239
- long_description_content_type="text/markdown",
240
- keywords="deep learning",
241
- license="Apache",
242
- author="The HuggingFace team",
243
- author_email="[email protected]",
244
- url="https://github.com/huggingface/diffusers",
245
- package_dir={"": "src"},
246
- packages=find_packages("src"),
247
- include_package_data=True,
248
- python_requires=">=3.7.0",
249
- install_requires=list(install_requires),
250
- extras_require=extras,
251
- entry_points={"console_scripts": ["diffusers-cli=diffusers.commands.diffusers_cli:main"]},
252
- classifiers=[
253
- "Development Status :: 5 - Production/Stable",
254
- "Intended Audience :: Developers",
255
- "Intended Audience :: Education",
256
- "Intended Audience :: Science/Research",
257
- "License :: OSI Approved :: Apache Software License",
258
- "Operating System :: OS Independent",
259
- "Programming Language :: Python :: 3",
260
- "Programming Language :: Python :: 3.7",
261
- "Programming Language :: Python :: 3.8",
262
- "Programming Language :: Python :: 3.9",
263
- "Topic :: Scientific/Engineering :: Artificial Intelligence",
264
- ],
265
- cmdclass={"deps_table_update": DepsTableUpdateCommand},
266
- )
267
-
268
- # Release checklist
269
- # 1. Change the version in __init__.py and setup.py.
270
- # 2. Commit these changes with the message: "Release: Release"
271
- # 3. Add a tag in git to mark the release: "git tag RELEASE -m 'Adds tag RELEASE for pypi' "
272
- # Push the tag to git: git push --tags origin main
273
- # 4. Run the following commands in the top-level directory:
274
- # python setup.py bdist_wheel
275
- # python setup.py sdist
276
- # 5. Upload the package to the pypi test server first:
277
- # twine upload dist/* -r pypitest
278
- # twine upload dist/* -r pypitest --repository-url=https://test.pypi.org/legacy/
279
- # 6. Check that you can install it in a virtualenv by running:
280
- # pip install -i https://testpypi.python.org/pypi diffusers
281
- # diffusers env
282
- # diffusers test
283
- # 7. Upload the final version to actual pypi:
284
- # twine upload dist/* -r pypi
285
- # 8. Add release notes to the tag in github once everything is looking hunky-dory.
286
- # 9. Update the version in __init__.py, setup.py to the new version "-dev" and push to master
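
As a companion to the dependency machinery above, here is a minimal standalone sketch of what `DepsTableUpdateCommand.run()` produces. The `_deps` pins and the name-extraction regex below are illustrative assumptions, not the project's actual values (the real `_deps` list lives earlier in setup.py):

```py
import re

# Hypothetical pins -- placeholders, not the real _deps entries from setup.py.
_deps = ["torch>=1.4", "numpy", "Pillow"]

# Assumed name-extraction rule: strip the version specifier to get the key.
deps = {re.match(r"^[^!=<>~ ]+", d).group(0): d for d in _deps}

entries = "\n".join(f'    "{k}": "{v}",' for k, v in deps.items())
content = "\n".join(
    [
        "# THIS FILE HAS BEEN AUTOGENERATED. To update:",
        "# 1. modify the `_deps` dict in setup.py",
        "# 2. run `make deps_table_update`",
        "deps = {",
        entries,
        "}",
        "",
    ]
)
print(content)  # run() writes this string to src/diffusers/dependency_versions_table.py
```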
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_sag.py DELETED
@@ -1,754 +0,0 @@
1
- # Copyright 2023 Susung Hong and The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- import inspect
16
- import warnings
17
- from typing import Any, Callable, Dict, List, Optional, Union
18
-
19
- import torch
20
- import torch.nn.functional as F
21
- from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
22
-
23
- from ...image_processor import VaeImageProcessor
24
- from ...loaders import LoraLoaderMixin, TextualInversionLoaderMixin
25
- from ...models import AutoencoderKL, UNet2DConditionModel
26
- from ...schedulers import KarrasDiffusionSchedulers
27
- from ...utils import logging, randn_tensor, replace_example_docstring
28
- from ..pipeline_utils import DiffusionPipeline
29
- from . import StableDiffusionPipelineOutput
30
- from .safety_checker import StableDiffusionSafetyChecker
31
-
32
-
33
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
34
-
35
- EXAMPLE_DOC_STRING = """
36
- Examples:
37
- ```py
38
- >>> import torch
39
- >>> from diffusers import StableDiffusionSAGPipeline
40
-
41
- >>> pipe = StableDiffusionSAGPipeline.from_pretrained(
42
- ... "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
43
- ... )
44
- >>> pipe = pipe.to("cuda")
45
-
46
- >>> prompt = "a photo of an astronaut riding a horse on mars"
47
- >>> image = pipe(prompt, sag_scale=0.75).images[0]
48
- ```
49
- """
50
-
51
-
52
- # processes and stores attention probabilities
53
- class CrossAttnStoreProcessor:
54
- def __init__(self):
55
- self.attention_probs = None
56
-
57
- def __call__(
58
- self,
59
- attn,
60
- hidden_states,
61
- encoder_hidden_states=None,
62
- attention_mask=None,
63
- ):
64
- batch_size, sequence_length, _ = hidden_states.shape
65
- attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
66
- query = attn.to_q(hidden_states)
67
-
68
- if encoder_hidden_states is None:
69
- encoder_hidden_states = hidden_states
70
- elif attn.norm_cross:
71
- encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
72
-
73
- key = attn.to_k(encoder_hidden_states)
74
- value = attn.to_v(encoder_hidden_states)
75
-
76
- query = attn.head_to_batch_dim(query)
77
- key = attn.head_to_batch_dim(key)
78
- value = attn.head_to_batch_dim(value)
79
-
80
- self.attention_probs = attn.get_attention_scores(query, key, attention_mask)
81
- hidden_states = torch.bmm(self.attention_probs, value)
82
- hidden_states = attn.batch_to_head_dim(hidden_states)
83
-
84
- # linear proj
85
- hidden_states = attn.to_out[0](hidden_states)
86
- # dropout
87
- hidden_states = attn.to_out[1](hidden_states)
88
-
89
- return hidden_states
90
-
91
-
92
- # Modified to take the self-attention guidance scale from the SAG paper (https://arxiv.org/pdf/2210.00939.pdf) as an input
93
- class StableDiffusionSAGPipeline(DiffusionPipeline, TextualInversionLoaderMixin):
94
- r"""
95
- Pipeline for text-to-image generation using Stable Diffusion.
96
-
97
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
98
- implemented for all pipelines (downloading, saving, running on a particular device, etc.).
99
-
100
- Args:
101
- vae ([`AutoencoderKL`]):
102
- Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
103
- text_encoder ([`~transformers.CLIPTextModel`]):
104
- Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
105
- tokenizer ([`~transformers.CLIPTokenizer`]):
106
- A `CLIPTokenizer` to tokenize text.
107
- unet ([`UNet2DConditionModel`]):
108
- A `UNet2DConditionModel` to denoise the encoded image latents.
109
- scheduler ([`SchedulerMixin`]):
110
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
111
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
112
- safety_checker ([`StableDiffusionSafetyChecker`]):
113
- Classification module that estimates whether generated images could be considered offensive or harmful.
114
- Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
115
- about a model's potential harms.
116
- feature_extractor ([`~transformers.CLIPImageProcessor`]):
117
- A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
118
- """
119
- _optional_components = ["safety_checker", "feature_extractor"]
120
-
121
- def __init__(
122
- self,
123
- vae: AutoencoderKL,
124
- text_encoder: CLIPTextModel,
125
- tokenizer: CLIPTokenizer,
126
- unet: UNet2DConditionModel,
127
- scheduler: KarrasDiffusionSchedulers,
128
- safety_checker: StableDiffusionSafetyChecker,
129
- feature_extractor: CLIPImageProcessor,
130
- requires_safety_checker: bool = True,
131
- ):
132
- super().__init__()
133
-
134
- self.register_modules(
135
- vae=vae,
136
- text_encoder=text_encoder,
137
- tokenizer=tokenizer,
138
- unet=unet,
139
- scheduler=scheduler,
140
- safety_checker=safety_checker,
141
- feature_extractor=feature_extractor,
142
- )
143
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
144
- self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
145
- self.register_to_config(requires_safety_checker=requires_safety_checker)
146
-
147
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
148
- def enable_vae_slicing(self):
149
- r"""
150
- Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
151
- compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
152
- """
153
- self.vae.enable_slicing()
154
-
155
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
156
- def disable_vae_slicing(self):
157
- r"""
158
- Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
159
- computing decoding in one step.
160
- """
161
- self.vae.disable_slicing()
162
-
163
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
164
- def _encode_prompt(
165
- self,
166
- prompt,
167
- device,
168
- num_images_per_prompt,
169
- do_classifier_free_guidance,
170
- negative_prompt=None,
171
- prompt_embeds: Optional[torch.FloatTensor] = None,
172
- negative_prompt_embeds: Optional[torch.FloatTensor] = None,
173
- lora_scale: Optional[float] = None,
174
- ):
175
- r"""
176
- Encodes the prompt into text encoder hidden states.
177
-
178
- Args:
179
- prompt (`str` or `List[str]`, *optional*):
180
- prompt to be encoded
181
- device: (`torch.device`):
182
- torch device
183
- num_images_per_prompt (`int`):
184
- number of images that should be generated per prompt
185
- do_classifier_free_guidance (`bool`):
186
- whether to use classifier free guidance or not
187
- negative_prompt (`str` or `List[str]`, *optional*):
188
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
189
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
190
- less than `1`).
191
- prompt_embeds (`torch.FloatTensor`, *optional*):
192
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
193
- provided, text embeddings will be generated from `prompt` input argument.
194
- negative_prompt_embeds (`torch.FloatTensor`, *optional*):
195
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
196
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
197
- argument.
198
- lora_scale (`float`, *optional*):
199
- A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
200
- """
201
- # set lora scale so that monkey patched LoRA
202
- # function of text encoder can correctly access it
203
- if lora_scale is not None and isinstance(self, LoraLoaderMixin):
204
- self._lora_scale = lora_scale
205
-
206
- if prompt is not None and isinstance(prompt, str):
207
- batch_size = 1
208
- elif prompt is not None and isinstance(prompt, list):
209
- batch_size = len(prompt)
210
- else:
211
- batch_size = prompt_embeds.shape[0]
212
-
213
- if prompt_embeds is None:
214
- # textual inversion: process multi-vector tokens if necessary
215
- if isinstance(self, TextualInversionLoaderMixin):
216
- prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
217
-
218
- text_inputs = self.tokenizer(
219
- prompt,
220
- padding="max_length",
221
- max_length=self.tokenizer.model_max_length,
222
- truncation=True,
223
- return_tensors="pt",
224
- )
225
- text_input_ids = text_inputs.input_ids
226
- untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
227
-
228
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
229
- text_input_ids, untruncated_ids
230
- ):
231
- removed_text = self.tokenizer.batch_decode(
232
- untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
233
- )
234
- logger.warning(
235
- "The following part of your input was truncated because CLIP can only handle sequences up to"
236
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
237
- )
238
-
239
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
240
- attention_mask = text_inputs.attention_mask.to(device)
241
- else:
242
- attention_mask = None
243
-
244
- prompt_embeds = self.text_encoder(
245
- text_input_ids.to(device),
246
- attention_mask=attention_mask,
247
- )
248
- prompt_embeds = prompt_embeds[0]
249
-
250
- prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
251
-
252
- bs_embed, seq_len, _ = prompt_embeds.shape
253
- # duplicate text embeddings for each generation per prompt, using mps friendly method
254
- prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
255
- prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
256
-
257
- # get unconditional embeddings for classifier free guidance
258
- if do_classifier_free_guidance and negative_prompt_embeds is None:
259
- uncond_tokens: List[str]
260
- if negative_prompt is None:
261
- uncond_tokens = [""] * batch_size
262
- elif prompt is not None and type(prompt) is not type(negative_prompt):
263
- raise TypeError(
264
- f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
265
- f" {type(prompt)}."
266
- )
267
- elif isinstance(negative_prompt, str):
268
- uncond_tokens = [negative_prompt]
269
- elif batch_size != len(negative_prompt):
270
- raise ValueError(
271
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
272
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
273
- " the batch size of `prompt`."
274
- )
275
- else:
276
- uncond_tokens = negative_prompt
277
-
278
- # textual inversion: process multi-vector tokens if necessary
279
- if isinstance(self, TextualInversionLoaderMixin):
280
- uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
281
-
282
- max_length = prompt_embeds.shape[1]
283
- uncond_input = self.tokenizer(
284
- uncond_tokens,
285
- padding="max_length",
286
- max_length=max_length,
287
- truncation=True,
288
- return_tensors="pt",
289
- )
290
-
291
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
292
- attention_mask = uncond_input.attention_mask.to(device)
293
- else:
294
- attention_mask = None
295
-
296
- negative_prompt_embeds = self.text_encoder(
297
- uncond_input.input_ids.to(device),
298
- attention_mask=attention_mask,
299
- )
300
- negative_prompt_embeds = negative_prompt_embeds[0]
301
-
302
- if do_classifier_free_guidance:
303
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
304
- seq_len = negative_prompt_embeds.shape[1]
305
-
306
- negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
307
-
308
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
309
- negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
310
-
311
- # For classifier free guidance, we need to do two forward passes.
312
- # Here we concatenate the unconditional and text embeddings into a single batch
313
- # to avoid doing two forward passes
314
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
315
-
316
- return prompt_embeds
317
-
318
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
319
- def run_safety_checker(self, image, device, dtype):
320
- if self.safety_checker is None:
321
- has_nsfw_concept = None
322
- else:
323
- if torch.is_tensor(image):
324
- feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
325
- else:
326
- feature_extractor_input = self.image_processor.numpy_to_pil(image)
327
- safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
328
- image, has_nsfw_concept = self.safety_checker(
329
- images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
330
- )
331
- return image, has_nsfw_concept
332
-
333
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
334
- def decode_latents(self, latents):
335
- warnings.warn(
336
- "The decode_latents method is deprecated and will be removed in a future version. Please"
337
- " use VaeImageProcessor instead",
338
- FutureWarning,
339
- )
340
- latents = 1 / self.vae.config.scaling_factor * latents
341
- image = self.vae.decode(latents, return_dict=False)[0]
342
- image = (image / 2 + 0.5).clamp(0, 1)
343
- # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
344
- image = image.cpu().permute(0, 2, 3, 1).float().numpy()
345
- return image
346
-
347
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
348
- def prepare_extra_step_kwargs(self, generator, eta):
349
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
350
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
351
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
352
- # and should be between [0, 1]
353
-
354
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
355
- extra_step_kwargs = {}
356
- if accepts_eta:
357
- extra_step_kwargs["eta"] = eta
358
-
359
- # check if the scheduler accepts generator
360
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
361
- if accepts_generator:
362
- extra_step_kwargs["generator"] = generator
363
- return extra_step_kwargs
364
-
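
`prepare_extra_step_kwargs` above forwards `eta` and `generator` only when the scheduler's `step()` signature accepts them. A small self-contained sketch of that introspection pattern, using dummy step functions rather than real schedulers:

```py
import inspect


def ddim_like_step(model_output, timestep, sample, eta=0.0, generator=None):
    ...  # stand-in for a scheduler step that accepts eta/generator


def euler_like_step(model_output, timestep, sample):
    ...  # stand-in for a scheduler step that accepts neither


def extra_step_kwargs_for(step_fn, eta, generator):
    # Forward an argument only if the step function declares it.
    params = set(inspect.signature(step_fn).parameters.keys())
    kwargs = {}
    if "eta" in params:
        kwargs["eta"] = eta
    if "generator" in params:
        kwargs["generator"] = generator
    return kwargs


print(extra_step_kwargs_for(ddim_like_step, 0.0, None))   # {'eta': 0.0, 'generator': None}
print(extra_step_kwargs_for(euler_like_step, 0.0, None))  # {}
```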
365
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.check_inputs
366
- def check_inputs(
367
- self,
368
- prompt,
369
- height,
370
- width,
371
- callback_steps,
372
- negative_prompt=None,
373
- prompt_embeds=None,
374
- negative_prompt_embeds=None,
375
- ):
376
- if height % 8 != 0 or width % 8 != 0:
377
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
378
-
379
- if (callback_steps is None) or (
380
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
381
- ):
382
- raise ValueError(
383
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
384
- f" {type(callback_steps)}."
385
- )
386
-
387
- if prompt is not None and prompt_embeds is not None:
388
- raise ValueError(
389
- f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
390
- " only forward one of the two."
391
- )
392
- elif prompt is None and prompt_embeds is None:
393
- raise ValueError(
394
- "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
395
- )
396
- elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
397
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
398
-
399
- if negative_prompt is not None and negative_prompt_embeds is not None:
400
- raise ValueError(
401
- f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
402
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
403
- )
404
-
405
- if prompt_embeds is not None and negative_prompt_embeds is not None:
406
- if prompt_embeds.shape != negative_prompt_embeds.shape:
407
- raise ValueError(
408
- "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
409
- f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
410
- f" {negative_prompt_embeds.shape}."
411
- )
412
-
413
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
414
- def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
415
- shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
416
- if isinstance(generator, list) and len(generator) != batch_size:
417
- raise ValueError(
418
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
419
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
420
- )
421
-
422
- if latents is None:
423
- latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
424
- else:
425
- latents = latents.to(device)
426
-
427
- # scale the initial noise by the standard deviation required by the scheduler
428
- latents = latents * self.scheduler.init_noise_sigma
429
- return latents
430
-
431
- @torch.no_grad()
432
- @replace_example_docstring(EXAMPLE_DOC_STRING)
433
- def __call__(
434
- self,
435
- prompt: Union[str, List[str]] = None,
436
- height: Optional[int] = None,
437
- width: Optional[int] = None,
438
- num_inference_steps: int = 50,
439
- guidance_scale: float = 7.5,
440
- sag_scale: float = 0.75,
441
- negative_prompt: Optional[Union[str, List[str]]] = None,
442
- num_images_per_prompt: Optional[int] = 1,
443
- eta: float = 0.0,
444
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
445
- latents: Optional[torch.FloatTensor] = None,
446
- prompt_embeds: Optional[torch.FloatTensor] = None,
447
- negative_prompt_embeds: Optional[torch.FloatTensor] = None,
448
- output_type: Optional[str] = "pil",
449
- return_dict: bool = True,
450
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
451
- callback_steps: Optional[int] = 1,
452
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
453
- ):
454
- r"""
455
- The call function to the pipeline for generation.
456
-
457
- Args:
458
- prompt (`str` or `List[str]`, *optional*):
459
- The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
460
- height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
461
- The height in pixels of the generated image.
462
- width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
463
- The width in pixels of the generated image.
464
- num_inference_steps (`int`, *optional*, defaults to 50):
465
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
466
- expense of slower inference.
467
- guidance_scale (`float`, *optional*, defaults to 7.5):
468
- A higher guidance scale value encourages the model to generate images closely linked to the text
469
- `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
470
- sag_scale (`float`, *optional*, defaults to 0.75):
471
- Self-attention guidance scale from the SAG paper; values in [0, 1.0] generally improve quality, and `sag_scale = 0` disables the guidance.
472
- negative_prompt (`str` or `List[str]`, *optional*):
473
- The prompt or prompts to guide what to not include in image generation. If not defined, you need to
474
- pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
475
- num_images_per_prompt (`int`, *optional*, defaults to 1):
476
- The number of images to generate per prompt.
477
- eta (`float`, *optional*, defaults to 0.0):
478
- Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
479
- to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
480
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
481
- A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
482
- generation deterministic.
483
- latents (`torch.FloatTensor`, *optional*):
484
- Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
485
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
486
- tensor is generated by sampling using the supplied random `generator`.
487
- prompt_embeds (`torch.FloatTensor`, *optional*):
488
- Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
489
- provided, text embeddings are generated from the `prompt` input argument.
490
- negative_prompt_embeds (`torch.FloatTensor`, *optional*):
491
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
492
- not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
493
- output_type (`str`, *optional*, defaults to `"pil"`):
494
- The output format of the generated image. Choose between `PIL.Image` or `np.array`.
495
- return_dict (`bool`, *optional*, defaults to `True`):
496
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
497
- plain tuple.
498
- callback (`Callable`, *optional*):
499
- A function that calls every `callback_steps` steps during inference. The function is called with the
500
- following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
501
- callback_steps (`int`, *optional*, defaults to 1):
502
- The frequency at which the `callback` function is called. If not specified, the callback is called at
503
- every step.
504
- cross_attention_kwargs (`dict`, *optional*):
505
- A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
506
- [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
507
-
508
- Examples:
509
-
510
- Returns:
511
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
512
- If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
513
- otherwise a `tuple` is returned where the first element is a list with the generated images and the
514
- second element is a list of `bool`s indicating whether the corresponding generated image contains
515
- "not-safe-for-work" (nsfw) content.
516
- """
517
- # 0. Default height and width to unet
518
- height = height or self.unet.config.sample_size * self.vae_scale_factor
519
- width = width or self.unet.config.sample_size * self.vae_scale_factor
520
-
521
- # 1. Check inputs. Raise error if not correct
522
- self.check_inputs(
523
- prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds
524
- )
525
-
526
- # 2. Define call parameters
527
- if prompt is not None and isinstance(prompt, str):
528
- batch_size = 1
529
- elif prompt is not None and isinstance(prompt, list):
530
- batch_size = len(prompt)
531
- else:
532
- batch_size = prompt_embeds.shape[0]
533
-
534
- device = self._execution_device
535
- # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
- # corresponds to doing no classifier-free guidance.
- do_classifier_free_guidance = guidance_scale > 1.0
- # and `sag_scale` is `s` of equation (16)
- # of the self-attention guidance paper: https://arxiv.org/pdf/2210.00939.pdf
541
- # `sag_scale = 0` means no self-attention guidance
542
- do_self_attention_guidance = sag_scale > 0.0
543
-
544
- # 3. Encode input prompt
545
- prompt_embeds = self._encode_prompt(
546
- prompt,
547
- device,
548
- num_images_per_prompt,
549
- do_classifier_free_guidance,
550
- negative_prompt,
551
- prompt_embeds=prompt_embeds,
552
- negative_prompt_embeds=negative_prompt_embeds,
553
- )
554
-
555
- # 4. Prepare timesteps
556
- self.scheduler.set_timesteps(num_inference_steps, device=device)
557
- timesteps = self.scheduler.timesteps
558
-
559
- # 5. Prepare latent variables
560
- num_channels_latents = self.unet.config.in_channels
561
- latents = self.prepare_latents(
562
- batch_size * num_images_per_prompt,
563
- num_channels_latents,
564
- height,
565
- width,
566
- prompt_embeds.dtype,
567
- device,
568
- generator,
569
- latents,
570
- )
571
-
572
- # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
573
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
574
-
575
- # 7. Denoising loop
576
- store_processor = CrossAttnStoreProcessor()
577
- self.unet.mid_block.attentions[0].transformer_blocks[0].attn1.processor = store_processor
578
- num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
579
-
580
- map_size = None
581
-
582
- def get_map_size(module, input, output):
583
- nonlocal map_size
584
- map_size = output[0].shape[-2:]
585
-
586
- with self.unet.mid_block.attentions[0].register_forward_hook(get_map_size):
587
- with self.progress_bar(total=num_inference_steps) as progress_bar:
588
- for i, t in enumerate(timesteps):
589
- # expand the latents if we are doing classifier free guidance
590
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
591
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
592
-
593
- # predict the noise residual
594
-
595
- noise_pred = self.unet(
596
- latent_model_input,
597
- t,
598
- encoder_hidden_states=prompt_embeds,
599
- cross_attention_kwargs=cross_attention_kwargs,
600
- ).sample
601
-
602
- # perform guidance
603
- if do_classifier_free_guidance:
604
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
605
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
606
-
607
- # perform self-attention guidance with the stored self-attention map
608
- if do_self_attention_guidance:
609
- # classifier-free guidance produces two chunks of attention map
610
- # and we only use the unconditional one, following equation (25)
611
- # in https://arxiv.org/pdf/2210.00939.pdf
612
- if do_classifier_free_guidance:
613
- # DDIM-like prediction of x0
614
- pred_x0 = self.pred_x0(latents, noise_pred_uncond, t)
615
- # get the stored attention maps
616
- uncond_attn, cond_attn = store_processor.attention_probs.chunk(2)
617
- # self-attention-based degrading of latents
618
- degraded_latents = self.sag_masking(
619
- pred_x0, uncond_attn, map_size, t, self.pred_epsilon(latents, noise_pred_uncond, t)
620
- )
621
- uncond_emb, _ = prompt_embeds.chunk(2)
622
- # forward and give guidance
623
- degraded_pred = self.unet(degraded_latents, t, encoder_hidden_states=uncond_emb).sample
624
- noise_pred += sag_scale * (noise_pred_uncond - degraded_pred)
625
- else:
626
- # DDIM-like prediction of x0
627
- pred_x0 = self.pred_x0(latents, noise_pred, t)
628
- # get the stored attention maps
629
- cond_attn = store_processor.attention_probs
630
- # self-attention-based degrading of latents
631
- degraded_latents = self.sag_masking(
632
- pred_x0, cond_attn, map_size, t, self.pred_epsilon(latents, noise_pred, t)
633
- )
634
- # forward and give guidance
635
- degraded_pred = self.unet(degraded_latents, t, encoder_hidden_states=prompt_embeds).sample
636
- noise_pred += sag_scale * (noise_pred - degraded_pred)
637
-
638
- # compute the previous noisy sample x_t -> x_t-1
639
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
640
-
641
- # call the callback, if provided
642
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
643
- progress_bar.update()
644
- if callback is not None and i % callback_steps == 0:
645
- callback(i, t, latents)
646
-
647
- if not output_type == "latent":
648
- image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
649
- image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
650
- else:
651
- image = latents
652
- has_nsfw_concept = None
653
-
654
- if has_nsfw_concept is None:
655
- do_denormalize = [True] * image.shape[0]
656
- else:
657
- do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
658
-
659
- image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
660
-
661
- if not return_dict:
662
- return (image, has_nsfw_concept)
663
-
664
- return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
665
-
666
- def sag_masking(self, original_latents, attn_map, map_size, t, eps):
667
- # Same masking process as in SAG paper: https://arxiv.org/pdf/2210.00939.pdf
668
- bh, hw1, hw2 = attn_map.shape
669
- b, latent_channel, latent_h, latent_w = original_latents.shape
670
- h = self.unet.config.attention_head_dim
671
- if isinstance(h, list):
672
- h = h[-1]
673
-
674
- # Produce attention mask
675
- attn_map = attn_map.reshape(b, h, hw1, hw2)
676
- attn_mask = attn_map.mean(1, keepdim=False).sum(1, keepdim=False) > 1.0
677
- attn_mask = (
678
- attn_mask.reshape(b, map_size[0], map_size[1])
679
- .unsqueeze(1)
680
- .repeat(1, latent_channel, 1, 1)
681
- .type(attn_map.dtype)
682
- )
683
- attn_mask = F.interpolate(attn_mask, (latent_h, latent_w))
684
-
685
- # Blur according to the self-attention mask
686
- degraded_latents = gaussian_blur_2d(original_latents, kernel_size=9, sigma=1.0)
687
- degraded_latents = degraded_latents * attn_mask + original_latents * (1 - attn_mask)
688
-
689
- # Noise it again to match the noise level
690
- degraded_latents = self.scheduler.add_noise(degraded_latents, noise=eps, timesteps=t)
691
-
692
- return degraded_latents
693
-
694
- # Modified from diffusers.schedulers.scheduling_ddim.DDIMScheduler.step
695
- # Note: there are some schedulers that clip or do not return x_0 (PNDMScheduler, DDIMScheduler, etc.)
696
- def pred_x0(self, sample, model_output, timestep):
697
- alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
698
-
699
- beta_prod_t = 1 - alpha_prod_t
700
- if self.scheduler.config.prediction_type == "epsilon":
701
- pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)
702
- elif self.scheduler.config.prediction_type == "sample":
703
- pred_original_sample = model_output
704
- elif self.scheduler.config.prediction_type == "v_prediction":
705
- pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
706
- # predict V
707
- model_output = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
708
- else:
709
- raise ValueError(
710
- f"prediction_type given as {self.scheduler.config.prediction_type} must be one of `epsilon`, `sample`,"
711
- " or `v_prediction`"
712
- )
713
-
714
- return pred_original_sample
715
-
716
- def pred_epsilon(self, sample, model_output, timestep):
717
- alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
718
-
719
- beta_prod_t = 1 - alpha_prod_t
720
- if self.scheduler.config.prediction_type == "epsilon":
721
- pred_eps = model_output
722
- elif self.scheduler.config.prediction_type == "sample":
723
- pred_eps = (sample - (alpha_prod_t**0.5) * model_output) / (beta_prod_t**0.5)
724
- elif self.scheduler.config.prediction_type == "v_prediction":
725
- pred_eps = (beta_prod_t**0.5) * sample + (alpha_prod_t**0.5) * model_output
726
- else:
727
- raise ValueError(
728
- f"prediction_type given as {self.scheduler.config.prediction_type} must be one of `epsilon`, `sample`,"
729
- " or `v_prediction`"
730
- )
731
-
732
- return pred_eps
733
-
734
-
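
For reference, `pred_x0` and `pred_epsilon` above implement the standard DDIM conversions between the noisy sample, the predicted noise, and the predicted clean sample, with `alpha_prod_t` playing the role of $\bar\alpha_t$:

$$\hat{x}_0 = \frac{x_t - \sqrt{1-\bar\alpha_t}\,\epsilon}{\sqrt{\bar\alpha_t}}, \qquad \hat{\epsilon} = \frac{x_t - \sqrt{\bar\alpha_t}\,\hat{x}_0}{\sqrt{1-\bar\alpha_t}},$$

and, for `v_prediction`,

$$\hat{x}_0 = \sqrt{\bar\alpha_t}\,x_t - \sqrt{1-\bar\alpha_t}\,v, \qquad \hat{\epsilon} = \sqrt{1-\bar\alpha_t}\,x_t + \sqrt{\bar\alpha_t}\,v.$$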
735
- # Gaussian blur
736
- def gaussian_blur_2d(img, kernel_size, sigma):
737
- ksize_half = (kernel_size - 1) * 0.5
738
-
739
- x = torch.linspace(-ksize_half, ksize_half, steps=kernel_size)
740
-
741
- pdf = torch.exp(-0.5 * (x / sigma).pow(2))
742
-
743
- x_kernel = pdf / pdf.sum()
744
- x_kernel = x_kernel.to(device=img.device, dtype=img.dtype)
745
-
746
- kernel2d = torch.mm(x_kernel[:, None], x_kernel[None, :])
747
- kernel2d = kernel2d.expand(img.shape[-3], 1, kernel2d.shape[0], kernel2d.shape[1])
748
-
749
- padding = [kernel_size // 2, kernel_size // 2, kernel_size // 2, kernel_size // 2]
750
-
751
- img = F.pad(img, padding, mode="reflect")
752
- img = F.conv2d(img, kernel2d, groups=img.shape[-3])
753
-
754
- return img
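
A quick self-contained check of the separable blur implemented by `gaussian_blur_2d` above: build the same normalized 1-D kernel, form the 2-D kernel by outer product, and apply a depthwise convolution to a dummy latent-shaped tensor (the shapes are illustrative only):

```py
import torch
import torch.nn.functional as F

kernel_size, sigma = 9, 1.0
ksize_half = (kernel_size - 1) * 0.5
x = torch.linspace(-ksize_half, ksize_half, steps=kernel_size)
pdf = torch.exp(-0.5 * (x / sigma).pow(2))
x_kernel = pdf / pdf.sum()
assert torch.isclose(x_kernel.sum(), torch.tensor(1.0))  # the 1-D kernel is normalized

img = torch.randn(1, 4, 64, 64)  # dummy latents: batch x channels x height x width

# Outer product -> 2-D kernel, one copy per channel for a depthwise (grouped) conv.
kernel2d = torch.mm(x_kernel[:, None], x_kernel[None, :])
kernel2d = kernel2d.expand(img.shape[-3], 1, kernel_size, kernel_size)

pad = kernel_size // 2
blurred = F.conv2d(F.pad(img, [pad] * 4, mode="reflect"), kernel2d, groups=img.shape[-3])
print(blurred.shape)  # torch.Size([1, 4, 64, 64]) -- spatial size is preserved
```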
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py DELETED
@@ -1,1298 +0,0 @@
1
- # Copyright 2023 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- import inspect
16
- from typing import Any, Callable, Dict, List, Optional, Tuple, Union
17
-
18
- import numpy as np
19
- import PIL
20
- import torch
21
- from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
22
-
23
- from ...image_processor import VaeImageProcessor
24
- from ...loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
25
- from ...models import AutoencoderKL, UNet2DConditionModel
26
- from ...models.attention_processor import (
27
- AttnProcessor2_0,
28
- LoRAAttnProcessor2_0,
29
- LoRAXFormersAttnProcessor,
30
- XFormersAttnProcessor,
31
- )
32
- from ...schedulers import KarrasDiffusionSchedulers
33
- from ...utils import (
34
- is_accelerate_available,
35
- is_accelerate_version,
36
- is_invisible_watermark_available,
37
- logging,
38
- randn_tensor,
39
- replace_example_docstring,
40
- )
41
- from ..pipeline_utils import DiffusionPipeline
42
- from . import StableDiffusionXLPipelineOutput
43
-
44
-
45
- if is_invisible_watermark_available():
46
- from .watermark import StableDiffusionXLWatermarker
47
-
48
-
49
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
50
-
51
-
52
- EXAMPLE_DOC_STRING = """
53
- Examples:
54
- ```py
55
- >>> import torch
56
- >>> from diffusers import StableDiffusionXLInpaintPipeline
57
- >>> from diffusers.utils import load_image
58
-
59
- >>> pipe = StableDiffusionXLInpaintPipeline.from_pretrained(
60
- ... "stabilityai/stable-diffusion-xl-base-1.0",
61
- ... torch_dtype=torch.float16,
62
- ... variant="fp16",
63
- ... use_safetensors=True,
64
- ... )
65
- >>> pipe.to("cuda")
66
-
67
- >>> img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
68
- >>> mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
69
-
70
- >>> init_image = load_image(img_url).convert("RGB")
71
- >>> mask_image = load_image(mask_url).convert("RGB")
72
-
73
- >>> prompt = "A majestic tiger sitting on a bench"
74
- >>> image = pipe(
75
- ... prompt=prompt, image=init_image, mask_image=mask_image, num_inference_steps=50, strength=0.80
76
- ... ).images[0]
77
- ```
78
- """
79
-
80
-
81
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
82
- def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
83
- """
84
- Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
85
- Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
86
- """
87
- std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
88
- std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
89
- # rescale the results from guidance (fixes overexposure)
90
- noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
91
- # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
92
- noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
93
- return noise_cfg
94
-
95
-
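
In symbols, `rescale_noise_cfg` above matches the per-sample standard deviation of the guided prediction to that of the text-conditioned prediction and then blends the result back, with $\phi$ = `guidance_rescale`:

$$\tilde\epsilon = \epsilon_{\text{cfg}}\cdot\frac{\operatorname{std}(\epsilon_{\text{text}})}{\operatorname{std}(\epsilon_{\text{cfg}})}, \qquad \epsilon_{\text{out}} = \phi\,\tilde\epsilon + (1-\phi)\,\epsilon_{\text{cfg}}.$$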
96
- def mask_pil_to_torch(mask, height, width):
97
- # preprocess mask
98
- if isinstance(mask, (PIL.Image.Image, np.ndarray)):
99
- mask = [mask]
100
-
101
- if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image):
102
- mask = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in mask]
103
- mask = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask], axis=0)
104
- mask = mask.astype(np.float32) / 255.0
105
- elif isinstance(mask, list) and isinstance(mask[0], np.ndarray):
106
- mask = np.concatenate([m[None, None, :] for m in mask], axis=0)
107
-
108
- mask = torch.from_numpy(mask)
109
- return mask
110
-
111
-
112
- def prepare_mask_and_masked_image(image, mask, height, width, return_image: bool = False):
113
- """
114
- Prepares a pair (image, mask) to be consumed by the Stable Diffusion pipeline. This means that those inputs will be
115
- converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for the
116
- ``image`` and ``1`` for the ``mask``.
117
-
118
- The ``image`` will be converted to ``torch.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be
119
- binarized (``mask > 0.5``) and cast to ``torch.float32`` too.
120
-
121
- Args:
122
- image (Union[np.array, PIL.Image, torch.Tensor]): The image to inpaint.
123
- It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array`` or a ``channels x height x width``
124
- ``torch.Tensor`` or a ``batch x channels x height x width`` ``torch.Tensor``.
125
- mask (_type_): The mask to apply to the image, i.e. regions to inpaint.
126
- It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width``
127
- ``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``.
128
-
129
-
130
- Raises:
131
- ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``torch.Tensor`` mask
132
- should be in the ``[0, 1]`` range. ValueError: ``mask`` and ``image`` should have the same spatial dimensions.
133
- TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not
134
- (ot the other way around).
135
-
136
- Returns:
137
- tuple[torch.Tensor]: The pair (mask, masked_image) as ``torch.Tensor`` with 4
138
- dimensions: ``batch x channels x height x width``.
139
- """
140
-
141
- # checkpoint. TOD(Yiyi) - need to clean this up later
142
- if image is None:
143
- raise ValueError("`image` input cannot be undefined.")
144
-
145
- if mask is None:
146
- raise ValueError("`mask_image` input cannot be undefined.")
147
-
148
- if isinstance(image, torch.Tensor):
149
- if not isinstance(mask, torch.Tensor):
150
- mask = mask_pil_to_torch(mask, height, width)
151
-
152
- if image.ndim == 3:
153
- image = image.unsqueeze(0)
154
-
155
- # Batch and add channel dim for single mask
156
- if mask.ndim == 2:
157
- mask = mask.unsqueeze(0).unsqueeze(0)
158
-
159
- # Batch single mask or add channel dim
160
- if mask.ndim == 3:
161
- # Single batched mask, no channel dim or single mask not batched but channel dim
162
- if mask.shape[0] == 1:
163
- mask = mask.unsqueeze(0)
164
-
165
- # Batched masks no channel dim
166
- else:
167
- mask = mask.unsqueeze(1)
168
-
169
- assert image.ndim == 4 and mask.ndim == 4, "Image and Mask must have 4 dimensions"
170
- # assert image.shape[-2:] == mask.shape[-2:], "Image and Mask must have the same spatial dimensions"
171
- assert image.shape[0] == mask.shape[0], "Image and Mask must have the same batch size"
172
-
173
- # Check image is in [-1, 1]
174
- # if image.min() < -1 or image.max() > 1:
175
- # raise ValueError("Image should be in [-1, 1] range")
176
-
177
- # Check mask is in [0, 1]
178
- if mask.min() < 0 or mask.max() > 1:
179
- raise ValueError("Mask should be in [0, 1] range")
180
-
181
- # Binarize mask
182
- mask[mask < 0.5] = 0
183
- mask[mask >= 0.5] = 1
184
-
185
- # Image as float32
186
- image = image.to(dtype=torch.float32)
187
- elif isinstance(mask, torch.Tensor):
188
- raise TypeError(f"`mask` is a torch.Tensor but `image` (type: {type(image)} is not")
189
- else:
190
- # preprocess image
191
- if isinstance(image, (PIL.Image.Image, np.ndarray)):
192
- image = [image]
193
- if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
194
- # resize all images w.r.t passed height an width
195
- image = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in image]
196
- image = [np.array(i.convert("RGB"))[None, :] for i in image]
197
- image = np.concatenate(image, axis=0)
198
- elif isinstance(image, list) and isinstance(image[0], np.ndarray):
199
- image = np.concatenate([i[None, :] for i in image], axis=0)
200
-
201
- image = image.transpose(0, 3, 1, 2)
202
- image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
203
-
204
- mask = mask_pil_to_torch(mask, height, width)
205
- mask[mask < 0.5] = 0
206
- mask[mask >= 0.5] = 1
207
-
208
- if image.shape[1] == 4:
209
- # images are in latent space and thus can't
210
- # be masked, so set masked_image to None
211
- # we assume that the checkpoint is not an inpainting
212
- # checkpoint. TOD(Yiyi) - need to clean this up later
213
- masked_image = None
214
- else:
215
- masked_image = image * (mask < 0.5)
216
-
217
- # n.b. ensure backwards compatibility as old function does not return image
218
- if return_image:
219
- return mask, masked_image, image
220
-
221
- return mask, masked_image
222
-
223
-
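
A tiny self-contained illustration of the masking convention enforced by `prepare_mask_and_masked_image` above: the mask is binarized at 0.5 and the masked image keeps only the pixels the mask does not cover (the tensor shapes here are illustrative):

```py
import torch

image = torch.rand(1, 3, 4, 4) * 2 - 1  # fake image already scaled to [-1, 1]
mask = torch.rand(1, 1, 4, 4)           # fake mask in [0, 1]

# Binarize exactly as in prepare_mask_and_masked_image
mask[mask < 0.5] = 0
mask[mask >= 0.5] = 1

# Keep the region to preserve (mask == 0), zero out the region to inpaint.
masked_image = image * (mask < 0.5)

print(mask.unique())       # typically tensor([0., 1.])
print(masked_image.shape)  # torch.Size([1, 3, 4, 4])
```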
224
- class StableDiffusionXLInpaintPipeline(
225
- DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin
226
- ):
227
- r"""
228
- Pipeline for text-guided image inpainting using Stable Diffusion XL.
229
-
230
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
231
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
232
-
233
- In addition the pipeline inherits the following loading methods:
234
- - *Textual-Inversion*: [`loaders.TextualInversionLoaderMixin.load_textual_inversion`]
235
- - *LoRA*: [`loaders.LoraLoaderMixin.load_lora_weights`]
236
- - *Ckpt*: [`loaders.FromSingleFileMixin.from_single_file`]
237
-
238
- as well as the following saving methods:
239
- - *LoRA*: [`loaders.LoraLoaderMixin.save_lora_weights`]
240
-
241
- Args:
242
- vae ([`AutoencoderKL`]):
243
- Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
244
- text_encoder ([`CLIPTextModel`]):
245
- Frozen text-encoder. Stable Diffusion XL uses the text portion of
246
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
247
- the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
248
- text_encoder_2 ([`CLIPTextModelWithProjection`]):
249
- Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of
250
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection),
251
- specifically the
252
- [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)
253
- variant.
254
- tokenizer (`CLIPTokenizer`):
255
- Tokenizer of class
256
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
257
- tokenizer_2 (`CLIPTokenizer`):
258
- Second Tokenizer of class
259
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
260
- unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
261
- scheduler ([`SchedulerMixin`]):
262
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
263
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
264
- """
265
- _optional_components = ["tokenizer", "text_encoder"]
266
-
267
- def __init__(
268
- self,
269
- vae: AutoencoderKL,
270
- text_encoder: CLIPTextModel,
271
- text_encoder_2: CLIPTextModelWithProjection,
272
- tokenizer: CLIPTokenizer,
273
- tokenizer_2: CLIPTokenizer,
274
- unet: UNet2DConditionModel,
275
- scheduler: KarrasDiffusionSchedulers,
276
- requires_aesthetics_score: bool = False,
277
- force_zeros_for_empty_prompt: bool = True,
278
- add_watermarker: Optional[bool] = None,
279
- ):
280
- super().__init__()
281
-
282
- self.register_modules(
283
- vae=vae,
284
- text_encoder=text_encoder,
285
- text_encoder_2=text_encoder_2,
286
- tokenizer=tokenizer,
287
- tokenizer_2=tokenizer_2,
288
- unet=unet,
289
- scheduler=scheduler,
290
- )
291
- self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
292
- self.register_to_config(requires_aesthetics_score=requires_aesthetics_score)
293
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
294
- self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
295
-
296
- add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available()
297
-
298
- if add_watermarker:
299
- self.watermark = StableDiffusionXLWatermarker()
300
- else:
301
- self.watermark = None
302
-
303
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
304
- def enable_vae_slicing(self):
305
- r"""
306
- Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
307
- compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
308
- """
309
- self.vae.enable_slicing()
310
-
311
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
312
- def disable_vae_slicing(self):
313
- r"""
314
- Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
315
- computing decoding in one step.
316
- """
317
- self.vae.disable_slicing()
318
-
319
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling
320
- def enable_vae_tiling(self):
321
- r"""
322
- Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
323
- compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
324
- processing larger images.
325
- """
326
- self.vae.enable_tiling()
327
-
328
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling
329
- def disable_vae_tiling(self):
330
- r"""
331
- Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
332
- computing decoding in one step.
333
- """
334
- self.vae.disable_tiling()
335
-
336
- # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline.enable_model_cpu_offload
337
- def enable_model_cpu_offload(self, gpu_id=0):
338
- r"""
339
- Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
340
- to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
341
- method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
342
- `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
343
- """
344
- if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
345
- from accelerate import cpu_offload_with_hook
346
- else:
347
- raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
348
-
349
- device = torch.device(f"cuda:{gpu_id}")
350
-
351
- if self.device.type != "cpu":
352
- self.to("cpu", silence_dtype_warnings=True)
353
- torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
354
-
355
- model_sequence = (
356
- [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
357
- )
358
- model_sequence.extend([self.unet, self.vae])
359
-
360
- hook = None
361
- for cpu_offloaded_model in model_sequence:
362
- _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
363
-
364
- # We'll offload the last model manually.
365
- self.final_offload_hook = hook
366
-
367
- # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt
368
- def encode_prompt(
369
- self,
370
- prompt: str,
371
- prompt_2: Optional[str] = None,
372
- device: Optional[torch.device] = None,
373
- num_images_per_prompt: int = 1,
374
- do_classifier_free_guidance: bool = True,
375
- negative_prompt: Optional[str] = None,
376
- negative_prompt_2: Optional[str] = None,
377
- prompt_embeds: Optional[torch.FloatTensor] = None,
378
- negative_prompt_embeds: Optional[torch.FloatTensor] = None,
379
- pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
380
- negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
381
- lora_scale: Optional[float] = None,
382
- ):
383
- r"""
384
- Encodes the prompt into text encoder hidden states.
385
-
386
- Args:
387
- prompt (`str` or `List[str]`, *optional*):
388
- prompt to be encoded
389
- prompt_2 (`str` or `List[str]`, *optional*):
390
- The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
391
- used in both text-encoders
392
- device: (`torch.device`):
393
- torch device
394
- num_images_per_prompt (`int`):
395
- number of images that should be generated per prompt
396
- do_classifier_free_guidance (`bool`):
397
- whether to use classifier free guidance or not
398
- negative_prompt (`str` or `List[str]`, *optional*):
399
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
400
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
401
- less than `1`).
402
- negative_prompt_2 (`str` or `List[str]`, *optional*):
403
- The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
404
- `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
405
- prompt_embeds (`torch.FloatTensor`, *optional*):
406
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
407
- provided, text embeddings will be generated from `prompt` input argument.
408
- negative_prompt_embeds (`torch.FloatTensor`, *optional*):
409
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
410
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
411
- argument.
412
- pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
413
- Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
414
- If not provided, pooled text embeddings will be generated from `prompt` input argument.
415
- negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
416
- Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
417
- weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
418
- input argument.
419
- lora_scale (`float`, *optional*):
420
- A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
421
- """
422
- device = device or self._execution_device
423
-
424
- # set lora scale so that monkey patched LoRA
425
- # function of text encoder can correctly access it
426
- if lora_scale is not None and isinstance(self, LoraLoaderMixin):
427
- self._lora_scale = lora_scale
428
-
429
- if prompt is not None and isinstance(prompt, str):
430
- batch_size = 1
431
- elif prompt is not None and isinstance(prompt, list):
432
- batch_size = len(prompt)
433
- else:
434
- batch_size = prompt_embeds.shape[0]
435
-
436
- # Define tokenizers and text encoders
437
- tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
438
- text_encoders = (
439
- [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
440
- )
441
-
442
- if prompt_embeds is None:
443
- prompt_2 = prompt_2 or prompt
444
- # textual inversion: process multi-vector tokens if necessary
445
- prompt_embeds_list = []
446
- prompts = [prompt, prompt_2]
447
- for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
448
- if isinstance(self, TextualInversionLoaderMixin):
449
- prompt = self.maybe_convert_prompt(prompt, tokenizer)
450
-
451
- text_inputs = tokenizer(
452
- prompt,
453
- padding="max_length",
454
- max_length=tokenizer.model_max_length,
455
- truncation=True,
456
- return_tensors="pt",
457
- )
458
-
459
- text_input_ids = text_inputs.input_ids
460
- untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
462
-
463
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
464
- text_input_ids, untruncated_ids
465
- ):
466
- removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
467
- logger.warning(
468
- "The following part of your input was truncated because CLIP can only handle sequences up to"
469
- f" {tokenizer.model_max_length} tokens: {removed_text}"
470
- )
471
-
472
- prompt_embeds = text_encoder(
473
- text_input_ids.to(device),
474
- output_hidden_states=True,
475
- )
476
-
477
- # We are only ever interested in the pooled output of the final text encoder
478
- pooled_prompt_embeds = prompt_embeds[0]
479
- prompt_embeds = prompt_embeds.hidden_states[-2]
480
-
481
- prompt_embeds_list.append(prompt_embeds)
482
-
483
- prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
484
-
485
- # get unconditional embeddings for classifier free guidance
486
- zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
487
- if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
488
- negative_prompt_embeds = torch.zeros_like(prompt_embeds)
489
- negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
490
- elif do_classifier_free_guidance and negative_prompt_embeds is None:
491
- negative_prompt = negative_prompt or ""
492
- negative_prompt_2 = negative_prompt_2 or negative_prompt
493
-
494
- uncond_tokens: List[str]
495
- if prompt is not None and type(prompt) is not type(negative_prompt):
496
- raise TypeError(
497
- f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
498
- f" {type(prompt)}."
499
- )
500
- elif isinstance(negative_prompt, str):
501
- uncond_tokens = [negative_prompt, negative_prompt_2]
502
- elif batch_size != len(negative_prompt):
503
- raise ValueError(
504
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
505
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
506
- " the batch size of `prompt`."
507
- )
508
- else:
509
- uncond_tokens = [negative_prompt, negative_prompt_2]
510
-
511
- negative_prompt_embeds_list = []
512
- for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
513
- if isinstance(self, TextualInversionLoaderMixin):
514
- negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)
515
-
516
- max_length = prompt_embeds.shape[1]
517
- uncond_input = tokenizer(
518
- negative_prompt,
519
- padding="max_length",
520
- max_length=max_length,
521
- truncation=True,
522
- return_tensors="pt",
523
- )
524
-
525
- negative_prompt_embeds = text_encoder(
526
- uncond_input.input_ids.to(device),
527
- output_hidden_states=True,
528
- )
529
- # We are only ever interested in the pooled output of the final text encoder
530
- negative_pooled_prompt_embeds = negative_prompt_embeds[0]
531
- negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]
532
-
533
- negative_prompt_embeds_list.append(negative_prompt_embeds)
534
-
535
- negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
536
-
537
- prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
538
- bs_embed, seq_len, _ = prompt_embeds.shape
539
- # duplicate text embeddings for each generation per prompt, using mps friendly method
540
- prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
541
- prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
542
-
543
- if do_classifier_free_guidance:
544
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
545
- seq_len = negative_prompt_embeds.shape[1]
546
- negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
547
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
548
- negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
549
-
550
- pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
551
- bs_embed * num_images_per_prompt, -1
552
- )
553
- if do_classifier_free_guidance:
554
- negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
555
- bs_embed * num_images_per_prompt, -1
556
- )
557
-
558
- return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
559
-
560
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
561
- def prepare_extra_step_kwargs(self, generator, eta):
562
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
563
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
564
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
565
- # and should be between [0, 1]
566
-
567
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
568
- extra_step_kwargs = {}
569
- if accepts_eta:
570
- extra_step_kwargs["eta"] = eta
571
-
572
- # check if the scheduler accepts generator
573
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
574
- if accepts_generator:
575
- extra_step_kwargs["generator"] = generator
576
- return extra_step_kwargs
577
-
578
- def check_inputs(
579
- self,
580
- prompt,
581
- prompt_2,
582
- height,
583
- width,
584
- strength,
585
- callback_steps,
586
- negative_prompt=None,
587
- negative_prompt_2=None,
588
- prompt_embeds=None,
589
- negative_prompt_embeds=None,
590
- ):
591
- if strength < 0 or strength > 1:
592
- raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")
593
-
594
- if height % 8 != 0 or width % 8 != 0:
595
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
596
-
597
- if (callback_steps is None) or (
598
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
599
- ):
600
- raise ValueError(
601
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
602
- f" {type(callback_steps)}."
603
- )
604
-
605
- if prompt is not None and prompt_embeds is not None:
606
- raise ValueError(
607
- f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
608
- " only forward one of the two."
609
- )
610
- elif prompt_2 is not None and prompt_embeds is not None:
611
- raise ValueError(
612
- f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
613
- " only forward one of the two."
614
- )
615
- elif prompt is None and prompt_embeds is None:
616
- raise ValueError(
617
- "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
618
- )
619
- elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
620
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
621
- elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
622
- raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
623
-
624
- if negative_prompt is not None and negative_prompt_embeds is not None:
625
- raise ValueError(
626
- f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
627
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
628
- )
629
- elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
630
- raise ValueError(
631
- f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
632
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
633
- )
634
-
635
- if prompt_embeds is not None and negative_prompt_embeds is not None:
636
- if prompt_embeds.shape != negative_prompt_embeds.shape:
637
- raise ValueError(
638
- "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
639
- f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
640
- f" {negative_prompt_embeds.shape}."
641
- )
642
-
643
- def prepare_latents(
644
- self,
645
- batch_size,
646
- num_channels_latents,
647
- height,
648
- width,
649
- dtype,
650
- device,
651
- generator,
652
- latents=None,
653
- image=None,
654
- timestep=None,
655
- is_strength_max=True,
656
- add_noise=True,
657
- return_noise=False,
658
- return_image_latents=False,
659
- ):
660
- shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
661
- if isinstance(generator, list) and len(generator) != batch_size:
662
- raise ValueError(
663
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
664
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
665
- )
666
-
667
- if (image is None or timestep is None) and not is_strength_max:
668
- raise ValueError(
669
- "Since strength < 1. initial latents are to be initialised as a combination of Image + Noise."
670
- "However, either the image or the noise timestep has not been provided."
671
- )
672
-
673
- if image.shape[1] == 4:
674
- image_latents = image.to(device=device, dtype=dtype)
675
- elif return_image_latents or (latents is None and not is_strength_max):
676
- image = image.to(device=device, dtype=dtype)
677
- image_latents = self._encode_vae_image(image=image, generator=generator)
678
-
679
- if latents is None and add_noise:
680
- noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
681
- # if strength is 1. then initialise the latents to noise, else initial to image + noise
682
- latents = noise if is_strength_max else self.scheduler.add_noise(image_latents, noise, timestep)
683
- # if pure noise then scale the initial latents by the Scheduler's init sigma
684
- latents = latents * self.scheduler.init_noise_sigma if is_strength_max else latents
685
- elif add_noise:
686
- noise = latents.to(device)
687
- latents = noise * self.scheduler.init_noise_sigma
688
- else:
689
- noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
690
- latents = image_latents.to(device)
691
-
692
- outputs = (latents,)
693
-
694
- if return_noise:
695
- outputs += (noise,)
696
-
697
- if return_image_latents:
698
- outputs += (image_latents,)
699
-
700
- return outputs
701
-
702
- def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator):
703
- dtype = image.dtype
704
- if self.vae.config.force_upcast:
705
- image = image.float()
706
- self.vae.to(dtype=torch.float32)
707
-
708
- if isinstance(generator, list):
709
- image_latents = [
710
- self.vae.encode(image[i : i + 1]).latent_dist.sample(generator=generator[i])
711
- for i in range(image.shape[0])
712
- ]
713
- image_latents = torch.cat(image_latents, dim=0)
714
- else:
715
- image_latents = self.vae.encode(image).latent_dist.sample(generator=generator)
716
-
717
- if self.vae.config.force_upcast:
718
- self.vae.to(dtype)
719
-
720
- image_latents = image_latents.to(dtype)
721
- image_latents = self.vae.config.scaling_factor * image_latents
722
-
723
- return image_latents
724
-
725
- def prepare_mask_latents(
726
- self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance
727
- ):
728
- # resize the mask to latents shape as we concatenate the mask to the latents
729
- # we do that before converting to dtype to avoid breaking in case we're using cpu_offload
730
- # and half precision
731
- mask = torch.nn.functional.interpolate(
732
- mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor)
733
- )
734
- mask = mask.to(device=device, dtype=dtype)
735
-
736
- # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method
737
- if mask.shape[0] < batch_size:
738
- if not batch_size % mask.shape[0] == 0:
739
- raise ValueError(
740
- "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to"
741
- f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number"
742
- " of masks that you pass is divisible by the total requested batch size."
743
- )
744
- mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1)
745
-
746
- mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask
747
-
748
- masked_image_latents = None
749
- if masked_image is not None:
750
- masked_image = masked_image.to(device=device, dtype=dtype)
751
- masked_image_latents = self._encode_vae_image(masked_image, generator=generator)
752
- if masked_image_latents.shape[0] < batch_size:
753
- if not batch_size % masked_image_latents.shape[0] == 0:
754
- raise ValueError(
755
- "The passed images and the required batch size don't match. Images are supposed to be duplicated"
756
- f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed."
757
- " Make sure the number of images that you pass is divisible by the total requested batch size."
758
- )
759
- masked_image_latents = masked_image_latents.repeat(
760
- batch_size // masked_image_latents.shape[0], 1, 1, 1
761
- )
762
-
763
- masked_image_latents = (
764
- torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents
765
- )
766
-
767
- # aligning device to prevent device errors when concating it with the latent model input
768
- masked_image_latents = masked_image_latents.to(device=device, dtype=dtype)
769
-
770
- return mask, masked_image_latents
771
-
772
- # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline.get_timesteps
773
- def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None):
774
- # get the original timestep using init_timestep
775
- if denoising_start is None:
776
- init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
777
- t_start = max(num_inference_steps - init_timestep, 0)
778
- else:
779
- t_start = 0
780
-
781
- timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
782
-
783
- # Strength is irrelevant if we directly request a timestep to start at;
784
- # that is, strength is determined by the denoising_start instead.
785
- if denoising_start is not None:
786
- discrete_timestep_cutoff = int(
787
- round(
788
- self.scheduler.config.num_train_timesteps
789
- - (denoising_start * self.scheduler.config.num_train_timesteps)
790
- )
791
- )
792
- timesteps = list(filter(lambda ts: ts < discrete_timestep_cutoff, timesteps))
793
- return torch.tensor(timesteps), len(timesteps)
794
-
795
- return timesteps, num_inference_steps - t_start
796
-
797
- # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline._get_add_time_ids
798
- def _get_add_time_ids(
799
- self, original_size, crops_coords_top_left, target_size, aesthetic_score, negative_aesthetic_score, dtype
800
- ):
801
- if self.config.requires_aesthetics_score:
802
- add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,))
803
- add_neg_time_ids = list(original_size + crops_coords_top_left + (negative_aesthetic_score,))
804
- else:
805
- add_time_ids = list(original_size + crops_coords_top_left + target_size)
806
- add_neg_time_ids = list(original_size + crops_coords_top_left + target_size)
807
-
808
- passed_add_embed_dim = (
809
- self.unet.config.addition_time_embed_dim * len(add_time_ids) + self.text_encoder_2.config.projection_dim
810
- )
811
- expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
812
-
813
- if (
814
- expected_add_embed_dim > passed_add_embed_dim
815
- and (expected_add_embed_dim - passed_add_embed_dim) == self.unet.config.addition_time_embed_dim
816
- ):
817
- raise ValueError(
818
- f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to enable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=True)` to make sure `aesthetic_score` {aesthetic_score} and `negative_aesthetic_score` {negative_aesthetic_score} is correctly used by the model."
819
- )
820
- elif (
821
- expected_add_embed_dim < passed_add_embed_dim
822
- and (passed_add_embed_dim - expected_add_embed_dim) == self.unet.config.addition_time_embed_dim
823
- ):
824
- raise ValueError(
825
- f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to disable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=False)` to make sure `target_size` {target_size} is correctly used by the model."
826
- )
827
- elif expected_add_embed_dim != passed_add_embed_dim:
828
- raise ValueError(
829
- f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
830
- )
831
-
832
- add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
833
- add_neg_time_ids = torch.tensor([add_neg_time_ids], dtype=dtype)
834
-
835
- return add_time_ids, add_neg_time_ids
836
-
837
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
838
- def upcast_vae(self):
839
- dtype = self.vae.dtype
840
- self.vae.to(dtype=torch.float32)
841
- use_torch_2_0_or_xformers = isinstance(
842
- self.vae.decoder.mid_block.attentions[0].processor,
843
- (
844
- AttnProcessor2_0,
845
- XFormersAttnProcessor,
846
- LoRAXFormersAttnProcessor,
847
- LoRAAttnProcessor2_0,
848
- ),
849
- )
850
- # if xformers or torch_2_0 is used attention block does not need
851
- # to be in float32 which can save lots of memory
852
- if use_torch_2_0_or_xformers:
853
- self.vae.post_quant_conv.to(dtype)
854
- self.vae.decoder.conv_in.to(dtype)
855
- self.vae.decoder.mid_block.to(dtype)
856
-
857
- @torch.no_grad()
858
- @replace_example_docstring(EXAMPLE_DOC_STRING)
859
- def __call__(
860
- self,
861
- prompt: Union[str, List[str]] = None,
862
- prompt_2: Optional[Union[str, List[str]]] = None,
863
- image: Union[torch.FloatTensor, PIL.Image.Image] = None,
864
- mask_image: Union[torch.FloatTensor, PIL.Image.Image] = None,
865
- height: Optional[int] = None,
866
- width: Optional[int] = None,
867
- strength: float = 1.0,
868
- num_inference_steps: int = 50,
869
- denoising_start: Optional[float] = None,
870
- denoising_end: Optional[float] = None,
871
- guidance_scale: float = 7.5,
872
- negative_prompt: Optional[Union[str, List[str]]] = None,
873
- negative_prompt_2: Optional[Union[str, List[str]]] = None,
874
- num_images_per_prompt: Optional[int] = 1,
875
- eta: float = 0.0,
876
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
877
- latents: Optional[torch.FloatTensor] = None,
878
- prompt_embeds: Optional[torch.FloatTensor] = None,
879
- negative_prompt_embeds: Optional[torch.FloatTensor] = None,
880
- pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
881
- negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
882
- output_type: Optional[str] = "pil",
883
- return_dict: bool = True,
884
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
885
- callback_steps: int = 1,
886
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
887
- guidance_rescale: float = 0.0,
888
- original_size: Tuple[int, int] = None,
889
- crops_coords_top_left: Tuple[int, int] = (0, 0),
890
- target_size: Tuple[int, int] = None,
891
- aesthetic_score: float = 6.0,
892
- negative_aesthetic_score: float = 2.5,
893
- ):
894
- r"""
895
- Function invoked when calling the pipeline for generation.
896
-
897
- Args:
898
- prompt (`str` or `List[str]`, *optional*):
899
- The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
900
- instead.
901
- prompt_2 (`str` or `List[str]`, *optional*):
902
- The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
903
- used in both text-encoders
904
- image (`PIL.Image.Image`):
905
- `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will
906
- be masked out with `mask_image` and repainted according to `prompt`.
907
- mask_image (`PIL.Image.Image`):
908
- `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
909
- repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted
910
- to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L)
911
- instead of 3, so the expected shape would be `(B, H, W, 1)`.
912
- height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
913
- The height in pixels of the generated image.
914
- width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
915
- The width in pixels of the generated image.
916
- strength (`float`, *optional*, defaults to 1.):
917
- Conceptually, indicates how much to transform the masked portion of the reference `image`. Must be
918
- between 0 and 1. `image` will be used as a starting point, adding more noise to it the larger the
919
- `strength`. The number of denoising steps depends on the amount of noise initially added. When
920
- `strength` is 1, added noise will be maximum and the denoising process will run for the full number of
921
- iterations specified in `num_inference_steps`. A value of 1, therefore, essentially ignores the masked
922
- portion of the reference `image`. Note that in the case of `denoising_start` being declared as an
923
- integer, the value of `strength` will be ignored.
924
- num_inference_steps (`int`, *optional*, defaults to 50):
925
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
926
- expense of slower inference.
927
- denoising_start (`float`, *optional*):
928
- When specified, indicates the fraction (between 0.0 and 1.0) of the total denoising process to be
929
- bypassed before it is initiated. Consequently, the initial part of the denoising process is skipped and
930
- it is assumed that the passed `image` is a partly denoised image. Note that when this is specified,
931
- strength will be ignored. The `denoising_start` parameter is particularly beneficial when this pipeline
932
- is integrated into a "Mixture of Denoisers" multi-pipeline setup, as detailed in [**Refining the Image
933
- Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output).
934
- denoising_end (`float`, *optional*):
935
- When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
936
- completed before it is intentionally prematurely terminated. As a result, the returned sample will
937
- still retain a substantial amount of noise (ca. final 20% of timesteps still needed) and should be
938
- denoised by a successor pipeline that has `denoising_start` set to 0.8 so that it only denoises the
939
- final 20% of the scheduler. The denoising_end parameter should ideally be utilized when this pipeline
940
- forms a part of a "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
941
- Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output).
942
- guidance_scale (`float`, *optional*, defaults to 7.5):
943
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
944
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
945
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
946
- 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
947
- usually at the expense of lower image quality.
948
- negative_prompt (`str` or `List[str]`, *optional*):
949
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
950
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
951
- less than `1`).
952
- negative_prompt_2 (`str` or `List[str]`, *optional*):
953
- The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
954
- `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
955
- prompt_embeds (`torch.FloatTensor`, *optional*):
956
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
957
- provided, text embeddings will be generated from `prompt` input argument.
958
- negative_prompt_embeds (`torch.FloatTensor`, *optional*):
959
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
960
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
961
- argument.
962
- pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
963
- Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
964
- If not provided, pooled text embeddings will be generated from `prompt` input argument.
965
- negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
966
- Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
967
- weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
968
- input argument.
969
- num_images_per_prompt (`int`, *optional*, defaults to 1):
970
- The number of images to generate per prompt.
971
- eta (`float`, *optional*, defaults to 0.0):
972
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
973
- [`schedulers.DDIMScheduler`], will be ignored for others.
974
- generator (`torch.Generator`, *optional*):
975
- One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
976
- to make generation deterministic.
977
- latents (`torch.FloatTensor`, *optional*):
978
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
979
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
980
- tensor will be generated by sampling using the supplied random `generator`.
981
- output_type (`str`, *optional*, defaults to `"pil"`):
982
- The output format of the generated image. Choose between
983
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
984
- return_dict (`bool`, *optional*, defaults to `True`):
985
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
986
- plain tuple.
987
- callback (`Callable`, *optional*):
988
- A function that will be called every `callback_steps` steps during inference. The function will be
989
- called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
990
- callback_steps (`int`, *optional*, defaults to 1):
991
- The frequency at which the `callback` function will be called. If not specified, the callback will be
992
- called at every step.
993
- cross_attention_kwargs (`dict`, *optional*):
994
- A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
995
- `self.processor` in
996
- [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
997
- original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
998
- If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
999
- `original_size` defaults to `(width, height)` if not specified. Part of SDXL's micro-conditioning as
1000
- explained in section 2.2 of
1001
- [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1002
- crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
1003
- `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
1004
- `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
1005
- `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
1006
- [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1007
- target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1008
- For most cases, `target_size` should be set to the desired height and width of the generated image. If
1009
- not specified it will default to `(width, height)`. Part of SDXL's micro-conditioning as explained in
1010
- section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1011
- aesthetic_score (`float`, *optional*, defaults to 6.0):
1012
- Used to simulate an aesthetic score of the generated image by influencing the positive text condition.
1013
- Part of SDXL's micro-conditioning as explained in section 2.2 of
1014
- [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1015
- negative_aesthetic_score (`float`, *optional*, defaults to 2.5):
1016
- Part of SDXL's micro-conditioning as explained in section 2.2 of
1017
- [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). Can be used to
1018
- simulate an aesthetic score of the generated image by influencing the negative text condition.
1019
-
1020
- Examples:
1021
-
1022
- Returns:
1023
- [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] or `tuple`:
1024
- [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a
1025
- `tuple`. When returning a tuple, the first element is a list with the generated images.
1026
- """
1027
- # 0. Default height and width to unet
1028
- height = height or self.unet.config.sample_size * self.vae_scale_factor
1029
- width = width or self.unet.config.sample_size * self.vae_scale_factor
1030
-
1031
- # 1. Check inputs
1032
- self.check_inputs(
1033
- prompt,
1034
- prompt_2,
1035
- height,
1036
- width,
1037
- strength,
1038
- callback_steps,
1039
- negative_prompt,
1040
- negative_prompt_2,
1041
- prompt_embeds,
1042
- negative_prompt_embeds,
1043
- )
1044
-
1045
- # 2. Define call parameters
1046
- if prompt is not None and isinstance(prompt, str):
1047
- batch_size = 1
1048
- elif prompt is not None and isinstance(prompt, list):
1049
- batch_size = len(prompt)
1050
- else:
1051
- batch_size = prompt_embeds.shape[0]
1052
-
1053
- device = self._execution_device
1054
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
1055
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
1056
- # corresponds to doing no classifier free guidance.
1057
- do_classifier_free_guidance = guidance_scale > 1.0
1058
-
1059
- # 3. Encode input prompt
1060
- text_encoder_lora_scale = (
1061
- cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
1062
- )
1063
-
1064
- (
1065
- prompt_embeds,
1066
- negative_prompt_embeds,
1067
- pooled_prompt_embeds,
1068
- negative_pooled_prompt_embeds,
1069
- ) = self.encode_prompt(
1070
- prompt=prompt,
1071
- prompt_2=prompt_2,
1072
- device=device,
1073
- num_images_per_prompt=num_images_per_prompt,
1074
- do_classifier_free_guidance=do_classifier_free_guidance,
1075
- negative_prompt=negative_prompt,
1076
- negative_prompt_2=negative_prompt_2,
1077
- prompt_embeds=prompt_embeds,
1078
- negative_prompt_embeds=negative_prompt_embeds,
1079
- pooled_prompt_embeds=pooled_prompt_embeds,
1080
- negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
1081
- lora_scale=text_encoder_lora_scale,
1082
- )
1083
-
1084
- # 4. set timesteps
1085
- def denoising_value_valid(dnv):
1086
- return isinstance(dnv, float) and 0 < dnv < 1
1087
-
1088
- self.scheduler.set_timesteps(num_inference_steps, device=device)
1089
- timesteps, num_inference_steps = self.get_timesteps(
1090
- num_inference_steps, strength, device, denoising_start=denoising_start if denoising_value_valid(denoising_start) else None
1091
- )
1092
- # check that number of inference steps is not < 1 - as this doesn't make sense
1093
- if num_inference_steps < 1:
1094
- raise ValueError(
1095
- f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline"
1096
- f"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline."
1097
- )
1098
- # at which timestep to set the initial noise (n.b. 50% if strength is 0.5)
1099
- latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
1100
- # create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise
1101
- is_strength_max = strength == 1.0
1102
-
1103
- # 5. Preprocess mask and image
1104
- mask, masked_image, init_image = prepare_mask_and_masked_image(
1105
- image, mask_image, height, width, return_image=True
1106
- )
1107
-
1108
- # 6. Prepare latent variables
1109
- num_channels_latents = self.vae.config.latent_channels
1110
- num_channels_unet = self.unet.config.in_channels
1111
- return_image_latents = num_channels_unet == 4
1112
-
1113
- add_noise = True if denoising_start is None else False
1114
- latents_outputs = self.prepare_latents(
1115
- batch_size * num_images_per_prompt,
1116
- num_channels_latents,
1117
- height,
1118
- width,
1119
- prompt_embeds.dtype,
1120
- device,
1121
- generator,
1122
- latents,
1123
- image=init_image,
1124
- timestep=latent_timestep,
1125
- is_strength_max=is_strength_max,
1126
- add_noise=add_noise,
1127
- return_noise=True,
1128
- return_image_latents=return_image_latents,
1129
- )
1130
-
1131
- if return_image_latents:
1132
- latents, noise, image_latents = latents_outputs
1133
- else:
1134
- latents, noise = latents_outputs
1135
-
1136
- # 7. Prepare mask latent variables
1137
- mask, masked_image_latents = self.prepare_mask_latents(
1138
- mask,
1139
- masked_image,
1140
- batch_size * num_images_per_prompt,
1141
- height,
1142
- width,
1143
- prompt_embeds.dtype,
1144
- device,
1145
- generator,
1146
- do_classifier_free_guidance,
1147
- )
1148
-
1149
- # 8. Check that sizes of mask, masked image and latents match
1150
- if num_channels_unet == 9:
1151
- # default case for runwayml/stable-diffusion-inpainting
1152
- num_channels_mask = mask.shape[1]
1153
- num_channels_masked_image = masked_image_latents.shape[1]
1154
- if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:
1155
- raise ValueError(
1156
- f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
1157
- f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
1158
- f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}"
1159
- f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of"
1160
- " `pipeline.unet` or your `mask_image` or `image` input."
1161
- )
1162
- elif num_channels_unet != 4:
1163
- raise ValueError(
1164
- f"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}."
1165
- )
1166
- # 8.1 Prepare extra step kwargs.
1167
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
1168
-
1169
- # 9. Recompute output height and width from the latent shape (TODO: logic should ideally be moved out of the pipeline)
1170
- height, width = latents.shape[-2:]
1171
- height = height * self.vae_scale_factor
1172
- width = width * self.vae_scale_factor
1173
-
1174
- original_size = original_size or (height, width)
1175
- target_size = target_size or (height, width)
1176
-
1177
- # 10. Prepare added time ids & embeddings
1178
- add_text_embeds = pooled_prompt_embeds
1179
- add_time_ids, add_neg_time_ids = self._get_add_time_ids(
1180
- original_size,
1181
- crops_coords_top_left,
1182
- target_size,
1183
- aesthetic_score,
1184
- negative_aesthetic_score,
1185
- dtype=prompt_embeds.dtype,
1186
- )
1187
- add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1)
1188
-
1189
- if do_classifier_free_guidance:
1190
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
1191
- add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
1192
- add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1)
1193
- add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0)
1194
-
1195
- prompt_embeds = prompt_embeds.to(device)
1196
- add_text_embeds = add_text_embeds.to(device)
1197
- add_time_ids = add_time_ids.to(device)
1198
-
1199
- # 11. Denoising loop
1200
- num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
1201
-
1202
- if (
1203
- denoising_end is not None
1204
- and denoising_start is not None
1205
- and denoising_value_valid(denoising_end)
1206
- and denoising_value_valid(denoising_start)
1207
- and denoising_start >= denoising_end
1208
- ):
1209
- raise ValueError(
1210
- f"`denoising_start`: {denoising_start} cannot be larger than or equal to `denoising_end`: "
1211
- + f" {denoising_end} when using type float."
1212
- )
1213
- elif denoising_end is not None and denoising_value_valid(denoising_end):
1214
- discrete_timestep_cutoff = int(
1215
- round(
1216
- self.scheduler.config.num_train_timesteps
1217
- - (denoising_end * self.scheduler.config.num_train_timesteps)
1218
- )
1219
- )
1220
- num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
1221
- timesteps = timesteps[:num_inference_steps]
1222
-
1223
- with self.progress_bar(total=num_inference_steps) as progress_bar:
1224
- for i, t in enumerate(timesteps):
1225
- # expand the latents if we are doing classifier free guidance
1226
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
1227
-
1228
- # concat latents, mask, masked_image_latents in the channel dimension
1229
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1230
-
1231
- if num_channels_unet == 9:
1232
- latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)
1233
-
1234
- # predict the noise residual
1235
- added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
1236
- noise_pred = self.unet(
1237
- latent_model_input,
1238
- t,
1239
- encoder_hidden_states=prompt_embeds,
1240
- cross_attention_kwargs=cross_attention_kwargs,
1241
- added_cond_kwargs=added_cond_kwargs,
1242
- return_dict=False,
1243
- )[0]
1244
-
1245
- # perform guidance
1246
- if do_classifier_free_guidance:
1247
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1248
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
1249
-
1250
- if do_classifier_free_guidance and guidance_rescale > 0.0:
1251
- # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
1252
- noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
1253
-
1254
- # compute the previous noisy sample x_t -> x_t-1
1255
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
1256
-
1257
- if num_channels_unet == 4:
1258
- init_latents_proper = image_latents[:1]
1259
- init_mask = mask[:1]
1260
-
1261
- if i < len(timesteps) - 1:
1262
- noise_timestep = timesteps[i + 1]
1263
- init_latents_proper = self.scheduler.add_noise(
1264
- init_latents_proper, noise, torch.tensor([noise_timestep])
1265
- )
1266
-
1267
- latents = (1 - init_mask) * init_latents_proper + init_mask * latents
1268
-
1269
- # call the callback, if provided
1270
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1271
- progress_bar.update()
1272
- if callback is not None and i % callback_steps == 0:
1273
- callback(i, t, latents)
1274
-
1275
- # make sure the VAE is in float32 mode, as it overflows in float16
1276
- if self.vae.dtype == torch.float16 and self.vae.config.force_upcast:
1277
- self.upcast_vae()
1278
- latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
1279
-
1280
- if not output_type == "latent":
1281
- image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
1282
- else:
1283
- return StableDiffusionXLPipelineOutput(images=latents)
1284
-
1285
- # apply watermark if available
1286
- if self.watermark is not None:
1287
- image = self.watermark.apply_watermark(image)
1288
-
1289
- image = self.image_processor.postprocess(image, output_type=output_type)
1290
-
1291
- # Offload last model to CPU
1292
- if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
1293
- self.final_offload_hook.offload()
1294
-
1295
- if not return_dict:
1296
- return (image,)
1297
-
1298
- return StableDiffusionXLPipelineOutput(images=image)
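For reference, the pipeline deleted above mirrors the upstream SDXL inpainting API. The following is a minimal usage sketch, assuming the stock `diffusers` `StableDiffusionXLInpaintPipeline`; the model id is the public SDXL base checkpoint and the image URLs are placeholders, not paths from this repo:

```python
# Minimal sketch of SDXL inpainting with the upstream diffusers pipeline.
# Assumes diffusers, torch, and PIL are installed; image URLs are illustrative.
import torch
from diffusers import StableDiffusionXLInpaintPipeline
from diffusers.utils import load_image

pipe = StableDiffusionXLInpaintPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")

init_image = load_image("https://example.com/input.png").convert("RGB")
mask_image = load_image("https://example.com/mask.png").convert("L")  # white = repaint

result = pipe(
    prompt="a tiger sitting on a park bench",
    image=init_image,
    mask_image=mask_image,
    strength=0.85,           # 1.0 regenerates the masked region from pure noise
    num_inference_steps=50,
    guidance_scale=7.5,
)
result.images[0].save("inpainted.png")
```

With `strength=1.0` the masked region is re-generated from pure noise, which corresponds to the `is_strength_max` branch of `prepare_latents` above.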
spaces/Andy1621/uniformer_image_detection/configs/fast_rcnn/fast_rcnn_r50_caffe_fpn_1x_coco.py DELETED
@@ -1,45 +0,0 @@
1
- _base_ = './fast_rcnn_r50_fpn_1x_coco.py'
2
-
3
- model = dict(
4
- pretrained='open-mmlab://detectron2/resnet50_caffe',
5
- backbone=dict(
6
- norm_cfg=dict(type='BN', requires_grad=False), style='caffe'))
7
-
8
- # use caffe img_norm
9
- img_norm_cfg = dict(
10
- mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
11
- train_pipeline = [
12
- dict(type='LoadImageFromFile'),
13
- dict(type='LoadProposals', num_max_proposals=2000),
14
- dict(type='LoadAnnotations', with_bbox=True),
15
- dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
16
- dict(type='RandomFlip', flip_ratio=0.5),
17
- dict(type='Normalize', **img_norm_cfg),
18
- dict(type='Pad', size_divisor=32),
19
- dict(type='DefaultFormatBundle'),
20
- dict(type='Collect', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']),
21
- ]
22
- test_pipeline = [
23
- dict(type='LoadImageFromFile'),
24
- dict(type='LoadProposals', num_max_proposals=None),
25
- dict(
26
- type='MultiScaleFlipAug',
27
- img_scale=(1333, 800),
28
- flip=False,
29
- transforms=[
30
- dict(type='Resize', keep_ratio=True),
31
- dict(type='RandomFlip'),
32
- dict(type='Normalize', **img_norm_cfg),
33
- dict(type='Pad', size_divisor=32),
34
- dict(type='ImageToTensor', keys=['img']),
35
- dict(type='ToTensor', keys=['proposals']),
36
- dict(
37
- type='ToDataContainer',
38
- fields=[dict(key='proposals', stack=False)]),
39
- dict(type='Collect', keys=['img', 'proposals']),
40
- ])
41
- ]
42
- data = dict(
43
- train=dict(pipeline=train_pipeline),
44
- val=dict(pipeline=test_pipeline),
45
- test=dict(pipeline=test_pipeline))
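Unlike Faster R-CNN, this Fast R-CNN config consumes precomputed proposals (note the `LoadProposals` steps). A small sketch of loading and inspecting the merged config with mmcv's `Config` API, assuming an mmdetection checkout; the relative path is illustrative:

```python
# Sketch: load and inspect the Fast R-CNN config above.
from mmcv import Config

cfg = Config.fromfile("configs/fast_rcnn/fast_rcnn_r50_caffe_fpn_1x_coco.py")
print(cfg.model.backbone.norm_cfg)                   # frozen BN, caffe-style backbone
print([t["type"] for t in cfg.data.train.pipeline])  # includes 'LoadProposals'
```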
spaces/Andy1621/uniformer_image_detection/configs/res2net/README.md DELETED
@@ -1,65 +0,0 @@
1
- # Res2Net for object detection and instance segmentation
2
-
3
- ## Introduction
4
-
5
- [ALGORITHM]
6
-
7
- We propose a novel building block for CNNs, namely Res2Net, by constructing hierarchical residual-like connections within one single residual block. The Res2Net represents multi-scale features at a granular level and increases the range of receptive fields for each network layer.
8
-
9
- | Backbone |Params. | GFLOPs | top-1 err. | top-5 err. |
10
- | :-------------: |:----: | :-----: | :--------: | :--------: |
11
- | ResNet-101 |44.6 M | 7.8 | 22.63 | 6.44 |
12
- | ResNeXt-101-64x4d |83.5M | 15.5 | 20.40 | - |
13
- | HRNetV2p-W48 | 77.5M | 16.1 | 20.70 | 5.50 |
14
- | Res2Net-101 | 45.2M | 8.3 | 18.77 | 4.64 |
15
-
16
- Compared with other backbone networks, Res2Net requires fewer parameters and FLOPs.
17
-
18
- **Note:**
19
-
20
- - GFLOPs for classification are calculated with image size (224x224).
21
-
22
- ```latex
23
- @article{gao2019res2net,
24
- title={Res2Net: A New Multi-scale Backbone Architecture},
25
- author={Gao, Shang-Hua and Cheng, Ming-Ming and Zhao, Kai and Zhang, Xin-Yu and Yang, Ming-Hsuan and Torr, Philip},
26
- journal={IEEE TPAMI},
27
- year={2020},
28
- doi={10.1109/TPAMI.2019.2938758},
29
- }
30
- ```
31
-
32
- ## Results and Models
33
-
34
- ### Faster R-CNN
35
-
36
- | Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download |
37
- | :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :------: | :--------: |
38
- |R2-101-FPN | pytorch | 2x | 7.4 | - | 43.0 |[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/res2net/faster_rcnn_r2_101_fpn_2x_coco/faster_rcnn_r2_101_fpn_2x_coco-175f1da6.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/res2net/faster_rcnn_r2_101_fpn_2x_coco/faster_rcnn_r2_101_fpn_2x_coco_20200514_231734.log.json) |
39
-
40
- ### Mask R-CNN
41
-
42
- | Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download |
43
- | :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :------: | :--------: |
44
- |R2-101-FPN | pytorch | 2x | 7.9 | - | 43.6 | 38.7 |[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/res2net/mask_rcnn_r2_101_fpn_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/res2net/mask_rcnn_r2_101_fpn_2x_coco/mask_rcnn_r2_101_fpn_2x_coco-17f061e8.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/res2net/mask_rcnn_r2_101_fpn_2x_coco/mask_rcnn_r2_101_fpn_2x_coco_20200515_002413.log.json) |
45
-
46
- ### Cascade R-CNN
47
-
48
- | Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download |
49
- | :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :------: | :--------: |
50
- |R2-101-FPN | pytorch | 20e | 7.8 | - | 45.7 |[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/res2net/cascade_rcnn_r2_101_fpn_20e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/res2net/cascade_rcnn_r2_101_fpn_20e_coco/cascade_rcnn_r2_101_fpn_20e_coco-f4b7b7db.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/res2net/cascade_rcnn_r2_101_fpn_20e_coco/cascade_rcnn_r2_101_fpn_20e_coco_20200515_091644.log.json) |
51
-
52
- ### Cascade Mask R-CNN
53
-
54
- | Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download |
55
- | :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :------: | :--------: |
56
- |R2-101-FPN | pytorch | 20e | 9.5 | - | 46.4 | 40.0 |[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/res2net/cascade_mask_rcnn_r2_101_fpn_20e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/res2net/cascade_mask_rcnn_r2_101_fpn_20e_coco/cascade_mask_rcnn_r2_101_fpn_20e_coco-8a7b41e1.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/res2net/cascade_mask_rcnn_r2_101_fpn_20e_coco/cascade_mask_rcnn_r2_101_fpn_20e_coco_20200515_091645.log.json) |
57
-
58
- ### Hybrid Task Cascade (HTC)
59
-
60
- | Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download |
61
- | :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :------: | :--------: |
62
- | R2-101-FPN | pytorch | 20e | - | - | 47.5 | 41.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/res2net/htc_r2_101_fpn_20e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/res2net/htc_r2_101_fpn_20e_coco/htc_r2_101_fpn_20e_coco-3a8d2112.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/res2net/htc_r2_101_fpn_20e_coco/htc_r2_101_fpn_20e_coco_20200515_150029.log.json) |
63
-
64
- - Res2Net ImageNet pretrained models are in [Res2Net-PretrainedModels](https://github.com/Res2Net/Res2Net-PretrainedModels).
65
- - More applications of Res2Net are in [Res2Net-Github](https://github.com/Res2Net/).
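The configs linked in the tables above follow the usual mmdetection pattern of swapping only the backbone of a base detector. A sketch of what such a config looks like, modeled on the linked `faster_rcnn_r2_101_fpn_2x_coco.py` (field values shown here are illustrative):

```python
# Sketch of an mmdetection config that swaps a Res2Net-101 backbone into Faster R-CNN.
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_2x_coco.py'
model = dict(
    pretrained='open-mmlab://res2net101_v1d_26w_4s',
    backbone=dict(type='Res2Net', depth=101, scales=4, base_width=26))
```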
spaces/Andy1621/uniformer_image_detection/mmcv_custom/runner/epoch_based_runner.py DELETED
@@ -1,104 +0,0 @@
- # Copyright (c) Open-MMLab. All rights reserved.
- import os.path as osp
- import platform
- import shutil
-
- import torch
- from torch.optim import Optimizer
-
- import mmcv
- from mmcv.runner import RUNNERS, EpochBasedRunner
- from .checkpoint import save_checkpoint
-
- try:
-     import apex
- except ImportError:
-     print('apex is not installed')
-
-
- @RUNNERS.register_module()
- class EpochBasedRunnerAmp(EpochBasedRunner):
-     """Epoch-based Runner with AMP support.
-
-     This runner trains models epoch by epoch.
-     """
-
-     def save_checkpoint(self,
-                         out_dir,
-                         filename_tmpl='epoch_{}.pth',
-                         save_optimizer=True,
-                         meta=None,
-                         create_symlink=True):
-         """Save the checkpoint.
-
-         Args:
-             out_dir (str): The directory that checkpoints are saved.
-             filename_tmpl (str, optional): The checkpoint filename template,
-                 which contains a placeholder for the epoch number.
-                 Defaults to 'epoch_{}.pth'.
-             save_optimizer (bool, optional): Whether to save the optimizer to
-                 the checkpoint. Defaults to True.
-             meta (dict, optional): The meta information to be saved in the
-                 checkpoint. Defaults to None.
-             create_symlink (bool, optional): Whether to create a symlink
-                 "latest.pth" to point to the latest checkpoint.
-                 Defaults to True.
-         """
-         if meta is None:
-             meta = dict(epoch=self.epoch + 1, iter=self.iter)
-         elif isinstance(meta, dict):
-             meta.update(epoch=self.epoch + 1, iter=self.iter)
-         else:
-             raise TypeError(
-                 f'meta should be a dict or None, but got {type(meta)}')
-         if self.meta is not None:
-             meta.update(self.meta)
-
-         filename = filename_tmpl.format(self.epoch + 1)
-         filepath = osp.join(out_dir, filename)
-         optimizer = self.optimizer if save_optimizer else None
-         save_checkpoint(self.model, filepath, optimizer=optimizer, meta=meta)
-         # in some environments, `os.symlink` is not supported, you may need to
-         # set `create_symlink` to False
-         if create_symlink:
-             dst_file = osp.join(out_dir, 'latest.pth')
-             if platform.system() != 'Windows':
-                 mmcv.symlink(filename, dst_file)
-             else:
-                 shutil.copy(filepath, dst_file)
-
-     def resume(self,
-                checkpoint,
-                resume_optimizer=True,
-                map_location='default'):
-         if map_location == 'default':
-             if torch.cuda.is_available():
-                 device_id = torch.cuda.current_device()
-                 checkpoint = self.load_checkpoint(
-                     checkpoint,
-                     map_location=lambda storage, loc: storage.cuda(device_id))
-             else:
-                 checkpoint = self.load_checkpoint(checkpoint)
-         else:
-             checkpoint = self.load_checkpoint(
-                 checkpoint, map_location=map_location)
-
-         self._epoch = checkpoint['meta']['epoch']
-         self._iter = checkpoint['meta']['iter']
-         if 'optimizer' in checkpoint and resume_optimizer:
-             if isinstance(self.optimizer, Optimizer):
-                 self.optimizer.load_state_dict(checkpoint['optimizer'])
-             elif isinstance(self.optimizer, dict):
-                 for k in self.optimizer.keys():
-                     self.optimizer[k].load_state_dict(
-                         checkpoint['optimizer'][k])
-             else:
-                 raise TypeError(
-                     'Optimizer should be dict or torch.optim.Optimizer '
-                     f'but got {type(self.optimizer)}')
-
-         if 'amp' in checkpoint:
-             apex.amp.load_state_dict(checkpoint['amp'])
-             self.logger.info('load amp state dict')
-
-         self.logger.info('resumed epoch %d, iter %d', self.epoch, self.iter)
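For context, a hedged sketch of how this AMP runner might be driven once it is built through mmcv's runner registry; the model, optimizer, logger and data pipeline are assumed to exist elsewhere, and the work_dir path is hypothetical.

```python
# Hedged usage sketch for EpochBasedRunnerAmp. `model`, `optimizer` and `logger`
# are assumed to be built elsewhere; 'work_dirs/example' is a hypothetical path.
from mmcv.runner import build_runner

runner = build_runner(
    dict(type='EpochBasedRunnerAmp', max_epochs=12),  # resolved via the RUNNERS registry
    default_args=dict(model=model, optimizer=optimizer,
                      work_dir='work_dirs/example', logger=logger, meta=dict()))

# resume() restores epoch/iter, optimizer state and, if present, the apex AMP state.
runner.resume('work_dirs/example/latest.pth')

# Checkpoints are written as epoch_{N}.pth plus a 'latest.pth' symlink (copy on Windows).
runner.save_checkpoint('work_dirs/example')
```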
spaces/Andy1621/uniformer_image_detection/mmdet/models/detectors/mask_rcnn.py DELETED
@@ -1,24 +0,0 @@
- from ..builder import DETECTORS
- from .two_stage import TwoStageDetector
-
-
- @DETECTORS.register_module()
- class MaskRCNN(TwoStageDetector):
-     """Implementation of `Mask R-CNN <https://arxiv.org/abs/1703.06870>`_"""
-
-     def __init__(self,
-                  backbone,
-                  rpn_head,
-                  roi_head,
-                  train_cfg,
-                  test_cfg,
-                  neck=None,
-                  pretrained=None):
-         super(MaskRCNN, self).__init__(
-             backbone=backbone,
-             neck=neck,
-             rpn_head=rpn_head,
-             roi_head=roi_head,
-             train_cfg=train_cfg,
-             test_cfg=test_cfg,
-             pretrained=pretrained)
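A hedged sketch of how this registered detector is normally instantiated from a config file through mmdetection's builder; the config path assumes a stock mmdetection checkout.

```python
# Hedged sketch: build the registered MaskRCNN class from a standard config file.
from mmcv import Config
from mmdet.models import build_detector

cfg = Config.fromfile('configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py')  # assumed path
model = build_detector(cfg.model,
                       train_cfg=cfg.get('train_cfg'),
                       test_cfg=cfg.get('test_cfg'))
print(type(model).__name__)  # 'MaskRCNN', resolved through the DETECTORS registry
```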
spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r18-d8_769x769_80k_cityscapes.py DELETED
@@ -1,11 +0,0 @@
- _base_ = './deeplabv3plus_r50-d8_769x769_80k_cityscapes.py'
- model = dict(
-     pretrained='open-mmlab://resnet18_v1c',
-     backbone=dict(depth=18),
-     decode_head=dict(
-         c1_in_channels=64,
-         c1_channels=12,
-         in_channels=512,
-         channels=128,
-     ),
-     auxiliary_head=dict(in_channels=256, channels=64))
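Because this file only overrides a handful of fields, the effective settings come from merging it with its `_base_` parent. A hedged way to inspect that merge (the path assumes a standard mmsegmentation checkout):

```python
# Hedged sketch: inspect how the R-18 override merges into its _base_ R-50 config.
from mmcv import Config

cfg = Config.fromfile(
    'configs/deeplabv3plus/deeplabv3plus_r18-d8_769x769_80k_cityscapes.py')
print(cfg.model.backbone.depth)           # 18, overridden here
print(cfg.model.decode_head.in_channels)  # 512, overridden here
print(cfg.model.decode_head.type)         # inherited unchanged from the _base_ file
```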
spaces/AnimaLab/bias-test-gpt-pairs/mgr_requests.py DELETED
@@ -1,214 +0,0 @@
1
- import pandas as pd
2
- import gradio as gr
3
- import hashlib, base64
4
- import openai
5
- from tqdm import tqdm
6
- tqdm().pandas()
7
-
8
- # querying OpenAI for generation
9
- import openAI_manager as oai_mgr
10
- #import initOpenAI, examples_to_prompt, genChatGPT, generateTestSentences
11
-
12
- # bias testing manager
13
- import mgr_bias_scoring as bt_mgr
14
- import mgr_sentences as smgr
15
-
16
- # error messages
17
- from error_messages import *
18
-
19
- G_CORE_BIAS_NAME = None
20
-
21
- # hashing
22
- def getHashForString(text):
23
- d=hashlib.md5(bytes(text, encoding='utf-8')).digest()
24
- d=base64.urlsafe_b64encode(d)
25
-
26
- return d.decode('utf-8')
27
-
28
- def getBiasName(gr1_lst, gr2_lst, att1_lst, att2_lst):
29
- global G_CORE_BIAS_NAME
30
-
31
- bias_name = G_CORE_BIAS_NAME
32
- if bias_name == None:
33
- full_spec = ''.join(gr1_lst)+''.join(gr2_lst)+''.join(att1_lst)+''.join(att2_lst)
34
- hash = getHashForString(full_spec)
35
- bias_name = f"{gr1_lst[0].replace(' ','-')}_{gr2_lst[0].replace(' ','-')}__{att1_lst[0].replace(' ','-')}_{att2_lst[0].replace(' ','-')}_{hash}"
36
-
37
- return bias_name
38
-
39
- def _generateOnline(bias_spec, progress, key, num2gen, isSaving=False):
40
- test_sentences = []
41
- gen_err_msg = None
42
- genAttrCounts = {}
43
- print(f"Bias spec dict: {bias_spec}")
44
- g1, g2, a1, a2 = bt_mgr.get_words(bias_spec)
45
- print(f"A1: {a1}")
46
- print(f"A2: {a2}")
47
-
48
- if "custom_counts" in bias_spec:
49
- print("Bias spec is custom !!")
50
- genAttrCounts = bias_spec['custom_counts'][0]
51
- for a,c in bias_spec['custom_counts'][1].items():
52
- genAttrCounts[a] = c
53
- else:
54
- print("Bias spec is standard !!")
55
- genAttrCounts = {a:num2gen for a in a1+a2}
56
-
57
- # Initiate with key
58
- try:
59
- models = oai_mgr.initOpenAI(key)
60
- model_names = [m['id'] for m in models['data']]
61
- print(f"Model names: {model_names}")
62
- except openai.error.AuthenticationError as err:
63
- #raise gr.Error(OPENAI_INIT_ERROR.replace("<ERR>", str(err)))
64
- gen_err_msg = OPENAI_INIT_ERROR.replace("<ERR>", str(err))
65
-
66
- if gen_err_msg != None:
67
- return [], gen_err_msg
68
- else:
69
- if "gpt-3.5-turbo" in model_names:
70
- print("Access to ChatGPT")
71
- if "gpt-4" in model_names:
72
- print("Access to GPT-4")
73
-
74
- model_name = "gpt-3.5-turbo" #"gpt-4"
75
-
76
- # Generate one example
77
- #gen = genChatGPT(model_name, ["man","math"], 2, 5,
78
- # [{"Keywords": ["sky","blue"], "Sentence": "the sky is blue"}
79
- # ],
80
- # temperature=0.8)
81
- #print(f"Test gen: {gen}")
82
-
83
- # Generate all test sentences
84
-
85
- #gens = oai_mgr.generateTestSentences(model_name, g1+g2, a1+a2, num2gen, progress)
86
- gens = oai_mgr.generateTestSentencesCustom(model_name, g1, g2, a1+a2, genAttrCounts, bias_spec, progress)
87
- print("--GENS--")
88
- print(gens)
89
- if len(gens) == 0:
90
- print("No sentences generated, returning")
91
- return [], gen_err_msg
92
-
93
- for org_gt, at, s, gt1, gt2 in gens:
94
- test_sentences.append([s,org_gt,at,gt1,gt2])
95
-
96
- # save the generations immediately
97
- print("Making save dataframe...")
98
- save_df = pd.DataFrame(test_sentences, columns=["Sentence",'org_grp_term',
99
- "Attribute term", "Group term 1",
100
- "Group term 2"])
101
-
102
- ## make the templates to save
103
- # 1. bias specification
104
- print(f"Bias spec dict: {bias_spec}")
105
-
106
- # generate laternative sentence
107
- print(f"Columns before alternative sentence: {list(save_df.columns)}")
108
- save_df['Alternative Sentence'] = save_df.progress_apply(oai_mgr.chatgpt_sentence_alternative, axis=1, model_name=model_name)
109
- print(f"Columns after alternative sentence: {list(save_df.columns)}")
110
-
111
- # 2. convert to templates
112
- save_df['Template'] = save_df.progress_apply(bt_mgr.sentence_to_template_df, axis=1)
113
- print("Convert generated sentences to templates...")
114
- save_df[['Alternative Template','grp_refs']] = save_df.progress_apply(bt_mgr.ref_terms_sentence_to_template, axis=1)
115
- print(f"Columns with templates: {list(save_df.columns)}")
116
-
117
- # 3. convert to pairs
118
- print("Convert generated sentences to ordered pairs...")
119
- test_pairs_df = bt_mgr.convert2pairsFromDF(bias_spec, save_df)
120
- print(f"Test pairs cols: {list(test_pairs_df.columns)}")
121
-
122
- bias_name = getBiasName(g1, g2, a1, a2)
123
-
124
- save_df = save_df.rename(columns={"Sentence":'sentence',
125
- "Alternative Sentence":"alt_sentence",
126
- "Attribute term": 'att_term',
127
- "Template":"template",
128
- "Alternative Template": "alt_template",
129
- "Group term 1": "grp_term1",
130
- "Group term 2": "grp_term2"})
131
-
132
- save_df['label_1'] = test_pairs_df['label_1']
133
- save_df['label_2'] = test_pairs_df['label_2']
134
- save_df['bias_spec'] = bias_name
135
- save_df['type'] = 'tool'
136
- save_df['gen_model'] = model_name
137
-
138
- col_order = ["sentence", "alt_sentence", "org_grp_term", "att_term", "template",
139
- "alt_template", "grp_term1", "grp_term2", "grp_refs", "label_1", "label_2",
140
- "bias_spec", "type", "gen_model"]
141
- save_df = save_df[col_order]
142
-
143
- print(f"Save cols prep: {list(save_df.columns)}")
144
-
145
- if isSaving == True:
146
- print(f"Saving: {save_df.head(1)}")
147
- smgr.saveSentences(save_df) #[["Group term","Attribute term","Test sentence"]])
148
-
149
- num_sentences = len(test_sentences)
150
- print(f"Returned num sentences: {num_sentences}")
151
-
152
- # list for Gradio dataframe
153
- ret_df = [list(r.values) for i, r in save_df[['sentence', 'alt_sentence', 'grp_term1', 'grp_term2', "att_term"]].iterrows()]
154
- print(ret_df)
155
-
156
- return ret_df, gen_err_msg
157
-
158
- def _getSavedSentences(bias_spec, progress, use_paper_sentences):
159
- test_sentences = []
160
-
161
- print(f"Bias spec dict: {bias_spec}")
162
-
163
- g1, g2, a1, a2 = bt_mgr.get_words(bias_spec)
164
- for gi, g_term in enumerate(g1+g2):
165
- att_list = a1+a2
166
- grp_list = g1+g2
167
- # match "-" and no space
168
- att_list_dash = [t.replace(' ','-') for t in att_list]
169
- att_list.extend(att_list_dash)
170
- att_list_nospace = [t.replace(' ','') for t in att_list]
171
- att_list.extend(att_list_nospace)
172
- att_list = list(set(att_list))
173
-
174
- progress(gi/len(g1+g2), desc=f"{g_term}")
175
-
176
- _, sentence_df, _ = smgr.getSavedSentences(g_term)
177
- # only take from paper & gpt3.5
178
- flt_gen_models = ["gpt-3.5","gpt-3.5-turbo","gpt-4"]
179
- print(f"Before filter: {sentence_df.shape[0]}")
180
- if use_paper_sentences == True:
181
- if 'type' in list(sentence_df.columns):
182
- sentence_df = sentence_df.query("type=='paper' and gen_model in @flt_gen_models")
183
- print(f"After filter: {sentence_df.shape[0]}")
184
- else:
185
- if 'type' in list(sentence_df.columns):
186
- # only use GPT-3.5 generations for now - todo: add settings option for this
187
- sentence_df = sentence_df.query("gen_model in @flt_gen_models")
188
- print(f"After filter: {sentence_df.shape[0]}")
189
-
190
- if sentence_df.shape[0] > 0:
191
- sentence_df = sentence_df[['grp_term1','grp_term2','att_term','sentence','alt_sentence']]
192
- sentence_df = sentence_df.rename(columns={'grp_term1': "Group term 1",
193
- 'grp_term2': "Group term 2",
194
- "att_term": "Attribute term",
195
- "sentence": "Sentence",
196
- "alt_sentence": "Alt Sentence"})
197
-
198
- sel = sentence_df[(sentence_df['Attribute term'].isin(att_list)) & \
199
- ((sentence_df['Group term 1'].isin(grp_list)) & (sentence_df['Group term 2'].isin(grp_list))) ].values
200
- if len(sel) > 0:
201
- for gt1,gt2,at,s,a_s in sel:
202
- #if at == "speech-language-pathologist":
203
- # print(f"Special case: {at}")
204
- # at == "speech-language pathologist" # legacy, special case
205
- #else:
206
- #at = at #.replace("-"," ")
207
- #gt = gt #.replace("-"," ")
208
-
209
- test_sentences.append([s,a_s,gt1,gt2,at])
210
- else:
211
- print("Test sentences empty!")
212
- #raise gr.Error(NO_SENTENCES_ERROR)
213
-
214
- return test_sentences
spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/utils/video.py DELETED
@@ -1,26 +0,0 @@
- from typing import List
- import os
-
- from PIL.Image import Image
- import cv2
- import numpy as np
-
-
- def save_video(images_list: List[Image], video_path: str):
-     """Saves a video from a list of images
-
-     Args:
-         images_list (List[Image]): A list of PIL images.
-         video_path (str): The path to save the video to.
-     """
-     images = [np.array(img) for img in images_list]
-     height, width, _ = images[0].shape
-
-     fps = max(len(images) // 20, 1)  # guard against fps == 0 for very short clips
-     video = cv2.VideoWriter(video_path, 0, fps, (width, height))
-
-     for img in images:
-         video.write(cv2.cvtColor(img, cv2.COLOR_RGB2BGR))
-
-     cv2.destroyAllWindows()
-     video.release()
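A small, hedged usage example for `save_video`; the frames are synthetic solid-colour images and the output path is arbitrary.

```python
# Hedged usage sketch: write a short clip of solid-colour frames with save_video.
from PIL import Image

frames = [Image.new('RGB', (320, 240), (i, 0, 255 - i)) for i in range(0, 255, 5)]
save_video(frames, 'colours.avi')  # 51 frames -> fps = max(51 // 20, 1) = 2
```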
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/core/utils/misc.py DELETED
@@ -1,17 +0,0 @@
- def add_prefix(inputs, prefix):
-     """Add prefix for dict.
-
-     Args:
-         inputs (dict): The input dict with str keys.
-         prefix (str): The prefix to add.
-
-     Returns:
-         dict: The dict with keys updated with ``prefix``.
-     """
-
-     outputs = dict()
-     for name, value in inputs.items():
-         outputs[f'{prefix}.{name}'] = value
-
-     return outputs
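For illustration, a quick example of what `add_prefix` produces; this mirrors how the segmentor prefixes its decode and auxiliary head losses in log dicts.

```python
# Example: namespacing a loss dict the way the segmentation heads do.
losses = {'loss_ce': 0.42, 'acc_seg': 87.5}
print(add_prefix(losses, 'decode'))
# {'decode.loss_ce': 0.42, 'decode.acc_seg': 87.5}
```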
spaces/Ash58947/Bot/README.md DELETED
@@ -1,10 +0,0 @@
- ---
- title: Bot
- emoji: 🐠
- colorFrom: pink
- colorTo: yellow
- sdk: docker
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/util/request.py DELETED
@@ -1,137 +0,0 @@
1
- from __future__ import absolute_import
2
-
3
- from base64 import b64encode
4
-
5
- from ..exceptions import UnrewindableBodyError
6
- from ..packages.six import b, integer_types
7
-
8
- # Pass as a value within ``headers`` to skip
9
- # emitting some HTTP headers that are added automatically.
10
- # The only headers that are supported are ``Accept-Encoding``,
11
- # ``Host``, and ``User-Agent``.
12
- SKIP_HEADER = "@@@SKIP_HEADER@@@"
13
- SKIPPABLE_HEADERS = frozenset(["accept-encoding", "host", "user-agent"])
14
-
15
- ACCEPT_ENCODING = "gzip,deflate"
16
-
17
- _FAILEDTELL = object()
18
-
19
-
20
- def make_headers(
21
- keep_alive=None,
22
- accept_encoding=None,
23
- user_agent=None,
24
- basic_auth=None,
25
- proxy_basic_auth=None,
26
- disable_cache=None,
27
- ):
28
- """
29
- Shortcuts for generating request headers.
30
-
31
- :param keep_alive:
32
- If ``True``, adds 'connection: keep-alive' header.
33
-
34
- :param accept_encoding:
35
- Can be a boolean, list, or string.
36
- ``True`` translates to 'gzip,deflate'.
37
- List will get joined by comma.
38
- String will be used as provided.
39
-
40
- :param user_agent:
41
- String representing the user-agent you want, such as
42
- "python-urllib3/0.6"
43
-
44
- :param basic_auth:
45
- Colon-separated username:password string for 'authorization: basic ...'
46
- auth header.
47
-
48
- :param proxy_basic_auth:
49
- Colon-separated username:password string for 'proxy-authorization: basic ...'
50
- auth header.
51
-
52
- :param disable_cache:
53
- If ``True``, adds 'cache-control: no-cache' header.
54
-
55
- Example::
56
-
57
- >>> make_headers(keep_alive=True, user_agent="Batman/1.0")
58
- {'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
59
- >>> make_headers(accept_encoding=True)
60
- {'accept-encoding': 'gzip,deflate'}
61
- """
62
- headers = {}
63
- if accept_encoding:
64
- if isinstance(accept_encoding, str):
65
- pass
66
- elif isinstance(accept_encoding, list):
67
- accept_encoding = ",".join(accept_encoding)
68
- else:
69
- accept_encoding = ACCEPT_ENCODING
70
- headers["accept-encoding"] = accept_encoding
71
-
72
- if user_agent:
73
- headers["user-agent"] = user_agent
74
-
75
- if keep_alive:
76
- headers["connection"] = "keep-alive"
77
-
78
- if basic_auth:
79
- headers["authorization"] = "Basic " + b64encode(b(basic_auth)).decode("utf-8")
80
-
81
- if proxy_basic_auth:
82
- headers["proxy-authorization"] = "Basic " + b64encode(
83
- b(proxy_basic_auth)
84
- ).decode("utf-8")
85
-
86
- if disable_cache:
87
- headers["cache-control"] = "no-cache"
88
-
89
- return headers
90
-
91
-
92
- def set_file_position(body, pos):
93
- """
94
- If a position is provided, move file to that point.
95
- Otherwise, we'll attempt to record a position for future use.
96
- """
97
- if pos is not None:
98
- rewind_body(body, pos)
99
- elif getattr(body, "tell", None) is not None:
100
- try:
101
- pos = body.tell()
102
- except (IOError, OSError):
103
- # This differentiates from None, allowing us to catch
104
- # a failed `tell()` later when trying to rewind the body.
105
- pos = _FAILEDTELL
106
-
107
- return pos
108
-
109
-
110
- def rewind_body(body, body_pos):
111
- """
112
- Attempt to rewind body to a certain position.
113
- Primarily used for request redirects and retries.
114
-
115
- :param body:
116
- File-like object that supports seek.
117
-
118
- :param int pos:
119
- Position to seek to in file.
120
- """
121
- body_seek = getattr(body, "seek", None)
122
- if body_seek is not None and isinstance(body_pos, integer_types):
123
- try:
124
- body_seek(body_pos)
125
- except (IOError, OSError):
126
- raise UnrewindableBodyError(
127
- "An error occurred when rewinding request body for redirect/retry."
128
- )
129
- elif body_pos is _FAILEDTELL:
130
- raise UnrewindableBodyError(
131
- "Unable to record file position for rewinding "
132
- "request body during a redirect/retry."
133
- )
134
- else:
135
- raise ValueError(
136
- "body_pos must be of type integer, instead it was %s." % type(body_pos)
137
- )
spaces/Ayanoaisho/L/Dockerfile DELETED
@@ -1,21 +0,0 @@
- FROM node:18-bullseye-slim
-
- RUN apt-get update && \
-     apt-get install -y git
-
- RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app
-
- WORKDIR /app
-
- RUN npm install
-
- COPY Dockerfile greeting.md* .env* ./
-
- RUN npm run build
-
- EXPOSE 7860
-
- ENV NODE_ENV=production
-
- CMD [ "npm", "start" ]
spaces/AzumaSeren100/XuanShen-Bert-VITS2/utils.py DELETED
@@ -1,290 +0,0 @@
1
- import os
2
- import glob
3
- import sys
4
- import argparse
5
- import logging
6
- import json
7
- import subprocess
8
- import numpy as np
9
- from scipy.io.wavfile import read
10
- import torch
11
-
12
- MATPLOTLIB_FLAG = False
13
-
14
- logger = logging.getLogger(__name__)
15
-
16
-
17
- def load_checkpoint(checkpoint_path, model, optimizer=None, skip_optimizer=False):
18
- assert os.path.isfile(checkpoint_path)
19
- checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
20
- iteration = checkpoint_dict['iteration']
21
- learning_rate = checkpoint_dict['learning_rate']
22
- if optimizer is not None and not skip_optimizer and checkpoint_dict['optimizer'] is not None:
23
- optimizer.load_state_dict(checkpoint_dict['optimizer'])
24
- elif optimizer is None and not skip_optimizer:
25
- #else: Disable this line if Infer and resume checkpoint,then enable the line upper
26
- new_opt_dict = optimizer.state_dict()
27
- new_opt_dict_params = new_opt_dict['param_groups'][0]['params']
28
- new_opt_dict['param_groups'] = checkpoint_dict['optimizer']['param_groups']
29
- new_opt_dict['param_groups'][0]['params'] = new_opt_dict_params
30
- optimizer.load_state_dict(new_opt_dict)
31
- saved_state_dict = checkpoint_dict['model']
32
- if hasattr(model, 'module'):
33
- state_dict = model.module.state_dict()
34
- else:
35
- state_dict = model.state_dict()
36
- new_state_dict = {}
37
- for k, v in state_dict.items():
38
- try:
39
- #assert "emb_g" not in k
40
- # print("load", k)
41
- new_state_dict[k] = saved_state_dict[k]
42
- assert saved_state_dict[k].shape == v.shape, (saved_state_dict[k].shape, v.shape)
43
- except:
44
- logger.error("%s is not in the checkpoint" % k)
45
- new_state_dict[k] = v
46
- if hasattr(model, 'module'):
47
- model.module.load_state_dict(new_state_dict, strict=False)
48
- else:
49
- model.load_state_dict(new_state_dict, strict=False)
50
- logger.info("Loaded checkpoint '{}' (iteration {})".format(
51
- checkpoint_path, iteration))
52
- return model, optimizer, learning_rate, iteration
53
-
54
-
55
- def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path):
56
- logger.info("Saving model and optimizer state at iteration {} to {}".format(
57
- iteration, checkpoint_path))
58
- if hasattr(model, 'module'):
59
- state_dict = model.module.state_dict()
60
- else:
61
- state_dict = model.state_dict()
62
- torch.save({'model': state_dict,
63
- 'iteration': iteration,
64
- 'optimizer': optimizer.state_dict(),
65
- 'learning_rate': learning_rate}, checkpoint_path)
66
-
67
-
68
- def summarize(writer, global_step, scalars={}, histograms={}, images={}, audios={}, audio_sampling_rate=22050):
69
- for k, v in scalars.items():
70
- writer.add_scalar(k, v, global_step)
71
- for k, v in histograms.items():
72
- writer.add_histogram(k, v, global_step)
73
- for k, v in images.items():
74
- writer.add_image(k, v, global_step, dataformats='HWC')
75
- for k, v in audios.items():
76
- writer.add_audio(k, v, global_step, audio_sampling_rate)
77
-
78
-
79
- def latest_checkpoint_path(dir_path, regex="G_*.pth"):
80
- f_list = glob.glob(os.path.join(dir_path, regex))
81
- f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f))))
82
- x = f_list[-1]
83
- print(x)
84
- return x
85
-
86
-
87
- def plot_spectrogram_to_numpy(spectrogram):
88
- global MATPLOTLIB_FLAG
89
- if not MATPLOTLIB_FLAG:
90
- import matplotlib
91
- matplotlib.use("Agg")
92
- MATPLOTLIB_FLAG = True
93
- mpl_logger = logging.getLogger('matplotlib')
94
- mpl_logger.setLevel(logging.WARNING)
95
- import matplotlib.pylab as plt
96
- import numpy as np
97
-
98
- fig, ax = plt.subplots(figsize=(10, 2))
99
- im = ax.imshow(spectrogram, aspect="auto", origin="lower",
100
- interpolation='none')
101
- plt.colorbar(im, ax=ax)
102
- plt.xlabel("Frames")
103
- plt.ylabel("Channels")
104
- plt.tight_layout()
105
-
106
- fig.canvas.draw()
107
- data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
108
- data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
109
- plt.close()
110
- return data
111
-
112
-
113
- def plot_alignment_to_numpy(alignment, info=None):
114
- global MATPLOTLIB_FLAG
115
- if not MATPLOTLIB_FLAG:
116
- import matplotlib
117
- matplotlib.use("Agg")
118
- MATPLOTLIB_FLAG = True
119
- mpl_logger = logging.getLogger('matplotlib')
120
- mpl_logger.setLevel(logging.WARNING)
121
- import matplotlib.pylab as plt
122
- import numpy as np
123
-
124
- fig, ax = plt.subplots(figsize=(6, 4))
125
- im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower',
126
- interpolation='none')
127
- fig.colorbar(im, ax=ax)
128
- xlabel = 'Decoder timestep'
129
- if info is not None:
130
- xlabel += '\n\n' + info
131
- plt.xlabel(xlabel)
132
- plt.ylabel('Encoder timestep')
133
- plt.tight_layout()
134
-
135
- fig.canvas.draw()
136
- data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
137
- data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
138
- plt.close()
139
- return data
140
-
141
-
142
- def load_wav_to_torch(full_path):
143
- sampling_rate, data = read(full_path)
144
- return torch.FloatTensor(data.astype(np.float32)), sampling_rate
145
-
146
-
147
- def load_filepaths_and_text(filename, split="|"):
148
- with open(filename, encoding='utf-8') as f:
149
- filepaths_and_text = [line.strip().split(split) for line in f]
150
- return filepaths_and_text
151
-
152
-
153
- def get_hparams(init=True):
154
- parser = argparse.ArgumentParser()
155
- parser.add_argument('-c', '--config', type=str, default="./configs/base.json",
156
- help='JSON file for configuration')
157
- parser.add_argument('-m', '--model', type=str, required=True,
158
- help='Model name')
159
-
160
- args = parser.parse_args()
161
- # model_dir = os.path.join("./logs", args.model)
162
- model_dir = "./logs/" + args.model
163
-
164
- if not os.path.exists(model_dir):
165
- os.makedirs(model_dir)
166
-
167
- config_path = args.config
168
- config_save_path = os.path.join(model_dir, "config.json")
169
- if init:
170
- with open(config_path, "r" ,encoding='utf-8') as f:
171
- data = f.read()
172
- with open(config_save_path, "w" ,encoding='utf-8') as f:
173
- f.write(data)
174
- else:
175
- with open(config_save_path, "r" ,encoding='utf-8') as f:
176
- data = f.read()
177
- config = json.loads(data)
178
-
179
- hparams = HParams(**config)
180
- hparams.model_dir = model_dir
181
- return hparams
182
-
183
-
184
- def clean_checkpoints(path_to_models='logs/44k/', n_ckpts_to_keep=2, sort_by_time=True):
185
- """Freeing up space by deleting saved ckpts
186
-
187
- Arguments:
188
- path_to_models -- Path to the model directory
189
- n_ckpts_to_keep -- Number of ckpts to keep, excluding G_0.pth and D_0.pth
190
- sort_by_time -- True -> chronologically delete ckpts
191
- False -> lexicographically delete ckpts
192
- """
193
- import re
194
- ckpts_files = [f for f in os.listdir(path_to_models) if os.path.isfile(os.path.join(path_to_models, f))]
195
- name_key = (lambda _f: int(re.compile('._(\d+)\.pth').match(_f).group(1)))
196
- time_key = (lambda _f: os.path.getmtime(os.path.join(path_to_models, _f)))
197
- sort_key = time_key if sort_by_time else name_key
198
- x_sorted = lambda _x: sorted([f for f in ckpts_files if f.startswith(_x) and not f.endswith('_0.pth')],
199
- key=sort_key)
200
- to_del = [os.path.join(path_to_models, fn) for fn in
201
- (x_sorted('G')[:-n_ckpts_to_keep] + x_sorted('D')[:-n_ckpts_to_keep])]
202
- del_info = lambda fn: logger.info(f".. Free up space by deleting ckpt {fn}")
203
- del_routine = lambda x: [os.remove(x), del_info(x)]
204
- rs = [del_routine(fn) for fn in to_del]
205
-
206
- def get_hparams_from_dir(model_dir):
207
- config_save_path = os.path.join(model_dir, "config.json")
208
- with open(config_save_path, "r", encoding='utf-8') as f:
209
- data = f.read()
210
- config = json.loads(data)
211
-
212
- hparams = HParams(**config)
213
- hparams.model_dir = model_dir
214
- return hparams
215
-
216
-
217
- def get_hparams_from_file(config_path):
218
- with open(config_path, "r", encoding='utf-8') as f:
219
- data = f.read()
220
- config = json.loads(data)
221
-
222
- hparams = HParams(**config)
223
- return hparams
224
-
225
-
226
- def check_git_hash(model_dir):
227
- source_dir = os.path.dirname(os.path.realpath(__file__))
228
- if not os.path.exists(os.path.join(source_dir, ".git")):
229
- logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format(
230
- source_dir
231
- ))
232
- return
233
-
234
- cur_hash = subprocess.getoutput("git rev-parse HEAD")
235
-
236
- path = os.path.join(model_dir, "githash")
237
- if os.path.exists(path):
238
- saved_hash = open(path).read()
239
- if saved_hash != cur_hash:
240
- logger.warn("git hash values are different. {}(saved) != {}(current)".format(
241
- saved_hash[:8], cur_hash[:8]))
242
- else:
243
- open(path, "w").write(cur_hash)
244
-
245
-
246
- def get_logger(model_dir, filename="train.log"):
247
- global logger
248
- logger = logging.getLogger(os.path.basename(model_dir))
249
- logger.setLevel(logging.DEBUG)
250
-
251
- formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s")
252
- if not os.path.exists(model_dir):
253
- os.makedirs(model_dir)
254
- h = logging.FileHandler(os.path.join(model_dir, filename))
255
- h.setLevel(logging.DEBUG)
256
- h.setFormatter(formatter)
257
- logger.addHandler(h)
258
- return logger
259
-
260
-
261
- class HParams():
262
- def __init__(self, **kwargs):
263
- for k, v in kwargs.items():
264
- if type(v) == dict:
265
- v = HParams(**v)
266
- self[k] = v
267
-
268
- def keys(self):
269
- return self.__dict__.keys()
270
-
271
- def items(self):
272
- return self.__dict__.items()
273
-
274
- def values(self):
275
- return self.__dict__.values()
276
-
277
- def __len__(self):
278
- return len(self.__dict__)
279
-
280
- def __getitem__(self, key):
281
- return getattr(self, key)
282
-
283
- def __setitem__(self, key, value):
284
- return setattr(self, key, value)
285
-
286
- def __contains__(self, key):
287
- return key in self.__dict__
288
-
289
- def __repr__(self):
290
- return self.__dict__.__repr__()
spaces/Banbri/zcvzcv/src/lib/sleep.ts DELETED
@@ -1,6 +0,0 @@
- export const sleep = async (durationInMs: number) =>
-   new Promise((resolve) => {
-     setTimeout(() => {
-       resolve(true)
-     }, durationInMs)
-   })
spaces/Benson/text-generation/Examples/Descargar Gratis Juegos De Matemticas Para Pc.md DELETED
@@ -1,168 +0,0 @@
1
-
2
- <h1>Descargar PPh 21 Aplikasi: Una guía para los contribuyentes indonesios</h1>
3
- <p>Si usted es un empleado o un empleador en Indonesia, necesita saber acerca de PPh 21, que es el impuesto sobre la renta sobre los salarios, salarios y otros pagos relacionados con el trabajo o los servicios. Pagar PPh 21 no es solo una obligación legal, sino también una forma de contribuir al desarrollo del país. Sin embargo, calcular y reportar PPh 21 puede ser complicado y consumir mucho tiempo, especialmente si lo haces manualmente. Es por eso que usted debe considerar el uso de PPh 21 aplikasi, que es un software que puede ayudarle con el proceso. En este artículo, explicaremos qué es PPh 21, cómo calcularlo manualmente y cómo usar PPh 21 aplikasi para hacer tu vida más fácil. </p>
4
- <h2>¿Qué es PPh 21 y por qué es importante? </h2>
5
- <h3>Definición y alcance del PPh 21</h3>
6
- <p>PPh 21 significa Pajak Penghasilan Pasal 21, que significa artículo 21 del impuesto sobre la renta. Es un impuesto que se aplica a los ingresos en forma de salarios, salarios, honorarios, subsidios, bonos, comisiones, pensiones, indemnización por despido y otros pagos por nombre y en cualquier forma relacionada con el trabajo o posición, servicios, y actividades realizadas por individuos que son sujetos de impuestos nacionales o referidos como contribuyentes. </p>
7
- <h2>descargar gratis juegos de matemáticas para pc</h2><br /><p><b><b>Download Zip</b> &#9881; <a href="https://bltlly.com/2v6KWK">https://bltlly.com/2v6KWK</a></b></p><br /><br />
8
- <p>PPh 21 se aplica tanto a los empleados como a los empleadores en Indonesia. Los empleados son aquellos que reciben o ganan ingresos del trabajo o los servicios realizados por un empleador. Los empleadores son los que pagan o proporcionan ingresos a los empleados u otros receptores de ingresos. Los empleadores pueden ser individuos, empresas, agencias gubernamentales, empresas estatales u otras entidades. </p>
9
- <p>Los empleadores son responsables de retener, pagar y reportar PPh 21 en nombre de sus empleados u otros receptores de ingresos. Los empleados u otros perceptores de ingresos también están obligados a informar su declaración anual del impuesto sobre la renta (SPT) y pagar cualquier impuesto adicional si sus ingresos exceden el umbral. </p>
10
- <h3>Beneficios de pagar PPh 21</h3>
11
-
12
- <p>Al pagar PPh 21 correctamente y a tiempo, también puede evitar multas y cargos de interés que puedan surgir de un pago atrasado o insuficiente. También puede reclamar créditos fiscales o reembolsos si ha pagado en exceso sus impuestos o tiene pagos de impuestos en exceso de años anteriores. </p>
13
- <h3>Sanciones por incumplimiento</h3>
14
- <p>Si no cumple con sus obligaciones PPh 21, puede enfrentar multas y cargos por intereses de las autoridades fiscales. Las penalidades y cargos por intereses varían dependiendo del tipo y severidad de la violación. Algunos ejemplos de penalidades y cargos por intereses son:</p>
15
- <ul>
16
- <li>Una multa del 2% al mes por pago atrasado de impuestos, hasta un máximo del 48%. </li>
17
- <li>Multa de 100.000 rupias por presentación tardía del SPT.</li>
18
- <li>Una multa del 15% del monto del impuesto pagado por SPT incorrecto o incompleto.</li>
19
- <li>Una multa del 100% del monto del impuesto pagado por SPT fraudulento o intencional.</li>
20
- <li>Una multa del 20% del impuesto debido por falta de retención o recaudación de impuestos. </li>
21
- <li>Una multa del 5% del impuesto debido por falta de pago o depósito de impuestos. </li>
22
- <li>Una multa del 2% al mes por pago tardío o depósito de impuestos, hasta un máximo del 24%. </li>
23
- </ul>
24
- <p>Por lo tanto, es importante cumplir con sus obligaciones PPh 21 y evitar cualquier penalización y cargos por intereses que puedan afectar su situación financiera y reputación. </p>
25
- <h2>Cómo calcular PPh 21 manualmente? </h2>
26
- <h3>Componentes del ingreso bruto</h3>
27
- <p>Para calcular PPh 21 manualmente, necesita conocer los componentes de su ingreso bruto. El ingreso bruto es la cantidad total de ingresos que usted recibe o gana de su trabajo o servicios antes de cualquier deducción o impuesto. El ingreso bruto consiste en:</p>
28
- <p></p>
29
- <ul>
30
- <li>Ingreso regular: Este es el ingreso que usted recibe o gana regularmente, como salario mensual, salarios, honorarios, subsidios, bonos, comisiones, etc.</li>
31
-
32
- <li>Beneficios en especie: Estos son los ingresos que usted recibe o gana en forma de bienes o servicios proporcionados por su empleador, como vivienda, vehículo, seguro de salud, educación, etc.</li>
33
- </ul>
34
- <p>Necesitas sumar todos estos componentes para obtener tu ingreso bruto por cada mes y por todo el año. </p>
35
- <h3>Ingresos no imponibles (PTKP)</h3>
36
- <p>No todos tus ingresos brutos están sujetos al PPh 21. Usted puede deducir una cierta cantidad de su ingreso bruto que se considera como ingreso no gravable o Penghasilan Tidak Kena Pajak (PTKP). PTKP es una deducción estándar basada en su estado civil y número de dependientes. Las tasas actuales de PTKP son:</p>
37
- <tabla>
38
- <tr><th>Estado</th><th>PTKP por año (Rp)</th></tr>
39
- <tr><td>Single</td><td>54,000,000</td></tr>
40
- <tr><td>Casado</td><td>58,500,000</td></tr>
41
- <tr><td>Casado con un dependiente</td><td>63,000,000</td></tr>
42
- <tr><td>Casado con dos dependientes</td><td>67,500,000</td></tr>
43
- <tr><td>Casado con tres dependientes</td><td>72,000,000</td></tr>
44
- </tabla>
45
- <p>Puede deducir la cantidad de PTKP de su ingreso bruto anual para obtener su ingreso neto. También puede dividir la cantidad de PTKP por 12 para obtener la cantidad mensual de PTKP y deducirla de su ingreso bruto mensual. </p>
46
- <h3>Ingresos imponibles (PKP)</h3>
47
- <p>Su ingreso imponible o Penghasilan Kena Pajak (PKP) es la cantidad de su ingreso neto que está sujeto a PPh 21. Puede calcular su PKP restando su PTKP de su ingreso neto. Si su ingreso neto es menor o igual a su PTKP, entonces su PKP es cero y no tiene que pagar ningún PPh 21. Sin embargo, si su ingreso neto es más que su PTKP, entonces usted tiene que pagar PPh 21 de acuerdo con las tasas progresivas de impuestos. </p>
48
- <h3>Tasas impositivas progresivas</h3>
49
- <p>PPh 21 sigue un sistema tributario progresivo, lo que significa que cuanto mayor sea su PKP, mayor será la tasa impositiva que se aplica a usted. Los tipos impositivos progresivos actuales son:</p>
50
- <tabla>
51
- <tr><th>PKP por año (Rp)</th><th>Tipo impositivo (%)</th></tr>
52
-
53
- <tr><td>Por encima de 50,000,000 hasta 250,000,000</td><td>15</td></tr>
54
- <tr><td>Por encima de 250,000,000 hasta 500,000</td><td>25</td></tr>
55
- <tr><td>Por encima de 500,000,000</td><td>30</td></tr>
56
- </tabla>
57
- <p>Para calcular su PPh 21 usando las tasas progresivas de impuestos, debe aplicar la tasa de impuestos para cada tramo de su PKP y sumarlos. Por ejemplo, si su PKP es Rp300 millones, entonces su PPh 21 se calcula de la siguiente manera:</p>
58
- <tabla>
59
- <tr><th>PKP por año (Rp)</th><th>Tipo impositivo (%)</th><th>Monto impositivo (Rp)</th></tr>
60
- <tr><td>50,000,000</td><td>5</td><td>>2,500,000</td></tr>
61
- <tr><td>200,000,000</td><td>15</td><td><td>30,000,000</td></tr>
62
- <tr><td>50,000,000</td><td>25</td><td>>12,500,000</td></tr>
63
- <tr><td>Total</td><td>-</td><td><td>45,000,000</td></tr>
64
- </tabla>
65
- <p>También puede dividir su PPh 21 por 12 para obtener la cantidad mensual de PPh 21 que tiene que pagar o retener. </p>
66
- <h3>Ejemplo de cálculo</h3>
67
- <p>Para ilustrar cómo calcular PPh 21 manualmente, tomemos un ejemplo de un empleado que tiene los siguientes ingresos y deducciones:</p>
68
- <tabla>
69
- <tr><th>ítem</th><th>Cantidad por mes (Rp)</th></tr>
70
- <tr><td>Salario</td><td>10,000,000</td></tr>
71
- <tr><td>Bonus</td><td>1,000,000</td></tr>
72
- <tr><td>Asignación de vivienda</td><td>2,000,000</td></tr>
73
- <tr><td>Prima del seguro de salud (pagada por el empleador)</td><td>500,000</td></tr>
74
- <tr><td>Contribución de pensión (pagada por el empleado)</td><td>(500,000)</td></tr>
75
- <tr><td>Ingreso bruto total</td><td>13,000,000</td></tr>
76
- <tr><td>PTKP (single)</td><td>(4,500,000)</td></tr>
77
- <tr><td>Ingresos imponibles (PKP)</td><td>8,500,000</td></tr>
78
- </tabla>
79
- <p>El ingreso bruto anual del empleado es Rp156,000,000 (13,000 x 12). El PTKP anual del empleado es Rp54,000,000 (4,500,000 x 12). El PKP anual del empleado es 102 Rp102 ,000,000 (156,000,000 - 54,000,000). El PPh anual 21 del empleado se calcula de la siguiente manera:</p>
80
- <tabla>
81
- <tr><th>PKP por año (Rp)</th><th>Tipo impositivo (%)</th><th>Monto impositivo (Rp)</th></tr>
82
-
83
- <tr><td>52,000,000</td><td>15</td><td>>7,800,000</td></tr>
84
- <tr><td>Total</td><td>-</td><td><td>10,300,000</td></tr>
85
- </tabla>
86
- <p>El PPh mensual 21 del empleado es Rp858,333 (10,300,000 / 12). El empleador tiene que retener y pagar esta cantidad a las autoridades fiscales en nombre del empleado. </p>
87
- <h2>Cómo usar PPh 21 aplikasi? </h2>
88
- <h3>¿Qué es PPh 21 aplikasi y dónde conseguirlo? </h3>
89
- <p>PPh 21 aplikasi es un software que puede ayudarle a calcular y reportar PPh 21 de forma fácil y precisa. Es desarrollado por la Dirección General de Impuestos (DGT) de Indonesia y se puede descargar de forma gratuita desde su sitio web oficial. PPh 21 aplikasi es compatible con sistemas operativos Windows y requiere un mínimo de 512 MB de RAM y 100 MB de espacio libre en disco. </p>
90
- <p>PPh 21 aplikasi puede ser utilizado por empleados y empleadores en Indonesia. Los empleados pueden usarlo para calcular su propio PPh 21 y preparar su SPT. Los empleadores pueden utilizarlo para calcular el PPh 21 de sus empleados u otros receptores de ingresos y generar las hojas de retención de impuestos (bukti potong) y las hojas de pago de impuestos (SSP). </p>
91
- <h3>Características y ventajas de PPh 21 aplikasi</h3>
92
- <p>PPh 21 aplikasi tiene muchas características y ventajas que pueden hacer que su PPh 21 cálculo y presentación de informes más fácil y más rápido. Algunas de las características y ventajas son:</p>
93
- <ul>
94
- <li>Puede calcular PPh 21 para varios tipos de ingresos y deducciones, tales como ingresos regulares, ingresos irregulares, beneficios en especie, contribución a la pensión, etc.</li>
95
- <li>Puede aplicar las últimas tasas impositivas y las tasas PTKP automáticamente. </li>
96
- <li>Puede manejar múltiples fuentes de ingresos y múltiples períodos impositivos. </li>
97
- <li>Puede generar varios informes y formas, como SPT, bukti potong, SSP, etc.</li>
98
- <li>Puede exportar los datos a formatos Excel o PDF. </li>
99
- <li> Puede importar los datos de otras fuentes, como e-SPT o e-Filing.</li>
100
- <li>Puede actualizar los datos en línea desde el sitio web de la DGT. </li>
101
- <li> Tiene una interfaz fácil de usar y un menú de ayuda. </li>
102
- </ul>
103
-
104
- <p>Para instalar y usar PPh 21 aplikasi, debe seguir estos pasos:</p>
105
- <ol>
106
- <li>Descargue el archivo aplikasi PPh 21 desde el sitio web de la DGT. Elija la versión que coincida con su sistema operativo. </li>
107
- <li>Extraiga el archivo a una carpeta en su computadora. Verá un archivo llamado setup.exe. </li>
108
- <li>Ejecute el archivo setup.exe y siga las instrucciones en la pantalla. Deberá aceptar los términos y condiciones y elegir una carpeta de destino para la instalación. </li>
109
- <li>Después de que la instalación se haya completado, verá un icono de acceso directo para PPh 21 aplikasi en su escritorio. Haga doble clic en él para iniciar el software. </li>
110
- <li>Tendrá que registrar su software con su nombre, dirección de correo electrónico, número de teléfono y número de identificación fiscal (NPWP). También necesitará crear una contraseña para su cuenta. </li>
111
- <li>Verá un menú principal con varias opciones, como Entrada de datos, Cálculo, Informe, Importación/Exportación, Actualización de datos en línea, etc. Elija la opción que se adapte a sus necesidades y siga las instrucciones en la pantalla. </li>
112
- <li> También puede acceder al menú de ayuda si necesita alguna orientación o asistencia con el uso del software. </li>
113
- </ol>
114
- <h3>Cómo informar y enviar PPh 21 en línea</h3>
115
- <p>Si desea reportar y enviar su PPh 21 en línea, puede utilizar el servicio de e-Filing proporcionado por la DGT. e-Filing es un sistema que le permite enviar su SPT electrónicamente a través de Internet. Para usar e-Filing, debe seguir estos pasos:</p>
116
- <ol>
117
- <li>Cree una cuenta en el sitio web de e-Filing usando su NPWP y dirección de correo electrónico. Recibirá un código de verificación por correo electrónico que debe ingresar en el sitio web para activar su cuenta. </li>
118
- <li>Inicie sesión en su cuenta y elija el tipo de SPT que desea enviar. Puede elegir entre SPT 1770, SPT 1770S o SPT 1770SS, dependiendo de sus ingresos y estado fiscal. </li>
119
-
120
- <li>Revise y verifique sus datos antes de enviarlos. Verá un resumen de su SPT y la cantidad de impuestos adeudados o reembolsables. </li>
121
- <li>Envíe su SPT e imprima o guarde la página de confirmación. También recibirá una confirmación por correo electrónico con un número de recibo y un código de barras. </li>
122
- <li>Si tiene algún impuesto adeudado, debe pagarlo usando el SSP que puede generar desde el sitio web de e-Filing. Puede pagar en línea utilizando varios métodos, como banca por Internet, cajeros automáticos, banca móvil, etc. Debe ingresar el número de recibo y el código de barras en la SSP al realizar el pago. </li>
123
- <li>Si tiene algún reembolso de impuestos, debe esperar la verificación y aprobación de la DGT. Recibirá una notificación por correo electrónico cuando su reembolso sea procesado y transferido a su cuenta bancaria. </li>
124
- </ol>
125
- <h2>Conclusión y preguntas frecuentes</h2>
126
- <h3>Resumen de los puntos principales</h3>
127
- <p>PPh 21 es el impuesto sobre los salarios, salarios y otros pagos relacionados con el trabajo o los servicios en Indonesia. Es importante pagar el PPh 21 correctamente y a tiempo para evitar penalizaciones e intereses y apoyar el desarrollo del país. Puede calcular PPh 21 manualmente utilizando los componentes de ingreso bruto, ingreso no imponible (PTKP), ingreso imponible (PKP) y tasas impositivas progresivas. Sin embargo, calcular PPh 21 manualmente puede ser complicado y consumir mucho tiempo, especialmente si tiene múltiples fuentes de ingresos y períodos impositivos. Es por eso que usted debe utilizar PPh 21 aplikasi, que es un software que puede ayudarle a calcular y reportar PPh 21 fácilmente y con precisión. También puede utilizar el servicio de e-Filing para enviar su SPT en línea y pagar o recibir sus impuestos adeudados o reembolsables electrónicamente. </p>
128
- <h3>Preguntas frecuentes</h3>
129
- <p>Aquí hay algunas preguntas frecuentes sobre PPh 21 y PPh 21 aplikasi:</p>
130
- <ul>
131
- <li><b>P: ¿Cómo sé si soy un sujeto de impuestos nacionales o extranjeros? </b></li>
132
- <li>A: Usted es un sujeto de impuestos nacionales si cumple con uno de estos criterios: <ul>
133
-
134
- <li>Usted es un ciudadano indonesio que está en el extranjero para tareas oficiales o fines educativos y todavía tiene ingresos de Indonesia.</li>
135
- <li>Usted es un ciudadano extranjero que reside en Indonesia o está presente en Indonesia durante más de 183 días dentro de cualquier período de 12 meses. </li>
136
- </ul>
137
- Usted es un sujeto de impuestos extranjeros si no cumple con ninguno de estos criterios. </li>
138
- <li><b>P: ¿Cómo sé si tengo que informar de mi declaración anual del impuesto sobre la renta (SPT)? </b></li>
139
- <li>A: Usted tiene que reportar su declaración anual del impuesto sobre la renta (SPT) si usted cumple con uno de estos criterios: <ul>
140
- <li>Su ingreso bruto anual excede su PTKP.</li>
141
- <li>Tienes más de un empleador o fuente de ingresos. </li>
142
- <li>Tienes ingresos del extranjero. </li>
143
- <li> Tiene ingresos que no están sujetos a retención de impuestos o impuestos finales. </li>
144
- <li>Has pagado impuestos en exceso o pagos de impuestos en exceso de años anteriores. </li>
145
- </ul>
146
- Usted no tiene que reportar su declaración anual de impuestos sobre la renta (SPT) si no cumple con ninguno de estos criterios. </li>
147
- <li><b>Q: ¿Cuándo es la fecha límite para informar y pagar PPh 21? </b></li>
148
- <li>A: La fecha límite para reportar y pagar PPh 21 depende del tipo y frecuencia de sus ingresos: <ul>
149
- <li>Si usted tiene ingresos regulares, tales como salario mensual, salarios, subsidios, etc., usted tiene que reportar y pagar PPh 21 sobre una base mensual. La fecha límite es el décimo día del mes siguiente. </li>
150
- <li>Si tienes ingresos irregulares, como un bono anual, indemnización por despido, pensión, etc., tienes que reportar y pagar PPh 21 en un evento. La fecha límite es el final del mes siguiente después de que ocurra el evento. </li>
151
- <li>Si usted tiene beneficios en especie, tales como vivienda, vehículo, seguro de salud, etc., usted tiene que informar y pagar PPh 21 sobre una base anual. La fecha límite es finales de marzo del año siguiente. </li>
152
- <li>Si tiene que informar de su declaración anual del impuesto sobre la renta (SPT), el plazo es el final de marzo del año siguiente. </li>
153
- </ul>
154
-
155
- <li><b>Q: ¿Cómo puedo actualizar el PPh 21 a la última versión? </b></li>
156
- <li>A: Puede actualizar PPh 21 aplikasi a la última versión utilizando la función Actualizar datos en línea en el menú principal. Necesita tener una conexión a Internet e iniciar sesión en su cuenta. Verá una notificación si hay una nueva versión disponible. Puede descargar e instalar la nueva versión siguiendo las instrucciones en la pantalla. </li>
157
- <li><b>Q: ¿Cómo puedo contactar a la DGT si tengo alguna pregunta o problema con PPh 21 o PPh 21 aplikasi? </b></li>
158
- <li>A: Puede ponerse en contacto con la DGT utilizando uno de estos métodos: <ul>
159
- <li>Centro de llamadas: 1500 200 (de lunes a viernes, 08.00-16.00 WIB)</li>
160
- <li>Correo electrónico: [email protected]</li>
161
- <li>Sitio web: www.pajak.go.id</li>
162
- <li>Redes sociales: Facebook, Twitter, Instagram, YouTube (@DitjenPajakRI)</li>
163
- </ul>
164
- También puede visitar la oficina de impuestos o el centro de servicio de impuestos más cercano en su área. </li>
165
- </ul>
166
- <p>Espero que este artículo haya sido útil e informativo para usted. Si tiene algún comentario o sugerencia, por favor hágamelo saber. Gracias por leer y tener un buen día! </p> 64aa2da5cf<br />
167
- <br />
168
- <br />
spaces/Big-Web/MMSD/env/Lib/site-packages/boto3/compat.py DELETED
@@ -1,82 +0,0 @@
- # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License"). You
- # may not use this file except in compliance with the License. A copy of
- # the License is located at
- #
- # https://aws.amazon.com/apache2.0/
- #
- # or in the "license" file accompanying this file. This file is
- # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
- # ANY KIND, either express or implied. See the License for the specific
- # language governing permissions and limitations under the License.
- import sys
- import os
- import errno
- import socket
- import warnings
-
- from boto3.exceptions import PythonDeprecationWarning
-
- # In python3, socket.error is OSError, which is too general
- # for what we want (i.e FileNotFoundError is a subclass of OSError).
- # In py3 all the socket related errors are in a newly created
- # ConnectionError
- SOCKET_ERROR = ConnectionError
-
- import collections.abc as collections_abc
-
-
- if sys.platform.startswith('win'):
-     def rename_file(current_filename, new_filename):
-         try:
-             os.remove(new_filename)
-         except OSError as e:
-             if not e.errno == errno.ENOENT:
-                 # We only want to a ignore trying to remove
-                 # a file that does not exist. If it fails
-                 # for any other reason we should be propagating
-                 # that exception.
-                 raise
-         os.rename(current_filename, new_filename)
- else:
-     rename_file = os.rename
-
-
- def filter_python_deprecation_warnings():
-     """
-     Invoking this filter acknowledges your runtime will soon be deprecated
-     at which time you will stop receiving all updates to your client.
-     """
-     warnings.filterwarnings(
-         'ignore',
-         message=".*Boto3 will no longer support Python.*",
-         category=PythonDeprecationWarning,
-         module=r".*boto3\.compat"
-     )
-
-
- def _warn_deprecated_python():
-     """Use this template for future deprecation campaigns as needed."""
-     py_36_params = {
-         'date': 'May 30, 2022',
-         'blog_link': (
-             'https://aws.amazon.com/blogs/developer/'
-             'python-support-policy-updates-for-aws-sdks-and-tools/'
-         )
-     }
-     deprecated_versions = {
-         # Example template for future deprecations
-         # (3, 6): py_36_params,
-     }
-     py_version = sys.version_info[:2]
-
-     if py_version in deprecated_versions:
-         params = deprecated_versions[py_version]
-         warning = (
-             "Boto3 will no longer support Python {}.{} "
-             "starting {}. To continue receiving service updates, "
-             "bug fixes, and security updates please upgrade to Python 3.7 or "
-             "later. More information can be found here: {}"
-         ).format(py_version[0], py_version[1], params['date'], params['blog_link'])
-         warnings.warn(warning, PythonDeprecationWarning)
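A short, hedged usage sketch for the two helpers this shim exposes; the file names are placeholders.

```python
# Hedged usage sketch for boto3.compat helpers; the paths are placeholders.
from boto3.compat import filter_python_deprecation_warnings, rename_file

# Silence the Python-deprecation warning before creating clients.
filter_python_deprecation_warnings()

# Rename that also works on Windows (the target is removed first there).
rename_file('report.tmp', 'report.json')
```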
spaces/Big-Web/MMSD/env/Lib/site-packages/dateutil/_common.py DELETED
@@ -1,43 +0,0 @@
- """
- Common code used in multiple modules.
- """
-
-
- class weekday(object):
-     __slots__ = ["weekday", "n"]
-
-     def __init__(self, weekday, n=None):
-         self.weekday = weekday
-         self.n = n
-
-     def __call__(self, n):
-         if n == self.n:
-             return self
-         else:
-             return self.__class__(self.weekday, n)
-
-     def __eq__(self, other):
-         try:
-             if self.weekday != other.weekday or self.n != other.n:
-                 return False
-         except AttributeError:
-             return False
-         return True
-
-     def __hash__(self):
-         return hash((
-             self.weekday,
-             self.n,
-         ))
-
-     def __ne__(self, other):
-         return not (self == other)
-
-     def __repr__(self):
-         s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday]
-         if not self.n:
-             return s
-         else:
-             return "%s(%+d)" % (s, self.n)
-
- # vim:ts=4:sw=4:et
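This small class is the machinery behind dateutil's `MO`..`SU` constants; a quick example of how instances behave with `relativedelta`:

```python
# Example: the weekday helper underlying dateutil.relativedelta's MO..SU constants.
from datetime import date
from dateutil.relativedelta import MO, relativedelta

print(MO, MO(+2))  # MO MO(+2)  -- calling an instance rebinds n via __call__
second_monday = date(2023, 5, 1) + relativedelta(weekday=MO(+2))
print(second_monday)  # 2023-05-08, the second Monday on or after 1 May 2023
```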
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/configuration.py DELETED
@@ -1,374 +0,0 @@
1
- """Configuration management setup
2
-
3
- Some terminology:
4
- - name
5
- As written in config files.
6
- - value
7
- Value associated with a name
8
- - key
9
- Name combined with it's section (section.name)
10
- - variant
11
- A single word describing where the configuration key-value pair came from
12
- """
13
-
14
- import configparser
15
- import locale
16
- import os
17
- import sys
18
- from typing import Any, Dict, Iterable, List, NewType, Optional, Tuple
19
-
20
- from pip._internal.exceptions import (
21
- ConfigurationError,
22
- ConfigurationFileCouldNotBeLoaded,
23
- )
24
- from pip._internal.utils import appdirs
25
- from pip._internal.utils.compat import WINDOWS
26
- from pip._internal.utils.logging import getLogger
27
- from pip._internal.utils.misc import ensure_dir, enum
28
-
29
- RawConfigParser = configparser.RawConfigParser # Shorthand
30
- Kind = NewType("Kind", str)
31
-
32
- CONFIG_BASENAME = "pip.ini" if WINDOWS else "pip.conf"
33
- ENV_NAMES_IGNORED = "version", "help"
34
-
35
- # The kinds of configurations there are.
36
- kinds = enum(
37
- USER="user", # User Specific
38
- GLOBAL="global", # System Wide
39
- SITE="site", # [Virtual] Environment Specific
40
- ENV="env", # from PIP_CONFIG_FILE
41
- ENV_VAR="env-var", # from Environment Variables
42
- )
43
- OVERRIDE_ORDER = kinds.GLOBAL, kinds.USER, kinds.SITE, kinds.ENV, kinds.ENV_VAR
44
- VALID_LOAD_ONLY = kinds.USER, kinds.GLOBAL, kinds.SITE
45
-
46
- logger = getLogger(__name__)
47
-
48
-
49
- # NOTE: Maybe use the optionx attribute to normalize keynames.
50
- def _normalize_name(name: str) -> str:
51
- """Make a name consistent regardless of source (environment or file)"""
52
- name = name.lower().replace("_", "-")
53
- if name.startswith("--"):
54
- name = name[2:] # only prefer long opts
55
- return name
56
-
57
-
58
- def _disassemble_key(name: str) -> List[str]:
59
- if "." not in name:
60
- error_message = (
61
- "Key does not contain dot separated section and key. "
62
- "Perhaps you wanted to use 'global.{}' instead?"
63
- ).format(name)
64
- raise ConfigurationError(error_message)
65
- return name.split(".", 1)
66
-
67
-
68
- def get_configuration_files() -> Dict[Kind, List[str]]:
69
- global_config_files = [
70
- os.path.join(path, CONFIG_BASENAME) for path in appdirs.site_config_dirs("pip")
71
- ]
72
-
73
- site_config_file = os.path.join(sys.prefix, CONFIG_BASENAME)
74
- legacy_config_file = os.path.join(
75
- os.path.expanduser("~"),
76
- "pip" if WINDOWS else ".pip",
77
- CONFIG_BASENAME,
78
- )
79
- new_config_file = os.path.join(appdirs.user_config_dir("pip"), CONFIG_BASENAME)
80
- return {
81
- kinds.GLOBAL: global_config_files,
82
- kinds.SITE: [site_config_file],
83
- kinds.USER: [legacy_config_file, new_config_file],
84
- }
85
-
86
-
87
- class Configuration:
88
- """Handles management of configuration.
89
-
90
- Provides an interface to accessing and managing configuration files.
91
-
92
- This class converts provides an API that takes "section.key-name" style
93
- keys and stores the value associated with it as "key-name" under the
94
- section "section".
95
-
96
- This allows for a clean interface wherein the both the section and the
97
- key-name are preserved in an easy to manage form in the configuration files
98
- and the data stored is also nice.
99
- """
100
-
101
- def __init__(self, isolated: bool, load_only: Optional[Kind] = None) -> None:
102
- super().__init__()
103
-
104
- if load_only is not None and load_only not in VALID_LOAD_ONLY:
105
- raise ConfigurationError(
106
- "Got invalid value for load_only - should be one of {}".format(
107
- ", ".join(map(repr, VALID_LOAD_ONLY))
108
- )
109
- )
110
- self.isolated = isolated
111
- self.load_only = load_only
112
-
113
- # Because we keep track of where we got the data from
114
- self._parsers: Dict[Kind, List[Tuple[str, RawConfigParser]]] = {
115
- variant: [] for variant in OVERRIDE_ORDER
116
- }
117
- self._config: Dict[Kind, Dict[str, Any]] = {
118
- variant: {} for variant in OVERRIDE_ORDER
119
- }
120
- self._modified_parsers: List[Tuple[str, RawConfigParser]] = []
121
-
122
- def load(self) -> None:
123
- """Loads configuration from configuration files and environment"""
124
- self._load_config_files()
125
- if not self.isolated:
126
- self._load_environment_vars()
127
-
128
- def get_file_to_edit(self) -> Optional[str]:
129
- """Returns the file with highest priority in configuration"""
130
- assert self.load_only is not None, "Need to be specified a file to be editing"
131
-
132
- try:
133
- return self._get_parser_to_modify()[0]
134
- except IndexError:
135
- return None
136
-
137
- def items(self) -> Iterable[Tuple[str, Any]]:
138
- """Returns key-value pairs like dict.items() representing the loaded
139
- configuration
140
- """
141
- return self._dictionary.items()
142
-
143
- def get_value(self, key: str) -> Any:
144
- """Get a value from the configuration."""
145
- orig_key = key
146
- key = _normalize_name(key)
147
- try:
148
- return self._dictionary[key]
149
- except KeyError:
150
- # disassembling triggers a more useful error message than simply
151
- # "No such key" in the case that the key isn't in the form command.option
152
- _disassemble_key(key)
153
- raise ConfigurationError(f"No such key - {orig_key}")
154
-
155
- def set_value(self, key: str, value: Any) -> None:
156
- """Modify a value in the configuration."""
157
- key = _normalize_name(key)
158
- self._ensure_have_load_only()
159
-
160
- assert self.load_only
161
- fname, parser = self._get_parser_to_modify()
162
-
163
- if parser is not None:
164
- section, name = _disassemble_key(key)
165
-
166
- # Modify the parser and the configuration
167
- if not parser.has_section(section):
168
- parser.add_section(section)
169
- parser.set(section, name, value)
170
-
171
- self._config[self.load_only][key] = value
172
- self._mark_as_modified(fname, parser)
173
-
174
- def unset_value(self, key: str) -> None:
175
- """Unset a value in the configuration."""
176
- orig_key = key
177
- key = _normalize_name(key)
178
- self._ensure_have_load_only()
179
-
180
- assert self.load_only
181
- if key not in self._config[self.load_only]:
182
- raise ConfigurationError(f"No such key - {orig_key}")
183
-
184
- fname, parser = self._get_parser_to_modify()
185
-
186
- if parser is not None:
187
- section, name = _disassemble_key(key)
188
- if not (
189
- parser.has_section(section) and parser.remove_option(section, name)
190
- ):
191
- # The option was not removed.
192
- raise ConfigurationError(
193
- "Fatal Internal error [id=1]. Please report as a bug."
194
- )
195
-
196
- # The section may be empty after the option was removed.
197
- if not parser.items(section):
198
- parser.remove_section(section)
199
- self._mark_as_modified(fname, parser)
200
-
201
- del self._config[self.load_only][key]
202
-
203
- def save(self) -> None:
204
- """Save the current in-memory state."""
205
- self._ensure_have_load_only()
206
-
207
- for fname, parser in self._modified_parsers:
208
- logger.info("Writing to %s", fname)
209
-
210
- # Ensure directory exists.
211
- ensure_dir(os.path.dirname(fname))
212
-
213
- with open(fname, "w") as f:
214
- parser.write(f)
215
-
216
- #
217
- # Private routines
218
- #
219
-
220
- def _ensure_have_load_only(self) -> None:
221
- if self.load_only is None:
222
- raise ConfigurationError("Needed a specific file to be modifying.")
223
- logger.debug("Will be working with %s variant only", self.load_only)
224
-
225
- @property
226
- def _dictionary(self) -> Dict[str, Any]:
227
- """A dictionary representing the loaded configuration."""
228
- # NOTE: Dictionaries are not populated if not loaded. So, conditionals
229
- # are not needed here.
230
- retval = {}
231
-
232
- for variant in OVERRIDE_ORDER:
233
- retval.update(self._config[variant])
234
-
235
- return retval
236
-
237
- def _load_config_files(self) -> None:
238
- """Loads configuration from configuration files"""
239
- config_files = dict(self.iter_config_files())
240
- if config_files[kinds.ENV][0:1] == [os.devnull]:
241
- logger.debug(
242
- "Skipping loading configuration files due to "
243
- "environment's PIP_CONFIG_FILE being os.devnull"
244
- )
245
- return
246
-
247
- for variant, files in config_files.items():
248
- for fname in files:
249
- # If there's specific variant set in `load_only`, load only
250
- # that variant, not the others.
251
- if self.load_only is not None and variant != self.load_only:
252
- logger.debug("Skipping file '%s' (variant: %s)", fname, variant)
253
- continue
254
-
255
- parser = self._load_file(variant, fname)
256
-
257
- # Keeping track of the parsers used
258
- self._parsers[variant].append((fname, parser))
259
-
260
- def _load_file(self, variant: Kind, fname: str) -> RawConfigParser:
261
- logger.verbose("For variant '%s', will try loading '%s'", variant, fname)
262
- parser = self._construct_parser(fname)
263
-
264
- for section in parser.sections():
265
- items = parser.items(section)
266
- self._config[variant].update(self._normalized_keys(section, items))
267
-
268
- return parser
269
-
270
- def _construct_parser(self, fname: str) -> RawConfigParser:
271
- parser = configparser.RawConfigParser()
272
- # If there is no such file, don't bother reading it but create the
273
- # parser anyway, to hold the data.
274
- # Doing this is useful when modifying and saving files, where we don't
275
- # need to construct a parser.
276
- if os.path.exists(fname):
277
- locale_encoding = locale.getpreferredencoding(False)
278
- try:
279
- parser.read(fname, encoding=locale_encoding)
280
- except UnicodeDecodeError:
281
- # See https://github.com/pypa/pip/issues/4963
282
- raise ConfigurationFileCouldNotBeLoaded(
283
- reason=f"contains invalid {locale_encoding} characters",
284
- fname=fname,
285
- )
286
- except configparser.Error as error:
287
- # See https://github.com/pypa/pip/issues/4893
288
- raise ConfigurationFileCouldNotBeLoaded(error=error)
289
- return parser
290
-
291
- def _load_environment_vars(self) -> None:
292
- """Loads configuration from environment variables"""
293
- self._config[kinds.ENV_VAR].update(
294
- self._normalized_keys(":env:", self.get_environ_vars())
295
- )
296
-
297
- def _normalized_keys(
298
- self, section: str, items: Iterable[Tuple[str, Any]]
299
- ) -> Dict[str, Any]:
300
- """Normalizes items to construct a dictionary with normalized keys.
301
-
302
- This routine is where the names become keys and are made the same
303
- regardless of source - configuration files or environment.
304
- """
305
- normalized = {}
306
- for name, val in items:
307
- key = section + "." + _normalize_name(name)
308
- normalized[key] = val
309
- return normalized
310
-
311
- def get_environ_vars(self) -> Iterable[Tuple[str, str]]:
312
- """Returns a generator with all environmental vars with prefix PIP_"""
313
- for key, val in os.environ.items():
314
- if key.startswith("PIP_"):
315
- name = key[4:].lower()
316
- if name not in ENV_NAMES_IGNORED:
317
- yield name, val
318
-
319
- # XXX: This is patched in the tests.
320
- def iter_config_files(self) -> Iterable[Tuple[Kind, List[str]]]:
321
- """Yields variant and configuration files associated with it.
322
-
323
- This should be treated like items of a dictionary.
324
- """
325
- # SMELL: Move the conditions out of this function
326
-
327
- # environment variables have the lowest priority
328
- config_file = os.environ.get("PIP_CONFIG_FILE", None)
329
- if config_file is not None:
330
- yield kinds.ENV, [config_file]
331
- else:
332
- yield kinds.ENV, []
333
-
334
- config_files = get_configuration_files()
335
-
336
- # at the base we have any global configuration
337
- yield kinds.GLOBAL, config_files[kinds.GLOBAL]
338
-
339
- # per-user configuration next
340
- should_load_user_config = not self.isolated and not (
341
- config_file and os.path.exists(config_file)
342
- )
343
- if should_load_user_config:
344
- # The legacy config file is overridden by the new config file
345
- yield kinds.USER, config_files[kinds.USER]
346
-
347
- # finally, the virtualenv configuration, which trumps all the others
348
- yield kinds.SITE, config_files[kinds.SITE]
349
-
350
- def get_values_in_config(self, variant: Kind) -> Dict[str, Any]:
351
- """Get values present in a config file"""
352
- return self._config[variant]
353
-
354
- def _get_parser_to_modify(self) -> Tuple[str, RawConfigParser]:
355
- # Determine which parser to modify
356
- assert self.load_only
357
- parsers = self._parsers[self.load_only]
358
- if not parsers:
359
- # This should not happen if everything works correctly.
360
- raise ConfigurationError(
361
- "Fatal Internal error [id=2]. Please report as a bug."
362
- )
363
-
364
- # Use the highest priority parser.
365
- return parsers[-1]
366
-
367
- # XXX: This is patched in the tests.
368
- def _mark_as_modified(self, fname: str, parser: RawConfigParser) -> None:
369
- file_parser_tuple = (fname, parser)
370
- if file_parser_tuple not in self._modified_parsers:
371
- self._modified_parsers.append(file_parser_tuple)
372
-
373
- def __repr__(self) -> str:
374
- return f"{self.__class__.__name__}({self._dictionary!r})"
 
spaces/Bigshot/RSA-v0.1.2/app.py DELETED
@@ -1,56 +0,0 @@
1
- import os
2
- os.system('pip install tensorflow')
3
- import tensorflow as tf
4
- from tensorflow import keras
5
- import numpy as np
6
- import gradio as gr
7
-
8
- tokenizer = tf.keras.preprocessing.text.Tokenizer()
9
-
10
- #Reads Text Inputs Here
11
- f=open('Inputs.txt','r')
12
- inputs = f.read().split('\n')
13
- f.close()
14
-
15
- corpus = inputs
16
-
17
- tokenizer.fit_on_texts(corpus)
18
- sequences = tokenizer.texts_to_sequences(corpus)
19
-
20
- max_length = max([len(s) for s in sequences])
21
-
22
- # Load your saved model
23
- model = tf.keras.models.load_model('sentiment_mini-test')
24
-
25
- model.summary()
26
-
27
- def use(input_text):
28
- # Preprocess the input text
29
- sequences = tokenizer.texts_to_sequences([input_text])
30
- sequences = tf.keras.preprocessing.sequence.pad_sequences(sequences, padding='post', maxlen=max_length)
31
-
32
- # Make a prediction on the input text
33
- prediction = model.predict(sequences)[0]
34
-
35
- # Print the prediction
36
- if prediction[0]<0.3:
37
- return "That's Negative! (" + str(round(round(1-prediction[0],2)*100,1)) + "% confidence)", prediction[0]
38
- elif prediction[0]>0.3:
39
- return "That's Positive! (" + str(round(round(prediction[0],2)*100,1)) + "% confidence)", prediction[0]
40
- else:
41
- return "That's Neutral!", prediction[0]
42
-
43
-
44
- iface = gr.Interface(fn=use,
45
- inputs=gr.Textbox(lines=8, placeholder="Type Something Awesome..."),
46
- outputs=[gr.Textbox(lines=3, placeholder="Waiting For Magic..."),"number"],
47
- title="Use RSA (Review Sentiment Analysis) v0.1.2",
48
- description="<center>This is an NLP model that accepts a text string as input and simply outputs if the string is mean or nice with about 96.5% accuracy. It also provides you with a score of how positive or negative it is.</center>",
49
- article="\nRSA v0.1.2: @2.3M Params w/ 96.5% acc. & 388MB input dataset + 1.59MB output dataset. Trained on <a href='https://www.kaggle.com/datasets/ilhamfp31/yelp-review-dataset'>this Kaggle dataset</a>",
50
- examples=[
51
- ["I went there today! The cut was terrible! I have an awful experience. They lady that cut my hair was nice but she wanted to leave early so she made a disaster in my head!"],
52
- ["Yes! Awesome soy cap, scone, and atmosphere. Nice place to hang out & read, and free WiFi with no login procedure."],
53
- ["Overpriced, salty and overrated!!! Why this place is so popular I will never understand."],
54
- ["This Valentines Day I ordered a pizza for my boyfriend and asked that they make a heart on it out of green peppers. The pizza was great, the heart was perfect, and he loved it!"]
55
- ])
56
- iface.launch()
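Note that the prediction path above only works if the tokenizer is fitted on the same corpus used during training and new text is padded to that corpus's maximum sequence length. A minimal sketch of just that preprocessing step, with a two-line stand-in corpus instead of Inputs.txt and no model loaded (values are illustrative):

import tensorflow as tf

corpus = ["the food was great", "service was terrible"]  # stand-in for Inputs.txt
tokenizer = tf.keras.preprocessing.text.Tokenizer()
tokenizer.fit_on_texts(corpus)
max_length = max(len(s) for s in tokenizer.texts_to_sequences(corpus))

# New text must be tokenized with the same vocabulary and padded to the same
# length so its shape matches what the trained model expects.
seq = tokenizer.texts_to_sequences(["the service was great"])
seq = tf.keras.preprocessing.sequence.pad_sequences(seq, padding="post", maxlen=max_length)
print(seq.shape)  # (1, max_length)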
 
spaces/CVH-vn1210/make_hair/minigpt4/datasets/data_utils.py DELETED
@@ -1,196 +0,0 @@
1
- """
2
- Copyright (c) 2022, salesforce.com, inc.
3
- All rights reserved.
4
- SPDX-License-Identifier: BSD-3-Clause
5
- For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
6
- """
7
-
8
- import gzip
9
- import logging
10
- import os
11
- import random as rnd
12
- import tarfile
13
- import zipfile
14
- import random
15
- from typing import List
16
- from tqdm import tqdm
17
-
18
- import decord
19
- from decord import VideoReader
20
- import webdataset as wds
21
- import numpy as np
22
- import torch
23
- from torch.utils.data.dataset import IterableDataset
24
-
25
- from minigpt4.common.registry import registry
26
- from minigpt4.datasets.datasets.base_dataset import ConcatDataset
27
-
28
-
29
- decord.bridge.set_bridge("torch")
30
- MAX_INT = registry.get("MAX_INT")
31
-
32
-
33
- class ChainDataset(wds.DataPipeline):
34
- r"""Dataset for chaining multiple :class:`DataPipeline` s.
35
-
36
- This class is useful to assemble different existing dataset streams. The
37
- chaining operation is done on-the-fly, so concatenating large-scale
38
- datasets with this class will be efficient.
39
-
40
- Args:
41
- datasets (iterable of IterableDataset): datasets to be chained together
42
- """
43
- def __init__(self, datasets: List[wds.DataPipeline]) -> None:
44
- super().__init__()
45
- self.datasets = datasets
46
- self.prob = []
47
- self.names = []
48
- for dataset in self.datasets:
49
- if hasattr(dataset, 'name'):
50
- self.names.append(dataset.name)
51
- else:
52
- self.names.append('Unknown')
53
- if hasattr(dataset, 'sample_ratio'):
54
- self.prob.append(dataset.sample_ratio)
55
- else:
56
- self.prob.append(1)
57
- logging.info("One of the datapipeline doesn't define ratio and set to 1 automatically.")
58
-
59
- def __iter__(self):
60
- datastreams = [iter(dataset) for dataset in self.datasets]
61
- while True:
62
- select_datastream = random.choices(datastreams, weights=self.prob, k=1)[0]
63
- yield next(select_datastream)
64
-
65
-
66
- def apply_to_sample(f, sample):
67
- if len(sample) == 0:
68
- return {}
69
-
70
- def _apply(x):
71
- if torch.is_tensor(x):
72
- return f(x)
73
- elif isinstance(x, dict):
74
- return {key: _apply(value) for key, value in x.items()}
75
- elif isinstance(x, list):
76
- return [_apply(x) for x in x]
77
- else:
78
- return x
79
-
80
- return _apply(sample)
81
-
82
-
83
- def move_to_cuda(sample):
84
- def _move_to_cuda(tensor):
85
- return tensor.cuda()
86
-
87
- return apply_to_sample(_move_to_cuda, sample)
88
-
89
-
90
- def prepare_sample(samples, cuda_enabled=True):
91
- if cuda_enabled:
92
- samples = move_to_cuda(samples)
93
-
94
- # TODO fp16 support
95
-
96
- return samples
97
-
98
-
99
- def reorg_datasets_by_split(datasets):
100
- """
101
- Organizes datasets by split.
102
-
103
- Args:
104
- datasets: dict of torch.utils.data.Dataset objects by name.
105
-
106
- Returns:
107
- Dict of datasets by split {split_name: List[Datasets]}.
108
- """
109
- # if len(datasets) == 1:
110
- # return datasets[list(datasets.keys())[0]]
111
- # else:
112
- reorg_datasets = dict()
113
-
114
- # reorganize by split
115
- for _, dataset in datasets.items():
116
- for split_name, dataset_split in dataset.items():
117
- if split_name not in reorg_datasets:
118
- reorg_datasets[split_name] = [dataset_split]
119
- else:
120
- reorg_datasets[split_name].append(dataset_split)
121
-
122
- return reorg_datasets
123
-
124
-
125
- def concat_datasets(datasets):
126
- """
127
- Concatenates multiple datasets into a single dataset.
128
-
129
- It supports may-style datasets and DataPipeline from WebDataset. Currently, does not support
130
- generic IterableDataset because it requires creating separate samplers.
131
-
132
- Now only supports conctenating training datasets and assuming validation and testing
133
- have only a single dataset. This is because metrics should not be computed on the concatenated
134
- datasets.
135
-
136
- Args:
137
- datasets: dict of torch.utils.data.Dataset objects by split.
138
-
139
- Returns:
140
- Dict of concatenated datasets by split, "train" is the concatenation of multiple datasets,
141
- "val" and "test" remain the same.
142
-
143
- If the input training datasets contain both map-style and DataPipeline datasets, returns
144
- a tuple, where the first element is a concatenated map-style dataset and the second
145
- element is a chained DataPipeline dataset.
146
-
147
- """
148
- # concatenate datasets in the same split
149
- for split_name in datasets:
150
- if split_name != "train":
151
- assert (
152
- len(datasets[split_name]) == 1
153
- ), "Do not support multiple {} datasets.".format(split_name)
154
- datasets[split_name] = datasets[split_name][0]
155
- else:
156
- iterable_datasets, map_datasets = [], []
157
- for dataset in datasets[split_name]:
158
- if isinstance(dataset, wds.DataPipeline):
159
- logging.info(
160
- "Dataset {} is IterableDataset, can't be concatenated.".format(
161
- dataset
162
- )
163
- )
164
- iterable_datasets.append(dataset)
165
- elif isinstance(dataset, IterableDataset):
166
- raise NotImplementedError(
167
- "Do not support concatenation of generic IterableDataset."
168
- )
169
- else:
170
- map_datasets.append(dataset)
171
-
172
- # if len(iterable_datasets) > 0:
173
- # concatenate map-style datasets and iterable-style datasets separately
174
- if len(iterable_datasets) > 1:
175
- chained_datasets = (
176
- ChainDataset(iterable_datasets)
177
- )
178
- elif len(iterable_datasets) == 1:
179
- chained_datasets = iterable_datasets[0]
180
- else:
181
- chained_datasets = None
182
-
183
- concat_datasets = (
184
- ConcatDataset(map_datasets) if len(map_datasets) > 0 else None
185
- )
186
-
187
- train_datasets = concat_datasets, chained_datasets
188
- train_datasets = tuple([x for x in train_datasets if x is not None])
189
- train_datasets = (
190
- train_datasets[0] if len(train_datasets) == 1 else train_datasets
191
- )
192
-
193
- datasets[split_name] = train_datasets
194
-
195
- return datasets
196
-
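The weighted, on-the-fly interleaving that ChainDataset documents can be illustrated without webdataset at all. Below is a small sketch using plain Python iterators and random.choices; the stream names and the 3:1 ratio are made up for illustration, not taken from the deleted file.

import random
from itertools import count

# Two stand-in "datasets": infinite streams tagged with their origin.
streams = {"coco": ("coco-%d" % i for i in count()),
           "cc3m": ("cc3m-%d" % i for i in count())}
ratios = {"coco": 3, "cc3m": 1}  # sample_ratio analogue: roughly a 3:1 mix

names = list(streams)
weights = [ratios[n] for n in names]

def chained(n_samples):
    # Each step picks one stream in proportion to its ratio and yields its next item,
    # mirroring the __iter__ loop of ChainDataset above.
    for _ in range(n_samples):
        name = random.choices(names, weights=weights, k=1)[0]
        yield next(streams[name])

print(list(chained(8)))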
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/DensePose/densepose/vis/bounding_box.py DELETED
@@ -1,36 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2
- from .base import RectangleVisualizer, TextVisualizer
3
-
4
-
5
- class BoundingBoxVisualizer(object):
6
- def __init__(self):
7
- self.rectangle_visualizer = RectangleVisualizer()
8
-
9
- def visualize(self, image_bgr, boxes_xywh):
10
- for bbox_xywh in boxes_xywh:
11
- image_bgr = self.rectangle_visualizer.visualize(image_bgr, bbox_xywh)
12
- return image_bgr
13
-
14
-
15
- class ScoredBoundingBoxVisualizer(object):
16
- def __init__(self, bbox_visualizer_params=None, score_visualizer_params=None):
17
- if bbox_visualizer_params is None:
18
- bbox_visualizer_params = {}
19
- if score_visualizer_params is None:
20
- score_visualizer_params = {}
21
- self.visualizer_bbox = RectangleVisualizer(**bbox_visualizer_params)
22
- self.visualizer_score = TextVisualizer(**score_visualizer_params)
23
-
24
- def visualize(self, image_bgr, scored_bboxes):
25
- boxes_xywh, box_scores = scored_bboxes
26
- assert len(boxes_xywh) == len(box_scores), (
27
- "Number of bounding boxes {} should be equal to the number of "
28
- "scores".format(len(boxes_xywh), len(box_scores))
29
- )
30
- for i, box_xywh in enumerate(boxes_xywh):
31
- score_i = box_scores[i]
32
- image_bgr = self.visualizer_bbox.visualize(image_bgr, box_xywh)
33
- score_txt = "{0:6.4f}".format(score_i)
34
- topleft_xy = box_xywh[0], box_xywh[1]
35
- image_bgr = self.visualizer_score.visualize(image_bgr, score_txt, topleft_xy)
36
- return image_bgr
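ScoredBoundingBoxVisualizer expects scored_bboxes to be a pair of equal-length sequences (boxes in XYWH format plus one score per box) and renders each score with the "{0:6.4f}" format at the box's top-left corner. A tiny sketch of that contract with print statements standing in for the Rectangle/Text visualizers (the stubs are placeholders, not the DensePose classes):

boxes_xywh = [(10, 20, 50, 40), (100, 80, 30, 60)]
box_scores = [0.91234, 0.4567]
assert len(boxes_xywh) == len(box_scores)

for box_xywh, score in zip(boxes_xywh, box_scores):
    score_txt = "{0:6.4f}".format(score)   # same formatting as above
    topleft_xy = box_xywh[0], box_xywh[1]  # text is anchored at the box corner
    print("draw box", box_xywh, "label", score_txt, "at", topleft_xy)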
 
spaces/CVPR/LIVE/thrust/cmake/ThrustHeaderTesting.cmake DELETED
@@ -1,119 +0,0 @@
1
- # For every public header, build a translation unit containing `#include <header>`
2
- # to let the compiler try to figure out warnings in that header if it is not otherwise
3
- # included in tests, and also to verify if the headers are modular enough.
4
- # .inl files are not globbed for, because they are not supposed to be used as public
5
- # entrypoints.
6
-
7
- foreach(thrust_target IN LISTS THRUST_TARGETS)
8
- thrust_get_target_property(config_host ${thrust_target} HOST)
9
- thrust_get_target_property(config_device ${thrust_target} DEVICE)
10
- thrust_get_target_property(config_prefix ${thrust_target} PREFIX)
11
-
12
- string(TOLOWER "${config_host}" host_lower)
13
- string(TOLOWER "${config_device}" device_lower)
14
-
15
- # GLOB ALL THE THINGS
16
- set(headers_globs thrust/*.h)
17
- set(headers_exclude_systems_globs thrust/system/*/*)
18
- set(headers_systems_globs
19
- thrust/system/${host_lower}/*
20
- thrust/system/${device_lower}/*
21
- )
22
- set(headers_exclude_details_globs
23
- thrust/detail/*
24
- thrust/*/detail/*
25
- thrust/*/*/detail/*
26
- )
27
-
28
- # Get all .h files...
29
- file(GLOB_RECURSE headers
30
- RELATIVE "${Thrust_SOURCE_DIR}/thrust"
31
- CONFIGURE_DEPENDS
32
- ${headers_globs}
33
- )
34
-
35
- # ...then remove all system specific headers...
36
- file(GLOB_RECURSE headers_exclude_systems
37
- RELATIVE "${Thrust_SOURCE_DIR}/thrust"
38
- CONFIGURE_DEPENDS
39
- ${headers_exclude_systems_globs}
40
- )
41
- list(REMOVE_ITEM headers ${headers_exclude_systems})
42
-
43
- # ...then add all headers specific to the selected host and device systems back again...
44
- file(GLOB_RECURSE headers_systems
45
- RELATIVE ${Thrust_SOURCE_DIR}/thrust
46
- CONFIGURE_DEPENDS
47
- ${headers_systems_globs}
48
- )
49
- list(APPEND headers ${headers_systems})
50
-
51
- # ...and remove all the detail headers (also removing the detail headers from the selected systems).
52
- file(GLOB_RECURSE headers_exclude_details
53
- RELATIVE "${Thrust_SOURCE_DIR}/thrust"
54
- CONFIGURE_DEPENDS
55
- ${headers_exclude_details_globs}
56
- )
57
- list(REMOVE_ITEM headers ${headers_exclude_details})
58
-
59
- # List of headers that aren't implemented for all backends, but are implemented for CUDA.
60
- set(partially_implemented_CUDA
61
- async/copy.h
62
- async/for_each.h
63
- async/reduce.h
64
- async/sort.h
65
- async/transform.h
66
- event.h
67
- future.h
68
- )
69
-
70
- # List of headers that aren't implemented for all backends, but are implemented for CPP.
71
- set(partially_implemented_CPP
72
- )
73
-
74
- # List of headers that aren't implemented for all backends, but are implemented for TBB.
75
- set(partially_implemented_TBB
76
- )
77
-
78
- # List of headers that aren't implemented for all backends, but are implemented for OMP.
79
- set(partially_implemented_OMP
80
- )
81
-
82
- # List of all partially implemented headers.
83
- set(partially_implemented
84
- ${partially_implemented_CUDA}
85
- ${partially_implemented_CPP}
86
- ${partially_implemented_TBB}
87
- ${partially_implemented_OMP}
88
- )
89
- list(REMOVE_DUPLICATES partially_implemented)
90
-
91
- set(headertest_srcs)
92
-
93
- foreach (header IN LISTS headers)
94
- if ("${header}" IN_LIST partially_implemented)
95
- # This header is partially implemented on _some_ backends...
96
- if (NOT "${header}" IN_LIST partially_implemented_${config_device})
97
- # ...but not on the selected one.
98
- continue()
99
- endif()
100
- endif()
101
-
102
- set(headertest_src_ext .cpp)
103
- if ("CUDA" STREQUAL "${config_device}")
104
- set(headertest_src_ext .cu)
105
- endif()
106
-
107
- set(headertest_src "headers/${config_prefix}/${header}${headertest_src_ext}")
108
- configure_file("${Thrust_SOURCE_DIR}/cmake/header_test.in" "${headertest_src}")
109
-
110
- list(APPEND headertest_srcs "${headertest_src}")
111
- endforeach()
112
-
113
- set(headertest_target ${config_prefix}.headers)
114
- add_library(${headertest_target} OBJECT ${headertest_srcs})
115
- target_link_libraries(${headertest_target} PUBLIC ${thrust_target})
116
- thrust_clone_target_properties(${headertest_target} ${thrust_target})
117
-
118
- add_dependencies(${config_prefix}.all ${headertest_target})
119
- endforeach()
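The glob sequence described in the comments above (collect every header, drop all system-specific ones, re-add only the selected host/device systems, then strip detail headers) is plain set filtering. A rough Python sketch of the same logic over an invented file list; fnmatch only approximates CMake's GLOB_RECURSE semantics and the paths are illustrative:

from fnmatch import fnmatch

headers = [
    "thrust/sort.h",
    "thrust/detail/sort.h",
    "thrust/system/cuda/sort.h",
    "thrust/system/tbb/sort.h",
    "thrust/system/cuda/detail/sort.h",
]
host, device = "cpp", "cuda"

def matches(path, patterns):
    return any(fnmatch(path, p) for p in patterns)

# 1) all headers minus anything system-specific
selected = [h for h in headers if not matches(h, ["thrust/system/*"])]
# 2) plus the headers of the selected host and device systems
selected += [h for h in headers if matches(h, [f"thrust/system/{host}/*",
                                               f"thrust/system/{device}/*"])]
# 3) minus every detail/ header, including those of the re-added systems
selected = [h for h in selected if not matches(h, ["thrust/detail/*",
                                                   "thrust/*/detail/*",
                                                   "thrust/*/*/detail/*"])]

print(selected)  # ['thrust/sort.h', 'thrust/system/cuda/sort.h']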
 
spaces/CVPR/LIVE/thrust/thrust/iterator/iterator_categories.h DELETED
@@ -1,224 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
-
18
- /*! \file thrust/iterator/iterator_categories.h
19
- * \brief Types for reasoning about the categories of iterators
20
- */
21
-
22
- /*
23
- * (C) Copyright Jeremy Siek 2002.
24
- *
25
- * Distributed under the Boost Software License, Version 1.0.
26
- * (See accompanying NOTICE file for the complete license)
27
- *
28
- * For more information, see http://www.boost.org
29
- */
30
-
31
-
32
- #pragma once
33
-
34
- #include <thrust/detail/config.h>
35
- #include <thrust/iterator/detail/iterator_category_with_system_and_traversal.h>
36
- #include <thrust/iterator/detail/iterator_traversal_tags.h>
37
- #include <thrust/iterator/detail/device_system_tag.h>
38
-
39
- // #include this for stl's iterator tags
40
- #include <iterator>
41
-
42
- namespace thrust
43
- {
44
-
45
- /*! \addtogroup iterators
46
- * \addtogroup iterator_tags Iterator Tags
47
- * \ingroup iterators
48
- * \addtogroup iterator_tag_classes Iterator Tag Classes
49
- * \ingroup iterator_tags
50
- * \{
51
- */
52
-
53
- /*! \p input_device_iterator_tag is an empty class: it has no member functions,
54
- * member variables, or nested types. It is used solely as a "tag": a
55
- * representation of the Input Device Iterator concept within the C++ type
56
- * system.
57
- *
58
- * \see http://www.sgi.com/tech/sgi/input_iterator_tag.html, iterator_traits,
59
- * output_device_iterator_tag, forward_device_iterator_tag,
60
- * bidirectional_device_iterator_tag, random_access_device_iterator_tag,
61
- * input_host_iterator_tag, output_host_iterator_tag, forward_host_iterator_tag,
62
- * bidirectional_host_iterator_tag, random_access_host_iterator_tag
63
- */
64
- struct input_device_iterator_tag
65
- : thrust::detail::iterator_category_with_system_and_traversal<
66
- std::input_iterator_tag,
67
- thrust::device_system_tag,
68
- thrust::single_pass_traversal_tag
69
- >
70
- {};
71
-
72
- /*! \p output_device_iterator_tag is an empty class: it has no member functions,
73
- * member variables, or nested types. It is used solely as a "tag": a
74
- * representation of the Output Device Iterator concept within the C++ type
75
- * system.
76
- *
77
- * \see http://www.sgi.com/tech/sgi/output_iterator_tag.html, iterator_traits,
78
- * input_device_iterator_tag, forward_device_iterator_tag,
79
- * bidirectional_device_iterator_tag, random_access_device_iterator_tag,
80
- * input_host_iterator_tag, output_host_iterator_tag, forward_host_iterator_tag,
81
- * bidirectional_host_iterator_tag, random_access_host_iterator_tag
82
- */
83
- struct output_device_iterator_tag
84
- : thrust::detail::iterator_category_with_system_and_traversal<
85
- std::output_iterator_tag,
86
- thrust::device_system_tag,
87
- thrust::single_pass_traversal_tag
88
- >
89
- {};
90
-
91
- /*! \p forward_device_iterator_tag is an empty class: it has no member functions,
92
- * member variables, or nested types. It is used solely as a "tag": a
93
- * representation of the Forward Device Iterator concept within the C++ type
94
- * system.
95
- *
96
- * \see http://www.sgi.com/tech/sgi/forward_iterator_tag.html, iterator_traits,
97
- * input_device_iterator_tag, output_device_iterator_tag,
98
- * bidirectional_device_iterator_tag, random_access_device_iterator_tag,
99
- * input_host_iterator_tag, output_host_iterator_tag, forward_host_iterator_tag,
100
- * bidirectional_host_iterator_tag, random_access_host_iterator_tag
101
- */
102
- struct forward_device_iterator_tag
103
- : thrust::detail::iterator_category_with_system_and_traversal<
104
- std::forward_iterator_tag,
105
- thrust::device_system_tag,
106
- thrust::forward_traversal_tag
107
- >
108
- {};
109
-
110
- /*! \p bidirectional_device_iterator_tag is an empty class: it has no member
111
- * functions, member variables, or nested types. It is used solely as a "tag": a
112
- * representation of the Bidirectional Device Iterator concept within the C++
113
- * type system.
114
- *
115
- * \see http://www.sgi.com/tech/sgi/bidirectional_iterator_tag.html,
116
- * iterator_traits, input_device_iterator_tag, output_device_iterator_tag,
117
- * forward_device_iterator_tag, random_access_device_iterator_tag,
118
- * input_host_iterator_tag, output_host_iterator_tag, forward_host_iterator_tag,
119
- * bidirectional_host_iterator_tag, random_access_host_iterator_tag
120
- */
121
- struct bidirectional_device_iterator_tag
122
- : thrust::detail::iterator_category_with_system_and_traversal<
123
- std::bidirectional_iterator_tag,
124
- thrust::device_system_tag,
125
- thrust::bidirectional_traversal_tag
126
- >
127
- {};
128
-
129
- /*! \p random_access_device_iterator_tag is an empty class: it has no member
130
- * functions, member variables, or nested types. It is used solely as a "tag": a
131
- * representation of the Random Access Device Iterator concept within the C++
132
- * type system.
133
- *
134
- * \see http://www.sgi.com/tech/sgi/random_access_iterator_tag.html,
135
- * iterator_traits, input_device_iterator_tag, output_device_iterator_tag,
136
- * forward_device_iterator_tag, bidirectional_device_iterator_tag,
137
- * input_host_iterator_tag, output_host_iterator_tag, forward_host_iterator_tag,
138
- * bidirectional_host_iterator_tag, random_access_host_iterator_tag
139
- */
140
- struct random_access_device_iterator_tag
141
- : thrust::detail::iterator_category_with_system_and_traversal<
142
- std::random_access_iterator_tag,
143
- thrust::device_system_tag,
144
- thrust::random_access_traversal_tag
145
- >
146
- {};
147
-
148
- /*! \p input_host_iterator_tag is an empty class: it has no member
149
- * functions, member variables, or nested types. It is used solely as a "tag": a
150
- * representation of the Input Host Iterator concept within the C++
151
- * type system.
152
- *
153
- * \see http://www.sgi.com/tech/sgi/input_iterator_tag.html,
154
- * iterator_traits, input_device_iterator_tag, output_device_iterator_tag,
155
- * forward_device_iterator_tag, bidirectional_device_iterator_tag,
156
- * random_access_device_iterator_tag,
157
- * output_host_iterator_tag, forward_host_iterator_tag,
158
- * bidirectional_host_iterator_tag, random_access_host_iterator_tag
159
- */
160
- typedef std::input_iterator_tag input_host_iterator_tag;
161
-
162
- /*! \p output_host_iterator_tag is an empty class: it has no member
163
- * functions, member variables, or nested types. It is used solely as a "tag": a
164
- * representation of the Output Host Iterator concept within the C++
165
- * type system.
166
- *
167
- * \see http://www.sgi.com/tech/sgi/output_iterator_tag.html,
168
- * iterator_traits, input_device_iterator_tag, output_device_iterator_tag,
169
- * forward_device_iterator_tag, bidirectional_device_iterator_tag,
170
- * random_access_device_iterator_tag,
171
- * input_host_iterator_tag, forward_host_iterator_tag,
172
- * bidirectional_host_iterator_tag, random_access_host_iterator_tag
173
- */
174
- typedef std::output_iterator_tag output_host_iterator_tag;
175
-
176
- /*! \p forward_host_iterator_tag is an empty class: it has no member
177
- * functions, member variables, or nested types. It is used solely as a "tag": a
178
- * representation of the Forward Host Iterator concept within the C++
179
- * type system.
180
- *
181
- * \see http://www.sgi.com/tech/sgi/forward_iterator_tag.html,
182
- * iterator_traits, input_device_iterator_tag, output_device_iterator_tag,
183
- * forward_device_iterator_tag, bidirectional_device_iterator_tag,
184
- * random_access_device_iterator_tag,
185
- * input_host_iterator_tag, output_host_iterator_tag,
186
- * bidirectional_host_iterator_tag, random_access_host_iterator_tag
187
- */
188
- typedef std::forward_iterator_tag forward_host_iterator_tag;
189
-
190
- /*! \p bidirectional_host_iterator_tag is an empty class: it has no member
191
- * functions, member variables, or nested types. It is used solely as a "tag": a
192
- * representation of the Bidirectional Host Iterator concept within the C++
193
- * type system.
194
- *
195
- * \see http://www.sgi.com/tech/sgi/bidirectional_iterator_tag.html,
196
- * iterator_traits, input_device_iterator_tag, output_device_iterator_tag,
197
- * forward_device_iterator_tag, bidirectional_device_iterator_tag,
198
- * random_access_device_iterator_tag,
199
- * input_host_iterator_tag, output_host_iterator_tag,
200
- * forward_host_iterator_tag, random_access_host_iterator_tag
201
- */
202
- typedef std::bidirectional_iterator_tag bidirectional_host_iterator_tag;
203
-
204
- /*! \p random_access_host_iterator_tag is an empty class: it has no member
205
- * functions, member variables, or nested types. It is used solely as a "tag": a
206
- * representation of the Random Access Host Iterator concept within the C++
207
- * type system.
208
- *
209
- * \see http://www.sgi.com/tech/sgi/random_access_iterator_tag.html,
210
- * iterator_traits, input_device_iterator_tag, output_device_iterator_tag,
211
- * forward_device_iterator_tag, bidirectional_device_iterator_tag,
212
- * random_access_device_iterator_tag,
213
- * input_host_iterator_tag, output_host_iterator_tag,
214
- * forward_host_iterator_tag, bidirectional_host_iterator_tag
215
- */
216
- typedef std::random_access_iterator_tag random_access_host_iterator_tag;
217
-
218
- /*! \} // end iterator_tag_classes
219
- */
220
-
221
- } // end namespace thrust
222
-
223
- #include <thrust/iterator/detail/universal_categories.h>
224
-
 
spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/mismatch.h DELETED
@@ -1,23 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
-
21
- // this system inherits mismatch
22
- #include <thrust/system/cpp/detail/mismatch.h>
23
-
 
spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/scatter.h DELETED
@@ -1,23 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
-
21
- // this system inherits this algorithm
22
- #include <thrust/system/cpp/detail/scatter.h>
23
-
 
spaces/CVPR/WALT/mmdet/models/dense_heads/anchor_head.py DELETED
@@ -1,751 +0,0 @@
1
- import torch
2
- import torch.nn as nn
3
- from mmcv.cnn import normal_init
4
- from mmcv.runner import force_fp32
5
-
6
- from mmdet.core import (anchor_inside_flags, build_anchor_generator,
7
- build_assigner, build_bbox_coder, build_sampler,
8
- images_to_levels, multi_apply, multiclass_nms, unmap)
9
- from ..builder import HEADS, build_loss
10
- from .base_dense_head import BaseDenseHead
11
- from .dense_test_mixins import BBoxTestMixin
12
-
13
-
14
- @HEADS.register_module()
15
- class AnchorHead(BaseDenseHead, BBoxTestMixin):
16
- """Anchor-based head (RPN, RetinaNet, SSD, etc.).
17
-
18
- Args:
19
- num_classes (int): Number of categories excluding the background
20
- category.
21
- in_channels (int): Number of channels in the input feature map.
22
- feat_channels (int): Number of hidden channels. Used in child classes.
23
- anchor_generator (dict): Config dict for anchor generator
24
- bbox_coder (dict): Config of bounding box coder.
25
- reg_decoded_bbox (bool): If true, the regression loss would be
26
- applied directly on decoded bounding boxes, converting both
27
- the predicted boxes and regression targets to absolute
28
- coordinates format. Default False. It should be `True` when
29
- using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head.
30
- loss_cls (dict): Config of classification loss.
31
- loss_bbox (dict): Config of localization loss.
32
- train_cfg (dict): Training config of anchor head.
33
- test_cfg (dict): Testing config of anchor head.
34
- """ # noqa: W605
35
-
36
- def __init__(self,
37
- num_classes,
38
- in_channels,
39
- feat_channels=256,
40
- anchor_generator=dict(
41
- type='AnchorGenerator',
42
- scales=[8, 16, 32],
43
- ratios=[0.5, 1.0, 2.0],
44
- strides=[4, 8, 16, 32, 64]),
45
- bbox_coder=dict(
46
- type='DeltaXYWHBBoxCoder',
47
- clip_border=True,
48
- target_means=(.0, .0, .0, .0),
49
- target_stds=(1.0, 1.0, 1.0, 1.0)),
50
- reg_decoded_bbox=False,
51
- loss_cls=dict(
52
- type='CrossEntropyLoss',
53
- use_sigmoid=True,
54
- loss_weight=1.0),
55
- loss_bbox=dict(
56
- type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0),
57
- train_cfg=None,
58
- test_cfg=None):
59
- super(AnchorHead, self).__init__()
60
- self.in_channels = in_channels
61
- self.num_classes = num_classes
62
- self.feat_channels = feat_channels
63
- self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
64
- # TODO better way to determine whether sample or not
65
- self.sampling = loss_cls['type'] not in [
66
- 'FocalLoss', 'GHMC', 'QualityFocalLoss'
67
- ]
68
- if self.use_sigmoid_cls:
69
- self.cls_out_channels = num_classes
70
- else:
71
- self.cls_out_channels = num_classes + 1
72
-
73
- if self.cls_out_channels <= 0:
74
- raise ValueError(f'num_classes={num_classes} is too small')
75
- self.reg_decoded_bbox = reg_decoded_bbox
76
-
77
- self.bbox_coder = build_bbox_coder(bbox_coder)
78
- self.loss_cls = build_loss(loss_cls)
79
- self.loss_bbox = build_loss(loss_bbox)
80
- self.train_cfg = train_cfg
81
- self.test_cfg = test_cfg
82
- if self.train_cfg:
83
- self.assigner = build_assigner(self.train_cfg.assigner)
84
- # use PseudoSampler when sampling is False
85
- if self.sampling and hasattr(self.train_cfg, 'sampler'):
86
- sampler_cfg = self.train_cfg.sampler
87
- else:
88
- sampler_cfg = dict(type='PseudoSampler')
89
- self.sampler = build_sampler(sampler_cfg, context=self)
90
- self.fp16_enabled = False
91
-
92
- self.anchor_generator = build_anchor_generator(anchor_generator)
93
- # usually the numbers of anchors for each level are the same
94
- # except SSD detectors
95
- self.num_anchors = self.anchor_generator.num_base_anchors[0]
96
- self._init_layers()
97
-
98
- def _init_layers(self):
99
- """Initialize layers of the head."""
100
- self.conv_cls = nn.Conv2d(self.in_channels,
101
- self.num_anchors * self.cls_out_channels, 1)
102
- self.conv_reg = nn.Conv2d(self.in_channels, self.num_anchors * 4, 1)
103
-
104
- def init_weights(self):
105
- """Initialize weights of the head."""
106
- normal_init(self.conv_cls, std=0.01)
107
- normal_init(self.conv_reg, std=0.01)
108
-
109
- def forward_single(self, x):
110
- """Forward feature of a single scale level.
111
-
112
- Args:
113
- x (Tensor): Features of a single scale level.
114
-
115
- Returns:
116
- tuple:
117
- cls_score (Tensor): Cls scores for a single scale level \
118
- the channels number is num_anchors * num_classes.
119
- bbox_pred (Tensor): Box energies / deltas for a single scale \
120
- level, the channels number is num_anchors * 4.
121
- """
122
- cls_score = self.conv_cls(x)
123
- bbox_pred = self.conv_reg(x)
124
- return cls_score, bbox_pred
125
-
126
- def forward(self, feats):
127
- """Forward features from the upstream network.
128
-
129
- Args:
130
- feats (tuple[Tensor]): Features from the upstream network, each is
131
- a 4D-tensor.
132
-
133
- Returns:
134
- tuple: A tuple of classification scores and bbox prediction.
135
-
136
- - cls_scores (list[Tensor]): Classification scores for all \
137
- scale levels, each is a 4D-tensor, the channels number \
138
- is num_anchors * num_classes.
139
- - bbox_preds (list[Tensor]): Box energies / deltas for all \
140
- scale levels, each is a 4D-tensor, the channels number \
141
- is num_anchors * 4.
142
- """
143
- return multi_apply(self.forward_single, feats)
144
-
145
- def get_anchors(self, featmap_sizes, img_metas, device='cuda'):
146
- """Get anchors according to feature map sizes.
147
-
148
- Args:
149
- featmap_sizes (list[tuple]): Multi-level feature map sizes.
150
- img_metas (list[dict]): Image meta info.
151
- device (torch.device | str): Device for returned tensors
152
-
153
- Returns:
154
- tuple:
155
- anchor_list (list[Tensor]): Anchors of each image.
156
- valid_flag_list (list[Tensor]): Valid flags of each image.
157
- """
158
- num_imgs = len(img_metas)
159
-
160
- # since feature map sizes of all images are the same, we only compute
161
- # anchors for one time
162
- multi_level_anchors = self.anchor_generator.grid_anchors(
163
- featmap_sizes, device)
164
- anchor_list = [multi_level_anchors for _ in range(num_imgs)]
165
-
166
- # for each image, we compute valid flags of multi level anchors
167
- valid_flag_list = []
168
- for img_id, img_meta in enumerate(img_metas):
169
- multi_level_flags = self.anchor_generator.valid_flags(
170
- featmap_sizes, img_meta['pad_shape'], device)
171
- valid_flag_list.append(multi_level_flags)
172
-
173
- return anchor_list, valid_flag_list
174
-
175
- def _get_targets_single(self,
176
- flat_anchors,
177
- valid_flags,
178
- gt_bboxes,
179
- gt_bboxes_ignore,
180
- gt_labels,
181
- img_meta,
182
- label_channels=1,
183
- unmap_outputs=True):
184
- """Compute regression and classification targets for anchors in a
185
- single image.
186
-
187
- Args:
188
- flat_anchors (Tensor): Multi-level anchors of the image, which are
189
- concatenated into a single tensor of shape (num_anchors ,4)
190
- valid_flags (Tensor): Multi level valid flags of the image,
191
- which are concatenated into a single tensor of
192
- shape (num_anchors,).
193
- gt_bboxes (Tensor): Ground truth bboxes of the image,
194
- shape (num_gts, 4).
195
- gt_bboxes_ignore (Tensor): Ground truth bboxes to be
196
- ignored, shape (num_ignored_gts, 4).
197
- img_meta (dict): Meta info of the image.
198
- gt_labels (Tensor): Ground truth labels of each box,
199
- shape (num_gts,).
200
- label_channels (int): Channel of label.
201
- unmap_outputs (bool): Whether to map outputs back to the original
202
- set of anchors.
203
-
204
- Returns:
205
- tuple:
206
- labels_list (list[Tensor]): Labels of each level
207
- label_weights_list (list[Tensor]): Label weights of each level
208
- bbox_targets_list (list[Tensor]): BBox targets of each level
209
- bbox_weights_list (list[Tensor]): BBox weights of each level
210
- num_total_pos (int): Number of positive samples in all images
211
- num_total_neg (int): Number of negative samples in all images
212
- """
213
- inside_flags = anchor_inside_flags(flat_anchors, valid_flags,
214
- img_meta['img_shape'][:2],
215
- self.train_cfg.allowed_border)
216
- if not inside_flags.any():
217
- return (None, ) * 7
218
- # assign gt and sample anchors
219
- anchors = flat_anchors[inside_flags, :]
220
-
221
- assign_result = self.assigner.assign(
222
- anchors, gt_bboxes, gt_bboxes_ignore,
223
- None if self.sampling else gt_labels)
224
- sampling_result = self.sampler.sample(assign_result, anchors,
225
- gt_bboxes)
226
-
227
- num_valid_anchors = anchors.shape[0]
228
- bbox_targets = torch.zeros_like(anchors)
229
- bbox_weights = torch.zeros_like(anchors)
230
- labels = anchors.new_full((num_valid_anchors, ),
231
- self.num_classes,
232
- dtype=torch.long)
233
- label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)
234
-
235
- pos_inds = sampling_result.pos_inds
236
- neg_inds = sampling_result.neg_inds
237
- if len(pos_inds) > 0:
238
- if not self.reg_decoded_bbox:
239
- pos_bbox_targets = self.bbox_coder.encode(
240
- sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes)
241
- else:
242
- pos_bbox_targets = sampling_result.pos_gt_bboxes
243
- bbox_targets[pos_inds, :] = pos_bbox_targets
244
- bbox_weights[pos_inds, :] = 1.0
245
- if gt_labels is None:
246
- # Only rpn gives gt_labels as None
247
- # Foreground is the first class since v2.5.0
248
- labels[pos_inds] = 0
249
- else:
250
- labels[pos_inds] = gt_labels[
251
- sampling_result.pos_assigned_gt_inds]
252
- if self.train_cfg.pos_weight <= 0:
253
- label_weights[pos_inds] = 1.0
254
- else:
255
- label_weights[pos_inds] = self.train_cfg.pos_weight
256
- if len(neg_inds) > 0:
257
- label_weights[neg_inds] = 1.0
258
-
259
- # map up to original set of anchors
260
- if unmap_outputs:
261
- num_total_anchors = flat_anchors.size(0)
262
- labels = unmap(
263
- labels, num_total_anchors, inside_flags,
264
- fill=self.num_classes) # fill bg label
265
- label_weights = unmap(label_weights, num_total_anchors,
266
- inside_flags)
267
- bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)
268
- bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)
269
-
270
- return (labels, label_weights, bbox_targets, bbox_weights, pos_inds,
271
- neg_inds, sampling_result)
272
-
273
- def get_targets(self,
274
- anchor_list,
275
- valid_flag_list,
276
- gt_bboxes_list,
277
- img_metas,
278
- gt_bboxes_ignore_list=None,
279
- gt_labels_list=None,
280
- label_channels=1,
281
- unmap_outputs=True,
282
- return_sampling_results=False):
283
- """Compute regression and classification targets for anchors in
284
- multiple images.
285
-
286
- Args:
287
- anchor_list (list[list[Tensor]]): Multi level anchors of each
288
- image. The outer list indicates images, and the inner list
289
- corresponds to feature levels of the image. Each element of
290
- the inner list is a tensor of shape (num_anchors, 4).
291
- valid_flag_list (list[list[Tensor]]): Multi level valid flags of
292
- each image. The outer list indicates images, and the inner list
293
- corresponds to feature levels of the image. Each element of
294
- the inner list is a tensor of shape (num_anchors, )
295
- gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.
296
- img_metas (list[dict]): Meta info of each image.
297
- gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be
298
- ignored.
299
- gt_labels_list (list[Tensor]): Ground truth labels of each box.
300
- label_channels (int): Channel of label.
301
- unmap_outputs (bool): Whether to map outputs back to the original
302
- set of anchors.
303
-
304
- Returns:
305
- tuple: Usually returns a tuple containing learning targets.
306
-
307
- - labels_list (list[Tensor]): Labels of each level.
308
- - label_weights_list (list[Tensor]): Label weights of each \
309
- level.
310
- - bbox_targets_list (list[Tensor]): BBox targets of each level.
311
- - bbox_weights_list (list[Tensor]): BBox weights of each level.
312
- - num_total_pos (int): Number of positive samples in all \
313
- images.
314
- - num_total_neg (int): Number of negative samples in all \
315
- images.
316
- additional_returns: This function enables user-defined returns from
317
- `self._get_targets_single`. These returns are currently refined
318
- to properties at each feature map (i.e. having HxW dimension).
319
- The results will be concatenated after the end
320
- """
321
- num_imgs = len(img_metas)
322
- assert len(anchor_list) == len(valid_flag_list) == num_imgs
323
-
324
- # anchor number of multi levels
325
- num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
326
- # concat all level anchors to a single tensor
327
- concat_anchor_list = []
328
- concat_valid_flag_list = []
329
- for i in range(num_imgs):
330
- assert len(anchor_list[i]) == len(valid_flag_list[i])
331
- concat_anchor_list.append(torch.cat(anchor_list[i]))
332
- concat_valid_flag_list.append(torch.cat(valid_flag_list[i]))
333
-
334
- # compute targets for each image
335
- if gt_bboxes_ignore_list is None:
336
- gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
337
- if gt_labels_list is None:
338
- gt_labels_list = [None for _ in range(num_imgs)]
339
- results = multi_apply(
340
- self._get_targets_single,
341
- concat_anchor_list,
342
- concat_valid_flag_list,
343
- gt_bboxes_list,
344
- gt_bboxes_ignore_list,
345
- gt_labels_list,
346
- img_metas,
347
- label_channels=label_channels,
348
- unmap_outputs=unmap_outputs)
349
- (all_labels, all_label_weights, all_bbox_targets, all_bbox_weights,
350
- pos_inds_list, neg_inds_list, sampling_results_list) = results[:7]
351
- rest_results = list(results[7:]) # user-added return values
352
- # no valid anchors
353
- if any([labels is None for labels in all_labels]):
354
- return None
355
- # sampled anchors of all images
356
- num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
357
- num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
358
- # split targets to a list w.r.t. multiple levels
359
- labels_list = images_to_levels(all_labels, num_level_anchors)
360
- label_weights_list = images_to_levels(all_label_weights,
361
- num_level_anchors)
362
- bbox_targets_list = images_to_levels(all_bbox_targets,
363
- num_level_anchors)
364
- bbox_weights_list = images_to_levels(all_bbox_weights,
365
- num_level_anchors)
366
- res = (labels_list, label_weights_list, bbox_targets_list,
367
- bbox_weights_list, num_total_pos, num_total_neg)
368
- if return_sampling_results:
369
- res = res + (sampling_results_list, )
370
- for i, r in enumerate(rest_results): # user-added return values
371
- rest_results[i] = images_to_levels(r, num_level_anchors)
372
-
373
- return res + tuple(rest_results)
374
-
375
- def loss_single(self, cls_score, bbox_pred, anchors, labels, label_weights,
376
- bbox_targets, bbox_weights, num_total_samples):
377
- """Compute loss of a single scale level.
378
-
379
- Args:
380
- cls_score (Tensor): Box scores for each scale level
381
- Has shape (N, num_anchors * num_classes, H, W).
382
- bbox_pred (Tensor): Box energies / deltas for each scale
383
- level with shape (N, num_anchors * 4, H, W).
384
- anchors (Tensor): Box reference for each scale level with shape
385
- (N, num_total_anchors, 4).
386
- labels (Tensor): Labels of each anchors with shape
387
- (N, num_total_anchors).
388
- label_weights (Tensor): Label weights of each anchor with shape
389
- (N, num_total_anchors)
390
- bbox_targets (Tensor): BBox regression targets of each anchor with
391
- shape (N, num_total_anchors, 4).
392
- bbox_weights (Tensor): BBox regression loss weights of each anchor
393
- with shape (N, num_total_anchors, 4).
394
- num_total_samples (int): If sampling, num total samples equal to
395
- the number of total anchors; Otherwise, it is the number of
396
-                positive anchors.
-
-        Returns:
-            dict[str, Tensor]: A dictionary of loss components.
-        """
-        # classification loss
-        labels = labels.reshape(-1)
-        label_weights = label_weights.reshape(-1)
-        cls_score = cls_score.permute(0, 2, 3,
-                                      1).reshape(-1, self.cls_out_channels)
-        loss_cls = self.loss_cls(
-            cls_score, labels, label_weights, avg_factor=num_total_samples)
-        # regression loss
-        bbox_targets = bbox_targets.reshape(-1, 4)
-        bbox_weights = bbox_weights.reshape(-1, 4)
-        bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
-        if self.reg_decoded_bbox:
-            # When the regression loss (e.g. `IouLoss`, `GIouLoss`)
-            # is applied directly on the decoded bounding boxes, it
-            # decodes the already encoded coordinates to absolute format.
-            anchors = anchors.reshape(-1, 4)
-            bbox_pred = self.bbox_coder.decode(anchors, bbox_pred)
-        loss_bbox = self.loss_bbox(
-            bbox_pred,
-            bbox_targets,
-            bbox_weights,
-            avg_factor=num_total_samples)
-        return loss_cls, loss_bbox
-
-    @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
-    def loss(self,
-             cls_scores,
-             bbox_preds,
-             gt_bboxes,
-             gt_labels,
-             img_metas,
-             gt_bboxes_ignore=None):
-        """Compute losses of the head.
-
-        Args:
-            cls_scores (list[Tensor]): Box scores for each scale level
-                Has shape (N, num_anchors * num_classes, H, W)
-            bbox_preds (list[Tensor]): Box energies / deltas for each scale
-                level with shape (N, num_anchors * 4, H, W)
-            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
-                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
-            gt_labels (list[Tensor]): class indices corresponding to each box
-            img_metas (list[dict]): Meta information of each image, e.g.,
-                image size, scaling factor, etc.
-            gt_bboxes_ignore (None | list[Tensor]): specify which bounding
-                boxes can be ignored when computing the loss. Default: None
-
-        Returns:
-            dict[str, Tensor]: A dictionary of loss components.
-        """
-        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
-        assert len(featmap_sizes) == self.anchor_generator.num_levels
-
-        device = cls_scores[0].device
-
-        anchor_list, valid_flag_list = self.get_anchors(
-            featmap_sizes, img_metas, device=device)
-        label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
-        cls_reg_targets = self.get_targets(
-            anchor_list,
-            valid_flag_list,
-            gt_bboxes,
-            img_metas,
-            gt_bboxes_ignore_list=gt_bboxes_ignore,
-            gt_labels_list=gt_labels,
-            label_channels=label_channels)
-        if cls_reg_targets is None:
-            return None
-        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
-         num_total_pos, num_total_neg) = cls_reg_targets
-        num_total_samples = (
-            num_total_pos + num_total_neg if self.sampling else num_total_pos)
-
-        # anchor number of multi levels
-        num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
-        # concat all level anchors and flags to a single tensor
-        concat_anchor_list = []
-        for i in range(len(anchor_list)):
-            concat_anchor_list.append(torch.cat(anchor_list[i]))
-        all_anchor_list = images_to_levels(concat_anchor_list,
-                                           num_level_anchors)
-
-        losses_cls, losses_bbox = multi_apply(
-            self.loss_single,
-            cls_scores,
-            bbox_preds,
-            all_anchor_list,
-            labels_list,
-            label_weights_list,
-            bbox_targets_list,
-            bbox_weights_list,
-            num_total_samples=num_total_samples)
-        return dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
-
-    @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
-    def get_bboxes(self,
-                   cls_scores,
-                   bbox_preds,
-                   img_metas,
-                   cfg=None,
-                   rescale=False,
-                   with_nms=True):
-        """Transform network output for a batch into bbox predictions.
-
-        Args:
-            cls_scores (list[Tensor]): Box scores for each level in the
-                feature pyramid, has shape
-                (N, num_anchors * num_classes, H, W).
-            bbox_preds (list[Tensor]): Box energies / deltas for each
-                level in the feature pyramid, has shape
-                (N, num_anchors * 4, H, W).
-            img_metas (list[dict]): Meta information of each image, e.g.,
-                image size, scaling factor, etc.
-            cfg (mmcv.Config | None): Test / postprocessing configuration,
-                if None, test_cfg would be used
-            rescale (bool): If True, return boxes in original image space.
-                Default: False.
-            with_nms (bool): If True, do nms before return boxes.
-                Default: True.
-
-        Returns:
-            list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.
-                The first item is an (n, 5) tensor, where 5 represent
-                (tl_x, tl_y, br_x, br_y, score) and the score between 0 and 1.
-                The shape of the second tensor in the tuple is (n,), and
-                each element represents the class label of the corresponding
-                box.
-
-        Example:
-            >>> import mmcv
-            >>> self = AnchorHead(
-            >>>     num_classes=9,
-            >>>     in_channels=1,
-            >>>     anchor_generator=dict(
-            >>>         type='AnchorGenerator',
-            >>>         scales=[8],
-            >>>         ratios=[0.5, 1.0, 2.0],
-            >>>         strides=[4,]))
-            >>> img_metas = [{'img_shape': (32, 32, 3), 'scale_factor': 1}]
-            >>> cfg = mmcv.Config(dict(
-            >>>     score_thr=0.00,
-            >>>     nms=dict(type='nms', iou_thr=1.0),
-            >>>     max_per_img=10))
-            >>> feat = torch.rand(1, 1, 3, 3)
-            >>> cls_score, bbox_pred = self.forward_single(feat)
-            >>> # note the input lists are over different levels, not images
-            >>> cls_scores, bbox_preds = [cls_score], [bbox_pred]
-            >>> result_list = self.get_bboxes(cls_scores, bbox_preds,
-            >>>                               img_metas, cfg)
-            >>> det_bboxes, det_labels = result_list[0]
-            >>> assert len(result_list) == 1
-            >>> assert det_bboxes.shape[1] == 5
-            >>> assert len(det_bboxes) == len(det_labels) == cfg.max_per_img
-        """
-        assert len(cls_scores) == len(bbox_preds)
-        num_levels = len(cls_scores)
-
-        device = cls_scores[0].device
-        featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)]
-        mlvl_anchors = self.anchor_generator.grid_anchors(
-            featmap_sizes, device=device)
-
-        mlvl_cls_scores = [cls_scores[i].detach() for i in range(num_levels)]
-        mlvl_bbox_preds = [bbox_preds[i].detach() for i in range(num_levels)]
-
-        if torch.onnx.is_in_onnx_export():
-            assert len(
-                img_metas
-            ) == 1, 'Only support one input image while in exporting to ONNX'
-            img_shapes = img_metas[0]['img_shape_for_onnx']
-        else:
-            img_shapes = [
-                img_metas[i]['img_shape']
-                for i in range(cls_scores[0].shape[0])
-            ]
-        scale_factors = [
-            img_metas[i]['scale_factor'] for i in range(cls_scores[0].shape[0])
-        ]
-
-        if with_nms:
-            # some heads don't support with_nms argument
-            result_list = self._get_bboxes(mlvl_cls_scores, mlvl_bbox_preds,
-                                           mlvl_anchors, img_shapes,
-                                           scale_factors, cfg, rescale)
-        else:
-            result_list = self._get_bboxes(mlvl_cls_scores, mlvl_bbox_preds,
-                                           mlvl_anchors, img_shapes,
-                                           scale_factors, cfg, rescale,
-                                           with_nms)
-        return result_list
-
-    def _get_bboxes(self,
-                    mlvl_cls_scores,
-                    mlvl_bbox_preds,
-                    mlvl_anchors,
-                    img_shapes,
-                    scale_factors,
-                    cfg,
-                    rescale=False,
-                    with_nms=True):
-        """Transform outputs for a batch item into bbox predictions.
-
-        Args:
-            mlvl_cls_scores (list[Tensor]): Each element in the list is
-                the scores of bboxes of single level in the feature pyramid,
-                has shape (N, num_anchors * num_classes, H, W).
-            mlvl_bbox_preds (list[Tensor]): Each element in the list is the
-                bboxes predictions of single level in the feature pyramid,
-                has shape (N, num_anchors * 4, H, W).
-            mlvl_anchors (list[Tensor]): Each element in the list is
-                the anchors of single level in feature pyramid, has shape
-                (num_anchors, 4).
-            img_shapes (list[tuple[int]]): Each tuple in the list represent
-                the shape(height, width, 3) of single image in the batch.
-            scale_factors (list[ndarray]): Scale factor of the batch
-                image arange as list[(w_scale, h_scale, w_scale, h_scale)].
-            cfg (mmcv.Config): Test / postprocessing configuration,
-                if None, test_cfg would be used.
-            rescale (bool): If True, return boxes in original image space.
-                Default: False.
-            with_nms (bool): If True, do nms before return boxes.
-                Default: True.
-
-        Returns:
-            list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.
-                The first item is an (n, 5) tensor, where 5 represent
-                (tl_x, tl_y, br_x, br_y, score) and the score between 0 and 1.
-                The shape of the second tensor in the tuple is (n,), and
-                each element represents the class label of the corresponding
-                box.
-        """
-        cfg = self.test_cfg if cfg is None else cfg
-        assert len(mlvl_cls_scores) == len(mlvl_bbox_preds) == len(
-            mlvl_anchors)
-        batch_size = mlvl_cls_scores[0].shape[0]
-        # convert to tensor to keep tracing
-        nms_pre_tensor = torch.tensor(
-            cfg.get('nms_pre', -1),
-            device=mlvl_cls_scores[0].device,
-            dtype=torch.long)
-
-        mlvl_bboxes = []
-        mlvl_scores = []
-        for cls_score, bbox_pred, anchors in zip(mlvl_cls_scores,
-                                                 mlvl_bbox_preds,
-                                                 mlvl_anchors):
-            assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
-            cls_score = cls_score.permute(0, 2, 3,
-                                          1).reshape(batch_size, -1,
-                                                     self.cls_out_channels)
-            if self.use_sigmoid_cls:
-                scores = cls_score.sigmoid()
-            else:
-                scores = cls_score.softmax(-1)
-            bbox_pred = bbox_pred.permute(0, 2, 3,
-                                          1).reshape(batch_size, -1, 4)
-            anchors = anchors.expand_as(bbox_pred)
-            # Always keep topk op for dynamic input in onnx
-            if nms_pre_tensor > 0 and (torch.onnx.is_in_onnx_export()
-                                       or scores.shape[-2] > nms_pre_tensor):
-                from torch import _shape_as_tensor
-                # keep shape as tensor and get k
-                num_anchor = _shape_as_tensor(scores)[-2].to(
-                    nms_pre_tensor.device)
-                nms_pre = torch.where(nms_pre_tensor < num_anchor,
-                                      nms_pre_tensor, num_anchor)
-
-                # Get maximum scores for foreground classes.
-                if self.use_sigmoid_cls:
-                    max_scores, _ = scores.max(-1)
-                else:
-                    # remind that we set FG labels to [0, num_class-1]
-                    # since mmdet v2.0
-                    # BG cat_id: num_class
-                    max_scores, _ = scores[..., :-1].max(-1)
-
-                _, topk_inds = max_scores.topk(nms_pre)
-                batch_inds = torch.arange(batch_size).view(
-                    -1, 1).expand_as(topk_inds)
-                anchors = anchors[batch_inds, topk_inds, :]
-                bbox_pred = bbox_pred[batch_inds, topk_inds, :]
-                scores = scores[batch_inds, topk_inds, :]
-
-            bboxes = self.bbox_coder.decode(
-                anchors, bbox_pred, max_shape=img_shapes)
-            mlvl_bboxes.append(bboxes)
-            mlvl_scores.append(scores)
-
-        batch_mlvl_bboxes = torch.cat(mlvl_bboxes, dim=1)
-        if rescale:
-            batch_mlvl_bboxes /= batch_mlvl_bboxes.new_tensor(
-                scale_factors).unsqueeze(1)
-        batch_mlvl_scores = torch.cat(mlvl_scores, dim=1)
-
-        # Set max number of box to be feed into nms in deployment
-        deploy_nms_pre = cfg.get('deploy_nms_pre', -1)
-        if deploy_nms_pre > 0 and torch.onnx.is_in_onnx_export():
-            # Get maximum scores for foreground classes.
-            if self.use_sigmoid_cls:
-                max_scores, _ = batch_mlvl_scores.max(-1)
-            else:
-                # remind that we set FG labels to [0, num_class-1]
-                # since mmdet v2.0
-                # BG cat_id: num_class
-                max_scores, _ = batch_mlvl_scores[..., :-1].max(-1)
-            _, topk_inds = max_scores.topk(deploy_nms_pre)
-            batch_inds = torch.arange(batch_size).view(-1,
-                                                       1).expand_as(topk_inds)
-            batch_mlvl_scores = batch_mlvl_scores[batch_inds, topk_inds]
-            batch_mlvl_bboxes = batch_mlvl_bboxes[batch_inds, topk_inds]
-        if self.use_sigmoid_cls:
-            # Add a dummy background class to the backend when using sigmoid
-            # remind that we set FG labels to [0, num_class-1] since mmdet v2.0
-            # BG cat_id: num_class
-            padding = batch_mlvl_scores.new_zeros(batch_size,
-                                                  batch_mlvl_scores.shape[1],
-                                                  1)
-            batch_mlvl_scores = torch.cat([batch_mlvl_scores, padding], dim=-1)
-
-        if with_nms:
-            det_results = []
-            for (mlvl_bboxes, mlvl_scores) in zip(batch_mlvl_bboxes,
-                                                  batch_mlvl_scores):
-                det_bbox, det_label = multiclass_nms(mlvl_bboxes, mlvl_scores,
-                                                     cfg.score_thr, cfg.nms,
-                                                     cfg.max_per_img)
-                det_results.append(tuple([det_bbox, det_label]))
-        else:
-            det_results = [
-                tuple(mlvl_bs)
-                for mlvl_bs in zip(batch_mlvl_bboxes, batch_mlvl_scores)
-            ]
-        return det_results
-
-    def aug_test(self, feats, img_metas, rescale=False):
-        """Test function with test time augmentation.
-
-        Args:
-            feats (list[Tensor]): the outer list indicates test-time
-                augmentations and inner Tensor should have a shape NxCxHxW,
-                which contains features for all images in the batch.
-            img_metas (list[list[dict]]): the outer list indicates test-time
-                augs (multiscale, flip, etc.) and the inner list indicates
-                images in a batch. each dict has image information.
-            rescale (bool, optional): Whether to rescale the results.
-                Defaults to False.
-
-        Returns:
-            list[ndarray]: bbox results of each class
-        """
-        return self.aug_test_bboxes(feats, img_metas, rescale=rescale)
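The `loss_single` method shown above flattens the per-level head outputs before handing them to the classification and regression losses. Below is a minimal sketch of that reshape convention in plain PyTorch; the sizes are hypothetical and it deliberately avoids any mmdet dependency, so it only illustrates the tensor bookkeeping, not the losses themselves.

```python
# Plain-PyTorch sketch of the permute/reshape convention used by loss_single.
# All sizes below are hypothetical placeholders.
import torch

N, A, C, H, W = 2, 3, 9, 5, 5            # batch, anchors per cell, classes, feature map size
cls_score = torch.rand(N, A * C, H, W)   # head output: (N, num_anchors * num_classes, H, W)
bbox_pred = torch.rand(N, A * 4, H, W)   # head output: (N, num_anchors * 4, H, W)

flat_cls = cls_score.permute(0, 2, 3, 1).reshape(-1, C)  # (N*H*W*A, C)
flat_box = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)  # (N*H*W*A, 4)
assert flat_cls.shape[0] == flat_box.shape[0] == N * H * W * A
```

The same ordering (permute to channel-last, then flatten) is what keeps the flattened scores aligned with the flattened anchor labels and box targets that `get_targets` produces.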
spaces/Chris4K/llms_compare/Mahanadi English Subtitles Full Movie Download ((LINK)).md DELETED
@@ -1,66 +0,0 @@
- ## Mahanadi english subtitles full movie download
-
-
-
-
-
- ![Mahanadi English Subtitles Full Movie Download ((LINK))](https://i.imgur.com/s4hjOPR.jpg)
-
-
-
-
-
- **Download > [https://eromdesre.blogspot.com/?d=2txP0A](https://eromdesre.blogspot.com/?d=2txP0A)**
-
-
-
-
-
-
-
-
-
-
-
- Here is a possible title and article for the keyword "Mahanadi english subtitles full movie download". I have used code blocks to encapsulate the html formatting. ```
-
- # Mahanadi English Subtitles Full Movie Download: Watch the Classic Tamil Drama Online
-
-
-
- If you are looking for Mahanadi english subtitles full movie download, you have come to the right place. Mahanadi is a 1994 Tamil-language drama film directed by Santhana Bharathi and co-written by Kamal Haasan, who also stars in the lead role. The film tells the story of Krishnaswamy, a simple man who loses his family and fortune due to the evil schemes of his enemies. He then embarks on a quest to find his missing daughter and seek justice for his wrongs.
-
-
-
- Mahanadi is widely regarded as one of the best Tamil films ever made, and has won several awards and accolades, including four National Film Awards and three Filmfare Awards South. The film deals with themes such as corruption, human trafficking, child abuse, and organ trade. It also features a stellar cast of actors, including Sukanya, Cochin Haneefa, Poornam Viswanathan, S. N. Lakshmi, and Mahanadhi Shobana.
-
-
-
- If you want to watch Mahanadi online with english subtitles, you can stream it on various platforms such as Amazon Prime Video, Hotstar, YouTube, and Eros Now. However, if you want to download Mahanadi full movie with english subtitles, you may have to resort to some illegal websites that offer pirated copies of the film. We strongly advise you not to do so, as it is a violation of the copyright laws and may also expose you to malware and viruses.
-
-
-
- Instead, we recommend you to watch Mahanadi legally and ethically on the official streaming platforms that have the rights to the film. By doing so, you will not only enjoy the film in high quality and with proper subtitles, but also support the filmmakers and artists who have worked hard to create this masterpiece.
-
-
-
- So what are you waiting for? Watch Mahanadi english subtitles full movie online today and witness the gripping saga of a man's struggle against fate and injustice.
-
- ```Here are a few more paragraphs for the article. I have used code blocks to encapsulate the html formatting. ```
-
- Mahanadi is not just a film, but a cinematic experience that will leave you spellbound and moved. The film showcases the brilliant performance of Kamal Haasan, who portrays the character of Krishnaswamy with utmost realism and emotion. He makes you feel his pain, anger, despair, and hope as he goes through the trials and tribulations of his life. Kamal Haasan also co-wrote the screenplay of the film, which is based on some real-life incidents that he witnessed or heard about.
-
-
-
- The film also boasts of a captivating soundtrack composed by Ilaiyaraaja, who is considered as one of the greatest music composers of India. The songs of Mahanadi are not only melodious and catchy, but also convey the mood and message of the film. Some of the popular songs of the film are "Pongalo Pongal", "Pattu Poove", "Thiruda Thiruda", and "Kannalane". The background score of the film is also equally impressive and enhances the impact of the scenes.
-
-
-
- Mahanadi is a film that will make you think, feel, and reflect on the harsh realities of life and society. It will also inspire you to fight for your rights and dignity, and to never give up on your dreams and loved ones. Mahanadi is a film that you should not miss, especially if you are a fan of Kamal Haasan or Tamil cinema.
-
- ``` dfd1c89656
-
-
-
-
-
spaces/ChrisCaviar/ControlNet-v1-1/app_depth.py DELETED
@@ -1,105 +0,0 @@
- #!/usr/bin/env python
-
- import gradio as gr
-
- from utils import randomize_seed_fn
-
-
- def create_demo(process, max_images=12, default_num_images=3):
-     with gr.Blocks() as demo:
-         with gr.Row():
-             with gr.Column():
-                 image = gr.Image()
-                 prompt = gr.Textbox(label='Prompt')
-                 run_button = gr.Button('Run')
-                 with gr.Accordion('Advanced options', open=False):
-                     preprocessor_name = gr.Radio(
-                         label='Preprocessor',
-                         choices=['Midas', 'DPT', 'None'],
-                         type='value',
-                         value='DPT')
-                     num_samples = gr.Slider(label='Number of images',
-                                             minimum=1,
-                                             maximum=max_images,
-                                             value=default_num_images,
-                                             step=1)
-                     image_resolution = gr.Slider(label='Image resolution',
-                                                  minimum=256,
-                                                  maximum=512,
-                                                  value=512,
-                                                  step=256)
-                     preprocess_resolution = gr.Slider(
-                         label='Preprocess resolution',
-                         minimum=128,
-                         maximum=512,
-                         value=384,
-                         step=1)
-                     num_steps = gr.Slider(label='Number of steps',
-                                           minimum=1,
-                                           maximum=100,
-                                           value=20,
-                                           step=1)
-                     guidance_scale = gr.Slider(label='Guidance scale',
-                                                minimum=0.1,
-                                                maximum=30.0,
-                                                value=9.0,
-                                                step=0.1)
-                     seed = gr.Slider(label='Seed',
-                                      minimum=0,
-                                      maximum=1000000,
-                                      step=1,
-                                      value=0,
-                                      randomize=True)
-                     randomize_seed = gr.Checkbox(label='Randomize seed',
-                                                  value=True)
-                     a_prompt = gr.Textbox(
-                         label='Additional prompt',
-                         value='best quality, extremely detailed')
-                     n_prompt = gr.Textbox(
-                         label='Negative prompt',
-                         value=
-                         'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
-                     )
-             with gr.Column():
-                 result = gr.Gallery(label='Output', show_label=False).style(
-                     columns=2, object_fit='scale-down')
-         inputs = [
-             image,
-             prompt,
-             a_prompt,
-             n_prompt,
-             num_samples,
-             image_resolution,
-             preprocess_resolution,
-             num_steps,
-             guidance_scale,
-             seed,
-             preprocessor_name,
-         ]
-         prompt.submit(
-             fn=randomize_seed_fn,
-             inputs=[seed, randomize_seed],
-             outputs=seed,
-         ).then(
-             fn=process,
-             inputs=inputs,
-             outputs=result,
-         )
-         run_button.click(
-             fn=randomize_seed_fn,
-             inputs=[seed, randomize_seed],
-             outputs=seed,
-         ).then(
-             fn=process,
-             inputs=inputs,
-             outputs=result,
-             api_name='depth',
-         )
-     return demo
-
-
- if __name__ == '__main__':
-     from model import Model
-     model = Model(task_name='depth')
-     demo = create_demo(model.process_depth)
-     demo.queue().launch()
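`create_demo` above returns a self-contained `gr.Blocks`, and the `__main__` block shows it can be launched on its own. In the multi-task variants of this space the same function is typically mounted as one tab of a larger app; the sketch below illustrates that pattern. The file names, tab label, and surrounding app structure are assumptions for illustration, not taken from this diff.

```python
# Hypothetical top-level app that mounts the depth demo as one tab.
import gradio as gr

from app_depth import create_demo as create_demo_depth
from model import Model  # the space's Model wrapper, as used in __main__ above

model = Model(task_name='depth')

with gr.Blocks() as app:
    with gr.Tabs():
        with gr.TabItem('Depth'):
            create_demo_depth(model.process_depth)

app.queue().launch()
```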
spaces/ChrisPreston/diff-svc_minato_aqua/modules/encoder.py DELETED
@@ -1,208 +0,0 @@
- import torch
-
- from modules.commons.common_layers import *
- from modules.commons.common_layers import Embedding
- from modules.commons.common_layers import SinusoidalPositionalEmbedding
- from utils.hparams import hparams
- from utils.pitch_utils import f0_to_coarse, denorm_f0
-
-
- class LayerNorm(torch.nn.LayerNorm):
-     """Layer normalization module.
-     :param int nout: output dim size
-     :param int dim: dimension to be normalized
-     """
-
-     def __init__(self, nout, dim=-1):
-         """Construct an LayerNorm object."""
-         super(LayerNorm, self).__init__(nout, eps=1e-12)
-         self.dim = dim
-
-     def forward(self, x):
-         """Apply layer normalization.
-         :param torch.Tensor x: input tensor
-         :return: layer normalized tensor
-         :rtype torch.Tensor
-         """
-         if self.dim == -1:
-             return super(LayerNorm, self).forward(x)
-         return super(LayerNorm, self).forward(x.transpose(1, -1)).transpose(1, -1)
-
-
- class PitchPredictor(torch.nn.Module):
-     def __init__(self, idim, n_layers=5, n_chans=384, odim=2, kernel_size=5,
-                  dropout_rate=0.1, padding='SAME'):
-         """Initilize pitch predictor module.
-         Args:
-             idim (int): Input dimension.
-             n_layers (int, optional): Number of convolutional layers.
-             n_chans (int, optional): Number of channels of convolutional layers.
-             kernel_size (int, optional): Kernel size of convolutional layers.
-             dropout_rate (float, optional): Dropout rate.
-         """
-         super(PitchPredictor, self).__init__()
-         self.conv = torch.nn.ModuleList()
-         self.kernel_size = kernel_size
-         self.padding = padding
-         for idx in range(n_layers):
-             in_chans = idim if idx == 0 else n_chans
-             self.conv += [torch.nn.Sequential(
-                 torch.nn.ConstantPad1d(((kernel_size - 1) // 2, (kernel_size - 1) // 2)
-                                        if padding == 'SAME'
-                                        else (kernel_size - 1, 0), 0),
-                 torch.nn.Conv1d(in_chans, n_chans, kernel_size, stride=1, padding=0),
-                 torch.nn.ReLU(),
-                 LayerNorm(n_chans, dim=1),
-                 torch.nn.Dropout(dropout_rate)
-             )]
-         self.linear = torch.nn.Linear(n_chans, odim)
-         self.embed_positions = SinusoidalPositionalEmbedding(idim, 0, init_size=4096)
-         self.pos_embed_alpha = nn.Parameter(torch.Tensor([1]))
-
-     def forward(self, xs):
-         """
-
-         :param xs: [B, T, H]
-         :return: [B, T, H]
-         """
-         positions = self.pos_embed_alpha * self.embed_positions(xs[..., 0])
-         xs = xs + positions
-         xs = xs.transpose(1, -1)  # (B, idim, Tmax)
-         for f in self.conv:
-             xs = f(xs)  # (B, C, Tmax)
-         # NOTE: calculate in log domain
-         xs = self.linear(xs.transpose(1, -1))  # (B, Tmax, H)
-         return xs
-
-
- class SvcEncoder(nn.Module):
-     def __init__(self, dictionary, out_dims=None):
-         super().__init__()
-         # self.dictionary = dictionary
-         self.padding_idx = 0
-         self.hidden_size = hparams['hidden_size']
-         self.out_dims = out_dims
-         if out_dims is None:
-             self.out_dims = hparams['audio_num_mel_bins']
-         self.mel_out = Linear(self.hidden_size, self.out_dims, bias=True)
-         predictor_hidden = hparams['predictor_hidden'] if hparams['predictor_hidden'] > 0 else self.hidden_size
-         if hparams['use_pitch_embed']:
-             self.pitch_embed = Embedding(300, self.hidden_size, self.padding_idx)
-             self.pitch_predictor = PitchPredictor(
-                 self.hidden_size,
-                 n_chans=predictor_hidden,
-                 n_layers=hparams['predictor_layers'],
-                 dropout_rate=hparams['predictor_dropout'],
-                 odim=2 if hparams['pitch_type'] == 'frame' else 1,
-                 padding=hparams['ffn_padding'], kernel_size=hparams['predictor_kernel'])
-         if hparams['use_energy_embed']:
-             self.energy_embed = Embedding(256, self.hidden_size, self.padding_idx)
-         if hparams['use_spk_id']:
-             self.spk_embed_proj = Embedding(hparams['num_spk'], self.hidden_size)
-             if hparams['use_split_spk_id']:
-                 self.spk_embed_f0 = Embedding(hparams['num_spk'], self.hidden_size)
-                 self.spk_embed_dur = Embedding(hparams['num_spk'], self.hidden_size)
-         elif hparams['use_spk_embed']:
-             self.spk_embed_proj = Linear(256, self.hidden_size, bias=True)
-
-     def forward(self, hubert, mel2ph=None, spk_embed=None,
-                 ref_mels=None, f0=None, uv=None, energy=None, skip_decoder=True,
-                 spk_embed_dur_id=None, spk_embed_f0_id=None, infer=False, **kwargs):
-         ret = {}
-         encoder_out = hubert
-         src_nonpadding = (hubert != 0).any(-1)[:, :, None]
-
-         # add ref style embed
-         # Not implemented
-         # variance encoder
-         var_embed = 0
-
-         # encoder_out_dur denotes encoder outputs for duration predictor
-         # in speech adaptation, duration predictor use old speaker embedding
-         if hparams['use_spk_embed']:
-             spk_embed_dur = spk_embed_f0 = spk_embed = self.spk_embed_proj(spk_embed)[:, None, :]
-         elif hparams['use_spk_id']:
-             spk_embed_id = spk_embed
-             if spk_embed_dur_id is None:
-                 spk_embed_dur_id = spk_embed_id
-             if spk_embed_f0_id is None:
-                 spk_embed_f0_id = spk_embed_id
-             spk_embed_0 = self.spk_embed_proj(spk_embed_id.to(hubert.device))[:, None, :]
-             spk_embed_1 = self.spk_embed_proj(torch.LongTensor([0]).to(hubert.device))[:, None, :]
-             spk_embed_2 = self.spk_embed_proj(torch.LongTensor([0]).to(hubert.device))[:, None, :]
-             spk_embed = 1 * spk_embed_0 + 0 * spk_embed_1 + 0 * spk_embed_2
-             spk_embed_dur = spk_embed_f0 = spk_embed
-             if hparams['use_split_spk_id']:
-                 spk_embed_dur = self.spk_embed_dur(spk_embed_dur_id)[:, None, :]
-                 spk_embed_f0 = self.spk_embed_f0(spk_embed_f0_id)[:, None, :]
-         else:
-             spk_embed_dur = spk_embed_f0 = spk_embed = 0
-
-         ret['mel2ph'] = mel2ph
-
-         decoder_inp = F.pad(encoder_out, [0, 0, 1, 0])
-
-         mel2ph_ = mel2ph[..., None].repeat([1, 1, encoder_out.shape[-1]])
-         decoder_inp_origin = decoder_inp = torch.gather(decoder_inp, 1, mel2ph_)  # [B, T, H]
-
-         tgt_nonpadding = (mel2ph > 0).float()[:, :, None]
-
-         # add pitch and energy embed
-         pitch_inp = (decoder_inp_origin + var_embed + spk_embed_f0) * tgt_nonpadding
-         if hparams['use_pitch_embed']:
-             pitch_inp_ph = (encoder_out + var_embed + spk_embed_f0) * src_nonpadding
-             decoder_inp = decoder_inp + self.add_pitch(pitch_inp, f0, uv, mel2ph, ret, encoder_out=pitch_inp_ph)
-         if hparams['use_energy_embed']:
-             decoder_inp = decoder_inp + self.add_energy(pitch_inp, energy, ret)
-
-         ret['decoder_inp'] = decoder_inp = (decoder_inp + spk_embed) * tgt_nonpadding
-         return ret
-
-     def add_dur(self, dur_input, mel2ph, hubert, ret):
-         src_padding = (hubert == 0).all(-1)
-         dur_input = dur_input.detach() + hparams['predictor_grad'] * (dur_input - dur_input.detach())
-         if mel2ph is None:
-             dur, xs = self.dur_predictor.inference(dur_input, src_padding)
-             ret['dur'] = xs
-             ret['dur_choice'] = dur
-             mel2ph = self.length_regulator(dur, src_padding).detach()
-         else:
-             ret['dur'] = self.dur_predictor(dur_input, src_padding)
-         ret['mel2ph'] = mel2ph
-         return mel2ph
-
-     def run_decoder(self, decoder_inp, tgt_nonpadding, ret, infer, **kwargs):
-         x = decoder_inp  # [B, T, H]
-         x = self.mel_out(x)
-         return x * tgt_nonpadding
-
-     def out2mel(self, out):
-         return out
-
-     def add_pitch(self, decoder_inp, f0, uv, mel2ph, ret, encoder_out=None):
-         decoder_inp = decoder_inp.detach() + hparams['predictor_grad'] * (decoder_inp - decoder_inp.detach())
-
-         pitch_padding = (mel2ph == 0)
-         ret['f0_denorm'] = f0_denorm = denorm_f0(f0, uv, hparams, pitch_padding=pitch_padding)
-         if pitch_padding is not None:
-             f0[pitch_padding] = 0
-
-         pitch = f0_to_coarse(f0_denorm, hparams)  # start from 0
-         ret['pitch_pred'] = pitch.unsqueeze(-1)
-         pitch_embedding = self.pitch_embed(pitch)
-         return pitch_embedding
-
-     def add_energy(self, decoder_inp, energy, ret):
-         decoder_inp = decoder_inp.detach() + hparams['predictor_grad'] * (decoder_inp - decoder_inp.detach())
-         ret['energy_pred'] = energy  # energy_pred = self.energy_predictor(decoder_inp)[:, :, 0]
-         energy = torch.clamp(energy * 256 // 4, max=255).long()  # energy_to_coarse
-         energy_embedding = self.energy_embed(energy)
-         return energy_embedding
-
-     @staticmethod
-     def mel_norm(x):
-         return (x + 5.5) / (6.3 / 2) - 1
-
-     @staticmethod
-     def mel_denorm(x):
-         return (x + 1) * (6.3 / 2) - 5.5
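`PitchPredictor` above is a plain convolutional stack over frame-level hidden states, so its input/output contract can be verified with a quick shape test. The sketch below assumes the space's repository is on `PYTHONPATH` so that the `modules.*` and `utils.*` imports at the top of the deleted file resolve; the tensor sizes are hypothetical.

```python
# Shape check for PitchPredictor: [B, T, idim] in, [B, T, odim] out.
# Assumes the diff-svc space's repo layout is importable.
import torch

from modules.encoder import PitchPredictor

B, T, H = 2, 100, 256
predictor = PitchPredictor(idim=H, n_chans=384, n_layers=5, odim=2)

xs = torch.randn(B, T, H)   # frame-level hidden states
out = predictor(xs)         # per-frame f0 / UV prediction
assert out.shape == (B, T, 2)
```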
spaces/CikeyQI/Yunzai/Yunzai/plugins/ws-plugin/resources/common/base.css DELETED
@@ -1,7 +0,0 @@
- .font-ys {
-     font-family: Number, "汉仪文黑-65W", YS, PingFangSC-Medium, "PingFang SC", sans-serif;
- }
- .font-nzbz {
-     font-family: Number, "印品南征北战NZBZ体", NZBZ, PingFangSC-Medium, "PingFang SC", sans-serif;
- }
- /*# sourceMappingURL=base.css.map */