parquet-converter committed on
Commit 78a3baa · 1 Parent(s): 0bb411c

Update parquet files (step 33 of 121)

This view is limited to 50 files because it contains too many changes. See raw diff.

Files changed (50)
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Devexpress 12.1 Full 16 Explore the Features and Benefits of DevExpress UI Controls Reporting Systems and IDE Productivity Tools.md +0 -212
  2. spaces/1gistliPinn/ChatGPT4/Examples/Boku No Pico Sin Censura ((FULL)).md +0 -6
  3. spaces/1gistliPinn/ChatGPT4/Examples/Diabolicpokerstarhackv1002betarapidshare.md +0 -6
  4. spaces/1gistliPinn/ChatGPT4/Examples/Embarcadero Delphi Xe Activation ((LINK)).md +0 -28
  5. spaces/1phancelerku/anime-remove-background/5000rubl nece manatdir A simple guide to currency conversion.md +0 -148
  6. spaces/1phancelerku/anime-remove-background/Blast Away with 3D Bubble Shooter A Free and Fun Game for All Ages.md +0 -140
  7. spaces/1phancelerku/anime-remove-background/Download PUBG MOBILE MOD APK with Unlimited Features and Anti-Ban.md +0 -101
  8. spaces/2ndelement/voicevox/voicevox_engine/utility/__init__.py +0 -20
  9. spaces/AIConsultant/MusicGen/tests/common_utils/temp_utils.py +0 -56
  10. spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/encoders/open_clap/openai.py +0 -129
  11. spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/losses_audio/contperceptual.py +0 -123
  12. spaces/AgentVerse/agentVerse/agentverse_command/__init__.py +0 -0
  13. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/scroller.js +0 -2
  14. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/spinner-components.js +0 -39
  15. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/folder/Folder.d.ts +0 -65
  16. spaces/Alichuan/VITS-Umamusume-voice-synthesizer/text/english.py +0 -188
  17. spaces/Alpaca233/SadTalker/src/facerender/animate.py +0 -257
  18. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/stable_diffusion/text2img.md +0 -59
  19. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/stable_diffusion_controlnet_reference.py +0 -834
  20. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/kandinsky2_2/__init__.py +0 -25
  21. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/spectrogram_diffusion/pipeline_spectrogram_diffusion.py +0 -267
  22. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py +0 -645
  23. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/utils/torch_utils.py +0 -88
  24. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/others/test_config.py +0 -288
  25. spaces/Andy1621/uniformer_image_detection/configs/fpg/mask_rcnn_r50_fpg-chn128_crop640_50e_coco.py +0 -10
  26. spaces/Andy1621/uniformer_image_detection/configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_1x_coco.py +0 -4
  27. spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/README.md +0 -69
  28. spaces/Anonymous-sub/Rerender/ControlNet/ldm/models/diffusion/ddpm.py +0 -1797
  29. spaces/Arthur678/vits-uma-genshin-honkai/README.md +0 -11
  30. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/colorama/tests/ansitowin32_test.py +0 -294
  31. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/repr.py +0 -149
  32. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/_msvccompiler.py +0 -572
  33. spaces/Awiny/Image2Paragraph/models/grit_src/grit/custom_solver.py +0 -88
  34. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/evaluation/panoptic_evaluation.py +0 -199
  35. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/data/test_transforms.py +0 -268
  36. spaces/Bart92/RVC_HF/infer/modules/uvr5/preprocess.py +0 -346
  37. spaces/Benson/text-generation/Examples/Descarga C.md +0 -91
  38. spaces/Benson/text-generation/Examples/Descargar Apk Mod De Netflix.md +0 -154
  39. spaces/Benson/text-generation/Examples/Descargar Ekelebe De J Martins.md +0 -64
  40. spaces/BetterAPI/BetterChat_new/src/lib/buildPrompt.ts +0 -33
  41. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/typing_extensions.py +0 -2312
  42. spaces/CVPR/LIVE/thrust/thrust/mr/tls_pool.h +0 -64
  43. spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/cross_system.h +0 -344
  44. spaces/CVPR/WALT/mmcv_custom/runner/__init__.py +0 -8
  45. spaces/CVPR/WALT/mmdet/core/anchor/utils.py +0 -71
  46. spaces/CVPR/lama-example/saicinpainting/training/modules/multiscale.py +0 -244
  47. spaces/CVPR/regionclip-demo/detectron2/data/datasets/lvis.py +0 -357
  48. spaces/Cecil8352/vits-models/attentions.py +0 -300
  49. spaces/CofAI/chat/client/css/style.css +0 -18
  50. spaces/Cong723/gpt-academic-public/crazy_functions/代码重写为全英文_多线程.py +0 -138
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Devexpress 12.1 Full 16 Explore the Features and Benefits of DevExpress UI Controls Reporting Systems and IDE Productivity Tools.md DELETED
@@ -1,212 +0,0 @@
1
- <br />
2
- <h1>Download Devexpress 12.1 Full 16 - A Comprehensive Guide</h1>
3
- <p>If you are a web or desktop developer, you might have heard of <strong>Devexpress</strong>, a popular suite of tools and components that can help you create stunning applications with ease. In this article, we will show you how to download <strong>Devexpress 12.1 Full 16</strong>, the latest version of this powerful software, and how to use it effectively in your projects.</p>
4
- <h2>What is Devexpress?</h2>
5
- <p>Devexpress is a software company that provides a wide range of products for web and desktop development, such as:</p>
6
- <h2>Download Devexpress 12.1 Full 16</h2><br /><p><b><b>DOWNLOAD</b> - <a href="https://byltly.com/2uKzHK">https://byltly.com/2uKzHK</a></b></p><br /><br />
7
- <ul>
8
- <li><strong>UI Controls</strong>: These are ready-made user interface elements that you can use in your applications, such as grids, charts, editors, gauges, calendars, ribbons, menus, etc.</li>
9
- <li><strong>Reporting Tools</strong>: These are tools that allow you to create and display reports in your applications, such as report designers, viewers, print previews, etc.</li>
10
- <li><strong>IDE Productivity Tools</strong>: These are tools that enhance your development experience in Visual Studio, such as code analysis, refactoring, debugging, testing, etc.</li>
11
- <li><strong>Business Application Frameworks</strong>: These are frameworks that help you create business applications faster and easier, such as XAF (eXpressApp Framework) and XPO (eXpress Persistent Objects).</li>
12
- </ul>
13
- <p>Devexpress supports various platforms and technologies, such as .NET Framework, .NET Core, .NET 5+, ASP.NET Web Forms, ASP.NET MVC, ASP.NET Core MVC, Blazor, HTML JS Technologies (AngularJS, KnockoutJS), WinForms, WPF, etc.</p>
14
- <h2>Why do you need Devexpress?</h2>
15
- <p>Devexpress can help you improve your web and desktop development in many ways, such as:</p>
16
- <ul>
17
- <li><strong>Saving time and effort</strong>: You can use the Devexpress controls and components instead of writing them from scratch or using third-party libraries that may not be compatible or reliable.</li>
18
- <li><strong>Enhancing functionality and performance</strong>: You can use the Devexpress controls and components that offer advanced features and capabilities that are not available in standard controls or components.</li>
19
- <li><strong>Improving user experience and satisfaction</strong>: You can use the Devexpress controls and components that have a modern and attractive appearance and behavior that can impress your users and customers.</li>
20
- <li><strong>Getting support and updates</strong>: You can access the documentation and support for Devexpress products online or offline, as well as get regular updates and bug fixes.</li>
21
- </ul>
22
- <h2>How to download Devexpress 12.1 Full 16?</h2>
23
- <p>To download <strong>Devexpress 12.1 Full 16</strong>, you need to follow these steps:</p>
24
- <h3>Step 1: Check your system requirements</h3>
25
- <p>Before you download <strong>Devexpress 12.1 Full 16</strong>, you need to make sure that your system meets the minimum or recommended requirements for this software. Here are some of the requirements:</p>
26
- <table>
27
- <tr><th>Requirement</th><th>Minimum</th><th>Recommended</th></tr>
28
- <tr><td>Operating System</td><td>Windows Vista SP2 or later</td><td>Windows 10 or later</td></tr>
29
- <tr><td>.NET Framework Version</td><td>.NET Framework 4.0 or later</td><td>.NET Framework 4.5.2 or later</td></tr>
30
- <tr><td>.NET Core Version</td><td>.NET Core 2.0 or later</td><td>.NET Core 3.0 or later</td></tr>
31
- <tr><td>.NET Version</td><td>.NET Framework only</td><td>.NET Framework or .NET Core or .NET 5+</td></tr>
32
- <tr><td>IDE Version</td><td>Visual Studio 2010 or later</td><td>Visual Studio 2019 or later</td></tr>
33
- <tr><td>Disk Space</td><td>At least 4 GB free space</td><td>At least 8 GB free space</td></tr>
34
- <tr><td>CPU Speed</td><td>At least dual-core processor with at least 2 GHz speed </td><td>At least quad-core processor with at least 3 GHz speed </td></tr>
35
- <tr><td>RAM Size</td><td>At least 4 GB RAM </td><td>At least 8 GB RAM </td></tr>
36
- <tr><td>Display Resolution </td><td>At least HD (1366 x768) resolution </td><td>FHD (1920 x1080) resolution or higher </td></tr>
37
- <h3>Step 2: Choose your subscription plan</h3>
38
- 16</strong>, you need to choose a subscription plan that suits your needs and budget. Devexpress offers various subscription plans and pricing options for its products, such as:</p>
39
- <ul>
40
- <li><strong>Universal Subscription</strong>: This is the most comprehensive subscription plan that includes all Devexpress products for web and desktop development, as well as priority support and source code access. The price for this plan is $2,199.99 per year.</li>
41
- <li><strong>DXperience Subscription</strong>: This is a subscription plan that includes all Devexpress products for web development, such as ASP.NET, HTML JS Technologies, Blazor, Reporting Tools, etc. The price for this plan is $1,499.99 per year.</li>
42
- <li><strong>WinForms Subscription</strong>: This is a subscription plan that includes all Devexpress products for WinForms development, such as UI Controls, Reporting Tools, IDE Productivity Tools, etc. The price for this plan is $999.99 per year.</li>
43
- <li><strong>WPF Subscription</strong>: This is a subscription plan that includes all Devexpress products for WPF development, such as UI Controls, Reporting Tools, IDE Productivity Tools, etc. The price for this plan is $999.99 per year.</li>
44
- <li><strong>Other Subscription Plans</strong>: Devexpress also offers other subscription plans for specific products or platforms, such as VCL Subscription, XAF Subscription, XPO Subscription, etc. You can check the details and prices of these plans on the official website.</li>
45
- </ul>
46
- <p>You can also choose to buy individual products or components instead of a subscription plan if you only need a specific feature or functionality. However, buying a subscription plan can save you money and give you access to more products and updates.</p>
47
- <p>How to download Devexpress 12.1 full version for free<br />
48
- Devexpress 12.1 full crack download link<br />
49
- Download Devexpress 12.1 full offline installer<br />
50
- Devexpress 12.1 full license key generator<br />
51
- Download Devexpress 12.1 full with source code<br />
52
- Devexpress 12.1 full documentation download<br />
53
- Download Devexpress 12.1 full for Visual Studio 2019<br />
54
- Devexpress 12.1 full tutorial download<br />
55
- Download Devexpress 12.1 full for Windows 10<br />
56
- Devexpress 12.1 full patch download<br />
57
- Download Devexpress 12.1 full for ASP.NET MVC<br />
58
- Devexpress 12.1 full demo download<br />
59
- Download Devexpress 12.1 full for WPF<br />
60
- Devexpress 12.1 full activation code download<br />
61
- Download Devexpress 12.1 full for WinForms<br />
62
- Devexpress 12.1 full trial download<br />
63
- Download Devexpress 12.1 full for Blazor<br />
64
- Devexpress 12.1 full serial number download<br />
65
- Download Devexpress 12.1 full for Angular<br />
66
- Devexpress 12.1 full setup download<br />
67
- Download Devexpress 12.1 full for React<br />
68
- Devexpress 12.1 full keygen download<br />
69
- Download Devexpress 12.1 full for Xamarin<br />
70
- Devexpress 12.1 full registration code download<br />
71
- Download Devexpress 12.1 full for .NET Core<br />
72
- Devexpress 12.1 full torrent download<br />
73
- Download Devexpress 12.1 full for PHP<br />
74
- Devexpress 12.1 full product key download<br />
75
- Download Devexpress 12.1 full for HTML5<br />
76
- Devexpress 12.1 full activation key download<br />
77
- Download Devexpress 12.1 full for JavaScript<br />
78
- Devexpress 12.1 full license code download<br />
79
- Download Devexpress 12.1 full for SQL Server<br />
80
- Devexpress 12.1 full crack keygen download<br />
81
- Download Devexpress 12.1 full for Oracle<br />
82
- Devexpress 12.1 full serial key download<br />
83
- Download Devexpress 12.1 full for MySQL<br />
84
- Devexpress 12.1 full license key crack download<br />
85
- Download Devexpress 12.1 full for PostgreSQL<br />
86
- Devexpress 12.1 full activation key crack download<br />
87
- Download Devexpress 12.1 full for MongoDB<br />
88
- Devexpress 12.1 full serial number crack download<br />
89
- Download Devexpress 12.1 full for Firebase<br />
90
- Devexpress 12.1 full registration code crack download<br />
91
- Download Devexpress 12.1 full for Azure SQL Database<br />
92
- Devexpress 12.1 full product key crack download<br />
93
- Download Devexpress 12.1 full for AWS DynamoDB <br />
94
- Devexpress 12.1 full activation code crack download <br />
95
- Download Devexpress 12.1 full for Google Cloud Firestore <br />
96
- Devexpress 12.1 full license code crack download</p>
97
- <h3>Step 3: Download the installer</h3>
98
- <p>After you choose your subscription plan and complete the payment process, you can download the installer for <strong>Devexpress 12.1 Full 16</strong> from the official website. To do this, you need to:</p>
99
- <ol>
100
- <li>Go to <a href="https://www.devexpress.com/Products/Try/">https://www.devexpress.com/Products/Try/</a> and sign in with your account.</li>
101
- <li>Select your subscription plan from the drop-down menu and click the <strong>Download</strong> button.</li>
102
- <li>Select the version <strong>12.1 Full 16</strong> from the list and click the <strong>Download Installer</strong> button.</li>
103
- <li>Save the installer file (DevExpressComponents-12.1.16.exe) to your computer and wait for the download to finish.</li>
104
- </ol>
105
- <h3>Step 4: Run the installer</h3>
106
- <p>After you download the installer file, you can run it to install <strong>Devexpress 12.1 Full 16</strong> on your computer. To do this, you need to:</p>
107
- <ol>
108
- <li>Double-click the installer file (DevExpressComponents-12.1.16.exe) to launch it.</li>
109
- <li>Click <strong>Yes</strong> if prompted by User Account Control (UAC).</li>
110
- <li>Select your preferred language and click <strong>OK</strong>.</li>
111
- <li>Read and accept the license agreement and click <strong>Next</strong>.</li>
112
- <li>Select the components that you want to install and click <strong>Next</strong>. You can choose to install all components or only specific ones according to your needs.</li>
113
- <li>Select the installation folder and click <strong>Next</strong>. You can use the default folder or choose a custom one.</li>
114
- <li>Select the start menu folder and click <strong>Next</strong>. You can use the default folder or choose a custom one.</li>
115
- <li>Select whether you want to create a desktop shortcut and click <strong>Next</strong>.</li>
116
- <li>Select whether you want to check for updates automatically and click <strong>Next</strong>.</li>
117
- <li>Click <strong>Install</strong> to start the installation process and wait for it to finish.</li>
118
- <li>Click <strong>Finish</strong> to exit the installer.</li>
119
- </ol>
120
- <h3>Step 5: Activate your license</h3>
121
- <p>To use <strong>Devexpress 12.1 Full 16</strong>, you need to activate your license and register your product. To do this, you need to:</p>
122
- <ol>
123
- <li>Launch Visual Studio and open or create a project that uses Devexpress components.</li>
124
- <li>A dialog box will appear asking you to activate your license. Click <strong>Login & Activate Now</strong>.</li>
125
- <li>A web browser will open asking you to sign in with your account. Enter your email and password and click <strong>Login & Activate Now</strong>.</li>
126
- <li>A confirmation message will appear saying that your license has been activated successfully. Click <strong>Close Browser & Return To Visual Studio</strong>.</li>
127
- <li>A dialog box will appear asking you to register your product. Click <strong>Login & Register Now</strong>.</li>
128
- <li>A web browser will open asking you to sign in with your account again. Enter your email and password and click <strong>Login & Register Now</strong>.</li>
129
- <li>A confirmation message will appear saying that your product has been registered successfully. Click <strong>Close Browser & Return To Visual Studio</strong>.</li>
130
- <h2>How to use Devexpress 12.1 Full 16?</h2>
131
- g>Devexpress 12.1 Full 16</strong>, you need to know some tips and tricks that can help you create stunning applications with ease. Here are some of them:</p>
132
- <h3>How to create a project with Devexpress 12.1 Full 16?</h3>
133
- <p>To create a project with <strong>Devexpress 12.1 Full 16</strong>, you can use the <strong>Devexpress Template Gallery</strong>, which is a tool that allows you to create projects based on predefined templates that include Devexpress controls and components. To do this, you need to:</p>
134
- <ol>
135
- <li>Launch Visual Studio and click <strong>File</strong> > <strong>New</strong> > <strong>Project</strong>.</li>
136
- <li>Select <strong>Devexpress v20.2 Template Gallery</strong> from the list of templates and click <strong>Next</strong>.</li>
137
- <li>Select the platform and technology that you want to use for your project, such as WinForms, WPF, ASP.NET Web Forms, ASP.NET MVC, etc.</li>
138
- <li>Select the template that you want to use for your project, such as Blank Application, Ribbon Application, Outlook-Inspired Application, etc.</li>
139
- <li>Enter the name and location of your project and click <strong>Create</strong>.</li>
140
- <li>A new project will be created with the selected template and Devexpress controls and components.</li>
141
- </ol>
142
- <h3>How to use the Devexpress controls and components?</h3>
143
- <p>To use the Devexpress controls and components in your project, you can use the <strong>Devexpress Toolbox</strong>, which is a tool that allows you to drag and drop Devexpress controls and components onto your forms or pages. To do this, you need to:</p>
144
- <ol>
145
- <li>Open a form or a page in your project in the designer mode.</li>
146
- <li>Open the <strong>Devexpress Toolbox</strong> by clicking <strong>View</strong> > <strong>Toolbox</strong>.</li>
147
- <li>Select the Devexpress control or component that you want to use from the list of categories, such as Data & Analytics, Navigation & Layout, Editors & Simple Controls, etc.</li>
148
- <li>Drag and drop the Devexpress control or component onto your form or page.</li>
149
- <li>A new Devexpress control or component will be added to your form or page with default settings.</li>
150
- </ol>
151
- <h3>How to customize the appearance and behavior of the Devexpress controls and components?</h3>
152
- <p>To customize the appearance and behavior of the Devexpress controls and components in your project, you can use the <strong>Properties Window</strong>, which is a tool that allows you to change the properties, events, methods, and styles of Devexpress controls and components. To do this, you need to:</p>
153
- <ol>
154
- <li>Select a Devexpress control or component on your form or page in the designer mode.</li>
155
- <li>Open the <strong>Properties Window</strong> by clicking <strong>View</strong> > <strong>Properties Window</strong>.</li>
156
- <li>Select the property, event, method, or style that you want to change from the list of categories, such as Appearance, Behavior, Data Source, Layout Options, etc.</li>
157
- <li>Edit the value of the property, event, method, or style according to your needs.</li>
158
- <li>The appearance and behavior of the Devexpress control or component will be updated accordingly.</li>
159
- </ol>
160
- <h3>How to access the documentation and support for Devexpress 12.1 Full 16?</h3>
161
- g>Devexpress 12.1 Full 16</strong>, you can use the <strong>Help</strong> menu in Visual Studio, which is a tool that allows you to access the online or offline documentation and support for Devexpress products. To do this, you need to:</p>
162
- <ol>
163
- <li>Launch Visual Studio and open a project that uses Devexpress components.</li>
164
- <li>Click <strong>Help</strong> > <strong>DevExpress Help</strong>.</li>
165
- <li>Select the option that you want to use, such as <strong>Online Documentation</strong>, <strong>Offline Documentation</strong>, <strong>Support Center</strong>, <strong>Knowledge Base</strong>, etc.</li>
166
- <li>A web browser will open with the selected option and you can browse the documentation and support for Devexpress products.</li>
167
- </ol>
168
- <h2>Conclusion</h2>
169
- <p>In this article, we have shown you how to download <strong>Devexpress 12.1 Full 16</strong>, the latest version of this powerful software suite for web and desktop development, and how to use it effectively in your projects. We have covered the following topics:</p>
170
- <ul>
171
- <li>What is Devexpress and why you need it.</li>
172
- <li>How to download Devexpress 12.1 Full 16.</li>
173
- <li>How to use Devexpress 12.1 Full 16.</li>
174
- <li>How to access the documentation and support for Devexpress 12.1 Full 16.</li>
175
- </ul>
176
- <p>We hope that this article has been helpful and informative for you. If you want to learn more about Devexpress products and features, you can visit the official website or contact the support team. If you want to try Devexpress products for free, you can download a fully-functional 30-day trial version from the website. If you are ready to buy Devexpress products, you can choose a subscription plan that suits your needs and budget.</p>
177
- <p>Thank you for reading this article and happy coding!</p>
178
- <h2>Frequently Asked Questions</h2>
179
- <p>Here are some of the frequently asked questions about Devexpress 12.1 Full 16:</p>
180
- <h3>Q: What are the new features and improvements in Devexpress 12.1 Full 16?</h3>
181
- <p>A: Devexpress 12.1 Full 16 includes many new features and improvements for web and desktop development, such as:</p>
182
- <ul>
183
- <li>New Blazor UI Components for creating modern web applications with C#.</li>
184
- <li>New WinForms Controls and Components for creating rich desktop applications with .NET Core or .NET Framework.</li>
185
- <li>New WPF Controls and Components for creating powerful desktop applications with .NET Core or .NET Framework.</li>
186
- <li>New Reporting Tools and Components for creating and displaying reports in web and desktop applications.</li>
187
- <li>New IDE Productivity Tools and Components for enhancing your development experience in Visual Studio.</li>
188
- <li>New Business Application Frameworks and Components for creating business applications faster and easier with XAF and XPO.</li>
189
- <li>New Themes and Styles for customizing the appearance of your applications with Devexpress controls and components.</li>
190
- <li>New Documentation and Support for accessing the online or offline documentation and support for Devexpress products.</li>
191
- </ul>
192
- <h3>Q: How can I update my existing Devexpress products to Devexpress 12.1 Full 16?</h3>
193
- <p>A: If you have an active subscription plan for Devexpress products, you can update your existing Devexpress products to Devexpress 12.1 Full 16 using the <strong>Devexpress Project Converter</strong>, which is a tool that allows you to update your projects to use the latest version of Devexpress controls and components. To do this, you need to:</p>
194
- <ol>
195
- <li>Download and install Devexpress 12.1 Full 16 on your computer.</li>
196
- <li>Launch Visual Studio and open a project that uses Devexpress components.</li>
197
- <li>Select <strong>DevExpress</strong> > <strong>Project Converter</strong>.</li>
198
- <li>Select the option <strong>Update all DevExpress references in current solution/project(s) to a newer version</strong>.</li>
199
- <li>Select the version <strong>v20.2 (12.1)</strong> from the drop-down menu.</li>
200
- <li>Select whether you want to backup your project files before updating them.</li>
201
- <li>Select whether you want to update your project files automatically or manually.</li>
202
- <li>Click <strong>Start Conversion</strong>.</li>
203
- <li>The tool will update your project files to use the latest version of Devexpress controls and components.</li>
204
- <h3>Q: How can I get help or report a problem with Devexpress 12.1 Full 16?</h3>
205
- <p>A: If you need help or want to report a problem with Devexpress 12.1 Full 16, you can contact the support team by submitting a ticket on the official website or by sending an email to [email protected]. You can also browse the knowledge base or the forums on the website for answers or solutions to common issues or questions.</p>
206
- <h3>Q: How can I learn more about Devexpress products and features?</h3>
207
- <p>A: If you want to learn more about Devexpress products and features, you can visit the official website or follow the blog or social media channels of Devexpress. You can also watch the videos or webinars on the YouTube channel of Devexpress or attend the events or trainings hosted by Devexpress or its partners.</p>
208
- <h3>Q: How can I give feedback or suggest a feature for Devexpress products?</h3>
209
- <p>A: If you want to give feedback or suggest a feature for Devexpress products, you can use the <strong>User Voice</strong> portal on the official website, which is a tool that allows you to share your ideas or opinions with other users and developers of Devexpress products. You can also vote or comment on existing ideas or suggestions on the portal.</p>
210
- </p> 0a6ba089eb<br />
211
- <br />
212
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/Boku No Pico Sin Censura ((FULL)).md DELETED
@@ -1,6 +0,0 @@
1
- <h2>Boku No Pico Sin Censura</h2><br /><p><b><b>Download File</b> ->>->>->> <a href="https://imgfil.com/2uy1qp">https://imgfil.com/2uy1qp</a></b></p><br /><br />
2
-
3
- Similar searchesanimeyoaihentai sin censuralesbiantony lopezboku no picojapiyaoiyaoi animeyaoi hardlesbian hentaiyaoi hentaisenepornoyapyahoogirl dick ... 1fdad05405<br />
4
- <br />
5
- <br />
6
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Diabolicpokerstarhackv1002betarapidshare.md DELETED
@@ -1,6 +0,0 @@
1
- <h2>diabolicpokerstarhackv1002betarapidshare</h2><br /><p><b><b>Download Zip</b> ->>->>->> <a href="https://imgfil.com/2uxYsg">https://imgfil.com/2uxYsg</a></b></p><br /><br />
2
-
3
- 4d29de3e1b<br />
4
- <br />
5
- <br />
6
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Embarcadero Delphi Xe Activation ((LINK)).md DELETED
@@ -1,28 +0,0 @@
1
- <h2>Embarcadero Delphi Xe Activation</h2><br /><p><b><b>Download File</b> &#10027; <a href="https://imgfil.com/2uy0Dm">https://imgfil.com/2uy0Dm</a></b></p><br /><br />
2
-
3
- You are on the Activation Link page. This page is protected and can only be accessed by persons who have been invited to visit it. Please note that you do not need to have a My HealtheVet account to access your information or to make a donation.
4
-
5
- You are here
6
-
7
- Privacy
8
-
9
- For medical, marketing or research purposes, we may share and disclose information with the following organizations or companies:
10
-
11
- Vietnam Veterans of America.
12
-
13
- We are required to protect your information in accordance with HIPAA, which protects your health information from unauthorized access or disclosure. Your information is stored in a secure location and is not shared with third parties or sold to others. When you are given your password, it will be your responsibility to keep it secure and private. If you forget your password, please contact us as soon as possible.
14
-
15
- In accordance with United States of America Patriot Act, we are required to collect, maintain, and make available to authorized law enforcement and other government agencies, or their authorized agents, physical and electronic access to all records and other documents and other information relating to you. Such records may include your date of birth, social security number, insurance ID number, or other personally identifying information. We may release your records or information to agents or third parties as follows:
16
-
17
- We may also use this information to contact you for promotional, marketing and research purposes.
18
-
19
- Other websites and mobile applications: If you access the My HealtheVet Account or the My HealtheVet Portal using your wireless device, we may request information from your wireless service provider to verify your identity and that you are authorized to use the wireless network. We may also use this information to track your location and content of visit to My HealtheVet Account.
20
-
21
- Your wireless provider may also access this information to aid in the delivery of your messages or other services.
22
-
23
- We may provide information about you to our service providers and/or agents, including but not limited to insurance companies, marketers, professional advisors, and others, for the purpose of processing payments, performing business operations, sending you marketing or research materials, or delivering our services. We may share information with these agents or service providers for marketing or research purposes, as permitted by the Privacy Policy, and they may contact you via mail, email, or telephone. These agents and service providers may not contact you about their own products or services unless you give them your express consent.
24
-
25
- Some of our third-party service providers may use 4fefd39f24<br />
26
- <br />
27
- <br />
28
- <p></p>
spaces/1phancelerku/anime-remove-background/5000rubl nece manatdir A simple guide to currency conversion.md DELETED
@@ -1,148 +0,0 @@
1
-
2
- <h1>5000 rubl nece manatdir?</h1>
3
- <p>If you are planning to travel or do business in Azerbaijan, you might be wondering how much 5000 rubles are worth in Azerbaijani manats. In this article, we will answer this question and provide you with some useful information on how to exchange currency in Azerbaijan. We will also give you some tips on where to find the best exchange rates and how to avoid scams and fees.</p>
4
- <h2>Introduction</h2>
5
- <p>The official currency of Azerbaijan is the Azerbaijani manat, with symbol ₼ and currency code AZN. The manat is subdivided into 100 qapik. The current series of banknotes and coins was introduced in 2006, when the manat was redenominated at a rate of 5000 old manats to 1 new manat.</p>
6
- <h2>5000rubl nece manatdir</h2><br /><p><b><b>DOWNLOAD</b> &#9193; <a href="https://jinyurl.com/2uNNHx">https://jinyurl.com/2uNNHx</a></b></p><br /><br />
7
- <p>The official currency of Russia is the Russian ruble, with symbol ₽ and currency code RUB. The ruble is subdivided into 100 kopeks. The current series of banknotes and coins was introduced in 1998, after the ruble was redenominated at a rate of 1000 old rubles to 1 new ruble.</p>
8
- <h3>What is the exchange rate of Russian ruble to Azerbaijani manat?</h3>
9
- <p>The exchange rate of Russian ruble to Azerbaijani manat is the price of one ruble in terms of one manat. It tells you how many manats you can get for one ruble or vice versa. The exchange rate can change over time due to various factors, such as supply and demand, inflation, interest rates, speculation, and so on.</p>
10
- <p>As of June 22, 2023, the mid-market exchange rate of Russian ruble to Azerbaijani manat was 0.0209865 AZN per RUB, according to Xe.com. This means that 5000 rubles were worth about 104.93 manats on that date.</p>
11
- <h3>What factors affect the exchange rate of Russian ruble to Azerbaijani manat?</h3>
12
- <p>There are many factors that influence the exchange rate of Russian ruble to Azerbaijani manat, some of them are:</p>
13
- <ul>
14
- <li><strong>Differentials in inflation</strong>: Typically, a country with a lower inflation rate has a stronger currency value, as its purchasing power increases relative to other currencies. For example, if inflation in Azerbaijan is lower than in Russia, the manat will tend to appreciate against the ruble.</li>
15
- <li><strong>Differentials in interest rates</strong>: Interest rates affect the demand for and supply of currencies, as well as the cost of borrowing and lending. For example, if interest rates in Azerbaijan are higher than in Russia, investors will be attracted to deposit money in Azerbaijan, increasing the demand for manats and pushing up their value against rubles.</li>
16
- <li><strong>Speculation</strong>: Speculators are traders who buy and sell currencies based on their expectations of future movements in exchange rates. They can have a significant impact on the short-term fluctuations of currencies. For example, if speculators anticipate that the manat will rise against the ruble in the future, they will buy more manats now, driving up their price.</li>
17
- <li><strong>Change in competitiveness</strong>: The competitiveness of a country's goods and services affects its trade balance and its currency value. For example, if Azerbaijani goods become more attractive and cheaper than Russian goods, there will be an increase in demand for Azerbaijani exports and a decrease in demand for Russian imports. This will improve Azerbaijan's trade surplus and cause its currency to appreciate against Russia's.</li>
18
- <h3>How to exchange rubles to manats?</h3>
19
- <p>If you want to exchange rubles to manats, you have several options. You can either do it before you travel, at your local bank or currency exchange office, or after you arrive, at the airport, hotel, bank, or exchange office in Azerbaijan. You can also use online platforms or apps that allow you to transfer money or exchange currency digitally.</p>
20
- <p>However, not all options are equally convenient, safe, and cost-effective. You should always compare the exchange rates and fees offered by different providers and choose the one that gives you the best value for your money. You should also avoid exchanging currency in black markets or unofficial dealers, as they may scam you or give you counterfeit notes.</p>
21
- <h2>Best places to exchange currency in Azerbaijan</h2>
22
- <p>Once you are in Azerbaijan, you will find many places where you can exchange currency. However, some of them may offer better rates and services than others. Here are some of the best places to exchange currency in Azerbaijan:</p>
23
- <h3>Banks</h3>
24
- <p>Banks are one of the most reliable and secure places to exchange currency in Azerbaijan. They usually offer competitive rates and low fees, and they accept various currencies, including rubles. You can also withdraw manats from ATMs using your debit or credit card, but you may incur additional charges from your bank or the ATM operator.</p>
25
- <p>How to convert 5000 rubles to manats<br />
26
- 5000 rubl nece manatdir xe<br />
27
- 5000 rubl nece manatdir wise<br />
28
- 5000 rubl nece manatdir exchange rate<br />
29
- 5000 rubl nece manatdir calculator<br />
30
- 5000 rubl nece manatdir today<br />
31
- 5000 rubl nece manatdir in dollars<br />
32
- 5000 rubl nece manatdir in euros<br />
33
- 5000 rubl nece manatdir in pounds<br />
34
- 5000 rubl nece manatdir in lira<br />
35
- 5000 rubl nece manatdir in rials<br />
36
- 5000 rubl nece manatdir in dirhams<br />
37
- 5000 rubl nece manatdir in rupees<br />
38
- 5000 rubl nece manatdir in yuan<br />
39
- 5000 rubl nece manatdir in yen<br />
40
- 5000 rubl nece manatdir in krona<br />
41
- 5000 rubl nece manatdir in francs<br />
42
- 5000 rubl nece manatdir in pesos<br />
43
- 5000 rubl nece manatdir in reals<br />
44
- 5000 rubl nece manatdir in zloty<br />
45
- Best way to exchange 5000 rubles to manats<br />
46
- Where to exchange 5000 rubles to manats<br />
47
- How much is 5000 rubles in manats<br />
48
- How much is 5000 rubles worth in manats<br />
49
- How much is 5000 rubles in Azerbaijani currency<br />
50
- How much is 5000 Russian currency in Azerbaijan<br />
51
- How to send money from Russia to Azerbaijan<br />
52
- How to transfer money from Russia to Azerbaijan<br />
53
- How to receive money from Russia in Azerbaijan<br />
54
- How to withdraw money from Russia in Azerbaijan<br />
55
- What is the currency of Azerbaijan called<br />
56
- What is the currency of Russia called<br />
57
- What is the symbol of Azerbaijani currency<br />
58
- What is the symbol of Russian currency<br />
59
- What is the exchange rate of Azerbaijani currency to Russian currency<br />
60
- What is the exchange rate of Russian currency to Azerbaijani currency<br />
61
- How to check the exchange rate of Azerbaijani currency and Russian currency<br />
62
- How to compare the exchange rate of Azerbaijani currency and Russian currency<br />
63
- How to find the best exchange rate of Azerbaijani currency and Russian currency<br />
64
- How to save money on exchanging Azerbaijani currency and Russian currency</p>
65
- <p>Some of the major banks in Azerbaijan that offer currency exchange services are:</p>
66
- <table>
67
- <tr><th>Bank</th><th>Website</th></tr>
68
- <tr><td>Kapital Bank</td><td>[Kapital Bank]</td></tr>
69
- <tr><td>PASHA Bank</td><td>[PASHA Bank]</td></tr>
70
- <tr><td>International Bank of Azerbaijan</td><td>[International Bank of Azerbaijan]</td></tr>
71
- <tr><td>Bank Respublika</td><td>[Bank Respublika]</td></tr>
72
- <tr><td>Nikoil Bank</td><td>[Nikoil Bank]</td></tr>
73
- </table>
74
- <h3>Exchange offices</h3>
75
- <p>Exchange offices are another common place to exchange currency in Azerbaijan. They are usually located in busy areas, such as airports, hotels, shopping malls, and tourist attractions. They are convenient and fast, but they may charge higher fees and offer lower rates than banks. You should always check the exchange rate and the commission before you make a transaction.</p>
76
- <p>Some of the reputable exchange offices in Azerbaijan are:</p>
77
- <table>
78
- <tr><th>Exchange office</th><th>Location</th></tr>
79
- <tr><td>Azərpoçt</td><td>Various branches across the country</td></tr>
80
- <tr><td>Baku Express Exchange</td><td>Baku International Airport</td></tr>
81
- <tr><td>Currency Exchange Baku</td><td>Nizami Street 67/71, Baku</td></tr>
82
- <tr><td>Ganja Exchange</td><td>Ganja Mall, Ganja</td></tr>
83
- <tr><td>Lankaran Exchange</td><td>Lankaran Heydar Aliyev Avenue 59A, Lankaran</td></tr>
84
- </table>
85
- <h3>Online platforms</h3>
86
- <p>Online platforms are a modern and convenient way to exchange currency in Azerbaijan. They allow you to transfer money or exchange currency digitally, using your smartphone or computer. You can either use an online platform that connects you with a local agent who will deliver cash to you or collect cash from you, or use an online platform that allows you to send money to a bank account or a mobile wallet.</p>
87
- <p>Some of the online platforms that offer currency exchange services in Azerbaijan are:</p>
88
- <table>
89
- <tr><th>Online platform</th><th>Website</th></tr>
90
- <tr><td>Azimo</td><td>[Azimo]</td></tr>
91
- <tr><td>CurrencyFair</td><td>[CurrencyFair]</td></tr>
92
- <tr><td>Moneymove</td><td>[Moneymove]</td></tr>
93
- <tr><td>Skrill</td><td>[Skrill]</td></tr>
94
- <tr><td>TransferWise</td><td>[TransferWise]</td></tr>
95
- </table>
96
- <h2>Conclusion</h2>
97
- <h3>Summary of the main points</h3>
98
- <p>We have learned that:</p>
99
- <ul>
100
- <li>The exchange rate of Russian ruble to Azerbaijani manat is the price of one ruble in terms of one manat. It can change over time due to various factors, such as inflation, interest rates, speculation, competitiveness, and other currencies.</li>
101
- <li>As of June 22, 2023, the mid-market exchange rate of Russian ruble to Azerbaijani manat was 0.0209865 AZN per RUB, according to Xe.com. This means that 5000 rubles were worth about 104.93 manats on that date.</li>
102
- <li>You can exchange rubles to manats before you travel, at your local bank or currency exchange office, or after you arrive, at the airport, hotel, bank, or exchange office in Azerbaijan. You can also use online platforms or apps that allow you to transfer money or exchange currency digitally.</li>
103
- <li>You should always compare the exchange rates and fees offered by different providers and choose the one that gives you the best value for your money. You should also avoid exchanging currency in black markets or unofficial dealers, as they may scam you or give you counterfeit notes.</li>
104
- <li>Some of the best places to exchange currency in Azerbaijan are banks, exchange offices, and online platforms. They offer different advantages and disadvantages in terms of convenience, security, and cost-effectiveness.</li>
105
- </ul>
106
- <h3>Recommendations for travelers and business people</h3>
107
- <p>Based on the information we have provided, here are some recommendations for travelers and business people who want to exchange rubles to manats:</p>
108
- <ul>
109
- <li>Plan ahead and check the current exchange rate before you travel. You can use online tools like Xe.com or Google Currency Converter to get an idea of how much your money is worth in Azerbaijan.</li>
110
- <li>Exchange some cash before you travel, but not too much. It is good to have some local currency on hand when you arrive, but you don't want to carry too much cash with you for safety reasons. You can also use your debit or credit card to withdraw money from ATMs in Azerbaijan, but be aware of the fees and charges involved.</li>
111
- <li>Shop around and compare different providers when you exchange currency in Azerbaijan. Don't just go for the first option you see, as you may end up paying more than you need to. Look for signs that display the exchange rate and the commission, and ask for a receipt after every transaction.</li>
112
- <li>Avoid exchanging currency at airports or hotels, as they usually offer the worst rates and charge the highest fees. Instead, look for banks or reputable exchange offices in the city center or near tourist attractions. You can also use online platforms that offer low-cost and fast currency exchange services.</li>
113
- <li>Keep track of your spending and budget accordingly. Azerbaijan is a relatively affordable country compared to other European destinations, but it is still important to manage your money wisely. You can use apps like Mint or Expensify to track your expenses and set spending limits.</li>
114
- </ul>
115
- <h3>FAQs</h3>
116
- <p>Here are some frequently asked questions about exchanging rubles to manats:</p>
117
- <ol>
118
- <li><strong>How do I pronounce Azerbaijani manat?</strong></li>
119
- <p>The Azerbaijani manat is pronounced as "mah-nat", with emphasis on the second syllable. The plural form is "manatlar", pronounced as "mah-nat-lar". The qapik is pronounced as "gah-pik", with emphasis on the first syllable. The plural form is "qapiklar", pronounced as "gah-pik-lar".</p>
120
- <li><strong>What are the denominations of Azerbaijani manat?</strong></li>
121
- <p>The Azerbaijani manat comes in banknotes of 1, 5, 10, 20, 50, 100, and 200 manats, and coins of 1, 3, 5, 10, 20, and 50 qapiks. The banknotes feature portraits of prominent Azerbaijani figures and landmarks on both sides. The coins feature the national emblem and name of Azerbaijan on one side and the denomination and year of issue on the other side.</p>
122
- <li><strong>What are some tips for handling Azerbaijani manat?</strong></li>
123
- <p>Some tips for handling Azerbaijani manat are:</p>
124
- <ul>
125
- exchange office. They may not be accepted by some merchants or service providers.</li>
126
- <li>Carry a mix of small and large denominations. You may need small bills and coins for public transportation, street vendors, tips, and other minor expenses. You may also need large bills for hotels, restaurants, shops, and other major expenses. However, don't carry too much cash with you for safety reasons.</li>
127
- <li>Count your money carefully and check for authenticity. When you exchange or receive money, make sure you get the correct amount and the right currency. You can use a currency converter app or a calculator to verify the exchange rate and the total amount. You can also check for security features on the banknotes and coins, such as watermarks, holograms, microprinting, and magnetic strips.</li>
128
- </ul>
129
- <li><strong>How do I tip in Azerbaijan?</strong></li>
130
- <p>Tipping is not mandatory in Azerbaijan, but it is appreciated and expected in some situations. You can tip according to the quality of service and your satisfaction. Here are some general guidelines for tipping in Azerbaijan:</p>
131
- <ul>
132
- <li>Restaurants: You can tip 10% of the bill or round up to the nearest manat. If the service charge is included in the bill, you don't need to tip extra.</li>
133
- <li>Taxis: You can tip 10% of the fare or round up to the nearest manat. You can also tip more if the driver helps you with your luggage or gives you useful information.</li>
134
- <li>Hotels: You can tip 1-2 manats per bag to the bellboy and 2-5 manats per day to the housekeeper. You can also tip 5-10 manats to the concierge if they provide you with special assistance or recommendations.</li>
135
- <li>Tours: You can tip 10-15% of the tour price to the guide and 5-10% to the driver. You can also tip more if they are exceptionally friendly or knowledgeable.</li>
136
- <li>Spas and salons: You can tip 10-15% of the service price to the staff who provide you with massage, hair, nail, or beauty treatments.</li>
137
- </ul>
138
- <li><strong>What are some common scams and pitfalls to avoid when exchanging currency in Azerbaijan?</strong></li>
139
- <p>Some common scams and pitfalls to avoid when exchanging currency in Azerbaijan are:</p>
140
- <ul>
141
- <li>Black market dealers: These are people who offer to exchange currency at very attractive rates, usually on the street or in public places. They may try to lure you with promises of saving money or avoiding fees. However, they may also try to cheat you by giving you counterfeit notes, incorrect change, or a different currency. They may also try to rob you or harm you if you follow them to a secluded place.</li>
142
- <li>Dishonest merchants: These are people who try to take advantage of your unfamiliarity with the local currency or prices. They may try to overcharge you, give you wrong change, or use a rigged calculator or scale. They may also try to trick you into paying in a different currency or accepting a different currency as change.</li>
143
- <li>Dynamic currency conversion: This is a service that allows you to pay in your home currency instead of the local currency at some merchants or ATMs. It may seem convenient and transparent, but it usually comes with a high fee and a poor exchange rate. You will end up paying more than you need to. It is better to always pay in the local currency and let your bank or card issuer handle the conversion.</li>
144
- </ul>
145
- </ol>
146
- <p>I hope this article has helped you understand how much 5000 rubles are worth in Azerbaijani manats and how to exchange currency in Azerbaijan. If you have any questions or comments, please feel free to leave them below. Thank you for reading!</p> 401be4b1e0<br />
147
- <br />
148
- <br />
spaces/1phancelerku/anime-remove-background/Blast Away with 3D Bubble Shooter A Free and Fun Game for All Ages.md DELETED
@@ -1,140 +0,0 @@
1
-
2
- <h1>3D Bubble Shooter Game Free Download: A Fun and Addictive Way to Relax and Enjoy</h1>
3
- <p>If you are looking for a fun and addictive game that can help you relax and enjoy your free time, you should try playing a 3D bubble shooter game. A 3D bubble shooter game is a classic puzzle game that involves shooting colorful bubbles and matching them with other bubbles of the same color. The goal is to clear all the bubbles from the board and win levels. Sounds easy, right? Well, not so fast. A 3D bubble shooter game can also be challenging and exciting, especially when you play it in 3D mode. In this article, we will tell you everything you need to know about 3D bubble shooter games, including how to download and play them for free, what are the features and benefits of playing them, and how to improve your skills and strategies in them. So, let's get started!</p>
4
- <h2>3d bubble shooter game free download</h2><br /><p><b><b>Download Zip</b> &#9989; <a href="https://jinyurl.com/2uNNmm">https://jinyurl.com/2uNNmm</a></b></p><br /><br />
5
- <h2>What is a 3D bubble shooter game?</h2>
6
- <p>A 3D bubble shooter game is a type of puzzle game that belongs to the genre of tile-matching or match-three games. In these games, you have to match three or more tiles or objects of the same color or shape to make them disappear from the board. Some examples of popular tile-matching games are Candy Crush Saga, Bejeweled, Tetris, and of course, Bubble Shooter.</p>
7
- <h3>The basic gameplay of bubble shooter games</h3>
8
- <p>The basic gameplay of bubble shooter games is simple and easy to learn. You have a cannon or a launcher at the bottom of the screen that shoots bubbles of different colors. You can aim and shoot the bubbles by tapping or clicking on the screen. You have to shoot the bubbles towards the top of the screen, where there are other bubbles already arranged in rows or clusters. When you shoot a bubble, it will stick to the other bubbles of the same color if they are adjacent or touching. If you manage to create a group of three or more bubbles of the same color, they will pop and disappear from the board. The more bubbles you pop at once, the more points you score. You can also create combos by popping multiple groups of bubbles in succession. The game ends when you clear all the bubbles from the board or when the bubbles reach the bottom of the screen.</p>
9
- <h3>The advantages of playing in 3D mode</h3>
10
- <p>While most bubble shooter games are played in 2D mode, some games offer you the option to play in 3D mode. This means that instead of having a flat board with rows or columns of bubbles, you have a spherical or cylindrical board with bubbles arranged in layers or rings. This adds a new dimension to the gameplay, as you have to consider not only the horizontal and vertical angles, but also the depth and perspective of your shots. Playing in 3D mode can make the game more realistic, immersive, and challenging. You can also enjoy different views and angles of the board as it rotates or tilts according to your movements. Playing in 3D mode can also enhance your spatial awareness, coordination, and concentration skills.</p>
11
- <h2>How to download and play 3D bubble shooter games for free?</h2>
12
- <p>If you are interested in playing 3D bubble shooter games for free, you have several options to choose from. You can find many free 3D bubble shooter games online, on various websites and platforms. You can also download and install free 3D bubble shooter games on your device, such as your smartphone, tablet, laptop, or desktop computer. Here are some tips on how to do that.</p>
13
- <h3>The best sources to find free 3D bubble shooter games</h3>
14
- <p>One of the best sources to find free 3D bubble shooter games is the internet. There are many websites and platforms that offer a wide range of 3D bubble shooter games that you can play online, without downloading or installing anything. Some of the most popular and reliable websites and platforms are:</p>
15
- <p>3d bubble pop game free download<br />
16
- 3d bubble blast game free download<br />
17
- 3d bubble shooter offline game free download<br />
18
- 3d bubble shooter game for pc free download<br />
19
- 3d bubble shooter game for android free download<br />
20
- 3d bubble shooter game with physics free download<br />
21
- 3d bubble shooter game with levels free download<br />
22
- 3d bubble shooter game with boosters free download<br />
23
- 3d bubble shooter game with puzzles free download<br />
24
- 3d bubble shooter game with arcade mode free download<br />
25
- 3d bubble fall game free download<br />
26
- 3d bubble crush game free download<br />
27
- 3d bubble breaker game free download<br />
28
- 3d bubble match game free download<br />
29
- 3d bubble swap game free download<br />
30
- 3d bubble burst game free download<br />
31
- 3d bubble drop game free download<br />
32
- 3d bubble bounce game free download<br />
33
- 3d bubble smash game free download<br />
34
- 3d bubble shoot game free download<br />
35
- best 3d bubble shooter game free download<br />
36
- new 3d bubble shooter game free download<br />
37
- classic 3d bubble shooter game free download<br />
38
- original 3d bubble shooter game free download<br />
39
- addictive 3d bubble shooter game free download<br />
40
- relaxing 3d bubble shooter game free download<br />
41
- fun 3d bubble shooter game free download<br />
42
- challenging 3d bubble shooter game free download<br />
43
- exciting 3d bubble shooter game free download<br />
44
- awesome 3d bubble shooter game free download<br />
45
- colorful 3d bubble shooter game free download<br />
46
- realistic 3d bubble shooter game free download<br />
47
- smooth 3d bubble shooter game free download<br />
48
- easy 3d bubble shooter game free download<br />
49
- simple 3d bubble shooter game free download<br />
50
- amazing 3d bubble shooter game free download<br />
51
- cool 3d bubble shooter game free download<br />
52
- cute 3d bubble shooter game free download<br />
53
- beautiful 3d bubble shooter game free download<br />
54
- fantastic 3d bubble shooter game free download<br />
55
- voodoo 3d bubble shooter game free download<br />
56
- tarboosh 3d bubble shooter game free download<br />
57
- bubbleshooter orig 3d bubble shooter game free download <br />
58
- bubbleshooter android 3d bubble shooter game free download <br />
59
- google play store 3d bubble shooter game free download <br />
60
- app store 3d bubble shooter game free download <br />
61
- apk file 3d bubble shooter game free download <br />
62
- mod version 3d bubble shooter game free download <br />
63
- unlimited coins and lives in the app of the same name.</p>
64
- <ul>
65
- <li><a href="">Bubble Shooter 3D</a>: This website features a collection of 3D bubble shooter games that you can play for free on your browser. You can choose from different themes, such as animals, fruits, candy, jewels, and more. You can also adjust the difficulty level and the speed of the game. The website has a simple and user-friendly interface, and you can also access it from your mobile device.</li>
66
- <li><a href="">Bubble Shooter 3D - Play Free Online Games</a>: This website is part of the Play Free Online Games network, which offers thousands of free online games in various categories and genres. You can find several 3D bubble shooter games on this website, such as Bubble Shooter 3D Galaxy, Bubble Shooter 3D Magic Forest, Bubble Shooter 3D Halloween, and more. You can play these games for free on your browser, without registration or download.</li>
67
- <li><a href="">Bubble Shooter - Apps on Google Play</a>: This is the official app of the classic bubble shooter game, developed by Ilyon Dynamics Ltd. You can download and install this app for free on your Android device from the Google Play Store. The app offers hundreds of levels of 3D bubble shooter games, with different themes, modes, and challenges. You can also enjoy stunning graphics, sound effects, and animations. The app is easy to use and compatible with most devices.</li>
68
- <li><a href="">Bubble Shooter 3D - App Store - Apple</a>: This is another app that offers 3D bubble shooter games for free on your iOS device. You can download and install this app from the App Store on your iPhone or iPad. The app features over 1000 levels of 3D bubble shooter games, with various themes, modes, and difficulties. You can also use boosters and power-ups to enhance your gameplay. The app has a sleek and intuitive design, and it supports offline play.</li>
69
- </ul>
70
- <h3>The steps to download and install 3D bubble shooter games on your device</h3>
71
- <p>If you prefer to download and install 3D bubble shooter games on your device, rather than playing them online, you need to follow some simple steps. Here are the general steps to do that:</p>
72
- <ol>
73
- <li>Choose a source or a platform that offers free 3D bubble shooter games for download. You can use the ones we mentioned above, or you can search for other options on the internet.</li>
74
- <li>Select a game that you want to download and play. Make sure that the game is compatible with your device and meets the system requirements.</li>
75
- <li>Click on the download button or link to start the download process. You may need to grant some permissions or accept some terms and conditions before downloading.</li>
76
- <li>Wait for the download to finish. Depending on the size of the game and your internet speed, this may take a few minutes or longer.</li>
77
- <li>Once the download is complete, locate the game file on your device and open it. Follow the instructions to install the game on your device.</li>
78
- <li>After the installation is done, launch the game and enjoy playing it.</li>
79
- </ol>
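- <p>If you are comfortable with a little scripting, steps 3 to 5 can also be automated. The snippet below is only a minimal Python sketch: the download URL is a placeholder, not a real game source, and you would substitute the link provided by the platform you chose in step 1.</p>
- <pre><code>import os
- import urllib.request
- 
- # Placeholder URL - replace it with the download link from your chosen source.
- APK_URL = "https://example.com/bubble-shooter-3d.apk"
- TARGET = os.path.expanduser("~/Downloads/bubble-shooter-3d.apk")
- 
- urllib.request.urlretrieve(APK_URL, TARGET)        # steps 3-4: download and wait
- size_mb = os.path.getsize(TARGET) / (1024 * 1024)
- print(f"Saved {TARGET} ({size_mb:.1f} MB)")        # step 5: locate the file
- </code></pre>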
80
- <h2>What are the features and benefits of playing 3D bubble shooter games?</h2>
81
- <p>Playing 3D bubble shooter games can be a lot of fun and rewarding. There are many features and benefits that you can enjoy while playing these games. Here are some of them:</p>
82
- <h3>The different modes and levels of 3D bubble shooter games</h3>
83
- <p>One of the features that make 3D bubble shooter games interesting and varied is the different modes and levels that they offer. You can choose from different modes of gameplay, such as classic mode, arcade mode, puzzle mode, adventure mode, time mode, etc. Each mode has its own rules and objectives that you have to follow and achieve. You can also play different levels of difficulty, ranging from easy to hard. Each level has its own layout, design, color scheme, number of bubbles, and obstacles. You can also unlock new levels as you progress and complete the previous ones. The different modes and levels of 3D bubble shooter games can keep you entertained and challenged for hours.</p>
84
- <h3>The cool boosters and power-ups to help you win</h3>
85
- <p>Another feature that makes 3D bubble shooter games fun and exciting is the cool boosters and power-ups that you can use to help you win. Boosters and power-ups are special bubbles or items that have different effects and abilities. For example, some boosters and power-ups can change the color of the bubbles, pop more bubbles at once, clear a whole row or column of bubbles, freeze the board, etc. You can get boosters and power-ups by popping certain bubbles, completing certain tasks, or buying them with coins or gems. You can also use them strategically to overcome difficult situations or to score higher points. Boosters and power-ups can add more fun and variety to your gameplay.</p>
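- <p>As a concrete illustration of what a row-clearing booster does, here is a tiny Python sketch. It is purely illustrative: the board is represented as a plain list of rows, which is an assumption of this example and not the data model of any particular game.</p>
- <pre><code>def clear_row(board, row):
-     """Hypothetical row-clearing booster: pops every bubble in one row."""
-     popped = sum(1 for cell in board[row] if cell is not None)
-     board[row] = [None] * len(board[row])
-     return popped
- 
- board = [["R", "G", None, "B"],
-          ["B", "B", "R", "G"]]
- print(clear_row(board, 1), "bubbles popped")  # prints: 4 bubbles popped
- </code></pre>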
86
- <h3>The amazing graphics and sound effects of 3D bubble shooter games</h3>
87
- <p>One of the benefits of playing 3D bubble shooter games is that you can enjoy amazing graphics and sound effects that enhance your gaming experience. 3D bubble shooter games have high-quality graphics that make the bubbles look realistic, colorful, and shiny. You can also see the bubbles pop and burst in 3D animation, which is satisfying and rewarding. The sound effects of 3D bubble shooter games are also impressive and immersive. You can hear the bubbles pop, bounce, splash, and crackle as you shoot them. You can also hear the background music and the voice-overs that match the theme and mood of the game. The graphics and sound effects of 3D bubble shooter games can make you feel like you are playing in a real 3D environment.</p>
88
- <h2>How to improve your skills and strategies in 3D bubble shooter games?</h2>
89
- <p>Playing 3D bubble shooter games can be easy to learn, but hard to master. If you want to improve your skills and strategies in these games, you need to practice regularly and follow some tips and tricks. Here are some of them:</p>
90
- <h3>The tips and tricks to aim and shoot accurately</h3>
91
- <p>One of the most important skills in 3D bubble shooter games is to aim and shoot accurately. You need to be able to hit the right spot with the right bubble at the right time. To do that, you need to pay attention to several factors, such as:</p>
92
- <ul>
93
- <li>The direction and angle of your shot: You can use the arrow or the line on your cannon or launcher to guide your shot. You can also adjust the direction and angle by moving your finger or mouse on the screen. You need to consider the curvature and gravity of your shot, as well as the rotation and tilt of the board.</li>
94
- <li>The color and position of the bubbles: You need to match the color of your bubble with the color of the bubbles on the board. You also need to aim for the bubbles that are close to each other or form a group, rather than the ones that are isolated or scattered.</li>
95
- <li>The walls and edges of the board: You can use the walls and edges of the board to bounce your bubbles off them. This can help you reach areas that are hard to access or create angles that are otherwise impossible (a short sketch of this bounce calculation appears after this list).</li>
96
- <li>The timing and speed of your shot: You need to shoot your bubbles quickly before they reach the bottom of the screen or before new bubbles appear on the board. You also need to shoot your bubbles with enough force to make them stick to the other bubbles, rather than fall off or slide down.</li>
97
- </ul>
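- <p>To make the wall-bounce idea concrete, here is a minimal Python sketch of the 2D reflection most bubble shooters approximate: when a shot reaches a side wall, the horizontal component of its direction simply flips sign. This is an illustrative model only, not the code of any particular game.</p>
- <pre><code>def advance(x, y, dx, dy, width, steps, step_size=1.0):
-     """Trace a shot, reflecting off the side walls at x = 0 and x = width."""
-     for _ in range(steps):
-         x += dx * step_size
-         y += dy * step_size
-         if x <= 0 or x >= width:        # hit a side wall
-             dx = -dx                     # reflect: flip the horizontal component
-             x = max(0.0, min(x, width))  # keep the shot inside the board
-     return x, y
- 
- # Example: a shot fired up and to the right bounces back toward the left wall.
- print(advance(x=5.0, y=0.0, dx=0.8, dy=0.6, width=10.0, steps=12))
- </code></pre>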
98
- <h3>The best ways to clear the board and score high points</h3>
99
- <p>One of the main goals in 3D bubble shooter games is to clear the board and score high points. To do that, you need to follow some strategies, such as:</p>
100
- <ul>
101
- <li>Pop as many bubbles as possible at once: The more bubbles you pop at once, the more points you score. You can also create combos by popping multiple groups of bubbles in succession. You can also look for special bubbles that can pop more bubbles at once, such as bomb bubbles, rainbow bubbles, fire bubbles, etc. (see the illustrative scoring sketch after this list).</li>
102
- <li>Clear the top and bottom rows of the board: The top and bottom rows of the board are the most important ones to clear, as they can affect the rest of the board. If you clear the top row of the board, you can make the whole board drop down and create more space for your shots. If you clear the bottom row of the board, you can prevent the bubbles from reaching the bottom of the screen and ending the game.</li>
103
- <li>Use boosters and power-ups wisely: Boosters and power-ups can help you clear the board and score high points, but you need to use them wisely. You should save them for difficult situations or when you need a big boost. You should also use them strategically, such as using a color changer to create a large group of bubbles, or using a fireball to clear a whole row or column of bubbles.</li>
104
- </ul>
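- <p>Scoring rules differ from game to game, so the sketch below uses a made-up quadratic rule purely to illustrate why popping one large group usually beats popping several small ones.</p>
- <pre><code>def cluster_score(popped, base=10):
-     """Made-up scoring rule: bubbles in bigger clusters are worth more each."""
-     return base * popped * popped
- 
- for n in (3, 4, 6):
-     print(n, "bubbles ->", cluster_score(n), "points")
- # Two separate groups of 3 score 90 + 90 = 180; one group of 6 scores 360.
- </code></pre>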
105
- <h3>The challenges and rewards of playing 3D bubble shooter games</h3>
106
- <p>Playing 3D bubble shooter games can be challenging and rewarding at the same time. There are many challenges that you can face while playing these games, such as:</p>
107
- <ul>
108
- <li>The increasing difficulty and complexity of the levels: As you progress in the game, the levels become harder and more complex. You may encounter more colors, shapes, and types of bubbles, as well as more obstacles, such as metal bubbles, ice bubbles, stone bubbles, etc. You may also have to deal with more limited time, moves, or shots.</li>
109
- <li>The unpredictable and random nature of the game: The game is based on luck and chance, as well as skill and strategy. You never know what color or type of bubble you will get next, or where it will land on the board. You also have to adapt to the changing conditions and situations of the game.</li>
110
- <li>The competition and comparison with other players: The game can be competitive and comparative, as you can see your score and rank on the leaderboard, or compare your performance with other players online or offline. You may also feel pressured or motivated to beat your own records or achievements.</li>
111
- </ul>
112
- <p>However, there are also many rewards that you can get from playing 3D bubble shooter games, such as:</p>
113
- <ul>
114
- <li>The fun and enjoyment of the game: The game is fun and enjoyable to play, as it can keep you entertained and engaged for hours. You can also experience different emotions and sensations while playing, such as excitement, satisfaction, frustration, relief, etc.</li>
115
- <li>The relaxation and stress relief of the game: The game can help you relax and relieve stress, as it can distract you from your worries and problems. You can also use the game as a way to unwind and relax after a long day or a busy week.</li>
116
- <li>The improvement and development of your skills: The game can help you improve and develop your skills, such as your spatial awareness, coordination, concentration, memory, logic, problem-solving, creativity, etc. You can also learn new things and gain new knowledge from playing the game.</li>
117
- </ul>
118
- <h2>Conclusion</h2>
119
- <p>In conclusion, 3D bubble shooter games are a fun and addictive way to relax and enjoy your free time. They are easy-to-learn but hard-to-master puzzle games that involve shooting colorful bubbles and matching them with other bubbles of the same color. They offer different modes and levels of gameplay, cool boosters and power-ups to help you win, amazing graphics and sound effects to enhance your gaming experience, and many challenges and rewards to keep you motivated and satisfied. You can download and play 3D bubble shooter games for free on your device, from various sources and platforms. You can also improve your skills and strategies in 3D bubble shooter games by following some tips and tricks. 3D bubble shooter games are a great way to have fun and relax, as well as to improve your mental abilities and learn new things. So, what are you waiting for? Download a 3D bubble shooter game today and start popping some bubbles!</p>
120
- <h2>FAQs</h2>
121
- <p>Here are some frequently asked questions about 3D bubble shooter games:</p>
122
- <ol>
123
- <li>What is the difference between 2D and 3D bubble shooter games?</li>
124
- <p>A: The main difference between 2D and 3D bubble shooter games is the shape and orientation of the board. In 2D bubble shooter games, the board is flat and has rows or columns of bubbles. In 3D bubble shooter games, the board is spherical or cylindrical and has layers or rings of bubbles. This affects the gameplay, as you have to consider the depth and perspective of your shots in 3D mode.</p>
125
- <li>How can I get more coins or gems in 3D bubble shooter games?</li>
126
- <p>A: Coins or gems are the currency of 3D bubble shooter games, which you can use to buy boosters, power-ups, or extra lives. You can get more coins or gems by completing levels, achieving goals, watching ads, or making in-app purchases.</p>
127
- <li>How can I play 3D bubble shooter games offline?</li>
128
- <p>A: Some 3D bubble shooter games support offline play, which means that you can play them without an internet connection. To do that, you need to download and install the game on your device first, and then launch it while offline. However, some features or functions of the game may not be available offline, such as leaderboards, achievements, or updates.</p>
129
- <li>Are 3D bubble shooter games suitable for children?</li>
130
- <p>A: Yes, 3D bubble shooter games are suitable for children, as they are fun, colorful, and easy to play. They can also help children develop their cognitive, motor, and social skills, as well as their creativity and imagination. However, parents should supervise their children while playing these games, especially when it comes to online interactions or in-app purchases.</p>
131
- <li>What are some of the best 3D bubble shooter games to play?</li>
132
- <p>A: There are many 3D bubble shooter games to choose from, but some of the best ones are:</p>
133
- <ul>
134
- <li><a href="">Bubble Shooter 3D Galaxy</a>: This game takes you to a galaxy full of bubbles that you have to pop and explore. You can enjoy over 1000 levels of cosmic fun, with different planets, stars, asteroids, and aliens. You can also use awesome boosters and power-ups to blast your way through the galaxy.</li>
135
- <li><a href="">Bubble Shooter 3D Magic Forest</a>: This game transports you to a magical forest where you have to shoot bubbles and match them with the animals and plants. You can play over 500 levels of enchanting fun, with different creatures, flowers, mushrooms, and more. You can also use magical boosters and power-ups to help you in your adventure.</li>
136
- <li><a href="">Bubble Shooter 3D Halloween</a>: This game invites you to a spooky Halloween party where you have to shoot bubbles and match them with the ghosts and monsters. You can play over 300 levels of scary fun, with different pumpkins, bats, spiders, and more. You can also use creepy boosters and power-ups to spook your way through the party.</li>
137
- </ul>
138
- </ol></p>
139
- <br />
140
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/Download PUBG MOBILE MOD APK with Unlimited Features and Anti-Ban.md DELETED
@@ -1,101 +0,0 @@
1
- <br />
2
- <h1>How to Download PUBG Mobile Mod APK and Enjoy Its Amazing Features</h1>
3
- <p>PUBG Mobile is one of the most popular and addictive mobile games in the world. Millions of players enjoy its thrilling and realistic gameplay, where they have to survive in a shrinking map with up to 100 other players. However, some players are not satisfied with the official game, and they look for ways to enhance their gaming experience. One of these ways is to download PUBG Mobile Mod APK, a modified version of the game that offers various features that are not available in the original game. In this article, we will tell you what PUBG Mobile Mod APK is, why you should download it, how to download it, what are the risks of downloading it, and some frequently asked questions about it.</p>
4
- <h2>download pubg mobile mod</h2><br /><p><b><b>DOWNLOAD</b> &#9734; <a href="https://jinyurl.com/2uNThS">https://jinyurl.com/2uNThS</a></b></p><br /><br />
5
- <h2>What is PUBG Mobile Mod APK?</h2>
6
- <h3>PUBG Mobile Mod APK is a modified version of the popular battle royale game PUBG Mobile</h3>
7
- <p>PUBG Mobile Mod APK is a modified version of the official PUBG Mobile game, which is developed by Krafton and Level Infinite. The modded version is created by third-party developers or hackers, who modify the original game files and add new features or functions to the game. These features or functions are usually called hacks or cheats, as they give an unfair advantage to the players who use them.</p>
8
- <h3>It offers various features that are not available in the official game, such as ESP, aimbot, wallhack, speed hack, jump hack, and more</h3>
9
- <p>PUBG Mobile Mod APK offers various features that are not available in the official game, such as ESP (Extra Sensory Perception), aimbot (auto-aiming), wallhack (seeing through walls), speed hack (increasing movement speed), jump hack (increasing jump height), and more. These features can help you spot your enemies easily, shoot them accurately, move faster, jump higher, and more. These features can make the game more fun and exciting, as you can dominate the battlefield and win more matches. However, they can also make the game unfair and unbalanced, as you can gain an edge over your opponents who play the game normally.</p>
10
- <h2>Why Download PUBG Mobile Mod APK?</h2>
11
- <h3>PUBG Mobile Mod APK can enhance your gaming experience and give you an edge over your opponents</h3>
12
- <p>PUBG Mobile Mod APK can enhance your gaming experience and give you an edge over your opponents, as you can use the features that are not available in the official game. You can improve your skills, performance, and stats, as you can spot, shoot, move, and jump better than your enemies. You can also enjoy the game more, as you can explore new possibilities and scenarios that are not possible in the original game. You can have more fun and excitement, as you can win more matches and rank higher in the leaderboards.</p>
13
- <h3>You can access all the premium items, skins, weapons, and vehicles for free</h3>
14
- <p>PUBG Mobile Mod APK also allows you to access all the premium items, skins, weapons, and vehicles for free, without spending any real money or UC (Unknown Cash), which is the in-game currency of PUBG Mobile. You can unlock and use all the items that are otherwise only available through purchasing or completing missions or events. You can customize your character and equipment according to your preference and style. You can also impress your friends and other players with your rare and exclusive items.</p>
15
- <h3>You can customize your game settings and preferences according to your liking</h3>
16
- <p>PUBG Mobile Mod APK also lets you customize your game settings and preferences according to your liking, without following the default or recommended settings of the official game. You can adjust the graphics quality, sound effects, controls, sensitivity, frame rate, and more. You can also enable or disable the features of the modded version according to your needs and wishes. You can tailor your game experience to suit your device specifications and personal taste.</p>
17
- <h2>How to Download PUBG Mobile Mod APK?</h2>
18
- <h3>You need to find a reliable and safe source to download the modded APK file</h3>
19
- <p>The first step to download PUBG Mobile Mod APK is to find a reliable and safe source to download the modded APK file. There are many websites and platforms that claim to offer PUBG Mobile Mod APK for free, but not all of them are trustworthy or secure. Some of them may contain malware or viruses that can harm your device or data. Some of them may also provide fake or outdated versions of the modded APK file that may not work properly or at all. Therefore, you need to do some research and check the reviews and ratings of the source before downloading anything from it.</p>
20
- <h3>You need to enable the installation of unknown sources on your device</h3>
21
- <p>The next step to download PUBG Mobile Mod APK is to enable the installation of unknown sources on your device. This is because PUBG Mobile Mod APK is not an official app from Google Play Store or App Store, and it is considered as an unknown or third-party app by your device. Therefore, you need to allow your device to install apps from sources other than the official ones. To do this, you need to go to your device settings, security settings, and enable the option of unknown sources or allow from this source.</p>
22
- <h3>You need to uninstall the original PUBG Mobile game from your device</h3>
23
- <p>The third step to download PUBG Mobile Mod APK is to uninstall the original PUBG Mobile game from your device. This is because PUBG Mobile Mod APK cannot coexist with the official game on the same device, as they have the same package name and signature. Therefore, you need to remove the original game from your device before installing the modded version. To do this, you need to go to your device settings, apps settings, find PUBG Mobile app, and uninstall it.</p>
24
- <p>download pubg mobile mod apk latest version<br />
25
- download pubg mobile mod menu<br />
26
- download pubg mobile mod esp<br />
27
- download pubg mobile mod unlimited uc<br />
28
- download pubg mobile mod aimbot<br />
29
- download pubg mobile mod anti ban<br />
30
- download pubg mobile mod no recoil<br />
31
- download pubg mobile mod obb<br />
32
- download pubg mobile mod global<br />
33
- download pubg mobile mod kr<br />
34
- download pubg mobile mod data<br />
35
- download pubg mobile mod free fire<br />
36
- download pubg mobile mod god mode<br />
37
- download pubg mobile mod hack<br />
38
- download pubg mobile mod injector<br />
39
- download pubg mobile mod ios<br />
40
- download pubg mobile mod magic bullet<br />
41
- download pubg mobile mod new era<br />
42
- download pubg mobile mod offline<br />
43
- download pubg mobile mod online<br />
44
- download pubg mobile mod plus<br />
45
- download pubg mobile mod root<br />
46
- download pubg mobile mod script<br />
47
- download pubg mobile mod speed hack<br />
48
- download pubg mobile mod vip<br />
49
- download pubg mobile mod wallhack<br />
50
- download pubg mobile lite mod apk<br />
51
- download pubg mobile lite mod menu<br />
52
- download pubg mobile lite mod esp<br />
53
- download pubg mobile lite mod unlimited bc<br />
54
- download pubg mobile lite mod aimbot<br />
55
- download pubg mobile lite mod anti ban<br />
56
- download pubg mobile lite mod no recoil<br />
57
- download pubg mobile lite mod obb<br />
58
- download pubg mobile lite mod global<br />
59
- download pubg mobile lite mod data<br />
60
- download pubg mobile lite mod free fire<br />
61
- download pubg mobile lite mod god mode<br />
62
- download pubg mobile lite mod hack<br />
63
- download pubg mobile lite mod injector<br />
64
- download pubg mobile lite mod ios<br />
65
- download pubg mobile lite mod magic bullet<br />
66
- download pubg mobile lite mod new era<br />
67
- download pubg mobile lite mod offline<br />
68
- download pubg mobile lite mod online<br />
69
- download pubg mobile lite mod plus<br />
70
- download pubg mobile lite mod root<br />
71
- download pubg mobile lite mod script<br />
72
- download pubg mobile lite mod speed hack</p> <h3>You need to install the PUBG Mobile Mod APK file and grant the required permissions</h3>
73
- <p>The fourth step to download PUBG Mobile Mod APK is to install the PUBG Mobile Mod APK file and grant the required permissions. To do this, you need to locate the downloaded file on your device storage, tap on it, and follow the installation instructions. You may also need to grant some permissions to the app, such as storage, camera, microphone, location, and more. These permissions are necessary for the app to function properly and access the features of the modded version.</p>
74
- <h3>You need to launch the game and enjoy its features</h3>
75
- <p>The final step to download PUBG Mobile Mod APK is to launch the game and enjoy its features. To do this, you need to open the app icon on your device screen, sign in with your account or create a new one, and start playing the game. You can access the features of the modded version from the game menu or settings. You can also use some hotkeys or commands to activate or deactivate some features during the game. You can now enjoy the game with more features and advantages than before.</p>
76
- <h2>What are the Risks of Downloading PUBG Mobile Mod APK?</h2>
77
- <h3>PUBG Mobile Mod APK is not an official product of Krafton or Level Infinite, and it violates their terms of service</h3>
78
- <p>One of the risks of downloading PUBG Mobile Mod APK is that it is not an official product of Krafton or Level Infinite, and it violates their terms of service. PUBG Mobile Mod APK is created by unauthorized developers or hackers, who have no affiliation or permission from the original game developers or publishers. By downloading and using PUBG Mobile Mod APK, you are breaking the rules and regulations of the official game, and you may face legal consequences or penalties for doing so.</p>
79
- <h3>You may face legal issues or penalties for using unauthorized software or cheating in the game</h3>
80
- <p>Another risk of downloading PUBG Mobile Mod APK is that you may face legal issues or penalties for using unauthorized software or cheating in the game. PUBG Mobile Mod APK is considered as a form of software piracy or intellectual property theft, as it infringes on the rights and interests of the original game developers and publishers. By downloading and using PUBG Mobile Mod APK, you are committing a crime and you may be sued or fined for doing so. Moreover, PUBG Mobile Mod APK is also considered as a form of cheating or hacking in the game, as it gives an unfair advantage to the players who use it. By downloading and using PUBG Mobile Mod APK, you are violating the fair play and sportsmanship of the game, and you may be banned or suspended from the game for doing so.</p> <h3>You may expose your device to malware or viruses that can harm your data or privacy</h3>
81
- <p>A third risk of downloading PUBG Mobile Mod APK is that you may expose your device to malware or viruses that can harm your data or privacy. PUBG Mobile Mod APK is not a verified or tested app, and it may contain malicious code or software that can infect your device or steal your information. By downloading and installing PUBG Mobile Mod APK, you are risking your device security and performance, and you may lose your data or compromise your privacy. You may also face identity theft, fraud, or phishing attacks from hackers or scammers who may use your data for illegal purposes.</p>
82
- <h2>Conclusion</h2>
83
- <h3>PUBG Mobile Mod APK is a tempting option for players who want to enjoy the game with more features and advantages</h3>
84
- <p>PUBG Mobile Mod APK is a tempting option for players who want to enjoy the game with more features and advantages than the official game. It offers various features that are not available in the original game, such as ESP, aimbot, wallhack, speed hack, jump hack, and more. It also allows you to access all the premium items, skins, weapons, and vehicles for free. It also lets you customize your game settings and preferences according to your liking.</p>
85
- <h3>However, it also comes with many risks and drawbacks that can ruin your gaming experience and reputation</h3>
86
- <p>However, PUBG Mobile Mod APK also comes with many risks and drawbacks that can ruin your gaming experience and reputation. It is not an official product of Krafton or Level Infinite, and it violates their terms of service. You may face legal issues or penalties for using unauthorized software or cheating in the game. You may get banned or suspended from the game for using hacks or exploits. You may expose your device to malware or viruses that can harm your data or privacy.</p>
87
- <h3>It is advisable to play the game fairly and ethically, and avoid using any cheats or hacks that can harm yourself or others</h3>
88
- <p>Therefore, it is advisable to play the game fairly and ethically, and avoid using any cheats or hacks that can harm yourself or others. PUBG Mobile is a fun and challenging game that requires skill, strategy, and teamwork. It is more rewarding and satisfying to play the game without any unfair advantages or shortcuts. It is also more respectful and honorable to play the game without any dishonesty or deception. It is also safer and smarter to play the game without any risks or threats to your device or data.</p>
89
- <h2>FAQs</h2>
90
- <h4>Is PUBG Mobile Mod APK legal?</h4>
91
- <p>No, PUBG Mobile Mod APK is not legal, as it is a modified version of the official PUBG Mobile game, which is developed by Krafton and Level Infinite. The modded version is created by unauthorized developers or hackers, who have no affiliation or permission from the original game developers or publishers. By downloading and using PUBG Mobile Mod APK, you are breaking the rules and regulations of the official game, and you may face legal consequences or penalties for doing so.</p>
92
- <h4>How can I avoid getting banned for using PUBG Mobile Mod APK?</h4>
93
- <p>The best way to avoid getting banned for using PUBG Mobile Mod APK is to not use it at all. PUBG Mobile has a strict anti-cheat system that can detect any abnormal activities or behaviors in the game. If you are caught using any hacks or cheats in the game, you will be banned or suspended from the game immediately. There is no guarantee that any PUBG Mobile Mod APK can bypass the anti-cheat system or protect you from getting banned. Therefore, it is better to play the game normally and fairly, without using any cheats or hacks.</p>
94
- <h4>What are some of the best features of PUBG Mobile Mod APK?</h4>
95
- <p>Some of the best features of PUBG Mobile Mod APK are: - ESP (Extra Sensory Perception): This feature allows you to see your enemies' location, health, name, distance, weapons, items, and more on your screen. - Aimbot (auto-aiming): This feature allows you to automatically aim at your enemies' head or body, and shoot them with high accuracy and precision. - Wallhack (seeing through walls): This feature allows you to see through walls and other obstacles, and spot your enemies behind them. - Speed hack (increasing movement speed): This feature allows you to increase your movement speed, and run faster than normal. - Jump hack (increasing jump height): This feature allows you to increase your jump height, and jump higher than normal.</p>
96
- <h4>Where can I download PUBG Mobile Mod APK safely?</h4>
97
- <p>There is no safe source to download PUBG Mobile Mod APK, as it is an unofficial and unverified app that may contain malware or viruses that can harm your device or data. PUBG Mobile Mod APK is also illegal and unethical, and it may get you banned or penalized from the game. Therefore, it is not recommended to download PUBG Mobile Mod APK from any source. The only safe and legal way to play PUBG Mobile is to download the official game from Google Play Store or App Store, and play it without any cheats or hacks.</p>
98
- <h4>How can I update PUBG Mobile Mod APK?</h4>
99
- <p>You cannot update PUBG Mobile Mod APK from the official game, as they are not compatible or synchronized with each other. If you want to update PUBG Mobile Mod APK, you need to find a new version of the modded APK file from the source where you downloaded it, and install it on your device. However, this may not be easy or safe, as the source may not provide regular updates or may provide fake or harmful updates. Therefore, it is better to avoid using PUBG Mobile Mod APK, and stick to the official game that provides frequent and secure updates.</p> 197e85843d<br />
100
- <br />
101
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/2ndelement/voicevox/voicevox_engine/utility/__init__.py DELETED
@@ -1,20 +0,0 @@
1
- from .connect_base64_waves import (
2
- ConnectBase64WavesException,
3
- connect_base64_waves,
4
- decode_base64_waves,
5
- )
6
- from .core_version_utility import get_latest_core_version, parse_core_version
7
- from .mutex_utility import mutex_wrapper
8
- from .path_utility import delete_file, engine_root, get_save_dir
9
-
10
- __all__ = [
11
- "ConnectBase64WavesException",
12
- "connect_base64_waves",
13
- "decode_base64_waves",
14
- "get_latest_core_version",
15
- "parse_core_version",
16
- "delete_file",
17
- "engine_root",
18
- "get_save_dir",
19
- "mutex_wrapper",
20
- ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIConsultant/MusicGen/tests/common_utils/temp_utils.py DELETED
@@ -1,56 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- import os
8
- import tempfile
9
-
10
-
11
- class TempDirMixin:
12
- """Mixin to provide easy access to temp dir.
13
- """
14
-
15
- temp_dir_ = None
16
-
17
- @classmethod
18
- def get_base_temp_dir(cls):
19
- # If AUDIOCRAFT_TEST_DIR is set, use it instead of temporary directory.
20
- # this is handy for debugging.
21
- key = "AUDIOCRAFT_TEST_DIR"
22
- if key in os.environ:
23
- return os.environ[key]
24
- if cls.temp_dir_ is None:
25
- cls.temp_dir_ = tempfile.TemporaryDirectory()
26
- return cls.temp_dir_.name
27
-
28
- @classmethod
29
- def tearDownClass(cls):
30
- if cls.temp_dir_ is not None:
31
- try:
32
- cls.temp_dir_.cleanup()
33
- cls.temp_dir_ = None
34
- except PermissionError:
35
- # On Windows there is a known issue with `shutil.rmtree`,
36
- # which fails intermittently.
37
- # https://github.com/python/cpython/issues/74168
38
- # Following the above thread, we ignore it.
39
- pass
40
- super().tearDownClass()
41
-
42
- @property
43
- def id(self):
44
- return self.__class__.__name__
45
-
46
- def get_temp_path(self, *paths):
47
- temp_dir = os.path.join(self.get_base_temp_dir(), self.id)
48
- path = os.path.join(temp_dir, *paths)
49
- os.makedirs(os.path.dirname(path), exist_ok=True)
50
- return path
51
-
52
- def get_temp_dir(self, *paths):
53
- temp_dir = os.path.join(self.get_base_temp_dir(), self.id)
54
- path = os.path.join(temp_dir, *paths)
55
- os.makedirs(path, exist_ok=True)
56
- return path
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/encoders/open_clap/openai.py DELETED
@@ -1,129 +0,0 @@
1
- """ OpenAI pretrained model functions
2
-
3
- Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
4
- """
5
-
6
- import os
7
- import warnings
8
- from typing import Union, List
9
-
10
- import torch
11
-
12
- from .model import build_model_from_openai_state_dict
13
- from .pretrained import get_pretrained_url, list_pretrained_tag_models, download_pretrained
14
-
15
- __all__ = ["list_openai_models", "load_openai_model"]
16
-
17
-
18
- def list_openai_models() -> List[str]:
19
- """Returns the names of available CLIP models"""
20
- return list_pretrained_tag_models('openai')
21
-
22
-
23
- def load_openai_model(
24
- name: str,
25
- model_cfg,
26
- device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu",
27
- jit=True,
28
- cache_dir=os.path.expanduser("~/.cache/clip"),
29
- enable_fusion: bool = False,
30
- fusion_type: str = 'None'
31
- ):
32
- """Load a CLIP model, preserve its text pretrained part, and set in the CLAP model
33
-
34
- Parameters
35
- ----------
36
- name : str
37
- A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
38
- device : Union[str, torch.device]
39
- The device to put the loaded model
40
- jit : bool
41
- Whether to load the optimized JIT model (default) or more hackable non-JIT model.
42
-
43
- Returns
44
- -------
45
- model : torch.nn.Module
46
- The CLAP model
47
- preprocess : Callable[[PIL.Image], torch.Tensor]
48
- A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
49
- """
50
- if get_pretrained_url(name, 'openai'):
51
- model_path = download_pretrained(get_pretrained_url(name, 'openai'), root=cache_dir)
52
- elif os.path.isfile(name):
53
- model_path = name
54
- else:
55
- raise RuntimeError(f"Model {name} not found; available models = {list_openai_models()}")
56
-
57
- try:
58
- # loading JIT archive
59
- model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval()
60
- state_dict = None
61
- except RuntimeError:
62
- # loading saved state dict
63
- if jit:
64
- warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
65
- jit = False
66
- state_dict = torch.load(model_path, map_location="cpu")
67
-
68
- if not jit:
69
- try:
70
- model = build_model_from_openai_state_dict(state_dict or model.state_dict(), model_cfg, enable_fusion, fusion_type).to(device)
71
- except KeyError:
72
- sd = {k[7:]: v for k, v in state_dict["state_dict"].items()}
73
- model = build_model_from_openai_state_dict(sd, model_cfg, enable_fusion, fusion_type).to(device)
74
-
75
- if str(device) == "cpu":
76
- model.float()
77
- return model
78
-
79
- # patch the device names
80
- device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])
81
- device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1]
82
-
83
- def patch_device(module):
84
- try:
85
- graphs = [module.graph] if hasattr(module, "graph") else []
86
- except RuntimeError:
87
- graphs = []
88
-
89
- if hasattr(module, "forward1"):
90
- graphs.append(module.forward1.graph)
91
-
92
- for graph in graphs:
93
- for node in graph.findAllNodes("prim::Constant"):
94
- if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"):
95
- node.copyAttributes(device_node)
96
-
97
- model.apply(patch_device)
98
- patch_device(model.encode_audio)
99
- patch_device(model.encode_text)
100
-
101
- # patch dtype to float32 on CPU
102
- if str(device) == "cpu":
103
- float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])
104
- float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
105
- float_node = float_input.node()
106
-
107
- def patch_float(module):
108
- try:
109
- graphs = [module.graph] if hasattr(module, "graph") else []
110
- except RuntimeError:
111
- graphs = []
112
-
113
- if hasattr(module, "forward1"):
114
- graphs.append(module.forward1.graph)
115
-
116
- for graph in graphs:
117
- for node in graph.findAllNodes("aten::to"):
118
- inputs = list(node.inputs())
119
- for i in [1, 2]: # dtype can be the second or third argument to aten::to()
120
- if inputs[i].node()["value"] == 5:
121
- inputs[i].node().copyAttributes(float_node)
122
-
123
- model.apply(patch_float)
124
- patch_float(model.encode_audio)
125
- patch_float(model.encode_text)
126
- model.float()
127
-
128
- model.audio_branch.audio_length = model.audio_cfg.audio_length
129
- return model
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/losses_audio/contperceptual.py DELETED
@@ -1,123 +0,0 @@
1
- import torch
2
- import torch.nn as nn
3
- import torch.nn.functional as F
4
- import sys
5
-
6
- sys.path.insert(0, '.') # nopep8
7
- from ldm.modules.losses_audio.vqperceptual import *
8
-
9
-
10
- class LPAPSWithDiscriminator(nn.Module):
11
- def __init__(self, disc_start, logvar_init=0.0, kl_weight=1.0, pixelloss_weight=1.0,
12
- disc_num_layers=3, disc_in_channels=3, disc_factor=1.0, disc_weight=1.0,
13
- perceptual_weight=1.0, use_actnorm=False, disc_conditional=False,
14
- disc_loss="hinge"):
15
-
16
- super().__init__()
17
- assert disc_loss in ["hinge", "vanilla"]
18
- self.kl_weight = kl_weight
19
- self.pixel_weight = pixelloss_weight
20
- self.perceptual_loss = LPAPS().eval()  # LPIPS is for natural images, while LPAPS is for mel-spectrograms
21
- self.perceptual_weight = perceptual_weight
22
- # output log variance
23
- self.logvar = nn.Parameter(torch.ones(size=()) * logvar_init)
24
-
25
- self.discriminator = NLayerDiscriminator(input_nc=disc_in_channels,
26
- n_layers=disc_num_layers,
27
- use_actnorm=use_actnorm,
28
- ).apply(weights_init)
29
- self.discriminator_iter_start = disc_start
30
- if disc_loss == "hinge":
31
- self.disc_loss = hinge_d_loss
32
- elif disc_loss == "vanilla":
33
- self.disc_loss = vanilla_d_loss
34
- else:
35
- raise ValueError(f"Unknown GAN loss '{disc_loss}'.")
36
- print(f"LPAPSWithDiscriminator running with {disc_loss} loss.")
37
- self.disc_factor = disc_factor
38
- self.discriminator_weight = disc_weight
39
- self.disc_conditional = disc_conditional
40
-
41
-
42
- def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None):
43
- if last_layer is not None:
44
- nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0]
45
- g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0]
46
- else:
47
- nll_grads = torch.autograd.grad(nll_loss, self.last_layer[0], retain_graph=True)[0]
48
- g_grads = torch.autograd.grad(g_loss, self.last_layer[0], retain_graph=True)[0]
49
-
50
- d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4)
51
- d_weight = torch.clamp(d_weight, 0.0, 1e4).detach()
52
- d_weight = d_weight * self.discriminator_weight
53
- return d_weight
54
-
55
- def forward(self, inputs, reconstructions, posteriors, optimizer_idx,
56
- global_step, last_layer=None, cond=None, split="train", weights=None):
57
- rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous())
58
- if self.perceptual_weight > 0:
59
- p_loss = self.perceptual_loss(inputs.contiguous(), reconstructions.contiguous())
60
- # print(f"p_loss {p_loss}")
61
- rec_loss = rec_loss + self.perceptual_weight * p_loss
62
- else:
63
- p_loss = torch.tensor([0.0])
64
-
65
- nll_loss = rec_loss / torch.exp(self.logvar) + self.logvar
66
- weighted_nll_loss = nll_loss
67
- if weights is not None:
68
- weighted_nll_loss = weights*nll_loss
69
- weighted_nll_loss = torch.sum(weighted_nll_loss) / weighted_nll_loss.shape[0]
70
- nll_loss = torch.sum(nll_loss) / nll_loss.shape[0]
71
- kl_loss = posteriors.kl()
72
- kl_loss = torch.sum(kl_loss) / kl_loss.shape[0]
73
-
74
- # now the GAN part
75
- if optimizer_idx == 0:
76
- # generator update
77
- if cond is None:
78
- assert not self.disc_conditional
79
- logits_fake = self.discriminator(reconstructions.contiguous())
80
- else:
81
- assert self.disc_conditional
82
- logits_fake = self.discriminator(torch.cat((reconstructions.contiguous(), cond), dim=1))
83
- g_loss = -torch.mean(logits_fake)
84
-
85
- try:
86
- d_weight = self.calculate_adaptive_weight(nll_loss, g_loss, last_layer=last_layer)
87
- except RuntimeError:
88
- assert not self.training
89
- d_weight = torch.tensor(0.0)
90
-
91
- disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
92
- loss = weighted_nll_loss + self.kl_weight * kl_loss + d_weight * disc_factor * g_loss
93
-
94
- log = {"{}/total_loss".format(split): loss.clone().detach().mean(),
95
- "{}/logvar".format(split): self.logvar.detach(),
96
- "{}/kl_loss".format(split): kl_loss.detach().mean(),
97
- "{}/nll_loss".format(split): nll_loss.detach().mean(),
98
- "{}/rec_loss".format(split): rec_loss.detach().mean(),
99
- "{}/d_weight".format(split): d_weight.detach(),
100
- "{}/disc_factor".format(split): torch.tensor(disc_factor),
101
- "{}/g_loss".format(split): g_loss.detach().mean(),
102
- }
103
- return loss, log
104
-
105
- if optimizer_idx == 1:
106
- # second pass for discriminator update
107
- if cond is None:
108
- logits_real = self.discriminator(inputs.contiguous().detach())
109
- logits_fake = self.discriminator(reconstructions.contiguous().detach())
110
- else:
111
- logits_real = self.discriminator(torch.cat((inputs.contiguous().detach(), cond), dim=1))
112
- logits_fake = self.discriminator(torch.cat((reconstructions.contiguous().detach(), cond), dim=1))
113
-
114
- disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
115
- d_loss = disc_factor * self.disc_loss(logits_real, logits_fake)
116
-
117
- log = {"{}/disc_loss".format(split): d_loss.clone().detach().mean(),
118
- "{}/logits_real".format(split): logits_real.detach().mean(),
119
- "{}/logits_fake".format(split): logits_fake.detach().mean()
120
- }
121
- return d_loss, log
122
-
123
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AgentVerse/agentVerse/agentverse_command/__init__.py DELETED
File without changes
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/scroller.js DELETED
@@ -1,2 +0,0 @@
1
- import Scroller from './input/scroller/Scroller.js';
2
- export default Scroller;
 
 
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/spinner-components.js DELETED
@@ -1,39 +0,0 @@
1
- import Audio from './audio/Audio.js';
2
- import Ball from './ball/Ball.js';
3
- import Bars from './bars/Bars.js';
4
- import Box from './box/Box.js';
5
- import Clock from './clock/Clock.js';
6
- import Cube from './cube/Cube.js';
7
- import Custom from './custom/Custom.js';
8
- import Dots from './dots/Dots.js';
9
- import Facebook from './facebook/Facebook.js';
10
- import Grid from './grid/Grid.js';
11
- import Los from './los/Los.js';
12
- import Orbit from './orbit/Orbit.js';
13
- import Oval from './oval/Oval.js';
14
- import Pie from './pie/Pie.js';
15
- import Puff from './puff/Puff.js';
16
- import Radio from './radio/Radio.js';
17
- import Rings from './rings/Rings.js';
18
- import Spinner from './spinner/Spinner.js';
19
-
20
- export {
21
- Audio,
22
- Ball,
23
- Bars,
24
- Box,
25
- Clock,
26
- Cube,
27
- Custom,
28
- Dots,
29
- Facebook,
30
- Grid,
31
- Los,
32
- Orbit,
33
- Oval,
34
- Pie,
35
- Puff,
36
- Radio,
37
- Rings,
38
- Spinner
39
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/folder/Folder.d.ts DELETED
@@ -1,65 +0,0 @@
1
- // import * as Phaser from 'phaser';
2
- import Sizer from '../sizer/Sizer';
3
- import OpenCloseTransition from '../../../plugins/behaviors/openclosetransition/OpenCloseTransition';
4
-
5
- export default Folder;
6
-
7
- declare namespace Folder {
8
-
9
- interface IConfig extends Sizer.IConfig {
10
- background?: Phaser.GameObjects.GameObject,
11
-
12
- title: Phaser.GameObjects.GameObject,
13
-
14
- child: Phaser.GameObjects.GameObject,
15
- customChildOrigin?: boolean,
16
-
17
- toggleByTarget?: Phaser.GameObjects.GameObject,
18
- toggleClickConfig?: {
19
- mode?: 0 | 1 | 'pointerdown' | 'pointerup' | 'press' | 'release',
20
- clickInterval?: number,
21
- threshold?: number,
22
- },
23
-
24
- align?: {
25
- title?: Sizer.AlignTypes,
26
- child?: Sizer.AlignTypes,
27
- },
28
-
29
- expand?: {
30
- title?: boolean,
31
- child?: boolean,
32
- },
33
-
34
- transition?: {
35
- duration?: number,
36
- expandCallback?: OpenCloseTransition.TransitCallbackType,
37
- collapseCallback?: OpenCloseTransition.TransitCallbackType,
38
- },
39
-
40
- reLayoutTarget?: Phaser.GameObjects.GameObject,
41
-
42
- onExpandStart?: (folder: this) => void,
43
- onExpandComplete?: (folder: this) => void,
44
- onCollapseStart?: (folder: this) => void,
45
- onCollapseComplete?: (folder: this) => void,
46
- }
47
- }
48
-
49
- declare class Folder extends Sizer {
50
- constructor(
51
- scene: Phaser.Scene,
52
- config?: Folder.IConfig
53
- );
54
-
55
- setTransitionDuration(duration?: number): this;
56
- transitionDuration: number;
57
-
58
- setExpandCallback(callback?: OpenCloseTransition.TransitCallbackType): this;
59
- setCollapseCallback(callback?: OpenCloseTransition.TransitCallbackType): this;
60
-
61
- expand(duration?: number): this;
62
- collapse(duration?: number): this;
63
- toggle(duration?: number): this;
64
- readonly expanded: boolean;
65
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Alichuan/VITS-Umamusume-voice-synthesizer/text/english.py DELETED
@@ -1,188 +0,0 @@
1
- """ from https://github.com/keithito/tacotron """
2
-
3
- '''
4
- Cleaners are transformations that run over the input text at both training and eval time.
5
-
6
- Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners"
7
- hyperparameter. Some cleaners are English-specific. You'll typically want to use:
8
- 1. "english_cleaners" for English text
9
- 2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using
10
- the Unidecode library (https://pypi.python.org/pypi/Unidecode)
11
- 3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update
12
- the symbols in symbols.py to match your data).
13
- '''
14
-
15
-
16
- # Regular expression matching whitespace:
17
-
18
-
19
- import re
20
- import inflect
21
- from unidecode import unidecode
22
- import eng_to_ipa as ipa
23
- _inflect = inflect.engine()
24
- _comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])')
25
- _decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)')
26
- _pounds_re = re.compile(r'£([0-9\,]*[0-9]+)')
27
- _dollars_re = re.compile(r'\$([0-9\.\,]*[0-9]+)')
28
- _ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)')
29
- _number_re = re.compile(r'[0-9]+')
30
-
31
- # List of (regular expression, replacement) pairs for abbreviations:
32
- _abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
33
- ('mrs', 'misess'),
34
- ('mr', 'mister'),
35
- ('dr', 'doctor'),
36
- ('st', 'saint'),
37
- ('co', 'company'),
38
- ('jr', 'junior'),
39
- ('maj', 'major'),
40
- ('gen', 'general'),
41
- ('drs', 'doctors'),
42
- ('rev', 'reverend'),
43
- ('lt', 'lieutenant'),
44
- ('hon', 'honorable'),
45
- ('sgt', 'sergeant'),
46
- ('capt', 'captain'),
47
- ('esq', 'esquire'),
48
- ('ltd', 'limited'),
49
- ('col', 'colonel'),
50
- ('ft', 'fort'),
51
- ]]
52
-
53
-
54
- # List of (ipa, lazy ipa) pairs:
55
- _lazy_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [
56
- ('r', 'ɹ'),
57
- ('æ', 'e'),
58
- ('ɑ', 'a'),
59
- ('ɔ', 'o'),
60
- ('ð', 'z'),
61
- ('θ', 's'),
62
- ('ɛ', 'e'),
63
- ('ɪ', 'i'),
64
- ('ʊ', 'u'),
65
- ('ʒ', 'ʥ'),
66
- ('ʤ', 'ʥ'),
67
- ('ˈ', '↓'),
68
- ]]
69
-
70
- # List of (ipa, lazy ipa2) pairs:
71
- _lazy_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [
72
- ('r', 'ɹ'),
73
- ('ð', 'z'),
74
- ('θ', 's'),
75
- ('ʒ', 'ʑ'),
76
- ('ʤ', 'dʑ'),
77
- ('ˈ', '↓'),
78
- ]]
79
-
80
- # List of (ipa, ipa2) pairs
81
- _ipa_to_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [
82
- ('r', 'ɹ'),
83
- ('ʤ', 'dʒ'),
84
- ('ʧ', 'tʃ')
85
- ]]
86
-
87
-
88
- def expand_abbreviations(text):
89
- for regex, replacement in _abbreviations:
90
- text = re.sub(regex, replacement, text)
91
- return text
92
-
93
-
94
- def collapse_whitespace(text):
95
- return re.sub(r'\s+', ' ', text)
96
-
97
-
98
- def _remove_commas(m):
99
- return m.group(1).replace(',', '')
100
-
101
-
102
- def _expand_decimal_point(m):
103
- return m.group(1).replace('.', ' point ')
104
-
105
-
106
- def _expand_dollars(m):
107
- match = m.group(1)
108
- parts = match.split('.')
109
- if len(parts) > 2:
110
- return match + ' dollars' # Unexpected format
111
- dollars = int(parts[0]) if parts[0] else 0
112
- cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
113
- if dollars and cents:
114
- dollar_unit = 'dollar' if dollars == 1 else 'dollars'
115
- cent_unit = 'cent' if cents == 1 else 'cents'
116
- return '%s %s, %s %s' % (dollars, dollar_unit, cents, cent_unit)
117
- elif dollars:
118
- dollar_unit = 'dollar' if dollars == 1 else 'dollars'
119
- return '%s %s' % (dollars, dollar_unit)
120
- elif cents:
121
- cent_unit = 'cent' if cents == 1 else 'cents'
122
- return '%s %s' % (cents, cent_unit)
123
- else:
124
- return 'zero dollars'
125
-
126
-
127
- def _expand_ordinal(m):
128
- return _inflect.number_to_words(m.group(0))
129
-
130
-
131
- def _expand_number(m):
132
- num = int(m.group(0))
133
- if num > 1000 and num < 3000:
134
- if num == 2000:
135
- return 'two thousand'
136
- elif num > 2000 and num < 2010:
137
- return 'two thousand ' + _inflect.number_to_words(num % 100)
138
- elif num % 100 == 0:
139
- return _inflect.number_to_words(num // 100) + ' hundred'
140
- else:
141
- return _inflect.number_to_words(num, andword='', zero='oh', group=2).replace(', ', ' ')
142
- else:
143
- return _inflect.number_to_words(num, andword='')
144
-
145
-
146
- def normalize_numbers(text):
147
- text = re.sub(_comma_number_re, _remove_commas, text)
148
- text = re.sub(_pounds_re, r'\1 pounds', text)
149
- text = re.sub(_dollars_re, _expand_dollars, text)
150
- text = re.sub(_decimal_number_re, _expand_decimal_point, text)
151
- text = re.sub(_ordinal_re, _expand_ordinal, text)
152
- text = re.sub(_number_re, _expand_number, text)
153
- return text
154
-
155
-
156
- def mark_dark_l(text):
157
- return re.sub(r'l([^aeiouæɑɔəɛɪʊ ]*(?: |$))', lambda x: 'ɫ'+x.group(1), text)
158
-
159
-
160
- def english_to_ipa(text):
161
- text = unidecode(text).lower()
162
- text = expand_abbreviations(text)
163
- text = normalize_numbers(text)
164
- phonemes = ipa.convert(text)
165
- phonemes = collapse_whitespace(phonemes)
166
- return phonemes
167
-
168
-
169
- def english_to_lazy_ipa(text):
170
- text = english_to_ipa(text)
171
- for regex, replacement in _lazy_ipa:
172
- text = re.sub(regex, replacement, text)
173
- return text
174
-
175
-
176
- def english_to_ipa2(text):
177
- text = english_to_ipa(text)
178
- text = mark_dark_l(text)
179
- for regex, replacement in _ipa_to_ipa2:
180
- text = re.sub(regex, replacement, text)
181
- return text.replace('...', '…')
182
-
183
-
184
- def english_to_lazy_ipa2(text):
185
- text = english_to_ipa(text)
186
- for regex, replacement in _lazy_ipa2:
187
- text = re.sub(regex, replacement, text)
188
- return text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Alpaca233/SadTalker/src/facerender/animate.py DELETED
@@ -1,257 +0,0 @@
1
- import os
2
- import cv2
3
- import yaml
4
- import numpy as np
5
- import warnings
6
- from skimage import img_as_ubyte
7
- import safetensors
8
- import safetensors.torch
9
- warnings.filterwarnings('ignore')
10
-
11
-
12
- import imageio
13
- import torch
14
- import torchvision
15
-
16
-
17
- from src.facerender.modules.keypoint_detector import HEEstimator, KPDetector
18
- from src.facerender.modules.mapping import MappingNet
19
- from src.facerender.modules.generator import OcclusionAwareGenerator, OcclusionAwareSPADEGenerator
20
- from src.facerender.modules.make_animation import make_animation
21
-
22
- from pydub import AudioSegment
23
- from src.utils.face_enhancer import enhancer_generator_with_len, enhancer_list
24
- from src.utils.paste_pic import paste_pic
25
- from src.utils.videoio import save_video_with_watermark
26
-
27
- try:
28
- import webui # in webui
29
- in_webui = True
30
- except:
31
- in_webui = False
32
-
33
- class AnimateFromCoeff():
34
-
35
- def __init__(self, sadtalker_path, device):
36
-
37
- with open(sadtalker_path['facerender_yaml']) as f:
38
- config = yaml.safe_load(f)
39
-
40
- generator = OcclusionAwareSPADEGenerator(**config['model_params']['generator_params'],
41
- **config['model_params']['common_params'])
42
- kp_extractor = KPDetector(**config['model_params']['kp_detector_params'],
43
- **config['model_params']['common_params'])
44
- he_estimator = HEEstimator(**config['model_params']['he_estimator_params'],
45
- **config['model_params']['common_params'])
46
- mapping = MappingNet(**config['model_params']['mapping_params'])
47
-
48
- generator.to(device)
49
- kp_extractor.to(device)
50
- he_estimator.to(device)
51
- mapping.to(device)
52
- for param in generator.parameters():
53
- param.requires_grad = False
54
- for param in kp_extractor.parameters():
55
- param.requires_grad = False
56
- for param in he_estimator.parameters():
57
- param.requires_grad = False
58
- for param in mapping.parameters():
59
- param.requires_grad = False
60
-
61
- if sadtalker_path is not None:
62
- if 'checkpoint' in sadtalker_path: # use safe tensor
63
- self.load_cpk_facevid2vid_safetensor(sadtalker_path['checkpoint'], kp_detector=kp_extractor, generator=generator, he_estimator=None)
64
- else:
65
- self.load_cpk_facevid2vid(sadtalker_path['free_view_checkpoint'], kp_detector=kp_extractor, generator=generator, he_estimator=he_estimator)
66
- else:
67
- raise AttributeError("Checkpoint should be specified for video head pose estimator.")
68
-
69
- if sadtalker_path['mappingnet_checkpoint'] is not None:
70
- self.load_cpk_mapping(sadtalker_path['mappingnet_checkpoint'], mapping=mapping)
71
- else:
72
- raise AttributeError("Checkpoint should be specified for video head pose estimator.")
73
-
74
- self.kp_extractor = kp_extractor
75
- self.generator = generator
76
- self.he_estimator = he_estimator
77
- self.mapping = mapping
78
-
79
- self.kp_extractor.eval()
80
- self.generator.eval()
81
- self.he_estimator.eval()
82
- self.mapping.eval()
83
-
84
- self.device = device
85
-
86
- def load_cpk_facevid2vid_safetensor(self, checkpoint_path, generator=None,
87
- kp_detector=None, he_estimator=None,
88
- device="cpu"):
89
-
90
- checkpoint = safetensors.torch.load_file(checkpoint_path)
91
-
92
- if generator is not None:
93
- x_generator = {}
94
- for k,v in checkpoint.items():
95
- if 'generator' in k:
96
- x_generator[k.replace('generator.', '')] = v
97
- generator.load_state_dict(x_generator)
98
- if kp_detector is not None:
99
- x_generator = {}
100
- for k,v in checkpoint.items():
101
- if 'kp_extractor' in k:
102
- x_generator[k.replace('kp_extractor.', '')] = v
103
- kp_detector.load_state_dict(x_generator)
104
- if he_estimator is not None:
105
- x_generator = {}
106
- for k,v in checkpoint.items():
107
- if 'he_estimator' in k:
108
- x_generator[k.replace('he_estimator.', '')] = v
109
- he_estimator.load_state_dict(x_generator)
110
-
111
- return None
112
-
113
- def load_cpk_facevid2vid(self, checkpoint_path, generator=None, discriminator=None,
114
- kp_detector=None, he_estimator=None, optimizer_generator=None,
115
- optimizer_discriminator=None, optimizer_kp_detector=None,
116
- optimizer_he_estimator=None, device="cpu"):
117
- checkpoint = torch.load(checkpoint_path, map_location=torch.device(device))
118
- if generator is not None:
119
- generator.load_state_dict(checkpoint['generator'])
120
- if kp_detector is not None:
121
- kp_detector.load_state_dict(checkpoint['kp_detector'])
122
- if he_estimator is not None:
123
- he_estimator.load_state_dict(checkpoint['he_estimator'])
124
- if discriminator is not None:
125
- try:
126
- discriminator.load_state_dict(checkpoint['discriminator'])
127
- except:
128
- print('No discriminator in the state-dict. Discriminator will be randomly initialized')
129
- if optimizer_generator is not None:
130
- optimizer_generator.load_state_dict(checkpoint['optimizer_generator'])
131
- if optimizer_discriminator is not None:
132
- try:
133
- optimizer_discriminator.load_state_dict(checkpoint['optimizer_discriminator'])
134
- except RuntimeError as e:
135
- print('No discriminator optimizer in the state-dict. Optimizer will not be initialized')
136
- if optimizer_kp_detector is not None:
137
- optimizer_kp_detector.load_state_dict(checkpoint['optimizer_kp_detector'])
138
- if optimizer_he_estimator is not None:
139
- optimizer_he_estimator.load_state_dict(checkpoint['optimizer_he_estimator'])
140
-
141
- return checkpoint['epoch']
142
-
143
- def load_cpk_mapping(self, checkpoint_path, mapping=None, discriminator=None,
144
- optimizer_mapping=None, optimizer_discriminator=None, device='cpu'):
145
- checkpoint = torch.load(checkpoint_path, map_location=torch.device(device))
146
- if mapping is not None:
147
- mapping.load_state_dict(checkpoint['mapping'])
148
- if discriminator is not None:
149
- discriminator.load_state_dict(checkpoint['discriminator'])
150
- if optimizer_mapping is not None:
151
- optimizer_mapping.load_state_dict(checkpoint['optimizer_mapping'])
152
- if optimizer_discriminator is not None:
153
- optimizer_discriminator.load_state_dict(checkpoint['optimizer_discriminator'])
154
-
155
- return checkpoint['epoch']
156
-
157
- def generate(self, x, video_save_dir, pic_path, crop_info, enhancer=None, background_enhancer=None, preprocess='crop', img_size=256):
158
-
159
- source_image=x['source_image'].type(torch.FloatTensor)
160
- source_semantics=x['source_semantics'].type(torch.FloatTensor)
161
- target_semantics=x['target_semantics_list'].type(torch.FloatTensor)
162
- source_image=source_image.to(self.device)
163
- source_semantics=source_semantics.to(self.device)
164
- target_semantics=target_semantics.to(self.device)
165
- if 'yaw_c_seq' in x:
166
- yaw_c_seq = x['yaw_c_seq'].type(torch.FloatTensor)
167
- yaw_c_seq = yaw_c_seq.to(self.device)
168
- else:
169
- yaw_c_seq = None
170
- if 'pitch_c_seq' in x:
171
- pitch_c_seq = x['pitch_c_seq'].type(torch.FloatTensor)
172
- pitch_c_seq = pitch_c_seq.to(self.device)
173
- else:
174
- pitch_c_seq = None
175
- if 'roll_c_seq' in x:
176
- roll_c_seq = x['roll_c_seq'].type(torch.FloatTensor)
177
- roll_c_seq = roll_c_seq.to(self.device)
178
- else:
179
- roll_c_seq = None
180
-
181
- frame_num = x['frame_num']
182
-
183
- predictions_video = make_animation(source_image, source_semantics, target_semantics,
184
- self.generator, self.kp_extractor, self.he_estimator, self.mapping,
185
- yaw_c_seq, pitch_c_seq, roll_c_seq, use_exp = True)
186
-
187
- predictions_video = predictions_video.reshape((-1,)+predictions_video.shape[2:])
188
- predictions_video = predictions_video[:frame_num]
189
-
190
- video = []
191
- for idx in range(predictions_video.shape[0]):
192
- image = predictions_video[idx]
193
- image = np.transpose(image.data.cpu().numpy(), [1, 2, 0]).astype(np.float32)
194
- video.append(image)
195
- result = img_as_ubyte(video)
196
-
197
- ### the generated video is 256x256, so we resize it back to the source aspect ratio
198
- original_size = crop_info[0]
199
- if original_size:
200
- result = [ cv2.resize(result_i,(img_size, int(img_size * original_size[1]/original_size[0]) )) for result_i in result ]
201
-
202
- video_name = x['video_name'] + '.mp4'
203
- path = os.path.join(video_save_dir, 'temp_'+video_name)
204
-
205
- imageio.mimsave(path, result, fps=float(25))
206
-
207
- av_path = os.path.join(video_save_dir, video_name)
208
- return_path = av_path
209
-
210
- audio_path = x['audio_path']
211
- audio_name = os.path.splitext(os.path.split(audio_path)[-1])[0]
212
- new_audio_path = os.path.join(video_save_dir, audio_name+'.wav')
213
- start_time = 0
214
- # cog will not keep the .mp3 filename
215
- sound = AudioSegment.from_file(audio_path)
216
- frames = frame_num
217
- end_time = start_time + frames*1/25*1000
218
- word1=sound.set_frame_rate(16000)
219
- word = word1[start_time:end_time]
220
- word.export(new_audio_path, format="wav")
221
-
222
- save_video_with_watermark(path, new_audio_path, av_path, watermark= False)
223
- print(f'The generated video is named {video_save_dir}/{video_name}')
224
-
225
- if 'full' in preprocess.lower():
226
- # only add watermark to the full image.
227
- video_name_full = x['video_name'] + '_full.mp4'
228
- full_video_path = os.path.join(video_save_dir, video_name_full)
229
- return_path = full_video_path
230
- paste_pic(path, pic_path, crop_info, new_audio_path, full_video_path, extended_crop= True if 'ext' in preprocess.lower() else False)
231
- print(f'The generated video is named {video_save_dir}/{video_name_full}')
232
- else:
233
- full_video_path = av_path
234
-
235
- #### paste back then enhancers
236
- if enhancer:
237
- video_name_enhancer = x['video_name'] + '_enhanced.mp4'
238
- enhanced_path = os.path.join(video_save_dir, 'temp_'+video_name_enhancer)
239
- av_path_enhancer = os.path.join(video_save_dir, video_name_enhancer)
240
- return_path = av_path_enhancer
241
-
242
- try:
243
- enhanced_images_gen_with_len = enhancer_generator_with_len(full_video_path, method=enhancer, bg_upsampler=background_enhancer)
244
- imageio.mimsave(enhanced_path, enhanced_images_gen_with_len, fps=float(25))
245
- except:
246
- enhanced_images_gen_with_len = enhancer_list(full_video_path, method=enhancer, bg_upsampler=background_enhancer)
247
- imageio.mimsave(enhanced_path, enhanced_images_gen_with_len, fps=float(25))
248
-
249
- save_video_with_watermark(enhanced_path, new_audio_path, av_path_enhancer, watermark= False)
250
- print(f'The generated video is named {video_save_dir}/{video_name_enhancer}')
251
- os.remove(enhanced_path)
252
-
253
- os.remove(path)
254
- os.remove(new_audio_path)
255
-
256
- return return_path
257
-
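
The safetensors loader above routes one flat state dict to three sub-modules by key prefix. A self-contained sketch of that splitting step, written as a tidier prefix-based variant of the same idea (the dummy tensors stand in for real weights):

```py
import torch

# Dummy checkpoint in the flat "<module>.<param>" layout the loader expects.
checkpoint = {
    'generator.conv.weight': torch.zeros(3),
    'kp_extractor.encoder.weight': torch.zeros(3),
    'he_estimator.fc.bias': torch.zeros(3),
}

def split_by_prefix(state_dict, prefix):
    # keep only keys under `prefix` and strip it, as load_state_dict expects
    return {k[len(prefix):]: v for k, v in state_dict.items() if k.startswith(prefix)}

print(split_by_prefix(checkpoint, 'generator.'))     # {'conv.weight': tensor([0., 0., 0.])}
print(split_by_prefix(checkpoint, 'kp_extractor.'))  # {'encoder.weight': tensor([0., 0., 0.])}
```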
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/stable_diffusion/text2img.md DELETED
@@ -1,59 +0,0 @@
1
- <!--Copyright 2023 The HuggingFace Team. All rights reserved.
2
-
3
- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
4
- the License. You may obtain a copy of the License at
5
-
6
- http://www.apache.org/licenses/LICENSE-2.0
7
-
8
- Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
9
- an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
10
- specific language governing permissions and limitations under the License.
11
- -->
12
-
13
- # Text-to-image
14
-
15
- The Stable Diffusion model was created by researchers and engineers from [CompVis](https://github.com/CompVis), [Stability AI](https://stability.ai/), [Runway](https://github.com/runwayml), and [LAION](https://laion.ai/). The [`StableDiffusionPipeline`] is capable of generating photorealistic images given any text input. It's trained on 512x512 images from a subset of the LAION-5B dataset. The model uses a frozen CLIP ViT-L/14 text encoder to condition generation on text prompts. With its 860M UNet and 123M text encoder, the model is relatively lightweight and can run on consumer GPUs. Stable Diffusion builds on latent diffusion, proposed in [High-Resolution Image Synthesis with Latent Diffusion Models](https://huggingface.co/papers/2112.10752) by Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer.
16
-
17
- The abstract from the paper is:
18
-
19
- *By decomposing the image formation process into a sequential application of denoising autoencoders, diffusion models (DMs) achieve state-of-the-art synthesis results on image data and beyond. Additionally, their formulation allows for a guiding mechanism to control the image generation process without retraining. However, since these models typically operate directly in pixel space, optimization of powerful DMs often consumes hundreds of GPU days and inference is expensive due to sequential evaluations. To enable DM training on limited computational resources while retaining their quality and flexibility, we apply them in the latent space of powerful pretrained autoencoders. In contrast to previous work, training diffusion models on such a representation allows for the first time to reach a near-optimal point between complexity reduction and detail preservation, greatly boosting visual fidelity. By introducing cross-attention layers into the model architecture, we turn diffusion models into powerful and flexible generators for general conditioning inputs such as text or bounding boxes and high-resolution synthesis becomes possible in a convolutional manner. Our latent diffusion models (LDMs) achieve a new state of the art for image inpainting and highly competitive performance on various tasks, including unconditional image generation, semantic scene synthesis, and super-resolution, while significantly reducing computational requirements compared to pixel-based DMs. Code is available at https://github.com/CompVis/latent-diffusion.*
20
-
21
- <Tip>
22
-
23
- Make sure to check out the Stable Diffusion [Tips](overview#tips) section to learn how to explore the tradeoff between scheduler speed and quality, and how to reuse pipeline components efficiently!
24
-
25
- If you're interested in using one of the official checkpoints for a task, explore the [CompVis](https://huggingface.co/CompVis), [Runway](https://huggingface.co/runwayml), and [Stability AI](https://huggingface.co/stabilityai) Hub organizations!
26
-
27
- </Tip>
28
-
29
- ## StableDiffusionPipeline
30
-
31
- [[autodoc]] StableDiffusionPipeline
32
- - all
33
- - __call__
34
- - enable_attention_slicing
35
- - disable_attention_slicing
36
- - enable_vae_slicing
37
- - disable_vae_slicing
38
- - enable_xformers_memory_efficient_attention
39
- - disable_xformers_memory_efficient_attention
40
- - enable_vae_tiling
41
- - disable_vae_tiling
42
- - load_textual_inversion
43
- - from_single_file
44
- - load_lora_weights
45
- - save_lora_weights
46
-
47
- ## StableDiffusionPipelineOutput
48
-
49
- [[autodoc]] pipelines.stable_diffusion.StableDiffusionPipelineOutput
50
-
51
- ## FlaxStableDiffusionPipeline
52
-
53
- [[autodoc]] FlaxStableDiffusionPipeline
54
- - all
55
- - __call__
56
-
57
- ## FlaxStableDiffusionPipelineOutput
58
-
59
- [[autodoc]] pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput
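
A minimal text-to-image call for the pipeline documented above, using the standard `diffusers` API (the v1-5 checkpoint id is the usual example; any compatible checkpoint works):

```py
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

image = pipe("a photograph of an astronaut riding a horse").images[0]
image.save("astronaut.png")
```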
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/stable_diffusion_controlnet_reference.py DELETED
@@ -1,834 +0,0 @@
1
- # Inspired by: https://github.com/Mikubill/sd-webui-controlnet/discussions/1236 and https://github.com/Mikubill/sd-webui-controlnet/discussions/1280
2
- from typing import Any, Callable, Dict, List, Optional, Tuple, Union
3
-
4
- import numpy as np
5
- import PIL.Image
6
- import torch
7
-
8
- from diffusers import StableDiffusionControlNetPipeline
9
- from diffusers.models import ControlNetModel
10
- from diffusers.models.attention import BasicTransformerBlock
11
- from diffusers.models.unet_2d_blocks import CrossAttnDownBlock2D, CrossAttnUpBlock2D, DownBlock2D, UpBlock2D
12
- from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
13
- from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
14
- from diffusers.utils import is_compiled_module, logging, randn_tensor
15
-
16
-
17
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
18
-
19
- EXAMPLE_DOC_STRING = """
20
- Examples:
21
- ```py
22
- >>> import cv2
23
- >>> import torch
24
- >>> import numpy as np
25
- >>> from PIL import Image
26
- >>> from diffusers import ControlNetModel, UniPCMultistepScheduler
27
- >>> from diffusers.utils import load_image
28
-
29
- >>> input_image = load_image("https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png")
30
-
31
- >>> # get canny image
32
- >>> image = cv2.Canny(np.array(input_image), 100, 200)
33
- >>> image = image[:, :, None]
34
- >>> image = np.concatenate([image, image, image], axis=2)
35
- >>> canny_image = Image.fromarray(image)
36
-
37
- >>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
38
- >>> pipe = StableDiffusionControlNetReferencePipeline.from_pretrained(
39
- "runwayml/stable-diffusion-v1-5",
40
- controlnet=controlnet,
41
- safety_checker=None,
42
- torch_dtype=torch.float16
43
- ).to('cuda:0')
44
-
45
- >>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
46
-
47
- >>> result_img = pipe(ref_image=input_image,
48
- prompt="1girl",
49
- image=canny_image,
50
- num_inference_steps=20,
51
- reference_attn=True,
52
- reference_adain=True).images[0]
53
-
54
- >>> result_img.show()
55
- ```
56
- """
57
-
58
-
59
- def torch_dfs(model: torch.nn.Module):
60
- result = [model]
61
- for child in model.children():
62
- result += torch_dfs(child)
63
- return result
64
-
65
-
66
- class StableDiffusionControlNetReferencePipeline(StableDiffusionControlNetPipeline):
67
- def prepare_ref_latents(self, refimage, batch_size, dtype, device, generator, do_classifier_free_guidance):
68
- refimage = refimage.to(device=device, dtype=dtype)
69
-
70
- # encode the mask image into latents space so we can concatenate it to the latents
71
- if isinstance(generator, list):
72
- ref_image_latents = [
73
- self.vae.encode(refimage[i : i + 1]).latent_dist.sample(generator=generator[i])
74
- for i in range(batch_size)
75
- ]
76
- ref_image_latents = torch.cat(ref_image_latents, dim=0)
77
- else:
78
- ref_image_latents = self.vae.encode(refimage).latent_dist.sample(generator=generator)
79
- ref_image_latents = self.vae.config.scaling_factor * ref_image_latents
80
-
81
- # duplicate mask and ref_image_latents for each generation per prompt, using mps friendly method
82
- if ref_image_latents.shape[0] < batch_size:
83
- if not batch_size % ref_image_latents.shape[0] == 0:
84
- raise ValueError(
85
- "The passed images and the required batch size don't match. Images are supposed to be duplicated"
86
- f" to a total batch size of {batch_size}, but {ref_image_latents.shape[0]} images were passed."
87
- " Make sure the number of images that you pass is divisible by the total requested batch size."
88
- )
89
- ref_image_latents = ref_image_latents.repeat(batch_size // ref_image_latents.shape[0], 1, 1, 1)
90
-
91
- ref_image_latents = torch.cat([ref_image_latents] * 2) if do_classifier_free_guidance else ref_image_latents
92
-
93
- # aligning device to prevent device errors when concating it with the latent model input
94
- ref_image_latents = ref_image_latents.to(device=device, dtype=dtype)
95
- return ref_image_latents
96
-
97
- @torch.no_grad()
98
- def __call__(
99
- self,
100
- prompt: Union[str, List[str]] = None,
101
- image: Union[
102
- torch.FloatTensor,
103
- PIL.Image.Image,
104
- np.ndarray,
105
- List[torch.FloatTensor],
106
- List[PIL.Image.Image],
107
- List[np.ndarray],
108
- ] = None,
109
- ref_image: Union[torch.FloatTensor, PIL.Image.Image] = None,
110
- height: Optional[int] = None,
111
- width: Optional[int] = None,
112
- num_inference_steps: int = 50,
113
- guidance_scale: float = 7.5,
114
- negative_prompt: Optional[Union[str, List[str]]] = None,
115
- num_images_per_prompt: Optional[int] = 1,
116
- eta: float = 0.0,
117
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
118
- latents: Optional[torch.FloatTensor] = None,
119
- prompt_embeds: Optional[torch.FloatTensor] = None,
120
- negative_prompt_embeds: Optional[torch.FloatTensor] = None,
121
- output_type: Optional[str] = "pil",
122
- return_dict: bool = True,
123
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
124
- callback_steps: int = 1,
125
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
126
- controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
127
- guess_mode: bool = False,
128
- attention_auto_machine_weight: float = 1.0,
129
- gn_auto_machine_weight: float = 1.0,
130
- style_fidelity: float = 0.5,
131
- reference_attn: bool = True,
132
- reference_adain: bool = True,
133
- ):
134
- r"""
135
- Function invoked when calling the pipeline for generation.
136
-
137
- Args:
138
- prompt (`str` or `List[str]`, *optional*):
139
- The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
140
- instead.
141
- image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
142
- `List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
143
- The ControlNet input condition. ControlNet uses this input condition to generate guidance to Unet. If
144
- the type is specified as `Torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can
145
- also be accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If
146
- height and/or width are passed, `image` is resized according to them. If multiple ControlNets are
147
- specified in init, images must be passed as a list such that each element of the list can be correctly
148
- batched for input to a single controlnet.
149
- ref_image (`torch.FloatTensor`, `PIL.Image.Image`):
150
- The Reference Control input condition. Reference Control uses this input condition to generate guidance to Unet. If
151
- the type is specified as `Torch.FloatTensor`, it is passed to Reference Control as is. `PIL.Image.Image` can
152
- also be accepted as an image.
153
- height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
154
- The height in pixels of the generated image.
155
- width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
156
- The width in pixels of the generated image.
157
- num_inference_steps (`int`, *optional*, defaults to 50):
158
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
159
- expense of slower inference.
160
- guidance_scale (`float`, *optional*, defaults to 7.5):
161
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
162
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
163
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
164
- 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
165
- usually at the expense of lower image quality.
166
- negative_prompt (`str` or `List[str]`, *optional*):
167
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
168
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
169
- less than `1`).
170
- num_images_per_prompt (`int`, *optional*, defaults to 1):
171
- The number of images to generate per prompt.
172
- eta (`float`, *optional*, defaults to 0.0):
173
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
174
- [`schedulers.DDIMScheduler`], will be ignored for others.
175
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
176
- One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
177
- to make generation deterministic.
178
- latents (`torch.FloatTensor`, *optional*):
179
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
180
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
181
- tensor will be generated by sampling using the supplied random `generator`.
182
- prompt_embeds (`torch.FloatTensor`, *optional*):
183
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
184
- provided, text embeddings will be generated from `prompt` input argument.
185
- negative_prompt_embeds (`torch.FloatTensor`, *optional*):
186
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
187
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
188
- argument.
189
- output_type (`str`, *optional*, defaults to `"pil"`):
190
- The output format of the generate image. Choose between
191
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
192
- return_dict (`bool`, *optional*, defaults to `True`):
193
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
194
- plain tuple.
195
- callback (`Callable`, *optional*):
196
- A function that will be called every `callback_steps` steps during inference. The function will be
197
- called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
198
- callback_steps (`int`, *optional*, defaults to 1):
199
- The frequency at which the `callback` function will be called. If not specified, the callback will be
200
- called at every step.
201
- cross_attention_kwargs (`dict`, *optional*):
202
- A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
203
- `self.processor` in
204
- [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
205
- controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
206
- The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added
207
- to the residual in the original unet. If multiple ControlNets are specified in init, you can set the
208
- corresponding scale as a list.
209
- guess_mode (`bool`, *optional*, defaults to `False`):
210
- In this mode, the ControlNet encoder will try best to recognize the content of the input image even if
211
- you remove all prompts. The `guidance_scale` between 3.0 and 5.0 is recommended.
212
- attention_auto_machine_weight (`float`):
213
- Weight of using reference query for self attention's context.
214
- If attention_auto_machine_weight=1.0, use reference query for all self attention's context.
215
- gn_auto_machine_weight (`float`):
216
- Weight of using reference adain. If gn_auto_machine_weight=2.0, use all reference adain plugins.
217
- style_fidelity (`float`):
218
- Style fidelity of ref_uncond_xt. If style_fidelity=1.0, the control is more important;
219
- if style_fidelity=0.0, the prompt is more important; values in between balance the two.
220
- reference_attn (`bool`):
221
- Whether to use reference query for self attention's context.
222
- reference_adain (`bool`):
223
- Whether to use reference adain.
224
-
225
- Examples:
226
-
227
- Returns:
228
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
229
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is `True`, otherwise a `tuple`.
230
- When returning a tuple, the first element is a list with the generated images, and the second element is a
231
- list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
232
- (nsfw) content, according to the `safety_checker`.
233
- """
234
- assert reference_attn or reference_adain, "`reference_attn` or `reference_adain` must be True."
235
-
236
- # 1. Check inputs. Raise error if not correct
237
- self.check_inputs(
238
- prompt,
239
- image,
240
- callback_steps,
241
- negative_prompt,
242
- prompt_embeds,
243
- negative_prompt_embeds,
244
- controlnet_conditioning_scale,
245
- )
246
-
247
- # 2. Define call parameters
248
- if prompt is not None and isinstance(prompt, str):
249
- batch_size = 1
250
- elif prompt is not None and isinstance(prompt, list):
251
- batch_size = len(prompt)
252
- else:
253
- batch_size = prompt_embeds.shape[0]
254
-
255
- device = self._execution_device
256
- # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
257
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
258
- # corresponds to doing no classifier free guidance.
259
- do_classifier_free_guidance = guidance_scale > 1.0
260
-
261
- controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
262
-
263
- if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
264
- controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
265
-
266
- global_pool_conditions = (
267
- controlnet.config.global_pool_conditions
268
- if isinstance(controlnet, ControlNetModel)
269
- else controlnet.nets[0].config.global_pool_conditions
270
- )
271
- guess_mode = guess_mode or global_pool_conditions
272
-
273
- # 3. Encode input prompt
274
- text_encoder_lora_scale = (
275
- cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
276
- )
277
- prompt_embeds = self._encode_prompt(
278
- prompt,
279
- device,
280
- num_images_per_prompt,
281
- do_classifier_free_guidance,
282
- negative_prompt,
283
- prompt_embeds=prompt_embeds,
284
- negative_prompt_embeds=negative_prompt_embeds,
285
- lora_scale=text_encoder_lora_scale,
286
- )
287
-
288
- # 4. Prepare image
289
- if isinstance(controlnet, ControlNetModel):
290
- image = self.prepare_image(
291
- image=image,
292
- width=width,
293
- height=height,
294
- batch_size=batch_size * num_images_per_prompt,
295
- num_images_per_prompt=num_images_per_prompt,
296
- device=device,
297
- dtype=controlnet.dtype,
298
- do_classifier_free_guidance=do_classifier_free_guidance,
299
- guess_mode=guess_mode,
300
- )
301
- height, width = image.shape[-2:]
302
- elif isinstance(controlnet, MultiControlNetModel):
303
- images = []
304
-
305
- for image_ in image:
306
- image_ = self.prepare_image(
307
- image=image_,
308
- width=width,
309
- height=height,
310
- batch_size=batch_size * num_images_per_prompt,
311
- num_images_per_prompt=num_images_per_prompt,
312
- device=device,
313
- dtype=controlnet.dtype,
314
- do_classifier_free_guidance=do_classifier_free_guidance,
315
- guess_mode=guess_mode,
316
- )
317
-
318
- images.append(image_)
319
-
320
- image = images
321
- height, width = image[0].shape[-2:]
322
- else:
323
- assert False
324
-
325
- # 5. Preprocess reference image
326
- ref_image = self.prepare_image(
327
- image=ref_image,
328
- width=width,
329
- height=height,
330
- batch_size=batch_size * num_images_per_prompt,
331
- num_images_per_prompt=num_images_per_prompt,
332
- device=device,
333
- dtype=prompt_embeds.dtype,
334
- )
335
-
336
- # 6. Prepare timesteps
337
- self.scheduler.set_timesteps(num_inference_steps, device=device)
338
- timesteps = self.scheduler.timesteps
339
-
340
- # 7. Prepare latent variables
341
- num_channels_latents = self.unet.config.in_channels
342
- latents = self.prepare_latents(
343
- batch_size * num_images_per_prompt,
344
- num_channels_latents,
345
- height,
346
- width,
347
- prompt_embeds.dtype,
348
- device,
349
- generator,
350
- latents,
351
- )
352
-
353
- # 8. Prepare reference latent variables
354
- ref_image_latents = self.prepare_ref_latents(
355
- ref_image,
356
- batch_size * num_images_per_prompt,
357
- prompt_embeds.dtype,
358
- device,
359
- generator,
360
- do_classifier_free_guidance,
361
- )
362
-
363
- # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
364
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
365
-
366
- # 9. Modify self attention and group norm
367
- MODE = "write"
368
- uc_mask = (
369
- torch.Tensor([1] * batch_size * num_images_per_prompt + [0] * batch_size * num_images_per_prompt)
370
- .type_as(ref_image_latents)
371
- .bool()
372
- )
373
-
374
- def hacked_basic_transformer_inner_forward(
375
- self,
376
- hidden_states: torch.FloatTensor,
377
- attention_mask: Optional[torch.FloatTensor] = None,
378
- encoder_hidden_states: Optional[torch.FloatTensor] = None,
379
- encoder_attention_mask: Optional[torch.FloatTensor] = None,
380
- timestep: Optional[torch.LongTensor] = None,
381
- cross_attention_kwargs: Dict[str, Any] = None,
382
- class_labels: Optional[torch.LongTensor] = None,
383
- ):
384
- if self.use_ada_layer_norm:
385
- norm_hidden_states = self.norm1(hidden_states, timestep)
386
- elif self.use_ada_layer_norm_zero:
387
- norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
388
- hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
389
- )
390
- else:
391
- norm_hidden_states = self.norm1(hidden_states)
392
-
393
- # 1. Self-Attention
394
- cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
395
- if self.only_cross_attention:
396
- attn_output = self.attn1(
397
- norm_hidden_states,
398
- encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
399
- attention_mask=attention_mask,
400
- **cross_attention_kwargs,
401
- )
402
- else:
403
- if MODE == "write":
404
- self.bank.append(norm_hidden_states.detach().clone())
405
- attn_output = self.attn1(
406
- norm_hidden_states,
407
- encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
408
- attention_mask=attention_mask,
409
- **cross_attention_kwargs,
410
- )
411
- if MODE == "read":
412
- if attention_auto_machine_weight > self.attn_weight:
413
- attn_output_uc = self.attn1(
414
- norm_hidden_states,
415
- encoder_hidden_states=torch.cat([norm_hidden_states] + self.bank, dim=1),
416
- # attention_mask=attention_mask,
417
- **cross_attention_kwargs,
418
- )
419
- attn_output_c = attn_output_uc.clone()
420
- if do_classifier_free_guidance and style_fidelity > 0:
421
- attn_output_c[uc_mask] = self.attn1(
422
- norm_hidden_states[uc_mask],
423
- encoder_hidden_states=norm_hidden_states[uc_mask],
424
- **cross_attention_kwargs,
425
- )
426
- attn_output = style_fidelity * attn_output_c + (1.0 - style_fidelity) * attn_output_uc
427
- self.bank.clear()
428
- else:
429
- attn_output = self.attn1(
430
- norm_hidden_states,
431
- encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
432
- attention_mask=attention_mask,
433
- **cross_attention_kwargs,
434
- )
435
- if self.use_ada_layer_norm_zero:
436
- attn_output = gate_msa.unsqueeze(1) * attn_output
437
- hidden_states = attn_output + hidden_states
438
-
439
- if self.attn2 is not None:
440
- norm_hidden_states = (
441
- self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
442
- )
443
-
444
- # 2. Cross-Attention
445
- attn_output = self.attn2(
446
- norm_hidden_states,
447
- encoder_hidden_states=encoder_hidden_states,
448
- attention_mask=encoder_attention_mask,
449
- **cross_attention_kwargs,
450
- )
451
- hidden_states = attn_output + hidden_states
452
-
453
- # 3. Feed-forward
454
- norm_hidden_states = self.norm3(hidden_states)
455
-
456
- if self.use_ada_layer_norm_zero:
457
- norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
458
-
459
- ff_output = self.ff(norm_hidden_states)
460
-
461
- if self.use_ada_layer_norm_zero:
462
- ff_output = gate_mlp.unsqueeze(1) * ff_output
463
-
464
- hidden_states = ff_output + hidden_states
465
-
466
- return hidden_states
467
-
468
- def hacked_mid_forward(self, *args, **kwargs):
469
- eps = 1e-6
470
- x = self.original_forward(*args, **kwargs)
471
- if MODE == "write":
472
- if gn_auto_machine_weight >= self.gn_weight:
473
- var, mean = torch.var_mean(x, dim=(2, 3), keepdim=True, correction=0)
474
- self.mean_bank.append(mean)
475
- self.var_bank.append(var)
476
- if MODE == "read":
477
- if len(self.mean_bank) > 0 and len(self.var_bank) > 0:
478
- var, mean = torch.var_mean(x, dim=(2, 3), keepdim=True, correction=0)
479
- std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5
480
- mean_acc = sum(self.mean_bank) / float(len(self.mean_bank))
481
- var_acc = sum(self.var_bank) / float(len(self.var_bank))
482
- std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5
483
- x_uc = (((x - mean) / std) * std_acc) + mean_acc
484
- x_c = x_uc.clone()
485
- if do_classifier_free_guidance and style_fidelity > 0:
486
- x_c[uc_mask] = x[uc_mask]
487
- x = style_fidelity * x_c + (1.0 - style_fidelity) * x_uc
488
- self.mean_bank = []
489
- self.var_bank = []
490
- return x
491
-
492
- def hack_CrossAttnDownBlock2D_forward(
493
- self,
494
- hidden_states: torch.FloatTensor,
495
- temb: Optional[torch.FloatTensor] = None,
496
- encoder_hidden_states: Optional[torch.FloatTensor] = None,
497
- attention_mask: Optional[torch.FloatTensor] = None,
498
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
499
- encoder_attention_mask: Optional[torch.FloatTensor] = None,
500
- ):
501
- eps = 1e-6
502
-
503
- # TODO(Patrick, William) - attention mask is not used
504
- output_states = ()
505
-
506
- for i, (resnet, attn) in enumerate(zip(self.resnets, self.attentions)):
507
- hidden_states = resnet(hidden_states, temb)
508
- hidden_states = attn(
509
- hidden_states,
510
- encoder_hidden_states=encoder_hidden_states,
511
- cross_attention_kwargs=cross_attention_kwargs,
512
- attention_mask=attention_mask,
513
- encoder_attention_mask=encoder_attention_mask,
514
- return_dict=False,
515
- )[0]
516
- if MODE == "write":
517
- if gn_auto_machine_weight >= self.gn_weight:
518
- var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
519
- self.mean_bank.append([mean])
520
- self.var_bank.append([var])
521
- if MODE == "read":
522
- if len(self.mean_bank) > 0 and len(self.var_bank) > 0:
523
- var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
524
- std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5
525
- mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))
526
- var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))
527
- std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5
528
- hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc
529
- hidden_states_c = hidden_states_uc.clone()
530
- if do_classifier_free_guidance and style_fidelity > 0:
531
- hidden_states_c[uc_mask] = hidden_states[uc_mask]
532
- hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc
533
-
534
- output_states = output_states + (hidden_states,)
535
-
536
- if MODE == "read":
537
- self.mean_bank = []
538
- self.var_bank = []
539
-
540
- if self.downsamplers is not None:
541
- for downsampler in self.downsamplers:
542
- hidden_states = downsampler(hidden_states)
543
-
544
- output_states = output_states + (hidden_states,)
545
-
546
- return hidden_states, output_states
547
-
548
- def hacked_DownBlock2D_forward(self, hidden_states, temb=None):
549
- eps = 1e-6
550
-
551
- output_states = ()
552
-
553
- for i, resnet in enumerate(self.resnets):
554
- hidden_states = resnet(hidden_states, temb)
555
-
556
- if MODE == "write":
557
- if gn_auto_machine_weight >= self.gn_weight:
558
- var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
559
- self.mean_bank.append([mean])
560
- self.var_bank.append([var])
561
- if MODE == "read":
562
- if len(self.mean_bank) > 0 and len(self.var_bank) > 0:
563
- var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
564
- std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5
565
- mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))
566
- var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))
567
- std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5
568
- hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc
569
- hidden_states_c = hidden_states_uc.clone()
570
- if do_classifier_free_guidance and style_fidelity > 0:
571
- hidden_states_c[uc_mask] = hidden_states[uc_mask]
572
- hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc
573
-
574
- output_states = output_states + (hidden_states,)
575
-
576
- if MODE == "read":
577
- self.mean_bank = []
578
- self.var_bank = []
579
-
580
- if self.downsamplers is not None:
581
- for downsampler in self.downsamplers:
582
- hidden_states = downsampler(hidden_states)
583
-
584
- output_states = output_states + (hidden_states,)
585
-
586
- return hidden_states, output_states
587
-
588
- def hacked_CrossAttnUpBlock2D_forward(
589
- self,
590
- hidden_states: torch.FloatTensor,
591
- res_hidden_states_tuple: Tuple[torch.FloatTensor, ...],
592
- temb: Optional[torch.FloatTensor] = None,
593
- encoder_hidden_states: Optional[torch.FloatTensor] = None,
594
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
595
- upsample_size: Optional[int] = None,
596
- attention_mask: Optional[torch.FloatTensor] = None,
597
- encoder_attention_mask: Optional[torch.FloatTensor] = None,
598
- ):
599
- eps = 1e-6
600
- # TODO(Patrick, William) - attention mask is not used
601
- for i, (resnet, attn) in enumerate(zip(self.resnets, self.attentions)):
602
- # pop res hidden states
603
- res_hidden_states = res_hidden_states_tuple[-1]
604
- res_hidden_states_tuple = res_hidden_states_tuple[:-1]
605
- hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
606
- hidden_states = resnet(hidden_states, temb)
607
- hidden_states = attn(
608
- hidden_states,
609
- encoder_hidden_states=encoder_hidden_states,
610
- cross_attention_kwargs=cross_attention_kwargs,
611
- attention_mask=attention_mask,
612
- encoder_attention_mask=encoder_attention_mask,
613
- return_dict=False,
614
- )[0]
615
-
616
- if MODE == "write":
617
- if gn_auto_machine_weight >= self.gn_weight:
618
- var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
619
- self.mean_bank.append([mean])
620
- self.var_bank.append([var])
621
- if MODE == "read":
622
- if len(self.mean_bank) > 0 and len(self.var_bank) > 0:
623
- var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
624
- std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5
625
- mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))
626
- var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))
627
- std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5
628
- hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc
629
- hidden_states_c = hidden_states_uc.clone()
630
- if do_classifier_free_guidance and style_fidelity > 0:
631
- hidden_states_c[uc_mask] = hidden_states[uc_mask]
632
- hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc
633
-
634
- if MODE == "read":
635
- self.mean_bank = []
636
- self.var_bank = []
637
-
638
- if self.upsamplers is not None:
639
- for upsampler in self.upsamplers:
640
- hidden_states = upsampler(hidden_states, upsample_size)
641
-
642
- return hidden_states
643
-
644
- def hacked_UpBlock2D_forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None):
645
- eps = 1e-6
646
- for i, resnet in enumerate(self.resnets):
647
- # pop res hidden states
648
- res_hidden_states = res_hidden_states_tuple[-1]
649
- res_hidden_states_tuple = res_hidden_states_tuple[:-1]
650
- hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
651
- hidden_states = resnet(hidden_states, temb)
652
-
653
- if MODE == "write":
654
- if gn_auto_machine_weight >= self.gn_weight:
655
- var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
656
- self.mean_bank.append([mean])
657
- self.var_bank.append([var])
658
- if MODE == "read":
659
- if len(self.mean_bank) > 0 and len(self.var_bank) > 0:
660
- var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
661
- std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5
662
- mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))
663
- var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))
664
- std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5
665
- hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc
666
- hidden_states_c = hidden_states_uc.clone()
667
- if do_classifier_free_guidance and style_fidelity > 0:
668
- hidden_states_c[uc_mask] = hidden_states[uc_mask]
669
- hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc
670
-
671
- if MODE == "read":
672
- self.mean_bank = []
673
- self.var_bank = []
674
-
675
- if self.upsamplers is not None:
676
- for upsampler in self.upsamplers:
677
- hidden_states = upsampler(hidden_states, upsample_size)
678
-
679
- return hidden_states
680
-
681
- if reference_attn:
682
- attn_modules = [module for module in torch_dfs(self.unet) if isinstance(module, BasicTransformerBlock)]
683
- attn_modules = sorted(attn_modules, key=lambda x: -x.norm1.normalized_shape[0])
684
-
685
- for i, module in enumerate(attn_modules):
686
- module._original_inner_forward = module.forward
687
- module.forward = hacked_basic_transformer_inner_forward.__get__(module, BasicTransformerBlock)
688
- module.bank = []
689
- module.attn_weight = float(i) / float(len(attn_modules))
690
-
691
- if reference_adain:
692
- gn_modules = [self.unet.mid_block]
693
- self.unet.mid_block.gn_weight = 0
694
-
695
- down_blocks = self.unet.down_blocks
696
- for w, module in enumerate(down_blocks):
697
- module.gn_weight = 1.0 - float(w) / float(len(down_blocks))
698
- gn_modules.append(module)
699
-
700
- up_blocks = self.unet.up_blocks
701
- for w, module in enumerate(up_blocks):
702
- module.gn_weight = float(w) / float(len(up_blocks))
703
- gn_modules.append(module)
704
-
705
- for i, module in enumerate(gn_modules):
706
- if getattr(module, "original_forward", None) is None:
707
- module.original_forward = module.forward
708
- if i == 0:
709
- # mid_block
710
- module.forward = hacked_mid_forward.__get__(module, torch.nn.Module)
711
- elif isinstance(module, CrossAttnDownBlock2D):
712
- module.forward = hack_CrossAttnDownBlock2D_forward.__get__(module, CrossAttnDownBlock2D)
713
- elif isinstance(module, DownBlock2D):
714
- module.forward = hacked_DownBlock2D_forward.__get__(module, DownBlock2D)
715
- elif isinstance(module, CrossAttnUpBlock2D):
716
- module.forward = hacked_CrossAttnUpBlock2D_forward.__get__(module, CrossAttnUpBlock2D)
717
- elif isinstance(module, UpBlock2D):
718
- module.forward = hacked_UpBlock2D_forward.__get__(module, UpBlock2D)
719
- module.mean_bank = []
720
- module.var_bank = []
721
- module.gn_weight *= 2
722
-
723
- # 11. Denoising loop
724
- num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
725
- with self.progress_bar(total=num_inference_steps) as progress_bar:
726
- for i, t in enumerate(timesteps):
727
- # expand the latents if we are doing classifier free guidance
728
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
729
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
730
-
731
- # controlnet(s) inference
732
- if guess_mode and do_classifier_free_guidance:
733
- # Infer ControlNet only for the conditional batch.
734
- control_model_input = latents
735
- control_model_input = self.scheduler.scale_model_input(control_model_input, t)
736
- controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
737
- else:
738
- control_model_input = latent_model_input
739
- controlnet_prompt_embeds = prompt_embeds
740
-
741
- down_block_res_samples, mid_block_res_sample = self.controlnet(
742
- control_model_input,
743
- t,
744
- encoder_hidden_states=controlnet_prompt_embeds,
745
- controlnet_cond=image,
746
- conditioning_scale=controlnet_conditioning_scale,
747
- guess_mode=guess_mode,
748
- return_dict=False,
749
- )
750
-
751
- if guess_mode and do_classifier_free_guidance:
752
- # Inferred ControlNet only for the conditional batch.
753
- # To apply the output of ControlNet to both the unconditional and conditional batches,
754
- # add 0 to the unconditional batch to keep it unchanged.
755
- down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
756
- mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])
757
-
758
- # ref only part
759
- noise = randn_tensor(
760
- ref_image_latents.shape, generator=generator, device=device, dtype=ref_image_latents.dtype
761
- )
762
- ref_xt = self.scheduler.add_noise(
763
- ref_image_latents,
764
- noise,
765
- t.reshape(
766
- 1,
767
- ),
768
- )
769
- ref_xt = self.scheduler.scale_model_input(ref_xt, t)
770
-
771
- MODE = "write"
772
- self.unet(
773
- ref_xt,
774
- t,
775
- encoder_hidden_states=prompt_embeds,
776
- cross_attention_kwargs=cross_attention_kwargs,
777
- return_dict=False,
778
- )
779
-
780
- # predict the noise residual
781
- MODE = "read"
782
- noise_pred = self.unet(
783
- latent_model_input,
784
- t,
785
- encoder_hidden_states=prompt_embeds,
786
- cross_attention_kwargs=cross_attention_kwargs,
787
- down_block_additional_residuals=down_block_res_samples,
788
- mid_block_additional_residual=mid_block_res_sample,
789
- return_dict=False,
790
- )[0]
791
-
792
- # perform guidance
793
- if do_classifier_free_guidance:
794
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
795
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
796
-
797
- # compute the previous noisy sample x_t -> x_t-1
798
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
799
-
800
- # call the callback, if provided
801
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
802
- progress_bar.update()
803
- if callback is not None and i % callback_steps == 0:
804
- callback(i, t, latents)
805
-
806
- # If we do sequential model offloading, let's offload unet and controlnet
807
- # manually for max memory savings
808
- if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
809
- self.unet.to("cpu")
810
- self.controlnet.to("cpu")
811
- torch.cuda.empty_cache()
812
-
813
- if not output_type == "latent":
814
- image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
815
- image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
816
- else:
817
- image = latents
818
- has_nsfw_concept = None
819
-
820
- if has_nsfw_concept is None:
821
- do_denormalize = [True] * image.shape[0]
822
- else:
823
- do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
824
-
825
- image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
826
-
827
- # Offload last model to CPU
828
- if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
829
- self.final_offload_hook.offload()
830
-
831
- if not return_dict:
832
- return (image, has_nsfw_concept)
833
-
834
- return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
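
The reference-only hooks above all rely on the same trick: a plain function is bound to an existing module instance with `__get__`, replacing its `forward` while keeping access to per-module state. A self-contained sketch of that pattern (the `Block` class and the bank side effect are illustrative stand-ins for the UNet blocks hooked above):

```py
import torch

class Block(torch.nn.Module):
    def forward(self, x):
        return x + 1

def hacked_forward(self, x):
    # call the stashed original, then record the activation, mirroring the
    # "write"-mode banks used by the pipeline above
    out = self.original_forward(x)
    self.bank.append(out.detach().clone())
    return out

block = Block()
block.original_forward = block.forward                 # stash the bound original
block.bank = []
block.forward = hacked_forward.__get__(block, Block)   # rebind as a method of this instance

print(block(torch.zeros(2)))  # tensor([1., 1.])
print(len(block.bank))        # 1
```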
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/kandinsky2_2/__init__.py DELETED
@@ -1,25 +0,0 @@
1
- from ...utils import (
2
- OptionalDependencyNotAvailable,
3
- is_torch_available,
4
- is_transformers_available,
5
- )
6
-
7
-
8
- try:
9
- if not (is_transformers_available() and is_torch_available()):
10
- raise OptionalDependencyNotAvailable()
11
- except OptionalDependencyNotAvailable:
12
- from ...utils.dummy_torch_and_transformers_objects import *
13
- else:
14
- from .pipeline_kandinsky2_2 import KandinskyV22Pipeline
15
- from .pipeline_kandinsky2_2_combined import (
16
- KandinskyV22CombinedPipeline,
17
- KandinskyV22Img2ImgCombinedPipeline,
18
- KandinskyV22InpaintCombinedPipeline,
19
- )
20
- from .pipeline_kandinsky2_2_controlnet import KandinskyV22ControlnetPipeline
21
- from .pipeline_kandinsky2_2_controlnet_img2img import KandinskyV22ControlnetImg2ImgPipeline
22
- from .pipeline_kandinsky2_2_img2img import KandinskyV22Img2ImgPipeline
23
- from .pipeline_kandinsky2_2_inpainting import KandinskyV22InpaintPipeline
24
- from .pipeline_kandinsky2_2_prior import KandinskyV22PriorPipeline
25
- from .pipeline_kandinsky2_2_prior_emb2emb import KandinskyV22PriorEmb2EmbPipeline
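
For context, the combined pipeline exported here wraps the prior and decoder stages behind a single call; a typical invocation looks like the sketch below (the hub id is the commonly used kandinsky-community decoder checkpoint and is an assumption here, not part of this file):

```py
import torch
from diffusers import KandinskyV22CombinedPipeline

pipe = KandinskyV22CombinedPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
).to("cuda")

image = pipe("a cinematic photo of a red panda, highly detailed").images[0]
image.save("panda.png")
```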
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/spectrogram_diffusion/pipeline_spectrogram_diffusion.py DELETED
@@ -1,267 +0,0 @@
1
- # Copyright 2022 The Music Spectrogram Diffusion Authors.
2
- # Copyright 2023 The HuggingFace Team. All rights reserved.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- import math
17
- from typing import Any, Callable, List, Optional, Tuple, Union
18
-
19
- import numpy as np
20
- import torch
21
-
22
- from ...models import T5FilmDecoder
23
- from ...schedulers import DDPMScheduler
24
- from ...utils import is_onnx_available, logging, randn_tensor
25
-
26
-
27
- if is_onnx_available():
28
- from ..onnx_utils import OnnxRuntimeModel
29
-
30
- from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
31
- from .continous_encoder import SpectrogramContEncoder
32
- from .notes_encoder import SpectrogramNotesEncoder
33
-
34
-
35
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
36
-
37
- TARGET_FEATURE_LENGTH = 256
38
-
39
-
40
- class SpectrogramDiffusionPipeline(DiffusionPipeline):
41
- r"""
42
- Pipeline for generating audio spectrograms from MIDI note tokens.
43
-
44
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
45
- implemented for all pipelines (downloading, saving, running on a particular device, etc.).
46
-
47
- Args:
48
- notes_encoder ([`SpectrogramNotesEncoder`]):
49
- continuous_encoder ([`SpectrogramContEncoder`]):
50
- decoder ([`T5FilmDecoder`]):
51
- A [`T5FilmDecoder`] to denoise the encoded audio latents.
52
- scheduler ([`DDPMScheduler`]):
53
- A scheduler to be used in combination with `decoder` to denoise the encoded audio latents.
54
- melgan ([`OnnxRuntimeModel`]):
55
- """
56
- _optional_components = ["melgan"]
57
-
58
- def __init__(
59
- self,
60
- notes_encoder: SpectrogramNotesEncoder,
61
- continuous_encoder: SpectrogramContEncoder,
62
- decoder: T5FilmDecoder,
63
- scheduler: DDPMScheduler,
64
- melgan: OnnxRuntimeModel if is_onnx_available() else Any,
65
- ) -> None:
66
- super().__init__()
67
-
68
- # From MELGAN
69
- self.min_value = math.log(1e-5) # Matches MelGAN training.
70
- self.max_value = 4.0 # Largest value for most examples
71
- self.n_dims = 128
72
-
73
- self.register_modules(
74
- notes_encoder=notes_encoder,
75
- continuous_encoder=continuous_encoder,
76
- decoder=decoder,
77
- scheduler=scheduler,
78
- melgan=melgan,
79
- )
80
-
81
- def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
82
- """Linearly scale features to network outputs range."""
83
- min_out, max_out = output_range
84
- if clip:
85
- features = torch.clip(features, self.min_value, self.max_value)
86
- # Scale to [0, 1].
87
- zero_one = (features - self.min_value) / (self.max_value - self.min_value)
88
- # Scale to [min_out, max_out].
89
- return zero_one * (max_out - min_out) + min_out
90
-
91
- def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
92
- """Invert by linearly scaling network outputs to features range."""
93
- min_out, max_out = input_range
94
- outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
95
- # Scale to [0, 1].
96
- zero_one = (outputs - min_out) / (max_out - min_out)
97
- # Scale to [self.min_value, self.max_value].
98
- return zero_one * (self.max_value - self.min_value) + self.min_value
99
-
100
- def encode(self, input_tokens, continuous_inputs, continuous_mask):
101
- tokens_mask = input_tokens > 0
102
- tokens_encoded, tokens_mask = self.notes_encoder(
103
- encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
104
- )
105
-
106
- continuous_encoded, continuous_mask = self.continuous_encoder(
107
- encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
108
- )
109
-
110
- return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
111
-
112
- def decode(self, encodings_and_masks, input_tokens, noise_time):
113
- timesteps = noise_time
114
- if not torch.is_tensor(timesteps):
115
- timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
116
- elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
117
- timesteps = timesteps[None].to(input_tokens.device)
118
-
119
- # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
120
- timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)
121
-
122
- logits = self.decoder(
123
- encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps
124
- )
125
- return logits
126
-
127
- @torch.no_grad()
128
- def __call__(
129
- self,
130
- input_tokens: List[List[int]],
131
- generator: Optional[torch.Generator] = None,
132
- num_inference_steps: int = 100,
133
- return_dict: bool = True,
134
- output_type: str = "numpy",
135
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
136
- callback_steps: int = 1,
137
- ) -> Union[AudioPipelineOutput, Tuple]:
138
- if (callback_steps is None) or (
139
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
140
- ):
141
- raise ValueError(
142
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
143
- f" {type(callback_steps)}."
144
- )
145
- r"""
146
- The call function to the pipeline for generation.
147
-
148
- Args:
149
- input_tokens (`List[List[int]]`):
150
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
151
- A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
152
- generation deterministic.
153
- num_inference_steps (`int`, *optional*, defaults to 100):
154
- The number of denoising steps. More denoising steps usually lead to a higher quality audio at the
155
- expense of slower inference.
156
- return_dict (`bool`, *optional*, defaults to `True`):
157
- Whether or not to return a [`~pipelines.AudioPipelineOutput`] instead of a plain tuple.
158
- output_type (`str`, *optional*, defaults to `"numpy"`):
159
- The output format of the generated audio.
160
- callback (`Callable`, *optional*):
161
- A function that calls every `callback_steps` steps during inference. The function is called with the
162
- following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
163
- callback_steps (`int`, *optional*, defaults to 1):
164
- The frequency at which the `callback` function is called. If not specified, the callback is called at
165
- every step.
166
-
167
- Example:
168
-
169
- ```py
170
- >>> from diffusers import SpectrogramDiffusionPipeline, MidiProcessor
171
-
172
- >>> pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
173
- >>> pipe = pipe.to("cuda")
174
- >>> processor = MidiProcessor()
175
-
176
- >>> # Download MIDI from: wget http://www.piano-midi.de/midis/beethoven/beethoven_hammerklavier_2.mid
177
- >>> output = pipe(processor("beethoven_hammerklavier_2.mid"))
178
-
179
- >>> audio = output.audios[0]
180
- ```
181
-
182
- Returns:
183
- [`pipelines.AudioPipelineOutput`] or `tuple`:
184
- If `return_dict` is `True`, [`pipelines.AudioPipelineOutput`] is returned, otherwise a `tuple` is
185
- returned where the first element is a list with the generated audio.
186
- """
187
-
188
- pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
189
- full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
190
- ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
191
-
192
- for i, encoder_input_tokens in enumerate(input_tokens):
193
- if i == 0:
194
- encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
195
- device=self.device, dtype=self.decoder.dtype
196
- )
197
- # The first chunk has no previous context.
198
- encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
199
- else:
200
- # The full song pipeline does not feed in a context feature, so the mask
201
- # will be all 0s after the feature converter. Because we know we're
202
- # feeding in a full context chunk from the previous prediction, set it
203
- # to all 1s.
204
- encoder_continuous_mask = ones
205
-
206
- encoder_continuous_inputs = self.scale_features(
207
- encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True
208
- )
209
-
210
- encodings_and_masks = self.encode(
211
- input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
212
- continuous_inputs=encoder_continuous_inputs,
213
- continuous_mask=encoder_continuous_mask,
214
- )
215
-
216
- # Sample Gaussian noise with the same shape as encoder_continuous_inputs to begin the denoising loop
217
- x = randn_tensor(
218
- shape=encoder_continuous_inputs.shape,
219
- generator=generator,
220
- device=self.device,
221
- dtype=self.decoder.dtype,
222
- )
223
-
224
- # set step values
225
- self.scheduler.set_timesteps(num_inference_steps)
226
-
227
- # Denoising diffusion loop
228
- for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
229
- output = self.decode(
230
- encodings_and_masks=encodings_and_masks,
231
- input_tokens=x,
232
- noise_time=t / self.scheduler.config.num_train_timesteps, # rescale to [0, 1)
233
- )
234
-
235
- # Compute previous output: x_t -> x_t-1
236
- x = self.scheduler.step(output, t, x, generator=generator).prev_sample
237
-
238
- mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
239
- encoder_continuous_inputs = mel[:1]
240
- pred_mel = mel.cpu().float().numpy()
241
-
242
- full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)
243
-
244
- # call the callback, if provided
245
- if callback is not None and i % callback_steps == 0:
246
- callback(i, full_pred_mel)
247
-
248
- logger.info("Generated segment %d", i)
249
-
250
- if output_type == "numpy" and not is_onnx_available():
251
- raise ValueError(
252
- "Cannot return output in 'numpy' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'."
253
- )
254
- elif output_type == "numpy" and self.melgan is None:
255
- raise ValueError(
256
- "Cannot return output in 'numpy' format if the melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'."
257
- )
258
-
259
- if output_type == "numpy":
260
- output = self.melgan(input_features=full_pred_mel.astype(np.float32))
261
- else:
262
- output = full_pred_mel
263
-
264
- if not return_dict:
265
- return (output,)
266
-
267
- return AudioPipelineOutput(audios=output)
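For reference, `scale_features` and `scale_to_features` above are just a linear map between the MelGAN log-mel range and the network's [-1, 1] range, and the second inverts the first. A minimal standalone sketch of that round trip, assuming only `torch`; the constants mirror the ones hard-coded in the deleted pipeline and the tensor shape is illustrative:

```py
import math
import torch

# Log-mel range used by the pipeline above (matches MelGAN training).
MIN_VALUE = math.log(1e-5)
MAX_VALUE = 4.0

def scale_features(features, output_range=(-1.0, 1.0), clip=False):
    # Linearly map [MIN_VALUE, MAX_VALUE] -> output_range.
    min_out, max_out = output_range
    if clip:
        features = torch.clip(features, MIN_VALUE, MAX_VALUE)
    zero_one = (features - MIN_VALUE) / (MAX_VALUE - MIN_VALUE)
    return zero_one * (max_out - min_out) + min_out

def scale_to_features(outputs, input_range=(-1.0, 1.0)):
    # Inverse map: network outputs back to the log-mel range.
    min_out, max_out = input_range
    zero_one = (outputs - min_out) / (max_out - min_out)
    return zero_one * (MAX_VALUE - MIN_VALUE) + MIN_VALUE

# Scaling followed by un-scaling recovers the original features.
mel = torch.empty(1, 256, 128).uniform_(MIN_VALUE, MAX_VALUE)
assert torch.allclose(scale_to_features(scale_features(mel)), mel, atol=1e-4)
```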
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py DELETED
@@ -1,645 +0,0 @@
1
- # Copyright 2023 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- import inspect
16
- from typing import Any, Callable, Dict, List, Optional, Union
17
-
18
- import numpy as np
19
- import torch
20
- from transformers import CLIPTextModel, CLIPTokenizer
21
-
22
- from ...loaders import LoraLoaderMixin, TextualInversionLoaderMixin
23
- from ...models import AutoencoderKL, UNet3DConditionModel
24
- from ...schedulers import KarrasDiffusionSchedulers
25
- from ...utils import (
26
- is_accelerate_available,
27
- is_accelerate_version,
28
- logging,
29
- randn_tensor,
30
- replace_example_docstring,
31
- )
32
- from ..pipeline_utils import DiffusionPipeline
33
- from . import TextToVideoSDPipelineOutput
34
-
35
-
36
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
37
-
38
- EXAMPLE_DOC_STRING = """
39
- Examples:
40
- ```py
41
- >>> import torch
42
- >>> from diffusers import TextToVideoSDPipeline
43
- >>> from diffusers.utils import export_to_video
44
-
45
- >>> pipe = TextToVideoSDPipeline.from_pretrained(
46
- ... "damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16, variant="fp16"
47
- ... )
48
- >>> pipe.enable_model_cpu_offload()
49
-
50
- >>> prompt = "Spiderman is surfing"
51
- >>> video_frames = pipe(prompt).frames
52
- >>> video_path = export_to_video(video_frames)
53
- >>> video_path
54
- ```
55
- """
56
-
57
-
58
- def tensor2vid(video: torch.Tensor, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) -> List[np.ndarray]:
59
- # This code is copied from https://github.com/modelscope/modelscope/blob/1509fdb973e5871f37148a4b5e5964cafd43e64d/modelscope/pipelines/multi_modal/text_to_video_synthesis_pipeline.py#L78
60
- # reshape to ncfhw
61
- mean = torch.tensor(mean, device=video.device).reshape(1, -1, 1, 1, 1)
62
- std = torch.tensor(std, device=video.device).reshape(1, -1, 1, 1, 1)
63
- # unnormalize back to [0,1]
64
- video = video.mul_(std).add_(mean)
65
- video.clamp_(0, 1)
66
- # prepare the final outputs
67
- i, c, f, h, w = video.shape
68
- images = video.permute(2, 3, 0, 4, 1).reshape(
69
- f, h, i * w, c
70
- ) # 1st (frames, h, batch_size, w, c) 2nd (frames, h, batch_size * w, c)
71
- images = images.unbind(dim=0) # prepare a list of individual (consecutive) frames
72
- images = [(image.cpu().numpy() * 255).astype("uint8") for image in images] # f h w c
73
- return images
74
-
75
-
76
- class TextToVideoSDPipeline(DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin):
77
- r"""
78
- Pipeline for text-to-video generation.
79
-
80
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
81
- implemented for all pipelines (downloading, saving, running on a particular device, etc.).
82
-
83
- Args:
84
- vae ([`AutoencoderKL`]):
85
- Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
86
- text_encoder ([`CLIPTextModel`]):
87
- Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
88
- tokenizer (`CLIPTokenizer`):
89
- A [`~transformers.CLIPTokenizer`] to tokenize text.
90
- unet ([`UNet3DConditionModel`]):
91
- A [`UNet3DConditionModel`] to denoise the encoded video latents.
92
- scheduler ([`SchedulerMixin`]):
93
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
94
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
95
- """
96
-
97
- def __init__(
98
- self,
99
- vae: AutoencoderKL,
100
- text_encoder: CLIPTextModel,
101
- tokenizer: CLIPTokenizer,
102
- unet: UNet3DConditionModel,
103
- scheduler: KarrasDiffusionSchedulers,
104
- ):
105
- super().__init__()
106
-
107
- self.register_modules(
108
- vae=vae,
109
- text_encoder=text_encoder,
110
- tokenizer=tokenizer,
111
- unet=unet,
112
- scheduler=scheduler,
113
- )
114
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
115
-
116
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
117
- def enable_vae_slicing(self):
118
- r"""
119
- Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
120
- compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
121
- """
122
- self.vae.enable_slicing()
123
-
124
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
125
- def disable_vae_slicing(self):
126
- r"""
127
- Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
128
- computing decoding in one step.
129
- """
130
- self.vae.disable_slicing()
131
-
132
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling
133
- def enable_vae_tiling(self):
134
- r"""
135
- Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
136
- compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
137
- processing larger images.
138
- """
139
- self.vae.enable_tiling()
140
-
141
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling
142
- def disable_vae_tiling(self):
143
- r"""
144
- Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
145
- computing decoding in one step.
146
- """
147
- self.vae.disable_tiling()
148
-
149
- def enable_model_cpu_offload(self, gpu_id=0):
150
- r"""
151
- Offload all models to CPU to reduce memory usage with a low impact on performance. Moves one whole model at a
152
- time to the GPU when its `forward` method is called, and the model remains on the GPU until the next model runs.
153
- Memory savings are lower than using `enable_sequential_cpu_offload`, but performance is much better due to the
154
- iterative execution of the `unet`.
155
- """
156
- if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
157
- from accelerate import cpu_offload_with_hook
158
- else:
159
- raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
160
-
161
- device = torch.device(f"cuda:{gpu_id}")
162
-
163
- if self.device.type != "cpu":
164
- self.to("cpu", silence_dtype_warnings=True)
165
- torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
166
-
167
- hook = None
168
- for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]:
169
- _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
170
-
171
- # We'll offload the last model manually.
172
- self.final_offload_hook = hook
173
-
174
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
175
- def _encode_prompt(
176
- self,
177
- prompt,
178
- device,
179
- num_images_per_prompt,
180
- do_classifier_free_guidance,
181
- negative_prompt=None,
182
- prompt_embeds: Optional[torch.FloatTensor] = None,
183
- negative_prompt_embeds: Optional[torch.FloatTensor] = None,
184
- lora_scale: Optional[float] = None,
185
- ):
186
- r"""
187
- Encodes the prompt into text encoder hidden states.
188
-
189
- Args:
190
- prompt (`str` or `List[str]`, *optional*):
191
- prompt to be encoded
192
- device: (`torch.device`):
193
- torch device
194
- num_images_per_prompt (`int`):
195
- number of images that should be generated per prompt
196
- do_classifier_free_guidance (`bool`):
197
- whether to use classifier free guidance or not
198
- negative_prompt (`str` or `List[str]`, *optional*):
199
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
200
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
201
- less than `1`).
202
- prompt_embeds (`torch.FloatTensor`, *optional*):
203
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
204
- provided, text embeddings will be generated from `prompt` input argument.
205
- negative_prompt_embeds (`torch.FloatTensor`, *optional*):
206
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
207
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
208
- argument.
209
- lora_scale (`float`, *optional*):
210
- A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
211
- """
212
- # set lora scale so that monkey patched LoRA
213
- # function of text encoder can correctly access it
214
- if lora_scale is not None and isinstance(self, LoraLoaderMixin):
215
- self._lora_scale = lora_scale
216
-
217
- if prompt is not None and isinstance(prompt, str):
218
- batch_size = 1
219
- elif prompt is not None and isinstance(prompt, list):
220
- batch_size = len(prompt)
221
- else:
222
- batch_size = prompt_embeds.shape[0]
223
-
224
- if prompt_embeds is None:
225
- # textual inversion: process multi-vector tokens if necessary
226
- if isinstance(self, TextualInversionLoaderMixin):
227
- prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
228
-
229
- text_inputs = self.tokenizer(
230
- prompt,
231
- padding="max_length",
232
- max_length=self.tokenizer.model_max_length,
233
- truncation=True,
234
- return_tensors="pt",
235
- )
236
- text_input_ids = text_inputs.input_ids
237
- untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
238
-
239
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
240
- text_input_ids, untruncated_ids
241
- ):
242
- removed_text = self.tokenizer.batch_decode(
243
- untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
244
- )
245
- logger.warning(
246
- "The following part of your input was truncated because CLIP can only handle sequences up to"
247
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
248
- )
249
-
250
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
251
- attention_mask = text_inputs.attention_mask.to(device)
252
- else:
253
- attention_mask = None
254
-
255
- prompt_embeds = self.text_encoder(
256
- text_input_ids.to(device),
257
- attention_mask=attention_mask,
258
- )
259
- prompt_embeds = prompt_embeds[0]
260
-
261
- prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
262
-
263
- bs_embed, seq_len, _ = prompt_embeds.shape
264
- # duplicate text embeddings for each generation per prompt, using mps friendly method
265
- prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
266
- prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
267
-
268
- # get unconditional embeddings for classifier free guidance
269
- if do_classifier_free_guidance and negative_prompt_embeds is None:
270
- uncond_tokens: List[str]
271
- if negative_prompt is None:
272
- uncond_tokens = [""] * batch_size
273
- elif prompt is not None and type(prompt) is not type(negative_prompt):
274
- raise TypeError(
275
- f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
276
- f" {type(prompt)}."
277
- )
278
- elif isinstance(negative_prompt, str):
279
- uncond_tokens = [negative_prompt]
280
- elif batch_size != len(negative_prompt):
281
- raise ValueError(
282
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
283
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
284
- " the batch size of `prompt`."
285
- )
286
- else:
287
- uncond_tokens = negative_prompt
288
-
289
- # textual inversion: process multi-vector tokens if necessary
290
- if isinstance(self, TextualInversionLoaderMixin):
291
- uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
292
-
293
- max_length = prompt_embeds.shape[1]
294
- uncond_input = self.tokenizer(
295
- uncond_tokens,
296
- padding="max_length",
297
- max_length=max_length,
298
- truncation=True,
299
- return_tensors="pt",
300
- )
301
-
302
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
303
- attention_mask = uncond_input.attention_mask.to(device)
304
- else:
305
- attention_mask = None
306
-
307
- negative_prompt_embeds = self.text_encoder(
308
- uncond_input.input_ids.to(device),
309
- attention_mask=attention_mask,
310
- )
311
- negative_prompt_embeds = negative_prompt_embeds[0]
312
-
313
- if do_classifier_free_guidance:
314
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
315
- seq_len = negative_prompt_embeds.shape[1]
316
-
317
- negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
318
-
319
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
320
- negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
321
-
322
- # For classifier free guidance, we need to do two forward passes.
323
- # Here we concatenate the unconditional and text embeddings into a single batch
324
- # to avoid doing two forward passes
325
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
326
-
327
- return prompt_embeds
328
-
329
- def decode_latents(self, latents):
330
- latents = 1 / self.vae.config.scaling_factor * latents
331
-
332
- batch_size, channels, num_frames, height, width = latents.shape
333
- latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width)
334
-
335
- image = self.vae.decode(latents).sample
336
- video = (
337
- image[None, :]
338
- .reshape(
339
- (
340
- batch_size,
341
- num_frames,
342
- -1,
343
- )
344
- + image.shape[2:]
345
- )
346
- .permute(0, 2, 1, 3, 4)
347
- )
348
- # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
349
- video = video.float()
350
- return video
351
-
352
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
353
- def prepare_extra_step_kwargs(self, generator, eta):
354
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
355
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
356
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
357
- # and should be between [0, 1]
358
-
359
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
360
- extra_step_kwargs = {}
361
- if accepts_eta:
362
- extra_step_kwargs["eta"] = eta
363
-
364
- # check if the scheduler accepts generator
365
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
366
- if accepts_generator:
367
- extra_step_kwargs["generator"] = generator
368
- return extra_step_kwargs
369
-
370
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.check_inputs
371
- def check_inputs(
372
- self,
373
- prompt,
374
- height,
375
- width,
376
- callback_steps,
377
- negative_prompt=None,
378
- prompt_embeds=None,
379
- negative_prompt_embeds=None,
380
- ):
381
- if height % 8 != 0 or width % 8 != 0:
382
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
383
-
384
- if (callback_steps is None) or (
385
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
386
- ):
387
- raise ValueError(
388
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
389
- f" {type(callback_steps)}."
390
- )
391
-
392
- if prompt is not None and prompt_embeds is not None:
393
- raise ValueError(
394
- f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
395
- " only forward one of the two."
396
- )
397
- elif prompt is None and prompt_embeds is None:
398
- raise ValueError(
399
- "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
400
- )
401
- elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
402
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
403
-
404
- if negative_prompt is not None and negative_prompt_embeds is not None:
405
- raise ValueError(
406
- f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
407
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
408
- )
409
-
410
- if prompt_embeds is not None and negative_prompt_embeds is not None:
411
- if prompt_embeds.shape != negative_prompt_embeds.shape:
412
- raise ValueError(
413
- "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
414
- f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
415
- f" {negative_prompt_embeds.shape}."
416
- )
417
-
418
- def prepare_latents(
419
- self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None
420
- ):
421
- shape = (
422
- batch_size,
423
- num_channels_latents,
424
- num_frames,
425
- height // self.vae_scale_factor,
426
- width // self.vae_scale_factor,
427
- )
428
- if isinstance(generator, list) and len(generator) != batch_size:
429
- raise ValueError(
430
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
431
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
432
- )
433
-
434
- if latents is None:
435
- latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
436
- else:
437
- latents = latents.to(device)
438
-
439
- # scale the initial noise by the standard deviation required by the scheduler
440
- latents = latents * self.scheduler.init_noise_sigma
441
- return latents
442
-
443
- @torch.no_grad()
444
- @replace_example_docstring(EXAMPLE_DOC_STRING)
445
- def __call__(
446
- self,
447
- prompt: Union[str, List[str]] = None,
448
- height: Optional[int] = None,
449
- width: Optional[int] = None,
450
- num_frames: int = 16,
451
- num_inference_steps: int = 50,
452
- guidance_scale: float = 9.0,
453
- negative_prompt: Optional[Union[str, List[str]]] = None,
454
- eta: float = 0.0,
455
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
456
- latents: Optional[torch.FloatTensor] = None,
457
- prompt_embeds: Optional[torch.FloatTensor] = None,
458
- negative_prompt_embeds: Optional[torch.FloatTensor] = None,
459
- output_type: Optional[str] = "np",
460
- return_dict: bool = True,
461
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
462
- callback_steps: int = 1,
463
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
464
- ):
465
- r"""
466
- The call function to the pipeline for generation.
467
-
468
- Args:
469
- prompt (`str` or `List[str]`, *optional*):
470
- The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
471
- height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
472
- The height in pixels of the generated video.
473
- width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
474
- The width in pixels of the generated video.
475
- num_frames (`int`, *optional*, defaults to 16):
476
- The number of video frames that are generated. Defaults to 16 frames, which at 8 frames per second
477
- amounts to 2 seconds of video.
478
- num_inference_steps (`int`, *optional*, defaults to 50):
479
- The number of denoising steps. More denoising steps usually lead to a higher quality videos at the
480
- expense of slower inference.
481
- guidance_scale (`float`, *optional*, defaults to 9.0):
482
- A higher guidance scale value encourages the model to generate images closely linked to the text
483
- `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
484
- negative_prompt (`str` or `List[str]`, *optional*):
485
- The prompt or prompts to guide what to not include in image generation. If not defined, you need to
486
- pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
487
- num_images_per_prompt (`int`, *optional*, defaults to 1):
488
- The number of images to generate per prompt.
489
- eta (`float`, *optional*, defaults to 0.0):
490
- Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
491
- to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
492
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
493
- A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
494
- generation deterministic.
495
- latents (`torch.FloatTensor`, *optional*):
496
- Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video
497
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
498
- tensor is generated by sampling using the supplied random `generator`. Latents should be of shape
499
- `(batch_size, num_channel, num_frames, height, width)`.
500
- prompt_embeds (`torch.FloatTensor`, *optional*):
501
- Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
502
- provided, text embeddings are generated from the `prompt` input argument.
503
- negative_prompt_embeds (`torch.FloatTensor`, *optional*):
504
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
505
- not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
506
- output_type (`str`, *optional*, defaults to `"np"`):
507
- The output format of the generated video. Choose between `torch.FloatTensor` or `np.array`.
508
- return_dict (`bool`, *optional*, defaults to `True`):
509
- Whether or not to return a [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] instead
510
- of a plain tuple.
511
- callback (`Callable`, *optional*):
512
- A function that calls every `callback_steps` steps during inference. The function is called with the
513
- following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
514
- callback_steps (`int`, *optional*, defaults to 1):
515
- The frequency at which the `callback` function is called. If not specified, the callback is called at
516
- every step.
517
- cross_attention_kwargs (`dict`, *optional*):
518
- A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
519
- [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
520
-
521
- Examples:
522
-
523
- Returns:
524
- [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] or `tuple`:
525
- If `return_dict` is `True`, [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] is
526
- returned, otherwise a `tuple` is returned where the first element is a list with the generated frames.
527
- """
528
- # 0. Default height and width to unet
529
- height = height or self.unet.config.sample_size * self.vae_scale_factor
530
- width = width or self.unet.config.sample_size * self.vae_scale_factor
531
-
532
- num_images_per_prompt = 1
533
-
534
- # 1. Check inputs. Raise error if not correct
535
- self.check_inputs(
536
- prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds
537
- )
538
-
539
- # 2. Define call parameters
540
- if prompt is not None and isinstance(prompt, str):
541
- batch_size = 1
542
- elif prompt is not None and isinstance(prompt, list):
543
- batch_size = len(prompt)
544
- else:
545
- batch_size = prompt_embeds.shape[0]
546
-
547
- device = self._execution_device
548
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
549
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
550
- # corresponds to doing no classifier free guidance.
551
- do_classifier_free_guidance = guidance_scale > 1.0
552
-
553
- # 3. Encode input prompt
554
- text_encoder_lora_scale = (
555
- cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
556
- )
557
- prompt_embeds = self._encode_prompt(
558
- prompt,
559
- device,
560
- num_images_per_prompt,
561
- do_classifier_free_guidance,
562
- negative_prompt,
563
- prompt_embeds=prompt_embeds,
564
- negative_prompt_embeds=negative_prompt_embeds,
565
- lora_scale=text_encoder_lora_scale,
566
- )
567
-
568
- # 4. Prepare timesteps
569
- self.scheduler.set_timesteps(num_inference_steps, device=device)
570
- timesteps = self.scheduler.timesteps
571
-
572
- # 5. Prepare latent variables
573
- num_channels_latents = self.unet.config.in_channels
574
- latents = self.prepare_latents(
575
- batch_size * num_images_per_prompt,
576
- num_channels_latents,
577
- num_frames,
578
- height,
579
- width,
580
- prompt_embeds.dtype,
581
- device,
582
- generator,
583
- latents,
584
- )
585
-
586
- # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
587
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
588
-
589
- # 7. Denoising loop
590
- num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
591
- with self.progress_bar(total=num_inference_steps) as progress_bar:
592
- for i, t in enumerate(timesteps):
593
- # expand the latents if we are doing classifier free guidance
594
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
595
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
596
-
597
- # predict the noise residual
598
- noise_pred = self.unet(
599
- latent_model_input,
600
- t,
601
- encoder_hidden_states=prompt_embeds,
602
- cross_attention_kwargs=cross_attention_kwargs,
603
- return_dict=False,
604
- )[0]
605
-
606
- # perform guidance
607
- if do_classifier_free_guidance:
608
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
609
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
610
-
611
- # reshape latents
612
- bsz, channel, frames, width, height = latents.shape
613
- latents = latents.permute(0, 2, 1, 3, 4).reshape(bsz * frames, channel, width, height)
614
- noise_pred = noise_pred.permute(0, 2, 1, 3, 4).reshape(bsz * frames, channel, width, height)
615
-
616
- # compute the previous noisy sample x_t -> x_t-1
617
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
618
-
619
- # reshape latents back
620
- latents = latents[None, :].reshape(bsz, frames, channel, width, height).permute(0, 2, 1, 3, 4)
621
-
622
- # call the callback, if provided
623
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
624
- progress_bar.update()
625
- if callback is not None and i % callback_steps == 0:
626
- callback(i, t, latents)
627
-
628
- if output_type == "latent":
629
- return TextToVideoSDPipelineOutput(frames=latents)
630
-
631
- video_tensor = self.decode_latents(latents)
632
-
633
- if output_type == "pt":
634
- video = video_tensor
635
- else:
636
- video = tensor2vid(video_tensor)
637
-
638
- # Offload last model to CPU
639
- if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
640
- self.final_offload_hook.offload()
641
-
642
- if not return_dict:
643
- return (video,)
644
-
645
- return TextToVideoSDPipelineOutput(frames=video)
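For reference, the reshapes in `decode_latents` and in the denoising loop above all rely on the same trick: fold the frame axis into the batch axis so per-image components (the scheduler step, the 2D VAE) see ordinary 4D tensors, then unfold back. A minimal sketch of that round trip, assuming only `torch`; the shapes are illustrative:

```py
import torch

# Video latents in the deleted pipeline are (batch, channels, frames, height, width).
bsz, ch, frames, h, w = 2, 4, 16, 32, 32
latents = torch.randn(bsz, ch, frames, h, w)

# Fold frames into the batch axis so per-image ops run once per frame.
folded = latents.permute(0, 2, 1, 3, 4).reshape(bsz * frames, ch, h, w)

# ... per-frame processing (scheduler.step, vae.decode, ...) would happen here ...

# Unfold back to (batch, channels, frames, height, width).
unfolded = folded[None, :].reshape(bsz, frames, ch, h, w).permute(0, 2, 1, 3, 4)

assert torch.equal(unfolded, latents)  # the fold/unfold round trip is lossless
```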
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/utils/torch_utils.py DELETED
@@ -1,88 +0,0 @@
1
- # Copyright 2023 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
- """
15
- PyTorch utilities: Utilities related to PyTorch
16
- """
17
- from typing import List, Optional, Tuple, Union
18
-
19
- from . import logging
20
- from .import_utils import is_torch_available, is_torch_version
21
-
22
-
23
- if is_torch_available():
24
- import torch
25
-
26
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
27
-
28
- try:
29
- from torch._dynamo import allow_in_graph as maybe_allow_in_graph
30
- except (ImportError, ModuleNotFoundError):
31
-
32
- def maybe_allow_in_graph(cls):
33
- return cls
34
-
35
-
36
- def randn_tensor(
37
- shape: Union[Tuple, List],
38
- generator: Optional[Union[List["torch.Generator"], "torch.Generator"]] = None,
39
- device: Optional["torch.device"] = None,
40
- dtype: Optional["torch.dtype"] = None,
41
- layout: Optional["torch.layout"] = None,
42
- ):
43
- """A helper function to create random tensors on the desired `device` with the desired `dtype`. When
44
- passing a list of generators, you can seed each sample in the batch individually. If CPU generators are passed, the tensor
45
- is always created on the CPU.
46
- """
47
- # device on which tensor is created defaults to device
48
- rand_device = device
49
- batch_size = shape[0]
50
-
51
- layout = layout or torch.strided
52
- device = device or torch.device("cpu")
53
-
54
- if generator is not None:
55
- gen_device_type = generator.device.type if not isinstance(generator, list) else generator[0].device.type
56
- if gen_device_type != device.type and gen_device_type == "cpu":
57
- rand_device = "cpu"
58
- if device != "mps":
59
- logger.info(
60
- f"The passed generator was created on 'cpu' even though a tensor on {device} was expected."
61
- f" Tensors will be created on 'cpu' and then moved to {device}. Note that one can probably"
62
- f" slightly speed up this function by passing a generator that was created on the {device} device."
63
- )
64
- elif gen_device_type != device.type and gen_device_type == "cuda":
65
- raise ValueError(f"Cannot generate a {device} tensor from a generator of type {gen_device_type}.")
66
-
67
- # make sure generator list of length 1 is treated like a non-list
68
- if isinstance(generator, list) and len(generator) == 1:
69
- generator = generator[0]
70
-
71
- if isinstance(generator, list):
72
- shape = (1,) + shape[1:]
73
- latents = [
74
- torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype, layout=layout)
75
- for i in range(batch_size)
76
- ]
77
- latents = torch.cat(latents, dim=0).to(device)
78
- else:
79
- latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype, layout=layout).to(device)
80
-
81
- return latents
82
-
83
-
84
- def is_compiled_module(module):
85
- """Check whether the module was compiled with torch.compile()"""
86
- if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
87
- return False
88
- return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)
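For reference, a short sketch of the per-sample seeding that `randn_tensor` enables when given a list of generators: each batch element depends only on its own generator, so individual samples stay reproducible no matter how the batch is assembled. The import path matches this vendored copy of diffusers (which re-exports the helper from `diffusers.utils`); the shape and seeds are illustrative:

```py
import torch
from diffusers.utils import randn_tensor  # re-exported from diffusers.utils in this copy of the library

shape = (4, 3, 64, 64)  # batch of 4 latents

# One seeded CPU generator per sample: sample i depends only on generator i.
generators = [torch.Generator().manual_seed(seed) for seed in (0, 1, 2, 3)]
latents = randn_tensor(shape, generator=generators, device=torch.device("cpu"))

# The first sample is identical to a batch-of-one draw with the same seed.
single = randn_tensor((1, 3, 64, 64), generator=torch.Generator().manual_seed(0))
assert torch.equal(latents[:1], single)
```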
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/others/test_config.py DELETED
@@ -1,288 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2023 HuggingFace Inc.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- import tempfile
17
- import unittest
18
-
19
- from diffusers import (
20
- DDIMScheduler,
21
- DDPMScheduler,
22
- DPMSolverMultistepScheduler,
23
- EulerAncestralDiscreteScheduler,
24
- EulerDiscreteScheduler,
25
- PNDMScheduler,
26
- logging,
27
- )
28
- from diffusers.configuration_utils import ConfigMixin, register_to_config
29
- from diffusers.utils.testing_utils import CaptureLogger
30
-
31
-
32
- class SampleObject(ConfigMixin):
33
- config_name = "config.json"
34
-
35
- @register_to_config
36
- def __init__(
37
- self,
38
- a=2,
39
- b=5,
40
- c=(2, 5),
41
- d="for diffusion",
42
- e=[1, 3],
43
- ):
44
- pass
45
-
46
-
47
- class SampleObject2(ConfigMixin):
48
- config_name = "config.json"
49
-
50
- @register_to_config
51
- def __init__(
52
- self,
53
- a=2,
54
- b=5,
55
- c=(2, 5),
56
- d="for diffusion",
57
- f=[1, 3],
58
- ):
59
- pass
60
-
61
-
62
- class SampleObject3(ConfigMixin):
63
- config_name = "config.json"
64
-
65
- @register_to_config
66
- def __init__(
67
- self,
68
- a=2,
69
- b=5,
70
- c=(2, 5),
71
- d="for diffusion",
72
- e=[1, 3],
73
- f=[1, 3],
74
- ):
75
- pass
76
-
77
-
78
- class SampleObject4(ConfigMixin):
79
- config_name = "config.json"
80
-
81
- @register_to_config
82
- def __init__(
83
- self,
84
- a=2,
85
- b=5,
86
- c=(2, 5),
87
- d="for diffusion",
88
- e=[1, 5],
89
- f=[5, 4],
90
- ):
91
- pass
92
-
93
-
94
- class ConfigTester(unittest.TestCase):
95
- def test_load_not_from_mixin(self):
96
- with self.assertRaises(ValueError):
97
- ConfigMixin.load_config("dummy_path")
98
-
99
- def test_register_to_config(self):
100
- obj = SampleObject()
101
- config = obj.config
102
- assert config["a"] == 2
103
- assert config["b"] == 5
104
- assert config["c"] == (2, 5)
105
- assert config["d"] == "for diffusion"
106
- assert config["e"] == [1, 3]
107
-
108
- # init ignore private arguments
109
- obj = SampleObject(_name_or_path="lalala")
110
- config = obj.config
111
- assert config["a"] == 2
112
- assert config["b"] == 5
113
- assert config["c"] == (2, 5)
114
- assert config["d"] == "for diffusion"
115
- assert config["e"] == [1, 3]
116
-
117
- # can override default
118
- obj = SampleObject(c=6)
119
- config = obj.config
120
- assert config["a"] == 2
121
- assert config["b"] == 5
122
- assert config["c"] == 6
123
- assert config["d"] == "for diffusion"
124
- assert config["e"] == [1, 3]
125
-
126
- # can use positional arguments.
127
- obj = SampleObject(1, c=6)
128
- config = obj.config
129
- assert config["a"] == 1
130
- assert config["b"] == 5
131
- assert config["c"] == 6
132
- assert config["d"] == "for diffusion"
133
- assert config["e"] == [1, 3]
134
-
135
- def test_save_load(self):
136
- obj = SampleObject()
137
- config = obj.config
138
-
139
- assert config["a"] == 2
140
- assert config["b"] == 5
141
- assert config["c"] == (2, 5)
142
- assert config["d"] == "for diffusion"
143
- assert config["e"] == [1, 3]
144
-
145
- with tempfile.TemporaryDirectory() as tmpdirname:
146
- obj.save_config(tmpdirname)
147
- new_obj = SampleObject.from_config(SampleObject.load_config(tmpdirname))
148
- new_config = new_obj.config
149
-
150
- # unfreeze configs
151
- config = dict(config)
152
- new_config = dict(new_config)
153
-
154
- assert config.pop("c") == (2, 5) # instantiated as tuple
155
- assert new_config.pop("c") == [2, 5] # saved & loaded as list because of json
156
- config.pop("_use_default_values")
157
- assert config == new_config
158
-
159
- def test_load_ddim_from_pndm(self):
160
- logger = logging.get_logger("diffusers.configuration_utils")
161
- # 30 for warning
162
- logger.setLevel(30)
163
-
164
- with CaptureLogger(logger) as cap_logger:
165
- ddim = DDIMScheduler.from_pretrained(
166
- "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler"
167
- )
168
-
169
- assert ddim.__class__ == DDIMScheduler
170
- # no warning should be thrown
171
- assert cap_logger.out == ""
172
-
173
- def test_load_euler_from_pndm(self):
174
- logger = logging.get_logger("diffusers.configuration_utils")
175
- # 30 for warning
176
- logger.setLevel(30)
177
-
178
- with CaptureLogger(logger) as cap_logger:
179
- euler = EulerDiscreteScheduler.from_pretrained(
180
- "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler"
181
- )
182
-
183
- assert euler.__class__ == EulerDiscreteScheduler
184
- # no warning should be thrown
185
- assert cap_logger.out == ""
186
-
187
- def test_load_euler_ancestral_from_pndm(self):
188
- logger = logging.get_logger("diffusers.configuration_utils")
189
- # 30 for warning
190
- logger.setLevel(30)
191
-
192
- with CaptureLogger(logger) as cap_logger:
193
- euler = EulerAncestralDiscreteScheduler.from_pretrained(
194
- "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler"
195
- )
196
-
197
- assert euler.__class__ == EulerAncestralDiscreteScheduler
198
- # no warning should be thrown
199
- assert cap_logger.out == ""
200
-
201
- def test_load_pndm(self):
202
- logger = logging.get_logger("diffusers.configuration_utils")
203
- # 30 for warning
204
- logger.setLevel(30)
205
-
206
- with CaptureLogger(logger) as cap_logger:
207
- pndm = PNDMScheduler.from_pretrained(
208
- "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler"
209
- )
210
-
211
- assert pndm.__class__ == PNDMScheduler
212
- # no warning should be thrown
213
- assert cap_logger.out == ""
214
-
215
- def test_overwrite_config_on_load(self):
216
- logger = logging.get_logger("diffusers.configuration_utils")
217
- # 30 for warning
218
- logger.setLevel(30)
219
-
220
- with CaptureLogger(logger) as cap_logger:
221
- ddpm = DDPMScheduler.from_pretrained(
222
- "hf-internal-testing/tiny-stable-diffusion-torch",
223
- subfolder="scheduler",
224
- prediction_type="sample",
225
- beta_end=8,
226
- )
227
-
228
- with CaptureLogger(logger) as cap_logger_2:
229
- ddpm_2 = DDPMScheduler.from_pretrained("google/ddpm-celebahq-256", beta_start=88)
230
-
231
- assert ddpm.__class__ == DDPMScheduler
232
- assert ddpm.config.prediction_type == "sample"
233
- assert ddpm.config.beta_end == 8
234
- assert ddpm_2.config.beta_start == 88
235
-
236
- # no warning should be thrown
237
- assert cap_logger.out == ""
238
- assert cap_logger_2.out == ""
239
-
240
- def test_load_dpmsolver(self):
241
- logger = logging.get_logger("diffusers.configuration_utils")
242
- # 30 for warning
243
- logger.setLevel(30)
244
-
245
- with CaptureLogger(logger) as cap_logger:
246
- dpm = DPMSolverMultistepScheduler.from_pretrained(
247
- "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler"
248
- )
249
-
250
- assert dpm.__class__ == DPMSolverMultistepScheduler
251
- # no warning should be thrown
252
- assert cap_logger.out == ""
253
-
254
- def test_use_default_values(self):
255
- # let's first save a config that should be in the form
256
- # a=2,
257
- # b=5,
258
- # c=(2, 5),
259
- # d="for diffusion",
260
- # e=[1, 3],
261
-
262
- config = SampleObject()
263
-
264
- config_dict = {k: v for k, v in config.config.items() if not k.startswith("_")}
265
-
266
- # make sure that default config has all keys in `_use_default_values`
267
- assert set(config_dict.keys()) == set(config.config._use_default_values)
268
-
269
- with tempfile.TemporaryDirectory() as tmpdirname:
270
- config.save_config(tmpdirname)
271
-
272
- # now loading it with SampleObject2 should put f into `_use_default_values`
273
- config = SampleObject2.from_config(tmpdirname)
274
-
275
- assert "f" in config._use_default_values
276
- assert config.f == [1, 3]
277
-
278
- # now loading the config, should **NOT** use [1, 3] for `f`, but the default [1, 4] value
279
- # **BECAUSE** it is part of `config._use_default_values`
280
- new_config = SampleObject4.from_config(config.config)
281
- assert new_config.f == [5, 4]
282
-
283
- config.config._use_default_values.pop()
284
- new_config_2 = SampleObject4.from_config(config.config)
285
- assert new_config_2.f == [1, 3]
286
-
287
- # Nevertheless "e" should still be correctly loaded to [1, 3] from SampleObject2 instead of defaulting to [1, 5]
288
- assert new_config_2.e == [1, 3]

spaces/Andy1621/uniformer_image_detection/configs/fpg/mask_rcnn_r50_fpg-chn128_crop640_50e_coco.py DELETED
@@ -1,10 +0,0 @@
- _base_ = 'mask_rcnn_r50_fpg_crop640_50e_coco.py'
-
- model = dict(
-     neck=dict(out_channels=128, inter_channels=128),
-     rpn_head=dict(in_channels=128),
-     roi_head=dict(
-         bbox_roi_extractor=dict(out_channels=128),
-         bbox_head=dict(in_channels=128),
-         mask_roi_extractor=dict(out_channels=128),
-         mask_head=dict(in_channels=128)))

spaces/Andy1621/uniformer_image_detection/configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_1x_coco.py DELETED
@@ -1,4 +0,0 @@
- _base_ = '../mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py'
- model = dict(
-     backbone=dict(
-         norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False))

spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/README.md DELETED
@@ -1,69 +0,0 @@
1
- # Pyramid Scene Parsing Network
2
-
3
- ## Introduction
4
-
5
- <!-- [ALGORITHM] -->
6
-
7
- ```latex
8
- @inproceedings{zhao2017pspnet,
9
- title={Pyramid Scene Parsing Network},
10
- author={Zhao, Hengshuang and Shi, Jianping and Qi, Xiaojuan and Wang, Xiaogang and Jia, Jiaya},
11
- booktitle={CVPR},
12
- year={2017}
13
- }
14
- ```
15
-
16
- ## Results and models
17
-
18
- ### Cityscapes
19
-
20
- | Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
21
- | ------ | --------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ---------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
22
- | PSPNet | R-50-D8 | 512x1024 | 40000 | 6.1 | 4.07 | 77.85 | 79.18 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338.log.json) |
23
- | PSPNet | R-101-D8 | 512x1024 | 40000 | 9.6 | 2.68 | 78.34 | 79.74 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x1024_40k_cityscapes/pspnet_r101-d8_512x1024_40k_cityscapes_20200604_232751-467e7cf4.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x1024_40k_cityscapes/pspnet_r101-d8_512x1024_40k_cityscapes_20200604_232751.log.json) |
24
- | PSPNet | R-50-D8 | 769x769 | 40000 | 6.9 | 1.76 | 78.26 | 79.88 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r50-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_769x769_40k_cityscapes/pspnet_r50-d8_769x769_40k_cityscapes_20200606_112725-86638686.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_769x769_40k_cityscapes/pspnet_r50-d8_769x769_40k_cityscapes_20200606_112725.log.json) |
25
- | PSPNet | R-101-D8 | 769x769 | 40000 | 10.9 | 1.15 | 79.08 | 80.28 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_769x769_40k_cityscapes/pspnet_r101-d8_769x769_40k_cityscapes_20200606_112753-61c6f5be.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_769x769_40k_cityscapes/pspnet_r101-d8_769x769_40k_cityscapes_20200606_112753.log.json) |
26
- | PSPNet | R-18-D8 | 512x1024 | 80000 | 1.7 | 15.71 | 74.87 | 76.04 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r18-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18-d8_512x1024_80k_cityscapes/pspnet_r18-d8_512x1024_80k_cityscapes_20201225_021458-09ffa746.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18-d8_512x1024_80k_cityscapes/pspnet_r18-d8_512x1024_80k_cityscapes-20201225_021458.log.json) |
27
- | PSPNet | R-50-D8 | 512x1024 | 80000 | - | - | 78.55 | 79.79 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes/pspnet_r50-d8_512x1024_80k_cityscapes_20200606_112131-2376f12b.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes/pspnet_r50-d8_512x1024_80k_cityscapes_20200606_112131.log.json) |
28
- | PSPNet | R-101-D8 | 512x1024 | 80000 | - | - | 79.76 | 81.01 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes/pspnet_r101-d8_512x1024_80k_cityscapes_20200606_112211-e1e1100f.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes/pspnet_r101-d8_512x1024_80k_cityscapes_20200606_112211.log.json) |
29
- | PSPNet | R-18-D8 | 769x769 | 80000 | 1.9 | 6.20 | 75.90 | 77.86 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r18-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18-d8_769x769_80k_cityscapes/pspnet_r18-d8_769x769_80k_cityscapes_20201225_021458-3deefc62.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18-d8_769x769_80k_cityscapes/pspnet_r18-d8_769x769_80k_cityscapes-20201225_021458.log.json) |
30
- | PSPNet | R-50-D8 | 769x769 | 80000 | - | - | 79.59 | 80.69 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r50-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_769x769_80k_cityscapes/pspnet_r50-d8_769x769_80k_cityscapes_20200606_210121-5ccf03dd.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_769x769_80k_cityscapes/pspnet_r50-d8_769x769_80k_cityscapes_20200606_210121.log.json) |
31
- | PSPNet | R-101-D8 | 769x769 | 80000 | - | - | 79.77 | 81.06 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_769x769_80k_cityscapes/pspnet_r101-d8_769x769_80k_cityscapes_20200606_225055-dba412fa.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_769x769_80k_cityscapes/pspnet_r101-d8_769x769_80k_cityscapes_20200606_225055.log.json) |
32
- | PSPNet | R-18b-D8 | 512x1024 | 80000 | 1.5 | 16.28 | 74.23 | 75.79 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r18b-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18b-d8_512x1024_80k_cityscapes/pspnet_r18b-d8_512x1024_80k_cityscapes_20201226_063116-26928a60.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18b-d8_512x1024_80k_cityscapes/pspnet_r18b-d8_512x1024_80k_cityscapes-20201226_063116.log.json) |
33
- | PSPNet | R-50b-D8 | 512x1024 | 80000 | 6.0 | 4.30 | 78.22 | 79.46 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r50b-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50b-d8_512x1024_80k_cityscapes/pspnet_r50b-d8_512x1024_80k_cityscapes_20201225_094315-6344287a.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50b-d8_512x1024_80k_cityscapes/pspnet_r50b-d8_512x1024_80k_cityscapes-20201225_094315.log.json) |
34
- | PSPNet | R-101b-D8 | 512x1024 | 80000 | 9.5 | 2.76 | 79.69 | 80.79 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101b-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101b-d8_512x1024_80k_cityscapes/pspnet_r101b-d8_512x1024_80k_cityscapes_20201226_170012-3a4d38ab.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101b-d8_512x1024_80k_cityscapes/pspnet_r101b-d8_512x1024_80k_cityscapes-20201226_170012.log.json) |
35
- | PSPNet | R-18b-D8 | 769x769 | 80000 | 1.7 | 6.41 | 74.92 | 76.90 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r18b-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18b-d8_769x769_80k_cityscapes/pspnet_r18b-d8_769x769_80k_cityscapes_20201226_080942-bf98d186.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18b-d8_769x769_80k_cityscapes/pspnet_r18b-d8_769x769_80k_cityscapes-20201226_080942.log.json) |
36
- | PSPNet | R-50b-D8 | 769x769 | 80000 | 6.8 | 1.88 | 78.50 | 79.96 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r50b-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50b-d8_769x769_80k_cityscapes/pspnet_r50b-d8_769x769_80k_cityscapes_20201225_094316-4c643cf6.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50b-d8_769x769_80k_cityscapes/pspnet_r50b-d8_769x769_80k_cityscapes-20201225_094316.log.json) |
37
- | PSPNet | R-101b-D8 | 769x769 | 80000 | 10.8 | 1.17 | 78.87 | 80.04 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101b-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101b-d8_769x769_80k_cityscapes/pspnet_r101b-d8_769x769_80k_cityscapes_20201226_171823-f0e7c293.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101b-d8_769x769_80k_cityscapes/pspnet_r101b-d8_769x769_80k_cityscapes-20201226_171823.log.json) |
38
-
39
- ### ADE20K
40
-
41
- | Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
42
- | ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ----------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
43
- | PSPNet | R-50-D8 | 512x512 | 80000 | 8.5 | 23.53 | 41.13 | 41.94 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r50-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_80k_ade20k/pspnet_r50-d8_512x512_80k_ade20k_20200615_014128-15a8b914.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_80k_ade20k/pspnet_r50-d8_512x512_80k_ade20k_20200615_014128.log.json) |
44
- | PSPNet | R-101-D8 | 512x512 | 80000 | 12 | 15.30 | 43.57 | 44.35 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_80k_ade20k/pspnet_r101-d8_512x512_80k_ade20k_20200614_031423-b6e782f0.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_80k_ade20k/pspnet_r101-d8_512x512_80k_ade20k_20200614_031423.log.json) |
45
- | PSPNet | R-50-D8 | 512x512 | 160000 | - | - | 42.48 | 43.44 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r50-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_160k_ade20k/pspnet_r50-d8_512x512_160k_ade20k_20200615_184358-1890b0bd.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_160k_ade20k/pspnet_r50-d8_512x512_160k_ade20k_20200615_184358.log.json) |
46
- | PSPNet | R-101-D8 | 512x512 | 160000 | - | - | 44.39 | 45.35 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_160k_ade20k/pspnet_r101-d8_512x512_160k_ade20k_20200615_100650-967c316f.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_160k_ade20k/pspnet_r101-d8_512x512_160k_ade20k_20200615_100650.log.json) |
47
-
48
- ### Pascal VOC 2012 + Aug
49
-
50
- | Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
51
- | ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
52
- | PSPNet | R-50-D8 | 512x512 | 20000 | 6.1 | 23.59 | 76.78 | 77.61 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r50-d8_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_20k_voc12aug/pspnet_r50-d8_512x512_20k_voc12aug_20200617_101958-ed5dfbd9.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_20k_voc12aug/pspnet_r50-d8_512x512_20k_voc12aug_20200617_101958.log.json) |
53
- | PSPNet | R-101-D8 | 512x512 | 20000 | 9.6 | 15.02 | 78.47 | 79.25 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101-d8_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_20k_voc12aug/pspnet_r101-d8_512x512_20k_voc12aug_20200617_102003-4aef3c9a.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_20k_voc12aug/pspnet_r101-d8_512x512_20k_voc12aug_20200617_102003.log.json) |
54
- | PSPNet | R-50-D8 | 512x512 | 40000 | - | - | 77.29 | 78.48 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r50-d8_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_40k_voc12aug/pspnet_r50-d8_512x512_40k_voc12aug_20200613_161222-ae9c1b8c.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_40k_voc12aug/pspnet_r50-d8_512x512_40k_voc12aug_20200613_161222.log.json) |
55
- | PSPNet | R-101-D8 | 512x512 | 40000 | - | - | 78.52 | 79.57 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101-d8_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_40k_voc12aug/pspnet_r101-d8_512x512_40k_voc12aug_20200613_161222-bc933b18.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_40k_voc12aug/pspnet_r101-d8_512x512_40k_voc12aug_20200613_161222.log.json) |
56
-
57
- ### Pascal Context
58
-
59
- | Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
60
- | ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
61
- | PSPNet | R-101-D8 | 480x480 | 40000 | 8.8 | 9.68 | 46.60 | 47.78 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101-d8_480x480_40k_pascal_context.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_480x480_40k_pascal_context/pspnet_r101-d8_480x480_40k_pascal_context_20200911_211210-bf0f5d7c.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_480x480_40k_pascal_context/pspnet_r101-d8_480x480_40k_pascal_context-20200911_211210.log.json) |
62
- | PSPNet | R-101-D8 | 480x480 | 80000 | - | - | 46.03 | 47.15 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101-d8_480x480_80k_pascal_context.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_480x480_80k_pascal_context/pspnet_r101-d8_480x480_80k_pascal_context_20200911_190530-c86d6233.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_480x480_80k_pascal_context/pspnet_r101-d8_480x480_80k_pascal_context-20200911_190530.log.json) |
63
-
64
- ### Pascal Context 59
65
-
66
- | Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
67
- | ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
68
- | PSPNet | R-101-D8 | 480x480 | 40000 | - | - | 52.02 | 53.54 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101-d8_480x480_40k_pascal_context_59.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_480x480_40k_pascal_context_59/pspnet_r101-d8_480x480_40k_pascal_context_59_20210416_114524-86d44cd4.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_480x480_40k_pascal_context_59/pspnet_r101-d8_480x480_40k_pascal_context_59-20210416_114524.log.json) |
69
- | PSPNet | R-101-D8 | 480x480 | 80000 | - | - | 52.47 | 53.99 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101-d8_480x480_80k_pascal_context_59.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_480x480_80k_pascal_context_59/pspnet_r101-d8_480x480_80k_pascal_context_59_20210416_114418-fa6caaa2.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_480x480_80k_pascal_context_59/pspnet_r101-d8_480x480_80k_pascal_context_59-20210416_114418.log.json) |
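For quick single-image inference with any of the checkpoints listed above, the snippet below is a minimal sketch; it assumes the mmsegmentation v0.x `mmseg.apis` interface, and the config, checkpoint, and image paths are placeholders to be replaced with local files.

```python
from mmseg.apis import init_segmentor, inference_segmentor

# Placeholder paths: any config/checkpoint pair from the tables above works the same way.
config_file = 'configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py'
checkpoint_file = 'pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth'

# Build the model from the config, load the weights, and run per-pixel prediction on one image.
model = init_segmentor(config_file, checkpoint_file, device='cuda:0')
result = inference_segmentor(model, 'demo.png')  # list with one per-pixel class map for the input image
```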
 
spaces/Anonymous-sub/Rerender/ControlNet/ldm/models/diffusion/ddpm.py DELETED
@@ -1,1797 +0,0 @@
1
- """
2
- wild mixture of
3
- https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
4
- https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
5
- https://github.com/CompVis/taming-transformers
6
- -- merci
7
- """
8
-
9
- import torch
10
- import torch.nn as nn
11
- import numpy as np
12
- import pytorch_lightning as pl
13
- from torch.optim.lr_scheduler import LambdaLR
14
- from einops import rearrange, repeat
15
- from contextlib import contextmanager, nullcontext
16
- from functools import partial
17
- import itertools
18
- from tqdm import tqdm
19
- from torchvision.utils import make_grid
20
- from pytorch_lightning.utilities.distributed import rank_zero_only
21
- from omegaconf import ListConfig
22
-
23
- from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
24
- from ldm.modules.ema import LitEma
25
- from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
26
- from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL
27
- from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
28
- from ldm.models.diffusion.ddim import DDIMSampler
29
-
30
-
31
- __conditioning_keys__ = {'concat': 'c_concat',
32
- 'crossattn': 'c_crossattn',
33
- 'adm': 'y'}
34
-
35
-
36
- def disabled_train(self, mode=True):
37
- """Overwrite model.train with this function to make sure train/eval mode
38
- does not change anymore."""
39
- return self
40
-
41
-
42
- def uniform_on_device(r1, r2, shape, device):
43
- return (r1 - r2) * torch.rand(*shape, device=device) + r2
44
-
45
-
46
- class DDPM(pl.LightningModule):
47
- # classic DDPM with Gaussian diffusion, in image space
48
- def __init__(self,
49
- unet_config,
50
- timesteps=1000,
51
- beta_schedule="linear",
52
- loss_type="l2",
53
- ckpt_path=None,
54
- ignore_keys=[],
55
- load_only_unet=False,
56
- monitor="val/loss",
57
- use_ema=True,
58
- first_stage_key="image",
59
- image_size=256,
60
- channels=3,
61
- log_every_t=100,
62
- clip_denoised=True,
63
- linear_start=1e-4,
64
- linear_end=2e-2,
65
- cosine_s=8e-3,
66
- given_betas=None,
67
- original_elbo_weight=0.,
68
- v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
69
- l_simple_weight=1.,
70
- conditioning_key=None,
71
- parameterization="eps", # all assuming fixed variance schedules
72
- scheduler_config=None,
73
- use_positional_encodings=False,
74
- learn_logvar=False,
75
- logvar_init=0.,
76
- make_it_fit=False,
77
- ucg_training=None,
78
- reset_ema=False,
79
- reset_num_ema_updates=False,
80
- ):
81
- super().__init__()
82
- assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"'
83
- self.parameterization = parameterization
84
- print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
85
- self.cond_stage_model = None
86
- self.clip_denoised = clip_denoised
87
- self.log_every_t = log_every_t
88
- self.first_stage_key = first_stage_key
89
- self.image_size = image_size # try conv?
90
- self.channels = channels
91
- self.use_positional_encodings = use_positional_encodings
92
- self.model = DiffusionWrapper(unet_config, conditioning_key)
93
- count_params(self.model, verbose=True)
94
- self.use_ema = use_ema
95
- if self.use_ema:
96
- self.model_ema = LitEma(self.model)
97
- print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
98
-
99
- self.use_scheduler = scheduler_config is not None
100
- if self.use_scheduler:
101
- self.scheduler_config = scheduler_config
102
-
103
- self.v_posterior = v_posterior
104
- self.original_elbo_weight = original_elbo_weight
105
- self.l_simple_weight = l_simple_weight
106
-
107
- if monitor is not None:
108
- self.monitor = monitor
109
- self.make_it_fit = make_it_fit
110
- if reset_ema: assert exists(ckpt_path)
111
- if ckpt_path is not None:
112
- self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet)
113
- if reset_ema:
114
- assert self.use_ema
115
- print(f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.")
116
- self.model_ema = LitEma(self.model)
117
- if reset_num_ema_updates:
118
- print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ")
119
- assert self.use_ema
120
- self.model_ema.reset_num_updates()
121
-
122
- self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps,
123
- linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s)
124
-
125
- self.loss_type = loss_type
126
-
127
- self.learn_logvar = learn_logvar
128
- logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,))
129
- if self.learn_logvar:
130
-             self.logvar = nn.Parameter(logvar, requires_grad=True)
131
- else:
132
- self.register_buffer('logvar', logvar)
133
-
134
- self.ucg_training = ucg_training or dict()
135
- if self.ucg_training:
136
- self.ucg_prng = np.random.RandomState()
137
-
138
- def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000,
139
- linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
140
- if exists(given_betas):
141
- betas = given_betas
142
- else:
143
- betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end,
144
- cosine_s=cosine_s)
145
- alphas = 1. - betas
146
- alphas_cumprod = np.cumprod(alphas, axis=0)
147
- alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])
148
-
149
- timesteps, = betas.shape
150
- self.num_timesteps = int(timesteps)
151
- self.linear_start = linear_start
152
- self.linear_end = linear_end
153
- assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep'
154
-
155
- to_torch = partial(torch.tensor, dtype=torch.float32)
156
-
157
- self.register_buffer('betas', to_torch(betas))
158
- self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
159
- self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev))
160
-
161
- # calculations for diffusion q(x_t | x_{t-1}) and others
162
- self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
163
- self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
164
- self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod)))
165
- self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod)))
166
- self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1)))
167
-
168
- # calculations for posterior q(x_{t-1} | x_t, x_0)
169
- posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / (
170
- 1. - alphas_cumprod) + self.v_posterior * betas
171
- # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
172
- self.register_buffer('posterior_variance', to_torch(posterior_variance))
173
- # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
174
- self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20))))
175
- self.register_buffer('posterior_mean_coef1', to_torch(
176
- betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod)))
177
- self.register_buffer('posterior_mean_coef2', to_torch(
178
- (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod)))
179
-
180
- if self.parameterization == "eps":
181
- lvlb_weights = self.betas ** 2 / (
182
- 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))
183
- elif self.parameterization == "x0":
184
- lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. * 1 - torch.Tensor(alphas_cumprod))
185
- elif self.parameterization == "v":
186
- lvlb_weights = torch.ones_like(self.betas ** 2 / (
187
- 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)))
188
- else:
189
- raise NotImplementedError("mu not supported")
190
- lvlb_weights[0] = lvlb_weights[1]
191
- self.register_buffer('lvlb_weights', lvlb_weights, persistent=False)
192
- assert not torch.isnan(self.lvlb_weights).all()
193
-
194
- @contextmanager
195
- def ema_scope(self, context=None):
196
- if self.use_ema:
197
- self.model_ema.store(self.model.parameters())
198
- self.model_ema.copy_to(self.model)
199
- if context is not None:
200
- print(f"{context}: Switched to EMA weights")
201
- try:
202
- yield None
203
- finally:
204
- if self.use_ema:
205
- self.model_ema.restore(self.model.parameters())
206
- if context is not None:
207
- print(f"{context}: Restored training weights")
208
-
209
- @torch.no_grad()
210
- def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
211
- sd = torch.load(path, map_location="cpu")
212
- if "state_dict" in list(sd.keys()):
213
- sd = sd["state_dict"]
214
- keys = list(sd.keys())
215
- for k in keys:
216
- for ik in ignore_keys:
217
- if k.startswith(ik):
218
- print("Deleting key {} from state_dict.".format(k))
219
- del sd[k]
220
- if self.make_it_fit:
221
- n_params = len([name for name, _ in
222
- itertools.chain(self.named_parameters(),
223
- self.named_buffers())])
224
- for name, param in tqdm(
225
- itertools.chain(self.named_parameters(),
226
- self.named_buffers()),
227
- desc="Fitting old weights to new weights",
228
- total=n_params
229
- ):
230
- if not name in sd:
231
- continue
232
- old_shape = sd[name].shape
233
- new_shape = param.shape
234
- assert len(old_shape) == len(new_shape)
235
- if len(new_shape) > 2:
236
- # we only modify first two axes
237
- assert new_shape[2:] == old_shape[2:]
238
- # assumes first axis corresponds to output dim
239
- if not new_shape == old_shape:
240
- new_param = param.clone()
241
- old_param = sd[name]
242
- if len(new_shape) == 1:
243
- for i in range(new_param.shape[0]):
244
- new_param[i] = old_param[i % old_shape[0]]
245
- elif len(new_shape) >= 2:
246
- for i in range(new_param.shape[0]):
247
- for j in range(new_param.shape[1]):
248
- new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]]
249
-
250
- n_used_old = torch.ones(old_shape[1])
251
- for j in range(new_param.shape[1]):
252
- n_used_old[j % old_shape[1]] += 1
253
- n_used_new = torch.zeros(new_shape[1])
254
- for j in range(new_param.shape[1]):
255
- n_used_new[j] = n_used_old[j % old_shape[1]]
256
-
257
- n_used_new = n_used_new[None, :]
258
- while len(n_used_new.shape) < len(new_shape):
259
- n_used_new = n_used_new.unsqueeze(-1)
260
- new_param /= n_used_new
261
-
262
- sd[name] = new_param
263
-
264
- missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
265
- sd, strict=False)
266
- print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
267
- if len(missing) > 0:
268
- print(f"Missing Keys:\n {missing}")
269
- if len(unexpected) > 0:
270
- print(f"\nUnexpected Keys:\n {unexpected}")
271
-
272
- def q_mean_variance(self, x_start, t):
273
- """
274
- Get the distribution q(x_t | x_0).
275
- :param x_start: the [N x C x ...] tensor of noiseless inputs.
276
- :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
277
- :return: A tuple (mean, variance, log_variance), all of x_start's shape.
278
- """
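        # Closed form of the forward process: q(x_t | x_0) = N(sqrt(alpha_bar_t) * x_0, (1 - alpha_bar_t) * I),
        # i.e. x_t can be sampled directly as sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps, eps ~ N(0, I).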
279
- mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start)
280
- variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
281
- log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape)
282
- return mean, variance, log_variance
283
-
284
- def predict_start_from_noise(self, x_t, t, noise):
285
- return (
286
- extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
287
- extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
288
- )
289
-
290
- def predict_start_from_z_and_v(self, x_t, t, v):
291
- # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
292
- # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
293
- return (
294
- extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t -
295
- extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v
296
- )
297
-
298
- def predict_eps_from_z_and_v(self, x_t, t, v):
299
- return (
300
- extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v +
301
- extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t
302
- )
303
-
304
- def q_posterior(self, x_start, x_t, t):
305
- posterior_mean = (
306
- extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start +
307
- extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
308
- )
309
- posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape)
310
- posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape)
311
- return posterior_mean, posterior_variance, posterior_log_variance_clipped
312
-
313
- def p_mean_variance(self, x, t, clip_denoised: bool):
314
- model_out = self.model(x, t)
315
- if self.parameterization == "eps":
316
- x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
317
- elif self.parameterization == "x0":
318
- x_recon = model_out
319
- if clip_denoised:
320
- x_recon.clamp_(-1., 1.)
321
-
322
- model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
323
- return model_mean, posterior_variance, posterior_log_variance
324
-
325
- @torch.no_grad()
326
- def p_sample(self, x, t, clip_denoised=True, repeat_noise=False):
327
- b, *_, device = *x.shape, x.device
328
- model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised)
329
- noise = noise_like(x.shape, device, repeat_noise)
330
- # no noise when t == 0
331
- nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
332
- return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
333
-
334
- @torch.no_grad()
335
- def p_sample_loop(self, shape, return_intermediates=False):
336
- device = self.betas.device
337
- b = shape[0]
338
- img = torch.randn(shape, device=device)
339
- intermediates = [img]
340
- for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps):
341
- img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long),
342
- clip_denoised=self.clip_denoised)
343
- if i % self.log_every_t == 0 or i == self.num_timesteps - 1:
344
- intermediates.append(img)
345
- if return_intermediates:
346
- return img, intermediates
347
- return img
348
-
349
- @torch.no_grad()
350
- def sample(self, batch_size=16, return_intermediates=False):
351
- image_size = self.image_size
352
- channels = self.channels
353
- return self.p_sample_loop((batch_size, channels, image_size, image_size),
354
- return_intermediates=return_intermediates)
355
-
356
- def q_sample(self, x_start, t, noise=None):
357
- noise = default(noise, lambda: torch.randn_like(x_start))
358
- return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
359
- extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise)
360
-
361
- def get_v(self, x, noise, t):
362
- return (
363
- extract_into_tensor(self.sqrt_alphas_cumprod, t, x.shape) * noise -
364
- extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x.shape) * x
365
- )
366
-
367
- def get_loss(self, pred, target, mean=True):
368
- if self.loss_type == 'l1':
369
- loss = (target - pred).abs()
370
- if mean:
371
- loss = loss.mean()
372
- elif self.loss_type == 'l2':
373
- if mean:
374
- loss = torch.nn.functional.mse_loss(target, pred)
375
- else:
376
- loss = torch.nn.functional.mse_loss(target, pred, reduction='none')
377
- else:
378
-             raise NotImplementedError(f"unknown loss type '{self.loss_type}'")
379
-
380
- return loss
381
-
382
- def p_losses(self, x_start, t, noise=None):
383
- noise = default(noise, lambda: torch.randn_like(x_start))
384
- x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
385
- model_out = self.model(x_noisy, t)
386
-
387
- loss_dict = {}
388
- if self.parameterization == "eps":
389
- target = noise
390
- elif self.parameterization == "x0":
391
- target = x_start
392
- elif self.parameterization == "v":
393
- target = self.get_v(x_start, noise, t)
394
- else:
395
- raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported")
396
-
397
- loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3])
398
-
399
- log_prefix = 'train' if self.training else 'val'
400
-
401
- loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()})
402
- loss_simple = loss.mean() * self.l_simple_weight
403
-
404
- loss_vlb = (self.lvlb_weights[t] * loss).mean()
405
- loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb})
406
-
407
- loss = loss_simple + self.original_elbo_weight * loss_vlb
408
-
409
- loss_dict.update({f'{log_prefix}/loss': loss})
410
-
411
- return loss, loss_dict
412
-
413
- def forward(self, x, *args, **kwargs):
414
- # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size
415
- # assert h == img_size and w == img_size, f'height and width of image must be {img_size}'
416
- t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
417
- return self.p_losses(x, t, *args, **kwargs)
418
-
419
- def get_input(self, batch, k):
420
- x = batch[k]
421
- if len(x.shape) == 3:
422
- x = x[..., None]
423
- x = rearrange(x, 'b h w c -> b c h w')
424
- x = x.to(memory_format=torch.contiguous_format).float()
425
- return x
426
-
427
- def shared_step(self, batch):
428
- x = self.get_input(batch, self.first_stage_key)
429
- loss, loss_dict = self(x)
430
- return loss, loss_dict
431
-
432
- def training_step(self, batch, batch_idx):
433
- for k in self.ucg_training:
434
- p = self.ucg_training[k]["p"]
435
- val = self.ucg_training[k]["val"]
436
- if val is None:
437
- val = ""
438
- for i in range(len(batch[k])):
439
- if self.ucg_prng.choice(2, p=[1 - p, p]):
440
- batch[k][i] = val
441
-
442
- loss, loss_dict = self.shared_step(batch)
443
-
444
- self.log_dict(loss_dict, prog_bar=True,
445
- logger=True, on_step=True, on_epoch=True)
446
-
447
- self.log("global_step", self.global_step,
448
- prog_bar=True, logger=True, on_step=True, on_epoch=False)
449
-
450
- if self.use_scheduler:
451
- lr = self.optimizers().param_groups[0]['lr']
452
- self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False)
453
-
454
- return loss
455
-
456
- @torch.no_grad()
457
- def validation_step(self, batch, batch_idx):
458
- _, loss_dict_no_ema = self.shared_step(batch)
459
- with self.ema_scope():
460
- _, loss_dict_ema = self.shared_step(batch)
461
- loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema}
462
- self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)
463
- self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)
464
-
465
- def on_train_batch_end(self, *args, **kwargs):
466
- if self.use_ema:
467
- self.model_ema(self.model)
468
-
469
- def _get_rows_from_list(self, samples):
470
- n_imgs_per_row = len(samples)
471
- denoise_grid = rearrange(samples, 'n b c h w -> b n c h w')
472
- denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
473
- denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
474
- return denoise_grid
475
-
476
- @torch.no_grad()
477
- def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs):
478
- log = dict()
479
- x = self.get_input(batch, self.first_stage_key)
480
- N = min(x.shape[0], N)
481
- n_row = min(x.shape[0], n_row)
482
- x = x.to(self.device)[:N]
483
- log["inputs"] = x
484
-
485
- # get diffusion row
486
- diffusion_row = list()
487
- x_start = x[:n_row]
488
-
489
- for t in range(self.num_timesteps):
490
- if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
491
- t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
492
- t = t.to(self.device).long()
493
- noise = torch.randn_like(x_start)
494
- x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
495
- diffusion_row.append(x_noisy)
496
-
497
- log["diffusion_row"] = self._get_rows_from_list(diffusion_row)
498
-
499
- if sample:
500
- # get denoise row
501
- with self.ema_scope("Plotting"):
502
- samples, denoise_row = self.sample(batch_size=N, return_intermediates=True)
503
-
504
- log["samples"] = samples
505
- log["denoise_row"] = self._get_rows_from_list(denoise_row)
506
-
507
- if return_keys:
508
- if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
509
- return log
510
- else:
511
- return {key: log[key] for key in return_keys}
512
- return log
513
-
514
- def configure_optimizers(self):
515
- lr = self.learning_rate
516
- params = list(self.model.parameters())
517
- if self.learn_logvar:
518
- params = params + [self.logvar]
519
- opt = torch.optim.AdamW(params, lr=lr)
520
- return opt
521
-
522
-
523
- class LatentDiffusion(DDPM):
524
- """main class"""
525
-
526
- def __init__(self,
527
- first_stage_config,
528
- cond_stage_config,
529
- num_timesteps_cond=None,
530
- cond_stage_key="image",
531
- cond_stage_trainable=False,
532
- concat_mode=True,
533
- cond_stage_forward=None,
534
- conditioning_key=None,
535
- scale_factor=1.0,
536
- scale_by_std=False,
537
- force_null_conditioning=False,
538
- *args, **kwargs):
539
- self.force_null_conditioning = force_null_conditioning
540
- self.num_timesteps_cond = default(num_timesteps_cond, 1)
541
- self.scale_by_std = scale_by_std
542
- assert self.num_timesteps_cond <= kwargs['timesteps']
543
- # for backwards compatibility after implementation of DiffusionWrapper
544
- if conditioning_key is None:
545
- conditioning_key = 'concat' if concat_mode else 'crossattn'
546
- if cond_stage_config == '__is_unconditional__' and not self.force_null_conditioning:
547
- conditioning_key = None
548
- ckpt_path = kwargs.pop("ckpt_path", None)
549
- reset_ema = kwargs.pop("reset_ema", False)
550
- reset_num_ema_updates = kwargs.pop("reset_num_ema_updates", False)
551
- ignore_keys = kwargs.pop("ignore_keys", [])
552
- super().__init__(conditioning_key=conditioning_key, *args, **kwargs)
553
- self.concat_mode = concat_mode
554
- self.cond_stage_trainable = cond_stage_trainable
555
- self.cond_stage_key = cond_stage_key
556
- try:
557
- self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1
558
- except:
559
- self.num_downs = 0
560
- if not scale_by_std:
561
- self.scale_factor = scale_factor
562
- else:
563
- self.register_buffer('scale_factor', torch.tensor(scale_factor))
564
- self.instantiate_first_stage(first_stage_config)
565
- self.instantiate_cond_stage(cond_stage_config)
566
- self.cond_stage_forward = cond_stage_forward
567
- self.clip_denoised = False
568
- self.bbox_tokenizer = None
569
-
570
- self.restarted_from_ckpt = False
571
- if ckpt_path is not None:
572
- self.init_from_ckpt(ckpt_path, ignore_keys)
573
- self.restarted_from_ckpt = True
574
- if reset_ema:
575
- assert self.use_ema
576
- print(
577
- f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.")
578
- self.model_ema = LitEma(self.model)
579
- if reset_num_ema_updates:
580
- print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ")
581
- assert self.use_ema
582
- self.model_ema.reset_num_updates()
583
-
584
- def make_cond_schedule(self, ):
585
- self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long)
586
- ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long()
587
- self.cond_ids[:self.num_timesteps_cond] = ids
588
-
589
- @rank_zero_only
590
- @torch.no_grad()
591
- def on_train_batch_start(self, batch, batch_idx, dataloader_idx):
592
- # only for very first batch
593
- if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt:
594
- assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously'
595
- # set rescale weight to 1./std of encodings
596
- print("### USING STD-RESCALING ###")
597
- x = super().get_input(batch, self.first_stage_key)
598
- x = x.to(self.device)
599
- encoder_posterior = self.encode_first_stage(x)
600
- z = self.get_first_stage_encoding(encoder_posterior).detach()
601
- del self.scale_factor
602
- self.register_buffer('scale_factor', 1. / z.flatten().std())
603
- print(f"setting self.scale_factor to {self.scale_factor}")
604
- print("### USING STD-RESCALING ###")
605
-
606
- def register_schedule(self,
607
- given_betas=None, beta_schedule="linear", timesteps=1000,
608
- linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
609
- super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s)
610
-
611
- self.shorten_cond_schedule = self.num_timesteps_cond > 1
612
- if self.shorten_cond_schedule:
613
- self.make_cond_schedule()
614
-
615
- def instantiate_first_stage(self, config):
616
- model = instantiate_from_config(config)
617
- self.first_stage_model = model.eval()
618
- self.first_stage_model.train = disabled_train
619
- for param in self.first_stage_model.parameters():
620
- param.requires_grad = False
621
-
622
- def instantiate_cond_stage(self, config):
623
- if not self.cond_stage_trainable:
624
- if config == "__is_first_stage__":
625
- print("Using first stage also as cond stage.")
626
- self.cond_stage_model = self.first_stage_model
627
- elif config == "__is_unconditional__":
628
- print(f"Training {self.__class__.__name__} as an unconditional model.")
629
- self.cond_stage_model = None
630
- # self.be_unconditional = True
631
- else:
632
- model = instantiate_from_config(config)
633
- self.cond_stage_model = model.eval()
634
- self.cond_stage_model.train = disabled_train
635
- for param in self.cond_stage_model.parameters():
636
- param.requires_grad = False
637
- else:
638
- assert config != '__is_first_stage__'
639
- assert config != '__is_unconditional__'
640
- model = instantiate_from_config(config)
641
- self.cond_stage_model = model
642
-
643
- def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False):
644
- denoise_row = []
645
- for zd in tqdm(samples, desc=desc):
646
- denoise_row.append(self.decode_first_stage(zd.to(self.device),
647
- force_not_quantize=force_no_decoder_quantization))
648
- n_imgs_per_row = len(denoise_row)
649
- denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W
650
- denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w')
651
- denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
652
- denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
653
- return denoise_grid
654
-
655
- def get_first_stage_encoding(self, encoder_posterior):
656
- if isinstance(encoder_posterior, DiagonalGaussianDistribution):
657
- z = encoder_posterior.sample()
658
- elif isinstance(encoder_posterior, torch.Tensor):
659
- z = encoder_posterior
660
- else:
661
- raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented")
662
- return self.scale_factor * z
663
-
664
- def get_learned_conditioning(self, c):
665
- if self.cond_stage_forward is None:
666
- if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode):
667
- c = self.cond_stage_model.encode(c)
668
- if isinstance(c, DiagonalGaussianDistribution):
669
- c = c.mode()
670
- else:
671
- c = self.cond_stage_model(c)
672
- else:
673
- assert hasattr(self.cond_stage_model, self.cond_stage_forward)
674
- c = getattr(self.cond_stage_model, self.cond_stage_forward)(c)
675
- return c
676
-
677
- def meshgrid(self, h, w):
678
- y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1)
679
- x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1)
680
-
681
- arr = torch.cat([y, x], dim=-1)
682
- return arr
683
-
684
- def delta_border(self, h, w):
685
- """
686
- :param h: height
687
- :param w: width
688
- :return: normalized distance to image border,
689
-         with min distance = 0 at border and max dist = 0.5 at image center
690
- """
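        # Example: for h = w = 3 the normalized grid spans [0, 1] per axis, so the centre pixel
        # has distance min(0.5, 1 - 0.5) = 0.5 and pixels on the border have distance 0.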
691
- lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)
692
- arr = self.meshgrid(h, w) / lower_right_corner
693
- dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0]
694
- dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0]
695
- edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0]
696
- return edge_dist
697
-
698
- def get_weighting(self, h, w, Ly, Lx, device):
699
- weighting = self.delta_border(h, w)
700
- weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"],
701
- self.split_input_params["clip_max_weight"], )
702
- weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device)
703
-
704
- if self.split_input_params["tie_braker"]:
705
- L_weighting = self.delta_border(Ly, Lx)
706
- L_weighting = torch.clip(L_weighting,
707
- self.split_input_params["clip_min_tie_weight"],
708
- self.split_input_params["clip_max_tie_weight"])
709
-
710
- L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device)
711
- weighting = weighting * L_weighting
712
- return weighting
713
-
714
- def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code
715
- """
716
- :param x: img of size (bs, c, h, w)
717
- :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1])
718
- """
719
- bs, nc, h, w = x.shape
720
-
721
- # number of crops in image
722
- Ly = (h - kernel_size[0]) // stride[0] + 1
723
- Lx = (w - kernel_size[1]) // stride[1] + 1
724
-
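        # Example: h = w = 512 with kernel_size = (128, 128) and stride = (64, 64)
        # gives Ly = Lx = (512 - 128) // 64 + 1 = 7, i.e. 49 overlapping crops per image.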
725
- if uf == 1 and df == 1:
726
- fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
727
- unfold = torch.nn.Unfold(**fold_params)
728
-
729
- fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params)
730
-
731
- weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype)
732
- normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap
733
- weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx))
734
-
735
- elif uf > 1 and df == 1:
736
- fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
737
- unfold = torch.nn.Unfold(**fold_params)
738
-
739
- fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf),
740
- dilation=1, padding=0,
741
- stride=(stride[0] * uf, stride[1] * uf))
742
- fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2)
743
-
744
- weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype)
745
- normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap
746
- weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx))
747
-
748
- elif df > 1 and uf == 1:
749
- fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
750
- unfold = torch.nn.Unfold(**fold_params)
751
-
752
- fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df),
753
- dilation=1, padding=0,
754
- stride=(stride[0] // df, stride[1] // df))
755
- fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2)
756
-
757
- weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype)
758
- normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap
759
- weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx))
760
-
761
- else:
762
- raise NotImplementedError
763
-
764
- return fold, unfold, normalization, weighting
765
-
766
- @torch.no_grad()
767
- def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False,
768
- cond_key=None, return_original_cond=False, bs=None, return_x=False):
769
- x = super().get_input(batch, k)
770
- if bs is not None:
771
- x = x[:bs]
772
- x = x.to(self.device)
773
- encoder_posterior = self.encode_first_stage(x)
774
- z = self.get_first_stage_encoding(encoder_posterior).detach()
775
-
776
- if self.model.conditioning_key is not None and not self.force_null_conditioning:
777
- if cond_key is None:
778
- cond_key = self.cond_stage_key
779
- if cond_key != self.first_stage_key:
780
- if cond_key in ['caption', 'coordinates_bbox', "txt"]:
781
- xc = batch[cond_key]
782
- elif cond_key in ['class_label', 'cls']:
783
- xc = batch
784
- else:
785
- xc = super().get_input(batch, cond_key).to(self.device)
786
- else:
787
- xc = x
788
- if not self.cond_stage_trainable or force_c_encode:
789
- if isinstance(xc, dict) or isinstance(xc, list):
790
- c = self.get_learned_conditioning(xc)
791
- else:
792
- c = self.get_learned_conditioning(xc.to(self.device))
793
- else:
794
- c = xc
795
- if bs is not None:
796
- c = c[:bs]
797
-
798
- if self.use_positional_encodings:
799
- pos_x, pos_y = self.compute_latent_shifts(batch)
800
- ckey = __conditioning_keys__[self.model.conditioning_key]
801
- c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y}
802
-
803
- else:
804
- c = None
805
- xc = None
806
- if self.use_positional_encodings:
807
- pos_x, pos_y = self.compute_latent_shifts(batch)
808
- c = {'pos_x': pos_x, 'pos_y': pos_y}
809
- out = [z, c]
810
- if return_first_stage_outputs:
811
- xrec = self.decode_first_stage(z)
812
- out.extend([x, xrec])
813
- if return_x:
814
- out.extend([x])
815
- if return_original_cond:
816
- out.append(xc)
817
- return out
818
-
819
- @torch.no_grad()
820
- def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
821
- if predict_cids:
822
- if z.dim() == 4:
823
- z = torch.argmax(z.exp(), dim=1).long()
824
- z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
825
- z = rearrange(z, 'b h w c -> b c h w').contiguous()
826
-
827
- z = 1. / self.scale_factor * z
828
- return self.first_stage_model.decode(z)
829
-
830
- @torch.no_grad()
831
- def encode_first_stage(self, x):
832
- return self.first_stage_model.encode(x)
833
-
834
- def shared_step(self, batch, **kwargs):
835
- x, c = self.get_input(batch, self.first_stage_key)
836
- loss = self(x, c)
837
- return loss
838
-
839
- def forward(self, x, c, *args, **kwargs):
840
- t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
841
- if self.model.conditioning_key is not None:
842
- assert c is not None
843
- if self.cond_stage_trainable:
844
- c = self.get_learned_conditioning(c)
845
- if self.shorten_cond_schedule: # TODO: drop this option
846
- tc = self.cond_ids[t].to(self.device)
847
- c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))
848
- return self.p_losses(x, c, t, *args, **kwargs)
849
-
850
- def apply_model(self, x_noisy, t, cond, return_ids=False):
851
- if isinstance(cond, dict):
852
- # hybrid case, cond is expected to be a dict
853
- pass
854
- else:
855
- if not isinstance(cond, list):
856
- cond = [cond]
857
- key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn'
858
- cond = {key: cond}
859
-
860
- x_recon = self.model(x_noisy, t, **cond)
861
-
862
- if isinstance(x_recon, tuple) and not return_ids:
863
- return x_recon[0]
864
- else:
865
- return x_recon
866
-
867
- def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
868
- return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \
869
- extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
870
-
871
- def _prior_bpd(self, x_start):
872
- """
873
- Get the prior KL term for the variational lower-bound, measured in
874
- bits-per-dim.
875
- This term can't be optimized, as it only depends on the encoder.
876
- :param x_start: the [N x C x ...] tensor of inputs.
877
- :return: a batch of [N] KL values (in bits), one per batch element.
878
- """
879
- batch_size = x_start.shape[0]
880
- t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
881
- qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
882
- kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
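        # With mean2=0.0 and logvar2=0.0 this is KL(q(x_T | x_0) || N(0, I)); the division by
        # np.log(2.0) in the return converts the result from nats to bits per dimension.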
883
- return mean_flat(kl_prior) / np.log(2.0)
884
-
885
- def p_losses(self, x_start, cond, t, noise=None):
886
- noise = default(noise, lambda: torch.randn_like(x_start))
887
- x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
888
- model_output = self.apply_model(x_noisy, t, cond)
889
-
890
- loss_dict = {}
891
- prefix = 'train' if self.training else 'val'
892
-
893
- if self.parameterization == "x0":
894
- target = x_start
895
- elif self.parameterization == "eps":
896
- target = noise
897
- elif self.parameterization == "v":
898
- target = self.get_v(x_start, noise, t)
899
- else:
900
- raise NotImplementedError()
901
-
902
- loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3])
903
- loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()})
904
-
905
- logvar_t = self.logvar[t].to(self.device)
906
- loss = loss_simple / torch.exp(logvar_t) + logvar_t
907
- # loss = loss_simple / torch.exp(self.logvar) + self.logvar
908
- if self.learn_logvar:
909
- loss_dict.update({f'{prefix}/loss_gamma': loss.mean()})
910
- loss_dict.update({'logvar': self.logvar.data.mean()})
911
-
912
- loss = self.l_simple_weight * loss.mean()
913
-
914
- loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3))
915
- loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean()
916
- loss_dict.update({f'{prefix}/loss_vlb': loss_vlb})
917
- loss += (self.original_elbo_weight * loss_vlb)
918
- loss_dict.update({f'{prefix}/loss': loss})
919
-
920
- return loss, loss_dict
921
-
922
- def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False,
923
- return_x0=False, score_corrector=None, corrector_kwargs=None):
924
- t_in = t
925
- model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids)
926
-
927
- if score_corrector is not None:
928
- assert self.parameterization == "eps"
929
- model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs)
930
-
931
- if return_codebook_ids:
932
- model_out, logits = model_out
933
-
934
- if self.parameterization == "eps":
935
- x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
936
- elif self.parameterization == "x0":
937
- x_recon = model_out
938
- else:
939
- raise NotImplementedError()
940
-
941
- if clip_denoised:
942
- x_recon.clamp_(-1., 1.)
943
- if quantize_denoised:
944
- x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon)
945
- model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
946
- if return_codebook_ids:
947
- return model_mean, posterior_variance, posterior_log_variance, logits
948
- elif return_x0:
949
- return model_mean, posterior_variance, posterior_log_variance, x_recon
950
- else:
951
- return model_mean, posterior_variance, posterior_log_variance
952
-
953
- @torch.no_grad()
954
- def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False,
955
- return_codebook_ids=False, quantize_denoised=False, return_x0=False,
956
- temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None):
957
- b, *_, device = *x.shape, x.device
958
- outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised,
959
- return_codebook_ids=return_codebook_ids,
960
- quantize_denoised=quantize_denoised,
961
- return_x0=return_x0,
962
- score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
963
- if return_codebook_ids:
964
- raise DeprecationWarning("Support dropped.")
965
- model_mean, _, model_log_variance, logits = outputs
966
- elif return_x0:
967
- model_mean, _, model_log_variance, x0 = outputs
968
- else:
969
- model_mean, _, model_log_variance = outputs
970
-
971
- noise = noise_like(x.shape, device, repeat_noise) * temperature
972
- if noise_dropout > 0.:
973
- noise = torch.nn.functional.dropout(noise, p=noise_dropout)
974
- # no noise when t == 0
975
- nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
976
-
977
- if return_codebook_ids:
978
- return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1)
979
- if return_x0:
980
- return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0
981
- else:
982
- return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
983
-
984
- @torch.no_grad()
985
- def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False,
986
- img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0.,
987
- score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None,
988
- log_every_t=None):
989
- if not log_every_t:
990
- log_every_t = self.log_every_t
991
- timesteps = self.num_timesteps
992
- if batch_size is not None:
993
- b = batch_size if batch_size is not None else shape[0]
994
- shape = [batch_size] + list(shape)
995
- else:
996
- b = batch_size = shape[0]
997
- if x_T is None:
998
- img = torch.randn(shape, device=self.device)
999
- else:
1000
- img = x_T
1001
- intermediates = []
1002
- if cond is not None:
1003
- if isinstance(cond, dict):
1004
- cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
1005
- list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
1006
- else:
1007
- cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
1008
-
1009
- if start_T is not None:
1010
- timesteps = min(timesteps, start_T)
1011
- iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation',
1012
- total=timesteps) if verbose else reversed(
1013
- range(0, timesteps))
1014
- if type(temperature) == float:
1015
- temperature = [temperature] * timesteps
1016
-
1017
- for i in iterator:
1018
- ts = torch.full((b,), i, device=self.device, dtype=torch.long)
1019
- if self.shorten_cond_schedule:
1020
- assert self.model.conditioning_key != 'hybrid'
1021
- tc = self.cond_ids[ts].to(cond.device)
1022
- cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))
1023
-
1024
- img, x0_partial = self.p_sample(img, cond, ts,
1025
- clip_denoised=self.clip_denoised,
1026
- quantize_denoised=quantize_denoised, return_x0=True,
1027
- temperature=temperature[i], noise_dropout=noise_dropout,
1028
- score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
1029
- if mask is not None:
1030
- assert x0 is not None
1031
- img_orig = self.q_sample(x0, ts)
1032
- img = img_orig * mask + (1. - mask) * img
1033
-
1034
- if i % log_every_t == 0 or i == timesteps - 1:
1035
- intermediates.append(x0_partial)
1036
- if callback: callback(i)
1037
- if img_callback: img_callback(img, i)
1038
- return img, intermediates
1039
-
1040
- @torch.no_grad()
1041
- def p_sample_loop(self, cond, shape, return_intermediates=False,
1042
- x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False,
1043
- mask=None, x0=None, img_callback=None, start_T=None,
1044
- log_every_t=None):
1045
-
1046
- if not log_every_t:
1047
- log_every_t = self.log_every_t
1048
- device = self.betas.device
1049
- b = shape[0]
1050
- if x_T is None:
1051
- img = torch.randn(shape, device=device)
1052
- else:
1053
- img = x_T
1054
-
1055
- intermediates = [img]
1056
- if timesteps is None:
1057
- timesteps = self.num_timesteps
1058
-
1059
- if start_T is not None:
1060
- timesteps = min(timesteps, start_T)
1061
- iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed(
1062
- range(0, timesteps))
1063
-
1064
- if mask is not None:
1065
- assert x0 is not None
1066
- assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match
1067
-
1068
- for i in iterator:
1069
- ts = torch.full((b,), i, device=device, dtype=torch.long)
1070
- if self.shorten_cond_schedule:
1071
- assert self.model.conditioning_key != 'hybrid'
1072
- tc = self.cond_ids[ts].to(cond.device)
1073
- cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))
1074
-
1075
- img = self.p_sample(img, cond, ts,
1076
- clip_denoised=self.clip_denoised,
1077
- quantize_denoised=quantize_denoised)
1078
- if mask is not None:
1079
- img_orig = self.q_sample(x0, ts)
1080
- img = img_orig * mask + (1. - mask) * img
1081
-
1082
- if i % log_every_t == 0 or i == timesteps - 1:
1083
- intermediates.append(img)
1084
- if callback: callback(i)
1085
- if img_callback: img_callback(img, i)
1086
-
1087
- if return_intermediates:
1088
- return img, intermediates
1089
- return img
1090
-
1091
- @torch.no_grad()
1092
- def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None,
1093
- verbose=True, timesteps=None, quantize_denoised=False,
1094
- mask=None, x0=None, shape=None, **kwargs):
1095
- if shape is None:
1096
- shape = (batch_size, self.channels, self.image_size, self.image_size)
1097
- if cond is not None:
1098
- if isinstance(cond, dict):
1099
- cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
1100
- list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
1101
- else:
1102
- cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
1103
- return self.p_sample_loop(cond,
1104
- shape,
1105
- return_intermediates=return_intermediates, x_T=x_T,
1106
- verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised,
1107
- mask=mask, x0=x0)
1108
-
1109
- @torch.no_grad()
1110
- def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs):
1111
- if ddim:
1112
- ddim_sampler = DDIMSampler(self)
1113
- shape = (self.channels, self.image_size, self.image_size)
1114
- samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size,
1115
- shape, cond, verbose=False, **kwargs)
1116
-
1117
- else:
1118
- samples, intermediates = self.sample(cond=cond, batch_size=batch_size,
1119
- return_intermediates=True, **kwargs)
1120
-
1121
- return samples, intermediates
1122
-
1123
- @torch.no_grad()
1124
- def get_unconditional_conditioning(self, batch_size, null_label=None):
1125
- if null_label is not None:
1126
- xc = null_label
1127
- if isinstance(xc, ListConfig):
1128
- xc = list(xc)
1129
- if isinstance(xc, dict) or isinstance(xc, list):
1130
- c = self.get_learned_conditioning(xc)
1131
- else:
1132
- if hasattr(xc, "to"):
1133
- xc = xc.to(self.device)
1134
- c = self.get_learned_conditioning(xc)
1135
- else:
1136
- if self.cond_stage_key in ["class_label", "cls"]:
1137
- xc = self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device)
1138
- return self.get_learned_conditioning(xc)
1139
- else:
1140
- raise NotImplementedError("todo")
1141
- if isinstance(c, list): # in case the encoder gives us a list
1142
- for i in range(len(c)):
1143
- c[i] = repeat(c[i], '1 ... -> b ...', b=batch_size).to(self.device)
1144
- else:
1145
- c = repeat(c, '1 ... -> b ...', b=batch_size).to(self.device)
1146
- return c
1147
-
1148
- @torch.no_grad()
1149
- def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=50, ddim_eta=0., return_keys=None,
1150
- quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,
1151
- plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None,
1152
- use_ema_scope=True,
1153
- **kwargs):
1154
- ema_scope = self.ema_scope if use_ema_scope else nullcontext
1155
- use_ddim = ddim_steps is not None
1156
-
1157
- log = dict()
1158
- z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,
1159
- return_first_stage_outputs=True,
1160
- force_c_encode=True,
1161
- return_original_cond=True,
1162
- bs=N)
1163
- N = min(x.shape[0], N)
1164
- n_row = min(x.shape[0], n_row)
1165
- log["inputs"] = x
1166
- log["reconstruction"] = xrec
1167
- if self.model.conditioning_key is not None:
1168
- if hasattr(self.cond_stage_model, "decode"):
1169
- xc = self.cond_stage_model.decode(c)
1170
- log["conditioning"] = xc
1171
- elif self.cond_stage_key in ["caption", "txt"]:
1172
- xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25)
1173
- log["conditioning"] = xc
1174
- elif self.cond_stage_key in ['class_label', "cls"]:
1175
- try:
1176
- xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25)
1177
- log['conditioning'] = xc
1178
- except KeyError:
1179
- # probably no "human_label" in batch
1180
- pass
1181
- elif isimage(xc):
1182
- log["conditioning"] = xc
1183
- if ismap(xc):
1184
- log["original_conditioning"] = self.to_rgb(xc)
1185
-
1186
- if plot_diffusion_rows:
1187
- # get diffusion row
1188
- diffusion_row = list()
1189
- z_start = z[:n_row]
1190
- for t in range(self.num_timesteps):
1191
- if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
1192
- t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
1193
- t = t.to(self.device).long()
1194
- noise = torch.randn_like(z_start)
1195
- z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
1196
- diffusion_row.append(self.decode_first_stage(z_noisy))
1197
-
1198
- diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W
1199
- diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
1200
- diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
1201
- diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
1202
- log["diffusion_row"] = diffusion_grid
1203
-
1204
- if sample:
1205
- # get denoise row
1206
- with ema_scope("Sampling"):
1207
- samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
1208
- ddim_steps=ddim_steps, eta=ddim_eta)
1209
- # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)
1210
- x_samples = self.decode_first_stage(samples)
1211
- log["samples"] = x_samples
1212
- if plot_denoise_rows:
1213
- denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
1214
- log["denoise_row"] = denoise_grid
1215
-
1216
- if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(
1217
- self.first_stage_model, IdentityFirstStage):
1218
- # also display when quantizing x0 while sampling
1219
- with ema_scope("Plotting Quantized Denoised"):
1220
- samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
1221
- ddim_steps=ddim_steps, eta=ddim_eta,
1222
- quantize_denoised=True)
1223
- # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True,
1224
- # quantize_denoised=True)
1225
- x_samples = self.decode_first_stage(samples.to(self.device))
1226
- log["samples_x0_quantized"] = x_samples
1227
-
1228
- if unconditional_guidance_scale > 1.0:
1229
- uc = self.get_unconditional_conditioning(N, unconditional_guidance_label)
1230
- if self.model.conditioning_key == "crossattn-adm":
1231
- uc = {"c_crossattn": [uc], "c_adm": c["c_adm"]}
1232
- with ema_scope("Sampling with classifier-free guidance"):
1233
- samples_cfg, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
1234
- ddim_steps=ddim_steps, eta=ddim_eta,
1235
- unconditional_guidance_scale=unconditional_guidance_scale,
1236
- unconditional_conditioning=uc,
1237
- )
1238
- x_samples_cfg = self.decode_first_stage(samples_cfg)
1239
- log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg
1240
-
1241
- if inpaint:
1242
- # make a simple center square
1243
- b, h, w = z.shape[0], z.shape[2], z.shape[3]
1244
- mask = torch.ones(N, h, w).to(self.device)
1245
- # zeros will be filled in
1246
- mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.
1247
- mask = mask[:, None, ...]
1248
- with ema_scope("Plotting Inpaint"):
1249
- samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,
1250
- ddim_steps=ddim_steps, x0=z[:N], mask=mask)
1251
- x_samples = self.decode_first_stage(samples.to(self.device))
1252
- log["samples_inpainting"] = x_samples
1253
- log["mask"] = mask
1254
-
1255
- # outpaint
1256
- mask = 1. - mask
1257
- with ema_scope("Plotting Outpaint"):
1258
- samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,
1259
- ddim_steps=ddim_steps, x0=z[:N], mask=mask)
1260
- x_samples = self.decode_first_stage(samples.to(self.device))
1261
- log["samples_outpainting"] = x_samples
1262
-
1263
- if plot_progressive_rows:
1264
- with ema_scope("Plotting Progressives"):
1265
- img, progressives = self.progressive_denoising(c,
1266
- shape=(self.channels, self.image_size, self.image_size),
1267
- batch_size=N)
1268
- prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation")
1269
- log["progressive_row"] = prog_row
1270
-
1271
- if return_keys:
1272
- if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
1273
- return log
1274
- else:
1275
- return {key: log[key] for key in return_keys}
1276
- return log
1277
-
1278
- def configure_optimizers(self):
1279
- lr = self.learning_rate
1280
- params = list(self.model.parameters())
1281
- if self.cond_stage_trainable:
1282
- print(f"{self.__class__.__name__}: Also optimizing conditioner params!")
1283
- params = params + list(self.cond_stage_model.parameters())
1284
- if self.learn_logvar:
1285
- print('Diffusion model optimizing logvar')
1286
- params.append(self.logvar)
1287
- opt = torch.optim.AdamW(params, lr=lr)
1288
- if self.use_scheduler:
1289
- assert 'target' in self.scheduler_config
1290
- scheduler = instantiate_from_config(self.scheduler_config)
1291
-
1292
- print("Setting up LambdaLR scheduler...")
1293
- scheduler = [
1294
- {
1295
- 'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule),
1296
- 'interval': 'step',
1297
- 'frequency': 1
1298
- }]
1299
- return [opt], scheduler
1300
- return opt
1301
-
1302
- @torch.no_grad()
1303
- def to_rgb(self, x):
1304
- x = x.float()
1305
- if not hasattr(self, "colorize"):
1306
- self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x)
1307
- x = nn.functional.conv2d(x, weight=self.colorize)
1308
- x = 2. * (x - x.min()) / (x.max() - x.min()) - 1.
1309
- return x
1310
-
1311
-
1312
- class DiffusionWrapper(pl.LightningModule):
1313
- def __init__(self, diff_model_config, conditioning_key):
1314
- super().__init__()
1315
- self.sequential_cross_attn = diff_model_config.pop("sequential_crossattn", False)
1316
- self.diffusion_model = instantiate_from_config(diff_model_config)
1317
- self.conditioning_key = conditioning_key
1318
- assert self.conditioning_key in [None, 'concat', 'crossattn', 'hybrid', 'adm', 'hybrid-adm', 'crossattn-adm']
1319
-
1320
- def forward(self, x, t, c_concat: list = None, c_crossattn: list = None, c_adm=None):
1321
- if self.conditioning_key is None:
1322
- out = self.diffusion_model(x, t)
1323
- elif self.conditioning_key == 'concat':
1324
- xc = torch.cat([x] + c_concat, dim=1)
1325
- out = self.diffusion_model(xc, t)
1326
- elif self.conditioning_key == 'crossattn':
1327
- if not self.sequential_cross_attn:
1328
- cc = torch.cat(c_crossattn, 1)
1329
- else:
1330
- cc = c_crossattn
1331
- out = self.diffusion_model(x, t, context=cc)
1332
- elif self.conditioning_key == 'hybrid':
1333
- xc = torch.cat([x] + c_concat, dim=1)
1334
- cc = torch.cat(c_crossattn, 1)
1335
- out = self.diffusion_model(xc, t, context=cc)
1336
- elif self.conditioning_key == 'hybrid-adm':
1337
- assert c_adm is not None
1338
- xc = torch.cat([x] + c_concat, dim=1)
1339
- cc = torch.cat(c_crossattn, 1)
1340
- out = self.diffusion_model(xc, t, context=cc, y=c_adm)
1341
- elif self.conditioning_key == 'crossattn-adm':
1342
- assert c_adm is not None
1343
- cc = torch.cat(c_crossattn, 1)
1344
- out = self.diffusion_model(x, t, context=cc, y=c_adm)
1345
- elif self.conditioning_key == 'adm':
1346
- cc = c_crossattn[0]
1347
- out = self.diffusion_model(x, t, y=cc)
1348
- else:
1349
- raise NotImplementedError()
1350
-
1351
- return out
1352
-
1353
-
1354
- class LatentUpscaleDiffusion(LatentDiffusion):
1355
- def __init__(self, *args, low_scale_config, low_scale_key="LR", noise_level_key=None, **kwargs):
1356
- super().__init__(*args, **kwargs)
1357
- # assumes that neither the cond_stage nor the low_scale_model contain trainable params
1358
- assert not self.cond_stage_trainable
1359
- self.instantiate_low_stage(low_scale_config)
1360
- self.low_scale_key = low_scale_key
1361
- self.noise_level_key = noise_level_key
1362
-
1363
- def instantiate_low_stage(self, config):
1364
- model = instantiate_from_config(config)
1365
- self.low_scale_model = model.eval()
1366
- self.low_scale_model.train = disabled_train
1367
- for param in self.low_scale_model.parameters():
1368
- param.requires_grad = False
1369
-
1370
- @torch.no_grad()
1371
- def get_input(self, batch, k, cond_key=None, bs=None, log_mode=False):
1372
- if not log_mode:
1373
- z, c = super().get_input(batch, k, force_c_encode=True, bs=bs)
1374
- else:
1375
- z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True,
1376
- force_c_encode=True, return_original_cond=True, bs=bs)
1377
- x_low = batch[self.low_scale_key][:bs]
1378
- x_low = rearrange(x_low, 'b h w c -> b c h w')
1379
- x_low = x_low.to(memory_format=torch.contiguous_format).float()
1380
- zx, noise_level = self.low_scale_model(x_low)
1381
- if self.noise_level_key is not None:
1382
- # get noise level from batch instead, e.g. when extracting a custom noise level for bsr
1383
- raise NotImplementedError('TODO')
1384
-
1385
- all_conds = {"c_concat": [zx], "c_crossattn": [c], "c_adm": noise_level}
1386
- if log_mode:
1387
- # TODO: maybe disable if too expensive
1388
- x_low_rec = self.low_scale_model.decode(zx)
1389
- return z, all_conds, x, xrec, xc, x_low, x_low_rec, noise_level
1390
- return z, all_conds
1391
-
1392
- @torch.no_grad()
1393
- def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None,
1394
- plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True,
1395
- unconditional_guidance_scale=1., unconditional_guidance_label=None, use_ema_scope=True,
1396
- **kwargs):
1397
- ema_scope = self.ema_scope if use_ema_scope else nullcontext
1398
- use_ddim = ddim_steps is not None
1399
-
1400
- log = dict()
1401
- z, c, x, xrec, xc, x_low, x_low_rec, noise_level = self.get_input(batch, self.first_stage_key, bs=N,
1402
- log_mode=True)
1403
- N = min(x.shape[0], N)
1404
- n_row = min(x.shape[0], n_row)
1405
- log["inputs"] = x
1406
- log["reconstruction"] = xrec
1407
- log["x_lr"] = x_low
1408
- log[f"x_lr_rec_@noise_levels{'-'.join(map(lambda x: str(x), list(noise_level.cpu().numpy())))}"] = x_low_rec
1409
- if self.model.conditioning_key is not None:
1410
- if hasattr(self.cond_stage_model, "decode"):
1411
- xc = self.cond_stage_model.decode(c)
1412
- log["conditioning"] = xc
1413
- elif self.cond_stage_key in ["caption", "txt"]:
1414
- xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25)
1415
- log["conditioning"] = xc
1416
- elif self.cond_stage_key in ['class_label', 'cls']:
1417
- xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25)
1418
- log['conditioning'] = xc
1419
- elif isimage(xc):
1420
- log["conditioning"] = xc
1421
- if ismap(xc):
1422
- log["original_conditioning"] = self.to_rgb(xc)
1423
-
1424
- if plot_diffusion_rows:
1425
- # get diffusion row
1426
- diffusion_row = list()
1427
- z_start = z[:n_row]
1428
- for t in range(self.num_timesteps):
1429
- if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
1430
- t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
1431
- t = t.to(self.device).long()
1432
- noise = torch.randn_like(z_start)
1433
- z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
1434
- diffusion_row.append(self.decode_first_stage(z_noisy))
1435
-
1436
- diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W
1437
- diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
1438
- diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
1439
- diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
1440
- log["diffusion_row"] = diffusion_grid
1441
-
1442
- if sample:
1443
- # get denoise row
1444
- with ema_scope("Sampling"):
1445
- samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
1446
- ddim_steps=ddim_steps, eta=ddim_eta)
1447
- # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)
1448
- x_samples = self.decode_first_stage(samples)
1449
- log["samples"] = x_samples
1450
- if plot_denoise_rows:
1451
- denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
1452
- log["denoise_row"] = denoise_grid
1453
-
1454
- if unconditional_guidance_scale > 1.0:
1455
- uc_tmp = self.get_unconditional_conditioning(N, unconditional_guidance_label)
1456
- # TODO explore better "unconditional" choices for the other keys
1457
- # maybe guide away from empty text label and highest noise level and maximally degraded zx?
1458
- uc = dict()
1459
- for k in c:
1460
- if k == "c_crossattn":
1461
- assert isinstance(c[k], list) and len(c[k]) == 1
1462
- uc[k] = [uc_tmp]
1463
- elif k == "c_adm": # todo: only run with text-based guidance?
1464
- assert isinstance(c[k], torch.Tensor)
1465
- #uc[k] = torch.ones_like(c[k]) * self.low_scale_model.max_noise_level
1466
- uc[k] = c[k]
1467
- elif isinstance(c[k], list):
1468
- uc[k] = [c[k][i] for i in range(len(c[k]))]
1469
- else:
1470
- uc[k] = c[k]
1471
-
1472
- with ema_scope("Sampling with classifier-free guidance"):
1473
- samples_cfg, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
1474
- ddim_steps=ddim_steps, eta=ddim_eta,
1475
- unconditional_guidance_scale=unconditional_guidance_scale,
1476
- unconditional_conditioning=uc,
1477
- )
1478
- x_samples_cfg = self.decode_first_stage(samples_cfg)
1479
- log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg
1480
-
1481
- if plot_progressive_rows:
1482
- with ema_scope("Plotting Progressives"):
1483
- img, progressives = self.progressive_denoising(c,
1484
- shape=(self.channels, self.image_size, self.image_size),
1485
- batch_size=N)
1486
- prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation")
1487
- log["progressive_row"] = prog_row
1488
-
1489
- return log
1490
-
1491
-
1492
- class LatentFinetuneDiffusion(LatentDiffusion):
1493
- """
1494
- Basis for different finetunes, such as inpainting or depth2image
1495
- To disable finetuning mode, set finetune_keys to None
1496
- """
1497
-
1498
- def __init__(self,
1499
- concat_keys: tuple,
1500
- finetune_keys=("model.diffusion_model.input_blocks.0.0.weight",
1501
- "model_ema.diffusion_modelinput_blocks00weight"
1502
- ),
1503
- keep_finetune_dims=4,
1504
- # if model was trained without concat mode before and we would like to keep these channels
1505
- c_concat_log_start=None, # to log reconstruction of c_concat codes
1506
- c_concat_log_end=None,
1507
- *args, **kwargs
1508
- ):
1509
- ckpt_path = kwargs.pop("ckpt_path", None)
1510
- ignore_keys = kwargs.pop("ignore_keys", list())
1511
- super().__init__(*args, **kwargs)
1512
- self.finetune_keys = finetune_keys
1513
- self.concat_keys = concat_keys
1514
- self.keep_dims = keep_finetune_dims
1515
- self.c_concat_log_start = c_concat_log_start
1516
- self.c_concat_log_end = c_concat_log_end
1517
- if exists(self.finetune_keys): assert exists(ckpt_path), 'can only finetune from a given checkpoint'
1518
- if exists(ckpt_path):
1519
- self.init_from_ckpt(ckpt_path, ignore_keys)
1520
-
1521
- def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
1522
- sd = torch.load(path, map_location="cpu")
1523
- if "state_dict" in list(sd.keys()):
1524
- sd = sd["state_dict"]
1525
- keys = list(sd.keys())
1526
- for k in keys:
1527
- for ik in ignore_keys:
1528
- if k.startswith(ik):
1529
- print("Deleting key {} from state_dict.".format(k))
1530
- del sd[k]
1531
-
1532
- # make it explicit, finetune by including extra input channels
1533
- if exists(self.finetune_keys) and k in self.finetune_keys:
1534
- new_entry = None
1535
- for name, param in self.named_parameters():
1536
- if name in self.finetune_keys:
1537
- print(
1538
- f"modifying key '{name}' and keeping its original {self.keep_dims} (channels) dimensions only")
1539
- new_entry = torch.zeros_like(param) # zero init
1540
- assert exists(new_entry), 'did not find matching parameter to modify'
1541
- new_entry[:, :self.keep_dims, ...] = sd[k]
1542
- sd[k] = new_entry
1543
-
1544
- missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
1545
- sd, strict=False)
1546
- print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
1547
- if len(missing) > 0:
1548
- print(f"Missing Keys: {missing}")
1549
- if len(unexpected) > 0:
1550
- print(f"Unexpected Keys: {unexpected}")
1551
-
1552
- @torch.no_grad()
1553
- def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None,
1554
- quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,
1555
- plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None,
1556
- use_ema_scope=True,
1557
- **kwargs):
1558
- ema_scope = self.ema_scope if use_ema_scope else nullcontext
1559
- use_ddim = ddim_steps is not None
1560
-
1561
- log = dict()
1562
- z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, bs=N, return_first_stage_outputs=True)
1563
- c_cat, c = c["c_concat"][0], c["c_crossattn"][0]
1564
- N = min(x.shape[0], N)
1565
- n_row = min(x.shape[0], n_row)
1566
- log["inputs"] = x
1567
- log["reconstruction"] = xrec
1568
- if self.model.conditioning_key is not None:
1569
- if hasattr(self.cond_stage_model, "decode"):
1570
- xc = self.cond_stage_model.decode(c)
1571
- log["conditioning"] = xc
1572
- elif self.cond_stage_key in ["caption", "txt"]:
1573
- xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25)
1574
- log["conditioning"] = xc
1575
- elif self.cond_stage_key in ['class_label', 'cls']:
1576
- xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25)
1577
- log['conditioning'] = xc
1578
- elif isimage(xc):
1579
- log["conditioning"] = xc
1580
- if ismap(xc):
1581
- log["original_conditioning"] = self.to_rgb(xc)
1582
-
1583
- if not (self.c_concat_log_start is None and self.c_concat_log_end is None):
1584
- log["c_concat_decoded"] = self.decode_first_stage(c_cat[:, self.c_concat_log_start:self.c_concat_log_end])
1585
-
1586
- if plot_diffusion_rows:
1587
- # get diffusion row
1588
- diffusion_row = list()
1589
- z_start = z[:n_row]
1590
- for t in range(self.num_timesteps):
1591
- if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
1592
- t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
1593
- t = t.to(self.device).long()
1594
- noise = torch.randn_like(z_start)
1595
- z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
1596
- diffusion_row.append(self.decode_first_stage(z_noisy))
1597
-
1598
- diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W
1599
- diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
1600
- diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
1601
- diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
1602
- log["diffusion_row"] = diffusion_grid
1603
-
1604
- if sample:
1605
- # get denoise row
1606
- with ema_scope("Sampling"):
1607
- samples, z_denoise_row = self.sample_log(cond={"c_concat": [c_cat], "c_crossattn": [c]},
1608
- batch_size=N, ddim=use_ddim,
1609
- ddim_steps=ddim_steps, eta=ddim_eta)
1610
- # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)
1611
- x_samples = self.decode_first_stage(samples)
1612
- log["samples"] = x_samples
1613
- if plot_denoise_rows:
1614
- denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
1615
- log["denoise_row"] = denoise_grid
1616
-
1617
- if unconditional_guidance_scale > 1.0:
1618
- uc_cross = self.get_unconditional_conditioning(N, unconditional_guidance_label)
1619
- uc_cat = c_cat
1620
- uc_full = {"c_concat": [uc_cat], "c_crossattn": [uc_cross]}
1621
- with ema_scope("Sampling with classifier-free guidance"):
1622
- samples_cfg, _ = self.sample_log(cond={"c_concat": [c_cat], "c_crossattn": [c]},
1623
- batch_size=N, ddim=use_ddim,
1624
- ddim_steps=ddim_steps, eta=ddim_eta,
1625
- unconditional_guidance_scale=unconditional_guidance_scale,
1626
- unconditional_conditioning=uc_full,
1627
- )
1628
- x_samples_cfg = self.decode_first_stage(samples_cfg)
1629
- log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg
1630
-
1631
- return log
1632
-
1633
-
1634
- class LatentInpaintDiffusion(LatentFinetuneDiffusion):
1635
- """
1636
- can either run as pure inpainting model (only concat mode) or with mixed conditionings,
1637
- e.g. mask as concat and text via cross-attn.
1638
- To disable finetuning mode, set finetune_keys to None
1639
- """
1640
-
1641
- def __init__(self,
1642
- concat_keys=("mask", "masked_image"),
1643
- masked_image_key="masked_image",
1644
- *args, **kwargs
1645
- ):
1646
- super().__init__(concat_keys, *args, **kwargs)
1647
- self.masked_image_key = masked_image_key
1648
- assert self.masked_image_key in concat_keys
1649
-
1650
- @torch.no_grad()
1651
- def get_input(self, batch, k, cond_key=None, bs=None, return_first_stage_outputs=False):
1652
- # note: restricted to non-trainable encoders currently
1653
- assert not self.cond_stage_trainable, 'trainable cond stages not yet supported for inpainting'
1654
- z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True,
1655
- force_c_encode=True, return_original_cond=True, bs=bs)
1656
-
1657
- assert exists(self.concat_keys)
1658
- c_cat = list()
1659
- for ck in self.concat_keys:
1660
- cc = rearrange(batch[ck], 'b h w c -> b c h w').to(memory_format=torch.contiguous_format).float()
1661
- if bs is not None:
1662
- cc = cc[:bs]
1663
- cc = cc.to(self.device)
1664
- bchw = z.shape
1665
- if ck != self.masked_image_key:
1666
- cc = torch.nn.functional.interpolate(cc, size=bchw[-2:])
1667
- else:
1668
- cc = self.get_first_stage_encoding(self.encode_first_stage(cc))
1669
- c_cat.append(cc)
1670
- c_cat = torch.cat(c_cat, dim=1)
1671
- all_conds = {"c_concat": [c_cat], "c_crossattn": [c]}
1672
- if return_first_stage_outputs:
1673
- return z, all_conds, x, xrec, xc
1674
- return z, all_conds
1675
-
1676
- @torch.no_grad()
1677
- def log_images(self, *args, **kwargs):
1678
- log = super(LatentInpaintDiffusion, self).log_images(*args, **kwargs)
1679
- log["masked_image"] = rearrange(args[0]["masked_image"],
1680
- 'b h w c -> b c h w').to(memory_format=torch.contiguous_format).float()
1681
- return log
1682
-
1683
-
1684
- class LatentDepth2ImageDiffusion(LatentFinetuneDiffusion):
1685
- """
1686
- condition on monocular depth estimation
1687
- """
1688
-
1689
- def __init__(self, depth_stage_config, concat_keys=("midas_in",), *args, **kwargs):
1690
- super().__init__(concat_keys=concat_keys, *args, **kwargs)
1691
- self.depth_model = instantiate_from_config(depth_stage_config)
1692
- self.depth_stage_key = concat_keys[0]
1693
-
1694
- @torch.no_grad()
1695
- def get_input(self, batch, k, cond_key=None, bs=None, return_first_stage_outputs=False):
1696
- # note: restricted to non-trainable encoders currently
1697
- assert not self.cond_stage_trainable, 'trainable cond stages not yet supported for depth2img'
1698
- z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True,
1699
- force_c_encode=True, return_original_cond=True, bs=bs)
1700
-
1701
- assert exists(self.concat_keys)
1702
- assert len(self.concat_keys) == 1
1703
- c_cat = list()
1704
- for ck in self.concat_keys:
1705
- cc = batch[ck]
1706
- if bs is not None:
1707
- cc = cc[:bs]
1708
- cc = cc.to(self.device)
1709
- cc = self.depth_model(cc)
1710
- cc = torch.nn.functional.interpolate(
1711
- cc,
1712
- size=z.shape[2:],
1713
- mode="bicubic",
1714
- align_corners=False,
1715
- )
1716
-
1717
- depth_min, depth_max = torch.amin(cc, dim=[1, 2, 3], keepdim=True), torch.amax(cc, dim=[1, 2, 3],
1718
- keepdim=True)
1719
- cc = 2. * (cc - depth_min) / (depth_max - depth_min + 0.001) - 1.
1720
- c_cat.append(cc)
1721
- c_cat = torch.cat(c_cat, dim=1)
1722
- all_conds = {"c_concat": [c_cat], "c_crossattn": [c]}
1723
- if return_first_stage_outputs:
1724
- return z, all_conds, x, xrec, xc
1725
- return z, all_conds
1726
-
1727
- @torch.no_grad()
1728
- def log_images(self, *args, **kwargs):
1729
- log = super().log_images(*args, **kwargs)
1730
- depth = self.depth_model(args[0][self.depth_stage_key])
1731
- depth_min, depth_max = torch.amin(depth, dim=[1, 2, 3], keepdim=True), \
1732
- torch.amax(depth, dim=[1, 2, 3], keepdim=True)
1733
- log["depth"] = 2. * (depth - depth_min) / (depth_max - depth_min) - 1.
1734
- return log
1735
-
1736
-
1737
- class LatentUpscaleFinetuneDiffusion(LatentFinetuneDiffusion):
1738
- """
1739
- condition on low-res image (and optionally on some spatial noise augmentation)
1740
- """
1741
- def __init__(self, concat_keys=("lr",), reshuffle_patch_size=None,
1742
- low_scale_config=None, low_scale_key=None, *args, **kwargs):
1743
- super().__init__(concat_keys=concat_keys, *args, **kwargs)
1744
- self.reshuffle_patch_size = reshuffle_patch_size
1745
- self.low_scale_model = None
1746
- if low_scale_config is not None:
1747
- print("Initializing a low-scale model")
1748
- assert exists(low_scale_key)
1749
- self.instantiate_low_stage(low_scale_config)
1750
- self.low_scale_key = low_scale_key
1751
-
1752
- def instantiate_low_stage(self, config):
1753
- model = instantiate_from_config(config)
1754
- self.low_scale_model = model.eval()
1755
- self.low_scale_model.train = disabled_train
1756
- for param in self.low_scale_model.parameters():
1757
- param.requires_grad = False
1758
-
1759
- @torch.no_grad()
1760
- def get_input(self, batch, k, cond_key=None, bs=None, return_first_stage_outputs=False):
1761
- # note: restricted to non-trainable encoders currently
1762
- assert not self.cond_stage_trainable, 'trainable cond stages not yet supported for upscaling-ft'
1763
- z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True,
1764
- force_c_encode=True, return_original_cond=True, bs=bs)
1765
-
1766
- assert exists(self.concat_keys)
1767
- assert len(self.concat_keys) == 1
1768
- # optionally make spatial noise_level here
1769
- c_cat = list()
1770
- noise_level = None
1771
- for ck in self.concat_keys:
1772
- cc = batch[ck]
1773
- cc = rearrange(cc, 'b h w c -> b c h w')
1774
- if exists(self.reshuffle_patch_size):
1775
- assert isinstance(self.reshuffle_patch_size, int)
1776
- cc = rearrange(cc, 'b c (p1 h) (p2 w) -> b (p1 p2 c) h w',
1777
- p1=self.reshuffle_patch_size, p2=self.reshuffle_patch_size)
1778
- if bs is not None:
1779
- cc = cc[:bs]
1780
- cc = cc.to(self.device)
1781
- if exists(self.low_scale_model) and ck == self.low_scale_key:
1782
- cc, noise_level = self.low_scale_model(cc)
1783
- c_cat.append(cc)
1784
- c_cat = torch.cat(c_cat, dim=1)
1785
- if exists(noise_level):
1786
- all_conds = {"c_concat": [c_cat], "c_crossattn": [c], "c_adm": noise_level}
1787
- else:
1788
- all_conds = {"c_concat": [c_cat], "c_crossattn": [c]}
1789
- if return_first_stage_outputs:
1790
- return z, all_conds, x, xrec, xc
1791
- return z, all_conds
1792
-
1793
- @torch.no_grad()
1794
- def log_images(self, *args, **kwargs):
1795
- log = super().log_images(*args, **kwargs)
1796
- log["lr"] = rearrange(args[0]["lr"], 'b h w c -> b c h w')
1797
- return log
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Arthur678/vits-uma-genshin-honkai/README.md DELETED
@@ -1,11 +0,0 @@
1
- ---
2
- license: apache-2.0
3
- title: ' vits-uma-genshin-honkai'
4
- sdk: gradio
5
- sdk_version: 3.7
6
- emoji: 🐨
7
- colorTo: yellow
8
- pinned: false
9
- app_file: app.py
10
- duplicated_from: ikechan8370/vits-uma-genshin-honkai
11
- ---
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/colorama/tests/ansitowin32_test.py DELETED
@@ -1,294 +0,0 @@
1
- # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
2
- from io import StringIO, TextIOWrapper
3
- from unittest import TestCase, main
4
- try:
5
- from contextlib import ExitStack
6
- except ImportError:
7
- # python 2
8
- from contextlib2 import ExitStack
9
-
10
- try:
11
- from unittest.mock import MagicMock, Mock, patch
12
- except ImportError:
13
- from mock import MagicMock, Mock, patch
14
-
15
- from ..ansitowin32 import AnsiToWin32, StreamWrapper
16
- from ..win32 import ENABLE_VIRTUAL_TERMINAL_PROCESSING
17
- from .utils import osname
18
-
19
-
20
- class StreamWrapperTest(TestCase):
21
-
22
- def testIsAProxy(self):
23
- mockStream = Mock()
24
- wrapper = StreamWrapper(mockStream, None)
25
- self.assertTrue( wrapper.random_attr is mockStream.random_attr )
26
-
27
- def testDelegatesWrite(self):
28
- mockStream = Mock()
29
- mockConverter = Mock()
30
- wrapper = StreamWrapper(mockStream, mockConverter)
31
- wrapper.write('hello')
32
- self.assertTrue(mockConverter.write.call_args, (('hello',), {}))
33
-
34
- def testDelegatesContext(self):
35
- mockConverter = Mock()
36
- s = StringIO()
37
- with StreamWrapper(s, mockConverter) as fp:
38
- fp.write(u'hello')
39
- self.assertTrue(s.closed)
40
-
41
- def testProxyNoContextManager(self):
42
- mockStream = MagicMock()
43
- mockStream.__enter__.side_effect = AttributeError()
44
- mockConverter = Mock()
45
- with self.assertRaises(AttributeError) as excinfo:
46
- with StreamWrapper(mockStream, mockConverter) as wrapper:
47
- wrapper.write('hello')
48
-
49
- def test_closed_shouldnt_raise_on_closed_stream(self):
50
- stream = StringIO()
51
- stream.close()
52
- wrapper = StreamWrapper(stream, None)
53
- self.assertEqual(wrapper.closed, True)
54
-
55
- def test_closed_shouldnt_raise_on_detached_stream(self):
56
- stream = TextIOWrapper(StringIO())
57
- stream.detach()
58
- wrapper = StreamWrapper(stream, None)
59
- self.assertEqual(wrapper.closed, True)
60
-
61
- class AnsiToWin32Test(TestCase):
62
-
63
- def testInit(self):
64
- mockStdout = Mock()
65
- auto = Mock()
66
- stream = AnsiToWin32(mockStdout, autoreset=auto)
67
- self.assertEqual(stream.wrapped, mockStdout)
68
- self.assertEqual(stream.autoreset, auto)
69
-
70
- @patch('colorama.ansitowin32.winterm', None)
71
- @patch('colorama.ansitowin32.winapi_test', lambda *_: True)
72
- def testStripIsTrueOnWindows(self):
73
- with osname('nt'):
74
- mockStdout = Mock()
75
- stream = AnsiToWin32(mockStdout)
76
- self.assertTrue(stream.strip)
77
-
78
- def testStripIsFalseOffWindows(self):
79
- with osname('posix'):
80
- mockStdout = Mock(closed=False)
81
- stream = AnsiToWin32(mockStdout)
82
- self.assertFalse(stream.strip)
83
-
84
- def testWriteStripsAnsi(self):
85
- mockStdout = Mock()
86
- stream = AnsiToWin32(mockStdout)
87
- stream.wrapped = Mock()
88
- stream.write_and_convert = Mock()
89
- stream.strip = True
90
-
91
- stream.write('abc')
92
-
93
- self.assertFalse(stream.wrapped.write.called)
94
- self.assertEqual(stream.write_and_convert.call_args, (('abc',), {}))
95
-
96
- def testWriteDoesNotStripAnsi(self):
97
- mockStdout = Mock()
98
- stream = AnsiToWin32(mockStdout)
99
- stream.wrapped = Mock()
100
- stream.write_and_convert = Mock()
101
- stream.strip = False
102
- stream.convert = False
103
-
104
- stream.write('abc')
105
-
106
- self.assertFalse(stream.write_and_convert.called)
107
- self.assertEqual(stream.wrapped.write.call_args, (('abc',), {}))
108
-
109
- def assert_autoresets(self, convert, autoreset=True):
110
- stream = AnsiToWin32(Mock())
111
- stream.convert = convert
112
- stream.reset_all = Mock()
113
- stream.autoreset = autoreset
114
- stream.winterm = Mock()
115
-
116
- stream.write('abc')
117
-
118
- self.assertEqual(stream.reset_all.called, autoreset)
119
-
120
- def testWriteAutoresets(self):
121
- self.assert_autoresets(convert=True)
122
- self.assert_autoresets(convert=False)
123
- self.assert_autoresets(convert=True, autoreset=False)
124
- self.assert_autoresets(convert=False, autoreset=False)
125
-
126
- def testWriteAndConvertWritesPlainText(self):
127
- stream = AnsiToWin32(Mock())
128
- stream.write_and_convert( 'abc' )
129
- self.assertEqual( stream.wrapped.write.call_args, (('abc',), {}) )
130
-
131
- def testWriteAndConvertStripsAllValidAnsi(self):
132
- stream = AnsiToWin32(Mock())
133
- stream.call_win32 = Mock()
134
- data = [
135
- 'abc\033[mdef',
136
- 'abc\033[0mdef',
137
- 'abc\033[2mdef',
138
- 'abc\033[02mdef',
139
- 'abc\033[002mdef',
140
- 'abc\033[40mdef',
141
- 'abc\033[040mdef',
142
- 'abc\033[0;1mdef',
143
- 'abc\033[40;50mdef',
144
- 'abc\033[50;30;40mdef',
145
- 'abc\033[Adef',
146
- 'abc\033[0Gdef',
147
- 'abc\033[1;20;128Hdef',
148
- ]
149
- for datum in data:
150
- stream.wrapped.write.reset_mock()
151
- stream.write_and_convert( datum )
152
- self.assertEqual(
153
- [args[0] for args in stream.wrapped.write.call_args_list],
154
- [ ('abc',), ('def',) ]
155
- )
156
-
157
- def testWriteAndConvertSkipsEmptySnippets(self):
158
- stream = AnsiToWin32(Mock())
159
- stream.call_win32 = Mock()
160
- stream.write_and_convert( '\033[40m\033[41m' )
161
- self.assertFalse( stream.wrapped.write.called )
162
-
163
- def testWriteAndConvertCallsWin32WithParamsAndCommand(self):
164
- stream = AnsiToWin32(Mock())
165
- stream.convert = True
166
- stream.call_win32 = Mock()
167
- stream.extract_params = Mock(return_value='params')
168
- data = {
169
- 'abc\033[adef': ('a', 'params'),
170
- 'abc\033[;;bdef': ('b', 'params'),
171
- 'abc\033[0cdef': ('c', 'params'),
172
- 'abc\033[;;0;;Gdef': ('G', 'params'),
173
- 'abc\033[1;20;128Hdef': ('H', 'params'),
174
- }
175
- for datum, expected in data.items():
176
- stream.call_win32.reset_mock()
177
- stream.write_and_convert( datum )
178
- self.assertEqual( stream.call_win32.call_args[0], expected )
179
-
180
- def test_reset_all_shouldnt_raise_on_closed_orig_stdout(self):
181
- stream = StringIO()
182
- converter = AnsiToWin32(stream)
183
- stream.close()
184
-
185
- converter.reset_all()
186
-
187
- def test_wrap_shouldnt_raise_on_closed_orig_stdout(self):
188
- stream = StringIO()
189
- stream.close()
190
- with \
191
- patch("colorama.ansitowin32.os.name", "nt"), \
192
- patch("colorama.ansitowin32.winapi_test", lambda: True):
193
- converter = AnsiToWin32(stream)
194
- self.assertTrue(converter.strip)
195
- self.assertFalse(converter.convert)
196
-
197
- def test_wrap_shouldnt_raise_on_missing_closed_attr(self):
198
- with \
199
- patch("colorama.ansitowin32.os.name", "nt"), \
200
- patch("colorama.ansitowin32.winapi_test", lambda: True):
201
- converter = AnsiToWin32(object())
202
- self.assertTrue(converter.strip)
203
- self.assertFalse(converter.convert)
204
-
205
- def testExtractParams(self):
206
- stream = AnsiToWin32(Mock())
207
- data = {
208
- '': (0,),
209
- ';;': (0,),
210
- '2': (2,),
211
- ';;002;;': (2,),
212
- '0;1': (0, 1),
213
- ';;003;;456;;': (3, 456),
214
- '11;22;33;44;55': (11, 22, 33, 44, 55),
215
- }
216
- for datum, expected in data.items():
217
- self.assertEqual(stream.extract_params('m', datum), expected)
218
-
219
- def testCallWin32UsesLookup(self):
220
- listener = Mock()
221
- stream = AnsiToWin32(listener)
222
- stream.win32_calls = {
223
- 1: (lambda *_, **__: listener(11),),
224
- 2: (lambda *_, **__: listener(22),),
225
- 3: (lambda *_, **__: listener(33),),
226
- }
227
- stream.call_win32('m', (3, 1, 99, 2))
228
- self.assertEqual(
229
- [a[0][0] for a in listener.call_args_list],
230
- [33, 11, 22] )
231
-
232
- def test_osc_codes(self):
233
- mockStdout = Mock()
234
- stream = AnsiToWin32(mockStdout, convert=True)
235
- with patch('colorama.ansitowin32.winterm') as winterm:
236
- data = [
237
- '\033]0\x07', # missing arguments
238
- '\033]0;foo\x08', # wrong OSC command
239
- '\033]0;colorama_test_title\x07', # should work
240
- '\033]1;colorama_test_title\x07', # wrong set command
241
- '\033]2;colorama_test_title\x07', # should work
242
- '\033]' + ';' * 64 + '\x08', # see issue #247
243
- ]
244
- for code in data:
245
- stream.write(code)
246
- self.assertEqual(winterm.set_title.call_count, 2)
247
-
248
- def test_native_windows_ansi(self):
249
- with ExitStack() as stack:
250
- def p(a, b):
251
- stack.enter_context(patch(a, b, create=True))
252
- # Pretend to be on Windows
253
- p("colorama.ansitowin32.os.name", "nt")
254
- p("colorama.ansitowin32.winapi_test", lambda: True)
255
- p("colorama.win32.winapi_test", lambda: True)
256
- p("colorama.winterm.win32.windll", "non-None")
257
- p("colorama.winterm.get_osfhandle", lambda _: 1234)
258
-
259
- # Pretend that our mock stream has native ANSI support
260
- p(
261
- "colorama.winterm.win32.GetConsoleMode",
262
- lambda _: ENABLE_VIRTUAL_TERMINAL_PROCESSING,
263
- )
264
- SetConsoleMode = Mock()
265
- p("colorama.winterm.win32.SetConsoleMode", SetConsoleMode)
266
-
267
- stdout = Mock()
268
- stdout.closed = False
269
- stdout.isatty.return_value = True
270
- stdout.fileno.return_value = 1
271
-
272
- # Our fake console says it has native vt support, so AnsiToWin32 should
273
- # enable that support and do nothing else.
274
- stream = AnsiToWin32(stdout)
275
- SetConsoleMode.assert_called_with(1234, ENABLE_VIRTUAL_TERMINAL_PROCESSING)
276
- self.assertFalse(stream.strip)
277
- self.assertFalse(stream.convert)
278
- self.assertFalse(stream.should_wrap())
279
-
280
- # Now let's pretend we're on an old Windows console, that doesn't have
281
- # native ANSI support.
282
- p("colorama.winterm.win32.GetConsoleMode", lambda _: 0)
283
- SetConsoleMode = Mock()
284
- p("colorama.winterm.win32.SetConsoleMode", SetConsoleMode)
285
-
286
- stream = AnsiToWin32(stdout)
287
- SetConsoleMode.assert_called_with(1234, ENABLE_VIRTUAL_TERMINAL_PROCESSING)
288
- self.assertTrue(stream.strip)
289
- self.assertTrue(stream.convert)
290
- self.assertTrue(stream.should_wrap())
291
-
292
-
293
- if __name__ == '__main__':
294
- main()
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/repr.py DELETED
@@ -1,149 +0,0 @@
1
- import inspect
2
- from functools import partial
3
- from typing import (
4
- Any,
5
- Callable,
6
- Iterable,
7
- List,
8
- Optional,
9
- Tuple,
10
- Type,
11
- TypeVar,
12
- Union,
13
- overload,
14
- )
15
-
16
- T = TypeVar("T")
17
-
18
-
19
- Result = Iterable[Union[Any, Tuple[Any], Tuple[str, Any], Tuple[str, Any, Any]]]
20
- RichReprResult = Result
21
-
22
-
23
- class ReprError(Exception):
24
- """An error occurred when attempting to build a repr."""
25
-
26
-
27
- @overload
28
- def auto(cls: Optional[Type[T]]) -> Type[T]:
29
- ...
30
-
31
-
32
- @overload
33
- def auto(*, angular: bool = False) -> Callable[[Type[T]], Type[T]]:
34
- ...
35
-
36
-
37
- def auto(
38
- cls: Optional[Type[T]] = None, *, angular: Optional[bool] = None
39
- ) -> Union[Type[T], Callable[[Type[T]], Type[T]]]:
40
- """Class decorator to create __repr__ from __rich_repr__"""
41
-
42
- def do_replace(cls: Type[T], angular: Optional[bool] = None) -> Type[T]:
43
- def auto_repr(self: T) -> str:
44
- """Create repr string from __rich_repr__"""
45
- repr_str: List[str] = []
46
- append = repr_str.append
47
-
48
- angular: bool = getattr(self.__rich_repr__, "angular", False) # type: ignore[attr-defined]
49
- for arg in self.__rich_repr__(): # type: ignore[attr-defined]
50
- if isinstance(arg, tuple):
51
- if len(arg) == 1:
52
- append(repr(arg[0]))
53
- else:
54
- key, value, *default = arg
55
- if key is None:
56
- append(repr(value))
57
- else:
58
- if default and default[0] == value:
59
- continue
60
- append(f"{key}={value!r}")
61
- else:
62
- append(repr(arg))
63
- if angular:
64
- return f"<{self.__class__.__name__} {' '.join(repr_str)}>"
65
- else:
66
- return f"{self.__class__.__name__}({', '.join(repr_str)})"
67
-
68
- def auto_rich_repr(self: Type[T]) -> Result:
69
- """Auto generate __rich_rep__ from signature of __init__"""
70
- try:
71
- signature = inspect.signature(self.__init__)
72
- for name, param in signature.parameters.items():
73
- if param.kind == param.POSITIONAL_ONLY:
74
- yield getattr(self, name)
75
- elif param.kind in (
76
- param.POSITIONAL_OR_KEYWORD,
77
- param.KEYWORD_ONLY,
78
- ):
79
- if param.default == param.empty:
80
- yield getattr(self, param.name)
81
- else:
82
- yield param.name, getattr(self, param.name), param.default
83
- except Exception as error:
84
- raise ReprError(
85
- f"Failed to auto generate __rich_repr__; {error}"
86
- ) from None
87
-
88
- if not hasattr(cls, "__rich_repr__"):
89
- auto_rich_repr.__doc__ = "Build a rich repr"
90
- cls.__rich_repr__ = auto_rich_repr # type: ignore[attr-defined]
91
-
92
- auto_repr.__doc__ = "Return repr(self)"
93
- cls.__repr__ = auto_repr # type: ignore[assignment]
94
- if angular is not None:
95
- cls.__rich_repr__.angular = angular # type: ignore[attr-defined]
96
- return cls
97
-
98
- if cls is None:
99
- return partial(do_replace, angular=angular)
100
- else:
101
- return do_replace(cls, angular=angular)
102
-
103
-
104
- @overload
105
- def rich_repr(cls: Optional[Type[T]]) -> Type[T]:
106
- ...
107
-
108
-
109
- @overload
110
- def rich_repr(*, angular: bool = False) -> Callable[[Type[T]], Type[T]]:
111
- ...
112
-
113
-
114
- def rich_repr(
115
- cls: Optional[Type[T]] = None, *, angular: bool = False
116
- ) -> Union[Type[T], Callable[[Type[T]], Type[T]]]:
117
- if cls is None:
118
- return auto(angular=angular)
119
- else:
120
- return auto(cls)
121
-
122
-
123
- if __name__ == "__main__":
124
-
125
- @auto
126
- class Foo:
127
- def __rich_repr__(self) -> Result:
128
- yield "foo"
129
- yield "bar", {"shopping": ["eggs", "ham", "pineapple"]}
130
- yield "buy", "hand sanitizer"
131
-
132
- foo = Foo()
133
- from pip._vendor.rich.console import Console
134
-
135
- console = Console()
136
-
137
- console.rule("Standard repr")
138
- console.print(foo)
139
-
140
- console.print(foo, width=60)
141
- console.print(foo, width=30)
142
-
143
- console.rule("Angular repr")
144
- Foo.__rich_repr__.angular = True # type: ignore[attr-defined]
145
-
146
- console.print(foo)
147
-
148
- console.print(foo, width=60)
149
- console.print(foo, width=30)
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/_msvccompiler.py DELETED
@@ -1,572 +0,0 @@
1
- """distutils._msvccompiler
2
-
3
- Contains MSVCCompiler, an implementation of the abstract CCompiler class
4
- for Microsoft Visual Studio 2015.
5
-
6
- The module is compatible with VS 2015 and later. You can find legacy support
7
- for older versions in distutils.msvc9compiler and distutils.msvccompiler.
8
- """
9
-
10
- # Written by Perry Stoll
11
- # hacked by Robin Becker and Thomas Heller to do a better job of
12
- # finding DevStudio (through the registry)
13
- # ported to VS 2005 and VS 2008 by Christian Heimes
14
- # ported to VS 2015 by Steve Dower
15
-
16
- import os
17
- import subprocess
18
- import contextlib
19
- import warnings
20
- import unittest.mock as mock
21
-
22
- with contextlib.suppress(ImportError):
23
- import winreg
24
-
25
- from distutils.errors import (
26
- DistutilsExecError,
27
- DistutilsPlatformError,
28
- CompileError,
29
- LibError,
30
- LinkError,
31
- )
32
- from distutils.ccompiler import CCompiler, gen_lib_options
33
- from distutils import log
34
- from distutils.util import get_platform
35
-
36
- from itertools import count
37
-
38
-
39
- def _find_vc2015():
40
- try:
41
- key = winreg.OpenKeyEx(
42
- winreg.HKEY_LOCAL_MACHINE,
43
- r"Software\Microsoft\VisualStudio\SxS\VC7",
44
- access=winreg.KEY_READ | winreg.KEY_WOW64_32KEY,
45
- )
46
- except OSError:
47
- log.debug("Visual C++ is not registered")
48
- return None, None
49
-
50
- best_version = 0
51
- best_dir = None
52
- with key:
53
- for i in count():
54
- try:
55
- v, vc_dir, vt = winreg.EnumValue(key, i)
56
- except OSError:
57
- break
58
- if v and vt == winreg.REG_SZ and os.path.isdir(vc_dir):
59
- try:
60
- version = int(float(v))
61
- except (ValueError, TypeError):
62
- continue
63
- if version >= 14 and version > best_version:
64
- best_version, best_dir = version, vc_dir
65
- return best_version, best_dir
66
-
67
-
68
- def _find_vc2017():
69
- """Returns "15, path" based on the result of invoking vswhere.exe
70
- If no install is found, returns "None, None"
71
-
72
- The version is returned to avoid unnecessarily changing the function
73
- result. It may be ignored when the path is not None.
74
-
75
- If vswhere.exe is not available, by definition, VS 2017 is not
76
- installed.
77
- """
78
- root = os.environ.get("ProgramFiles(x86)") or os.environ.get("ProgramFiles")
79
- if not root:
80
- return None, None
81
-
82
- try:
83
- path = subprocess.check_output(
84
- [
85
- os.path.join(
86
- root, "Microsoft Visual Studio", "Installer", "vswhere.exe"
87
- ),
88
- "-latest",
89
- "-prerelease",
90
- "-requires",
91
- "Microsoft.VisualStudio.Component.VC.Tools.x86.x64",
92
- "-property",
93
- "installationPath",
94
- "-products",
95
- "*",
96
- ],
97
- encoding="mbcs",
98
- errors="strict",
99
- ).strip()
100
- except (subprocess.CalledProcessError, OSError, UnicodeDecodeError):
101
- return None, None
102
-
103
- path = os.path.join(path, "VC", "Auxiliary", "Build")
104
- if os.path.isdir(path):
105
- return 15, path
106
-
107
- return None, None
108
-
109
-
110
- PLAT_SPEC_TO_RUNTIME = {
111
- 'x86': 'x86',
112
- 'x86_amd64': 'x64',
113
- 'x86_arm': 'arm',
114
- 'x86_arm64': 'arm64',
115
- }
116
-
117
-
118
- def _find_vcvarsall(plat_spec):
119
- # bpo-38597: Removed vcruntime return value
120
- _, best_dir = _find_vc2017()
121
-
122
- if not best_dir:
123
- best_version, best_dir = _find_vc2015()
124
-
125
- if not best_dir:
126
- log.debug("No suitable Visual C++ version found")
127
- return None, None
128
-
129
- vcvarsall = os.path.join(best_dir, "vcvarsall.bat")
130
- if not os.path.isfile(vcvarsall):
131
- log.debug("%s cannot be found", vcvarsall)
132
- return None, None
133
-
134
- return vcvarsall, None
135
-
136
-
137
- def _get_vc_env(plat_spec):
138
- if os.getenv("DISTUTILS_USE_SDK"):
139
- return {key.lower(): value for key, value in os.environ.items()}
140
-
141
- vcvarsall, _ = _find_vcvarsall(plat_spec)
142
- if not vcvarsall:
143
- raise DistutilsPlatformError("Unable to find vcvarsall.bat")
144
-
145
- try:
146
- out = subprocess.check_output(
147
- f'cmd /u /c "{vcvarsall}" {plat_spec} && set',
148
- stderr=subprocess.STDOUT,
149
- ).decode('utf-16le', errors='replace')
150
- except subprocess.CalledProcessError as exc:
151
- log.error(exc.output)
152
- raise DistutilsPlatformError(f"Error executing {exc.cmd}")
153
-
154
- env = {
155
- key.lower(): value
156
- for key, _, value in (line.partition('=') for line in out.splitlines())
157
- if key and value
158
- }
159
-
160
- return env
161
-
162
-
163
- def _find_exe(exe, paths=None):
164
- """Return path to an MSVC executable program.
165
-
166
- Tries to find the program in several places: first, one of the
167
- MSVC program search paths from the registry; next, the directories
168
- in the PATH environment variable. If any of those work, return an
169
- absolute path that is known to exist. If none of them work, just
170
- return the original program name, 'exe'.
171
- """
172
- if not paths:
173
- paths = os.getenv('path').split(os.pathsep)
174
- for p in paths:
175
- fn = os.path.join(os.path.abspath(p), exe)
176
- if os.path.isfile(fn):
177
- return fn
178
- return exe
179
-
180
-
181
- # A map keyed by get_platform() return values to values accepted by
182
- # 'vcvarsall.bat'. Always cross-compile from x86 to work with the
183
- # lighter-weight MSVC installs that do not include native 64-bit tools.
184
- PLAT_TO_VCVARS = {
185
- 'win32': 'x86',
186
- 'win-amd64': 'x86_amd64',
187
- 'win-arm32': 'x86_arm',
188
- 'win-arm64': 'x86_arm64',
189
- }
190
-
191
-
192
- class MSVCCompiler(CCompiler):
193
- """Concrete class that implements an interface to Microsoft Visual C++,
194
- as defined by the CCompiler abstract class."""
195
-
196
- compiler_type = 'msvc'
197
-
198
- # Just set this so CCompiler's constructor doesn't barf. We currently
199
- # don't use the 'set_executables()' bureaucracy provided by CCompiler,
200
- # as it really isn't necessary for this sort of single-compiler class.
201
- # Would be nice to have a consistent interface with UnixCCompiler,
202
- # though, so it's worth thinking about.
203
- executables = {}
204
-
205
- # Private class data (need to distinguish C from C++ source for compiler)
206
- _c_extensions = ['.c']
207
- _cpp_extensions = ['.cc', '.cpp', '.cxx']
208
- _rc_extensions = ['.rc']
209
- _mc_extensions = ['.mc']
210
-
211
- # Needed for the filename generation methods provided by the
212
- # base class, CCompiler.
213
- src_extensions = _c_extensions + _cpp_extensions + _rc_extensions + _mc_extensions
214
- res_extension = '.res'
215
- obj_extension = '.obj'
216
- static_lib_extension = '.lib'
217
- shared_lib_extension = '.dll'
218
- static_lib_format = shared_lib_format = '%s%s'
219
- exe_extension = '.exe'
220
-
221
- def __init__(self, verbose=0, dry_run=0, force=0):
222
- super().__init__(verbose, dry_run, force)
223
- # target platform (.plat_name is consistent with 'bdist')
224
- self.plat_name = None
225
- self.initialized = False
226
-
227
- @classmethod
228
- def _configure(cls, vc_env):
229
- """
230
- Set class-level include/lib dirs.
231
- """
232
- cls.include_dirs = cls._parse_path(vc_env.get('include', ''))
233
- cls.library_dirs = cls._parse_path(vc_env.get('lib', ''))
234
-
235
- @staticmethod
236
- def _parse_path(val):
237
- return [dir.rstrip(os.sep) for dir in val.split(os.pathsep) if dir]
238
-
239
- def initialize(self, plat_name=None):
240
- # multi-init means we would need to check platform same each time...
241
- assert not self.initialized, "don't init multiple times"
242
- if plat_name is None:
243
- plat_name = get_platform()
244
- # sanity check for platforms to prevent obscure errors later.
245
- if plat_name not in PLAT_TO_VCVARS:
246
- raise DistutilsPlatformError(
247
- f"--plat-name must be one of {tuple(PLAT_TO_VCVARS)}"
248
- )
249
-
250
- # Get the vcvarsall.bat spec for the requested platform.
251
- plat_spec = PLAT_TO_VCVARS[plat_name]
252
-
253
- vc_env = _get_vc_env(plat_spec)
254
- if not vc_env:
255
- raise DistutilsPlatformError(
256
- "Unable to find a compatible " "Visual Studio installation."
257
- )
258
- self._configure(vc_env)
259
-
260
- self._paths = vc_env.get('path', '')
261
- paths = self._paths.split(os.pathsep)
262
- self.cc = _find_exe("cl.exe", paths)
263
- self.linker = _find_exe("link.exe", paths)
264
- self.lib = _find_exe("lib.exe", paths)
265
- self.rc = _find_exe("rc.exe", paths) # resource compiler
266
- self.mc = _find_exe("mc.exe", paths) # message compiler
267
- self.mt = _find_exe("mt.exe", paths) # message compiler
268
-
269
- self.preprocess_options = None
270
- # bpo-38597: Always compile with dynamic linking
271
- # Future releases of Python 3.x will include all past
272
- # versions of vcruntime*.dll for compatibility.
273
- self.compile_options = ['/nologo', '/O2', '/W3', '/GL', '/DNDEBUG', '/MD']
274
-
275
- self.compile_options_debug = [
276
- '/nologo',
277
- '/Od',
278
- '/MDd',
279
- '/Zi',
280
- '/W3',
281
- '/D_DEBUG',
282
- ]
283
-
284
- ldflags = ['/nologo', '/INCREMENTAL:NO', '/LTCG']
285
-
286
- ldflags_debug = ['/nologo', '/INCREMENTAL:NO', '/LTCG', '/DEBUG:FULL']
287
-
288
- self.ldflags_exe = [*ldflags, '/MANIFEST:EMBED,ID=1']
289
- self.ldflags_exe_debug = [*ldflags_debug, '/MANIFEST:EMBED,ID=1']
290
- self.ldflags_shared = [
291
- *ldflags,
292
- '/DLL',
293
- '/MANIFEST:EMBED,ID=2',
294
- '/MANIFESTUAC:NO',
295
- ]
296
- self.ldflags_shared_debug = [
297
- *ldflags_debug,
298
- '/DLL',
299
- '/MANIFEST:EMBED,ID=2',
300
- '/MANIFESTUAC:NO',
301
- ]
302
- self.ldflags_static = [*ldflags]
303
- self.ldflags_static_debug = [*ldflags_debug]
304
-
305
- self._ldflags = {
306
- (CCompiler.EXECUTABLE, None): self.ldflags_exe,
307
- (CCompiler.EXECUTABLE, False): self.ldflags_exe,
308
- (CCompiler.EXECUTABLE, True): self.ldflags_exe_debug,
309
- (CCompiler.SHARED_OBJECT, None): self.ldflags_shared,
310
- (CCompiler.SHARED_OBJECT, False): self.ldflags_shared,
311
- (CCompiler.SHARED_OBJECT, True): self.ldflags_shared_debug,
312
- (CCompiler.SHARED_LIBRARY, None): self.ldflags_static,
313
- (CCompiler.SHARED_LIBRARY, False): self.ldflags_static,
314
- (CCompiler.SHARED_LIBRARY, True): self.ldflags_static_debug,
315
- }
316
-
317
- self.initialized = True
318
-
319
- # -- Worker methods ------------------------------------------------
320
-
321
- @property
322
- def out_extensions(self):
323
- return {
324
- **super().out_extensions,
325
- **{
326
- ext: self.res_extension
327
- for ext in self._rc_extensions + self._mc_extensions
328
- },
329
- }
330
-
331
- def compile( # noqa: C901
332
- self,
333
- sources,
334
- output_dir=None,
335
- macros=None,
336
- include_dirs=None,
337
- debug=0,
338
- extra_preargs=None,
339
- extra_postargs=None,
340
- depends=None,
341
- ):
342
-
343
- if not self.initialized:
344
- self.initialize()
345
- compile_info = self._setup_compile(
346
- output_dir, macros, include_dirs, sources, depends, extra_postargs
347
- )
348
- macros, objects, extra_postargs, pp_opts, build = compile_info
349
-
350
- compile_opts = extra_preargs or []
351
- compile_opts.append('/c')
352
- if debug:
353
- compile_opts.extend(self.compile_options_debug)
354
- else:
355
- compile_opts.extend(self.compile_options)
356
-
357
- add_cpp_opts = False
358
-
359
- for obj in objects:
360
- try:
361
- src, ext = build[obj]
362
- except KeyError:
363
- continue
364
- if debug:
365
- # pass the full pathname to MSVC in debug mode,
366
- # this allows the debugger to find the source file
367
- # without asking the user to browse for it
368
- src = os.path.abspath(src)
369
-
370
- if ext in self._c_extensions:
371
- input_opt = "/Tc" + src
372
- elif ext in self._cpp_extensions:
373
- input_opt = "/Tp" + src
374
- add_cpp_opts = True
375
- elif ext in self._rc_extensions:
376
- # compile .RC to .RES file
377
- input_opt = src
378
- output_opt = "/fo" + obj
379
- try:
380
- self.spawn([self.rc] + pp_opts + [output_opt, input_opt])
381
- except DistutilsExecError as msg:
382
- raise CompileError(msg)
383
- continue
384
- elif ext in self._mc_extensions:
385
- # Compile .MC to .RC file to .RES file.
386
- # * '-h dir' specifies the directory for the
387
- # generated include file
388
- # * '-r dir' specifies the target directory of the
389
- # generated RC file and the binary message resource
390
- # it includes
391
- #
392
- # For now (since there are no options to change this),
393
- # we use the source-directory for the include file and
394
- # the build directory for the RC file and message
395
- # resources. This works at least for win32all.
396
- h_dir = os.path.dirname(src)
397
- rc_dir = os.path.dirname(obj)
398
- try:
399
- # first compile .MC to .RC and .H file
400
- self.spawn([self.mc, '-h', h_dir, '-r', rc_dir, src])
401
- base, _ = os.path.splitext(os.path.basename(src))
402
- rc_file = os.path.join(rc_dir, base + '.rc')
403
- # then compile .RC to .RES file
404
- self.spawn([self.rc, "/fo" + obj, rc_file])
405
-
406
- except DistutilsExecError as msg:
407
- raise CompileError(msg)
408
- continue
409
- else:
410
- # how to handle this file?
411
- raise CompileError(f"Don't know how to compile {src} to {obj}")
412
-
413
- args = [self.cc] + compile_opts + pp_opts
414
- if add_cpp_opts:
415
- args.append('/EHsc')
416
- args.append(input_opt)
417
- args.append("/Fo" + obj)
418
- args.extend(extra_postargs)
419
-
420
- try:
421
- self.spawn(args)
422
- except DistutilsExecError as msg:
423
- raise CompileError(msg)
424
-
425
- return objects
426
-
427
- def create_static_lib(
428
- self, objects, output_libname, output_dir=None, debug=0, target_lang=None
429
- ):
430
-
431
- if not self.initialized:
432
- self.initialize()
433
- objects, output_dir = self._fix_object_args(objects, output_dir)
434
- output_filename = self.library_filename(output_libname, output_dir=output_dir)
435
-
436
- if self._need_link(objects, output_filename):
437
- lib_args = objects + ['/OUT:' + output_filename]
438
- if debug:
439
- pass # XXX what goes here?
440
- try:
441
- log.debug('Executing "%s" %s', self.lib, ' '.join(lib_args))
442
- self.spawn([self.lib] + lib_args)
443
- except DistutilsExecError as msg:
444
- raise LibError(msg)
445
- else:
446
- log.debug("skipping %s (up-to-date)", output_filename)
447
-
448
- def link(
449
- self,
450
- target_desc,
451
- objects,
452
- output_filename,
453
- output_dir=None,
454
- libraries=None,
455
- library_dirs=None,
456
- runtime_library_dirs=None,
457
- export_symbols=None,
458
- debug=0,
459
- extra_preargs=None,
460
- extra_postargs=None,
461
- build_temp=None,
462
- target_lang=None,
463
- ):
464
-
465
- if not self.initialized:
466
- self.initialize()
467
- objects, output_dir = self._fix_object_args(objects, output_dir)
468
- fixed_args = self._fix_lib_args(libraries, library_dirs, runtime_library_dirs)
469
- libraries, library_dirs, runtime_library_dirs = fixed_args
470
-
471
- if runtime_library_dirs:
472
- self.warn(
473
- "I don't know what to do with 'runtime_library_dirs': "
474
- + str(runtime_library_dirs)
475
- )
476
-
477
- lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs, libraries)
478
- if output_dir is not None:
479
- output_filename = os.path.join(output_dir, output_filename)
480
-
481
- if self._need_link(objects, output_filename):
482
- ldflags = self._ldflags[target_desc, debug]
483
-
484
- export_opts = ["/EXPORT:" + sym for sym in (export_symbols or [])]
485
-
486
- ld_args = (
487
- ldflags + lib_opts + export_opts + objects + ['/OUT:' + output_filename]
488
- )
489
-
490
- # The MSVC linker generates .lib and .exp files, which cannot be
491
- # suppressed by any linker switches. The .lib files may even be
492
- # needed! Make sure they are generated in the temporary build
493
- # directory. Since they have different names for debug and release
494
- # builds, they can go into the same directory.
495
- build_temp = os.path.dirname(objects[0])
496
- if export_symbols is not None:
497
- (dll_name, dll_ext) = os.path.splitext(
498
- os.path.basename(output_filename)
499
- )
500
- implib_file = os.path.join(build_temp, self.library_filename(dll_name))
501
- ld_args.append('/IMPLIB:' + implib_file)
502
-
503
- if extra_preargs:
504
- ld_args[:0] = extra_preargs
505
- if extra_postargs:
506
- ld_args.extend(extra_postargs)
507
-
508
- output_dir = os.path.dirname(os.path.abspath(output_filename))
509
- self.mkpath(output_dir)
510
- try:
511
- log.debug('Executing "%s" %s', self.linker, ' '.join(ld_args))
512
- self.spawn([self.linker] + ld_args)
513
- except DistutilsExecError as msg:
514
- raise LinkError(msg)
515
- else:
516
- log.debug("skipping %s (up-to-date)", output_filename)
517
-
518
- def spawn(self, cmd):
519
- env = dict(os.environ, PATH=self._paths)
520
- with self._fallback_spawn(cmd, env) as fallback:
521
- return super().spawn(cmd, env=env)
522
- return fallback.value
523
-
524
- @contextlib.contextmanager
525
- def _fallback_spawn(self, cmd, env):
526
- """
527
- Discovered in pypa/distutils#15, some tools monkeypatch the compiler,
528
- so the 'env' kwarg causes a TypeError. Detect this condition and
529
- restore the legacy, unsafe behavior.
530
- """
531
- bag = type('Bag', (), {})()
532
- try:
533
- yield bag
534
- except TypeError as exc:
535
- if "unexpected keyword argument 'env'" not in str(exc):
536
- raise
537
- else:
538
- return
539
- warnings.warn("Fallback spawn triggered. Please update distutils monkeypatch.")
540
- with mock.patch.dict('os.environ', env):
541
- bag.value = super().spawn(cmd)
542
-
543
- # -- Miscellaneous methods -----------------------------------------
544
- # These are all used by the 'gen_lib_options() function, in
545
- # ccompiler.py.
546
-
547
- def library_dir_option(self, dir):
548
- return "/LIBPATH:" + dir
549
-
550
- def runtime_library_dir_option(self, dir):
551
- raise DistutilsPlatformError(
552
- "don't know how to set runtime library search path for MSVC"
553
- )
554
-
555
- def library_option(self, lib):
556
- return self.library_filename(lib)
557
-
558
- def find_library_file(self, dirs, lib, debug=0):
559
- # Prefer a debugging library if found (and requested), but deal
560
- # with it if we don't have one.
561
- if debug:
562
- try_names = [lib + "_d", lib]
563
- else:
564
- try_names = [lib]
565
- for dir in dirs:
566
- for name in try_names:
567
- libfile = os.path.join(dir, self.library_filename(name))
568
- if os.path.isfile(libfile):
569
- return libfile
570
- else:
571
- # Oops, didn't find it in *any* of 'dirs'
572
- return None
spaces/Awiny/Image2Paragraph/models/grit_src/grit/custom_solver.py DELETED
@@ -1,88 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2
- # Modified by Jialian Wu from https://github.com/facebookresearch/Detic/blob/main/detic/custom_solver.py
3
- import itertools
4
- from typing import Any, Callable, Dict, Iterable, List, Set, Type, Union
5
- import torch
6
-
7
- from detectron2.config import CfgNode
8
-
9
- from detectron2.solver.build import maybe_add_gradient_clipping
10
-
11
-
12
- def build_custom_optimizer(cfg: CfgNode, model: torch.nn.Module) -> torch.optim.Optimizer:
13
- params: List[Dict[str, Any]] = []
14
- memo: Set[torch.nn.parameter.Parameter] = set()
15
- optimizer_type = cfg.SOLVER.OPTIMIZER
16
-
17
- for key, value in model.named_parameters(recurse=True):
18
- if not value.requires_grad:
19
- continue
20
- # Avoid duplicating parameters
21
- if value in memo:
22
- continue
23
- memo.add(value)
24
- lr = cfg.SOLVER.BASE_LR
25
- weight_decay = cfg.SOLVER.WEIGHT_DECAY
26
-
27
- if cfg.SOLVER.VIT_LAYER_DECAY:
28
- lr = lr * get_vit_lr_decay_rate(key, cfg.SOLVER.VIT_LAYER_DECAY_RATE, cfg.MODEL.VIT_LAYERS)
29
-
30
- param = {"params": [value], "lr": lr}
31
- if optimizer_type != 'ADAMW':
32
- param['weight_decay'] = weight_decay
33
- params += [param]
34
-
35
- def maybe_add_full_model_gradient_clipping(optim): # optim: the optimizer class
36
- # detectron2 doesn't have full model gradient clipping now
37
- clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE
38
- enable = (
39
- cfg.SOLVER.CLIP_GRADIENTS.ENABLED
40
- and cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model"
41
- and clip_norm_val > 0.0
42
- )
43
-
44
- class FullModelGradientClippingOptimizer(optim):
45
- def step(self, closure=None):
46
- all_params = itertools.chain(*[x["params"] for x in self.param_groups])
47
- torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val)
48
- super().step(closure=closure)
49
-
50
- return FullModelGradientClippingOptimizer if enable else optim
51
-
52
-
53
- if optimizer_type == 'SGD':
54
- optimizer = maybe_add_full_model_gradient_clipping(torch.optim.SGD)(
55
- params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM,
56
- nesterov=cfg.SOLVER.NESTEROV
57
- )
58
- elif optimizer_type == 'ADAMW':
59
- optimizer = maybe_add_full_model_gradient_clipping(torch.optim.AdamW)(
60
- params, cfg.SOLVER.BASE_LR,
61
- weight_decay=cfg.SOLVER.WEIGHT_DECAY
62
- )
63
- else:
64
- raise NotImplementedError(f"no optimizer type {optimizer_type}")
65
- if not cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model":
66
- optimizer = maybe_add_gradient_clipping(cfg, optimizer)
67
- return optimizer
68
-
69
-
70
- def get_vit_lr_decay_rate(name, lr_decay_rate=1.0, num_layers=12):
71
- """
72
- Calculate lr decay rate for different ViT blocks.
73
- Args:
74
- name (string): parameter name.
75
- lr_decay_rate (float): base lr decay rate.
76
- num_layers (int): number of ViT blocks.
77
-
78
- Returns:
79
- lr decay rate for the given parameter.
80
- """
81
- layer_id = num_layers + 1
82
- if name.startswith("backbone"):
83
- if ".pos_embed" in name or ".patch_embed" in name:
84
- layer_id = 0
85
- elif ".blocks." in name and ".residual." not in name:
86
- layer_id = int(name[name.find(".blocks.") :].split(".")[2]) + 1
87
-
88
- return lr_decay_rate ** (num_layers + 1 - layer_id)
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/evaluation/panoptic_evaluation.py DELETED
@@ -1,199 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
- import contextlib
3
- import io
4
- import itertools
5
- import json
6
- import logging
7
- import numpy as np
8
- import os
9
- import tempfile
10
- from collections import OrderedDict
11
- from typing import Optional
12
- from PIL import Image
13
- from tabulate import tabulate
14
-
15
- from detectron2.data import MetadataCatalog
16
- from detectron2.utils import comm
17
- from detectron2.utils.file_io import PathManager
18
-
19
- from .evaluator import DatasetEvaluator
20
-
21
- logger = logging.getLogger(__name__)
22
-
23
-
24
- class COCOPanopticEvaluator(DatasetEvaluator):
25
- """
26
- Evaluate Panoptic Quality metrics on COCO using PanopticAPI.
27
- It saves panoptic segmentation prediction in `output_dir`
28
-
29
- It contains a synchronize call and has to be called from all workers.
30
- """
31
-
32
- def __init__(self, dataset_name: str, output_dir: Optional[str] = None):
33
- """
34
- Args:
35
- dataset_name: name of the dataset
36
- output_dir: output directory to save results for evaluation.
37
- """
38
- self._metadata = MetadataCatalog.get(dataset_name)
39
- self._thing_contiguous_id_to_dataset_id = {
40
- v: k for k, v in self._metadata.thing_dataset_id_to_contiguous_id.items()
41
- }
42
- self._stuff_contiguous_id_to_dataset_id = {
43
- v: k for k, v in self._metadata.stuff_dataset_id_to_contiguous_id.items()
44
- }
45
-
46
- self._output_dir = output_dir
47
- if self._output_dir is not None:
48
- PathManager.mkdirs(self._output_dir)
49
-
50
- def reset(self):
51
- self._predictions = []
52
-
53
- def _convert_category_id(self, segment_info):
54
- isthing = segment_info.pop("isthing", None)
55
- if isthing is None:
56
- # the model produces panoptic category id directly. No more conversion needed
57
- return segment_info
58
- if isthing is True:
59
- segment_info["category_id"] = self._thing_contiguous_id_to_dataset_id[
60
- segment_info["category_id"]
61
- ]
62
- else:
63
- segment_info["category_id"] = self._stuff_contiguous_id_to_dataset_id[
64
- segment_info["category_id"]
65
- ]
66
- return segment_info
67
-
68
- def process(self, inputs, outputs):
69
- from panopticapi.utils import id2rgb
70
-
71
- for input, output in zip(inputs, outputs):
72
- panoptic_img, segments_info = output["panoptic_seg"]
73
- panoptic_img = panoptic_img.cpu().numpy()
74
- if segments_info is None:
75
- # If "segments_info" is None, we assume "panoptic_img" is a
76
- # H*W int32 image storing the panoptic_id in the format of
77
- # category_id * label_divisor + instance_id. We reserve -1 for
78
- # VOID label, and add 1 to panoptic_img since the official
79
- # evaluation script uses 0 for VOID label.
80
- label_divisor = self._metadata.label_divisor
81
- segments_info = []
82
- for panoptic_label in np.unique(panoptic_img):
83
- if panoptic_label == -1:
84
- # VOID region.
85
- continue
86
- pred_class = panoptic_label // label_divisor
87
- isthing = (
88
- pred_class in self._metadata.thing_dataset_id_to_contiguous_id.values()
89
- )
90
- segments_info.append(
91
- {
92
- "id": int(panoptic_label) + 1,
93
- "category_id": int(pred_class),
94
- "isthing": bool(isthing),
95
- }
96
- )
97
- # Official evaluation script uses 0 for VOID label.
98
- panoptic_img += 1
99
-
100
- file_name = os.path.basename(input["file_name"])
101
- file_name_png = os.path.splitext(file_name)[0] + ".png"
102
- with io.BytesIO() as out:
103
- Image.fromarray(id2rgb(panoptic_img)).save(out, format="PNG")
104
- segments_info = [self._convert_category_id(x) for x in segments_info]
105
- self._predictions.append(
106
- {
107
- "image_id": input["image_id"],
108
- "file_name": file_name_png,
109
- "png_string": out.getvalue(),
110
- "segments_info": segments_info,
111
- }
112
- )
113
-
114
- def evaluate(self):
115
- comm.synchronize()
116
-
117
- self._predictions = comm.gather(self._predictions)
118
- self._predictions = list(itertools.chain(*self._predictions))
119
- if not comm.is_main_process():
120
- return
121
-
122
- # PanopticApi requires local files
123
- gt_json = PathManager.get_local_path(self._metadata.panoptic_json)
124
- gt_folder = PathManager.get_local_path(self._metadata.panoptic_root)
125
-
126
- with tempfile.TemporaryDirectory(prefix="panoptic_eval") as pred_dir:
127
- logger.info("Writing all panoptic predictions to {} ...".format(pred_dir))
128
- for p in self._predictions:
129
- with open(os.path.join(pred_dir, p["file_name"]), "wb") as f:
130
- f.write(p.pop("png_string"))
131
-
132
- with open(gt_json, "r") as f:
133
- json_data = json.load(f)
134
- json_data["annotations"] = self._predictions
135
-
136
- output_dir = self._output_dir or pred_dir
137
- predictions_json = os.path.join(output_dir, "predictions.json")
138
- with PathManager.open(predictions_json, "w") as f:
139
- f.write(json.dumps(json_data))
140
-
141
- from panopticapi.evaluation import pq_compute
142
-
143
- with contextlib.redirect_stdout(io.StringIO()):
144
- pq_res = pq_compute(
145
- gt_json,
146
- PathManager.get_local_path(predictions_json),
147
- gt_folder=gt_folder,
148
- pred_folder=pred_dir,
149
- )
150
-
151
- res = {}
152
- res["PQ"] = 100 * pq_res["All"]["pq"]
153
- res["SQ"] = 100 * pq_res["All"]["sq"]
154
- res["RQ"] = 100 * pq_res["All"]["rq"]
155
- res["PQ_th"] = 100 * pq_res["Things"]["pq"]
156
- res["SQ_th"] = 100 * pq_res["Things"]["sq"]
157
- res["RQ_th"] = 100 * pq_res["Things"]["rq"]
158
- res["PQ_st"] = 100 * pq_res["Stuff"]["pq"]
159
- res["SQ_st"] = 100 * pq_res["Stuff"]["sq"]
160
- res["RQ_st"] = 100 * pq_res["Stuff"]["rq"]
161
-
162
- results = OrderedDict({"panoptic_seg": res})
163
- _print_panoptic_results(pq_res)
164
-
165
- return results
166
-
167
-
168
- def _print_panoptic_results(pq_res):
169
- headers = ["", "PQ", "SQ", "RQ", "#categories"]
170
- data = []
171
- for name in ["All", "Things", "Stuff"]:
172
- row = [name] + [pq_res[name][k] * 100 for k in ["pq", "sq", "rq"]] + [pq_res[name]["n"]]
173
- data.append(row)
174
- table = tabulate(
175
- data, headers=headers, tablefmt="pipe", floatfmt=".3f", stralign="center", numalign="center"
176
- )
177
- logger.info("Panoptic Evaluation Results:\n" + table)
178
-
179
-
180
- if __name__ == "__main__":
181
- from detectron2.utils.logger import setup_logger
182
-
183
- logger = setup_logger()
184
- import argparse
185
-
186
- parser = argparse.ArgumentParser()
187
- parser.add_argument("--gt-json")
188
- parser.add_argument("--gt-dir")
189
- parser.add_argument("--pred-json")
190
- parser.add_argument("--pred-dir")
191
- args = parser.parse_args()
192
-
193
- from panopticapi.evaluation import pq_compute
194
-
195
- with contextlib.redirect_stdout(io.StringIO()):
196
- pq_res = pq_compute(
197
- args.gt_json, args.pred_json, gt_folder=args.gt_dir, pred_folder=args.pred_dir
198
- )
199
- _print_panoptic_results(pq_res)
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/data/test_transforms.py DELETED
@@ -1,268 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- # Copyright (c) Facebook, Inc. and its affiliates.
3
-
4
- import logging
5
- import numpy as np
6
- import unittest
7
- from unittest import mock
8
- import torch
9
- from PIL import Image, ImageOps
10
- from torch.nn import functional as F
11
-
12
- from detectron2.config import get_cfg
13
- from detectron2.data import detection_utils
14
- from detectron2.data import transforms as T
15
- from detectron2.utils.logger import setup_logger
16
-
17
- logger = logging.getLogger(__name__)
18
-
19
-
20
- def polygon_allclose(poly1, poly2):
21
- """
22
- Test whether two polygons are the same.
23
- Both arguments are nx2 numpy arrays.
24
- """
25
- # ABCD and CDAB are the same polygon. So it's important to check after rolling
26
- for k in range(len(poly1)):
27
- rolled_poly1 = np.roll(poly1, k, axis=0)
28
- if np.allclose(rolled_poly1, poly2):
29
- return True
30
- return False
31
-
32
-
33
- class TestTransforms(unittest.TestCase):
34
- def setUp(self):
35
- setup_logger()
36
-
37
- def test_apply_rotated_boxes(self):
38
- np.random.seed(125)
39
- cfg = get_cfg()
40
- is_train = True
41
- augs = detection_utils.build_augmentation(cfg, is_train)
42
- image = np.random.rand(200, 300)
43
- image, transforms = T.apply_augmentations(augs, image)
44
- image_shape = image.shape[:2] # h, w
45
- assert image_shape == (800, 1200)
46
- annotation = {"bbox": [179, 97, 62, 40, -56]}
47
-
48
- boxes = np.array([annotation["bbox"]], dtype=np.float64) # boxes.shape = (1, 5)
49
- transformed_bbox = transforms.apply_rotated_box(boxes)[0]
50
-
51
- expected_bbox = np.array([484, 388, 248, 160, 56], dtype=np.float64)
52
- err_msg = "transformed_bbox = {}, expected {}".format(transformed_bbox, expected_bbox)
53
- assert np.allclose(transformed_bbox, expected_bbox), err_msg
54
-
55
- def test_resize_and_crop(self):
56
- np.random.seed(125)
57
- min_scale = 0.2
58
- max_scale = 2.0
59
- target_height = 1100
60
- target_width = 1000
61
- resize_aug = T.ResizeScale(min_scale, max_scale, target_height, target_width)
62
- fixed_size_crop_aug = T.FixedSizeCrop((target_height, target_width))
63
- hflip_aug = T.RandomFlip()
64
- augs = [resize_aug, fixed_size_crop_aug, hflip_aug]
65
- original_image = np.random.rand(900, 800)
66
- image, transforms = T.apply_augmentations(augs, original_image)
67
- image_shape = image.shape[:2] # h, w
68
- self.assertEqual((1100, 1000), image_shape)
69
-
70
- boxes = np.array(
71
- [[91, 46, 144, 111], [523, 251, 614, 295]],
72
- dtype=np.float64,
73
- )
74
- transformed_bboxs = transforms.apply_box(boxes)
75
- expected_bboxs = np.array(
76
- [
77
- [895.42, 33.42666667, 933.91125, 80.66],
78
- [554.0825, 182.39333333, 620.17125, 214.36666667],
79
- ],
80
- dtype=np.float64,
81
- )
82
- err_msg = "transformed_bbox = {}, expected {}".format(transformed_bboxs, expected_bboxs)
83
- self.assertTrue(np.allclose(transformed_bboxs, expected_bboxs), err_msg)
84
-
85
- polygon = np.array([[91, 46], [144, 46], [144, 111], [91, 111]])
86
- transformed_polygons = transforms.apply_polygons([polygon])
87
- expected_polygon = np.array([[934.0, 33.0], [934.0, 80.0], [896.0, 80.0], [896.0, 33.0]])
88
- self.assertEqual(1, len(transformed_polygons))
89
- err_msg = "transformed_polygon = {}, expected {}".format(
90
- transformed_polygons[0], expected_polygon
91
- )
92
- self.assertTrue(polygon_allclose(transformed_polygons[0], expected_polygon), err_msg)
93
-
94
- def test_apply_rotated_boxes_unequal_scaling_factor(self):
95
- np.random.seed(125)
96
- h, w = 400, 200
97
- newh, neww = 800, 800
98
- image = np.random.rand(h, w)
99
- augs = []
100
- augs.append(T.Resize(shape=(newh, neww)))
101
- image, transforms = T.apply_augmentations(augs, image)
102
- image_shape = image.shape[:2] # h, w
103
- assert image_shape == (newh, neww)
104
-
105
- boxes = np.array(
106
- [
107
- [150, 100, 40, 20, 0],
108
- [150, 100, 40, 20, 30],
109
- [150, 100, 40, 20, 90],
110
- [150, 100, 40, 20, -90],
111
- ],
112
- dtype=np.float64,
113
- )
114
- transformed_boxes = transforms.apply_rotated_box(boxes)
115
-
116
- expected_bboxes = np.array(
117
- [
118
- [600, 200, 160, 40, 0],
119
- [600, 200, 144.22205102, 52.91502622, 49.10660535],
120
- [600, 200, 80, 80, 90],
121
- [600, 200, 80, 80, -90],
122
- ],
123
- dtype=np.float64,
124
- )
125
- err_msg = "transformed_boxes = {}, expected {}".format(transformed_boxes, expected_bboxes)
126
- assert np.allclose(transformed_boxes, expected_bboxes), err_msg
127
-
128
- def test_print_augmentation(self):
129
- t = T.RandomCrop("relative", (100, 100))
130
- self.assertEqual(str(t), "RandomCrop(crop_type='relative', crop_size=(100, 100))")
131
-
132
- t0 = T.RandomFlip(prob=0.5)
133
- self.assertEqual(str(t0), "RandomFlip(prob=0.5)")
134
-
135
- t1 = T.RandomFlip()
136
- self.assertEqual(str(t1), "RandomFlip()")
137
-
138
- t = T.AugmentationList([t0, t1])
139
- self.assertEqual(str(t), f"AugmentationList[{t0}, {t1}]")
140
-
141
- def test_random_apply_prob_out_of_range_check(self):
142
- test_probabilities = {0.0: True, 0.5: True, 1.0: True, -0.01: False, 1.01: False}
143
-
144
- for given_probability, is_valid in test_probabilities.items():
145
- if not is_valid:
146
- self.assertRaises(AssertionError, T.RandomApply, None, prob=given_probability)
147
- else:
148
- T.RandomApply(T.NoOpTransform(), prob=given_probability)
149
-
150
- def test_random_apply_wrapping_aug_probability_occured_evaluation(self):
151
- transform_mock = mock.MagicMock(name="MockTransform", spec=T.Augmentation)
152
- image_mock = mock.MagicMock(name="MockImage")
153
- random_apply = T.RandomApply(transform_mock, prob=0.001)
154
-
155
- with mock.patch.object(random_apply, "_rand_range", return_value=0.0001):
156
- transform = random_apply.get_transform(image_mock)
157
- transform_mock.get_transform.assert_called_once_with(image_mock)
158
- self.assertIsNot(transform, transform_mock)
159
-
160
- def test_random_apply_wrapping_std_transform_probability_occured_evaluation(self):
161
- transform_mock = mock.MagicMock(name="MockTransform", spec=T.Transform)
162
- image_mock = mock.MagicMock(name="MockImage")
163
- random_apply = T.RandomApply(transform_mock, prob=0.001)
164
-
165
- with mock.patch.object(random_apply, "_rand_range", return_value=0.0001):
166
- transform = random_apply.get_transform(image_mock)
167
- self.assertIs(transform, transform_mock)
168
-
169
- def test_random_apply_probability_not_occured_evaluation(self):
170
- transform_mock = mock.MagicMock(name="MockTransform", spec=T.Augmentation)
171
- image_mock = mock.MagicMock(name="MockImage")
172
- random_apply = T.RandomApply(transform_mock, prob=0.001)
173
-
174
- with mock.patch.object(random_apply, "_rand_range", return_value=0.9):
175
- transform = random_apply.get_transform(image_mock)
176
- transform_mock.get_transform.assert_not_called()
177
- self.assertIsInstance(transform, T.NoOpTransform)
178
-
179
- def test_augmentation_input_args(self):
180
- input_shape = (100, 100)
181
- output_shape = (50, 50)
182
-
183
- # define two augmentations with different args
184
- class TG1(T.Augmentation):
185
- def get_transform(self, image, sem_seg):
186
- return T.ResizeTransform(
187
- input_shape[0], input_shape[1], output_shape[0], output_shape[1]
188
- )
189
-
190
- class TG2(T.Augmentation):
191
- def get_transform(self, image):
192
- assert image.shape[:2] == output_shape # check that TG1 is applied
193
- return T.HFlipTransform(output_shape[1])
194
-
195
- image = np.random.rand(*input_shape).astype("float32")
196
- sem_seg = (np.random.rand(*input_shape) < 0.5).astype("uint8")
197
- inputs = T.AugInput(image, sem_seg=sem_seg) # provide two args
198
- tfms = inputs.apply_augmentations([TG1(), TG2()])
199
- self.assertIsInstance(tfms[0], T.ResizeTransform)
200
- self.assertIsInstance(tfms[1], T.HFlipTransform)
201
- self.assertTrue(inputs.image.shape[:2] == output_shape)
202
- self.assertTrue(inputs.sem_seg.shape[:2] == output_shape)
203
-
204
- class TG3(T.Augmentation):
205
- def get_transform(self, image, nonexist):
206
- pass
207
-
208
- with self.assertRaises(AttributeError):
209
- inputs.apply_augmentations([TG3()])
210
-
211
- def test_augmentation_list(self):
212
- input_shape = (100, 100)
213
- image = np.random.rand(*input_shape).astype("float32")
214
- sem_seg = (np.random.rand(*input_shape) < 0.5).astype("uint8")
215
- inputs = T.AugInput(image, sem_seg=sem_seg) # provide two args
216
-
217
- augs = T.AugmentationList([T.RandomFlip(), T.Resize(20)])
218
- _ = T.AugmentationList([augs, T.Resize(30)])(inputs)
219
- # 3 in latest fvcore (flattened transformlist), 2 in older
220
- # self.assertEqual(len(tfms), 3)
221
-
222
- def test_color_transforms(self):
223
- rand_img = np.random.random((100, 100, 3)) * 255
224
- rand_img = rand_img.astype("uint8")
225
-
226
- # Test no-op
227
- noop_transform = T.ColorTransform(lambda img: img)
228
- self.assertTrue(np.array_equal(rand_img, noop_transform.apply_image(rand_img)))
229
-
230
- # Test a ImageOps operation
231
- magnitude = np.random.randint(0, 256)
232
- solarize_transform = T.PILColorTransform(lambda img: ImageOps.solarize(img, magnitude))
233
- expected_img = ImageOps.solarize(Image.fromarray(rand_img), magnitude)
234
- self.assertTrue(np.array_equal(expected_img, solarize_transform.apply_image(rand_img)))
235
-
236
- def test_resize_transform(self):
237
- input_shapes = [(100, 100), (100, 100, 1), (100, 100, 3)]
238
- output_shapes = [(200, 200), (200, 200, 1), (200, 200, 3)]
239
- for in_shape, out_shape in zip(input_shapes, output_shapes):
240
- in_img = np.random.randint(0, 255, size=in_shape, dtype=np.uint8)
241
- tfm = T.ResizeTransform(in_shape[0], in_shape[1], out_shape[0], out_shape[1])
242
- out_img = tfm.apply_image(in_img)
243
- self.assertEqual(out_img.shape, out_shape)
244
-
245
- def test_resize_shorted_edge_scriptable(self):
246
- def f(image):
247
- newh, neww = T.ResizeShortestEdge.get_output_shape(
248
- image.shape[-2], image.shape[-1], 80, 133
249
- )
250
- return F.interpolate(image.unsqueeze(0), size=(newh, neww))
251
-
252
- input = torch.randn(3, 10, 10)
253
- script_f = torch.jit.script(f)
254
- self.assertTrue(torch.allclose(f(input), script_f(input)))
255
-
256
- # generalize to new shapes
257
- input = torch.randn(3, 8, 100)
258
- self.assertTrue(torch.allclose(f(input), script_f(input)))
259
-
260
- def test_extent_transform(self):
261
- input_shapes = [(100, 100), (100, 100, 1), (100, 100, 3)]
262
- src_rect = (20, 20, 80, 80)
263
- output_shapes = [(200, 200), (200, 200, 1), (200, 200, 3)]
264
- for in_shape, out_shape in zip(input_shapes, output_shapes):
265
- in_img = np.random.randint(0, 255, size=in_shape, dtype=np.uint8)
266
- tfm = T.ExtentTransform(src_rect, out_shape[:2])
267
- out_img = tfm.apply_image(in_img)
268
- self.assertTrue(out_img.shape == out_shape)
spaces/Bart92/RVC_HF/infer/modules/uvr5/preprocess.py DELETED
@@ -1,346 +0,0 @@
1
- import os
2
- import logging
3
-
4
- logger = logging.getLogger(__name__)
5
-
6
- import librosa
7
- import numpy as np
8
- import soundfile as sf
9
- import torch
10
-
11
- from infer.lib.uvr5_pack.lib_v5 import nets_61968KB as Nets
12
- from infer.lib.uvr5_pack.lib_v5 import spec_utils
13
- from infer.lib.uvr5_pack.lib_v5.model_param_init import ModelParameters
14
- from infer.lib.uvr5_pack.lib_v5.nets_new import CascadedNet
15
- from infer.lib.uvr5_pack.utils import inference
16
-
17
-
18
- class AudioPre:
19
- def __init__(self, agg, model_path, device, is_half):
20
- self.model_path = model_path
21
- self.device = device
22
- self.data = {
23
- # Processing Options
24
- "postprocess": False,
25
- "tta": False,
26
- # Constants
27
- "window_size": 512,
28
- "agg": agg,
29
- "high_end_process": "mirroring",
30
- }
31
- mp = ModelParameters("infer/lib/uvr5_pack/lib_v5/modelparams/4band_v2.json")
32
- model = Nets.CascadedASPPNet(mp.param["bins"] * 2)
33
- cpk = torch.load(model_path, map_location="cpu")
34
- model.load_state_dict(cpk)
35
- model.eval()
36
- if is_half:
37
- model = model.half().to(device)
38
- else:
39
- model = model.to(device)
40
-
41
- self.mp = mp
42
- self.model = model
43
-
44
- def _path_audio_(self, music_file, ins_root=None, vocal_root=None, format="flac"):
45
- if ins_root is None and vocal_root is None:
46
- return "No save root."
47
- name = os.path.basename(music_file)
48
- if ins_root is not None:
49
- os.makedirs(ins_root, exist_ok=True)
50
- if vocal_root is not None:
51
- os.makedirs(vocal_root, exist_ok=True)
52
- X_wave, y_wave, X_spec_s, y_spec_s = {}, {}, {}, {}
53
- bands_n = len(self.mp.param["band"])
54
- # print(bands_n)
55
- for d in range(bands_n, 0, -1):
56
- bp = self.mp.param["band"][d]
57
- if d == bands_n: # high-end band
58
- (
59
- X_wave[d],
60
- _,
61
- ) = librosa.core.load( # 理论上librosa读取可能对某些音频有bug,应该上ffmpeg读取,但是太麻烦了弃坑
62
- music_file,
63
- bp["sr"],
64
- False,
65
- dtype=np.float32,
66
- res_type=bp["res_type"],
67
- )
68
- if X_wave[d].ndim == 1:
69
- X_wave[d] = np.asfortranarray([X_wave[d], X_wave[d]])
70
- else: # lower bands
71
- X_wave[d] = librosa.core.resample(
72
- X_wave[d + 1],
73
- self.mp.param["band"][d + 1]["sr"],
74
- bp["sr"],
75
- res_type=bp["res_type"],
76
- )
77
- # Stft of wave source
78
- X_spec_s[d] = spec_utils.wave_to_spectrogram_mt(
79
- X_wave[d],
80
- bp["hl"],
81
- bp["n_fft"],
82
- self.mp.param["mid_side"],
83
- self.mp.param["mid_side_b2"],
84
- self.mp.param["reverse"],
85
- )
86
- # pdb.set_trace()
87
- if d == bands_n and self.data["high_end_process"] != "none":
88
- input_high_end_h = (bp["n_fft"] // 2 - bp["crop_stop"]) + (
89
- self.mp.param["pre_filter_stop"] - self.mp.param["pre_filter_start"]
90
- )
91
- input_high_end = X_spec_s[d][
92
- :, bp["n_fft"] // 2 - input_high_end_h : bp["n_fft"] // 2, :
93
- ]
94
-
95
- X_spec_m = spec_utils.combine_spectrograms(X_spec_s, self.mp)
96
- aggresive_set = float(self.data["agg"] / 100)
97
- aggressiveness = {
98
- "value": aggresive_set,
99
- "split_bin": self.mp.param["band"][1]["crop_stop"],
100
- }
101
- with torch.no_grad():
102
- pred, X_mag, X_phase = inference(
103
- X_spec_m, self.device, self.model, aggressiveness, self.data
104
- )
105
- # Postprocess
106
- if self.data["postprocess"]:
107
- pred_inv = np.clip(X_mag - pred, 0, np.inf)
108
- pred = spec_utils.mask_silence(pred, pred_inv)
109
- y_spec_m = pred * X_phase
110
- v_spec_m = X_spec_m - y_spec_m
111
-
112
- if ins_root is not None:
113
- if self.data["high_end_process"].startswith("mirroring"):
114
- input_high_end_ = spec_utils.mirroring(
115
- self.data["high_end_process"], y_spec_m, input_high_end, self.mp
116
- )
117
- wav_instrument = spec_utils.cmb_spectrogram_to_wave(
118
- y_spec_m, self.mp, input_high_end_h, input_high_end_
119
- )
120
- else:
121
- wav_instrument = spec_utils.cmb_spectrogram_to_wave(y_spec_m, self.mp)
122
- logger.info("%s instruments done" % name)
123
- if format in ["wav", "flac"]:
124
- sf.write(
125
- os.path.join(
126
- ins_root,
127
- "instrument_{}_{}.{}".format(name, self.data["agg"], format),
128
- ),
129
- (np.array(wav_instrument) * 32768).astype("int16"),
130
- self.mp.param["sr"],
131
- ) #
132
- else:
133
- path = os.path.join(
134
- ins_root, "instrument_{}_{}.wav".format(name, self.data["agg"])
135
- )
136
- sf.write(
137
- path,
138
- (np.array(wav_instrument) * 32768).astype("int16"),
139
- self.mp.param["sr"],
140
- )
141
- if os.path.exists(path):
142
- os.system(
143
- "ffmpeg -i %s -vn %s -q:a 2 -y"
144
- % (path, path[:-4] + ".%s" % format)
145
- )
146
- if vocal_root is not None:
147
- if self.data["high_end_process"].startswith("mirroring"):
148
- input_high_end_ = spec_utils.mirroring(
149
- self.data["high_end_process"], v_spec_m, input_high_end, self.mp
150
- )
151
- wav_vocals = spec_utils.cmb_spectrogram_to_wave(
152
- v_spec_m, self.mp, input_high_end_h, input_high_end_
153
- )
154
- else:
155
- wav_vocals = spec_utils.cmb_spectrogram_to_wave(v_spec_m, self.mp)
156
- logger.info("%s vocals done" % name)
157
- if format in ["wav", "flac"]:
158
- sf.write(
159
- os.path.join(
160
- vocal_root,
161
- "vocal_{}_{}.{}".format(name, self.data["agg"], format),
162
- ),
163
- (np.array(wav_vocals) * 32768).astype("int16"),
164
- self.mp.param["sr"],
165
- )
166
- else:
167
- path = os.path.join(
168
- vocal_root, "vocal_{}_{}.wav".format(name, self.data["agg"])
169
- )
170
- sf.write(
171
- path,
172
- (np.array(wav_vocals) * 32768).astype("int16"),
173
- self.mp.param["sr"],
174
- )
175
- if os.path.exists(path):
176
- os.system(
177
- "ffmpeg -i %s -vn %s -q:a 2 -y"
178
- % (path, path[:-4] + ".%s" % format)
179
- )
180
-
181
-
182
- class AudioPreDeEcho:
183
- def __init__(self, agg, model_path, device, is_half):
184
- self.model_path = model_path
185
- self.device = device
186
- self.data = {
187
- # Processing Options
188
- "postprocess": False,
189
- "tta": False,
190
- # Constants
191
- "window_size": 512,
192
- "agg": agg,
193
- "high_end_process": "mirroring",
194
- }
195
- mp = ModelParameters("infer/lib/uvr5_pack/lib_v5/modelparams/4band_v3.json")
196
- nout = 64 if "DeReverb" in model_path else 48
197
- model = CascadedNet(mp.param["bins"] * 2, nout)
198
- cpk = torch.load(model_path, map_location="cpu")
199
- model.load_state_dict(cpk)
200
- model.eval()
201
- if is_half:
202
- model = model.half().to(device)
203
- else:
204
- model = model.to(device)
205
-
206
- self.mp = mp
207
- self.model = model
208
-
209
- def _path_audio_(
210
- self, music_file, vocal_root=None, ins_root=None, format="flac"
211
- ): # 3个VR模型vocal和ins是反的
212
- if ins_root is None and vocal_root is None:
213
- return "No save root."
214
- name = os.path.basename(music_file)
215
- if ins_root is not None:
216
- os.makedirs(ins_root, exist_ok=True)
217
- if vocal_root is not None:
218
- os.makedirs(vocal_root, exist_ok=True)
219
- X_wave, y_wave, X_spec_s, y_spec_s = {}, {}, {}, {}
220
- bands_n = len(self.mp.param["band"])
221
- # print(bands_n)
222
- for d in range(bands_n, 0, -1):
223
- bp = self.mp.param["band"][d]
224
- if d == bands_n: # high-end band
225
- (
226
- X_wave[d],
227
- _,
228
- ) = librosa.core.load( # 理论上librosa读取可能对某些音频有bug,应该上ffmpeg读取,但是太麻烦了弃坑
229
- music_file,
230
- bp["sr"],
231
- False,
232
- dtype=np.float32,
233
- res_type=bp["res_type"],
234
- )
235
- if X_wave[d].ndim == 1:
236
- X_wave[d] = np.asfortranarray([X_wave[d], X_wave[d]])
237
- else: # lower bands
238
- X_wave[d] = librosa.core.resample(
239
- X_wave[d + 1],
240
- self.mp.param["band"][d + 1]["sr"],
241
- bp["sr"],
242
- res_type=bp["res_type"],
243
- )
244
- # Stft of wave source
245
- X_spec_s[d] = spec_utils.wave_to_spectrogram_mt(
246
- X_wave[d],
247
- bp["hl"],
248
- bp["n_fft"],
249
- self.mp.param["mid_side"],
250
- self.mp.param["mid_side_b2"],
251
- self.mp.param["reverse"],
252
- )
253
- # pdb.set_trace()
254
- if d == bands_n and self.data["high_end_process"] != "none":
255
- input_high_end_h = (bp["n_fft"] // 2 - bp["crop_stop"]) + (
256
- self.mp.param["pre_filter_stop"] - self.mp.param["pre_filter_start"]
257
- )
258
- input_high_end = X_spec_s[d][
259
- :, bp["n_fft"] // 2 - input_high_end_h : bp["n_fft"] // 2, :
260
- ]
261
-
262
- X_spec_m = spec_utils.combine_spectrograms(X_spec_s, self.mp)
263
- aggresive_set = float(self.data["agg"] / 100)
264
- aggressiveness = {
265
- "value": aggresive_set,
266
- "split_bin": self.mp.param["band"][1]["crop_stop"],
267
- }
268
- with torch.no_grad():
269
- pred, X_mag, X_phase = inference(
270
- X_spec_m, self.device, self.model, aggressiveness, self.data
271
- )
272
- # Postprocess
273
- if self.data["postprocess"]:
274
- pred_inv = np.clip(X_mag - pred, 0, np.inf)
275
- pred = spec_utils.mask_silence(pred, pred_inv)
276
- y_spec_m = pred * X_phase
277
- v_spec_m = X_spec_m - y_spec_m
278
-
279
- if ins_root is not None:
280
- if self.data["high_end_process"].startswith("mirroring"):
281
- input_high_end_ = spec_utils.mirroring(
282
- self.data["high_end_process"], y_spec_m, input_high_end, self.mp
283
- )
284
- wav_instrument = spec_utils.cmb_spectrogram_to_wave(
285
- y_spec_m, self.mp, input_high_end_h, input_high_end_
286
- )
287
- else:
288
- wav_instrument = spec_utils.cmb_spectrogram_to_wave(y_spec_m, self.mp)
289
- logger.info("%s instruments done" % name)
290
- if format in ["wav", "flac"]:
291
- sf.write(
292
- os.path.join(
293
- ins_root,
294
- "instrument_{}_{}.{}".format(name, self.data["agg"], format),
295
- ),
296
- (np.array(wav_instrument) * 32768).astype("int16"),
297
- self.mp.param["sr"],
298
- ) #
299
- else:
300
- path = os.path.join(
301
- ins_root, "instrument_{}_{}.wav".format(name, self.data["agg"])
302
- )
303
- sf.write(
304
- path,
305
- (np.array(wav_instrument) * 32768).astype("int16"),
306
- self.mp.param["sr"],
307
- )
308
- if os.path.exists(path):
309
- os.system(
310
- "ffmpeg -i %s -vn %s -q:a 2 -y"
311
- % (path, path[:-4] + ".%s" % format)
312
- )
313
- if vocal_root is not None:
314
- if self.data["high_end_process"].startswith("mirroring"):
315
- input_high_end_ = spec_utils.mirroring(
316
- self.data["high_end_process"], v_spec_m, input_high_end, self.mp
317
- )
318
- wav_vocals = spec_utils.cmb_spectrogram_to_wave(
319
- v_spec_m, self.mp, input_high_end_h, input_high_end_
320
- )
321
- else:
322
- wav_vocals = spec_utils.cmb_spectrogram_to_wave(v_spec_m, self.mp)
323
- logger.info("%s vocals done" % name)
324
- if format in ["wav", "flac"]:
325
- sf.write(
326
- os.path.join(
327
- vocal_root,
328
- "vocal_{}_{}.{}".format(name, self.data["agg"], format),
329
- ),
330
- (np.array(wav_vocals) * 32768).astype("int16"),
331
- self.mp.param["sr"],
332
- )
333
- else:
334
- path = os.path.join(
335
- vocal_root, "vocal_{}_{}.wav".format(name, self.data["agg"])
336
- )
337
- sf.write(
338
- path,
339
- (np.array(wav_vocals) * 32768).astype("int16"),
340
- self.mp.param["sr"],
341
- )
342
- if os.path.exists(path):
343
- os.system(
344
- "ffmpeg -i %s -vn %s -q:a 2 -y"
345
- % (path, path[:-4] + ".%s" % format)
346
- )
spaces/Benson/text-generation/Examples/Descarga C.md DELETED
@@ -1,91 +0,0 @@
1
-
2
- <h1>Cómo descargar e instalar C++ en Windows</h1>
3
- <p>C++ es un lenguaje de programación popular que ha evolucionado a partir de C y ha añadido características orientadas a objetos, genéricas y funcionales. Está diseñado para la programación de sistemas, software integrado y sistemas grandes, con rendimiento, eficiencia y flexibilidad como sus objetivos. C++ soporta programación orientada a objetos, lo que ayuda a modular y mantener un programa de manera eficiente. C++ también tiene otras características como espacio de nombres, sobrecarga del operador, manejo de errores y excepciones, y una biblioteca de conceptos. </p>
4
- <h2>descarga c++</h2><br /><p><b><b>Download</b> &#11088; <a href="https://bltlly.com/2v6JC5">https://bltlly.com/2v6JC5</a></b></p><br /><br />
5
- <p>Si quieres aprender C++ o utilizarlo para tus proyectos, necesitas tener un compilador de C++ y un entorno de desarrollo integrado (IDE) instalado en tu ordenador. En este artículo, te mostraremos cómo descargar e instalar C++ en Windows usando Visual Studio, que es uno de los IDEs más populares para el desarrollo de C++. Visual Studio proporciona un conjunto completo de herramientas para crear, depurar, probar e implementar aplicaciones C++. </p>
6
- <p>Antes de comenzar a instalar Visual Studio y C++, asegúrese de que su equipo cumple con los requisitos del sistema <a href="( 9 )"</a>. También necesita aplicar las últimas actualizaciones de Windows, reiniciar su computadora y liberar espacio en disco. </p>
7
- <h2>Paso 1: Descargar Visual Studio Installer</h2>
8
- <p>El primer paso es descargar el instalador de Visual Studio desde el sitio web de Microsoft. El instalador es una aplicación ligera que te permite elegir e instalar las características que necesitas para Visual Studio.</p>
9
- <p>Para descargar el instalador, vaya a la página de descarga <a href="( 9 )">Visual Studio</a> y seleccione la edición de Visual Studio que desee. Puede elegir entre ediciones Community, Professional o Enterprise. Para este tutorial, usaremos la edición Comunidad, que es gratuita para estudiantes, colaboradores de código abierto y desarrolladores individuales. </p>
10
-
11
- <p>Haga doble clic en el archivo de arranque para ejecutarlo. Si recibe un aviso de Control de cuenta de usuario, elija Sí para permitirlo. Se le pedirá que acepte los Términos de licencia de Microsoft y la Declaración de privacidad de Microsoft. Elija Continue.</p>
12
- <h2>Paso 2: Elija cargas de trabajo para el desarrollo de C++ </h2>
13
- <p>El instalador le presentará una lista de cargas de trabajo, que son grupos de opciones relacionadas para áreas de desarrollo específicas. El soporte para C++ es ahora parte de cargas de trabajo opcionales que no están instaladas por defecto. </p>
14
- <p></p>
15
- <p>Para el desarrollo de C++, debe seleccionar Desarrollo de escritorio con carga de trabajo de C++. Esta carga de trabajo incluye funciones como:</p>
16
- <ul>
17
- <li>El conjunto de herramientas del compilador MSVC</li>
18
- <li>El SDK de Windows</li>
19
- <li>Herramientas de CMake</li>
20
- <li>Herramientas de prueba</li>
21
- <li>Herramientas de depuración</li>
22
- <li>Herramientas de análisis de código</li>
23
- <li>Biblioteca de plantillas estándar (STL)</li>
24
- <li>La biblioteca Boost</li>
25
- <li>El marco de Google Test</li>
26
- <li>La biblioteca MFC</li>
27
- <li>La biblioteca ATL</li>
28
- </ul>
29
- <p>Para seleccionar el Desarrollo de escritorio con carga de trabajo de C++, marque la casilla junto a él. También puede ampliar la carga de trabajo para ver los componentes opcionales que puede instalar o deseleccionar. Por ejemplo, puede elegir instalar soporte para desarrollo de Linux con C++ o Windows 10 SDK (10.0.19041.0). </p>
30
- <p>Después de seleccionar la carga de trabajo y los componentes que desea, haga clic en el botón Instalar en la esquina inferior derecha del instalador. El instalador le mostrará el progreso y el estado de la instalación. Dependiendo de la velocidad de Internet y la configuración del equipo, esto puede tomar algún tiempo. </p>
31
- <h2>Paso 3: Instalar y lanzar Visual Studio</h2>
32
- <p>Cuando se complete la instalación, verá un mensaje que dice "Instalación exitosa!" Ahora puede iniciar Visual Studio haciendo clic en el botón Iniciar en el instalador o buscándolo en el menú Inicio. </p>
33
-
34
- <p>Después de iniciar sesión, se le pedirá que elija un tema de color y un perfil de configuración de desarrollo. Puede elegir entre temas de Luz, Oscuridad o Azul y desde configuraciones de desarrollo General, C#, C++, Python o Web. Para este tutorial, elegiremos el tema Oscuro y la configuración de desarrollo de C++. </p>
35
- <p>Visual Studio se abrirá y le mostrará una página de inicio con varias opciones. Para crear un nuevo proyecto, haga clic en el botón Crear un nuevo proyecto. </p>
36
- <h2>Paso 4: Escribir y ejecutar un programa simple de C++ </h2>
37
- <p>Para escribir y ejecutar un simple programa de C++, necesitas crear un proyecto que contenga tus archivos de código fuente y otros recursos. Un proyecto también especifica cómo construir y ejecutar su programa usando varias herramientas y configuraciones. </p>
38
- <p>Para crear un nuevo proyecto, siga estos pasos:</p>
39
- <ol>
40
- <li>En la ventana Crear un nuevo proyecto, busque "C++" en el cuadro de búsqueda y seleccione "Aplicación de consola" en la lista de plantillas. Haga clic en Siguiente.</li>
41
- <li>En la ventana Configurar su nuevo proyecto, introduzca un nombre para su proyecto (como HelloWorld) y elija una ubicación para guardarlo. También puede cambiar otras opciones como el nombre de la solución, la plataforma de destino y el estándar de idioma. Haga clic en Crear.</li>
42
- <li>Visual Studio creará un nuevo proyecto y lo abrirá en la ventana principal. Verá un panel del Explorador de soluciones en el lado derecho que muestra los archivos y carpetas en su proyecto. También verá un panel del editor que muestra el código fuente de su archivo main.cpp. </li>
43
- </ol>
44
- <p>El archivo main.cpp contiene un sencillo programa de C++ que imprime "Hello World!" en la consola. El código se ve así:</p>
45
- <pre><code>#include <iostream>
46
- using namespace std; int main() { cout << "Hello World!\n"; }</code></pre>
47
- <p>Para construir y ejecutar su programa, siga estos pasos:</p>
48
- <ol>
49
- <li>Haga clic en el menú Build y seleccione Build Solution (o pulse Ctrl+Shift+B). Esto compilará su código fuente en un archivo ejecutable usando el conjunto de herramientas del compilador MSVC. </li>
50
- <li>Haga clic en el menú Depurar y seleccione Iniciar sin depurar (o pulse Ctrl+F5). Esto ejecutará su programa en una ventana de consola. </li>
51
- <li>Debería ver el mensaje "Hello World!" en la ventana de la consola. Pulse cualquier tecla para cerrarla. </li>
52
- </ol>
53
- <h2>Conclusión</h2>
54
- <p>En este artículo, le hemos mostrado cómo descargar e instalar C++ en Windows usando Visual Studio. También le hemos mostrado cómo crear, construir y ejecutar un programa simple de C++ usando las herramientas de Visual Studio. </p>
55
- <p>C++ es un lenguaje de programación potente y versátil que puede utilizarse para diversos fines. Si quieres saber más sobre C++, puedes consultar algunos de estos recursos:</p>
56
- <ul>
57
- <li><a href=">C++ Documentación</a>: La documentación oficial de C++ por Microsoft.</li>
58
- <li><a href=">C++ Tutorial</a>: Un tutorial completo para principiantes por cplusplus.com. </li>
59
- <li><a href=">C++ Referencia</a>: Una referencia completa para C++ por cppreference.com. </li>
60
- </ul>
61
- <h2>Preguntas frecuentes</h2>
62
- <h3>¿Cuáles son algunos otros compiladores de C++ para Windows? </h3>
63
- <p>Además de Visual Studio, hay otros compiladores populares de C++ para Windows, como:</p>
64
- <ul>
65
- <li><a href=">MinGW</a>: Un GNU minimalista para Windows que proporciona compiladores GCC (GNU Compiler Collection) para C y C++. </li>
66
- <li><a href="( 4 ))">Cygwin</a>: Un entorno similar a Linux para Windows que proporciona compiladores GCC para C y C++. </li>
67
- <li><a href=">Code::Blocks</a>: IDE libre y de código abierto para C y C++ que soporta múltiples compiladores, como GCC, Clang y MSVC.</li>
68
- </ul>
69
- <h3>¿Cómo actualizar las herramientas de Visual Studio y C++ ? </h3>
70
- <p>Para actualizar las herramientas de Visual Studio y C++, puede usar el instalador de Visual Studio. Para iniciar el instalador, vaya al menú Inicio y busque Visual Studio Installer. En el instalador, verá una lista de productos instalados y sus versiones. Si hay alguna actualización disponible, verá un botón Actualizar junto al producto. Haga clic en el botón Actualizar para descargar e instalar las actualizaciones. </p>
71
- <h3> ¿Cómo desinstalar las herramientas de Visual Studio y C++ ? </h3>
72
- <p>Para desinstalar las herramientas de Visual Studio y C++, abra de nuevo el Instalador de Visual Studio desde el menú Inicio. Junto al producto instalado, haga clic en Más y luego en Desinstalar para quitar Visual Studio por completo, o en Modificar si solo desea quitar la carga de trabajo Desarrollo de escritorio con C++. </p>
73
- <h3>¿Cómo instalar C++ en otros sistemas operativos? </h3>
74
- <p>Si quieres instalar C++ en otros sistemas operativos, como Linux o Mac OS, necesitas usar diferentes compiladores e IDEs. Algunas de las opciones comunes son:</p>
75
- <ul>
76
- <li><a href="">GCC</a>: Un compilador libre y de código abierto para C y C++ que es ampliamente utilizado en Linux y otros sistemas similares a Unix. </li>
77
- <li><a href=">Clang</a>: Un compilador libre y de código abierto para C y C++ que está basado en LLVM (Low Level Virtual Machine) y soporta varias plataformas, incluyendo Linux, Mac OS y Windows.</li>
78
- <li><a href=">Xcode</a>: Un IDE gratuito para Mac OS que soporta el desarrollo de C y C++ usando Clang.</li>
79
- <li><a href="">Eclipse</a>: IDE libre y de código abierto que soporta múltiples idiomas, incluyendo C y C++, y múltiples plataformas, incluyendo Linux, Mac OS y Windows.</li>
80
- </ul>
81
- <h3>¿Cuáles son algunas de las nuevas características de C++20? </h3>
82
- <p>C++20 es la última versión del estándar C++ que se publicó en 2020. Introduce muchas nuevas características y mejoras en el lenguaje, como:</p>
83
- <ul>
84
- <li><a href="">Módulos</a>: Una nueva forma de organizar código en unidades que se pueden importar y exportar. </li>
85
- <li><a href="">Conceptos</a>: Una forma de especificar restricciones en parámetros de plantilla usando predicados. </li>
86
- <li><a href="">Rangos</a>: Una biblioteca que proporciona vistas y algoritmos para trabajar con secuencias de elementos. </li>
87
- <li><a href="">Coroutines</a>: Una forma de escribir código asíncrono usando funciones suspendibles. </li>
88
- <li><a href="">Contratos</a>: Una forma de expresar precondiciones, postcondiciones y aserciones para funciones. </li>
89
- </ul>
 
spaces/Benson/text-generation/Examples/Descargar Apk Mod De Netflix.md DELETED
@@ -1,154 +0,0 @@
1
-
2
- <h1>Descargar Mod APK de Netflix: Cómo ver contenido premium gratis</h1>
3
- <p>Netflix es una de las plataformas de streaming más populares del mundo, ofreciendo una amplia gama de películas, programas de televisión, documentales y contenido original. Sin embargo, no todos pueden permitirse el lujo de pagar una suscripción a Netflix, o acceder a todo el contenido disponible en diferentes regiones. Es por eso que algunas personas buscan formas de descargar APK mod de Netflix, que son versiones modificadas de la aplicación oficial que permiten a los usuarios ver contenido premium de forma gratuita. Pero, ¿cómo descargar e instalar un mod APK de Netflix en su dispositivo? Y cuáles son los riesgos y beneficios de usar una aplicación de este tipo? En este artículo, vamos a responder a estas preguntas y más, para que pueda disfrutar de la transmisión ilimitada sin romper el banco. </p>
4
- <h2>¿Qué es Netflix y por qué es tan popular? </h2>
5
- <p>Netflix es una compañía estadounidense que proporciona servicios de transmisión en línea para varios tipos de medios, como películas, programas de televisión, documentales, anime y producciones originales. Netflix fue fundada en 1997 como un servicio de alquiler de DVD, pero más tarde se expandió a la transmisión en línea en 2007. Desde entonces, Netflix ha crecido hasta convertirse en una de las compañías de entretenimiento más grandes e influyentes del mundo, con más de 200 millones de suscriptores en más de 190 países. </p>
6
- <h2>descargar apk mod de netflix</h2><br /><p><b><b>Download Zip</b> &rarr; <a href="https://bltlly.com/2v6Luo">https://bltlly.com/2v6Luo</a></b></p><br /><br />
7
- <h3>Características y beneficios de Netflix</h3>
8
- <p>Algunas de las características y beneficios que hacen que Netflix sea tan popular entre los usuarios son:</p>
9
- <ul>
10
- <li>Ofrece una enorme biblioteca de contenido, con miles de títulos de diferentes géneros, idiomas y categorías. </li>
11
- <li>Produce contenido original de alta calidad, como Stranger Things, The Crown, Black Mirror, The Witcher y muchos más. </li>
12
- <li>Permite a los usuarios descargar contenido para verlo sin conexión en sus dispositivos. </li>
13
- <li>Es compatible con varios dispositivos, como teléfonos inteligentes, tabletas, ordenadores portátiles, televisores inteligentes, consolas de juegos y dispositivos de transmisión. </li>
14
-
15
- <li>Permite a los usuarios crear múltiples perfiles dentro de una cuenta, cada uno con sus propios ajustes y preferencias. </li>
16
- <li>Ofrece varias características para mejorar la experiencia del usuario, como subtítulos, subtítulos, descripciones de audio, controles parentales, omitir la introducción, velocidad de reproducción, etc.</li>
17
- </ul>
18
- <h3>Planes de suscripción y precios de Netflix</h3>
19
- <p>Para acceder al contenido de Netflix, los usuarios deben registrarse en un plan de suscripción que se adapte a sus necesidades y presupuesto. Netflix ofrece cuatro planes de suscripción: Básico, Estándar, Premium y Ultra. Las principales diferencias entre estos planes son el número de pantallas que se pueden utilizar simultáneamente, la calidad de vídeo (SD, HD o 4K), y la disponibilidad de HDR y Dolby Visión. La siguiente tabla muestra los detalles de cada plan:</p>
20
- <table>
21
- <tr>
22
- <th>Plan</th>
23
- <th>Pantallas</th>
24
- <th>Calidad</th>
25
- <th>HDR/Dolby Visión</th>
26
- <th>Precio (USD)</th>
27
- </tr>
28
- <tr>
29
- <td>Básico</td>
30
- <td>1</td>
31
- <td>SD</td>
32
- <td>No</td>
33
- <td>$8.99/mes</td>
34
- </tr>
35
- <tr>
36
- <td>Estándar</td>
37
- <td>2</td>
38
- <td>HD</td>
39
- <td>No</td>
40
- <td>$13.99/mes</td>
41
- </tr>
42
- <tr>
43
- <td>Premium</td>
44
- <td>4</td>
45
- <td>4K</td>
46
- <td>Sí</td>
47
- <td>$17.99/mes</td>
48
- </tr>
49
- <tr>
50
- <td>Ultra</td>
51
- <td>4</td>
52
- <td>4K+</td>
53
- <td>Sí</td>
54
- <td>$19.99/mes</td>
55
- </tr>
56
- </table>
57
- <p>Tenga en cuenta que los precios pueden variar dependiendo del país y la región del usuario. Los usuarios también pueden optar por una prueba gratuita durante un período de tiempo limitado antes de comprometerse con un plan. </p>
58
- <h2>¿Qué es un mod APK y por qué lo necesita? </h2>
59
- <p>Un mod APK es una versión modificada de un archivo de paquete de aplicaciones de Android (APK), que es el formato utilizado para distribuir e instalar aplicaciones en dispositivos Android. Un mod APK puede tener características y funciones diferentes a las de la aplicación original, como eliminar anuncios, desbloquear contenido premium, añadir opciones adicionales o mejorar el rendimiento. Un mod APK suele ser creado por desarrolladores de terceros o hackers que modifican el código fuente de la aplicación original. </p>
60
- <h3>Mod APK definición y ventajas</h3>
61
- <p>Algunas de las ventajas de usar un mod APK son:</p>
62
- <ul>
63
- <li> Puede proporcionar acceso a contenido premium o características que están restringidas o pagadas en la aplicación original. </li>
64
- <li> Puede mejorar la experiencia del usuario eliminando anuncios molestos, mejorando los gráficos, aumentando la velocidad, etc.</li>
65
- <li> Puede permitir a los usuarios personalizar la aplicación de acuerdo a sus preferencias y necesidades. </li>
66
- <li> Puede evitar las restricciones regionales y el bloqueo geográfico que pueden limitar la disponibilidad de algunos contenidos o servicios en ciertas áreas. </li>
67
- </ul>
68
- <h3>Riesgos y desafíos de usar mod APKs</h3>
69
- <p>Sin embargo, el uso de un mod APK también viene con algunos riesgos y desafíos, tales como:</p>
70
- <ul>
71
- <li>Puede exponer los datos del dispositivo y del usuario a malware, virus, spyware u otros ataques maliciosos que pueden dañar el dispositivo o comprometer la privacidad y la seguridad del usuario. </li>
72
- <li> Puede violar los términos y condiciones de la aplicación original y su desarrollador, lo que puede resultar en acciones legales o sanciones. </li>
73
- <li>Puede causar problemas de compatibilidad o errores con el dispositivo u otras aplicaciones, lo que puede afectar la funcionalidad o el rendimiento del dispositivo o aplicación. </li>
74
- <li>Puede ser desactualizado o poco fiable, ya que puede no recibir actualizaciones regulares o soporte del desarrollador original o el modder. </li>
75
- </ul>
76
- <p>Por lo tanto, los usuarios deben tener cuidado y precaución al descargar e instalar APK mod, y solo usarlos de fuentes confiables y de buena reputación. </p>
77
- <h2>Cómo descargar e instalar Netflix mod APK en su dispositivo</h2>
78
- <p>Si desea descargar e instalar un mod APK de Netflix en su dispositivo, tendrá que seguir estos pasos:</p>
79
- <h3>Paso 1: Encontrar una fuente confiable para el archivo mod APK</h3>
80
- <p>El primer paso es encontrar una fuente confiable para el archivo mod APK, ya que muchos sitios web ofrecen archivos falsos, desactualizados o infectados con malware. Busque sitios con buena reputación y comentarios positivos de otros usuarios, y analice siempre el archivo descargado con un antivirus. </p>
81
- <h3>Paso 2: Habilitar fuentes desconocidas en la configuración del dispositivo</h3>
82
- <p>El siguiente paso es habilitar fuentes desconocidas en la configuración del dispositivo, lo que le permitirá instalar aplicaciones desde fuentes distintas de Google Play Store. Para hacer esto, tendrá que ir a la configuración del dispositivo, luego la seguridad o la privacidad, y luego activar la opción para fuentes desconocidas. También es posible que necesite conceder permiso a su navegador o administrador de archivos para instalar aplicaciones de fuentes desconocidas. </p>
83
- <p></p>
84
- <h3>Paso 3: Descargar e instalar el archivo mod APK</h3>
85
- <p>El tercer paso es descargar e instalar el archivo APK mod en su dispositivo. Puede hacer esto haciendo clic en el enlace de descarga o en el botón de la fuente que ha elegido, y luego esperar a que el archivo se descargue en su dispositivo. Una vez descargado el archivo, puede abrirlo con su administrador de archivos o navegador, luego toque en instalar. Es posible que necesite aceptar algunos permisos o advertencias antes de instalar la aplicación. </p>
86
- <h3>Paso 4: Inicie la aplicación y disfrute de streaming ilimitado</h3>
87
- <p>El paso final es lanzar la aplicación y disfrutar de la transmisión ilimitada de contenido de Netflix de forma gratuita. Puede hacer esto abriendo la aplicación desde el cajón de aplicaciones o la pantalla de inicio, luego iniciando sesión con su correo electrónico o cuenta de Facebook, o creando una nueva cuenta si no tiene una. A continuación, puede navegar a través de las categorías y géneros de contenido disponibles en Netflix, o buscar títulos específicos que desea ver. También puede ajustar algunos ajustes y preferencias dentro de la aplicación, como la calidad del vídeo, el idioma, los subtítulos, etc.</p>
88
- <h2>Comparación de Netflix mod APK y la aplicación oficial de Netflix</h2>
89
- <h3>Comparación de funciones y características</h3>
90
- <p>En cuanto a funciones, Netflix mod APK ofrece acceso gratuito al contenido premium, sin cuotas de suscripción, sin anuncios y sin restricciones regionales, e incluso permite ver contenido en calidad 4K con HDR y Dolby Vision. </p>
91
- <p>Sin embargo, Netflix mod APK también tiene algunos inconvenientes y limitaciones en comparación con la aplicación oficial de Netflix. Por ejemplo, Netflix mod APK no puede tener todas las características y funciones que la aplicación oficial tiene, tales como la descarga de contenido para la visualización sin conexión, la creación de múltiples perfiles, conseguir recomendaciones personalizadas, etc. Netflix mod APK también puede tener algunos errores o errores que pueden afectar el rendimiento o la funcionalidad de la aplicación. Además, Netflix mod APK no se puede actualizar o apoyar regularmente, lo que puede causar problemas de compatibilidad o riesgos de seguridad. </p>
92
- <h3>Comparación de pros y contras</h3>
93
- <p>Para resumir, aquí están algunos de los pros y los contras de usar Netflix mod APK versus la aplicación oficial de Netflix:</p>
94
- <table>
95
- <tr>
96
- <th>Netflix mod APK</th>
97
- <th>Aplicación oficial de Netflix</th>
98
- </tr>
99
- <tr>
100
- <td>Pros:</td>
101
- <td>Pros:</td>
102
- </tr>
103
- <tr>
104
- <td><ul>
105
- <li>Acceso gratuito al contenido premium</li>
106
- <li>Sin cuotas de suscripción o planes</li>
107
- <li>No hay restricciones regionales o bloqueo geográfico</li>
108
- <li>Calidad 4K con soporte HDR y Dolby Visión</li>
109
- <li>No hay anuncios o ventanas emergentes</li>
110
- </ul></td>
111
- <td><ul>
112
- <li>Aplicación segura y segura</li>
113
- <li>Actualizaciones regulares y soporte</li>
114
- <li>Descargar contenido para ver sin conexión</li>
115
- <li>Crear múltiples perfiles</li>
116
- <li>Obtener recomendaciones personalizadas</li>
117
- </ul></td>
118
- </tr>
119
- <tr>
120
- <td>Contras:</td>
121
- <td>Contras:</td>
122
- </tr>
123
- <tr>
124
- <td><ul>
125
- <li>Ataques potenciales de malware o virus</li>
126
- <li>Acciones legales o sanciones</li>
127
- <li>Problemas o errores de compatibilidad</li>
128
- <li>Falta de características o funciones</li>
129
- <li>Aplicación desactualizada o poco fiable</li>
130
- </ul></td>
131
- <td><ul>
132
- <li>Se requieren tarifas de suscripción o planes</li>
133
- <li>Restricciones regionales o bloqueo geográfico aplicado</li>
134
- <li>La calidad del vídeo depende del dispositivo y del plan</li>
135
- <li>Pueden aparecer anuncios o ventanas emergentes</li>
136
- <li>Disponibilidad limitada de contenido en algunas áreas</li>
137
- </ul></td>
138
- </tr>
139
- </table>
140
- <h2>Conclusión y preguntas frecuentes</h2>
141
- <p>En resumen, Netflix mod APK le permite ver contenido premium de forma gratuita, pero conlleva riesgos legales y de seguridad que debe tener en cuenta. Si prefiere una experiencia segura, estable y con todas las funciones, la aplicación oficial de Netflix sigue siendo la mejor opción. </p>
142
- <p>Si tiene alguna pregunta sobre Netflix mod APK, puede encontrar las respuestas en las siguientes preguntas frecuentes:</p>
143
- <h4>Q: ¿Es Netflix mod APK legal? </h4>
144
- <p>A: No, Netflix mod APK no es legal, ya que viola los términos y condiciones de la aplicación original y su desarrollador. El uso de un mod de Netflix APK puede resultar en acciones legales o sanciones de Netflix u otras autoridades. </p>
145
- <h4>Q: ¿Es seguro el mod APK de Netflix? </h4>
146
- <p>A: No necesariamente, Netflix mod APK puede no ser seguro, ya que puede exponer el dispositivo y los datos del usuario a malware, virus, spyware u otros ataques maliciosos que pueden dañar el dispositivo o comprometer la privacidad y la seguridad del usuario. Los usuarios siempre deben escanear el archivo APK mod con un software antivirus antes de instalarlo en su dispositivo. </p>
147
- <h4>Q: ¿Cómo puedo actualizar Netflix mod APK? </h4>
148
- <p>A: Para actualizar Netflix mod APK, los usuarios necesitan encontrar una versión más nueva del archivo mod APK de una fuente confiable, luego descargarlo e instalarlo en su dispositivo. Los usuarios también deben desinstalar la versión anterior del mod APK antes de instalar el nuevo. </p>
149
- <h4>Q: ¿Cómo puedo desinstalar Netflix mod APK? </h4>
150
- <p>A: Para desinstalar Netflix mod APK, los usuarios deben ir a la configuración de su dispositivo, luego a Aplicaciones, buscar y seleccionar la aplicación de Netflix mod APK y tocar en Desinstalar. También deben eliminar los archivos o carpetas residuales relacionados con la aplicación del almacenamiento de su dispositivo. </p>
151
- <h4>Q: ¿Puedo usar Netflix mod APK en otros dispositivos? </h4>
152
- <p>A: Sí, los usuarios pueden usar Netflix mod APK en otros dispositivos que admiten el sistema operativo Android, como teléfonos inteligentes, tabletas, computadoras portátiles, televisores inteligentes, consolas de juegos y dispositivos de transmisión. Sin embargo, los usuarios deben asegurarse de que el archivo APK mod es compatible con su modelo de dispositivo y la versión antes de instalarlo en su dispositivo. </p>
 
spaces/Benson/text-generation/Examples/Descargar Ekelebe De J Martins.md DELETED
@@ -1,64 +0,0 @@
1
-
2
- <h1>Cómo descargar música de Tommy J Pisa</h1>
3
- <p>Si eres un fan de la música pop y dangdut indonesia, es posible que hayas oído hablar de Tommy J Pisa, un cantante que saltó a la fama en los años 1980 y 1990. Es conocido por su voz melodiosa y canciones románticas, como "Dibatas Kota Ini", "Surat Untuk Kekasih", y "Biarkan Aku Menangis". Su música ha tocado los corazones de muchos oyentes y se ha convertido en parte de la herencia musical de Indonesia. </p>
4
- <h2>descargar ekelebe de j martins</h2><br /><p><b><b>Download Zip</b> &hArr; <a href="https://bltlly.com/2v6K2z">https://bltlly.com/2v6K2z</a></b></p><br /><br />
5
- <p>Pero, ¿cómo se puede descargar música de Tommy J Pisa y disfrutar de ella en sus dispositivos? En este artículo, le mostraremos tres maneras de hacerlo legal y éticamente, sin violar ninguna ley de derechos de autor ni dañar al artista. También responderemos algunas preguntas frecuentes sobre Tommy J Pisa y su música. </p>
6
- <h2>Opción 1: Compra sus álbumes o canciones en tiendas de música en línea</h2>
7
- <p>La forma más sencilla de descargar música de Tommy J Pisa es comprar sus álbumes o canciones de tiendas de música en línea, como iTunes, Amazon o Google Play. Al hacer esto, apoyará al artista financieramente y obtendrá archivos MP3 de alta calidad que puede reproducir en cualquier dispositivo. También obtendrá acceso a la ilustración del álbum, letras y otra información. </p>
8
- <p>Para comprar música de Tommy J Pisa en línea, necesitará una tarjeta de crédito o una billetera digital, como PayPal. También tendrá que crear una cuenta en la tienda de música en línea de su elección y descargar su aplicación o software. Una vez que hayas hecho eso, puedes navegar por su catálogo y buscar los álbumes o canciones de Tommy J Pisa. Puede previsualizar las canciones antes de comprarlas y luego hacer clic en el botón de compra para completar la compra. Las canciones se descargarán en tu dispositivo o almacenamiento en la nube y podrás escucharlas en cualquier momento. </p>
9
- <h2>Opción 2: Transmitir su música desde plataformas en línea que permiten escuchar sin conexión</h2>
10
- <p>La segunda forma de descargar música de Tommy J Pisa es transmitirla desde plataformas en línea que permiten la escucha sin conexión, como Spotify, Apple Music, YouTube Music o Deezer. Estas plataformas le permiten guardar canciones dentro de sus aplicaciones para reproducirlas sin conexión mientras mantenga una suscripción activa. </p>
11
- <p>Para transmitir música de Tommy J Pisa en línea, necesitará una conexión a Internet y una suscripción a la plataforma de su elección. Algunas plataformas ofrecen pruebas gratuitas o planes con anuncios, mientras que otras requieren una cuota mensual o anual. También tendrá que descargar su aplicación o software y crear una cuenta. Una vez que hayas hecho eso, puedes buscar la música de Tommy J Pisa y agregarla a tu biblioteca o lista de reproducción. A continuación, puede escucharlo en línea o descargarlo para escucharlo sin conexión alternando el botón de descarga. Las canciones se almacenarán en su dispositivo o almacenamiento en la nube y puede escucharlas en cualquier momento. </p>
12
- <h2>Opción 3: Descargar su música desde sitios web libres y legales que ofrecen sus canciones con su permiso</h2>
13
- <p>La tercera forma de descargar música de Tommy J Pisa es descargar su música desde sitios web gratuitos y legales que ofrecen sus canciones con su permiso. Estos sitios web suelen ser administrados por fans o sellos independientes que han obtenido los derechos para distribuir su música de forma gratuita. También pueden ofrecer otros contenidos relacionados con Tommy J Pisa, como vídeos, fotos o noticias. </p>
14
- <p>Para descargar música de Tommy J Pisa desde estos sitios web, necesitará una conexión a Internet y un navegador web. También tendrá que encontrar estos sitios web mediante la búsqueda en línea o siguiendo los enlaces de las redes sociales u otras fuentes. Algunos ejemplos de estos sitios web son:</p>
15
- <p></p>
16
- <table>
17
- <tr><th>Sitio web</th><th>Descripción</th></tr>
18
- <tr><td>[Akurama Records]</td><td>Una discográfica con sede en Yakarta que ha subido varios álbumes de Tommy J Pisa en YouTube. Puede escucharlos en línea o descargarlos como archivos MP3 utilizando una herramienta de descarga de YouTube. </td></tr>
19
- <tr><td>[Tommy J Pisa Fans Club]</td><td>Un sitio web dedicado a Tommy J Pisa que tiene una colección de sus canciones, videos, fotos y noticias. Puede escuchar sus canciones en línea o descargarlas como archivos MP3 haciendo clic en el enlace de descarga. </td></tr>
20
-
21
- </table>
22
- <p>Sin embargo, debe tener cuidado al descargar música de estos sitios web, ya que algunos de ellos pueden contener virus, malware o spyware que pueden dañar su dispositivo o comprometer su privacidad. También debes respetar los deseos del artista y no compartir su música sin su permiso o usarla con fines comerciales. </p>
23
- <h2>Conclusión</h2>
24
- <p>En conclusión, hay tres maneras de descargar música de Tommy J Pisa legal y éticamente: comprar sus álbumes o canciones de tiendas de música en línea, streaming de su música desde plataformas en línea que permiten escuchar fuera de línea, y descargar su música de sitios web libres y legales que ofrecen sus canciones con su permiso. Al hacerlo, podrás disfrutar de su música en tus dispositivos y apreciar su talento y contribución a la escena musical indonesia. </p>
25
- <p>Aquí hay algunos consejos sobre cómo disfrutar de su música:</p>
26
- <ul>
27
- <li>Crea una lista de reproducción de tus canciones favoritas de Tommy J Pisa y escúchala cuando quieras. </li>
28
- <li>Comparte su música con tus amigos y familiares y presentarles su estilo y género. </li>
29
- <li>Ver sus vídeos en YouTube u otras plataformas y ver cómo se realiza en vivo o en estudio. </li>
30
- <li>Síguelo en las redes sociales u otros canales y mantente actualizado sobre sus últimas noticias y actividades. </li>
31
- <li>Apoyarlo asistiendo a sus conciertos o eventos si es posible y mostrarle su amor y aprecio. </li>
32
- </ul>
33
- <h2>Preguntas frecuentes</h2>
34
- <h3>¿Quién es Tommy J Pisa? </h3>
35
- <p>Tommy J Pisa es un cantante indonesio especializado en música pop y dangdut. Nació en Yakarta el 22 de diciembre de 1960. Comenzó su carrera como cantante callejero y más tarde se unió a varias bandas antes de ir en solitario. Ha publicado más de 20 álbumes y ha ganado varios premios y reconocimientos por su música. </p>
36
- <h3>¿Qué es dangdut? </h3>
37
- <p>Dangdut es un género de música popular de Indonesia que combina influencias de la música malaya, india y árabe. Se caracteriza por su ritmo bailable, el uso de instrumentos como la tabla y la flauta, y sus letras sobre el amor y la vida cotidiana. Es uno de los géneros más populares de Indonesia y una parte esencial del estilo de Tommy J Pisa. </p>
38
- <h3>¿Cuáles son algunas de las canciones más populares de Tommy J Pisa? </h3>
39
- <p>Algunas de las canciones más populares de Tommy J Pisa son:</p>
40
- <ol>
41
- <li>"Dibatas Kota Ini" (Al borde de esta ciudad), una canción sobre una relación a larga distancia que termina en tragedia. </li>
42
- <li>"Surat Untuk Kekasih" (Carta para Amante), una canción sobre un hombre que escribe una carta a su amante que lo ha dejado por otro hombre. </li>
43
- <li>"Biarkan Aku Menangis" (Let Me Cry), una canción sobre un hombre que expresa su tristeza y arrepentimiento después de perder a su amante. </li>
44
- <li>"Disini Dibatas Kota Ini" (Aquí en el borde de esta ciudad), una secuela de "Dibatas Kota Ini" que cuenta la historia del amante que regresa a la ciudad después de años de separación. </li>
45
- <li>"Nasib Pengamen" (The Fate of Street Singers), una canción que refleja la propia experiencia de Tommy J Pisa como cantante callejero que lucha por llegar a fin de mes. </li>
46
- </ol>
47
- <h3>¿Dónde puedo encontrar más información sobre Tommy J Pisa? </h3>
48
- <p>Puedes encontrar más información sobre Tommy J Pisa en las siguientes fuentes:</p>
49
- <ul>
50
- <li>[Su sitio web oficial], donde puedes encontrar su biografía, discografía, galería, noticias y datos de contacto. </li>
51
- <li>[Su página de Facebook], donde puedes seguirlo y ver sus publicaciones, fotos, videos y eventos. </li>
52
- <li>[Su cuenta de Instagram], donde puedes seguirlo y ver sus historias, fotos, videos y transmisiones en vivo. </li>
53
- <li>[Su canal de YouTube], donde puedes suscribirte a él y ver sus videos, entrevistas y presentaciones en vivo. </li>
54
- <li>[Su página de Wikipedia], donde puedes encontrar un resumen de su vida, carrera, premios y discografía. </li>
55
- </ul>
56
- <h3>¿Cómo puedo contactar a Tommy J Pisa? </h3>
57
- <p>Si desea ponerse en contacto con Tommy J Pisa por cualquier motivo, como reservarlo para un espectáculo, colaborar con él o enviarle un correo de fans, puede hacerlo utilizando los siguientes métodos:</p>
58
- <ul>
59
- <li>Correo electrónico: [email protected]</li>
60
- <li>Teléfono: +62 812 3456 7890</li>
61
- <li>Dirección: Jl. Raya Bogor No. 123, Yakarta Timur, Indonesia</li>
62
- </ul>
 
spaces/BetterAPI/BetterChat_new/src/lib/buildPrompt.ts DELETED
@@ -1,33 +0,0 @@
1
- import {
2
- PUBLIC_ASSISTANT_MESSAGE_TOKEN,
3
- PUBLIC_MAX_INPUT_TOKENS,
4
- PUBLIC_PREPROMPT,
5
- PUBLIC_SEP_TOKEN,
6
- PUBLIC_USER_MESSAGE_TOKEN,
7
- } from "$env/static/public";
8
- import type { Message } from "./types/Message";
9
-
10
- /**
11
- * Convert [{user: "assistant", content: "hi"}, {user: "user", content: "hello"}] to:
12
- *
13
- * <|assistant|>hi<|endoftext|><|prompter|>hello<|endoftext|><|assistant|>
14
- */
15
- export function buildPrompt(messages: Message[]): string {
16
- const prompt =
17
- messages
18
- .map(
19
- (m) =>
20
- (m.from === "user"
21
- ? PUBLIC_USER_MESSAGE_TOKEN + m.content
22
- : PUBLIC_ASSISTANT_MESSAGE_TOKEN + m.content) +
23
- (m.content.endsWith(PUBLIC_SEP_TOKEN) ? "" : PUBLIC_SEP_TOKEN)
24
- )
25
- .join("") + PUBLIC_ASSISTANT_MESSAGE_TOKEN;
26
-
27
- // Not super precise, but it's truncated in the model's backend anyway
28
- return (
29
- PUBLIC_PREPROMPT +
30
- "\n-----\n" +
31
- prompt.split(" ").slice(-parseInt(PUBLIC_MAX_INPUT_TOKENS)).join(" ")
32
- );
33
- }
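A minimal usage sketch of the prompt-building logic above, added for illustration; it is not part of the deleted file. It inlines the same mapping with assumed placeholder values for the "$env/static/public" constants (user/assistant/separator tokens, preprompt, token limit), since the real values depend on the deployment configuration.

// Hypothetical, self-contained TypeScript sketch of buildPrompt (token values are assumptions).
type Message = { from: "user" | "assistant"; content: string };

// Assumed example values for the public env constants:
const USER_TOKEN = "<|prompter|>";
const ASSISTANT_TOKEN = "<|assistant|>";
const SEP_TOKEN = "<|endoftext|>";
const PREPROMPT = "Below is a conversation between a user and an assistant.";
const MAX_INPUT_TOKENS = 1024;

function buildPromptSketch(messages: Message[]): string {
  // Prefix each message with its role token and make sure it ends with the separator.
  const prompt =
    messages
      .map(
        (m) =>
          (m.from === "user" ? USER_TOKEN + m.content : ASSISTANT_TOKEN + m.content) +
          (m.content.endsWith(SEP_TOKEN) ? "" : SEP_TOKEN)
      )
      .join("") + ASSISTANT_TOKEN;

  // Rough word-based truncation, mirroring the original comment that the backend
  // truncates again, so precision is not required here.
  return PREPROMPT + "\n-----\n" + prompt.split(" ").slice(-MAX_INPUT_TOKENS).join(" ");
}

// Two turns should print the preprompt, a "-----" line, and then
// "<|prompter|>hello<|endoftext|><|assistant|>hi<|endoftext|><|assistant|>".
console.log(
  buildPromptSketch([
    { from: "user", content: "hello" },
    { from: "assistant", content: "hi" },
  ])
);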
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/typing_extensions.py DELETED
@@ -1,2312 +0,0 @@
1
- import abc
2
- import collections
3
- import collections.abc
4
- import functools
5
- import inspect
6
- import operator
7
- import sys
8
- import types as _types
9
- import typing
10
- import warnings
11
-
12
-
13
- __all__ = [
14
- # Super-special typing primitives.
15
- 'Any',
16
- 'ClassVar',
17
- 'Concatenate',
18
- 'Final',
19
- 'LiteralString',
20
- 'ParamSpec',
21
- 'ParamSpecArgs',
22
- 'ParamSpecKwargs',
23
- 'Self',
24
- 'Type',
25
- 'TypeVar',
26
- 'TypeVarTuple',
27
- 'Unpack',
28
-
29
- # ABCs (from collections.abc).
30
- 'Awaitable',
31
- 'AsyncIterator',
32
- 'AsyncIterable',
33
- 'Coroutine',
34
- 'AsyncGenerator',
35
- 'AsyncContextManager',
36
- 'ChainMap',
37
-
38
- # Concrete collection types.
39
- 'ContextManager',
40
- 'Counter',
41
- 'Deque',
42
- 'DefaultDict',
43
- 'NamedTuple',
44
- 'OrderedDict',
45
- 'TypedDict',
46
-
47
- # Structural checks, a.k.a. protocols.
48
- 'SupportsIndex',
49
-
50
- # One-off things.
51
- 'Annotated',
52
- 'assert_never',
53
- 'assert_type',
54
- 'clear_overloads',
55
- 'dataclass_transform',
56
- 'deprecated',
57
- 'get_overloads',
58
- 'final',
59
- 'get_args',
60
- 'get_origin',
61
- 'get_type_hints',
62
- 'IntVar',
63
- 'is_typeddict',
64
- 'Literal',
65
- 'NewType',
66
- 'overload',
67
- 'override',
68
- 'Protocol',
69
- 'reveal_type',
70
- 'runtime',
71
- 'runtime_checkable',
72
- 'Text',
73
- 'TypeAlias',
74
- 'TypeGuard',
75
- 'TYPE_CHECKING',
76
- 'Never',
77
- 'NoReturn',
78
- 'Required',
79
- 'NotRequired',
80
- ]
81
-
82
- # for backward compatibility
83
- PEP_560 = True
84
- GenericMeta = type
85
-
86
- # The functions below are modified copies of typing internal helpers.
87
- # They are needed by _ProtocolMeta and they provide support for PEP 646.
88
-
89
- _marker = object()
90
-
91
-
92
- def _check_generic(cls, parameters, elen=_marker):
93
- """Check correct count for parameters of a generic cls (internal helper).
94
- This gives a nice error message in case of count mismatch.
95
- """
96
- if not elen:
97
- raise TypeError(f"{cls} is not a generic class")
98
- if elen is _marker:
99
- if not hasattr(cls, "__parameters__") or not cls.__parameters__:
100
- raise TypeError(f"{cls} is not a generic class")
101
- elen = len(cls.__parameters__)
102
- alen = len(parameters)
103
- if alen != elen:
104
- if hasattr(cls, "__parameters__"):
105
- parameters = [p for p in cls.__parameters__ if not _is_unpack(p)]
106
- num_tv_tuples = sum(isinstance(p, TypeVarTuple) for p in parameters)
107
- if (num_tv_tuples > 0) and (alen >= elen - num_tv_tuples):
108
- return
109
- raise TypeError(f"Too {'many' if alen > elen else 'few'} parameters for {cls};"
110
- f" actual {alen}, expected {elen}")
111
-
112
-
113
- if sys.version_info >= (3, 10):
114
- def _should_collect_from_parameters(t):
115
- return isinstance(
116
- t, (typing._GenericAlias, _types.GenericAlias, _types.UnionType)
117
- )
118
- elif sys.version_info >= (3, 9):
119
- def _should_collect_from_parameters(t):
120
- return isinstance(t, (typing._GenericAlias, _types.GenericAlias))
121
- else:
122
- def _should_collect_from_parameters(t):
123
- return isinstance(t, typing._GenericAlias) and not t._special
124
-
125
-
126
- def _collect_type_vars(types, typevar_types=None):
127
- """Collect all type variable contained in types in order of
128
- first appearance (lexicographic order). For example::
129
-
130
- _collect_type_vars((T, List[S, T])) == (T, S)
131
- """
132
- if typevar_types is None:
133
- typevar_types = typing.TypeVar
134
- tvars = []
135
- for t in types:
136
- if (
137
- isinstance(t, typevar_types) and
138
- t not in tvars and
139
- not _is_unpack(t)
140
- ):
141
- tvars.append(t)
142
- if _should_collect_from_parameters(t):
143
- tvars.extend([t for t in t.__parameters__ if t not in tvars])
144
- return tuple(tvars)
145
-
146
-
147
- NoReturn = typing.NoReturn
148
-
149
- # Some unconstrained type variables. These are used by the container types.
150
- # (These are not for export.)
151
- T = typing.TypeVar('T') # Any type.
152
- KT = typing.TypeVar('KT') # Key type.
153
- VT = typing.TypeVar('VT') # Value type.
154
- T_co = typing.TypeVar('T_co', covariant=True) # Any type covariant containers.
155
- T_contra = typing.TypeVar('T_contra', contravariant=True) # Ditto contravariant.
156
-
157
-
158
- if sys.version_info >= (3, 11):
159
- from typing import Any
160
- else:
161
-
162
- class _AnyMeta(type):
163
- def __instancecheck__(self, obj):
164
- if self is Any:
165
- raise TypeError("typing_extensions.Any cannot be used with isinstance()")
166
- return super().__instancecheck__(obj)
167
-
168
- def __repr__(self):
169
- if self is Any:
170
- return "typing_extensions.Any"
171
- return super().__repr__()
172
-
173
- class Any(metaclass=_AnyMeta):
174
- """Special type indicating an unconstrained type.
175
- - Any is compatible with every type.
176
- - Any assumed to have all methods.
177
- - All values assumed to be instances of Any.
178
- Note that all the above statements are true from the point of view of
179
- static type checkers. At runtime, Any should not be used with instance
180
- checks.
181
- """
182
- def __new__(cls, *args, **kwargs):
183
- if cls is Any:
184
- raise TypeError("Any cannot be instantiated")
185
- return super().__new__(cls, *args, **kwargs)
186
-
187
-
188
- ClassVar = typing.ClassVar
189
-
190
- # On older versions of typing there is an internal class named "Final".
191
- # 3.8+
192
- if hasattr(typing, 'Final') and sys.version_info[:2] >= (3, 7):
193
- Final = typing.Final
194
- # 3.7
195
- else:
196
- class _FinalForm(typing._SpecialForm, _root=True):
197
-
198
- def __repr__(self):
199
- return 'typing_extensions.' + self._name
200
-
201
- def __getitem__(self, parameters):
202
- item = typing._type_check(parameters,
203
- f'{self._name} accepts only a single type.')
204
- return typing._GenericAlias(self, (item,))
205
-
206
- Final = _FinalForm('Final',
207
- doc="""A special typing construct to indicate that a name
208
- cannot be re-assigned or overridden in a subclass.
209
- For example:
210
-
211
- MAX_SIZE: Final = 9000
212
- MAX_SIZE += 1 # Error reported by type checker
213
-
214
- class Connection:
215
- TIMEOUT: Final[int] = 10
216
- class FastConnector(Connection):
217
- TIMEOUT = 1 # Error reported by type checker
218
-
219
- There is no runtime checking of these properties.""")
220
-
221
- if sys.version_info >= (3, 11):
222
- final = typing.final
223
- else:
224
- # @final exists in 3.8+, but we backport it for all versions
225
- # before 3.11 to keep support for the __final__ attribute.
226
- # See https://bugs.python.org/issue46342
227
- def final(f):
228
- """This decorator can be used to indicate to type checkers that
229
- the decorated method cannot be overridden, and decorated class
230
- cannot be subclassed. For example:
231
-
232
- class Base:
233
- @final
234
- def done(self) -> None:
235
- ...
236
- class Sub(Base):
237
- def done(self) -> None: # Error reported by type checker
238
- ...
239
- @final
240
- class Leaf:
241
- ...
242
- class Other(Leaf): # Error reported by type checker
243
- ...
244
-
245
- There is no runtime checking of these properties. The decorator
246
- sets the ``__final__`` attribute to ``True`` on the decorated object
247
- to allow runtime introspection.
248
- """
249
- try:
250
- f.__final__ = True
251
- except (AttributeError, TypeError):
252
- # Skip the attribute silently if it is not writable.
253
- # AttributeError happens if the object has __slots__ or a
254
- # read-only property, TypeError if it's a builtin class.
255
- pass
256
- return f
257
-
258
-
259
- def IntVar(name):
260
- return typing.TypeVar(name)
261
-
262
-
263
- # 3.8+:
264
- if hasattr(typing, 'Literal'):
265
- Literal = typing.Literal
266
- # 3.7:
267
- else:
268
- class _LiteralForm(typing._SpecialForm, _root=True):
269
-
270
- def __repr__(self):
271
- return 'typing_extensions.' + self._name
272
-
273
- def __getitem__(self, parameters):
274
- return typing._GenericAlias(self, parameters)
275
-
276
- Literal = _LiteralForm('Literal',
277
- doc="""A type that can be used to indicate to type checkers
278
- that the corresponding value has a value literally equivalent
279
- to the provided parameter. For example:
280
-
281
- var: Literal[4] = 4
282
-
283
- The type checker understands that 'var' is literally equal to
284
- the value 4 and no other value.
285
-
286
- Literal[...] cannot be subclassed. There is no runtime
287
- checking verifying that the parameter is actually a value
288
- instead of a type.""")
289
-
290
-
291
- _overload_dummy = typing._overload_dummy # noqa
292
-
293
-
294
- if hasattr(typing, "get_overloads"): # 3.11+
295
- overload = typing.overload
296
- get_overloads = typing.get_overloads
297
- clear_overloads = typing.clear_overloads
298
- else:
299
- # {module: {qualname: {firstlineno: func}}}
300
- _overload_registry = collections.defaultdict(
301
- functools.partial(collections.defaultdict, dict)
302
- )
303
-
304
- def overload(func):
305
- """Decorator for overloaded functions/methods.
306
-
307
- In a stub file, place two or more stub definitions for the same
308
- function in a row, each decorated with @overload. For example:
309
-
310
- @overload
311
- def utf8(value: None) -> None: ...
312
- @overload
313
- def utf8(value: bytes) -> bytes: ...
314
- @overload
315
- def utf8(value: str) -> bytes: ...
316
-
317
- In a non-stub file (i.e. a regular .py file), do the same but
318
- follow it with an implementation. The implementation should *not*
319
- be decorated with @overload. For example:
320
-
321
- @overload
322
- def utf8(value: None) -> None: ...
323
- @overload
324
- def utf8(value: bytes) -> bytes: ...
325
- @overload
326
- def utf8(value: str) -> bytes: ...
327
- def utf8(value):
328
- # implementation goes here
329
-
330
- The overloads for a function can be retrieved at runtime using the
331
- get_overloads() function.
332
- """
333
- # classmethod and staticmethod
334
- f = getattr(func, "__func__", func)
335
- try:
336
- _overload_registry[f.__module__][f.__qualname__][
337
- f.__code__.co_firstlineno
338
- ] = func
339
- except AttributeError:
340
- # Not a normal function; ignore.
341
- pass
342
- return _overload_dummy
343
-
344
- def get_overloads(func):
345
- """Return all defined overloads for *func* as a sequence."""
346
- # classmethod and staticmethod
347
- f = getattr(func, "__func__", func)
348
- if f.__module__ not in _overload_registry:
349
- return []
350
- mod_dict = _overload_registry[f.__module__]
351
- if f.__qualname__ not in mod_dict:
352
- return []
353
- return list(mod_dict[f.__qualname__].values())
354
-
355
- def clear_overloads():
356
- """Clear all overloads in the registry."""
357
- _overload_registry.clear()
358
-
359
-
360
- # This is not a real generic class. Don't use outside annotations.
361
- Type = typing.Type
362
-
363
- # Various ABCs mimicking those in collections.abc.
364
- # A few are simply re-exported for completeness.
365
-
366
-
367
- Awaitable = typing.Awaitable
368
- Coroutine = typing.Coroutine
369
- AsyncIterable = typing.AsyncIterable
370
- AsyncIterator = typing.AsyncIterator
371
- Deque = typing.Deque
372
- ContextManager = typing.ContextManager
373
- AsyncContextManager = typing.AsyncContextManager
374
- DefaultDict = typing.DefaultDict
375
-
376
- # 3.7.2+
377
- if hasattr(typing, 'OrderedDict'):
378
- OrderedDict = typing.OrderedDict
379
- # 3.7.0-3.7.2
380
- else:
381
- OrderedDict = typing._alias(collections.OrderedDict, (KT, VT))
382
-
383
- Counter = typing.Counter
384
- ChainMap = typing.ChainMap
385
- AsyncGenerator = typing.AsyncGenerator
386
- NewType = typing.NewType
387
- Text = typing.Text
388
- TYPE_CHECKING = typing.TYPE_CHECKING
389
-
390
-
391
- _PROTO_WHITELIST = ['Callable', 'Awaitable',
392
- 'Iterable', 'Iterator', 'AsyncIterable', 'AsyncIterator',
393
- 'Hashable', 'Sized', 'Container', 'Collection', 'Reversible',
394
- 'ContextManager', 'AsyncContextManager']
395
-
396
-
397
- def _get_protocol_attrs(cls):
398
- attrs = set()
399
- for base in cls.__mro__[:-1]: # without object
400
- if base.__name__ in ('Protocol', 'Generic'):
401
- continue
402
- annotations = getattr(base, '__annotations__', {})
403
- for attr in list(base.__dict__.keys()) + list(annotations.keys()):
404
- if (not attr.startswith('_abc_') and attr not in (
405
- '__abstractmethods__', '__annotations__', '__weakref__',
406
- '_is_protocol', '_is_runtime_protocol', '__dict__',
407
- '__args__', '__slots__',
408
- '__next_in_mro__', '__parameters__', '__origin__',
409
- '__orig_bases__', '__extra__', '__tree_hash__',
410
- '__doc__', '__subclasshook__', '__init__', '__new__',
411
- '__module__', '_MutableMapping__marker', '_gorg')):
412
- attrs.add(attr)
413
- return attrs
414
-
415
-
416
- def _is_callable_members_only(cls):
417
- return all(callable(getattr(cls, attr, None)) for attr in _get_protocol_attrs(cls))
418
-
419
-
420
- def _maybe_adjust_parameters(cls):
421
- """Helper function used in Protocol.__init_subclass__ and _TypedDictMeta.__new__.
422
-
423
- The contents of this function are very similar
424
- to logic found in typing.Generic.__init_subclass__
425
- on the CPython main branch.
426
- """
427
- tvars = []
428
- if '__orig_bases__' in cls.__dict__:
429
- tvars = typing._collect_type_vars(cls.__orig_bases__)
430
- # Look for Generic[T1, ..., Tn] or Protocol[T1, ..., Tn].
431
- # If found, tvars must be a subset of it.
432
- # If not found, tvars is it.
433
- # Also check for and reject plain Generic,
434
- # and reject multiple Generic[...] and/or Protocol[...].
435
- gvars = None
436
- for base in cls.__orig_bases__:
437
- if (isinstance(base, typing._GenericAlias) and
438
- base.__origin__ in (typing.Generic, Protocol)):
439
- # for error messages
440
- the_base = base.__origin__.__name__
441
- if gvars is not None:
442
- raise TypeError(
443
- "Cannot inherit from Generic[...]"
444
- " and/or Protocol[...] multiple types.")
445
- gvars = base.__parameters__
446
- if gvars is None:
447
- gvars = tvars
448
- else:
449
- tvarset = set(tvars)
450
- gvarset = set(gvars)
451
- if not tvarset <= gvarset:
452
- s_vars = ', '.join(str(t) for t in tvars if t not in gvarset)
453
- s_args = ', '.join(str(g) for g in gvars)
454
- raise TypeError(f"Some type variables ({s_vars}) are"
455
- f" not listed in {the_base}[{s_args}]")
456
- tvars = gvars
457
- cls.__parameters__ = tuple(tvars)
458
-
459
-
460
- # 3.8+
461
- if hasattr(typing, 'Protocol'):
462
- Protocol = typing.Protocol
463
- # 3.7
464
- else:
465
-
466
- def _no_init(self, *args, **kwargs):
467
- if type(self)._is_protocol:
468
- raise TypeError('Protocols cannot be instantiated')
469
-
470
- class _ProtocolMeta(abc.ABCMeta): # noqa: B024
471
- # This metaclass is a bit unfortunate and exists only because of the lack
472
- # of __instancehook__.
473
- def __instancecheck__(cls, instance):
474
- # We need this method for situations where attributes are
475
- # assigned in __init__.
476
- if ((not getattr(cls, '_is_protocol', False) or
477
- _is_callable_members_only(cls)) and
478
- issubclass(instance.__class__, cls)):
479
- return True
480
- if cls._is_protocol:
481
- if all(hasattr(instance, attr) and
482
- (not callable(getattr(cls, attr, None)) or
483
- getattr(instance, attr) is not None)
484
- for attr in _get_protocol_attrs(cls)):
485
- return True
486
- return super().__instancecheck__(instance)
487
-
488
- class Protocol(metaclass=_ProtocolMeta):
489
- # There is quite a lot of overlapping code with typing.Generic.
490
- # Unfortunately it is hard to avoid this while these live in two different
491
- # modules. The duplicated code will be removed when Protocol is moved to typing.
492
- """Base class for protocol classes. Protocol classes are defined as::
493
-
494
- class Proto(Protocol):
495
- def meth(self) -> int:
496
- ...
497
-
498
- Such classes are primarily used with static type checkers that recognize
499
- structural subtyping (static duck-typing), for example::
500
-
501
- class C:
502
- def meth(self) -> int:
503
- return 0
504
-
505
- def func(x: Proto) -> int:
506
- return x.meth()
507
-
508
- func(C()) # Passes static type check
509
-
510
- See PEP 544 for details. Protocol classes decorated with
511
- @typing_extensions.runtime act as simple-minded runtime protocol that checks
512
- only the presence of given attributes, ignoring their type signatures.
513
-
514
- Protocol classes can be generic, they are defined as::
515
-
516
- class GenProto(Protocol[T]):
517
- def meth(self) -> T:
518
- ...
519
- """
520
- __slots__ = ()
521
- _is_protocol = True
522
-
523
- def __new__(cls, *args, **kwds):
524
- if cls is Protocol:
525
- raise TypeError("Type Protocol cannot be instantiated; "
526
- "it can only be used as a base class")
527
- return super().__new__(cls)
528
-
529
- @typing._tp_cache
530
- def __class_getitem__(cls, params):
531
- if not isinstance(params, tuple):
532
- params = (params,)
533
- if not params and cls is not typing.Tuple:
534
- raise TypeError(
535
- f"Parameter list to {cls.__qualname__}[...] cannot be empty")
536
- msg = "Parameters to generic types must be types."
537
- params = tuple(typing._type_check(p, msg) for p in params) # noqa
538
- if cls is Protocol:
539
- # Generic can only be subscripted with unique type variables.
540
- if not all(isinstance(p, typing.TypeVar) for p in params):
541
- i = 0
542
- while isinstance(params[i], typing.TypeVar):
543
- i += 1
544
- raise TypeError(
545
- "Parameters to Protocol[...] must all be type variables."
546
- f" Parameter {i + 1} is {params[i]}")
547
- if len(set(params)) != len(params):
548
- raise TypeError(
549
- "Parameters to Protocol[...] must all be unique")
550
- else:
551
- # Subscripting a regular Generic subclass.
552
- _check_generic(cls, params, len(cls.__parameters__))
553
- return typing._GenericAlias(cls, params)
554
-
555
- def __init_subclass__(cls, *args, **kwargs):
556
- if '__orig_bases__' in cls.__dict__:
557
- error = typing.Generic in cls.__orig_bases__
558
- else:
559
- error = typing.Generic in cls.__bases__
560
- if error:
561
- raise TypeError("Cannot inherit from plain Generic")
562
- _maybe_adjust_parameters(cls)
563
-
564
- # Determine if this is a protocol or a concrete subclass.
565
- if not cls.__dict__.get('_is_protocol', None):
566
- cls._is_protocol = any(b is Protocol for b in cls.__bases__)
567
-
568
- # Set (or override) the protocol subclass hook.
569
- def _proto_hook(other):
570
- if not cls.__dict__.get('_is_protocol', None):
571
- return NotImplemented
572
- if not getattr(cls, '_is_runtime_protocol', False):
573
- if sys._getframe(2).f_globals['__name__'] in ['abc', 'functools']:
574
- return NotImplemented
575
- raise TypeError("Instance and class checks can only be used with"
576
- " @runtime protocols")
577
- if not _is_callable_members_only(cls):
578
- if sys._getframe(2).f_globals['__name__'] in ['abc', 'functools']:
579
- return NotImplemented
580
- raise TypeError("Protocols with non-method members"
581
- " don't support issubclass()")
582
- if not isinstance(other, type):
583
- # Same error as for issubclass(1, int)
584
- raise TypeError('issubclass() arg 1 must be a class')
585
- for attr in _get_protocol_attrs(cls):
586
- for base in other.__mro__:
587
- if attr in base.__dict__:
588
- if base.__dict__[attr] is None:
589
- return NotImplemented
590
- break
591
- annotations = getattr(base, '__annotations__', {})
592
- if (isinstance(annotations, typing.Mapping) and
593
- attr in annotations and
594
- isinstance(other, _ProtocolMeta) and
595
- other._is_protocol):
596
- break
597
- else:
598
- return NotImplemented
599
- return True
600
- if '__subclasshook__' not in cls.__dict__:
601
- cls.__subclasshook__ = _proto_hook
602
-
603
- # We have nothing more to do for non-protocols.
604
- if not cls._is_protocol:
605
- return
606
-
607
- # Check consistency of bases.
608
- for base in cls.__bases__:
609
- if not (base in (object, typing.Generic) or
610
- base.__module__ == 'collections.abc' and
611
- base.__name__ in _PROTO_WHITELIST or
612
- isinstance(base, _ProtocolMeta) and base._is_protocol):
613
- raise TypeError('Protocols can only inherit from other'
614
- f' protocols, got {repr(base)}')
615
- cls.__init__ = _no_init
616
-
617
-
618
- # 3.8+
619
- if hasattr(typing, 'runtime_checkable'):
620
- runtime_checkable = typing.runtime_checkable
621
- # 3.7
622
- else:
623
- def runtime_checkable(cls):
624
- """Mark a protocol class as a runtime protocol, so that it
625
- can be used with isinstance() and issubclass(). Raise TypeError
626
- if applied to a non-protocol class.
627
-
628
- This allows a simple-minded structural check very similar to the
629
- one-offs in collections.abc such as Hashable.
630
- """
631
- if not isinstance(cls, _ProtocolMeta) or not cls._is_protocol:
632
- raise TypeError('@runtime_checkable can be only applied to protocol classes,'
633
- f' got {cls!r}')
634
- cls._is_runtime_protocol = True
635
- return cls
636
-
637
-
638
- # Exists for backwards compatibility.
639
- runtime = runtime_checkable
640
-
641
-
642
- # 3.8+
643
- if hasattr(typing, 'SupportsIndex'):
644
- SupportsIndex = typing.SupportsIndex
645
- # 3.7
646
- else:
647
- @runtime_checkable
648
- class SupportsIndex(Protocol):
649
- __slots__ = ()
650
-
651
- @abc.abstractmethod
652
- def __index__(self) -> int:
653
- pass
654
-
655
-
656
- if hasattr(typing, "Required"):
657
- # The standard library TypedDict in Python 3.8 does not store runtime information
658
- # about which (if any) keys are optional. See https://bugs.python.org/issue38834
659
- # The standard library TypedDict in Python 3.9.0/1 does not honour the "total"
660
- # keyword with old-style TypedDict(). See https://bugs.python.org/issue42059
661
- # The standard library TypedDict below Python 3.11 does not store runtime
662
- # information about optional and required keys when using Required or NotRequired.
663
- # Generic TypedDicts are also impossible using typing.TypedDict on Python <3.11.
664
- TypedDict = typing.TypedDict
665
- _TypedDictMeta = typing._TypedDictMeta
666
- is_typeddict = typing.is_typeddict
667
- else:
668
- def _check_fails(cls, other):
669
- try:
670
- if sys._getframe(1).f_globals['__name__'] not in ['abc',
671
- 'functools',
672
- 'typing']:
673
- # Typed dicts are only for static structural subtyping.
674
- raise TypeError('TypedDict does not support instance and class checks')
675
- except (AttributeError, ValueError):
676
- pass
677
- return False
678
-
679
- def _dict_new(*args, **kwargs):
680
- if not args:
681
- raise TypeError('TypedDict.__new__(): not enough arguments')
682
- _, args = args[0], args[1:] # allow the "cls" keyword be passed
683
- return dict(*args, **kwargs)
684
-
685
- _dict_new.__text_signature__ = '($cls, _typename, _fields=None, /, **kwargs)'
686
-
687
- def _typeddict_new(*args, total=True, **kwargs):
688
- if not args:
689
- raise TypeError('TypedDict.__new__(): not enough arguments')
690
- _, args = args[0], args[1:] # allow the "cls" keyword be passed
691
- if args:
692
- typename, args = args[0], args[1:] # allow the "_typename" keyword be passed
693
- elif '_typename' in kwargs:
694
- typename = kwargs.pop('_typename')
695
- import warnings
696
- warnings.warn("Passing '_typename' as keyword argument is deprecated",
697
- DeprecationWarning, stacklevel=2)
698
- else:
699
- raise TypeError("TypedDict.__new__() missing 1 required positional "
700
- "argument: '_typename'")
701
- if args:
702
- try:
703
- fields, = args # allow the "_fields" keyword be passed
704
- except ValueError:
705
- raise TypeError('TypedDict.__new__() takes from 2 to 3 '
706
- f'positional arguments but {len(args) + 2} '
707
- 'were given')
708
- elif '_fields' in kwargs and len(kwargs) == 1:
709
- fields = kwargs.pop('_fields')
710
- import warnings
711
- warnings.warn("Passing '_fields' as keyword argument is deprecated",
712
- DeprecationWarning, stacklevel=2)
713
- else:
714
- fields = None
715
-
716
- if fields is None:
717
- fields = kwargs
718
- elif kwargs:
719
- raise TypeError("TypedDict takes either a dict or keyword arguments,"
720
- " but not both")
721
-
722
- ns = {'__annotations__': dict(fields)}
723
- try:
724
- # Setting correct module is necessary to make typed dict classes pickleable.
725
- ns['__module__'] = sys._getframe(1).f_globals.get('__name__', '__main__')
726
- except (AttributeError, ValueError):
727
- pass
728
-
729
- return _TypedDictMeta(typename, (), ns, total=total)
730
-
731
- _typeddict_new.__text_signature__ = ('($cls, _typename, _fields=None,'
732
- ' /, *, total=True, **kwargs)')
733
-
734
- _TAKES_MODULE = "module" in inspect.signature(typing._type_check).parameters
735
-
736
- class _TypedDictMeta(type):
737
- def __init__(cls, name, bases, ns, total=True):
738
- super().__init__(name, bases, ns)
739
-
740
- def __new__(cls, name, bases, ns, total=True):
741
- # Create new typed dict class object.
742
- # This method is called directly when TypedDict is subclassed,
743
- # or via _typeddict_new when TypedDict is instantiated. This way
744
- # TypedDict supports all three syntaxes described in its docstring.
745
- # Subclasses and instances of TypedDict return actual dictionaries
746
- # via _dict_new.
747
- ns['__new__'] = _typeddict_new if name == 'TypedDict' else _dict_new
748
- # Don't insert typing.Generic into __bases__ here,
749
- # or Generic.__init_subclass__ will raise TypeError
750
- # in the super().__new__() call.
751
- # Instead, monkey-patch __bases__ onto the class after it's been created.
752
- tp_dict = super().__new__(cls, name, (dict,), ns)
753
-
754
- if any(issubclass(base, typing.Generic) for base in bases):
755
- tp_dict.__bases__ = (typing.Generic, dict)
756
- _maybe_adjust_parameters(tp_dict)
757
-
758
- annotations = {}
759
- own_annotations = ns.get('__annotations__', {})
760
- msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type"
761
- kwds = {"module": tp_dict.__module__} if _TAKES_MODULE else {}
762
- own_annotations = {
763
- n: typing._type_check(tp, msg, **kwds)
764
- for n, tp in own_annotations.items()
765
- }
766
- required_keys = set()
767
- optional_keys = set()
768
-
769
- for base in bases:
770
- annotations.update(base.__dict__.get('__annotations__', {}))
771
- required_keys.update(base.__dict__.get('__required_keys__', ()))
772
- optional_keys.update(base.__dict__.get('__optional_keys__', ()))
773
-
774
- annotations.update(own_annotations)
775
- for annotation_key, annotation_type in own_annotations.items():
776
- annotation_origin = get_origin(annotation_type)
777
- if annotation_origin is Annotated:
778
- annotation_args = get_args(annotation_type)
779
- if annotation_args:
780
- annotation_type = annotation_args[0]
781
- annotation_origin = get_origin(annotation_type)
782
-
783
- if annotation_origin is Required:
784
- required_keys.add(annotation_key)
785
- elif annotation_origin is NotRequired:
786
- optional_keys.add(annotation_key)
787
- elif total:
788
- required_keys.add(annotation_key)
789
- else:
790
- optional_keys.add(annotation_key)
791
-
792
- tp_dict.__annotations__ = annotations
793
- tp_dict.__required_keys__ = frozenset(required_keys)
794
- tp_dict.__optional_keys__ = frozenset(optional_keys)
795
- if not hasattr(tp_dict, '__total__'):
796
- tp_dict.__total__ = total
797
- return tp_dict
798
-
799
- __instancecheck__ = __subclasscheck__ = _check_fails
800
-
801
- TypedDict = _TypedDictMeta('TypedDict', (dict,), {})
802
- TypedDict.__module__ = __name__
803
- TypedDict.__doc__ = \
804
- """A simple typed name space. At runtime it is equivalent to a plain dict.
805
-
806
- TypedDict creates a dictionary type that expects all of its
807
- instances to have a certain set of keys, with each key
808
- associated with a value of a consistent type. This expectation
809
- is not checked at runtime but is only enforced by type checkers.
810
- Usage::
811
-
812
- class Point2D(TypedDict):
813
- x: int
814
- y: int
815
- label: str
816
-
817
- a: Point2D = {'x': 1, 'y': 2, 'label': 'good'} # OK
818
- b: Point2D = {'z': 3, 'label': 'bad'} # Fails type check
819
-
820
- assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first')
821
-
822
- The type info can be accessed via the Point2D.__annotations__ dict, and
823
- the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets.
824
- TypedDict supports two additional equivalent forms::
825
-
826
- Point2D = TypedDict('Point2D', x=int, y=int, label=str)
827
- Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str})
828
-
829
- The class syntax is only supported in Python 3.6+, while two other
830
- syntax forms work for Python 2.7 and 3.2+
831
- """
832
-
833
- if hasattr(typing, "_TypedDictMeta"):
834
- _TYPEDDICT_TYPES = (typing._TypedDictMeta, _TypedDictMeta)
835
- else:
836
- _TYPEDDICT_TYPES = (_TypedDictMeta,)
837
-
838
- def is_typeddict(tp):
839
- """Check if an annotation is a TypedDict class
840
-
841
- For example::
842
- class Film(TypedDict):
843
- title: str
844
- year: int
845
-
846
- is_typeddict(Film) # => True
847
- is_typeddict(Union[list, str]) # => False
848
- """
849
- return isinstance(tp, tuple(_TYPEDDICT_TYPES))
850
-
851
-
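
The backported ``TypedDict`` above populates ``__required_keys__`` and ``__optional_keys__`` from the annotations and the ``total`` flag, and ``is_typeddict`` recognises the resulting class. A minimal sketch, assuming the module is importable as ``typing_extensions`` (inside pip it is vendored as ``pip._vendor.typing_extensions``)::

    from typing_extensions import TypedDict, is_typeddict

    class Point2D(TypedDict, total=False):
        x: int
        y: int

    # At runtime Point2D() builds a plain dict; the key sets are metadata only.
    assert is_typeddict(Point2D)
    assert Point2D.__optional_keys__ == frozenset({'x', 'y'})
    assert Point2D(x=1, y=2) == {'x': 1, 'y': 2}
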
852
- if hasattr(typing, "assert_type"):
853
- assert_type = typing.assert_type
854
-
855
- else:
856
- def assert_type(__val, __typ):
857
- """Assert (to the type checker) that the value is of the given type.
858
-
859
- When the type checker encounters a call to assert_type(), it
860
- emits an error if the value is not of the specified type::
861
-
862
- def greet(name: str) -> None:
863
- assert_type(name, str) # ok
864
- assert_type(name, int) # type checker error
865
-
866
- At runtime this returns the first argument unchanged and otherwise
867
- does nothing.
868
- """
869
- return __val
870
-
871
-
872
- if hasattr(typing, "Required"):
873
- get_type_hints = typing.get_type_hints
874
- else:
875
- import functools
876
- import types
877
-
878
- # replaces _strip_annotations()
879
- def _strip_extras(t):
880
- """Strips Annotated, Required and NotRequired from a given type."""
881
- if isinstance(t, _AnnotatedAlias):
882
- return _strip_extras(t.__origin__)
883
- if hasattr(t, "__origin__") and t.__origin__ in (Required, NotRequired):
884
- return _strip_extras(t.__args__[0])
885
- if isinstance(t, typing._GenericAlias):
886
- stripped_args = tuple(_strip_extras(a) for a in t.__args__)
887
- if stripped_args == t.__args__:
888
- return t
889
- return t.copy_with(stripped_args)
890
- if hasattr(types, "GenericAlias") and isinstance(t, types.GenericAlias):
891
- stripped_args = tuple(_strip_extras(a) for a in t.__args__)
892
- if stripped_args == t.__args__:
893
- return t
894
- return types.GenericAlias(t.__origin__, stripped_args)
895
- if hasattr(types, "UnionType") and isinstance(t, types.UnionType):
896
- stripped_args = tuple(_strip_extras(a) for a in t.__args__)
897
- if stripped_args == t.__args__:
898
- return t
899
- return functools.reduce(operator.or_, stripped_args)
900
-
901
- return t
902
-
903
- def get_type_hints(obj, globalns=None, localns=None, include_extras=False):
904
- """Return type hints for an object.
905
-
906
- This is often the same as obj.__annotations__, but it handles
907
- forward references encoded as string literals, adds Optional[t] if a
908
- default value equal to None is set and recursively replaces all
909
- 'Annotated[T, ...]', 'Required[T]' or 'NotRequired[T]' with 'T'
910
- (unless 'include_extras=True').
911
-
912
- The argument may be a module, class, method, or function. The annotations
913
- are returned as a dictionary. For classes, annotations include also
914
- inherited members.
915
-
916
- TypeError is raised if the argument is not of a type that can contain
917
- annotations, and an empty dictionary is returned if no annotations are
918
- present.
919
-
920
- BEWARE -- the behavior of globalns and localns is counterintuitive
921
- (unless you are familiar with how eval() and exec() work). The
922
- search order is locals first, then globals.
923
-
924
- - If no dict arguments are passed, an attempt is made to use the
925
- globals from obj (or the respective module's globals for classes),
926
- and these are also used as the locals. If the object does not appear
927
- to have globals, an empty dictionary is used.
928
-
929
- - If one dict argument is passed, it is used for both globals and
930
- locals.
931
-
932
- - If two dict arguments are passed, they specify globals and
933
- locals, respectively.
934
- """
935
- if hasattr(typing, "Annotated"):
936
- hint = typing.get_type_hints(
937
- obj, globalns=globalns, localns=localns, include_extras=True
938
- )
939
- else:
940
- hint = typing.get_type_hints(obj, globalns=globalns, localns=localns)
941
- if include_extras:
942
- return hint
943
- return {k: _strip_extras(t) for k, t in hint.items()}
944
-
945
-
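
A small sketch of how the ``get_type_hints`` wrapper above behaves, assuming ``typing_extensions`` exports ``Annotated`` and ``get_type_hints``: extras such as ``Annotated`` metadata are stripped unless ``include_extras=True`` is passed::

    from typing_extensions import Annotated, get_type_hints

    def scale(x: Annotated[float, "metres"]) -> float:
        return x * 100

    # Default behaviour strips the Annotated wrapper down to the plain type.
    assert get_type_hints(scale)["x"] is float
    # With include_extras=True the metadata is preserved.
    hints = get_type_hints(scale, include_extras=True)
    assert hints["x"] == Annotated[float, "metres"]
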
946
- # Python 3.9+ has PEP 593 (Annotated)
947
- if hasattr(typing, 'Annotated'):
948
- Annotated = typing.Annotated
949
- # Not exported and not a public API, but needed for get_origin() and get_args()
950
- # to work.
951
- _AnnotatedAlias = typing._AnnotatedAlias
952
- # 3.7-3.8
953
- else:
954
- class _AnnotatedAlias(typing._GenericAlias, _root=True):
955
- """Runtime representation of an annotated type.
956
-
957
- At its core 'Annotated[t, dec1, dec2, ...]' is an alias for the type 't'
958
- with extra annotations. The alias behaves like a normal typing alias,
959
- instantiating is the same as instantiating the underlying type, binding
960
- it to types is also the same.
961
- """
962
- def __init__(self, origin, metadata):
963
- if isinstance(origin, _AnnotatedAlias):
964
- metadata = origin.__metadata__ + metadata
965
- origin = origin.__origin__
966
- super().__init__(origin, origin)
967
- self.__metadata__ = metadata
968
-
969
- def copy_with(self, params):
970
- assert len(params) == 1
971
- new_type = params[0]
972
- return _AnnotatedAlias(new_type, self.__metadata__)
973
-
974
- def __repr__(self):
975
- return (f"typing_extensions.Annotated[{typing._type_repr(self.__origin__)}, "
976
- f"{', '.join(repr(a) for a in self.__metadata__)}]")
977
-
978
- def __reduce__(self):
979
- return operator.getitem, (
980
- Annotated, (self.__origin__,) + self.__metadata__
981
- )
982
-
983
- def __eq__(self, other):
984
- if not isinstance(other, _AnnotatedAlias):
985
- return NotImplemented
986
- if self.__origin__ != other.__origin__:
987
- return False
988
- return self.__metadata__ == other.__metadata__
989
-
990
- def __hash__(self):
991
- return hash((self.__origin__, self.__metadata__))
992
-
993
- class Annotated:
994
- """Add context specific metadata to a type.
995
-
996
- Example: Annotated[int, runtime_check.Unsigned] indicates to the
997
- hypothetical runtime_check module that this type is an unsigned int.
998
- Every other consumer of this type can ignore this metadata and treat
999
- this type as int.
1000
-
1001
- The first argument to Annotated must be a valid type (and will be in
1002
- the __origin__ field), the remaining arguments are kept as a tuple in
1003
- the __extra__ field.
1004
-
1005
- Details:
1006
-
1007
- - It's an error to call `Annotated` with less than two arguments.
1008
- - Nested Annotated are flattened::
1009
-
1010
- Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3]
1011
-
1012
- - Instantiating an annotated type is equivalent to instantiating the
1013
- underlying type::
1014
-
1015
- Annotated[C, Ann1](5) == C(5)
1016
-
1017
- - Annotated can be used as a generic type alias::
1018
-
1019
- Optimized = Annotated[T, runtime.Optimize()]
1020
- Optimized[int] == Annotated[int, runtime.Optimize()]
1021
-
1022
- OptimizedList = Annotated[List[T], runtime.Optimize()]
1023
- OptimizedList[int] == Annotated[List[int], runtime.Optimize()]
1024
- """
1025
-
1026
- __slots__ = ()
1027
-
1028
- def __new__(cls, *args, **kwargs):
1029
- raise TypeError("Type Annotated cannot be instantiated.")
1030
-
1031
- @typing._tp_cache
1032
- def __class_getitem__(cls, params):
1033
- if not isinstance(params, tuple) or len(params) < 2:
1034
- raise TypeError("Annotated[...] should be used "
1035
- "with at least two arguments (a type and an "
1036
- "annotation).")
1037
- allowed_special_forms = (ClassVar, Final)
1038
- if get_origin(params[0]) in allowed_special_forms:
1039
- origin = params[0]
1040
- else:
1041
- msg = "Annotated[t, ...]: t must be a type."
1042
- origin = typing._type_check(params[0], msg)
1043
- metadata = tuple(params[1:])
1044
- return _AnnotatedAlias(origin, metadata)
1045
-
1046
- def __init_subclass__(cls, *args, **kwargs):
1047
- raise TypeError(
1048
- f"Cannot subclass {cls.__module__}.Annotated"
1049
- )
1050
-
1051
- # Python 3.8 has get_origin() and get_args() but those implementations aren't
1052
- # Annotated-aware, so we can't use those. Python 3.9's versions don't support
1053
- # ParamSpecArgs and ParamSpecKwargs, so only Python 3.10's versions will do.
1054
- if sys.version_info[:2] >= (3, 10):
1055
- get_origin = typing.get_origin
1056
- get_args = typing.get_args
1057
- # 3.7-3.9
1058
- else:
1059
- try:
1060
- # 3.9+
1061
- from typing import _BaseGenericAlias
1062
- except ImportError:
1063
- _BaseGenericAlias = typing._GenericAlias
1064
- try:
1065
- # 3.9+
1066
- from typing import GenericAlias as _typing_GenericAlias
1067
- except ImportError:
1068
- _typing_GenericAlias = typing._GenericAlias
1069
-
1070
- def get_origin(tp):
1071
- """Get the unsubscripted version of a type.
1072
-
1073
- This supports generic types, Callable, Tuple, Union, Literal, Final, ClassVar
1074
- and Annotated. Return None for unsupported types. Examples::
1075
-
1076
- get_origin(Literal[42]) is Literal
1077
- get_origin(int) is None
1078
- get_origin(ClassVar[int]) is ClassVar
1079
- get_origin(Generic) is Generic
1080
- get_origin(Generic[T]) is Generic
1081
- get_origin(Union[T, int]) is Union
1082
- get_origin(List[Tuple[T, T]][int]) == list
1083
- get_origin(P.args) is P
1084
- """
1085
- if isinstance(tp, _AnnotatedAlias):
1086
- return Annotated
1087
- if isinstance(tp, (typing._GenericAlias, _typing_GenericAlias, _BaseGenericAlias,
1088
- ParamSpecArgs, ParamSpecKwargs)):
1089
- return tp.__origin__
1090
- if tp is typing.Generic:
1091
- return typing.Generic
1092
- return None
1093
-
1094
- def get_args(tp):
1095
- """Get type arguments with all substitutions performed.
1096
-
1097
- For unions, basic simplifications used by Union constructor are performed.
1098
- Examples::
1099
- get_args(Dict[str, int]) == (str, int)
1100
- get_args(int) == ()
1101
- get_args(Union[int, Union[T, int], str][int]) == (int, str)
1102
- get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int])
1103
- get_args(Callable[[], T][int]) == ([], int)
1104
- """
1105
- if isinstance(tp, _AnnotatedAlias):
1106
- return (tp.__origin__,) + tp.__metadata__
1107
- if isinstance(tp, (typing._GenericAlias, _typing_GenericAlias)):
1108
- if getattr(tp, "_special", False):
1109
- return ()
1110
- res = tp.__args__
1111
- if get_origin(tp) is collections.abc.Callable and res[0] is not Ellipsis:
1112
- res = (list(res[:-1]), res[-1])
1113
- return res
1114
- return ()
1115
-
1116
-
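
For instance, the Annotated-aware ``get_origin``/``get_args`` defined above behave as follows (a minimal sketch)::

    from typing import Dict
    from typing_extensions import Annotated, get_args, get_origin

    assert get_origin(Annotated[int, "unsigned"]) is Annotated
    assert get_args(Annotated[int, "unsigned"]) == (int, "unsigned")
    assert get_origin(Dict[str, int]) is dict
    assert get_args(Dict[str, int]) == (str, int)
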
1117
- # 3.10+
1118
- if hasattr(typing, 'TypeAlias'):
1119
- TypeAlias = typing.TypeAlias
1120
- # 3.9
1121
- elif sys.version_info[:2] >= (3, 9):
1122
- class _TypeAliasForm(typing._SpecialForm, _root=True):
1123
- def __repr__(self):
1124
- return 'typing_extensions.' + self._name
1125
-
1126
- @_TypeAliasForm
1127
- def TypeAlias(self, parameters):
1128
- """Special marker indicating that an assignment should
1129
- be recognized as a proper type alias definition by type
1130
- checkers.
1131
-
1132
- For example::
1133
-
1134
- Predicate: TypeAlias = Callable[..., bool]
1135
-
1136
- It's invalid when used anywhere except as in the example above.
1137
- """
1138
- raise TypeError(f"{self} is not subscriptable")
1139
- # 3.7-3.8
1140
- else:
1141
- class _TypeAliasForm(typing._SpecialForm, _root=True):
1142
- def __repr__(self):
1143
- return 'typing_extensions.' + self._name
1144
-
1145
- TypeAlias = _TypeAliasForm('TypeAlias',
1146
- doc="""Special marker indicating that an assignment should
1147
- be recognized as a proper type alias definition by type
1148
- checkers.
1149
-
1150
- For example::
1151
-
1152
- Predicate: TypeAlias = Callable[..., bool]
1153
-
1154
- It's invalid when used anywhere except as in the example
1155
- above.""")
1156
-
1157
-
1158
- class _DefaultMixin:
1159
- """Mixin for TypeVarLike defaults."""
1160
-
1161
- __slots__ = ()
1162
-
1163
- def __init__(self, default):
1164
- if isinstance(default, (tuple, list)):
1165
- self.__default__ = tuple((typing._type_check(d, "Default must be a type")
1166
- for d in default))
1167
- elif default != _marker:
1168
- self.__default__ = typing._type_check(default, "Default must be a type")
1169
- else:
1170
- self.__default__ = None
1171
-
1172
-
1173
- # Add default and infer_variance parameters from PEP 696 and 695
1174
- class TypeVar(typing.TypeVar, _DefaultMixin, _root=True):
1175
- """Type variable."""
1176
-
1177
- __module__ = 'typing'
1178
-
1179
- def __init__(self, name, *constraints, bound=None,
1180
- covariant=False, contravariant=False,
1181
- default=_marker, infer_variance=False):
1182
- super().__init__(name, *constraints, bound=bound, covariant=covariant,
1183
- contravariant=contravariant)
1184
- _DefaultMixin.__init__(self, default)
1185
- self.__infer_variance__ = infer_variance
1186
-
1187
- # for pickling:
1188
- try:
1189
- def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
1190
- except (AttributeError, ValueError):
1191
- def_mod = None
1192
- if def_mod != 'typing_extensions':
1193
- self.__module__ = def_mod
1194
-
1195
-
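
With this backport, ``TypeVar`` accepts the PEP 696 ``default`` and PEP 695 ``infer_variance`` arguments; ``_DefaultMixin`` stores the default on ``__default__``. A minimal sketch::

    from typing_extensions import TypeVar

    T = TypeVar("T", default=int, infer_variance=True)

    assert T.__default__ is int
    assert T.__infer_variance__ is True
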
1196
- # Python 3.10+ has PEP 612
1197
- if hasattr(typing, 'ParamSpecArgs'):
1198
- ParamSpecArgs = typing.ParamSpecArgs
1199
- ParamSpecKwargs = typing.ParamSpecKwargs
1200
- # 3.7-3.9
1201
- else:
1202
- class _Immutable:
1203
- """Mixin to indicate that object should not be copied."""
1204
- __slots__ = ()
1205
-
1206
- def __copy__(self):
1207
- return self
1208
-
1209
- def __deepcopy__(self, memo):
1210
- return self
1211
-
1212
- class ParamSpecArgs(_Immutable):
1213
- """The args for a ParamSpec object.
1214
-
1215
- Given a ParamSpec object P, P.args is an instance of ParamSpecArgs.
1216
-
1217
- ParamSpecArgs objects have a reference back to their ParamSpec:
1218
-
1219
- P.args.__origin__ is P
1220
-
1221
- This type is meant for runtime introspection and has no special meaning to
1222
- static type checkers.
1223
- """
1224
- def __init__(self, origin):
1225
- self.__origin__ = origin
1226
-
1227
- def __repr__(self):
1228
- return f"{self.__origin__.__name__}.args"
1229
-
1230
- def __eq__(self, other):
1231
- if not isinstance(other, ParamSpecArgs):
1232
- return NotImplemented
1233
- return self.__origin__ == other.__origin__
1234
-
1235
- class ParamSpecKwargs(_Immutable):
1236
- """The kwargs for a ParamSpec object.
1237
-
1238
- Given a ParamSpec object P, P.kwargs is an instance of ParamSpecKwargs.
1239
-
1240
- ParamSpecKwargs objects have a reference back to their ParamSpec:
1241
-
1242
- P.kwargs.__origin__ is P
1243
-
1244
- This type is meant for runtime introspection and has no special meaning to
1245
- static type checkers.
1246
- """
1247
- def __init__(self, origin):
1248
- self.__origin__ = origin
1249
-
1250
- def __repr__(self):
1251
- return f"{self.__origin__.__name__}.kwargs"
1252
-
1253
- def __eq__(self, other):
1254
- if not isinstance(other, ParamSpecKwargs):
1255
- return NotImplemented
1256
- return self.__origin__ == other.__origin__
1257
-
1258
- # 3.10+
1259
- if hasattr(typing, 'ParamSpec'):
1260
-
1261
- # Add default Parameter - PEP 696
1262
- class ParamSpec(typing.ParamSpec, _DefaultMixin, _root=True):
1263
- """Parameter specification variable."""
1264
-
1265
- __module__ = 'typing'
1266
-
1267
- def __init__(self, name, *, bound=None, covariant=False, contravariant=False,
1268
- default=_marker):
1269
- super().__init__(name, bound=bound, covariant=covariant,
1270
- contravariant=contravariant)
1271
- _DefaultMixin.__init__(self, default)
1272
-
1273
- # for pickling:
1274
- try:
1275
- def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
1276
- except (AttributeError, ValueError):
1277
- def_mod = None
1278
- if def_mod != 'typing_extensions':
1279
- self.__module__ = def_mod
1280
-
1281
- # 3.7-3.9
1282
- else:
1283
-
1284
- # Inherits from list as a workaround for Callable checks in Python < 3.9.2.
1285
- class ParamSpec(list, _DefaultMixin):
1286
- """Parameter specification variable.
1287
-
1288
- Usage::
1289
-
1290
- P = ParamSpec('P')
1291
-
1292
- Parameter specification variables exist primarily for the benefit of static
1293
- type checkers. They are used to forward the parameter types of one
1294
- callable to another callable, a pattern commonly found in higher order
1295
- functions and decorators. They are only valid when used in ``Concatenate``,
1296
- or as the first argument to ``Callable``. In Python 3.10 and higher,
1297
- they are also supported in user-defined Generics at runtime.
1298
- See class Generic for more information on generic types. An
1299
- example for annotating a decorator::
1300
-
1301
- T = TypeVar('T')
1302
- P = ParamSpec('P')
1303
-
1304
- def add_logging(f: Callable[P, T]) -> Callable[P, T]:
1305
- '''A type-safe decorator to add logging to a function.'''
1306
- def inner(*args: P.args, **kwargs: P.kwargs) -> T:
1307
- logging.info(f'{f.__name__} was called')
1308
- return f(*args, **kwargs)
1309
- return inner
1310
-
1311
- @add_logging
1312
- def add_two(x: float, y: float) -> float:
1313
- '''Add two numbers together.'''
1314
- return x + y
1315
-
1316
- Parameter specification variables defined with covariant=True or
1317
- contravariant=True can be used to declare covariant or contravariant
1318
- generic types. These keyword arguments are valid, but their actual semantics
1319
- are yet to be decided. See PEP 612 for details.
1320
-
1321
- Parameter specification variables can be introspected. e.g.:
1322
-
1323
- P.__name__ == 'P'
1324
- P.__bound__ == None
1325
- P.__covariant__ == False
1326
- P.__contravariant__ == False
1327
-
1328
- Note that only parameter specification variables defined in global scope can
1329
- be pickled.
1330
- """
1331
-
1332
- # Trick Generic __parameters__.
1333
- __class__ = typing.TypeVar
1334
-
1335
- @property
1336
- def args(self):
1337
- return ParamSpecArgs(self)
1338
-
1339
- @property
1340
- def kwargs(self):
1341
- return ParamSpecKwargs(self)
1342
-
1343
- def __init__(self, name, *, bound=None, covariant=False, contravariant=False,
1344
- default=_marker):
1345
- super().__init__([self])
1346
- self.__name__ = name
1347
- self.__covariant__ = bool(covariant)
1348
- self.__contravariant__ = bool(contravariant)
1349
- if bound:
1350
- self.__bound__ = typing._type_check(bound, 'Bound must be a type.')
1351
- else:
1352
- self.__bound__ = None
1353
- _DefaultMixin.__init__(self, default)
1354
-
1355
- # for pickling:
1356
- try:
1357
- def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
1358
- except (AttributeError, ValueError):
1359
- def_mod = None
1360
- if def_mod != 'typing_extensions':
1361
- self.__module__ = def_mod
1362
-
1363
- def __repr__(self):
1364
- if self.__covariant__:
1365
- prefix = '+'
1366
- elif self.__contravariant__:
1367
- prefix = '-'
1368
- else:
1369
- prefix = '~'
1370
- return prefix + self.__name__
1371
-
1372
- def __hash__(self):
1373
- return object.__hash__(self)
1374
-
1375
- def __eq__(self, other):
1376
- return self is other
1377
-
1378
- def __reduce__(self):
1379
- return self.__name__
1380
-
1381
- # Hack to get typing._type_check to pass.
1382
- def __call__(self, *args, **kwargs):
1383
- pass
1384
-
1385
-
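
As the docstring above notes, a ``ParamSpec`` and its ``args``/``kwargs`` can be introspected at runtime; a minimal sketch::

    from typing_extensions import ParamSpec

    P = ParamSpec("P")

    assert P.__name__ == "P"
    # Both accessors keep a reference back to their ParamSpec.
    assert P.args.__origin__ is P and P.kwargs.__origin__ is P
    assert repr(P.args) == "P.args" and repr(P.kwargs) == "P.kwargs"
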
1386
- # 3.7-3.9
1387
- if not hasattr(typing, 'Concatenate'):
1388
- # Inherits from list as a workaround for Callable checks in Python < 3.9.2.
1389
- class _ConcatenateGenericAlias(list):
1390
-
1391
- # Trick Generic into looking into this for __parameters__.
1392
- __class__ = typing._GenericAlias
1393
-
1394
- # Flag in 3.8.
1395
- _special = False
1396
-
1397
- def __init__(self, origin, args):
1398
- super().__init__(args)
1399
- self.__origin__ = origin
1400
- self.__args__ = args
1401
-
1402
- def __repr__(self):
1403
- _type_repr = typing._type_repr
1404
- return (f'{_type_repr(self.__origin__)}'
1405
- f'[{", ".join(_type_repr(arg) for arg in self.__args__)}]')
1406
-
1407
- def __hash__(self):
1408
- return hash((self.__origin__, self.__args__))
1409
-
1410
- # Hack to get typing._type_check to pass in Generic.
1411
- def __call__(self, *args, **kwargs):
1412
- pass
1413
-
1414
- @property
1415
- def __parameters__(self):
1416
- return tuple(
1417
- tp for tp in self.__args__ if isinstance(tp, (typing.TypeVar, ParamSpec))
1418
- )
1419
-
1420
-
1421
- # 3.7-3.9
1422
- @typing._tp_cache
1423
- def _concatenate_getitem(self, parameters):
1424
- if parameters == ():
1425
- raise TypeError("Cannot take a Concatenate of no types.")
1426
- if not isinstance(parameters, tuple):
1427
- parameters = (parameters,)
1428
- if not isinstance(parameters[-1], ParamSpec):
1429
- raise TypeError("The last parameter to Concatenate should be a "
1430
- "ParamSpec variable.")
1431
- msg = "Concatenate[arg, ...]: each arg must be a type."
1432
- parameters = tuple(typing._type_check(p, msg) for p in parameters)
1433
- return _ConcatenateGenericAlias(self, parameters)
1434
-
1435
-
1436
- # 3.10+
1437
- if hasattr(typing, 'Concatenate'):
1438
- Concatenate = typing.Concatenate
1439
- _ConcatenateGenericAlias = typing._ConcatenateGenericAlias # noqa
1440
- # 3.9
1441
- elif sys.version_info[:2] >= (3, 9):
1442
- @_TypeAliasForm
1443
- def Concatenate(self, parameters):
1444
- """Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
1445
- higher order function which adds, removes or transforms parameters of a
1446
- callable.
1447
-
1448
- For example::
1449
-
1450
- Callable[Concatenate[int, P], int]
1451
-
1452
- See PEP 612 for detailed information.
1453
- """
1454
- return _concatenate_getitem(self, parameters)
1455
- # 3.7-8
1456
- else:
1457
- class _ConcatenateForm(typing._SpecialForm, _root=True):
1458
- def __repr__(self):
1459
- return 'typing_extensions.' + self._name
1460
-
1461
- def __getitem__(self, parameters):
1462
- return _concatenate_getitem(self, parameters)
1463
-
1464
- Concatenate = _ConcatenateForm(
1465
- 'Concatenate',
1466
- doc="""Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
1467
- higher order function which adds, removes or transforms parameters of a
1468
- callable.
1469
-
1470
- For example::
1471
-
1472
- Callable[Concatenate[int, P], int]
1473
-
1474
- See PEP 612 for detailed information.
1475
- """)
1476
-
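
A short sketch of the ``Concatenate`` form defined above, in the usual PEP 612 decorator pattern; the ``Request`` class and ``with_request`` helper are illustrative only::

    from typing import Callable, TypeVar
    from typing_extensions import Concatenate, ParamSpec

    P = ParamSpec("P")
    R = TypeVar("R")

    class Request: ...

    def with_request(f: Callable[Concatenate[Request, P], R]) -> Callable[P, R]:
        # Supplies the leading Request argument and forwards the rest unchanged.
        def inner(*args: P.args, **kwargs: P.kwargs) -> R:
            return f(Request(), *args, **kwargs)
        return inner
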
1477
- # 3.10+
1478
- if hasattr(typing, 'TypeGuard'):
1479
- TypeGuard = typing.TypeGuard
1480
- # 3.9
1481
- elif sys.version_info[:2] >= (3, 9):
1482
- class _TypeGuardForm(typing._SpecialForm, _root=True):
1483
- def __repr__(self):
1484
- return 'typing_extensions.' + self._name
1485
-
1486
- @_TypeGuardForm
1487
- def TypeGuard(self, parameters):
1488
- """Special typing form used to annotate the return type of a user-defined
1489
- type guard function. ``TypeGuard`` only accepts a single type argument.
1490
- At runtime, functions marked this way should return a boolean.
1491
-
1492
- ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
1493
- type checkers to determine a more precise type of an expression within a
1494
- program's code flow. Usually type narrowing is done by analyzing
1495
- conditional code flow and applying the narrowing to a block of code. The
1496
- conditional expression here is sometimes referred to as a "type guard".
1497
-
1498
- Sometimes it would be convenient to use a user-defined boolean function
1499
- as a type guard. Such a function should use ``TypeGuard[...]`` as its
1500
- return type to alert static type checkers to this intention.
1501
-
1502
- Using ``-> TypeGuard`` tells the static type checker that for a given
1503
- function:
1504
-
1505
- 1. The return value is a boolean.
1506
- 2. If the return value is ``True``, the type of its argument
1507
- is the type inside ``TypeGuard``.
1508
-
1509
- For example::
1510
-
1511
- def is_str(val: Union[str, float]):
1512
- # "isinstance" type guard
1513
- if isinstance(val, str):
1514
- # Type of ``val`` is narrowed to ``str``
1515
- ...
1516
- else:
1517
- # Else, type of ``val`` is narrowed to ``float``.
1518
- ...
1519
-
1520
- Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
1521
- form of ``TypeA`` (it can even be a wider form) and this may lead to
1522
- type-unsafe results. The main reason is to allow for things like
1523
- narrowing ``List[object]`` to ``List[str]`` even though the latter is not
1524
- a subtype of the former, since ``List`` is invariant. The responsibility of
1525
- writing type-safe type guards is left to the user.
1526
-
1527
- ``TypeGuard`` also works with type variables. For more information, see
1528
- PEP 647 (User-Defined Type Guards).
1529
- """
1530
- item = typing._type_check(parameters, f'{self} accepts only a single type.')
1531
- return typing._GenericAlias(self, (item,))
1532
- # 3.7-3.8
1533
- else:
1534
- class _TypeGuardForm(typing._SpecialForm, _root=True):
1535
-
1536
- def __repr__(self):
1537
- return 'typing_extensions.' + self._name
1538
-
1539
- def __getitem__(self, parameters):
1540
- item = typing._type_check(parameters,
1541
- f'{self._name} accepts only a single type')
1542
- return typing._GenericAlias(self, (item,))
1543
-
1544
- TypeGuard = _TypeGuardForm(
1545
- 'TypeGuard',
1546
- doc="""Special typing form used to annotate the return type of a user-defined
1547
- type guard function. ``TypeGuard`` only accepts a single type argument.
1548
- At runtime, functions marked this way should return a boolean.
1549
-
1550
- ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
1551
- type checkers to determine a more precise type of an expression within a
1552
- program's code flow. Usually type narrowing is done by analyzing
1553
- conditional code flow and applying the narrowing to a block of code. The
1554
- conditional expression here is sometimes referred to as a "type guard".
1555
-
1556
- Sometimes it would be convenient to use a user-defined boolean function
1557
- as a type guard. Such a function should use ``TypeGuard[...]`` as its
1558
- return type to alert static type checkers to this intention.
1559
-
1560
- Using ``-> TypeGuard`` tells the static type checker that for a given
1561
- function:
1562
-
1563
- 1. The return value is a boolean.
1564
- 2. If the return value is ``True``, the type of its argument
1565
- is the type inside ``TypeGuard``.
1566
-
1567
- For example::
1568
-
1569
- def is_str(val: Union[str, float]):
1570
- # "isinstance" type guard
1571
- if isinstance(val, str):
1572
- # Type of ``val`` is narrowed to ``str``
1573
- ...
1574
- else:
1575
- # Else, type of ``val`` is narrowed to ``float``.
1576
- ...
1577
-
1578
- Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
1579
- form of ``TypeA`` (it can even be a wider form) and this may lead to
1580
- type-unsafe results. The main reason is to allow for things like
1581
- narrowing ``List[object]`` to ``List[str]`` even though the latter is not
1582
- a subtype of the former, since ``List`` is invariant. The responsibility of
1583
- writing type-safe type guards is left to the user.
1584
-
1585
- ``TypeGuard`` also works with type variables. For more information, see
1586
- PEP 647 (User-Defined Type Guards).
1587
- """)
1588
-
1589
-
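
To complement the docstring above, here is the canonical PEP 647 use of ``TypeGuard`` as a return annotation (a minimal sketch)::

    from typing import List
    from typing_extensions import TypeGuard

    def is_str_list(val: List[object]) -> TypeGuard[List[str]]:
        """Narrow ``List[object]`` to ``List[str]`` when every element is a str."""
        return all(isinstance(x, str) for x in val)

    items: List[object] = ["a", "b"]
    if is_str_list(items):
        # A static type checker treats ``items`` as List[str] in this branch.
        print(", ".join(items))
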
1590
- # Vendored from cpython typing._SpecialForm
1591
- class _SpecialForm(typing._Final, _root=True):
1592
- __slots__ = ('_name', '__doc__', '_getitem')
1593
-
1594
- def __init__(self, getitem):
1595
- self._getitem = getitem
1596
- self._name = getitem.__name__
1597
- self.__doc__ = getitem.__doc__
1598
-
1599
- def __getattr__(self, item):
1600
- if item in {'__name__', '__qualname__'}:
1601
- return self._name
1602
-
1603
- raise AttributeError(item)
1604
-
1605
- def __mro_entries__(self, bases):
1606
- raise TypeError(f"Cannot subclass {self!r}")
1607
-
1608
- def __repr__(self):
1609
- return f'typing_extensions.{self._name}'
1610
-
1611
- def __reduce__(self):
1612
- return self._name
1613
-
1614
- def __call__(self, *args, **kwds):
1615
- raise TypeError(f"Cannot instantiate {self!r}")
1616
-
1617
- def __or__(self, other):
1618
- return typing.Union[self, other]
1619
-
1620
- def __ror__(self, other):
1621
- return typing.Union[other, self]
1622
-
1623
- def __instancecheck__(self, obj):
1624
- raise TypeError(f"{self} cannot be used with isinstance()")
1625
-
1626
- def __subclasscheck__(self, cls):
1627
- raise TypeError(f"{self} cannot be used with issubclass()")
1628
-
1629
- @typing._tp_cache
1630
- def __getitem__(self, parameters):
1631
- return self._getitem(self, parameters)
1632
-
1633
-
1634
- if hasattr(typing, "LiteralString"):
1635
- LiteralString = typing.LiteralString
1636
- else:
1637
- @_SpecialForm
1638
- def LiteralString(self, params):
1639
- """Represents an arbitrary literal string.
1640
-
1641
- Example::
1642
-
1643
- from pip._vendor.typing_extensions import LiteralString
1644
-
1645
- def query(sql: LiteralString) -> ...:
1646
- ...
1647
-
1648
- query("SELECT * FROM table") # ok
1649
- query(f"SELECT * FROM {input()}") # not ok
1650
-
1651
- See PEP 675 for details.
1652
-
1653
- """
1654
- raise TypeError(f"{self} is not subscriptable")
1655
-
1656
-
1657
- if hasattr(typing, "Self"):
1658
- Self = typing.Self
1659
- else:
1660
- @_SpecialForm
1661
- def Self(self, params):
1662
- """Used to spell the type of "self" in classes.
1663
-
1664
- Example::
1665
-
1666
- from typing import Self
1667
-
1668
- class ReturnsSelf:
1669
- def parse(self, data: bytes) -> Self:
1670
- ...
1671
- return self
1672
-
1673
- """
1674
-
1675
- raise TypeError(f"{self} is not subscriptable")
1676
-
1677
-
1678
- if hasattr(typing, "Never"):
1679
- Never = typing.Never
1680
- else:
1681
- @_SpecialForm
1682
- def Never(self, params):
1683
- """The bottom type, a type that has no members.
1684
-
1685
- This can be used to define a function that should never be
1686
- called, or a function that never returns::
1687
-
1688
- from pip._vendor.typing_extensions import Never
1689
-
1690
- def never_call_me(arg: Never) -> None:
1691
- pass
1692
-
1693
- def int_or_str(arg: int | str) -> None:
1694
- never_call_me(arg) # type checker error
1695
- match arg:
1696
- case int():
1697
- print("It's an int")
1698
- case str():
1699
- print("It's a str")
1700
- case _:
1701
- never_call_me(arg) # ok, arg is of type Never
1702
-
1703
- """
1704
-
1705
- raise TypeError(f"{self} is not subscriptable")
1706
-
1707
-
1708
- if hasattr(typing, 'Required'):
1709
- Required = typing.Required
1710
- NotRequired = typing.NotRequired
1711
- elif sys.version_info[:2] >= (3, 9):
1712
- class _ExtensionsSpecialForm(typing._SpecialForm, _root=True):
1713
- def __repr__(self):
1714
- return 'typing_extensions.' + self._name
1715
-
1716
- @_ExtensionsSpecialForm
1717
- def Required(self, parameters):
1718
- """A special typing construct to mark a key of a total=False TypedDict
1719
- as required. For example:
1720
-
1721
- class Movie(TypedDict, total=False):
1722
- title: Required[str]
1723
- year: int
1724
-
1725
- m = Movie(
1726
- title='The Matrix', # typechecker error if key is omitted
1727
- year=1999,
1728
- )
1729
-
1730
- There is no runtime checking that a required key is actually provided
1731
- when instantiating a related TypedDict.
1732
- """
1733
- item = typing._type_check(parameters, f'{self._name} accepts only a single type.')
1734
- return typing._GenericAlias(self, (item,))
1735
-
1736
- @_ExtensionsSpecialForm
1737
- def NotRequired(self, parameters):
1738
- """A special typing construct to mark a key of a TypedDict as
1739
- potentially missing. For example:
1740
-
1741
- class Movie(TypedDict):
1742
- title: str
1743
- year: NotRequired[int]
1744
-
1745
- m = Movie(
1746
- title='The Matrix', # typechecker error if key is omitted
1747
- year=1999,
1748
- )
1749
- """
1750
- item = typing._type_check(parameters, f'{self._name} accepts only a single type.')
1751
- return typing._GenericAlias(self, (item,))
1752
-
1753
- else:
1754
- class _RequiredForm(typing._SpecialForm, _root=True):
1755
- def __repr__(self):
1756
- return 'typing_extensions.' + self._name
1757
-
1758
- def __getitem__(self, parameters):
1759
- item = typing._type_check(parameters,
1760
- f'{self._name} accepts only a single type.')
1761
- return typing._GenericAlias(self, (item,))
1762
-
1763
- Required = _RequiredForm(
1764
- 'Required',
1765
- doc="""A special typing construct to mark a key of a total=False TypedDict
1766
- as required. For example:
1767
-
1768
- class Movie(TypedDict, total=False):
1769
- title: Required[str]
1770
- year: int
1771
-
1772
- m = Movie(
1773
- title='The Matrix', # typechecker error if key is omitted
1774
- year=1999,
1775
- )
1776
-
1777
- There is no runtime checking that a required key is actually provided
1778
- when instantiating a related TypedDict.
1779
- """)
1780
- NotRequired = _RequiredForm(
1781
- 'NotRequired',
1782
- doc="""A special typing construct to mark a key of a TypedDict as
1783
- potentially missing. For example:
1784
-
1785
- class Movie(TypedDict):
1786
- title: str
1787
- year: NotRequired[int]
1788
-
1789
- m = Movie(
1790
- title='The Matrix', # typechecker error if key is omitted
1791
- year=1999,
1792
- )
1793
- """)
1794
-
1795
-
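
Combining the backported ``TypedDict`` with ``Required``/``NotRequired`` above, the metaclass records the keys accordingly (a minimal sketch)::

    from typing_extensions import NotRequired, Required, TypedDict

    class Movie(TypedDict, total=False):
        title: Required[str]
        year: int

    class Book(TypedDict):
        title: str
        isbn: NotRequired[str]

    assert Movie.__required_keys__ == frozenset({'title'})
    assert Movie.__optional_keys__ == frozenset({'year'})
    assert Book.__optional_keys__ == frozenset({'isbn'})
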
1796
- if hasattr(typing, "Unpack"): # 3.11+
1797
- Unpack = typing.Unpack
1798
- elif sys.version_info[:2] >= (3, 9):
1799
- class _UnpackSpecialForm(typing._SpecialForm, _root=True):
1800
- def __repr__(self):
1801
- return 'typing_extensions.' + self._name
1802
-
1803
- class _UnpackAlias(typing._GenericAlias, _root=True):
1804
- __class__ = typing.TypeVar
1805
-
1806
- @_UnpackSpecialForm
1807
- def Unpack(self, parameters):
1808
- """A special typing construct to unpack a variadic type. For example:
1809
-
1810
- Shape = TypeVarTuple('Shape')
1811
- Batch = NewType('Batch', int)
1812
-
1813
- def add_batch_axis(
1814
- x: Array[Unpack[Shape]]
1815
- ) -> Array[Batch, Unpack[Shape]]: ...
1816
-
1817
- """
1818
- item = typing._type_check(parameters, f'{self._name} accepts only a single type.')
1819
- return _UnpackAlias(self, (item,))
1820
-
1821
- def _is_unpack(obj):
1822
- return isinstance(obj, _UnpackAlias)
1823
-
1824
- else:
1825
- class _UnpackAlias(typing._GenericAlias, _root=True):
1826
- __class__ = typing.TypeVar
1827
-
1828
- class _UnpackForm(typing._SpecialForm, _root=True):
1829
- def __repr__(self):
1830
- return 'typing_extensions.' + self._name
1831
-
1832
- def __getitem__(self, parameters):
1833
- item = typing._type_check(parameters,
1834
- f'{self._name} accepts only a single type.')
1835
- return _UnpackAlias(self, (item,))
1836
-
1837
- Unpack = _UnpackForm(
1838
- 'Unpack',
1839
- doc="""A special typing construct to unpack a variadic type. For example:
1840
-
1841
- Shape = TypeVarTuple('Shape')
1842
- Batch = NewType('Batch', int)
1843
-
1844
- def add_batch_axis(
1845
- x: Array[Unpack[Shape]]
1846
- ) -> Array[Batch, Unpack[Shape]]: ...
1847
-
1848
- """)
1849
-
1850
- def _is_unpack(obj):
1851
- return isinstance(obj, _UnpackAlias)
1852
-
1853
-
1854
- if hasattr(typing, "TypeVarTuple"): # 3.11+
1855
-
1856
- # Add default Parameter - PEP 696
1857
- class TypeVarTuple(typing.TypeVarTuple, _DefaultMixin, _root=True):
1858
- """Type variable tuple."""
1859
-
1860
- def __init__(self, name, *, default=_marker):
1861
- super().__init__(name)
1862
- _DefaultMixin.__init__(self, default)
1863
-
1864
- # for pickling:
1865
- try:
1866
- def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
1867
- except (AttributeError, ValueError):
1868
- def_mod = None
1869
- if def_mod != 'typing_extensions':
1870
- self.__module__ = def_mod
1871
-
1872
- else:
1873
- class TypeVarTuple(_DefaultMixin):
1874
- """Type variable tuple.
1875
-
1876
- Usage::
1877
-
1878
- Ts = TypeVarTuple('Ts')
1879
-
1880
- In the same way that a normal type variable is a stand-in for a single
1881
- type such as ``int``, a type variable *tuple* is a stand-in for a *tuple*
1882
- type such as ``Tuple[int, str]``.
1883
-
1884
- Type variable tuples can be used in ``Generic`` declarations.
1885
- Consider the following example::
1886
-
1887
- class Array(Generic[*Ts]): ...
1888
-
1889
- The ``Ts`` type variable tuple here behaves like ``tuple[T1, T2]``,
1890
- where ``T1`` and ``T2`` are type variables. To use these type variables
1891
- as type parameters of ``Array``, we must *unpack* the type variable tuple using
1892
- the star operator: ``*Ts``. The signature of ``Array`` then behaves
1893
- as if we had simply written ``class Array(Generic[T1, T2]): ...``.
1894
- In contrast to ``Generic[T1, T2]``, however, ``Generic[*Shape]`` allows
1895
- us to parameterise the class with an *arbitrary* number of type parameters.
1896
-
1897
- Type variable tuples can be used anywhere a normal ``TypeVar`` can.
1898
- This includes class definitions, as shown above, as well as function
1899
- signatures and variable annotations::
1900
-
1901
- class Array(Generic[*Ts]):
1902
-
1903
- def __init__(self, shape: Tuple[*Ts]):
1904
- self._shape: Tuple[*Ts] = shape
1905
-
1906
- def get_shape(self) -> Tuple[*Ts]:
1907
- return self._shape
1908
-
1909
- shape = (Height(480), Width(640))
1910
- x: Array[Height, Width] = Array(shape)
1911
- y = abs(x) # Inferred type is Array[Height, Width]
1912
- z = x + x # ... is Array[Height, Width]
1913
- x.get_shape() # ... is tuple[Height, Width]
1914
-
1915
- """
1916
-
1917
- # Trick Generic __parameters__.
1918
- __class__ = typing.TypeVar
1919
-
1920
- def __iter__(self):
1921
- yield self.__unpacked__
1922
-
1923
- def __init__(self, name, *, default=_marker):
1924
- self.__name__ = name
1925
- _DefaultMixin.__init__(self, default)
1926
-
1927
- # for pickling:
1928
- try:
1929
- def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
1930
- except (AttributeError, ValueError):
1931
- def_mod = None
1932
- if def_mod != 'typing_extensions':
1933
- self.__module__ = def_mod
1934
-
1935
- self.__unpacked__ = Unpack[self]
1936
-
1937
- def __repr__(self):
1938
- return self.__name__
1939
-
1940
- def __hash__(self):
1941
- return object.__hash__(self)
1942
-
1943
- def __eq__(self, other):
1944
- return self is other
1945
-
1946
- def __reduce__(self):
1947
- return self.__name__
1948
-
1949
- def __init_subclass__(self, *args, **kwds):
1950
- if '_root' not in kwds:
1951
- raise TypeError("Cannot subclass special typing classes")
1952
-
1953
-
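
A brief sketch of the ``TypeVarTuple``/``Unpack`` pair defined above used in a variadic generic; the ``Array`` class is purely illustrative, and the runtime acceptance of ``Generic[Unpack[Ts]]`` relies on the monkey-patching described later in this module::

    from typing import Generic
    from typing_extensions import TypeVarTuple, Unpack

    Ts = TypeVarTuple("Ts")

    class Array(Generic[Unpack[Ts]]):
        def __init__(self, *shape: Unpack[Ts]) -> None:
            self.shape = shape

    a: "Array[int, int]" = Array(480, 640)
    assert a.shape == (480, 640)
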
1954
- if hasattr(typing, "reveal_type"):
1955
- reveal_type = typing.reveal_type
1956
- else:
1957
- def reveal_type(__obj: T) -> T:
1958
- """Reveal the inferred type of a variable.
1959
-
1960
- When a static type checker encounters a call to ``reveal_type()``,
1961
- it will emit the inferred type of the argument::
1962
-
1963
- x: int = 1
1964
- reveal_type(x)
1965
-
1966
- Running a static type checker (e.g., ``mypy``) on this example
1967
- will produce output similar to 'Revealed type is "builtins.int"'.
1968
-
1969
- At runtime, the function prints the runtime type of the
1970
- argument and returns it unchanged.
1971
-
1972
- """
1973
- print(f"Runtime type is {type(__obj).__name__!r}", file=sys.stderr)
1974
- return __obj
1975
-
1976
-
1977
- if hasattr(typing, "assert_never"):
1978
- assert_never = typing.assert_never
1979
- else:
1980
- def assert_never(__arg: Never) -> Never:
1981
- """Assert to the type checker that a line of code is unreachable.
1982
-
1983
- Example::
1984
-
1985
- def int_or_str(arg: int | str) -> None:
1986
- match arg:
1987
- case int():
1988
- print("It's an int")
1989
- case str():
1990
- print("It's a str")
1991
- case _:
1992
- assert_never(arg)
1993
-
1994
- If a type checker finds that a call to assert_never() is
1995
- reachable, it will emit an error.
1996
-
1997
- At runtime, this throws an exception when called.
1998
-
1999
- """
2000
- raise AssertionError("Expected code to be unreachable")
2001
-
2002
-
2003
- if sys.version_info >= (3, 12):
2004
- # dataclass_transform exists in 3.11 but lacks the frozen_default parameter
2005
- dataclass_transform = typing.dataclass_transform
2006
- else:
2007
- def dataclass_transform(
2008
- *,
2009
- eq_default: bool = True,
2010
- order_default: bool = False,
2011
- kw_only_default: bool = False,
2012
- frozen_default: bool = False,
2013
- field_specifiers: typing.Tuple[
2014
- typing.Union[typing.Type[typing.Any], typing.Callable[..., typing.Any]],
2015
- ...
2016
- ] = (),
2017
- **kwargs: typing.Any,
2018
- ) -> typing.Callable[[T], T]:
2019
- """Decorator that marks a function, class, or metaclass as providing
2020
- dataclass-like behavior.
2021
-
2022
- Example:
2023
-
2024
- from pip._vendor.typing_extensions import dataclass_transform
2025
-
2026
- _T = TypeVar("_T")
2027
-
2028
- # Used on a decorator function
2029
- @dataclass_transform()
2030
- def create_model(cls: type[_T]) -> type[_T]:
2031
- ...
2032
- return cls
2033
-
2034
- @create_model
2035
- class CustomerModel:
2036
- id: int
2037
- name: str
2038
-
2039
- # Used on a base class
2040
- @dataclass_transform()
2041
- class ModelBase: ...
2042
-
2043
- class CustomerModel(ModelBase):
2044
- id: int
2045
- name: str
2046
-
2047
- # Used on a metaclass
2048
- @dataclass_transform()
2049
- class ModelMeta(type): ...
2050
-
2051
- class ModelBase(metaclass=ModelMeta): ...
2052
-
2053
- class CustomerModel(ModelBase):
2054
- id: int
2055
- name: str
2056
-
2057
- Each of the ``CustomerModel`` classes defined in this example will now
2058
- behave similarly to a dataclass created with the ``@dataclasses.dataclass``
2059
- decorator. For example, the type checker will synthesize an ``__init__``
2060
- method.
2061
-
2062
- The arguments to this decorator can be used to customize this behavior:
2063
- - ``eq_default`` indicates whether the ``eq`` parameter is assumed to be
2064
- True or False if it is omitted by the caller.
2065
- - ``order_default`` indicates whether the ``order`` parameter is
2066
- assumed to be True or False if it is omitted by the caller.
2067
- - ``kw_only_default`` indicates whether the ``kw_only`` parameter is
2068
- assumed to be True or False if it is omitted by the caller.
2069
- - ``frozen_default`` indicates whether the ``frozen`` parameter is
2070
- assumed to be True or False if it is omitted by the caller.
2071
- - ``field_specifiers`` specifies a static list of supported classes
2072
- or functions that describe fields, similar to ``dataclasses.field()``.
2073
-
2074
- At runtime, this decorator records its arguments in the
2075
- ``__dataclass_transform__`` attribute on the decorated object.
2076
-
2077
- See PEP 681 for details.
2078
-
2079
- """
2080
- def decorator(cls_or_fn):
2081
- cls_or_fn.__dataclass_transform__ = {
2082
- "eq_default": eq_default,
2083
- "order_default": order_default,
2084
- "kw_only_default": kw_only_default,
2085
- "frozen_default": frozen_default,
2086
- "field_specifiers": field_specifiers,
2087
- "kwargs": kwargs,
2088
- }
2089
- return cls_or_fn
2090
- return decorator
2091
-
2092
-
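
As the docstring above notes, the backported ``dataclass_transform`` simply records its arguments at runtime; a minimal sketch::

    from typing_extensions import dataclass_transform

    @dataclass_transform(kw_only_default=True)
    def create_model(cls):
        return cls

    spec = create_model.__dataclass_transform__
    assert spec["kw_only_default"] is True
    assert spec["eq_default"] is True  # the documented default
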
2093
- if hasattr(typing, "override"):
2094
- override = typing.override
2095
- else:
2096
- _F = typing.TypeVar("_F", bound=typing.Callable[..., typing.Any])
2097
-
2098
- def override(__arg: _F) -> _F:
2099
- """Indicate that a method is intended to override a method in a base class.
2100
-
2101
- Usage:
2102
-
2103
- class Base:
2104
- def method(self) -> None:
2105
- pass
2106
-
2107
- class Child(Base):
2108
- @override
2109
- def method(self) -> None:
2110
- super().method()
2111
-
2112
- When this decorator is applied to a method, the type checker will
2113
- validate that it overrides a method with the same name on a base class.
2114
- This helps prevent bugs that may occur when a base class is changed
2115
- without an equivalent change to a child class.
2116
-
2117
- There is no runtime checking of these properties. The decorator
2118
- sets the ``__override__`` attribute to ``True`` on the decorated object
2119
- to allow runtime introspection.
2120
-
2121
- See PEP 698 for details.
2122
-
2123
- """
2124
- try:
2125
- __arg.__override__ = True
2126
- except (AttributeError, TypeError):
2127
- # Skip the attribute silently if it is not writable.
2128
- # AttributeError happens if the object has __slots__ or a
2129
- # read-only property, TypeError if it's a builtin class.
2130
- pass
2131
- return __arg
2132
-
2133
-
2134
- if hasattr(typing, "deprecated"):
2135
- deprecated = typing.deprecated
2136
- else:
2137
- _T = typing.TypeVar("_T")
2138
-
2139
- def deprecated(
2140
- __msg: str,
2141
- *,
2142
- category: typing.Optional[typing.Type[Warning]] = DeprecationWarning,
2143
- stacklevel: int = 1,
2144
- ) -> typing.Callable[[_T], _T]:
2145
- """Indicate that a class, function or overload is deprecated.
2146
-
2147
- Usage:
2148
-
2149
- @deprecated("Use B instead")
2150
- class A:
2151
- pass
2152
-
2153
- @deprecated("Use g instead")
2154
- def f():
2155
- pass
2156
-
2157
- @overload
2158
- @deprecated("int support is deprecated")
2159
- def g(x: int) -> int: ...
2160
- @overload
2161
- def g(x: str) -> int: ...
2162
-
2163
- When this decorator is applied to an object, the type checker
2164
- will generate a diagnostic on usage of the deprecated object.
2165
-
2166
- Unless ``category`` is ``None``, the decorated object also emits the warning
2167
- at runtime when it is called or instantiated. In addition, the decorator sets
- the ``__deprecated__`` attribute on the decorated object to the deprecation message
2168
- passed to the decorator. If applied to an overload, the decorator
2169
- must be after the ``@overload`` decorator for the attribute to
2170
- exist on the overload as returned by ``get_overloads()``.
2171
-
2172
- See PEP 702 for details.
2173
-
2174
- """
2175
- def decorator(__arg: _T) -> _T:
2176
- if category is None:
2177
- __arg.__deprecated__ = __msg
2178
- return __arg
2179
- elif isinstance(__arg, type):
2180
- original_new = __arg.__new__
2181
- has_init = __arg.__init__ is not object.__init__
2182
-
2183
- @functools.wraps(original_new)
2184
- def __new__(cls, *args, **kwargs):
2185
- warnings.warn(__msg, category=category, stacklevel=stacklevel + 1)
2186
- # Mirrors a similar check in object.__new__.
2187
- if not has_init and (args or kwargs):
2188
- raise TypeError(f"{cls.__name__}() takes no arguments")
2189
- if original_new is not object.__new__:
2190
- return original_new(cls, *args, **kwargs)
2191
- else:
2192
- return original_new(cls)
2193
-
2194
- __arg.__new__ = staticmethod(__new__)
2195
- __arg.__deprecated__ = __new__.__deprecated__ = __msg
2196
- return __arg
2197
- elif callable(__arg):
2198
- @functools.wraps(__arg)
2199
- def wrapper(*args, **kwargs):
2200
- warnings.warn(__msg, category=category, stacklevel=stacklevel + 1)
2201
- return __arg(*args, **kwargs)
2202
-
2203
- __arg.__deprecated__ = wrapper.__deprecated__ = __msg
2204
- return wrapper
2205
- else:
2206
- raise TypeError(
2207
- "@deprecated decorator with non-None category must be applied to "
2208
- f"a class or callable, not {__arg!r}"
2209
- )
2210
-
2211
- return decorator
2212
-
2213
-
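
A small sketch of the ``deprecated`` backport above: with a non-``None`` category, calling the wrapped function emits the warning, and ``__deprecated__`` carries the message (``old_api`` is illustrative only)::

    import warnings

    from typing_extensions import deprecated

    @deprecated("Use new_api() instead")
    def old_api() -> int:
        return 1

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        assert old_api() == 1

    assert old_api.__deprecated__ == "Use new_api() instead"
    assert issubclass(caught[0].category, DeprecationWarning)
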
2214
- # We have to do some monkey patching to deal with the dual nature of
2215
- # Unpack/TypeVarTuple:
2216
- # - We want Unpack to be a kind of TypeVar so it gets accepted in
2217
- # Generic[Unpack[Ts]]
2218
- # - We want it to *not* be treated as a TypeVar for the purposes of
2219
- # counting generic parameters, so that when we subscript a generic,
2220
- # the runtime doesn't try to substitute the Unpack with the subscripted type.
2221
- if not hasattr(typing, "TypeVarTuple"):
2222
- typing._collect_type_vars = _collect_type_vars
2223
- typing._check_generic = _check_generic
2224
-
2225
-
2226
- # Backport typing.NamedTuple as it exists in Python 3.11.
2227
- # In 3.11, the ability to define generic `NamedTuple`s was supported.
2228
- # This was explicitly disallowed in 3.9-3.10, and only half-worked in <=3.8.
2229
- if sys.version_info >= (3, 11):
2230
- NamedTuple = typing.NamedTuple
2231
- else:
2232
- def _caller():
2233
- try:
2234
- return sys._getframe(2).f_globals.get('__name__', '__main__')
2235
- except (AttributeError, ValueError): # For platforms without _getframe()
2236
- return None
2237
-
2238
- def _make_nmtuple(name, types, module, defaults=()):
2239
- fields = [n for n, t in types]
2240
- annotations = {n: typing._type_check(t, f"field {n} annotation must be a type")
2241
- for n, t in types}
2242
- nm_tpl = collections.namedtuple(name, fields,
2243
- defaults=defaults, module=module)
2244
- nm_tpl.__annotations__ = nm_tpl.__new__.__annotations__ = annotations
2245
- # The `_field_types` attribute was removed in 3.9;
2246
- # in earlier versions, it is the same as the `__annotations__` attribute
2247
- if sys.version_info < (3, 9):
2248
- nm_tpl._field_types = annotations
2249
- return nm_tpl
2250
-
2251
- _prohibited_namedtuple_fields = typing._prohibited
2252
- _special_namedtuple_fields = frozenset({'__module__', '__name__', '__annotations__'})
2253
-
2254
- class _NamedTupleMeta(type):
2255
- def __new__(cls, typename, bases, ns):
2256
- assert _NamedTuple in bases
2257
- for base in bases:
2258
- if base is not _NamedTuple and base is not typing.Generic:
2259
- raise TypeError(
2260
- 'can only inherit from a NamedTuple type and Generic')
2261
- bases = tuple(tuple if base is _NamedTuple else base for base in bases)
2262
- types = ns.get('__annotations__', {})
2263
- default_names = []
2264
- for field_name in types:
2265
- if field_name in ns:
2266
- default_names.append(field_name)
2267
- elif default_names:
2268
- raise TypeError(f"Non-default namedtuple field {field_name} "
2269
- f"cannot follow default field"
2270
- f"{'s' if len(default_names) > 1 else ''} "
2271
- f"{', '.join(default_names)}")
2272
- nm_tpl = _make_nmtuple(
2273
- typename, types.items(),
2274
- defaults=[ns[n] for n in default_names],
2275
- module=ns['__module__']
2276
- )
2277
- nm_tpl.__bases__ = bases
2278
- if typing.Generic in bases:
2279
- class_getitem = typing.Generic.__class_getitem__.__func__
2280
- nm_tpl.__class_getitem__ = classmethod(class_getitem)
2281
- # update from user namespace without overriding special namedtuple attributes
2282
- for key in ns:
2283
- if key in _prohibited_namedtuple_fields:
2284
- raise AttributeError("Cannot overwrite NamedTuple attribute " + key)
2285
- elif key not in _special_namedtuple_fields and key not in nm_tpl._fields:
2286
- setattr(nm_tpl, key, ns[key])
2287
- if typing.Generic in bases:
2288
- nm_tpl.__init_subclass__()
2289
- return nm_tpl
2290
-
2291
- def NamedTuple(__typename, __fields=None, **kwargs):
2292
- if __fields is None:
2293
- __fields = kwargs.items()
2294
- elif kwargs:
2295
- raise TypeError("Either list of fields or keywords"
2296
- " can be provided to NamedTuple, not both")
2297
- return _make_nmtuple(__typename, __fields, module=_caller())
2298
-
2299
- NamedTuple.__doc__ = typing.NamedTuple.__doc__
2300
- _NamedTuple = type.__new__(_NamedTupleMeta, 'NamedTuple', (), {})
2301
-
2302
- # On 3.8+, alter the signature so that it matches typing.NamedTuple.
2303
- # The signature of typing.NamedTuple on >=3.8 is invalid syntax in Python 3.7,
2304
- # so just leave the signature as it is on 3.7.
2305
- if sys.version_info >= (3, 8):
2306
- NamedTuple.__text_signature__ = '(typename, fields=None, /, **kwargs)'
2307
-
2308
- def _namedtuple_mro_entries(bases):
2309
- assert NamedTuple in bases
2310
- return (_NamedTuple,)
2311
-
2312
- NamedTuple.__mro_entries__ = _namedtuple_mro_entries
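
Finally, the ``NamedTuple`` backport above allows the generic form that only became legal in the standard library in Python 3.11 (a minimal sketch)::

    from typing import Generic, TypeVar
    from typing_extensions import NamedTuple

    T = TypeVar("T")

    class Pair(NamedTuple, Generic[T]):
        first: T
        second: T

    p = Pair(1, 2)
    assert p.first == 1 and p.second == 2
    assert Pair[int]  # subscription works because __class_getitem__ is attached
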
 
spaces/CVPR/LIVE/thrust/thrust/mr/tls_pool.h DELETED
@@ -1,64 +0,0 @@
- /*
-  * Copyright 2018 NVIDIA Corporation
-  *
-  * Licensed under the Apache License, Version 2.0 (the "License");
-  * you may not use this file except in compliance with the License.
-  * You may obtain a copy of the License at
-  *
-  * http://www.apache.org/licenses/LICENSE-2.0
-  *
-  * Unless required by applicable law or agreed to in writing, software
-  * distributed under the License is distributed on an "AS IS" BASIS,
-  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  * See the License for the specific language governing permissions and
-  * limitations under the License.
-  */
- 
- /*! \file tls_pool.h
-  * \brief A function wrapping a thread local instance of a \p unsynchronized_pool_resource.
-  */
- 
- #pragma once
- 
- #include <thrust/detail/cpp11_required.h>
- 
- #if THRUST_CPP_DIALECT >= 2011
- 
- #include <thrust/mr/pool.h>
- 
- namespace thrust
- {
- namespace mr
- {
- 
- /*! \addtogroup memory_management Memory Management
-  * \addtogroup memory_resources Memory Resources
-  * \ingroup memory_resources
-  * \{
-  */
- 
- /*! Potentially constructs, if not yet created, and then returns the address of a thread-local \p unsynchronized_pool_resource,
-  *
-  * \tparam Upstream the template argument to the pool template
-  * \param upstream the argument to the constructor, if invoked
-  */
- template<typename Upstream, typename Bookkeeper>
- __host__
- thrust::mr::unsynchronized_pool_resource<Upstream> & tls_pool(Upstream * upstream = NULL)
- {
-     static thread_local auto adaptor = [&]{
-         assert(upstream);
-         return thrust::mr::unsynchronized_pool_resource<Upstream>(upstream);
-     }();
- 
-     return adaptor;
- }
- 
- /*! \}
-  */
- 
- } // end mr
- } // end thrust
- 
- #endif // THRUST_CPP_DIALECT >= 2011
- 
 
spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/cross_system.h DELETED
@@ -1,344 +0,0 @@
1
- /******************************************************************************
2
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3
- *
4
- * Redistribution and use in source and binary forms, with or without
5
- * modification, are permitted provided that the following conditions are met:
6
- * * Redistributions of source code must retain the above copyright
7
- * notice, this list of conditions and the following disclaimer.
8
- * * Redistributions in binary form must reproduce the above copyright
9
- * notice, this list of conditions and the following disclaimer in the
10
- * documentation and/or other materials provided with the distribution.
11
- * * Neither the name of the NVIDIA CORPORATION nor the
12
- * names of its contributors may be used to endorse or promote products
13
- * derived from this software without specific prior written permission.
14
- *
15
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18
- * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
19
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25
- *
26
- ******************************************************************************/
27
- #pragma once
28
-
29
- #include <thrust/system/cuda/detail/guarded_cuda_runtime_api.h>
30
- #include <thrust/system/cpp/detail/execution_policy.h>
31
- #include <thrust/system/cuda/detail/execution_policy.h>
32
-
33
- namespace thrust
34
- {
35
- namespace cuda_cub {
36
-
37
- template <class Sys1, class Sys2>
38
- struct cross_system : execution_policy<cross_system<Sys1, Sys2> >
39
- {
40
- typedef thrust::execution_policy<Sys1> policy1;
41
- typedef thrust::execution_policy<Sys2> policy2;
42
-
43
- policy1 &sys1;
44
- policy2 &sys2;
45
-
46
- inline __host__ __device__
47
- cross_system(policy1 &sys1, policy2 &sys2) : sys1(sys1), sys2(sys2) {}
48
-
49
- inline __host__ __device__
50
- cross_system<Sys2, Sys1> rotate() const
51
- {
52
- return cross_system<Sys2, Sys1>(sys2, sys1);
53
- }
54
- };
55
-
56
- #if THRUST_CPP_DIALECT >= 2011
57
- // Device to host.
58
- template <class Sys1, class Sys2>
59
- THRUST_CONSTEXPR __host__ __device__
60
- auto direction_of_copy(
61
- thrust::system::cuda::execution_policy<Sys1> const&
62
- , thrust::cpp::execution_policy<Sys2> const&
63
- )
64
- THRUST_DECLTYPE_RETURNS(
65
- thrust::detail::integral_constant<
66
- cudaMemcpyKind, cudaMemcpyDeviceToHost
67
- >{}
68
- )
69
-
70
- // Host to device.
71
- template <class Sys1, class Sys2>
72
- THRUST_CONSTEXPR __host__ __device__
73
- auto direction_of_copy(
74
- thrust::cpp::execution_policy<Sys1> const&
75
- , thrust::system::cuda::execution_policy<Sys2> const&
76
- )
77
- THRUST_DECLTYPE_RETURNS(
78
- thrust::detail::integral_constant<
79
- cudaMemcpyKind, cudaMemcpyHostToDevice
80
- >{}
81
- )
82
-
83
- // Device to device.
84
- template <class Sys1, class Sys2>
85
- THRUST_CONSTEXPR __host__ __device__
86
- auto direction_of_copy(
87
- thrust::system::cuda::execution_policy<Sys1> const&
88
- , thrust::system::cuda::execution_policy<Sys2> const&
89
- )
90
- THRUST_DECLTYPE_RETURNS(
91
- thrust::detail::integral_constant<
92
- cudaMemcpyKind, cudaMemcpyDeviceToDevice
93
- >{}
94
- )
95
-
96
- // Device to device.
97
- template <class DerivedPolicy>
98
- THRUST_CONSTEXPR __host__ __device__
99
- auto direction_of_copy(execution_policy<DerivedPolicy> const &)
100
- THRUST_DECLTYPE_RETURNS(
101
- thrust::detail::integral_constant<
102
- cudaMemcpyKind, cudaMemcpyDeviceToDevice
103
- >{}
104
- )
105
-
106
- template <class Sys1, class Sys2>
107
- THRUST_CONSTEXPR __host__ __device__
108
- auto direction_of_copy(
109
- execution_policy<cross_system<Sys1, Sys2>> const &systems
110
- )
111
- THRUST_DECLTYPE_RETURNS(
112
- direction_of_copy(
113
- derived_cast(derived_cast(systems).sys1)
114
- , derived_cast(derived_cast(systems).sys2)
115
- )
116
- )
117
-
118
- template <typename ExecutionPolicy0,
119
- typename ExecutionPolicy1,
120
- // MSVC2015 WAR: put decltype here instead of in trailing return type
121
- typename Direction =
122
- decltype(direction_of_copy(std::declval<ExecutionPolicy0>(),
123
- std::declval<ExecutionPolicy1>()))>
124
- THRUST_CONSTEXPR __host__ __device__
125
- auto is_device_to_host_copy(
126
- ExecutionPolicy0 const& exec0
127
- , ExecutionPolicy1 const& exec1
128
- )
129
- noexcept ->
130
- thrust::detail::integral_constant<
131
- bool, cudaMemcpyDeviceToHost == Direction::value
132
- >
133
- {
134
- return {};
135
- }
136
-
137
- template <typename ExecutionPolicy,
138
- // MSVC2015 WAR: put decltype here instead of in trailing return type
139
- typename Direction =
140
- decltype(direction_of_copy(std::declval<ExecutionPolicy>()))>
141
- THRUST_CONSTEXPR __host__ __device__
142
- auto is_device_to_host_copy(ExecutionPolicy const& exec)
143
- noexcept ->
144
- thrust::detail::integral_constant<
145
- bool, cudaMemcpyDeviceToHost == Direction::value
146
- >
147
- {
148
- return {};
149
- }
150
-
151
- template <typename ExecutionPolicy0,
152
- typename ExecutionPolicy1,
153
- // MSVC2015 WAR: put decltype here instead of in trailing return type
154
- typename Direction =
155
- decltype(direction_of_copy(std::declval<ExecutionPolicy0>(),
156
- std::declval<ExecutionPolicy1>()))>
157
- THRUST_CONSTEXPR __host__ __device__
158
- auto is_host_to_device_copy(
159
- ExecutionPolicy0 const& exec0
160
- , ExecutionPolicy1 const& exec1
161
- )
162
- noexcept ->
163
- thrust::detail::integral_constant<
164
- bool, cudaMemcpyHostToDevice == Direction::value
165
- >
166
- {
167
- return {};
168
- }
169
-
170
- template <typename ExecutionPolicy,
171
- // MSVC2015 WAR: put decltype here instead of in trailing return type
172
- typename Direction =
173
- decltype(direction_of_copy(std::declval<ExecutionPolicy>()))>
174
- THRUST_CONSTEXPR __host__ __device__
175
- auto is_host_to_device_copy(ExecutionPolicy const& exec)
176
- noexcept ->
177
- thrust::detail::integral_constant<
178
- bool, cudaMemcpyHostToDevice == Direction::value
179
- >
180
- {
181
- return {};
182
- }
183
-
184
- template <typename ExecutionPolicy0,
185
- typename ExecutionPolicy1,
186
- // MSVC2015 WAR: put decltype here instead of in trailing return type
187
- typename Direction =
188
- decltype(direction_of_copy(std::declval<ExecutionPolicy0>(),
189
- std::declval<ExecutionPolicy1>()))>
190
- THRUST_CONSTEXPR __host__ __device__
191
- auto is_device_to_device_copy(
192
- ExecutionPolicy0 const& exec0
193
- , ExecutionPolicy1 const& exec1
194
- )
195
- noexcept ->
196
- thrust::detail::integral_constant<
197
- bool, cudaMemcpyDeviceToDevice == Direction::value
198
- >
199
- {
200
- return {};
201
- }
202
-
203
- template <typename ExecutionPolicy,
204
- // MSVC2015 WAR: put decltype here instead of in trailing return type
205
- typename Direction =
206
- decltype(direction_of_copy(std::declval<ExecutionPolicy>()))>
207
- THRUST_CONSTEXPR __host__ __device__
208
- auto is_device_to_device_copy(ExecutionPolicy const& exec)
209
- noexcept ->
210
- thrust::detail::integral_constant<
211
- bool, cudaMemcpyDeviceToDevice == Direction::value
212
- >
213
- {
214
- return {};
215
- }
216
-
217
- /////////////////////////////////////////////////////////////////////////////
218
-
219
- // Device to host.
220
- template <class Sys1, class Sys2>
221
- __host__ __device__
222
- auto
223
- select_device_system(thrust::cuda::execution_policy<Sys1> &sys1,
224
- thrust::execution_policy<Sys2> &)
225
- THRUST_DECLTYPE_RETURNS(sys1)
226
-
227
- // Device to host.
228
- template <class Sys1, class Sys2>
229
- __host__ __device__
230
- auto
231
- select_device_system(thrust::cuda::execution_policy<Sys1> const &sys1,
232
- thrust::execution_policy<Sys2> const &)
233
- THRUST_DECLTYPE_RETURNS(sys1)
234
-
235
- // Host to device.
236
- template <class Sys1, class Sys2>
237
- __host__ __device__
238
- auto
239
- select_device_system(thrust::execution_policy<Sys1> &,
240
- thrust::cuda::execution_policy<Sys2> &sys2)
241
- THRUST_DECLTYPE_RETURNS(sys2)
242
-
243
- // Host to device.
244
- template <class Sys1, class Sys2>
245
- __host__ __device__
246
- auto
247
- select_device_system(thrust::execution_policy<Sys1> const &,
248
- thrust::cuda::execution_policy<Sys2> const &sys2)
249
- THRUST_DECLTYPE_RETURNS(sys2)
250
-
251
- // Device to device.
252
- template <class Sys1, class Sys2>
253
- __host__ __device__
254
- auto
255
- select_device_system(thrust::cuda::execution_policy<Sys1> &sys1,
256
- thrust::cuda::execution_policy<Sys2> &)
257
- THRUST_DECLTYPE_RETURNS(sys1)
258
-
259
- // Device to device.
260
- template <class Sys1, class Sys2>
261
- __host__ __device__
262
- auto
263
- select_device_system(thrust::cuda::execution_policy<Sys1> const &sys1,
264
- thrust::cuda::execution_policy<Sys2> const &)
265
- THRUST_DECLTYPE_RETURNS(sys1)
266
-
267
- /////////////////////////////////////////////////////////////////////////////
268
-
269
- // Device to host.
270
- template <class Sys1, class Sys2>
271
- __host__ __device__
272
- auto
273
- select_host_system(thrust::cuda::execution_policy<Sys1> &,
274
- thrust::execution_policy<Sys2> &sys2)
275
- THRUST_DECLTYPE_RETURNS(sys2)
276
-
277
- // Device to host.
278
- template <class Sys1, class Sys2>
279
- __host__ __device__
280
- auto
281
- select_host_system(thrust::cuda::execution_policy<Sys1> const &,
282
- thrust::execution_policy<Sys2> const &sys2)
283
- THRUST_DECLTYPE_RETURNS(sys2)
284
-
285
- // Host to device.
286
- template <class Sys1, class Sys2>
287
- __host__ __device__
288
- auto
289
- select_host_system(thrust::execution_policy<Sys1> &sys1,
290
- thrust::cuda::execution_policy<Sys2> &)
291
- THRUST_DECLTYPE_RETURNS(sys1)
292
-
293
- // Host to device.
294
- template <class Sys1, class Sys2>
295
- __host__ __device__
296
- auto
297
- select_host_system(thrust::execution_policy<Sys1> const &sys1,
298
- thrust::cuda::execution_policy<Sys2> const &)
299
- THRUST_DECLTYPE_RETURNS(sys1)
300
-
301
- // Device to device.
302
- template <class Sys1, class Sys2>
303
- __host__ __device__
304
- auto
305
- select_host_system(thrust::execution_policy<Sys1> &sys1,
306
- thrust::execution_policy<Sys2> &)
307
- THRUST_DECLTYPE_RETURNS(sys1)
308
-
309
- // Device to device.
310
- template <class Sys1, class Sys2>
311
- __host__ __device__
312
- auto
313
- select_host_system(thrust::execution_policy<Sys1> const &sys1,
314
- thrust::execution_policy<Sys2> const &)
315
- THRUST_DECLTYPE_RETURNS(sys1)
316
- #endif
317
-
318
- // Device to host.
319
- template <class Sys1, class Sys2>
320
- __host__ __device__
321
- cross_system<Sys1, Sys2>
322
- select_system(execution_policy<Sys1> const & sys1,
323
- thrust::cpp::execution_policy<Sys2> const &sys2)
324
- {
325
- thrust::execution_policy<Sys1> & non_const_sys1 = const_cast<execution_policy<Sys1> &>(sys1);
326
- thrust::cpp::execution_policy<Sys2> &non_const_sys2 = const_cast<thrust::cpp::execution_policy<Sys2> &>(sys2);
327
- return cross_system<Sys1, Sys2>(non_const_sys1, non_const_sys2);
328
- }
329
-
330
- // Host to device.
331
- template <class Sys1, class Sys2>
332
- __host__ __device__
333
- cross_system<Sys1, Sys2>
334
- select_system(thrust::cpp::execution_policy<Sys1> const &sys1,
335
- execution_policy<Sys2> const & sys2)
336
- {
337
- thrust::cpp::execution_policy<Sys1> &non_const_sys1 = const_cast<thrust::cpp::execution_policy<Sys1> &>(sys1);
338
- thrust::execution_policy<Sys2> & non_const_sys2 = const_cast<execution_policy<Sys2> &>(sys2);
339
- return cross_system<Sys1, Sys2>(non_const_sys1, non_const_sys2);
340
- }
341
-
342
- } // namespace cuda_cub
343
- } // end namespace thrust
344
-
 
spaces/CVPR/WALT/mmcv_custom/runner/__init__.py DELETED
@@ -1,8 +0,0 @@
1
- # Copyright (c) Open-MMLab. All rights reserved.
2
- from .checkpoint import save_checkpoint
3
- from .epoch_based_runner import EpochBasedRunnerAmp
4
-
5
-
6
- __all__ = [
7
- 'EpochBasedRunnerAmp', 'save_checkpoint'
8
- ]
 
spaces/CVPR/WALT/mmdet/core/anchor/utils.py DELETED
@@ -1,71 +0,0 @@
1
- import torch
2
-
3
-
4
- def images_to_levels(target, num_levels):
5
- """Convert targets by image to targets by feature level.
6
-
7
- [target_img0, target_img1] -> [target_level0, target_level1, ...]
8
- """
9
- target = torch.stack(target, 0)
10
- level_targets = []
11
- start = 0
12
- for n in num_levels:
13
- end = start + n
14
- # level_targets.append(target[:, start:end].squeeze(0))
15
- level_targets.append(target[:, start:end])
16
- start = end
17
- return level_targets
18
-
19
-
20
- def anchor_inside_flags(flat_anchors,
21
- valid_flags,
22
- img_shape,
23
- allowed_border=0):
24
- """Check whether the anchors are inside the border.
25
-
26
- Args:
27
- flat_anchors (torch.Tensor): Flatten anchors, shape (n, 4).
28
- valid_flags (torch.Tensor): An existing valid flags of anchors.
29
- img_shape (tuple(int)): Shape of current image.
30
- allowed_border (int, optional): The border to allow the valid anchor.
31
- Defaults to 0.
32
-
33
- Returns:
34
- torch.Tensor: Flags indicating whether the anchors are inside a \
35
- valid range.
36
- """
37
- img_h, img_w = img_shape[:2]
38
- if allowed_border >= 0:
39
- inside_flags = valid_flags & \
40
- (flat_anchors[:, 0] >= -allowed_border) & \
41
- (flat_anchors[:, 1] >= -allowed_border) & \
42
- (flat_anchors[:, 2] < img_w + allowed_border) & \
43
- (flat_anchors[:, 3] < img_h + allowed_border)
44
- else:
45
- inside_flags = valid_flags
46
- return inside_flags
47
-
48
-
49
- def calc_region(bbox, ratio, featmap_size=None):
50
- """Calculate a proportional bbox region.
51
-
52
- The bbox center is fixed and the new h' and w' are h * ratio and w * ratio.
53
-
54
- Args:
55
- bbox (Tensor): Bboxes to calculate regions, shape (n, 4).
56
- ratio (float): Ratio of the output region.
57
- featmap_size (tuple): Feature map size used for clipping the boundary.
58
-
59
- Returns:
60
- tuple: x1, y1, x2, y2
61
- """
62
- x1 = torch.round((1 - ratio) * bbox[0] + ratio * bbox[2]).long()
63
- y1 = torch.round((1 - ratio) * bbox[1] + ratio * bbox[3]).long()
64
- x2 = torch.round(ratio * bbox[0] + (1 - ratio) * bbox[2]).long()
65
- y2 = torch.round(ratio * bbox[1] + (1 - ratio) * bbox[3]).long()
66
- if featmap_size is not None:
67
- x1 = x1.clamp(min=0, max=featmap_size[1])
68
- y1 = y1.clamp(min=0, max=featmap_size[0])
69
- x2 = x2.clamp(min=0, max=featmap_size[1])
70
- y2 = y2.clamp(min=0, max=featmap_size[0])
71
- return (x1, y1, x2, y2)
 
spaces/CVPR/lama-example/saicinpainting/training/modules/multiscale.py DELETED
@@ -1,244 +0,0 @@
1
- from typing import List, Tuple, Union, Optional
2
-
3
- import torch
4
- import torch.nn as nn
5
- import torch.nn.functional as F
6
-
7
- from saicinpainting.training.modules.base import get_conv_block_ctor, get_activation
8
- from saicinpainting.training.modules.pix2pixhd import ResnetBlock
9
-
10
-
11
- class ResNetHead(nn.Module):
12
- def __init__(self, input_nc, ngf=64, n_downsampling=3, n_blocks=9, norm_layer=nn.BatchNorm2d,
13
- padding_type='reflect', conv_kind='default', activation=nn.ReLU(True)):
14
- assert (n_blocks >= 0)
15
- super(ResNetHead, self).__init__()
16
-
17
- conv_layer = get_conv_block_ctor(conv_kind)
18
-
19
- model = [nn.ReflectionPad2d(3),
20
- conv_layer(input_nc, ngf, kernel_size=7, padding=0),
21
- norm_layer(ngf),
22
- activation]
23
-
24
- ### downsample
25
- for i in range(n_downsampling):
26
- mult = 2 ** i
27
- model += [conv_layer(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1),
28
- norm_layer(ngf * mult * 2),
29
- activation]
30
-
31
- mult = 2 ** n_downsampling
32
-
33
- ### resnet blocks
34
- for i in range(n_blocks):
35
- model += [ResnetBlock(ngf * mult, padding_type=padding_type, activation=activation, norm_layer=norm_layer,
36
- conv_kind=conv_kind)]
37
-
38
- self.model = nn.Sequential(*model)
39
-
40
- def forward(self, input):
41
- return self.model(input)
42
-
43
-
44
- class ResNetTail(nn.Module):
45
- def __init__(self, output_nc, ngf=64, n_downsampling=3, n_blocks=9, norm_layer=nn.BatchNorm2d,
46
- padding_type='reflect', conv_kind='default', activation=nn.ReLU(True),
47
- up_norm_layer=nn.BatchNorm2d, up_activation=nn.ReLU(True), add_out_act=False, out_extra_layers_n=0,
48
- add_in_proj=None):
49
- assert (n_blocks >= 0)
50
- super(ResNetTail, self).__init__()
51
-
52
- mult = 2 ** n_downsampling
53
-
54
- model = []
55
-
56
- if add_in_proj is not None:
57
- model.append(nn.Conv2d(add_in_proj, ngf * mult, kernel_size=1))
58
-
59
- ### resnet blocks
60
- for i in range(n_blocks):
61
- model += [ResnetBlock(ngf * mult, padding_type=padding_type, activation=activation, norm_layer=norm_layer,
62
- conv_kind=conv_kind)]
63
-
64
- ### upsample
65
- for i in range(n_downsampling):
66
- mult = 2 ** (n_downsampling - i)
67
- model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=2, padding=1,
68
- output_padding=1),
69
- up_norm_layer(int(ngf * mult / 2)),
70
- up_activation]
71
- self.model = nn.Sequential(*model)
72
-
73
- out_layers = []
74
- for _ in range(out_extra_layers_n):
75
- out_layers += [nn.Conv2d(ngf, ngf, kernel_size=1, padding=0),
76
- up_norm_layer(ngf),
77
- up_activation]
78
- out_layers += [nn.ReflectionPad2d(3),
79
- nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
80
-
81
- if add_out_act:
82
- out_layers.append(get_activation('tanh' if add_out_act is True else add_out_act))
83
-
84
- self.out_proj = nn.Sequential(*out_layers)
85
-
86
- def forward(self, input, return_last_act=False):
87
- features = self.model(input)
88
- out = self.out_proj(features)
89
- if return_last_act:
90
- return out, features
91
- else:
92
- return out
93
-
94
-
95
- class MultiscaleResNet(nn.Module):
96
- def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=2, n_blocks_head=2, n_blocks_tail=6, n_scales=3,
97
- norm_layer=nn.BatchNorm2d, padding_type='reflect', conv_kind='default', activation=nn.ReLU(True),
98
- up_norm_layer=nn.BatchNorm2d, up_activation=nn.ReLU(True), add_out_act=False, out_extra_layers_n=0,
99
- out_cumulative=False, return_only_hr=False):
100
- super().__init__()
101
-
102
- self.heads = nn.ModuleList([ResNetHead(input_nc, ngf=ngf, n_downsampling=n_downsampling,
103
- n_blocks=n_blocks_head, norm_layer=norm_layer, padding_type=padding_type,
104
- conv_kind=conv_kind, activation=activation)
105
- for i in range(n_scales)])
106
- tail_in_feats = ngf * (2 ** n_downsampling) + ngf
107
- self.tails = nn.ModuleList([ResNetTail(output_nc,
108
- ngf=ngf, n_downsampling=n_downsampling,
109
- n_blocks=n_blocks_tail, norm_layer=norm_layer, padding_type=padding_type,
110
- conv_kind=conv_kind, activation=activation, up_norm_layer=up_norm_layer,
111
- up_activation=up_activation, add_out_act=add_out_act,
112
- out_extra_layers_n=out_extra_layers_n,
113
- add_in_proj=None if (i == n_scales - 1) else tail_in_feats)
114
- for i in range(n_scales)])
115
-
116
- self.out_cumulative = out_cumulative
117
- self.return_only_hr = return_only_hr
118
-
119
- @property
120
- def num_scales(self):
121
- return len(self.heads)
122
-
123
- def forward(self, ms_inputs: List[torch.Tensor], smallest_scales_num: Optional[int] = None) \
124
- -> Union[torch.Tensor, List[torch.Tensor]]:
125
- """
126
- :param ms_inputs: List of inputs of different resolutions from HR to LR
127
- :param smallest_scales_num: int or None, number of smallest scales to take at input
128
- :return: Depending on return_only_hr:
129
- True: Only the most HR output
130
- False: List of outputs of different resolutions from HR to LR
131
- """
132
- if smallest_scales_num is None:
133
- assert len(self.heads) == len(ms_inputs), (len(self.heads), len(ms_inputs), smallest_scales_num)
134
- smallest_scales_num = len(self.heads)
135
- else:
136
- assert smallest_scales_num == len(ms_inputs) <= len(self.heads), (len(self.heads), len(ms_inputs), smallest_scales_num)
137
-
138
- cur_heads = self.heads[-smallest_scales_num:]
139
- ms_features = [cur_head(cur_inp) for cur_head, cur_inp in zip(cur_heads, ms_inputs)]
140
-
141
- all_outputs = []
142
- prev_tail_features = None
143
- for i in range(len(ms_features)):
144
- scale_i = -i - 1
145
-
146
- cur_tail_input = ms_features[-i - 1]
147
- if prev_tail_features is not None:
148
- if prev_tail_features.shape != cur_tail_input.shape:
149
- prev_tail_features = F.interpolate(prev_tail_features, size=cur_tail_input.shape[2:],
150
- mode='bilinear', align_corners=False)
151
- cur_tail_input = torch.cat((cur_tail_input, prev_tail_features), dim=1)
152
-
153
- cur_out, cur_tail_feats = self.tails[scale_i](cur_tail_input, return_last_act=True)
154
-
155
- prev_tail_features = cur_tail_feats
156
- all_outputs.append(cur_out)
157
-
158
- if self.out_cumulative:
159
- all_outputs_cum = [all_outputs[0]]
160
- for i in range(1, len(ms_features)):
161
- cur_out = all_outputs[i]
162
- cur_out_cum = cur_out + F.interpolate(all_outputs_cum[-1], size=cur_out.shape[2:],
163
- mode='bilinear', align_corners=False)
164
- all_outputs_cum.append(cur_out_cum)
165
- all_outputs = all_outputs_cum
166
-
167
- if self.return_only_hr:
168
- return all_outputs[-1]
169
- else:
170
- return all_outputs[::-1]
171
-
172
-
173
- class MultiscaleDiscriminatorSimple(nn.Module):
174
- def __init__(self, ms_impl):
175
- super().__init__()
176
- self.ms_impl = nn.ModuleList(ms_impl)
177
-
178
- @property
179
- def num_scales(self):
180
- return len(self.ms_impl)
181
-
182
- def forward(self, ms_inputs: List[torch.Tensor], smallest_scales_num: Optional[int] = None) \
183
- -> List[Tuple[torch.Tensor, List[torch.Tensor]]]:
184
- """
185
- :param ms_inputs: List of inputs of different resolutions from HR to LR
186
- :param smallest_scales_num: int or None, number of smallest scales to take at input
187
- :return: List of pairs (prediction, features) for different resolutions from HR to LR
188
- """
189
- if smallest_scales_num is None:
190
- assert len(self.ms_impl) == len(ms_inputs), (len(self.ms_impl), len(ms_inputs), smallest_scales_num)
191
- smallest_scales_num = len(self.ms_impl)
192
- else:
193
- assert smallest_scales_num == len(ms_inputs) <= len(self.ms_impl), \
194
- (len(self.ms_impl), len(ms_inputs), smallest_scales_num)
195
-
196
- return [cur_discr(cur_input) for cur_discr, cur_input in zip(self.ms_impl[-smallest_scales_num:], ms_inputs)]
197
-
198
-
199
- class SingleToMultiScaleInputMixin:
200
- def forward(self, x: torch.Tensor) -> List:
201
- orig_height, orig_width = x.shape[2:]
202
- factors = [2 ** i for i in range(self.num_scales)]
203
- ms_inputs = [F.interpolate(x, size=(orig_height // f, orig_width // f), mode='bilinear', align_corners=False)
204
- for f in factors]
205
- return super().forward(ms_inputs)
206
-
207
-
208
- class GeneratorMultiToSingleOutputMixin:
209
- def forward(self, x):
210
- return super().forward(x)[0]
211
-
212
-
213
- class DiscriminatorMultiToSingleOutputMixin:
214
- def forward(self, x):
215
- out_feat_tuples = super().forward(x)
216
- return out_feat_tuples[0][0], [f for _, flist in out_feat_tuples for f in flist]
217
-
218
-
219
- class DiscriminatorMultiToSingleOutputStackedMixin:
220
- def __init__(self, *args, return_feats_only_levels=None, **kwargs):
221
- super().__init__(*args, **kwargs)
222
- self.return_feats_only_levels = return_feats_only_levels
223
-
224
- def forward(self, x):
225
- out_feat_tuples = super().forward(x)
226
- outs = [out for out, _ in out_feat_tuples]
227
- scaled_outs = [outs[0]] + [F.interpolate(cur_out, size=outs[0].shape[-2:],
228
- mode='bilinear', align_corners=False)
229
- for cur_out in outs[1:]]
230
- out = torch.cat(scaled_outs, dim=1)
231
- if self.return_feats_only_levels is not None:
232
- feat_lists = [out_feat_tuples[i][1] for i in self.return_feats_only_levels]
233
- else:
234
- feat_lists = [flist for _, flist in out_feat_tuples]
235
- feats = [f for flist in feat_lists for f in flist]
236
- return out, feats
237
-
238
-
239
- class MultiscaleDiscrSingleInput(SingleToMultiScaleInputMixin, DiscriminatorMultiToSingleOutputStackedMixin, MultiscaleDiscriminatorSimple):
240
- pass
241
-
242
-
243
- class MultiscaleResNetSingle(GeneratorMultiToSingleOutputMixin, SingleToMultiScaleInputMixin, MultiscaleResNet):
244
- pass
 
spaces/CVPR/regionclip-demo/detectron2/data/datasets/lvis.py DELETED
@@ -1,357 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
- import logging
3
- import os
4
- from fvcore.common.timer import Timer
5
-
6
- from detectron2.data import DatasetCatalog, MetadataCatalog
7
- from detectron2.structures import BoxMode
8
- from detectron2.utils.file_io import PathManager
9
-
10
- from .builtin_meta import _get_coco_instances_meta
11
- from .lvis_v0_5_categories import LVIS_CATEGORIES as LVIS_V0_5_CATEGORIES
12
- from .lvis_v1_categories import LVIS_CATEGORIES as LVIS_V1_CATEGORIES
13
-
14
- import torch
15
- import numpy as np
16
- """
17
- This file contains functions to parse LVIS-format annotations into dicts in the
18
- "Detectron2 format".
19
- """
20
-
21
- logger = logging.getLogger(__name__)
22
-
23
- __all__ = ["load_lvis_json", "register_lvis_instances", "get_lvis_instances_meta"]
24
-
25
-
26
- def register_lvis_instances(name, metadata, json_file, image_root):
27
- """
28
- Register a dataset in LVIS's json annotation format for instance detection and segmentation.
29
-
30
- Args:
31
- name (str): a name that identifies the dataset, e.g. "lvis_v0.5_train".
32
- metadata (dict): extra metadata associated with this dataset. It can be an empty dict.
33
- json_file (str): path to the json instance annotation file.
34
- image_root (str or path-like): directory which contains all the images.
35
- """
36
- DatasetCatalog.register(name, lambda: load_lvis_json(json_file, image_root, name))
37
- MetadataCatalog.get(name).set(
38
- json_file=json_file, image_root=image_root, evaluator_type="lvis", **metadata
39
- )
40
-
41
-
42
- def load_lvis_json_original(json_file, image_root, dataset_name=None, filter_open_cls=True, clip_gt_crop=True, max_gt_per_img=500):
43
- """
44
- Load a json file in LVIS's annotation format.
45
-
46
- Args:
47
- json_file (str): full path to the LVIS json annotation file.
48
- image_root (str): the directory where the images in this json file exists.
49
- dataset_name (str): the name of the dataset (e.g., "lvis_v0.5_train").
50
- If provided, this function will put "thing_classes" into the metadata
51
- associated with this dataset.
52
- filter_open_cls: open-set setting, filter the open-set categories during training
53
- clip_gt_crop: must filter images with empty annotations or too many GT bbox,
54
- even if in testing (eg, use CLIP on GT regions)
55
- Returns:
56
- list[dict]: a list of dicts in Detectron2 standard format. (See
57
- `Using Custom Datasets </tutorials/datasets.html>`_ )
58
-
59
- Notes:
60
- 1. This function does not read the image files.
61
- The results do not have the "image" field.
62
- """
63
- from lvis import LVIS
64
-
65
- if 'train' in dataset_name: #'zeroshot' in dataset_name and 'train' in dataset_name: # openset setting, filter the novel classes during training
66
- filter_open_cls = True
67
- else:
68
- filter_open_cls = False
69
-
70
- json_file = PathManager.get_local_path(json_file)
71
-
72
- timer = Timer()
73
- lvis_api = LVIS(json_file)
74
- if timer.seconds() > 1:
75
- logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds()))
76
-
77
- if dataset_name is not None:
78
- meta = get_lvis_instances_meta(dataset_name)
79
- MetadataCatalog.get(dataset_name).set(**meta)
80
-
81
- # sort indices for reproducible results
82
- img_ids = sorted(lvis_api.imgs.keys())
83
- # imgs is a list of dicts, each looks something like:
84
- # {'license': 4,
85
- # 'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg',
86
- # 'file_name': 'COCO_val2014_000000001268.jpg',
87
- # 'height': 427,
88
- # 'width': 640,
89
- # 'date_captured': '2013-11-17 05:57:24',
90
- # 'id': 1268}
91
- imgs = lvis_api.load_imgs(img_ids)
92
- # anns is a list[list[dict]], where each dict is an annotation
93
- # record for an object. The inner list enumerates the objects in an image
94
- # and the outer list enumerates over images. Example of anns[0]:
95
- # [{'segmentation': [[192.81,
96
- # 247.09,
97
- # ...
98
- # 219.03,
99
- # 249.06]],
100
- # 'area': 1035.749,
101
- # 'image_id': 1268,
102
- # 'bbox': [192.81, 224.8, 74.73, 33.43],
103
- # 'category_id': 16,
104
- # 'id': 42986},
105
- # ...]
106
- anns = [lvis_api.img_ann_map[img_id] for img_id in img_ids]
107
-
108
- # Sanity check that each annotation has a unique id
109
- ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image]
110
- assert len(set(ann_ids)) == len(ann_ids), "Annotation ids in '{}' are not unique".format(
111
- json_file
112
- )
113
-
114
- imgs_anns = list(zip(imgs, anns))
115
-
116
- logger.info("Loaded {} images in the LVIS format from {}".format(len(imgs_anns), json_file))
117
-
118
- def get_file_name(img_root, img_dict):
119
- # Determine the path including the split folder ("train2017", "val2017", "test2017") from
120
- # the coco_url field. Example:
121
- # 'coco_url': 'http://images.cocodataset.org/train2017/000000155379.jpg'
122
- split_folder, file_name = img_dict["coco_url"].split("/")[-2:]
123
- return os.path.join(img_root + split_folder, file_name)
124
-
125
- dataset_dicts = []
126
- cls_type_dict = {cls_meta['id']: cls_meta['frequency'] for cls_meta in lvis_api.dataset['categories']} # map cls id to cls type
127
- area_dict = {'r': [], 'c': [], 'f': []} # calculate box area for each type of class
128
- # import os
129
- # from PIL import Image
130
- # custom_img_path = 'datasets/epic_sample_frames'
131
- # custom_img_list = [os.path.join(custom_img_path, item) for item in os.listdir(custom_img_path)]
132
- # cnt = 0
133
- for (img_dict, anno_dict_list) in imgs_anns:
134
- record = {}
135
- record["file_name"] = get_file_name(image_root, img_dict)
136
- # record["file_name"] = custom_img_list[cnt]; cnt += 1;
137
- # if cnt == 46:
138
- # break # get_file_name(image_root, img_dict)
139
- # img_file = Image.open(record["file_name"])
140
- record["height"] = img_dict["height"]
141
- record["width"] = img_dict["width"]
142
- # record["height"] = img_file.size[1] # img_dict["height"]
143
- # record["width"] = img_file.size[0] # img_dict["width"]
144
- record["not_exhaustive_category_ids"] = img_dict.get("not_exhaustive_category_ids", [])
145
- record["neg_category_ids"] = img_dict.get("neg_category_ids", [])
146
- image_id = record["image_id"] = img_dict["id"]
147
-
148
- objs = []
149
- for anno in anno_dict_list:
150
- # Check that the image_id in this annotation is the same as
151
- # the image_id we're looking at.
152
- # This fails only when the data parsing logic or the annotation file is buggy.
153
- assert anno["image_id"] == image_id
154
- obj = {"bbox": anno["bbox"], "bbox_mode": BoxMode.XYWH_ABS}
155
- # LVIS data loader can be used to load COCO dataset categories. In this case `meta`
156
- # variable will have a field with COCO-specific category mapping.
157
- if dataset_name is not None and "thing_dataset_id_to_contiguous_id" in meta:
158
- obj["category_id"] = meta["thing_dataset_id_to_contiguous_id"][anno["category_id"]]
159
- else:
160
- obj["category_id"] = anno["category_id"] - 1 # Convert 1-indexed to 0-indexed
161
- obj['frequency'] = cls_type_dict[anno["category_id"]] # used for open-set filtering
162
- if filter_open_cls: # filter categories for open-set training
163
- if obj['frequency'] == 'r':
164
- continue
165
- area_dict[obj['frequency']].append(anno["bbox"][2] * anno["bbox"][3])
166
-
167
- segm = anno["segmentation"] # list[list[float]]
168
- # filter out invalid polygons (< 3 points)
169
- valid_segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6]
170
- assert len(segm) == len(
171
- valid_segm
172
- ), "Annotation contains an invalid polygon with < 3 points"
173
- assert len(segm) > 0
174
- obj["segmentation"] = segm
175
- objs.append(obj)
176
- if (filter_open_cls or clip_gt_crop) and len(objs) == 0: # no annotation for this image
177
- continue
178
- record["annotations"] = objs
179
- dataset_dicts.append(record)
180
-
181
- # For the training in open-set setting, map original category id to new category id number (base categories)
182
- if filter_open_cls:
183
- # get new category id in order
184
- old_to_new = {}
185
- for i in range(len(cls_type_dict)):
186
- if cls_type_dict[i+1] != 'r': # cls_type_dict is 1-indexed
187
- old_to_new[i] = len(old_to_new)
188
- # map annotation to new category id
189
- for record in dataset_dicts:
190
- record.pop('not_exhaustive_category_ids') # won't be used
191
- record.pop('neg_category_ids') # won't be used
192
- for obj in record['annotations']:
193
- obj['category_id'] = old_to_new[obj['category_id']] # 0-indexed id
194
- assert obj['frequency'] != 'r'
195
- logger.info("\n\nModel will be trained in the open-set setting! {} / {} categories are kept.\n".format(len(old_to_new),len(cls_type_dict)))
196
- # calculate box area for each type of class
197
- area_lst = np.array([0, 400, 1600, 2500, 5000, 10000, 22500, 224 * 224, 90000, 160000, 1e8])
198
- # rare_cls = np.histogram(np.array(area_dict['r']), bins=area_lst)[0]
199
- # common_cls = np.histogram(np.array(area_dict['c']), bins=area_lst)[0]
200
- # freq_cls = np.histogram(np.array(area_dict['f']), bins=area_lst)[0]
201
- # print("rare classes: {}; \ncommon classes: {}; \nfrequent classes: {}".format(rare_cls/rare_cls.sum()*100, common_cls/common_cls.sum()*100, freq_cls/freq_cls.sum()*100))
202
- # # apply CLIP on GT regions: some images has large number of GT bbox (eg, 759), remove them, otherwise, OOM
203
- if clip_gt_crop:
204
- # len_num = sorted([len(item["annotations"]) for item in dataset_dicts], reverse=True)
205
- dataset_dicts = sorted(dataset_dicts, key=lambda x: len(x["annotations"]), reverse=True)
206
- for record in dataset_dicts:
207
- record["annotations"] = record["annotations"][:max_gt_per_img] # only <10 / 20k images in test have >300 GT boxes
208
- #dataset_dicts = sorted(dataset_dicts, key=lambda x: len(x["annotations"]))[:12] #[12000:14000] #
209
- #dataset_dicts = sorted(dataset_dicts, key=lambda x: len(x["annotations"]))[-1200:-1000]
210
- #eval_cls_acc(dataset_dicts, area_lst)
211
- return dataset_dicts
212
-
213
- def load_lvis_json(json_file, image_root, dataset_name=None, filter_open_cls=True, clip_gt_crop=True, max_gt_per_img=500, custom_img_path='datasets/custom_images'):
214
- """
215
- This is a tentative function for loading custom images.
216
- Given a folder of images (eg, 'datasets/custom_images'), load their meta data into a dictionary
217
- """
218
- import os
219
- from PIL import Image
220
- custom_img_list = [os.path.join(custom_img_path, item) for item in os.listdir(custom_img_path)]
221
-
222
- dataset_dicts = []
223
- for f_i, file_name in enumerate(custom_img_list):
224
- record = {}
225
- record["file_name"] = file_name
226
- img_file = Image.open(record["file_name"])
227
- record["height"] = img_file.size[1]
228
- record["width"] = img_file.size[0]
229
- record["image_id"] = f_i
230
-
231
- dataset_dicts.append(record)
232
-
233
- return dataset_dicts
234
-
235
- def eval_cls_acc(dataset_dicts, area_lst):
236
- #pred_file = '/home/v-yiwuzhong/projects/detectron2-open-set/output/rcnn_gt_crop/vit/instances_predictions.pth'
237
- #pred_file = '/home/v-yiwuzhong/projects/azureblobs/vyiwuzhong_phillytools/results/test_CLIP_rcnn_resnet50_crop_regions_perclassnms/inference/instances_predictions.pth'
238
- #pred_file = '/home/v-yiwuzhong/projects/azureblobs/vyiwuzhong_phillytools/results/test_CLIP_rcnn_vitb32_crop_regions_perclassnms/inference/instances_predictions.pth'
239
- #pred_file = '/home/v-yiwuzhong/projects/azureblobs/vyiwuzhong_phillytools/results/test_CLIP_fast_rcnn_resnet50_roifeatmap/inference/instances_predictions.pth'
240
- #pred_file = '/home/v-yiwuzhong/projects/azureblobs/vyiwuzhong_phillytools/results/test_CLIP_fast_rcnn_resnet50_supmrcnnbaselinefpn/inference/instances_predictions.pth'
241
- #pred_file = '/home/v-yiwuzhong/projects/azureblobs/vyiwuzhong_phillytools/results/test_CLIP_fast_rcnn_resnet50_supmrcnnbaselinec4/inference/instances_predictions.pth'
242
- pred_file = '/home/v-yiwuzhong/projects/azureblobs/vyiwuzhong_phillytools/results/test_CLIP_fast_rcnn_resnet50_e1-3-3gtbox/inference/instances_predictions.pth'
243
- predictions = torch.load(pred_file)
244
- correct = 0
245
- wrong = 0
246
- area_threshold = area_lst[1:-1] # np.array([400, 1600, 2500, 5000, 10000, 22500, 224 * 224, 90000, 160000])
247
- acc_list = [[0, 0] for i in range(area_threshold.shape[0] + 1)]
248
- small_cnt = 0
249
- for preds, gts in zip(predictions, dataset_dicts):
250
- assert preds['image_id'] == gts['image_id'] # same image
251
- #assert len(preds['instances']) == len(gts['annotations'])
252
- box_seen = {} # keep a set for the predicted boxes that have been checked
253
- for pred, gt in zip(preds['instances'], gts['annotations']):
254
- if pred['bbox'][0] in box_seen: # duplicate box due to perclass NMS
255
- continue
256
- else:
257
- box_seen[pred['bbox'][0]] = 1
258
- if np.sum(np.array(pred['bbox']) - np.array(gt['bbox'])) < 1.0: # same box
259
- pass
260
- else: # has been NMS and shuffled
261
- for gt in gts['annotations']:
262
- if np.sum(np.array(pred['bbox']) - np.array(gt['bbox'])) < 1.0: # same box
263
- break
264
- assert np.sum(np.array(pred['bbox']) - np.array(gt['bbox'])) < 1.0 # same box
265
- this_area = gt['bbox'][2] * gt['bbox'][3]
266
- block = (area_threshold < this_area).nonzero()[0].shape[0]
267
- if pred['category_id'] == gt['category_id']: # matched
268
- correct += 1
269
- acc_list[block][0] += 1
270
- else:
271
- wrong += 1
272
- acc_list[block][1] += 1
273
-
274
- print("\n\nGot correct {} and wrong {}. Accuracy is {} / {} = {}\n\n".format(correct,wrong,correct,correct+wrong,correct/(correct+wrong)))
275
- block_acc = [100 * acc_list[i][0] / (acc_list[i][0] + acc_list[i][1]) for i in range(len(acc_list))]
276
- block_acc = [round(i, 1) for i in block_acc]
277
- print("Block accuracy: {}".format(block_acc))
278
- block_num = [acc_list[i][0] + acc_list[i][1] for i in range(len(acc_list))]
279
- block_num = list(block_num / np.sum(block_num) * 100)
280
- block_num = [round(i, 1) for i in block_num]
281
- print("Block #instances: {}".format(block_num))
282
- return
283
-
284
- def get_lvis_instances_meta(dataset_name):
285
- """
286
- Load LVIS metadata.
287
-
288
- Args:
289
- dataset_name (str): LVIS dataset name without the split name (e.g., "lvis_v0.5").
290
-
291
- Returns:
292
- dict: LVIS metadata with keys: thing_classes
293
- """
294
- if "cocofied" in dataset_name:
295
- return _get_coco_instances_meta()
296
- if "v0.5" in dataset_name:
297
- return _get_lvis_instances_meta_v0_5()
298
- elif "v1" in dataset_name:
299
- return _get_lvis_instances_meta_v1()
300
- raise ValueError("No built-in metadata for dataset {}".format(dataset_name))
301
-
302
-
303
- def _get_lvis_instances_meta_v0_5():
304
- assert len(LVIS_V0_5_CATEGORIES) == 1230
305
- cat_ids = [k["id"] for k in LVIS_V0_5_CATEGORIES]
306
- assert min(cat_ids) == 1 and max(cat_ids) == len(
307
- cat_ids
308
- ), "Category ids are not in [1, #categories], as expected"
309
- # Ensure that the category list is sorted by id
310
- lvis_categories = sorted(LVIS_V0_5_CATEGORIES, key=lambda x: x["id"])
311
- thing_classes = [k["synonyms"][0] for k in lvis_categories]
312
- meta = {"thing_classes": thing_classes}
313
- return meta
314
-
315
-
316
- def _get_lvis_instances_meta_v1():
317
- assert len(LVIS_V1_CATEGORIES) == 1203
318
- cat_ids = [k["id"] for k in LVIS_V1_CATEGORIES]
319
- assert min(cat_ids) == 1 and max(cat_ids) == len(
320
- cat_ids
321
- ), "Category ids are not in [1, #categories], as expected"
322
- # Ensure that the category list is sorted by id
323
- lvis_categories = sorted(LVIS_V1_CATEGORIES, key=lambda x: x["id"])
324
- thing_classes = [k["synonyms"][0] for k in lvis_categories]
325
- meta = {"thing_classes": thing_classes}
326
- return meta
327
-
328
-
329
- if __name__ == "__main__":
330
- """
331
- Test the LVIS json dataset loader.
332
-
333
- Usage:
334
- python -m detectron2.data.datasets.lvis \
335
- path/to/json path/to/image_root dataset_name vis_limit
336
- """
337
- import sys
338
- import numpy as np
339
- from detectron2.utils.logger import setup_logger
340
- from PIL import Image
341
- import detectron2.data.datasets # noqa # add pre-defined metadata
342
- from detectron2.utils.visualizer import Visualizer
343
-
344
- logger = setup_logger(name=__name__)
345
- meta = MetadataCatalog.get(sys.argv[3])
346
-
347
- dicts = load_lvis_json(sys.argv[1], sys.argv[2], sys.argv[3])
348
- logger.info("Done loading {} samples.".format(len(dicts)))
349
-
350
- dirname = "lvis-data-vis"
351
- os.makedirs(dirname, exist_ok=True)
352
- for d in dicts[: int(sys.argv[4])]:
353
- img = np.array(Image.open(d["file_name"]))
354
- visualizer = Visualizer(img, metadata=meta)
355
- vis = visualizer.draw_dataset_dict(d)
356
- fpath = os.path.join(dirname, os.path.basename(d["file_name"]))
357
- vis.save(fpath)
 
spaces/Cecil8352/vits-models/attentions.py DELETED
@@ -1,300 +0,0 @@
1
- import math
2
- import torch
3
- from torch import nn
4
- from torch.nn import functional as F
5
-
6
- import commons
7
- from modules import LayerNorm
8
-
9
-
10
- class Encoder(nn.Module):
11
- def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs):
12
- super().__init__()
13
- self.hidden_channels = hidden_channels
14
- self.filter_channels = filter_channels
15
- self.n_heads = n_heads
16
- self.n_layers = n_layers
17
- self.kernel_size = kernel_size
18
- self.p_dropout = p_dropout
19
- self.window_size = window_size
20
-
21
- self.drop = nn.Dropout(p_dropout)
22
- self.attn_layers = nn.ModuleList()
23
- self.norm_layers_1 = nn.ModuleList()
24
- self.ffn_layers = nn.ModuleList()
25
- self.norm_layers_2 = nn.ModuleList()
26
- for i in range(self.n_layers):
27
- self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size))
28
- self.norm_layers_1.append(LayerNorm(hidden_channels))
29
- self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout))
30
- self.norm_layers_2.append(LayerNorm(hidden_channels))
31
-
32
- def forward(self, x, x_mask):
33
- attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
34
- x = x * x_mask
35
- for i in range(self.n_layers):
36
- y = self.attn_layers[i](x, x, attn_mask)
37
- y = self.drop(y)
38
- x = self.norm_layers_1[i](x + y)
39
-
40
- y = self.ffn_layers[i](x, x_mask)
41
- y = self.drop(y)
42
- x = self.norm_layers_2[i](x + y)
43
- x = x * x_mask
44
- return x
45
-
46
-
47
- class Decoder(nn.Module):
48
- def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs):
49
- super().__init__()
50
- self.hidden_channels = hidden_channels
51
- self.filter_channels = filter_channels
52
- self.n_heads = n_heads
53
- self.n_layers = n_layers
54
- self.kernel_size = kernel_size
55
- self.p_dropout = p_dropout
56
- self.proximal_bias = proximal_bias
57
- self.proximal_init = proximal_init
58
-
59
- self.drop = nn.Dropout(p_dropout)
60
- self.self_attn_layers = nn.ModuleList()
61
- self.norm_layers_0 = nn.ModuleList()
62
- self.encdec_attn_layers = nn.ModuleList()
63
- self.norm_layers_1 = nn.ModuleList()
64
- self.ffn_layers = nn.ModuleList()
65
- self.norm_layers_2 = nn.ModuleList()
66
- for i in range(self.n_layers):
67
- self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init))
68
- self.norm_layers_0.append(LayerNorm(hidden_channels))
69
- self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout))
70
- self.norm_layers_1.append(LayerNorm(hidden_channels))
71
- self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True))
72
- self.norm_layers_2.append(LayerNorm(hidden_channels))
73
-
74
- def forward(self, x, x_mask, h, h_mask):
75
- """
76
- x: decoder input
77
- h: encoder output
78
- """
79
- self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype)
80
- encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
81
- x = x * x_mask
82
- for i in range(self.n_layers):
83
- y = self.self_attn_layers[i](x, x, self_attn_mask)
84
- y = self.drop(y)
85
- x = self.norm_layers_0[i](x + y)
86
-
87
- y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
88
- y = self.drop(y)
89
- x = self.norm_layers_1[i](x + y)
90
-
91
- y = self.ffn_layers[i](x, x_mask)
92
- y = self.drop(y)
93
- x = self.norm_layers_2[i](x + y)
94
- x = x * x_mask
95
- return x
96
-
97
-
98
- class MultiHeadAttention(nn.Module):
99
- def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False):
100
- super().__init__()
101
- assert channels % n_heads == 0
102
-
103
- self.channels = channels
104
- self.out_channels = out_channels
105
- self.n_heads = n_heads
106
- self.p_dropout = p_dropout
107
- self.window_size = window_size
108
- self.heads_share = heads_share
109
- self.block_length = block_length
110
- self.proximal_bias = proximal_bias
111
- self.proximal_init = proximal_init
112
- self.attn = None
113
-
114
- self.k_channels = channels // n_heads
115
- self.conv_q = nn.Conv1d(channels, channels, 1)
116
- self.conv_k = nn.Conv1d(channels, channels, 1)
117
- self.conv_v = nn.Conv1d(channels, channels, 1)
118
- self.conv_o = nn.Conv1d(channels, out_channels, 1)
119
- self.drop = nn.Dropout(p_dropout)
120
-
121
- if window_size is not None:
122
- n_heads_rel = 1 if heads_share else n_heads
123
- rel_stddev = self.k_channels**-0.5
124
- self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
125
- self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
126
-
127
- nn.init.xavier_uniform_(self.conv_q.weight)
128
- nn.init.xavier_uniform_(self.conv_k.weight)
129
- nn.init.xavier_uniform_(self.conv_v.weight)
130
- if proximal_init:
131
- with torch.no_grad():
132
- self.conv_k.weight.copy_(self.conv_q.weight)
133
- self.conv_k.bias.copy_(self.conv_q.bias)
134
-
135
- def forward(self, x, c, attn_mask=None):
136
- q = self.conv_q(x)
137
- k = self.conv_k(c)
138
- v = self.conv_v(c)
139
-
140
- x, self.attn = self.attention(q, k, v, mask=attn_mask)
141
-
142
- x = self.conv_o(x)
143
- return x
144
-
145
- def attention(self, query, key, value, mask=None):
146
- # reshape [b, d, t] -> [b, n_h, t, d_k]
147
- b, d, t_s, t_t = (*key.size(), query.size(2))
148
- query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
149
- key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
150
- value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
151
-
152
- scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
153
- if self.window_size is not None:
154
- assert t_s == t_t, "Relative attention is only available for self-attention."
155
- key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
156
- rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings)
157
- scores_local = self._relative_position_to_absolute_position(rel_logits)
158
- scores = scores + scores_local
159
- if self.proximal_bias:
160
- assert t_s == t_t, "Proximal bias is only available for self-attention."
161
- scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
162
- if mask is not None:
163
- scores = scores.masked_fill(mask == 0, -1e4)
164
- if self.block_length is not None:
165
- assert t_s == t_t, "Local attention is only available for self-attention."
166
- block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
167
- scores = scores.masked_fill(block_mask == 0, -1e4)
168
- p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
169
- p_attn = self.drop(p_attn)
170
- output = torch.matmul(p_attn, value)
171
- if self.window_size is not None:
172
- relative_weights = self._absolute_position_to_relative_position(p_attn)
173
- value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
174
- output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings)
175
- output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t]
176
- return output, p_attn
177
-
178
- def _matmul_with_relative_values(self, x, y):
179
- """
180
- x: [b, h, l, m]
181
- y: [h or 1, m, d]
182
- ret: [b, h, l, d]
183
- """
184
- ret = torch.matmul(x, y.unsqueeze(0))
185
- return ret
186
-
187
- def _matmul_with_relative_keys(self, x, y):
188
- """
189
- x: [b, h, l, d]
190
- y: [h or 1, m, d]
191
- ret: [b, h, l, m]
192
- """
193
- ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
194
- return ret
195
-
196
- def _get_relative_embeddings(self, relative_embeddings, length):
197
- max_relative_position = 2 * self.window_size + 1
198
- # Pad first before slice to avoid using cond ops.
199
- pad_length = max(length - (self.window_size + 1), 0)
200
- slice_start_position = max((self.window_size + 1) - length, 0)
201
- slice_end_position = slice_start_position + 2 * length - 1
202
- if pad_length > 0:
203
- padded_relative_embeddings = F.pad(
204
- relative_embeddings,
205
- commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
206
- else:
207
- padded_relative_embeddings = relative_embeddings
208
- used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position]
209
- return used_relative_embeddings
210
-
211
- def _relative_position_to_absolute_position(self, x):
212
- """
213
- x: [b, h, l, 2*l-1]
214
- ret: [b, h, l, l]
215
- """
216
- batch, heads, length, _ = x.size()
217
- # Concat columns of pad to shift from relative to absolute indexing.
218
- x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]]))
219
-
220
- # Concat extra elements so as to add up to shape (len+1, 2*len-1).
221
- x_flat = x.view([batch, heads, length * 2 * length])
222
- x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]]))
223
-
224
- # Reshape and slice out the padded elements.
225
- x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:]
226
- return x_final
227
-
228
- def _absolute_position_to_relative_position(self, x):
229
- """
230
- x: [b, h, l, l]
231
- ret: [b, h, l, 2*l-1]
232
- """
233
- batch, heads, length, _ = x.size()
234
- # pad along the column dimension
235
- x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]]))
236
- x_flat = x.view([batch, heads, length**2 + length*(length -1)])
237
- # add 0's in the beginning that will skew the elements after reshape
238
- x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
239
- x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:]
240
- return x_final
241
-
242
- def _attention_bias_proximal(self, length):
243
- """Bias for self-attention to encourage attention to close positions.
244
- Args:
245
- length: an integer scalar.
246
- Returns:
247
- a Tensor with shape [1, 1, length, length]
248
- """
249
- r = torch.arange(length, dtype=torch.float32)
250
- diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
251
- return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
252
-
253
-
254
- class FFN(nn.Module):
-     def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False):
-         super().__init__()
-         self.in_channels = in_channels
-         self.out_channels = out_channels
-         self.filter_channels = filter_channels
-         self.kernel_size = kernel_size
-         self.p_dropout = p_dropout
-         self.activation = activation
-         self.causal = causal
-
-         if causal:
-             self.padding = self._causal_padding
-         else:
-             self.padding = self._same_padding
-
-         self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
-         self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
-         self.drop = nn.Dropout(p_dropout)
-
-     def forward(self, x, x_mask):
-         x = self.conv_1(self.padding(x * x_mask))
-         if self.activation == "gelu":
-             # Sigmoid approximation of GELU: x * sigmoid(1.702 * x).
-             x = x * torch.sigmoid(1.702 * x)
-         else:
-             x = torch.relu(x)
-         x = self.drop(x)
-         x = self.conv_2(self.padding(x * x_mask))
-         return x * x_mask
-
-     def _causal_padding(self, x):
-         if self.kernel_size == 1:
-             return x
-         pad_l = self.kernel_size - 1
-         pad_r = 0
-         padding = [[0, 0], [0, 0], [pad_l, pad_r]]
-         x = F.pad(x, commons.convert_pad_shape(padding))
-         return x
-
-     def _same_padding(self, x):
-         if self.kernel_size == 1:
-             return x
-         pad_l = (self.kernel_size - 1) // 2
-         pad_r = self.kernel_size // 2
-         padding = [[0, 0], [0, 0], [pad_l, pad_r]]
-         x = F.pad(x, commons.convert_pad_shape(padding))
-         return x
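A standalone illustration of the two padding schemes used by FFN above. It uses F.pad with a flat pad list directly rather than the FFN class itself (which depends on the commons helper not shown here); the sizes are arbitrary.

import torch
import torch.nn as nn
import torch.nn.functional as F

kernel_size = 3
conv = nn.Conv1d(8, 8, kernel_size)
x = torch.randn(2, 8, 10)            # [batch, channels, time]

# "same" padding: split the padding between left and right
pad_l, pad_r = (kernel_size - 1) // 2, kernel_size // 2
y_same = conv(F.pad(x, [pad_l, pad_r]))
print(y_same.shape)                  # torch.Size([2, 8, 10])

# causal padding: put all of it on the left, so output t sees only inputs <= t
y_causal = conv(F.pad(x, [kernel_size - 1, 0]))
print(y_causal.shape)                # torch.Size([2, 8, 10])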
 
spaces/CofAI/chat/client/css/style.css DELETED
@@ -1,18 +0,0 @@
- @import "./global.css";
- @import "./hljs.css";
- @import "./main.css";
- @import "./sidebar.css";
- @import "./conversation.css";
- @import "./message.css";
- @import "./stop-generating.css";
- @import "./typing.css";
- @import "./checkbox.css";
- @import "./label.css";
- @import "./button.css";
- @import "./buttons.css";
- @import "./dropdown.css";
- @import "./field.css";
- @import "./select.css";
- @import "./options.css";
- @import "./settings.css";
- @import "./message-input.css";
 
spaces/Cong723/gpt-academic-public/crazy_functions/代码重写为全英文_多线程.py DELETED
@@ -1,138 +0,0 @@
- import threading
- from request_llm.bridge_all import predict_no_ui_long_connection
- from toolbox import update_ui
- from toolbox import CatchException, write_results_to_file, report_execption
- from .crazy_utils import breakdown_txt_to_satisfy_token_limit
-
- def extract_code_block_carefully(txt):
-     splitted = txt.split('```')
-     n_code_block_seg = len(splitted) - 1
-     if n_code_block_seg <= 1: return txt
-     # Otherwise strip the opening ``` and the closing ``` once each.
-     txt_out = '```'.join(splitted[1:-1])
-     return txt_out
-
-
- def break_txt_into_half_at_some_linebreak(txt):
-     lines = txt.split('\n')
-     n_lines = len(lines)
-     pre = lines[:(n_lines//2)]
-     post = lines[(n_lines//2):]
-     return "\n".join(pre), "\n".join(post)
-
-
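A quick illustration of what extract_code_block_carefully returns; the example string is made up for this sketch.

reply = "Sure, here you go:\n```\nprint('hello')\n```\nLet me know if it works."
parts = reply.split('```')          # ['Sure, here you go:\n', "\nprint('hello')\n", '\nLet me know if it works.']
result = '```'.join(parts[1:-1])    # "\nprint('hello')\n"  -- only the fenced body survives
# Note: a language tag such as ```python would remain at the start of the result,
# and text with zero or one ``` marker is returned unchanged.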
- @CatchException
- def 全项目切换英文(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys_prompt, web_port):
-     # Step 1: clear the history to avoid input overflow.
-     history = []
-
-     # Step 2: try to import the extra dependency; if it is missing, suggest how to install it.
-     try:
-         import tiktoken
-     except:
-         report_execption(chatbot, history,
-                          a=f"解析项目: {txt}",
-                          b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
-         yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
-         return
-
-     # Step 3: collect the files.
-     import time, glob, os, shutil, re
-     os.makedirs('gpt_log/generated_english_version', exist_ok=True)
-     os.makedirs('gpt_log/generated_english_version/crazy_functions', exist_ok=True)
-     file_manifest = [f for f in glob.glob('./*.py') if ('test_project' not in f) and ('gpt_log' not in f)] + \
-                     [f for f in glob.glob('./crazy_functions/*.py') if ('test_project' not in f) and ('gpt_log' not in f)]
-     # file_manifest = ['./toolbox.py']
-     i_say_show_user_buffer = []
-
-     # Step 4: show something right away so the UI does not feel frozen.
-     for index, fp in enumerate(file_manifest):
-         # if 'test_project' in fp: continue
-         with open(fp, 'r', encoding='utf-8', errors='replace') as f:
-             file_content = f.read()
-         i_say_show_user = f'[{index}/{len(file_manifest)}] 接下来请将以下代码中包含的所有中文转化为英文,只输出转化后的英文代码,请用代码块输出代码: {os.path.abspath(fp)}'
-         i_say_show_user_buffer.append(i_say_show_user)
-         chatbot.append((i_say_show_user, "[Local Message] 等待多线程操作,中间过程不予显示."))
-         yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
-
-
-     # Step 5: truncate and process under the token limit.
-     MAX_TOKEN = 3000
-     from request_llm.bridge_all import model_info
-     enc = model_info["gpt-3.5-turbo"]['tokenizer']
-     def get_token_fn(txt): return len(enc.encode(txt, disallowed_special=()))
-
-
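breakdown_txt_to_satisfy_token_limit is imported from crazy_utils and is not shown in this diff. A minimal sketch of the idea, assuming it recursively halves the text at a line break until every piece fits the limit (names and logic are illustrative guesses, not the real helper):

def breakdown_to_fit(txt, get_token_fn, limit):
    # Hypothetical re-implementation for illustration; the real helper lives in crazy_utils.
    if get_token_fn(txt) <= limit:
        return [txt]
    pre, post = break_txt_into_half_at_some_linebreak(txt)
    # (the real helper also needs a guard for a single un-splittable line)
    return breakdown_to_fit(pre, get_token_fn, limit) + breakdown_to_fit(post, get_token_fn, limit)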
-     # Step 6: the per-file worker function.
-     mutable_return = [None for _ in file_manifest]
-     observe_window = [[""] for _ in file_manifest]
-     def thread_worker(fp, index):
-         if index > 10:
-             time.sleep(60)
-             print('Openai 限制免费用户每分钟20次请求,降低请求频率中。')
-         with open(fp, 'r', encoding='utf-8', errors='replace') as f:
-             file_content = f.read()
-         i_say_template = lambda fp, file_content: f'接下来请将以下代码中包含的所有中文转化为英文,只输出代码,文件名是{fp},文件代码是 ```{file_content}```'
-         try:
-             gpt_say = ""
-             # Split the code file into pieces that fit the token limit.
-             file_content_breakdown = breakdown_txt_to_satisfy_token_limit(file_content, get_token_fn, MAX_TOKEN)
-             for file_content_partial in file_content_breakdown:
-                 i_say = i_say_template(fp, file_content_partial)
-                 # ** gpt request **
-                 gpt_say_partial = predict_no_ui_long_connection(inputs=i_say, llm_kwargs=llm_kwargs, history=[], sys_prompt=sys_prompt, observe_window=observe_window[index])
-                 gpt_say_partial = extract_code_block_carefully(gpt_say_partial)
-                 gpt_say += gpt_say_partial
-             mutable_return[index] = gpt_say
-         except ConnectionAbortedError as token_exceed_err:
-             print('至少一个线程任务Token溢出而失败', token_exceed_err)
-         except Exception as e:
-             print('至少一个线程任务意外失败', e)
-
-     # Step 7: start all worker threads at the same time.
-     handles = [threading.Thread(target=thread_worker, args=(fp, index)) for index, fp in enumerate(file_manifest)]
-     for h in handles:
-         h.daemon = True
-         h.start()
-     chatbot.append(('开始了吗?', f'多线程操作已经开始'))
-     yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
-
-     # Step 8: poll whether every thread has finished.
-     cnt = 0
-     while True:
-         cnt += 1
-         time.sleep(0.2)
-         th_alive = [h.is_alive() for h in handles]
-         if not any(th_alive): break
-         # Nicer visual effect for the UI.
-         observe_win = []
-         for thread_index, alive in enumerate(th_alive):
-             observe_win.append("[ ..." + observe_window[thread_index][0][-60:].replace('\n', '').replace('```', '...').replace(' ', '.').replace('<br/>', '.....').replace('$', '.') + "... ]")
-         stat = [f'执行中: {obs}\n\n' if alive else '已完成\n\n' for alive, obs in zip(th_alive, observe_win)]
-         stat_str = ''.join(stat)
-         chatbot[-1] = (chatbot[-1][0], f'多线程操作已经开始,完成情况: \n\n{stat_str}' + ''.join(['.'] * (cnt % 10 + 1)))
-         yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
-
-     # Step 9: write the results to files.
-     for index, h in enumerate(handles):
-         h.join()  # not strictly needed here; all threads have already finished
-         fp = file_manifest[index]
-         gpt_say = mutable_return[index]
-         i_say_show_user = i_say_show_user_buffer[index]
-
-         where_to_relocate = f'gpt_log/generated_english_version/{fp}'
-         if gpt_say is not None:
-             with open(where_to_relocate, 'w+', encoding='utf-8') as f:
-                 f.write(gpt_say)
-         else:  # the task failed; keep an untranslated copy instead
-             shutil.copyfile(file_manifest[index], where_to_relocate)
-         chatbot.append((i_say_show_user, f'[Local Message] 已完成{os.path.abspath(fp)}的转化,\n\n存入{os.path.abspath(where_to_relocate)}'))
-         history.append(i_say_show_user); history.append(gpt_say)
-         yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
-         time.sleep(1)
-
-     # Step 10: back up a report file.
-     res = write_results_to_file(history)
-     chatbot.append(("生成一份任务执行报告", res))
-     yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
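The threading pattern used in steps 6 through 9 above, reduced to a self-contained sketch: daemon workers write into pre-allocated result slots while the main loop polls is_alive(). Names here are illustrative only.

import threading, time

def run_in_parallel(jobs, worker):
    # 'jobs' is a list of inputs; 'worker' maps one input to one result.
    results = [None] * len(jobs)

    def wrap(i, job):
        results[i] = worker(job)

    handles = [threading.Thread(target=wrap, args=(i, job), daemon=True)
               for i, job in enumerate(jobs)]
    for h in handles:
        h.start()
    while any(h.is_alive() for h in handles):
        time.sleep(0.2)        # a real UI would refresh progress here
    for h in handles:
        h.join()
    return results

# print(run_in_parallel([1, 2, 3], lambda x: x * x))  # [1, 4, 9]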