parquet-converter committed
Commit 913b740 · 1 Parent(s): c2d5f7e

Update parquet files (step 13 of 397)

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the complete list.
Files changed (50)
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Al Amin Accounting Software Crack Keygen The Ultimate Guide for Windows Users.md +0 -152
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/data/EASEUS Partition Master 6.0.1 Server Edition Portable 64 Bit.md +0 -119
  3. spaces/1acneusushi/gradio-2dmoleculeeditor/data/EasyWorship 7 Full Version The Ultimate Solution for Creating and Presenting Worship Media.md +0 -22
  4. spaces/1gistliPinn/ChatGPT4/Examples/Chess Titans Free _HOT_ Download Full Version For Pc.md +0 -48
  5. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Dominos Pizza - Food Delivery APK A Must-Have App for Pizza Lovers.md +0 -113
  6. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Frozen City Mod APK 1.0.6 for Android - Free Purchase.md +0 -87
  7. spaces/1phancelerku/anime-remove-background/Clash Royale Bluestacks Play the Best Strategy Game on Your PC for Free.md +0 -117
  8. spaces/1phancelerku/anime-remove-background/Download Downloader How to Boost Your Download Speeds and Manage Your Files.md +0 -92
  9. spaces/1phancelerku/anime-remove-background/Download Table No. 21 Full Movie in 720p HD Quality from Filmyzilla.md +0 -337
  10. spaces/1phancelerku/anime-remove-background/Download Treasure Mathstorm and Join the Super Solvers in an Amazing Adventure.md +0 -152
  11. spaces/30SecondsToMoon/30SecondsToMoon/README.md +0 -13
  12. spaces/AIFILMS/StyleGANEX/models/mtcnn/mtcnn_pytorch/src/visualization_utils.py +0 -31
  13. spaces/AIWaves/Software_Company/app.py +0 -254
  14. spaces/AIZero2Hero4Health/4-ImageSimilaritySearch-SL/app.py +0 -186
  15. spaces/ASJMO/freegpt/client/css/settings.css +0 -44
  16. spaces/AchyuthGamer/OpenGPT/client/css/message.css +0 -65
  17. spaces/Adapter/T2I-Adapter/ldm/data/utils.py +0 -40
  18. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/spiralcurve-plugin.d.ts +0 -15
  19. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/checkbox/Checkbox.js +0 -2
  20. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/menu/methods/SetTransitCallbackMethods.js +0 -32
  21. spaces/AlexWang/lama/saicinpainting/evaluation/losses/fid/inception.py +0 -323
  22. spaces/AlexWang/lama/saicinpainting/training/visualizers/directory.py +0 -36
  23. spaces/Aloento/9Nine-PITS/text/frontend/normalizer/numbers.py +0 -86
  24. spaces/Andres99/Tune-A-Video-Training-UI/app.py +0 -84
  25. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/unclip_text_interpolation.py +0 -573
  26. spaces/Andy1621/uniformer_image_detection/mmdet/models/detectors/detr.py +0 -46
  27. spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_r18-d8_512x1024_80k_cityscapes.py +0 -9
  28. spaces/Andy1621/uniformerv2_demo/uniformerv2.py +0 -510
  29. spaces/AntiUser/DeepDanbooru_string/app.py +0 -185
  30. spaces/Ariharasudhan/YoloV5/utils/loggers/wandb/wandb_utils.py +0 -589
  31. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/distlib/util.py +0 -1932
  32. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/live.py +0 -375
  33. spaces/Awesimo/jojogan/e4e/models/stylegan2/op/upfirdn2d.py +0 -184
  34. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/export/c10.py +0 -534
  35. spaces/BAAI/AltDiffusion/README.md +0 -13
  36. spaces/Benson/text-generation/Examples/20 Minutos Hasta El Amanecer Descarga Gratuita.md +0 -61
  37. spaces/Billyosoro/ESRGAN/realesrgan/__init__.py +0 -6
  38. spaces/Bradjan310/ehartford-Wizard-Vicuna-30B-Uncensored/app.py +0 -3
  39. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/.github/ISSUE_TEMPLATE/questions-help-support.md +0 -21
  40. spaces/CVPR/LIVE/thrust/thrust/detail/temporary_buffer.h +0 -76
  41. spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/generate.h +0 -57
  42. spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/scatter.h +0 -81
  43. spaces/CVPR/WALT/walt/datasets/pipelines/transforms.py +0 -1861
  44. spaces/CofAI/chat.b4/g4f/Provider/Providers/Theb.py +0 -28
  45. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/attr/_config.py +0 -31
  46. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/dateutil/parser/_parser.py +0 -1613
  47. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/components/radio.py +0 -193
  48. spaces/Defalt-404/Bittensor_Explore/README.md +0 -12
  49. spaces/Detomo/ai-comic-generation/src/app/engine/forbidden.ts +0 -6
  50. spaces/DragGan/DragGan-Inversion/PTI/models/StyleCLIP/models/stylegan2/model.py +0 -674
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Al Amin Accounting Software Crack Keygen The Ultimate Guide for Windows Users.md DELETED
@@ -1,152 +0,0 @@
-
- <h1>Al-Amin Accounting Software: A Comprehensive Solution for Your Business Needs</h1>
- <p>If you are looking for reliable, efficient, and user-friendly accounting software for your business, you might want to consider Al-Amin Accounting Software. Al-Amin Accounting Software is a product of SyrianSoft, a leading software company in the Middle East that has been developing accounting solutions since 1992. Al-Amin Accounting Software is designed to meet the needs of small, medium, and large businesses in various sectors and industries. It offers a range of features and benefits that can help you manage your business operations more effectively and efficiently.</p>
- <h2>al amin accounting software crack keygen</h2><br /><p><b><b>Download</b> &#9881;&#9881;&#9881; <a href="https://byltly.com/2uKwf4">https://byltly.com/2uKwf4</a></b></p><br /><br />
- <p>In this article, we will explore the features and benefits of Al-Amin Accounting Software, how to download and install it on your computer, how to crack and activate it (and why you shouldn't), and some alternatives to consider. By the end of this article, you will have a better understanding of what Al-Amin Accounting Software can do for your business and how to get started with it.</p>
- <h2>Features and Benefits of Al-Amin Accounting Software</h2>
- <p>Al-Amin Accounting Software is a comprehensive solution that covers various aspects of your business management. It has four main modules: accounting and financial management, inventory and warehouse management, sales and customer relationship management, and human resources and payroll management. Each module has its own features and benefits that can help you streamline your business processes and improve your productivity and profitability. Here are some of the key features and benefits of each module:</p>
- <h3>Accounting and financial management</h3>
- <p>This module helps you manage your accounts, invoices, payments, budgets, etc. with ease and accuracy. Some of the features and benefits of this module are:</p>
- <ul>
- <li>It supports multiple currencies, languages, branches, companies, etc.</li>
- <li>It allows you to create unlimited accounts, sub-accounts, cost centers, etc.</li>
- <li>It enables you to record various types of transactions such as cash receipts, cash payments, bank deposits, bank withdrawals, journal entries, etc.</li>
- <li>It generates various types of invoices such as sales invoices, purchase invoices, service invoices, proforma invoices, etc.</li>
- <li>It tracks your receivables and payables and sends reminders to your customers and suppliers.</li>
- <li>It helps you manage your cash flow and budget by providing cash flow statements, budget reports, variance analysis, etc.</li>
- <li>It integrates with other modules such as inventory, sales, payroll, etc. to provide accurate financial data.</li>
- <li>It produces various types of financial reports such as balance sheet, income statement, trial balance, general ledger, etc.</li>
- </ul>
- <h3>Inventory and warehouse management</h3>
- <p>This module helps you track your stock, purchases, sales, transfers, etc. with ease and accuracy. Some of the features and benefits of this module are:</p>
- <p>al amin accounting software activation code<br />
- al amin accounting software license key generator<br />
- al amin accounting software serial number free download<br />
- al amin accounting software full version cracked<br />
- al amin accounting software patch file<br />
- al amin accounting software registration key<br />
- al amin accounting software unlock code<br />
- al amin accounting software crack keygen torrent<br />
- al amin accounting software crack keygen online<br />
- al amin accounting software crack keygen download<br />
- al amin accounting software crack keygen 2021<br />
- al amin accounting software crack keygen 2022<br />
- al amin accounting software crack keygen 2023<br />
- al amin accounting software crack keygen latest version<br />
- al amin accounting software crack keygen updated version<br />
- al amin accounting software crack keygen for windows<br />
- al amin accounting software crack keygen for mac<br />
- al amin accounting software crack keygen for linux<br />
- al amin accounting software crack keygen for android<br />
- al amin accounting software crack keygen for ios<br />
- how to crack al amin accounting software<br />
- how to get al amin accounting software for free<br />
- how to install al amin accounting software cracked version<br />
- how to use al amin accounting software without license key<br />
- how to bypass al amin accounting software activation<br />
- is it safe to use al amin accounting software crack keygen<br />
- is it legal to use al amin accounting software crack keygen<br />
- is it ethical to use al amin accounting software crack keygen<br />
- what are the benefits of using al amin accounting software crack keygen<br />
- what are the risks of using al amin accounting software crack keygen<br />
- what are the alternatives to using al amin accounting software crack keygen<br />
- where to find al amin accounting software crack keygen<br />
- where to download al amin accounting software crack keygen<br />
- where to buy al amin accounting software crack keygen<br />
- where to sell al amin accounting software crack keygen<br />
- who uses al amin accounting software crack keygen<br />
- who makes al amin accounting software crack keygen<br />
- who sells al amin accounting software crack keygen<br />
- why use al amin accounting software crack keygen<br />
- why not use al amin accounting software crack keygen<br />
- best way to use al amin accounting software crack keygen<br />
- best place to get al amin accounting software crack keygen<br />
- best source of al amin accounting software crack keygen<br />
- best method to generate al amin accounting software crack keygen<br />
- best tool for creating al amin accounting software crack keygen<br />
- easiest way to use al amin accounting software crack keygen<br />
- easiest place to get al amin accounting software crack keygen<br />
- easiest source of al amin accounting software crack keygen<br />
- easiest method to generate al amin accounting software crack keygen</p>
- <ul>
- <li>It supports multiple warehouses, locations, units, categories, etc.</li>
- <li>It allows you to create unlimited items, sub-items, batches, serial numbers, etc.</li>
- <li>It enables you to record various types of transactions such as purchase orders, purchase receipts, purchase returns, sales orders, sales deliveries, sales returns, stock transfers, stock adjustments, etc.</li>
- <li>It tracks your inventory levels, costs, prices, margins, etc. and alerts you when your stock is low or high.</li>
- <li>It helps you manage your inventory valuation by using different methods such as FIFO, LIFO, average cost, standard cost, etc.</li>
- <li>It integrates with other modules such as accounting, sales, payroll, etc. to provide accurate inventory data.</li>
- <li>It produces various types of inventory reports such as inventory status, inventory movement, inventory valuation, inventory aging, etc.</li>
- </ul>
- <h3>Sales and customer relationship management</h3>
- <p>This module helps you manage your sales orders, quotations, contracts, customers, etc. with ease and efficiency. Some of the features and benefits of this module are:</p>
- <ul>
- <li>It supports multiple sales channels, markets, segments, etc.</li>
- <li>It allows you to create unlimited customers, sub-customers, contacts, leads, opportunities, etc.</li>
- <li>It enables you to record various types of transactions such as quotations, sales orders, sales contracts, sales deliveries, sales invoices, sales returns, etc.</li>
- <li>It tracks your sales performance by providing sales analysis by customer, product, branch, region, etc.</li>
- <li>It helps you manage your customer relationships by providing customer profile, history, feedback, loyalty, etc.</li>
- <li>It integrates with other modules such as accounting, inventory, payroll, etc. to provide accurate sales data.</li>
- <li>It produces various types of sales reports such as sales summary, sales detail, sales commission, sales forecast, etc.</li>
- </ul>
- <h3>Human resources and payroll management</h3>
- <p>This module helps you manage your employees, salaries, deductions, leaves, etc. with ease and compliance. Some of the features and benefits of this module are:</p>
- <ul>
- <li>It supports multiple branches, departments, positions, grades, etc.</li>
- <li>It allows you to create unlimited employees, sub-employees, dependents, beneficiaries, etc.</li>
- <li>It enables you to record various types of transactions such as attendance, absence, overtime, leave, loan, advance, bonus, penalty, etc.</li>
- <li>It tracks your payroll costs by providing payroll analysis by employee, branch, department, position, grade, etc.</li>
- <li>It helps you manage your payroll compliance by providing tax calculation, social security calculation, insurance calculation, wage protection system (WPS), etc.</li>
- <li>It integrates with other modules such as accounting, inventory, sales, etc. to provide accurate payroll data.</li>
- <li>It produces various types of payroll reports such as payslip, payroll summary, payroll detail, payroll statement, payroll register, etc.</li>
- </ul>
- <h2>How to Download and Install Al-Ameen Accounting Software</h2>
- <p>If you are interested in trying out Al-Ameen Accounting Software for yourself or for your business, you can download it from the official website of SyrianSoft. Here are the steps to download and install Al-Ameen Accounting Software on your computer:</p>
- <h3>System requirements</h3>
- <p>Before downloading Al-Ameen Accounting Software,</p><p><strong><em><u><mark style="background-color:#FFFF00;">make sure that your computer meets the minimum or recommended specifications for running the software.</mark></u></em></strong></p>
- <p>According to the developer's website, the minimum and recommended system requirements for Al-Ameen Accounting Software are as follows:</p>
-
- | Software | Minimum | Recommended |
- | --- | --- | --- |
- | Microsoft SQL Server | 2012 | 2012 or higher |
- | Microsoft .NET Framework | 4.5.2 | 4.5.2 or higher |
- | Visual C++ Redistributable for Visual Studio | 2015 | 2015 or higher |
- | Sentinel Protection Key | Required | Required |
- | Internet Explorer | 11 | 11 or higher |
- | Platform Update (Windows 7 SP1 and Windows Server 2008 R2 SP1) | Required | Required |
-
- | Hardware | Minimum | Recommended |
- | --- | --- | --- |
- | Processor | 1 GHz | 2 GHz or higher |
- | Memory | 2 GB | 4 GB or higher |
- | Hard Disk (Free Space) | 500 MB | 1 GB or higher |
-
- <h3>Download links</h3>
- <p>To download Al-Ameen Accounting Software, you need to visit the official website of SyrianSoft and register for an account. After logging in, you can access the download page and choose the version that suits your needs. The latest version of Al-Ameen Accounting Software is 9.0 - (900.11), which was released on May 18, 2017. The download package consists of two files: Release_Notes.pdf and V_9_900_16_11.exe. The total size of the package is about 255 MB.</p>
- <h3>Installation steps</h3>
- <p>To install Al-Ameen Accounting Software on your computer, you need to follow these steps:</p>
- <ol>
- <li>Download the two files from the download page and save them in one folder on your hard disk.</li>
- <li>Click the file V_9_900_16_11.exe and an extract window will appear. Click the Extract button and wait for the extraction process to finish.</li>
- <li>A new file Ameen.exe will appear in the same folder where you saved the downloaded files. Click this file and the installation wizard will start on your computer.</li>
- <li>Follow the instructions on the screen to complete the installation process. You may need to restart your computer after the installation.</li>
- <li>After restarting your computer, you can launch Al-Ameen Accounting Software from the Start menu or from the desktop shortcut.</li>
- </ol>
- <h2>How to Crack and Activate Al-Ameen Accounting Software</h2>
- <p>If you are wondering how to crack and activate Al-Ameen Accounting Software, we have some bad news for you: it is not possible, and even if it were, it would be illegal and unethical. Here are some reasons why you should not try to crack and activate Al-Ameen Accounting Software:</p>
- <h3>Disclaimer</h3>
- <p>Al-Ameen Accounting Software is licensed software that requires a valid protection key to run. The protection key is a hardware device that plugs into your computer's USB port and verifies your license with the developer's server. Without the protection key, Al-Ameen Accounting Software will run as a demo version with limited functionality and data entry. Cracking and activating Al-Ameen Accounting Software means bypassing the protection key and using a fake license to run the full version of the software. This is a violation of the terms and conditions of use of Al-Ameen Accounting Software and an infringement of the intellectual property rights of SyrianSoft. By cracking and activating Al-Ameen Accounting Software, you are committing a crime that can result in legal action against you.</p>
- <h3>Risks and consequences</h3>
- <p>Even if you manage to find a way to crack and activate Al-Ameen Accounting Software, you are exposing yourself to various risks and consequences that can harm your computer, your data, and your business. Some of these risks and consequences are:</p>
- <ul>
- <li>You may download malware or viruses that can damage your computer or steal your personal information.</li>
- <li>You may get a corrupted or outdated version of Al-Ameen Accounting Software that can cause errors or crashes.</li>
- <li>You may lose your data or compromise its security by using an unverified source of Al-Ameen Accounting Software.</li>
- <li>You may miss out on important updates, patches, bug fixes, and new features that SyrianSoft provides for its customers.</li>
- <li>You may face technical issues or compatibility problems that SyrianSoft cannot help you with because you are using an illegal version of Al-Ameen Accounting Software.</li>
- <li>You may lose your credibility and reputation as a business owner by using pirated software that does not comply with professional standards and ethics.</li>
- </ul>
- <h3>Alternatives</h3>
- <p>If you are looking for alternatives to cracking and activating Al-Ameen Accounting Software, you have some options that are legal and ethical. Some of these options are:</p>
- <ul>
- <li>You can buy a legitimate license of Al-Ameen Accounting Software from SyrianSoft or its authorized dealers. This way, you can enjoy all the features and benefits of Al-Ameen Accounting Software without any risk or consequence.</li>
- <li>You can request a free trial of Al-Ameen Accounting Software from SyrianSoft or its authorized dealers. This way, you can test Al-Ameen Accounting Software for a limited period of time before deciding whether to buy it or not.</li>
- <li>You can look for other accounting software that suits your budget and needs. There are many accounting packages available on the market that offer different features and prices. You can compare them and choose the one that works best for you.</li>
- </ul>
- <h1>Conclusion</h1>
- <p>In conclusion, Al-Ameen Accounting Software is a comprehensive solution for your business needs that offers various features and benefits that can help you manage your accounting, inventory, sales, and payroll processes more effectively and efficiently. It is easy to download and install on your computer, but it requires a valid protection key to run. Cracking and activating Al-Ameen Accounting Software is not possible, and even if it were, it would be illegal and unethical. You should avoid doing so and look for legal and ethical alternatives instead. We hope this article has given you a clear overview of what Al-Ameen Accounting Software can do for your business and how to get started with it. If you have any questions or comments, please feel free to contact us. We would love to hear from you.</p>
- <h4>Frequently Asked Questions</h4>
- <p>Here are some frequently asked questions about Al-Ameen Accounting Software:</p>
- <ol>
- <li><strong>What is the price of Al-Ameen Accounting Software?</strong><br>The price of Al-Ameen Accounting Software depends on the number of users, modules, and features you need. You can contact SyrianSoft or its authorized dealers for a quotation.</li>
- <li><strong>How can I get support for Al-Ameen Accounting Software?</strong><br>You can get support for Al-Ameen Accounting Software by contacting SyrianSoft or its authorized dealers via phone, email, or online chat. You can also visit their website for online help, tutorials, and FAQs.</li>
- <li><strong>Can I use Al-Ameen Accounting Software on multiple computers?</strong><br>Yes, you can use Al-Ameen Accounting Software on multiple computers as long as they are connected to the same network. You will need one protection key per computer, however.</li>
- <li><strong>Can I customize Al-Ameen Accounting Software according to my needs?</strong><br>Yes, you can customize Al-Ameen Accounting Software according to your needs by using its built-in tools such as the report designer, form designer, label designer, etc. You can also ask SyrianSoft or its authorized dealers for custom development services if you need more advanced customization.</li>
- <li><strong>Can I integrate Al-Ameen Accounting Software with other software?</strong><br>Yes, you can integrate Al-Ameen Accounting Software with other software by using its built-in tools such as data import/export, data synchronization, web services, etc. You can also ask SyrianSoft or its authorized dealers for integration services if you need more complex integration.</li>
- </ol>
- </p> 0a6ba089eb<br />
- <br />
- <br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/EASEUS Partition Master 6.0.1 Server Edition Portable 64 Bit.md DELETED
@@ -1,119 +0,0 @@
-
- <h1>EASEUS Partition Master 6.0.1 Server Edition Portable 64 bit</h1>
- <p>EASEUS Partition Master is a powerful and easy-to-use partition software that allows you to create, resize, move, merge, split, clone, recover, convert, and manage disk partitions on Windows servers and PCs. It supports various file systems such as FAT32, NTFS, EXT2/EXT3/EXT4, ReFS, exFAT, etc. It also supports MBR and GPT disk styles, dynamic disks and volumes, RAID arrays, SSDs and HDDs, USB drives and memory cards.</p>
- <p>In this article, we will introduce EASEUS Partition Master 6.0.1 Server Edition Portable 64 bit, which is a special version of EASEUS Partition Master that can run directly from a USB flash drive or CD/DVD without installation. We will also show you how to use it to perform some common partition operations on your server or PC.</p>
- <h2>EASEUS Partition Master 6.0.1 Server Edition Portable 64 bit</h2><br /><p><b><b>Download Zip</b> &rArr;&rArr;&rArr; <a href="https://byltly.com/2uKxp0">https://byltly.com/2uKxp0</a></b></p><br /><br />
- <h2>What is EASEUS Partition Master 6.0.1 Server Edition Portable 64 bit?</h2>
- <p>EASEUS Partition Master 6.0.1 Server Edition Portable 64 bit is a portable version of EASEUS Partition Master 6.0.1 Server Edition that can run on any Windows server or PC with a 64-bit processor without installation or activation. It has all the features of EASEUS Partition Master 6.0.1 Server Edition, which include:</p>
- <ul>
- <li>Resize/move partition: You can resize or move any partition on your disk without losing data or rebooting your system.</li>
- <li>Clone disk/partition: You can clone an entire disk or a single partition to another disk or partition for backup or migration purposes.</li>
- <li>Merge/split partition: You can merge two adjacent partitions into one larger partition or split a large partition into two smaller partitions for better disk space management.</li>
- <li>Convert disk/partition: You can convert a disk from MBR to GPT or vice versa without deleting partitions or data. You can also convert a partition from one file system to another without formatting or losing data.</li>
- <li>Recover partition: You can recover deleted or lost partitions from unallocated space or damaged disks with ease.</li>
- <li>Manage dynamic volume: You can create, delete, format, resize, move, extend, shrink, split, merge, change drive letter, set active/inactive, explore properties of dynamic volumes on your disk.</li>
- <li>Partition through command prompts: You can execute partition commands through command prompts for advanced users.</li>
- <li>Repair RAID-5 volume: You can repair a corrupted RAID-5 volume by reconstructing the data of the failed member disk.</li>
- </ul>
- <p>EASEUS Partition Master 6.0.1 Server Edition Portable 64 bit is compatible with Windows Server 2003/2008/2012/2016/2019 and Windows XP/Vista/7/8/10 (64-bit only). It supports up to 32 disks and unlimited hard disk capacity.</p>
- <h2>Why use EASEUS Partition Master 6.0.1 Server Edition Portable 64 bit?</h2>
- <p>EASEUS Partition Master 6.0.1 Server Edition Portable 64 bit has some advantages over other partition software:</p>
- <ul>
- <li>It is portable: You can run it from a USB flash drive or CD/DVD without installing it on your system. This is convenient and safe, as you don't need to modify your system or registry settings.</li>
- <li>It is fast: You can perform partition operations in a few minutes or seconds, depending on the size and speed of your disk.</li>
- <li>It is reliable: You can trust EASEUS Partition Master to handle your disk partitions without causing any data loss or system crash.</li>
- <li>It is versatile: You can use EASEUS Partition Master to manage not only basic disks, but also dynamic disks, RAID arrays, SSDs, and external devices.</li>
- <li>It is cost-effective: You can get EASEUS Partition Master 6.0.1 Server Edition Portable 64 bit for free from the official website or some third-party sources. You don't need to pay for a license or subscription fee.</li>
- </ul>
- <h2>How to use EASEUS Partition Master 6.0.1 Server Edition Portable 64 bit?</h2>
- <p>To use EASEUS Partition Master 6.0.1 Server Edition Portable 64 bit, you need to follow these steps:</p>
- <ol>
- <li>Download EASEUS Partition Master 6.0.1 Server Edition Portable 64 bit from the official website or some third-party sources. The file size is about 40 MB.</li>
- <li>Extract the downloaded file to a USB flash drive or CD/DVD. You can use any compression software such as WinRAR or 7-Zip to do this.</li>
- <li>Connect the USB flash drive or CD/DVD to the server or PC that you want to manage the disk partitions on.</li>
- <li>Run the EPM.exe file from the USB flash drive or CD/DVD. You will see the main interface of EASEUS Partition Master 6.0.1 Server Edition Portable 64 bit.</li>
- <li>Select the disk or partition that you want to operate on from the disk map or the list on the left panel.</li>
- <li>Right-click on the disk or partition and choose the desired operation from the context menu. You can also use the toolbar buttons or the menu bar options to access the operations.</li>
- <li>Follow the instructions on the screen to complete the operation. You may need to confirm some actions or restart your system depending on the operation.</li>
- </ol>
- <h2>Some common partition operations with EASEUS Partition Master 6.0.1 Server Edition Portable 64 bit</h2>
- <p>In this section, we will show you how to perform some common partition operations with EASEUS Partition Master 6.0.1 Server Edition Portable 64 bit, such as resizing, cloning, merging, splitting, converting, and recovering partitions.</p>
- <h3>How to resize a partition with EASEUS Partition Master 6.0.1 Server Edition Portable 64 bit?</h3>
- <p>To resize a partition with EASEUS Partition Master 6.0.1 Server Edition Portable 64 bit, you need to follow these steps:</p>
- <ol>
- <li>Select the partition that you want to resize from the disk map or the list on the left panel.</li>
- <li>Right-click on the partition and choose Resize/Move from the context menu.</li>
- <li>In the pop-up window, drag the left or right border of the partition to adjust its size. You can also enter the exact size in MB in the boxes below.</li>
- <li>Click OK to confirm the changes. You will see a pending operation on the bottom panel.</li>
- <li>Click Apply on the toolbar to execute the operation. You may need to restart your system if you are resizing a system partition.</li>
- </ol>
- <h3>How to clone a disk/partition with EASEUS Partition Master 6.0.1 Server Edition Portable 64 bit?</h3>
- <p>To clone a disk/partition with EASEUS Partition Master 6.0.1 Server Edition Portable 64 bit, you need to follow these steps:</p>
- <ol>
- <li>Select the disk or partition that you want to clone from the disk map or the list on the left panel.</li>
- <li>Right-click on the disk or partition and choose Clone from the context menu.</li>
- <li>In the pop-up window, select the destination disk or partition that you want to clone to. Make sure it has enough space to hold all the data from the source disk or partition.</li>
- <li>Click Next to continue. You can choose to clone the disk or partition sector by sector or adjust the partition layout on the destination disk or partition.</li>
- <li>Click Proceed to start the cloning process. You may need to restart your system if you are cloning a system disk or partition.</li>
- </ol>
- <h3>How to merge partitions with EASEUS Partition Master 6.0.1 Server Edition Portable 64 bit?</h3>
- <p>To merge partitions with EASEUS Partition Master 6.0.1 Server Edition Portable 64 bit, you need to follow these steps:</p>
- <p></p>
- <ol>
- <li>Select one of the partitions that you want to merge from the disk map or the list on the left panel.</li>
- <li>Right-click on the partition and choose Merge from the context menu.</li>
- <li>In the pop-up window, select another partition that you want to merge with the first one. The two partitions must be adjacent and have the same file system.</li>
- <li>Click OK to confirm the changes. You will see a pending operation on the bottom panel.</li>
- <li>Click Apply on the toolbar to execute the operation. You may need to restart your system if you are merging a system partition.</li>
- </ol>
- <h3>How to split a partition with EASEUS Partition Master 6.0.1 Server Edition Portable 64 bit?</h3>
- <p>To split a partition with EASEUS Partition Master 6.0.1 Server Edition Portable 64 bit, you need to follow these steps:</p>
- <ol>
- <li>Select the partition that you want to split from the disk map or the list on the left panel.</li>
- <li>Right-click on the partition and choose Split from the context menu.</li>
- <li>In the pop-up window, drag the slider or enter the size in MB to specify how much space you want to allocate for the new partition.</li>
- <li>Click OK to confirm the changes. You will see a pending operation on the bottom panel.</li>
- <li>Click Apply on the toolbar to execute the operation. You may need to restart your system if you are splitting a system partition.</li>
- </ol>
- <h3>How to convert a disk/partition with EASEUS Partition Master 6.0.1 Server Edition Portable 64 bit?</h3>
- <p>To convert a disk/partition with EASEUS Partition Master 6.0.1 Server Edition Portable 64 bit, you need to follow these steps:</p>
- <ol>
- <li>Select the disk or partition that you want to convert from the disk map or the list on the left panel.</li>
- <li>Right-click on the disk or partition and choose Convert from the context menu.</li>
- <li>In the pop-up window, choose whether you want to convert a disk from MBR to GPT or vice versa, or convert a partition from one file system to another.</li>
- <li>Click OK to confirm the changes. You will see a pending operation on the bottom panel.</li>
- <li>Click Apply on the toolbar to execute the operation. You may need to restart your system if you are converting a system disk or partition.</li>
- </ol>
- <h3>How to recover a partition with EASEUS Partition Master 6.0.1 Server Edition Portable 64 bit?</h3>
- <p>To recover a partition with EASEUS Partition Master 6.0.1 Server Edition Portable 64 bit, you need to follow these steps:</p>
- <ol>
- <li>Select an unallocated space or a damaged disk that contains the deleted or lost partition from the disk map or the list on the left panel.</li>
- <li>Right-click on the unallocated space or the damaged disk and choose Partition Recovery from the context menu.</li>
- <li>In the pop-up window, choose whether you want to perform a quick scan or a deep scan to search for the deleted or lost partition. A quick scan is faster but may not find all the partitions, while a deep scan is slower but more thorough.</li>
- <li>Click Next to start the scanning process. You will see a list of found partitions on the right panel.</li>
- <li>Select the partition that you want to recover and click Proceed to recover it. You can also preview the files on the partition before recovering it.</li>
- <li>Click Apply on the toolbar to execute the operation. You may need to restart your system if you are recovering a system partition.</li>
- </ol>
- <h2>Conclusion</h2>
- <p>EASEUS Partition Master 6.0.1 Server Edition Portable 64 bit is a powerful and easy-to-use partition software that can run directly from a USB flash drive or CD/DVD without installation. It can help you create, resize, move, merge, split, clone, recover, convert, and manage disk partitions on Windows servers and PCs. It supports various file systems, disk styles, dynamic disks and volumes, RAID arrays, SSDs and HDDs, USB drives and memory cards. It is fast, reliable, versatile, and cost-effective. It is a great tool for disk partition management and maintenance.</p>
- <h2>FAQs</h2>
- <h3>Q: How can I get EASEUS Partition Master 6.0.1 Server Edition Portable 64 bit?</h3>
- <p>A: You can get EASEUS Partition Master 6.0.1 Server Edition Portable 64 bit for free from the official website or some third-party sources. You can also download it from this link: </p>
- <h3>Q: What are the system requirements for EASEUS Partition Master 6.0.1 Server Edition Portable 64 bit?</h3>
- <p>A: EASEUS Partition Master 6.0.1 Server Edition Portable 64 bit requires a Windows server or PC with a 64-bit processor, at least 512 MB of RAM, and at least 100 MB of free disk space.</p>
- <h3>Q: What are the limitations of EASEUS Partition Master 6.0.1 Server Edition Portable 64 bit?</h3>
- <p>A: EASEUS Partition Master 6.0.1 Server Edition Portable 64 bit has some limitations compared to other versions of EASEUS Partition Master, such as:</p>
- <ul>
- <li>It does not support Windows Server 2000/2003 R2/2008 R2/2012 R2/2016 R2/2019 R2.</li>
- <li>It does not support Windows XP/Vista/7/8/10 (32-bit only).</li>
- <li>It does not support Linux partitions such as EXT4/EXT3/EXT2/SWAP/XFS/Btrfs.</li>
- <li>It does not support BitLocker encrypted partitions.</li>
- <li>It does not support the ReFS file system.</li>
- <li>It does not support WinPE bootable disk creation.</li>
- </ul>
- <h3>Q: How can I update EASEUS Partition Master 6.0.1 Server Edition Portable 64 bit?</h3>
- <p>A: EASEUS Partition Master 6.0.1 Server Edition Portable 64 bit does not support automatic updates. You need to download the latest version from the official website or some third-party sources and replace the old version on your USB flash drive or CD/DVD.</p>
- <h3>Q: How can I contact EASEUS for technical support or feedback?</h3>
- <p>A: You can contact EASEUS by email at [email protected] or by phone at +1-800-570-4634 (toll-free in US and Canada) or +86-28-85432479 (international). You can also visit their website at for more information and resources.</p> b2dd77e56b<br />
- <br />
- <br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/EasyWorship 7 Full Version The Ultimate Solution for Creating and Presenting Worship Media.md DELETED
@@ -1,22 +0,0 @@
-
- <h1>How to Download and Install EasyWorship 7 Full Version for Free</h1>
- <p>EasyWorship 7 is a powerful and easy-to-use software that allows you to create and present worship slides, lyrics, videos, scriptures, and more. With EasyWorship 7, you can design and customize your own media library, schedule and manage your services, and control your presentation from any device. EasyWorship 7 is a great tool for churches, ministries, and worship teams who want to enhance their worship experience and engage their audience.</p>
- <h2>easyworship 7 full version</h2><br /><p><b><b>Download</b> &#10084; <a href="https://byltly.com/2uKzqy">https://byltly.com/2uKzqy</a></b></p><br /><br />
- <p>However, EasyWorship 7 is not free software. You need to purchase a license to use it legally and access all its features. The official price of EasyWorship 7 is $499 for the full version and $199 for the upgrade version. This may be too expensive for some users who want to try out the software or use it for personal or non-commercial purposes.</p>
- <p>Fortunately, there is a way to download and install EasyWorship 7 full version for free and use it without paying anything. In this article, we will show you how to do that step by step. But before we proceed, we want to warn you that downloading and using cracked software is illegal and risky. You may face legal consequences, malware infections, data loss, or other problems if you choose to do so. We do not condone or encourage piracy in any way. This article is for educational purposes only.</p>
- <h2>What is EasyWorship 7 Full Version?</h2>
- <p>A full version of a software product is a complete and unlocked version that has all the features and functions of the original software. A full version usually requires a license key or activation code to use it legally and properly.</p>
- <p></p>
- <p>EasyWorship 7 full version is a complete and unlocked version of EasyWorship 7 that has all the features and functions of the original software. It does not require a license key or activation code to use it. It also has some additional features or functions that are not available in the official release. For example, some users claim that the full version has more themes, backgrounds, fonts, and transitions than the original one.</p>
- <p>However, using EasyWorship 7 full version also has some drawbacks and risks. For one thing, it is illegal and violates the terms and conditions of Softouch Development Inc., the developer of EasyWorship. You may face legal actions or penalties if you are caught using it. For another thing, it is unsafe and unreliable. You may download malware or viruses along with the full version that can harm your computer or steal your data. You may also experience errors, crashes, bugs, or compatibility issues that can affect your work quality and efficiency.</p>
- <h2>How to Download and Install EasyWorship 7 Full Version for Free?</h2>
- <p>If you still want to download and install EasyWorship 7 full version for free despite the risks and consequences, here are the steps you need to follow:</p>
- <ol>
- <li>Go to a website that offers EasyWorship 7 full version for free download. There are many websites that claim to provide this service, but not all of them are trustworthy or legitimate. Some of them may contain malware or viruses that can infect your computer or redirect you to other unwanted sites. To avoid this, you should do some research and check the reviews and ratings of the website before downloading anything from it.</li>
- <li>Select the download link or button and wait for the download process to start. Depending on the size of the file and your internet speed, this may take some time. You may also need to complete some surveys or offers before you can access the download link.</li>
- <li>Once the download is complete, locate the file on your computer and extract it using a file extractor program such as WinRAR or 7-Zip. You should see a folder containing the setup file and the crack file.</li>
- <li>Run the setup file and follow the instructions to install EasyWorship 7 on your computer. You may need to enter some information such as your name, email address, or country during the installation process.</li>
- <li>After the installation is done, do not run or open EasyWorship 7 yet. Instead, go to the folder where you extracted the crack file and copy it.</li>
- <li>Paste the crack file into the installation directory of EasyWorship 7. This is usually located at C</p> ddb901b051<br />
- <br />
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/Chess Titans Free _HOT_ Download Full Version For Pc.md DELETED
@@ -1,48 +0,0 @@
- <h2>Chess Titans Free Download Full Version For Pc</h2><br /><p><b><b>DOWNLOAD</b> &mdash;&mdash;&mdash; <a href="https://imgfil.com/2uxYVg">https://imgfil.com/2uxYVg</a></b></p><br /><br />
- <br />
- You can play chess against the computer and see your progress. There is also a friendly ranking system to see who is the best player of the tournament. With a single click you can take a snapshot, add new pieces, or save the game.
-
- Easy to play, easy to learn
-
- Simple three-dimensional graphics keep the game as clear and easy to learn as possible. Simply drag and drop your pieces into the game to play. Want to play chess with the computer? You can even set the computer to play for you.
-
- A traditional look
-
- Choose your colors and set the background and playing pieces. You can even change the background and use hex colors. The game is classic in its look, but there is a lot of detail.
-
- Play against the computer
-
- Play against the computer in a friendly competition. You can choose the level of difficulty or play a friend's game. The computer knows the standard moves and pieces, so you don't have to tell it. Create your own board or play against the computer on a three-dimensional board.
-
- Chess Titans for Windows lets you play three different board sizes, with three levels of difficulty. It also comes with eight unique game boards to choose from. It also makes for a friendly competition between friends, as there are 10,000 different boards available.
-
- The new version of Chess Titans has been completely redesigned. It is built on new chess engines, HyperChess and Chess King. The game is better than ever and has a completely new user interface.
-
- Use the 10,000 boards available
-
- Play a friend's game or play against the computer
-
- Create your own board or play against the computer
-
- Controls:
-
- Move your pieces: left and right arrow keys
-
- Drag a piece to a new square: W
-
- Drag a piece to open the piece menu: A
-
- Drag a piece to select a piece: S
-
- Switch a piece with another piece: B
-
- Take a snapshot: Ctrl+F
-
- List the pieces on the board: Space bar
-
- Save the game: Ctrl+S
-
- Chess Titans for Windows is a classic chess game, but with a twist. After starting the game, you can play with or against the computer. You can choose the type of game, board size and level of difficulty. There are 10 4fefd39f24<br />
- <br />
- <br />
- <p></p>
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Dominos Pizza - Food Delivery APK A Must-Have App for Pizza Lovers.md DELETED
@@ -1,113 +0,0 @@
- <br />
- <h1>Domino's APK: How to Order Pizza Online with Ease</h1>
- <p>Do you love pizza? Do you want to order it online from the comfort of your home or office? Do you want to enjoy delicious pizza at affordable prices and fast delivery? If you answered yes to any of these questions, then you need to download Domino's APK on your Android device.</p>
- <h2>What is Domino's APK?</h2>
- <h3>A brief introduction to the app and its features</h3>
- <p>Domino's APK is the official app of Domino's Pizza, one of the most popular pizza chains in the world. With this app, you can order pizza online from your nearest Domino's outlet and get it delivered to your doorstep in no time. You can also customize your pizza with your choice of crust, toppings, cheese, and sauces. You can also order other items from the menu, such as pasta, sandwiches, salads, desserts, drinks, and more.</p>
- <h2>dominos apk</h2><br /><p><b><b>DOWNLOAD</b> > <a href="https://urlin.us/2uSUQg">https://urlin.us/2uSUQg</a></b></p><br /><br />
- <h3>How to download and install the app on your device</h3>
- <p>Downloading and installing Domino's APK is very easy and simple. All you have to do is follow these steps:</p>
- <ol>
- <li>Search for Domino's APK or Pizza delivery app on the Google Play Store or Apple App Store and tap on install.</li>
- <li>Wait for the app to download and install on your device.</li>
- <li>Open the app and grant the necessary permissions for location, camera, storage, etc.</li>
- <li>You are ready to order pizza online with Domino's APK.</li>
- </ol>
- <h2>How to Use Domino's APK to Order Pizza Online</h2>
- <h3>How to create an account and log in</h3>
- <p>To use Domino's APK, you need to create an account and log in with your email address or phone number. You can also sign up with your Facebook or Google account. Creating an account will help you save your preferences, address, payment details, and order history. You can also earn rewards points for every order you place with Domino's APK.</p>
- <h3>How to browse the menu and customize your order</h3>
- <p>Once you log in, you can browse the menu by tapping on the categories or using the search bar. You can also filter the menu by price, popularity, or ratings. You can tap on any item you like and see its details, such as ingredients, calories, price, etc. You can also customize your order by adding or removing toppings, cheese, sauces, etc. You can also choose the size and quantity of your order.</p>
- <h3>How to apply coupons and offers</h3>
- <p>Domino's APK offers various coupons and offers that can help you save money on your order. You can find them on the home screen or under the deals section. You can also enter a coupon code manually if you have one. To apply a coupon or offer, simply select it and add it to your cart. You will see the discounted price on your checkout screen.</p>
- <h3>How to track your order and enjoy contactless delivery</h3>
- <p>After placing your order, you can track its status and progress on the app or on the website. You can also call the store or the delivery person if you have any queries or issues. Domino's APK also offers contactless delivery, which means you can get your order delivered without any physical contact with the delivery person. You can choose this option on the app or on the website and pay online. You can also instruct the delivery person to leave your order at a safe place, such as your doorstep, lobby, or gate.</p>
- <h2>Why Choose Domino's APK for Pizza Delivery?</h2>
- <h3>The benefits of ordering from Domino's</h3>
- <p>There are many reasons why you should choose Domino's APK for pizza delivery. Here are some of them:</p>
- <ul>
- <li>Domino's offers a wide variety of pizzas and other items to suit your taste and budget.</li>
- <li>Domino's guarantees fast and fresh delivery of your order within 30 minutes or less.</li>
- <li>Domino's has a 100% satisfaction guarantee, which means you can get a free replacement or refund if you are not happy with your order.</li>
- <li>Domino's has a loyalty program called Piece of the Pie Rewards, which allows you to earn points for every order and redeem them for free pizza and other rewards.</li>
- <li>Domino's has a user-friendly and convenient app that makes ordering pizza online a breeze.</li>
- </ul>
- <h3>The customer reviews and ratings of the app</h3>
- <p>Domino's APK has received positive feedback and ratings from its users. The app has a 4.5-star rating on the Google Play Store and a 4.7-star rating on the Apple App Store. Here are some of the reviews from the users:</p>
- <p>dominos pizza app download<br />
- dominos online ordering apk<br />
- dominos app for android free<br />
- dominos pizza delivery apk<br />
- dominos app latest version<br />
- dominos apk mod<br />
- dominos app coupon code<br />
- dominos pizza tracker apk<br />
- dominos app not working<br />
- dominos apk old version<br />
- dominos app rewards<br />
- dominos pizza maker apk<br />
- dominos app deals<br />
- dominos apk mirror<br />
- dominos app review<br />
- dominos pizza menu apk<br />
- dominos app login<br />
- dominos apk pure<br />
- dominos app gift card<br />
- dominos pizza game apk<br />
- dominos app offers<br />
- dominos apk file<br />
- dominos app feedback<br />
- dominos pizza coupons apk<br />
- dominos app update<br />
- dominos apk for pc<br />
- dominos app contact number<br />
- dominos pizza maker game apk<br />
- dominos app promo code<br />
- dominos apk hack<br />
- dominos app customer service<br />
- dominos pizza online apk<br />
- dominos app payment options<br />
- dominos pizza simulator apk<br />
- dominos app referral code<br />
- dominos apk uptodown<br />
- dominos app store<br />
- dominos pizza order tracker apk<br />
- dominos app discount code<br />
- dominos apk cracked<br />
- dominos app support<br />
- dominos pizza maker simulator apk<br />
- dominos app free pizza points<br />
- dominos apk android 4.4.2 <br />
- dominos app faq <br />
- domino's pizza food delivery apk <br />
- domino's app order history <br />
- domino's pizza maker 3d cooking game apk</p>
- <blockquote>
- <p>"I love this app. It's easy to use and I can order pizza anytime I want. The delivery is fast and the pizza is always hot and delicious. I also like the coupons and offers that they have. I highly recommend this app to anyone who loves pizza."</p>
- <p>"This app is awesome. It has everything I need to order pizza online. I can customize my pizza, apply coupons, track my order, and enjoy contactless delivery. The app is also very secure and reliable. I have never had any issues with it."</p>
- <p>"This app is amazing. It saves me time and money when I order pizza online. The app is very simple and intuitive to use. I can also earn rewards points for every order and get free pizza and other perks. The app is a must-have for pizza lovers."</p>
- </blockquote>
- <h3>The comparison with other pizza delivery apps</h3>
- <p>Domino's APK is not the only pizza delivery app available in the market. There are other apps that offer similar services, such as Pizza Hut, Papa John's, Little Caesars, etc. However, Domino's APK stands out from the rest in terms of quality, speed, convenience, and value. Here is a table that compares Domino's APK with other pizza delivery apps:</p>
- <table border="1">
- <tr><th>Pizza Delivery App</th><th>Menu Variety</th><th>Delivery Time</th><th>Customer Satisfaction</th><th>Loyalty Program</th></tr>
- <tr><td>Domino's APK</td><td>High</td><td>30 minutes or less</td><td>100% guarantee</td><td>Piece of the Pie Rewards</td></tr>
- <tr><td>Pizza Hut</td><td>Medium</td><td>40 minutes or more</td><td>No guarantee</td><td>Hut Rewards</td></tr>
- <tr><td>Papa John's</td><td>Low</td><td>45 minutes or more</td><td>No guarantee</td><td>Papa Rewards</td></tr>
- <tr><td>Little Caesars</td><td>Low</td><td>No delivery option</td><td>No guarantee</td><td>No loyalty program</td></tr>
- </table>
- <h2>Conclusion</h2>
- <p>To sum up, Domino's APK is the best pizza delivery app that you can use to order pizza online with ease. It has a wide range of pizzas and other items to choose from, fast and fresh delivery, a 100% satisfaction guarantee, and a rewarding loyalty program. It also has a user-friendly and convenient app that makes ordering pizza online a breeze. So, what are you waiting for? Download Domino's APK today and enjoy delicious pizza at your doorstep.</p>
- <h2>FAQs</h2>
- <h3>Q1. Is Domino's APK safe and secure?</h3>
- <p>A1. Yes, Domino's APK is safe and secure to use. It uses encryption and other security measures to protect your personal and payment information. It also complies with all the privacy policies and regulations.</p>
- <h3>Q2. What are the payment options available on Domino's APK?</h3>
- <p>A2. Domino's APK offers various payment options for your convenience. You can pay online with your credit card, debit card, net banking, UPI, or wallet. You can also pay cash on delivery or use a gift card or voucher.</p>
- <h3>Q3. How can I contact Domino's customer service?</h3>
- <p>A3. Domino's customer service is always ready to help you with any queries or issues you may have. You can contact them by calling the toll-free number 1800-103-6888 or by emailing them at [email protected]. You can also chat with them on the app or on the website.</p>
- <h3>Q4. What are the minimum requirements for Domino's APK?</h3>
- <p>A4. Domino's APK requires an Android device with a minimum of 4.4 version and a minimum of 50 MB of free space. It also requires an internet connection and GPS access to function properly.</p>
- <h3>Q5. Can I order from Domino's APK in other countries?</h3>
- <p>A5. No, Domino's APK is only available for ordering pizza online in India. If you are in another country, you can use the website or the app of the local Domino's franchise to order pizza online.</p> 197e85843d<br />
- <br />
- <br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Frozen City Mod APK 1.0.6 for Android - Free Purchase.md DELETED
@@ -1,87 +0,0 @@
1
- <br />
2
- <h1>Frozen City Mod APK 1.0.6: A Survival Game in a Post-Apocalyptic World</h1>
3
- <p>Do you love survival games that challenge your skills and creativity? Do you want to experience a thrilling adventure in a frozen city where zombies and mutants roam? If yes, then you should try Frozen City mod APK 1.0.6, a modified version of the original game that gives you unlimited resources, free purchase, and no ads. In this article, we will tell you everything you need to know about this amazing game and how to download and install it on your Android device.</p>
4
- <h2>frozen city mod apk 1.0 6</h2><br /><p><b><b>Download File</b> &#10022;&#10022;&#10022; <a href="https://urlin.us/2uST8c">https://urlin.us/2uST8c</a></b></p><br /><br />
5
- <h2>Introduction</h2>
6
- <h3>What is Frozen City?</h3>
7
- <p>Frozen City is a survival game developed by Century Games Pte Ltd, where you have to build your shelter, scavenge for resources, craft weapons and tools, and fight against zombies and mutants in a post-apocalyptic world. The game is set in a city that has been frozen by a mysterious disaster, and you are one of the few survivors who have to struggle to survive. You can explore the city, find other survivors, join clans, trade items, and complete quests. The game has a realistic physics system, dynamic weather, day and night cycle, and stunning graphics.</p>
8
- <h3>What is a mod APK?</h3>
9
- <p>A mod APK is a modified version of an original APK (Android Package Kit) file, which is the format used to distribute and install applications on Android devices. A mod APK can have extra features, unlocked items, unlimited resources, or other advantages that are not available in the original version of the game or app. A mod APK can be created by anyone who has the skills and tools to modify the original APK file.</p>
10
- <h3>Why download Frozen City mod APK 1.0.6?</h3>
11
- <p>If you are a fan of Frozen City, you might want to download Frozen City mod APK 1.0.6 because it offers some benefits that can enhance your gaming experience. For example, you can enjoy free purchase, which means you can buy anything in the game without spending real money. You can also have unlimited resources, such as wood, metal, food, water, and energy, which are essential for building your shelter and crafting items. Moreover, you can play the game without any annoying ads that can interrupt your gameplay or consume your data.</p>
52
- <h2>Features of Frozen City mod APK 1.0.6</h2>
53
- <h3>Free purchase</h3>
54
- <p>With Frozen City mod APK 1.0.6, you can buy anything in the game for free, such as weapons, armor, vehicles, furniture, decorations, and more. You don't need to worry about running out of money or gems, as you can have unlimited amounts of them with this mod.</p>
55
- <h3>Unlimited resources</h3>
56
- <p>Another feature of Frozen City mod APK 1.0.6 is that it gives you unlimited resources that you need to survive in the frozen city. You can have unlimited wood, metal, food, water, and energy with this mod, which means you don't need to scavenge for them or wait for them to regenerate. You can use them to build your shelter, craft items, cook food, and power your devices.</p>
57
- <h3>No ads</h3>
58
- <p>Frozen City mod APK 1.0.6 also removes all the ads that can appear in the game from time to time. Ads can be annoying and distracting when you are playing a survival game that requires your attention and data. With Frozen City mod APK 1.0.6, you can enjoy the game without any interruptions or distractions.</p>
59
- <h3>High-quality graphics and sound</h3>
60
- <p>Frozen City mod APK 1.0.6 does not compromise the quality of the graphics and sound of the game. In fact, it enhances them by making them more realistic and immersive. You can admire the details of the frozen city, the weather effects, the lighting, and the shadows. You can also hear the sounds of the zombies, the mutants, the weapons, and the environment. Frozen City mod APK 1.0.6 will make you feel like you are in a real post-apocalyptic world.</p>
61
- <h2>How to download and install Frozen City mod APK 1.0.6</h2>
62
- <h3>Step 1: Enable unknown sources</h3>
63
- <p>Before you can download and install Frozen City mod APK 1.0.6, you need to allow installs from unknown sources on your Android device, which lets you install apps that are not from the Google Play Store. On older Android versions, go to Settings, then Security, and toggle on Unknown Sources; on Android 8.0 and later the permission is granted per app, under Settings > Apps > Special app access > Install unknown apps.</p>
64
- <h3>Step 2: Download the mod APK file</h3>
65
- <p>Next, you need to download the mod APK file of Frozen City from a reliable source. You can use this link to download it: [Frozen City mod APK 1.0.6]. Make sure you have enough storage space on your device before downloading it.</p>
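- <p>The download link above is a placeholder, so no checksum can be quoted here; still, a good habit before installing any APK is to compare the file's SHA-256 hash against one published by the source, when it provides one. A minimal Python sketch (the filename and expected hash are placeholders, not real values):</p>
- <pre><code>import hashlib
-
- def sha256_of(path, chunk_size=1024 * 1024):
-     # Hash the file in chunks so a large APK never has to fit in memory.
-     digest = hashlib.sha256()
-     with open(path, "rb") as f:
-         for chunk in iter(lambda: f.read(chunk_size), b""):
-             digest.update(chunk)
-     return digest.hexdigest()
-
- expected = "paste-the-published-sha256-here"  # placeholder value
- print("match" if sha256_of("frozen-city-1.0.6.apk") == expected else "MISMATCH")
- </code></pre>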
66
- <h3>Step 3: Install the mod APK file</h3>
67
- <p>After downloading the mod APK file, you need to install it on your device. To do this, locate the file in your downloads folder or wherever you saved it, and tap on it. You will see a prompt asking you to confirm the installation. Tap on install and wait for it to finish.</p>
68
- <h3>Step 4: Enjoy the game</h3>
69
- <p>Once the installation is done, you can launch the game from your app drawer or home screen. You will see a new icon with the name Frozen City mod APK 1.0.6. Tap on it and enjoy the game with all its features.</p>
70
- <h2>Conclusion</h2>
71
- <p>Frozen City mod APK 1.0.6 is a great way to enjoy a survival game in a frozen city where zombies and mutants are your enemies. You can have free purchase, unlimited resources, no ads, and high-quality graphics and sound with this mod. You can also explore the city, find other survivors, join clans, trade items, and complete quests with this mod. If you want to download and install Frozen City mod APK 1.0.6 on your Android device, just follow the steps we have provided in this article.</p>
72
- <h2>FAQs</h2>
73
- <p>Here are some frequently asked questions about Frozen City mod APK 1.0.6:</p>
74
- <ul>
75
- <li><b>Is Frozen City mod APK 1.0.6 safe to use?</b></li>
76
- <p>Yes, Frozen City mod APK 1.0.6 is safe to use as long as you download it from a trusted source and scan it with an antivirus before installing it.</p>
77
- <li><b>Does Frozen City mod APK 1.0.6 require root access?</b></li>
78
- <p>No, Frozen City mod APK 1.0.6 does not require root access to work on your device.</p>
79
- <li><b>Can I play Frozen City mod APK 1.0.6 online with other players?</b></li>
80
- <p>Yes, you can play Frozen City mod APK 1.0.6 online with other players who have the same version of the game.</p>
81
- <li><b>Can I update Frozen City mod APK 1.0.6 when a new version is released?</b></li>
82
- <p>No, you cannot update Frozen City mod APK 1.0.6 when a new version is released because it will overwrite the mod features and restore the original version of the game.</p>
83
- <li><b>Can I uninstall Frozen City mod APK 1.0.6 if I don't like it?</b></li>
84
- <p>Yes, you can uninstall Frozen City mod APK 1.0.6 if you don't like it or if it causes any problems on your device.</p>
85
- </ul>
spaces/1phancelerku/anime-remove-background/Clash Royale Bluestacks Play the Best Strategy Game on Your PC for Free.md DELETED
@@ -1,117 +0,0 @@
1
-
2
- <h1>How to Download and Play Clash Royale on Bluestacks</h1>
3
- <p>Clash Royale is one of the most popular and addictive mobile games in the world. It is a real-time strategy game where you collect cards, build decks, and battle other players online. You can join clans, chat with friends, unlock new cards, and earn chests full of rewards. But what if you want to play Clash Royale on a bigger screen, with better graphics, faster performance, and more control? That's where Bluestacks comes in.</p>
4
- <p>Bluestacks is the best mobile gaming platform for PC and Mac. It lets you play thousands of Android games on your computer, with full keyboard and mouse support, custom settings, and advanced features. You can also stream your gameplay to Facebook or Twitch, record your screen, take screenshots, and more. With Bluestacks, you can enjoy playing Clash Royale on your PC or Mac like never before.</p>
5
- <h2>download clash royale bluestacks</h2><br /><p><b><b>Download Zip</b> &#10003; <a href="https://jinyurl.com/2uNS92">https://jinyurl.com/2uNS92</a></b></p><br /><br />
6
- <p>In this article, we will show you how to download and install Bluestacks on your PC or Mac, and how to play Clash Royale on it. Follow these simple steps and get ready to clash!</p>
7
- <h2>Step 1: Download and install Bluestacks on your PC or Mac</h2>
8
- <p>The first thing you need to do is to download Bluestacks from its official website. You can choose from different versions of Bluestacks, depending on your operating system and Android preference. For example, you can download Bluestacks 5 for Windows 10 with Android 11, or Bluestacks 5 Nougat 64-bit for Mac. Make sure your PC or Mac meets the minimum system requirements for Bluestacks before downloading it.</p>
9
- <p>Once you have downloaded the Bluestacks installer, run it and follow the instructions to install it on your PC or Mac. You can choose the default location for installation or change it to a different drive. The installation process may take a few minutes, depending on your internet speed and computer performance.</p>
10
- <h2>Step 2: Launch Bluestacks and sign in with your Google account</h2>
11
- <p>After installing Bluestacks, launch it from your desktop or start menu. You will see a window like this:</p>
60
- <img src="(^3^)" alt="Bluestacks home screen">
61
- <p>Here, you need to sign in with your Google account to access the Google Play Store and other Google services. If you don't have a Google account yet, you can create one here. Signing in with your Google account will also sync your game progress and purchases across devices.</p>
62
- <h2>Step 3: Search for Clash Royale in the Google Play Store and install it</h2>
63
- <p>Now that you have signed in with your Google account, you can search for Clash Royale in the Google Play Store app on Bluestacks. You can find the app icon on the home screen or in the app center. Click on it to open it.</p>
64
- <p>In the Google Play Store app, type "Clash Royale" in the search bar and hit enter. You will see a list of results like this:</p>
65
- <img src="(^6^)" alt="Clash Royale search results">
66
- <p>Click on the first result that says "Clash Royale" by Supercell. This will take you to the game's page in the Google Play Store. Here, you can see more information about the game, such as its description, screenshots, reviews, ratings, etc.</p>
67
- <p>To install Clash Royale on Bluestacks, click on the green "Install" button. This will start downloading and installing the game on your PC or Mac. The process may take a few minutes, depending on your internet speed.</p>
68
- <h2>Step 4: Enjoy playing Clash Royale on your PC or Mac with Bluestacks</h2>
69
- <p>Congratulations! You have successfully installed Clash Royale on Bluestacks. Now you can enjoy playing the game on your PC or Mac with a bigger screen, better graphics, faster performance, and more control. You can also use the Bluestacks features to enhance your gaming experience, such as:</p>
70
- <ul>
71
- <li>Customize your keyboard and mouse controls to suit your play style. You can use the game guide to see the default controls or change them as you wish.</li>
72
- <li>Use the multi-instance feature to play multiple accounts of Clash Royale at the same time. You can also switch between different instances easily with the multi-instance manager.</li>
73
- <li>Use the macro feature to record and execute complex actions with a single keystroke. You can also edit and share your macros with other players.</li>
74
- <li>Use the eco mode to reduce CPU and RAM usage and improve battery life. You can also enable or disable notifications, sound, and background apps.</li>
75
- </ul>
76
- <p>With Bluestacks, you can take your Clash Royale gameplay to the next level. You can also explore other games in the Bluestacks app center, such as Clash of Clans, Brawl Stars, PUBG Mobile, and more.</p>
77
- <h1>Conclusion</h1>
78
- <p>In this article, we have shown you how to download and play Clash Royale on Bluestacks, the best mobile gaming platform for PC and Mac. We have also explained the benefits of playing Clash Royale on Bluestacks and how to use its features to enhance your gaming experience. We hope you found this article helpful and informative.</p>
79
- <p>If you are a fan of Clash Royale or any other mobile game, we highly recommend you to try out Bluestacks. It is free, easy, and fun to use. You can download it from here and start playing your favorite games on your PC or Mac today.</p>
80
- <p>Thank you for reading this article. If you have any questions or feedback, please leave them in the comments section below. We would love to hear from you. Happy clashing!</p>
81
- <h1>FAQs</h1>
82
- <h3>Q: Is Bluestacks safe to use?</h3>
83
- <p>A: Yes, Bluestacks is safe to use. It is a legitimate software that has been downloaded by millions of users worldwide. It does not contain any malware, viruses, or spyware. It also does not access or modify any of your personal data or files.</p>
84
- <h3>Q: Is Bluestacks free to use?</h3>
85
- <p>A: Yes, Bluestacks is free to use. You can download and install it on your PC or Mac without paying anything. You can also play any game on it without any limitations or restrictions. However, some games may have in-app purchases or ads that require real money.</p>
86
- <h3>Q: How do I update Clash Royale on Bluestacks?</h3>
87
- <p>A: To update Clash Royale on Bluestacks, you need to follow these steps:</p>
88
- <ol>
89
- <li>Open the Google Play Store app on Bluestacks.</li>
90
- <li>Click on the menu icon (three horizontal lines) on the top left corner.</li>
91
- <li>Select "My apps & games" from the menu.</li>
92
- <li>Find Clash Royale in the list of installed apps and click on "Update".</li>
93
- <li>Wait for the update to finish and launch the game.</li>
94
- </ol>
95
- <h3>Q: How do I transfer my Clash Royale account from my phone to Bluestacks?</h3>
96
- <p>A: To transfer your Clash Royale account from your phone to Bluestacks, you need to follow these steps:</p>
97
- <ol>
98
- <li>On your phone, open Clash Royale and go to the settings menu (gear icon).</li>
99
- <li>Select "Link a device" and then "This is the old device".</li>
100
- <li>Select "I want to link to another device" and then "Android device".</li>
101
- <li>You will see a code that is valid for two minutes.</li>
102
- <li>On Bluestacks, open Clash Royale and go to the settings menu (gear icon).</li>
103
- <li>Select "Link a device" and then "This is the new device".</li>
104
- <li>Enter the code from your phone and confirm.</li>
105
- <li>Your account will be transferred to Bluestacks.</li>
106
- </ol>
107
- <h3>Q: How do I contact Bluestacks support?</h3>
108
- <p>A: If you have any issues or problems with Bluestacks, you can contact their support team by following these steps:</p>
109
- <ol>
110
- <li>Open Bluestacks and click on the menu icon (three horizontal lines) on the top right corner.</li>
- <li>Select "Help and Support" from the menu.</li>
111
- <li>You will see a list of topics and articles that may help you solve your issue.</li>
112
- <li>If you still need assistance, click on the "Report a Problem" button at the bottom of the page.</li>
113
- <li>Fill out the form with your name, email, description of the problem, and any attachments.</li>
114
- <li>Click on the "Submit" button and wait for a response from the Bluestacks support team.</li>
115
- </ol>
spaces/1phancelerku/anime-remove-background/Download Downloader How to Boost Your Download Speeds and Manage Your Files.md DELETED
@@ -1,92 +0,0 @@
1
-
2
- <h1>Download Downloader: What Is It and Why Do You Need It?</h1>
3
- <p>If you frequently download files from the internet, you know how frustrating it can be to deal with slow speeds, broken links, timeouts, and other issues. That's why you need a download manager, also known as a download downloader. A download manager is a software tool that helps you manage your downloads more efficiently and effectively. It can boost your download speed, resume interrupted downloads, organize your files, convert formats, and more. In this article, we will show you how to choose the best download manager for your needs, review the top 5 free download managers of 2023, and give you some tips on how to use them effectively.</p>
4
- <h2>download downloader</h2><br /><p><b><b>Download File</b> &#10145; <a href="https://jinyurl.com/2uNUGu">https://jinyurl.com/2uNUGu</a></b></p><br /><br />
5
- <h2>How to Choose the Best Download Manager for Your Needs</h2>
6
- <p>There are many download managers available on the market, but not all of them are created equal. Some may have more features than others, some may be more compatible with your device or browser, some may be more secure or user-friendly. Here are some factors to consider when selecting a download manager:</p>
7
- <ul>
8
- <li><strong>Speed:</strong> One of the main reasons to use a download manager is to increase your download speed. A good download manager should be able to accelerate your downloads by using multiple connections, splitting files into smaller chunks, and optimizing your bandwidth (see the sketch just after this list).</li>
9
- <li><strong>Features:</strong> Another reason to use a download manager is to access more features than your browser's default downloader. A good download manager should be able to support various file types, protocols, and sources, such as HTTP, FTP, BitTorrent, YouTube, etc. It should also be able to preview files before downloading them, resume broken downloads, schedule downloads for later times, organize downloads into folders or categories, convert formats if needed, and integrate with your browser or antivirus software.</li>
10
- <li><strong>Compatibility:</strong> A good download manager should be compatible with your device and browser. It should be able to run smoothly on your operating system (Windows, Mac OS X, Linux), whether it's desktop or mobile. It should also be able to work with your preferred browser (Chrome, Firefox, Edge), whether it's through an extension or a standalone app.</li>
11
- <li><strong>Security:</strong> A good download manager should be secure and reliable. It should be able to scan files for viruses or malware before downloading them. It should also be able to protect your privacy by encrypting your data or using proxy servers if needed.</li>
12
- </ul>
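- <p>To make the "multiple connections" point concrete, here is a minimal Python sketch of segmented downloading, the core trick behind most accelerators: split the file into byte ranges and fetch them in parallel. This is a simplified illustration, not any particular product's implementation; it assumes the server reports Content-Length and honours HTTP Range requests, and the URL at the bottom is a placeholder. Real download managers add retries, progress tracking, and fallbacks on top.</p>
- <pre><code>import urllib.request
- from concurrent.futures import ThreadPoolExecutor
-
- def fetch_part(url, start, end, path):
-     # Request one byte range and write it at its own offset in the file.
-     req = urllib.request.Request(url, headers={"Range": f"bytes={start}-{end}"})
-     with urllib.request.urlopen(req) as resp, open(path, "r+b") as f:
-         f.seek(start)
-         f.write(resp.read())
-
- def segmented_download(url, path, parts=4):
-     head = urllib.request.Request(url, method="HEAD")
-     size = int(urllib.request.urlopen(head).headers["Content-Length"])
-     with open(path, "wb") as f:
-         f.truncate(size)  # pre-allocate so each worker can seek and write
-     step = size // parts
-     ranges = [(i * step, size - 1 if i == parts - 1 else (i + 1) * step - 1)
-               for i in range(parts)]
-     with ThreadPoolExecutor(max_workers=parts) as pool:
-         for start, end in ranges:
-             pool.submit(fetch_part, url, start, end, path)
-
- # segmented_download("https://example.com/big.file", "big.file")  # placeholder URL
- </code></pre>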
13
- <h3>The Top 5 Free Download Managers of 2023</h3>
14
- <h4>Download Accelerator Plus</h4>
15
- <p>Download Accelerator Plus (DAP) is one of the most popular download managers on the market. It has over 300 million users worldwide and boasts impressive speeds up to 400% faster than regular downloads. It also has a built-in media file previewer that lets you watch videos or listen to music before downloading them. DAP supports various protocols and sources, such as HTTP, FTP, BitTorrent, YouTube, etc. It also integrates with your browser and antivirus software for seamless downloading. DAP is free to use, but you can upgrade to a premium version for more features and benefits.</p>
16
- <h4>Ninja Download Manager</h4>
17
- <p>Ninja Download Manager (NDM) is a powerful and well-designed download manager for media files. It has a sleek and intuitive interface that lets you manage your downloads easily and efficiently. NDM can accelerate your downloads by using multiple connections and smart logic. It can also resume broken downloads, schedule downloads for later times, organize downloads into categories, and convert formats if needed. NDM supports various protocols and sources, such as HTTP, HTTPS, FTP, YouTube, etc. It also integrates with your browser and clipboard for convenient downloading. NDM is free to use, but you can upgrade to a pro version for more features and benefits.</p>
66
- <h4>Free Download Manager</h4>
67
- <p>Free Download Manager (FDM) is a versatile and user-friendly download manager with BitTorrent support. It has a simple and clean interface that lets you manage your downloads easily and efficiently. FDM can accelerate your downloads by using multiple connections and splitting files into smaller chunks. It can also resume broken downloads, schedule downloads for later times, organize downloads into folders or categories, and convert formats if needed. FDM supports various protocols and sources, such as HTTP, HTTPS, FTP, BitTorrent, YouTube, etc. It also integrates with your browser and antivirus software for seamless downloading. FDM is free and open-source, but you can donate to support the developers.</p>
68
- <h4>JDownloader</h4>
69
- <p>JDownloader is a feature-rich and customizable download manager with remote control. It has a complex and advanced interface that lets you manage your downloads in detail and with flexibility. JDownloader can accelerate your downloads by using multiple connections and splitting files into smaller chunks. It can also resume broken downloads, schedule downloads for later times, organize downloads into folders or categories, and convert formats if needed. JDownloader supports various protocols and sources, such as HTTP, HTTPS, FTP, BitTorrent, YouTube, etc. It also integrates with your browser and clipboard for convenient downloading. JDownloader is free and open-source, but you can buy a premium account for more features and benefits.</p>
70
- <h4>Internet Download Manager</h4>
71
- <p>Internet Download Manager (IDM) is a fast and reliable download manager with browser integration. It has a simple and classic interface that lets you manage your downloads easily and efficiently. IDM can accelerate your downloads by using multiple connections and dynamic file segmentation. It can also resume broken downloads, schedule downloads for later times, organize downloads into folders or categories, and convert formats if needed. IDM supports various protocols and sources, such as HTTP, HTTPS, FTP, BitTorrent, YouTube, etc. It also integrates with your browser and antivirus software for seamless downloading. IDM is not free to use, but you can try it for 30 days before buying it.</p>
72
- <h2>How to Use a Download Manager Effectively</h2>
73
- <p>Now that you have learned about the best download managers of 2023, you may wonder how to use them effectively to optimize your download experience. Here are some tips and tricks on how to do that:</p>
74
- <ul>
75
- <li><strong>Schedule your downloads:</strong> If you have a lot of files to download or if you want to save bandwidth or battery life, you can schedule your downloads for later times when you are not using your device or when the internet connection is better.</li>
76
- <li><strong>Organize your downloads:</strong> If you have a lot of files to download or if you want to find them easily later on, you can organize your downloads into folders or categories based on their type, source, date, etc.</li>
77
- <li><strong>Resume your downloads:</strong> If your download is interrupted by an error or a power outage, or if you pause it yourself, you can resume it from where it left off without losing any data or time (see the sketch just after this list).</li>
78
- <li><strong>Convert your downloads:</strong> If your download is in a format that is not compatible with your device or player or if you want to reduce its size or quality, you can convert it to another format that suits your needs.</li>
79
- </ul>
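- <p>As a companion to the first tip, here is a minimal sketch of what "resuming" means under the hood: the client checks how many bytes it already has, then asks the server for the rest with a Range header. This is an illustration rather than any specific app's code; the server must support ranges (it should answer 206 Partial Content), and the URL and filename are placeholders.</p>
- <pre><code>import os
- import urllib.request
-
- def resume_download(url, path):
-     done = os.path.getsize(path) if os.path.exists(path) else 0
-     req = urllib.request.Request(url, headers={"Range": f"bytes={done}-"})
-     with urllib.request.urlopen(req) as resp:
-         if resp.status != 206:  # 206 = Partial Content; anything else means no resume
-             raise RuntimeError("server ignored the Range header")
-         with open(path, "ab") as f:
-             for chunk in iter(lambda: resp.read(64 * 1024), b""):
-                 f.write(chunk)
-
- # resume_download("https://example.com/big.file", "big.file")  # placeholder URL
- </code></pre>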
80
- <h2>Conclusion</h2>
81
- <p>A download manager is a software tool that helps you manage your downloads more efficiently and effectively. It can boost your download speed, resume interrupted downloads, organize your files, convert formats, and more. In this article, we have shown you how to choose the best download manager for your needs, reviewed the top 5 free download managers of 2023, and given you some tips on how to use them effectively. We hope you have found this article helpful and informative. If you want to try out a download manager for yourself, you can download one of the options we have mentioned above or search for other alternatives online. You will be amazed by how much easier and faster your download experience will be with a download manager. Happy downloading!</p>
82
- <h2>FAQs</h2>
83
- <p>Here are some frequently asked questions about download managers:</p>
84
- <ol>
85
- <li><strong>What is the difference between a download manager and a torrent client?</strong><br>A download manager is a software tool that helps you download files from various sources and protocols, such as HTTP, FTP, YouTube, etc. A torrent client is a software tool that helps you download files from BitTorrent, a peer-to-peer protocol that uses a network of users to share files.</li>
86
- <li><strong>Are download managers safe to use?</strong><br>Download managers are generally safe to use, as long as you download them from reputable sources and scan them for viruses or malware before installing them. However, you should also be careful about the files you download with them, as some of them may contain harmful or illegal content. Always check the file name, size, type, and source before downloading it.</li>
87
- <li><strong>Do download managers work with all browsers?</strong><br>Most download managers work with all major browsers, such as Chrome, Firefox, Edge, etc. However, some of them may require an extension or a plugin to integrate with your browser. You can check the compatibility of your download manager with your browser on its official website or in its settings.</li>
88
- <li><strong>Do download managers use more bandwidth or data?</strong><br>Download managers may use more bandwidth or data than regular downloads, as they use multiple connections and split files into smaller chunks to accelerate your downloads. However, this also depends on your internet speed, file size, and source. You can limit the bandwidth or data usage of your download manager in its settings if needed.</li>
89
- <li><strong>How can I uninstall a download manager?</strong><br>You can uninstall a download manager like any other software on your device. You can go to your control panel or settings and look for the option to uninstall programs or apps. You can then select your download manager and follow the instructions to remove it from your device.</li>
90
- </ol>
spaces/1phancelerku/anime-remove-background/Download Table No. 21 Full Movie in 720p HD Quality from Filmyzilla.md DELETED
@@ -1,337 +0,0 @@
1
- <br />
2
- <h1>Table No. 21 Full Movie Download Filmyzilla 720p: A Thrilling and Illegal Adventure</h1>
3
- <p>If you are looking for a movie that will keep you on the edge of your seat, you might be tempted to download Table No. 21 full movie from Filmyzilla, a website that offers free downloads of pirated movies and shows. But before you do that, you should know what you are getting into and why it is not a good idea.</p>
4
- <h2>table no 21 full movie download filmyzilla 720p</h2><br /><p><b><b>Download Zip</b> &#9193; <a href="https://jinyurl.com/2uNTML">https://jinyurl.com/2uNTML</a></b></p><br /><br />
5
- <h2>What is Table No. 21?</h2>
6
- <p>Table No. 21 is a 2013 Hindi thriller movie starring Paresh Rawal, Rajeev Khandelwal and Tina Desai. It is named after Article 21 of the Indian Constitution, which deals with the protection of life and personal liberty. The movie touches upon the pertinent social issue of ragging, or bullying on college campuses.</p>
7
- <h3>A brief summary of the plot</h3>
8
- <p>The movie follows Vivaan and Siya, a married couple who struggle to make ends meet. They win a trip to Fiji in a lucky draw, where they meet Mr. Khan, a mysterious and charming man who invites them to participate in a live game show called Table No. 21. He tells them that the winner of the game will get a whopping amount of ₹210 million as prize money. The rules are simple: they have to answer eight personal questions truthfully and complete a task related to each question. However, as the game progresses, the questions and tasks become increasingly horrific and reveal dark secrets from their past. They soon realize that they are trapped in a deadly game of survival with no escape.</p>
9
- <h3>The cast and crew of the movie</h3>
10
- <p>The movie is directed by Aditya Datt and produced by Eros International. The screenplay is written by Shantanu Ray Chhibber and Sheershak Anand, based on their own story. The music is composed by Gajendra Verma, Neeraj Shridhar and Sachin Gupta.</p>
51
- <p>The main cast of the movie are:</p>
52
- <ul>
53
- <li>Paresh Rawal as Abdul Razaq Khan, the host of the game show</li>
54
- <li>Rajeev Khandelwal as Vivaan Agasthi, one of the contestants</li>
55
- <li>Tina Desai as Siya Agasthi, Vivaan's wife and another contestant</li>
56
- <li>Dhruv Ganesh as Akram Khan, Mr. Khan's son who was ragged by Vivaan and his friends in college</li>
57
- <li>Asheesh Kapur as Bittoo, one of Vivaan's friends</li>
58
- <li>Sana Amin Sheikh as Neeti, one of Siya's friends</li>
59
- <li>Hanif Hilal as Ghouse, Mr. Khan's bodyguard</li>
60
- </ul>
61
- <h3>The critical reception and box office performance</h3>
62
- <p>The movie received mixed to positive reviews from critics and audiences alike. It was praised for its gripping plot, suspenseful twists, powerful performances, and social message. However, it was also criticized for its violence, implausible scenarios, and lack of originality.</p>
63
- <p>The movie performed above average at the box office, earning ₹177.95 million against a budget of ₹85 million.</p>
64
- <h2>What is Filmyzilla?</h2>
66
- <p>Filmyzilla is a notorious website that provides free downloads of pirated movies and shows from Bollywood, Hollywood, Tollywood, and other regional film industries. It is one of the most popular and visited websites for movie piracy in India and across the world.</p>
67
- <h3>A notorious website for pirating movies and shows</h3>
68
- <p>Filmyzilla has been operating for several years and has a huge collection of movies and shows in various languages, genres, and formats. It uploads the latest releases within hours or days of their theatrical or digital premiere, often in high quality. It also offers old and classic movies, as well as dubbed and subbed versions of foreign movies.</p>
69
- <p>Filmyzilla is an illegal website that violates the Indian and international laws on copyright and intellectual property rights. It hosts and distributes the pirated content without the permission or consent of the original creators or owners. It also generates revenue from advertisements and pop-ups that may contain malware or viruses.</p>
70
- <h3>The categories and formats of movies available on Filmyzilla</h3>
71
- <p>Filmyzilla has a user-friendly interface that allows users to browse and download movies and shows according to their preferences. It has various categories such as:</p>
72
- <ul>
73
- <li>Bollywood Movies</li>
74
- <li>Hollywood Movies</li>
75
- <li>Hollywood Hindi Dubbed Movies</li>
76
- <li>South Indian Hindi Dubbed Movies</li>
77
- <li>Punjabi Movies</li>
78
- <li>Bengali Movies</li>
79
- <li>Tamil Movies</li>
80
- <li>Telugu Movies</li>
81
- <li>Malayalam Movies</li>
82
- <li>Marathi Movies</li>
83
- <li>Gujarati Movies</li>
84
- <li>Kannada Movies</li>
85
- <li>Urdu Movies</li>
86
- <li>Pakistani Movies</li>
87
- <li>Nepali Movies</li>
88
- <li>Bhojpuri Movies</li>
89
- <li>Web Series</li>
90
- <li>TV Shows</li>
91
- <li>Awards Shows</li>
92
- <li>Documentaries</li>
93
- <li>Anime</li>
94
- <li>Cartoons</li>
95
- </ul>
96
- <p>Filmyzilla also offers different formats and qualities of movies and shows such as:</p>
97
- <ul>
98
- <li>MP4</li>
99
- <li>MKV</li>
100
- <li>AVI</li>
101
- <li>WEBM</li>
102
- <li>3GP</li>
103
- <li>360p</li>
104
- <li>480p</li>
105
- <li>720p</li>
106
- <li>1080p</li>
107
- <li>HDRip</li>
108
- <li>DVDRip</li>
109
- <li>BluRay</li>
110
- <li>DVDScr</li> <li>CamRip</li>
111
- <li>PreDVDRip</li>
112
- </ul>
113
- <h3>The latest movies leaked by Filmyzilla</h3>
114
- <p>Filmyzilla is notorious for leaking the latest movies and shows from various film industries. Some of the recent movies that have been leaked by Filmyzilla are:</p>
115
- <ul>
116
- <li>Bell Bottom</li>
117
- <li>Shershaah</li>
118
- <li>Bhuj: The Pride of India</li>
119
- <li>Mimi</li>
120
- <li>Fast and Furious 9</li>
121
- <li>Black Widow</li>
122
- <li>The Suicide Squad</li>
123
- <li>Jungle Cruise</li>
124
- <li>Loki</li>
125
- <li>The Family Man Season 2</li>
126
- <li>Mirzapur Season 2</li>
127
- <li>Scam 1992</li>
128
- <li>Money Heist Season 4</li>
129
- <li>Extraction</li>
130
- <li>Tenet</li>
131
- </ul>
132
- <h2>How to download Table No. 21 full movie from Filmyzilla?</h2>
133
- <p>If you are still interested in downloading Table No. 21 full movie from Filmyzilla, you should know that it is not an easy or safe process. You will have to face many risks and challenges along the way, and you may also face legal consequences for your actions. Here are the steps to download the movie from Filmyzilla:</p>
134
- <h3>The steps to access and download the movie</h3>
135
- <ol>
136
- <li>First, you will need a VPN (Virtual Private Network) service to bypass the geo-restrictions and access the Filmyzilla website. A VPN will also protect your online identity and privacy from hackers and trackers.</li>
137
- <li>Next, you will need to find a working domain name of Filmyzilla, as the website keeps changing its domain name to avoid detection and blocking by the authorities. Some of the common domain names of Filmyzilla are filmyzilla.com, filmyzilla.in, filmyzilla.net, filmyzilla.vip, filmyzilla.pro, filmyzilla.me, filmyzilla.co.in, filmyzilla.live, etc.</li>
138
- <li>Once you find a working domain name, you will need to enter it in your browser and access the Filmyzilla website. You will see a lot of advertisements and pop-ups on the website, which may redirect you to other websites or download unwanted software on your device. You will have to close them or avoid clicking on them.</li>
139
- <li>Then, you will need to search for Table No. 21 full movie on the website using the search bar or the categories. You will see a list of results with different formats and qualities of the movie. You will have to choose the one that suits your preference and device compatibility.</li>
140
- <li>After that, you will need to click on the download link or button of the movie. You may have to go through some verification processes or captcha tests before you can start the download. You may also see some fake download links or buttons that may lead you to other websites or download malware on your device. You will have to be careful and avoid them.</li>
141
- <li>Finally, you will need to wait for the download to complete and then enjoy watching Table No. 21 full movie on your device.</li>
142
- </ol> <h3>The risks and challenges of downloading from Filmyzilla</h3>
143
- <p>Downloading Table No. 21 full movie from Filmyzilla may seem like a convenient and cost-effective option, but it comes with many risks and challenges that may ruin your experience and cause you trouble. Some of the risks and challenges are:</p>
144
- <ul>
145
- <li>You may download a corrupted or incomplete file that may not play properly or damage your device.</li>
146
- <li>You may download a file that contains malware or viruses that may infect your device and compromise your data and security.</li>
147
- <li>You may face slow download speeds, frequent interruptions, or low-quality videos due to the high traffic and low bandwidth of the website.</li>
148
- <li>You may expose your online activity and identity to hackers and trackers who may monitor your browsing history, IP address, location, and personal information.</li>
149
- <li>You may violate the terms and conditions of your internet service provider (ISP) and face penalties such as throttling, suspension, or termination of your service.</li>
150
- </ul>
151
- <h3>The legal consequences of movie piracy in India</h3>
152
- <p>Downloading Table No. 21 full movie from Filmyzilla is not only risky and challenging, but also illegal and punishable by law. Movie piracy is a serious crime in India that violates the Cinematograph Act of 1952, the Information Technology Act of 2000, and the Indian Penal Code of 1860. According to these laws, anyone who downloads, uploads, streams, distributes, or exhibits pirated movies or shows without the authorization of the rightful owners can face the following legal consequences:</p>
153
- <ul>
154
- <li>A fine of up to ₹10 lakh or three times the value of the pirated content, whichever is higher.</li>
155
- <li>A jail term of up to three years.</li>
156
- <li>A civil lawsuit by the original creators or owners for damages and compensation.</li>
157
- <li>A criminal case by the government for violating the national interest and security.</li>
158
- </ul>
159
- <h2>Why you should avoid downloading Table No. 21 from Filmyzilla?</h2>
160
- <p>By now, you should have realized that downloading Table No. 21 full movie from Filmyzilla is not worth it. It is a bad idea that will not only harm you, but also the film industry and the artists who work hard to create quality content for you. Here are some reasons why you should avoid downloading Table No. 21 from Filmyzilla:</p>
161
- <h3>The ethical and moral issues of supporting piracy</h3>
162
- <p>When you download Table No. 21 full movie from Filmyzilla, you are supporting piracy, which is an unethical and immoral act. Piracy is a form of theft that deprives the original creators and owners of their rightful earnings and recognition. It also disrespects their artistic vision and hard work. By downloading pirated movies, you are encouraging more piracy and discouraging more creativity. You are also depriving yourself of the authentic and enjoyable experience of watching movies in theatres or on legal platforms.</p> <h3>The impact of piracy on the film industry and the artists</h3>
163
- <p>When you download Table No. 21 full movie from Filmyzilla, you are also affecting the film industry and the artists who depend on it for their livelihood. Piracy causes huge losses to the producers, distributors, exhibitors, and other stakeholders of the film industry. According to a report by Ernst & Young, the Indian film industry lost ₹189.5 billion in 2018 due to piracy. Piracy also affects the quality and quantity of movies that are made, as it reduces the incentive and resources for filmmakers to invest in new projects. Piracy also deprives the artists of their fair share of revenue and appreciation, which may demotivate them and affect their career prospects.</p>
164
- <h3>The alternatives to watch Table No. 21 legally and safely</h3>
165
- <p>Instead of downloading Table No. 21 full movie from Filmyzilla, you should opt for legal and safe alternatives to watch the movie. There are many platforms that offer Table No. 21 for online streaming or download at a reasonable price. Some of them are:</p>
166
- <ul>
167
- <li>Eros Now: This is the official platform of Eros International, the producer of Table No. 21. You can watch the movie on Eros Now with a subscription plan that starts from ₹49 per month. You can also download the movie for offline viewing on your device.</li>
168
- <li>YouTube: This is the most popular and accessible platform for watching movies and shows online. You can rent or buy Table No. 21 on YouTube for ₹25 or ₹50 respectively. You can also download the movie for offline viewing on your device.</li>
169
- <li>Google Play Movies: This is another platform that allows you to rent or buy movies and shows online. You can rent or buy Table No. 21 on Google Play Movies for ₹25 or ₹50 respectively. You can also download the movie for offline viewing on your device.</li>
170
- <li>Amazon Prime Video: This is one of the leading platforms for streaming movies and shows online. You can watch Table No. 21 on Amazon Prime Video with a subscription plan that starts from ₹129 per month or ₹999 per year. You can also download the movie for offline viewing on your device.</li>
171
- </ul>
172
- <p>By choosing these alternatives, you will not only enjoy watching Table No. 21 in high quality and without any interruptions, but also support the film industry and the artists who deserve your respect and admiration.</p>
173
- <h2>Conclusion</h2>
174
- <p>Table No. 21 is a thrilling and engaging movie that will keep you hooked till the end. It is a movie that deserves to be watched legally and safely, not illegally and riskily. Downloading Table No. 21 full movie from Filmyzilla is a bad idea that will expose you to many dangers and troubles, as well as harm the film industry and the artists who work hard to entertain you. Therefore, you should avoid downloading Table No. 21 from Filmyzilla and opt for legal and safe alternatives to watch the movie.</p>
175
- <h2>FAQs</h2>
176
- <p>Here are some frequently asked questions about Table No. 21 and Filmyzilla:</p>
177
- <ol>
178
- <li>Is Table No. 21 based on a true story?</li>
179
- <p>No, Table No. 21 is not based on a true story, but it is inspired by Article 21 of the Indian Constitution, which talks about the protection of life and personal liberty.</p>
180
- <li>What is the meaning of Table No. 21?</li>
181
- <p>Table No. 21 is the name of the game show that Mr. Khan hosts in the movie. It is also a reference to Article 21 of the Indian Constitution, which is violated by Mr. Khan in his quest for revenge.</p>
182
- <li>What is ragging and why is it an issue in India?</li>
183
- <p>Ragging is a form of bullying that involves physical, mental, or sexual abuse of new or junior students by senior students in educational institutions. It is an issue in India because it causes many cases of harassment, humiliation, injury, suicide, and murder among students every year.</p>
184
- <li>How does Filmyzilla get access to new movies?</li>
185
- <p>Filmyzilla gets access to new movies by using various sources such as camcorders, screen recorders, hacked servers, leaked copies, etc. It then uploads them on its website or shares them with other websites.</p>
186
- <li>How can I report or block Filmyzilla?</li>
187
- <p>You can report or block Filmyzilla by contacting your ISP, cybercrime cell, or anti-piracy cell and providing them with the details of the website. You can also use software or extensions that block access to pirated websites.</p>
188
- </ol>
189
- <p>If you are looking for a movie that will keep you on the edge of your seat, you might be tempted to download Table No. 21 full movie from Filmyzilla, a website that offers free downloads of pirated movies and shows. But before you do that, you should know what you are getting into and why it is not a good idea.</p>
190
- <h2>What is Table No. 21?</h2>
191
- <p>Table No. 21 is a 2013 Hindi thriller movie starring Paresh Rawal, Rajeev Khandelwal and Tina Desai. It is named after Article 21 of the Indian Constitution, which talks about the protection of life and personal liberty. The movie touches upon the pertinent social issue of ragging, or bullying in college campuses.</p>
192
- <h3>A brief summary of the plot</h3>
193
- <p>The movie follows Vivaan and Siya, a married couple who struggle to make ends meet. They win a trip to Fiji in a lucky draw, where they meet Mr. Khan, a mysterious and charming man who invites them to participate in a live game show called Table No. 21. He tells them that the winner of the game will get a whopping amount of ₹210 million as prize money. The rules are simple: they have to answer eight personal questions truthfully and complete a task related to each question. However, as the game progresses, the questions and tasks become increasingly horrific and reveal dark secrets from their past. They soon realize that they are trapped in a deadly game of survival with no escape.</p>
194
- <h3>The cast and crew of the movie</h3>
195
- <p>The movie is directed by Aditya Datt and produced by Eros International. The screenplay is written by Shantanu Ray Chhibber and Sheershak Anand, based on their own story. The music is composed by Gajendra Verma, Neeraj Shridhar and Sachin Gupta.</p>
196
- <p>The main cast of the movie are:</p>
197
- <ul>
198
- <li>Paresh Rawal as Abdul Razaq Khan, the host of the game show</li>
199
- <li>Rajeev Khandelwal as Vivaan Agasthi, one of the contestants</li>
200
- <li>Tina Desai as Siya Agasthi, Vivaan's wife and another contestant</li>
201
- <li>Dhruv Ganesh as Akram Khan, Mr. Khan's son who was ragged by Vivaan and his friends in college</li>
202
- <li>Asheesh Kapur as Bittoo, one of Vivaan's friends</li>
203
- <li>Sana Amin Sheikh as Neeti, one of Siya's friends</li>
204
- <li>Hanif Hilal as Ghouse, Mr. Khan's bodyguard</li>
205
- </ul>
206
- <h3>The critical reception and box office performance</h3>
207
- <p>The movie received mixed to positive reviews from critics and audiences alike. It was praised for its gripping plot, suspenseful twists, powerful performances, and social message. However, it was also criticized for its violence, implausible scenarios, and lack of originality.</p>
208
- <p>The movie performed above average at the box office, earning ₹177.95 million against a budget of ₹85 million.</p>
209
- <h2>What is Filmyzilla?</h2>
210
- <p>Filmyzilla is a notorious website that provides free downloads of pirated movies and shows from Bollywood, Hollywood, Tollywood, and other regional film industries. It is one of the most popular and visited websites for movie piracy in India and across the world.</p>
211
- <h3>A notorious website for pirating movies and shows</h3>
212
- <p>Filmyzilla has been operating for several years and has a huge collection of movies and shows in various languages, genres, and formats. It uploads the latest releases within hours or days of their theatrical or digital premiere, often in high quality. It also offers old and classic movies, as well as dubbed and subbed versions of foreign movies.</p>
213
- <p>Filmyzilla is an illegal website that violates the Indian and international laws on copyright and intellectual property rights. It hosts and distributes the pirated content without the permission or consent of the original creators or owners. It also generates revenue from advertisements and pop-ups that may contain malware or viruses.</p>
214
- <h3>The categories and formats of movies available on Filmyzilla</h3>
215
- <p>Filmyzilla has a user-friendly interface that allows users to browse and download movies and shows according to their preferences. It has various categories such as:</p>
216
- <ul>
217
- <li>Bollywood Movies</li>
218
- <li>Hollywood Movies</li>
219
- <li>Hollywood Hindi Dubbed Movies</li>
220
- <li>South Indian Hindi Dubbed Movies</li>
221
- <li>Punjabi Movies</li>
222
- <li>Bengali Movies</li>
223
- <li>Tamil Movies</li>
224
- <li>Telugu Movies</li>
225
- <li>Malayalam Movies</li>
226
- <li>Marathi Movies</li>
227
- <li>Gujarati Movies</li>
228
- <li>Kannada Movies</li>
229
- <li>Urdu Movies</li>
230
- <li>Pakistani Movies</li>
231
- <li>Nepali Movies</li>
232
- <li>Bhojpuri Movies</li>
233
- <li>Web Series</li>
234
- <li>TV Shows</li>
235
- <li>Awards Shows</li>
236
- <li>Documentaries</li>
237
- <li>Anime</li>
238
- <li>Cartoons</li>
239
- </ul>
240
- <p>Filmyzilla also offers different formats and qualities of movies and shows such as:</p>
241
- <ul>
242
- <li>MP4</li>
243
- <li>MKV</li>
244
- <li>AVI</li>
245
- <li>WEBM</li>
246
- <li>3GP</li>
247
- <li>360p</li>
248
- <li>480p</li>
249
- <li>720p</li>
250
- <li>1080p</li>
251
- <li>HDRip</li>
252
- <li>DVDRip</li>
253
- <li>BluRay</li>
254
- <li>DVDScr</li>
255
- <li>CamRip</li>
256
- <li>PreDVDRip</li>
257
- </ul>
258
- <h3>The latest movies leaked by Filmyzilla</h3>
259
- <p>Filmyzilla is notorious for leaking the latest movies and shows from various film industries. Some of the recent movies that have been leaked by Filmyzilla are:</p>
260
- <ul>
261
- <li>Bell Bottom</li>
262
- <li>Shershaah</li>
263
- <li>Bhuj: The Pride of India</li>
264
- <li>Mimi</li>
265
- <li>Fast and Furious 9</li>
266
- <li>Black Widow</li>
267
- <li>The Suicide Squad</li>
268
- <li>Jungle Cruise</li>
269
- <li>Loki</li>
270
- <li>The Family Man Season 2</li>
271
- <li>Mirzapur Season 2</li>
272
- <li>Scam 1992</li>
273
- <li>Money Heist Season 4</li>
274
- <li>Extraction</li>
275
- <li>Tenet</li>
276
- </ul>
277
- <h2>How to download Table No. 21 full movie from Filmyzilla?</h2>
278
- <p>If you are still interested in downloading Table No. 21 full movie from Filmyzilla, you should know that it is not an easy or safe process. You will have to face many risks and challenges along the way, and you may also face legal consequences for your actions. Here are the steps to download the movie from Filmyzilla:</p>
279
- <h3>The steps to access and download the movie</h3>
280
- <ol>
281
- <li>First, you will need a VPN (Virtual Private Network) service to bypass the geo-restrictions and access the Filmyzilla website. A VPN will also protect your online identity and privacy from hackers and trackers.</li>
282
- <li>Next, you will need to find a working domain name of Filmyzilla, as the website keeps changing its domain name to avoid detection and blocking by the authorities. Some of the common domain names of Filmyzilla are filmyzilla.com, filmyzilla.in, filmyzilla.net, filmyzilla.vip, filmyzilla.pro, filmyzilla.me, filmyzilla.co.in, filmyzilla.live, etc.</li>
283
- <li>Once you find a working domain name, you will need to enter it in your browser and access the Filmyzilla website. You will see a lot of advertisements and pop-ups on the website, which may redirect you to other websites or download unwanted software on your device. You will have to close them or avoid clicking on them.</li>
284
- <li>Then, you will need to search for Table No. 21 full movie on the website using the search bar or the categories. You will see a list of results with different formats and qualities of the movie. You will have to choose the one that suits your preference and device compatibility.</li>
285
- <li>After that, you will need to click on the download link or button of the movie. You may have to go through some verification processes or captcha tests before you can start the download. You may also see some fake download links or buttons that may lead you to other websites or download malware on your device. You will have to be careful and avoid them.</li>
286
- <li>Finally, you will need to wait for the download to complete and then enjoy watching Table No. 21 full movie on your device.</li>
287
- </ol>
288
- <h3>The risks and challenges of downloading from Filmyzilla</h3>
289
- <p>Downloading Table No. 21 full movie from Filmyzilla may seem like a convenient and cost-effective option, but it comes with many risks and challenges that may ruin your experience and cause you trouble. Some of the risks and challenges are:</p>
290
- <ul>
291
- <li>You may download a corrupted or incomplete file that may not play properly or damage your device.</li>
292
- <li>You may download a file that contains malware or viruses that may infect your device and compromise your data and security.</li>
293
- <li>You may face slow download speeds, frequent interruptions, or low-quality videos due to the high traffic and low bandwidth of the website.</li>
294
- <li>You may expose your online activity and identity to hackers and trackers who may monitor your browsing history, IP address, location, and personal information.</li>
295
- <li>You may violate the terms and conditions of your internet service provider (ISP) and face penalties such as throttling, suspension, or termination of your service.</li>
296
- </ul>
297
- <h3>The legal consequences of movie piracy in India</h3>
298
- <p>Downloading Table No. 21 full movie from Filmyzilla is not only risky and challenging, but also illegal and punishable by law. Movie piracy is a serious crime in India that violates the Cinematograph Act of 1952, the Information Technology Act of 2000, and the Indian Penal Code of 1860. According to these laws, anyone who downloads, uploads, streams, distributes, or exhibits pirated movies or shows without the authorization of the rightful owners can face the following legal consequences:</p>
299
- <ul>
300
- <li>A fine of up to ₹10 lakh or three times the value of the pirated content, whichever is higher.</li>
301
- <li>A jail term of up to three years.</li>
302
- <li>A civil lawsuit by the original creators or owners for damages and compensation.</li>
303
- <li>A criminal case by the government for violating the national interest and security.</li>
304
- </ul>
305
- <h2>Why you should avoid downloading Table No. 21 from Filmyzilla?</h2>
306
- <p>By now, you should have realized that downloading Table No. 21 full movie from Filmyzilla is not worth it. It is a bad idea that will not only harm you, but also the film industry and the artists who work hard to create quality content for you. Here are some reasons why you should avoid downloading Table No. 21 from Filmyzilla:</p>
307
- <h3>The ethical and moral issues of supporting piracy</h3>
308
- <p>When you download Table No. 21 full movie from Filmyzilla, you are supporting piracy, which is an unethical and immoral act. Piracy is a form of theft that deprives the original creators and owners of their rightful earnings and recognition. It also disrespects their artistic vision and hard work. By downloading pirated movies, you are encouraging more piracy and discouraging more creativity. You are also depriving yourself of the authentic and enjoyable experience of watching movies in theatres or on legal platforms.</p>
309
- <h3>The impact of piracy on the film industry and the artists</h3>
310
- <p>When you download Table No. 21 full movie from Filmyzilla, you are also affecting the film industry and the artists who depend on it for their livelihood. Piracy causes huge losses to the producers, distributors, exhibitors, and other stakeholders of the film industry. According to a report by Ernst & Young, the Indian film industry lost ₹189.5 billion in 2018 due to piracy. Piracy also affects the quality and quantity of movies that are made, as it reduces the incentive and resources for filmmakers to invest in new projects. Piracy also deprives the artists of their fair share of revenue and appreciation, which may demotivate them and affect their career prospects.</p>
311
- <h3>The alternatives to watch Table No. 21 legally and safely</h3>
312
- <p>Instead of downloading Table No. 21 full movie from Filmyzilla, you should opt for legal and safe alternatives to watch the movie. There are many platforms that offer Table No. 21 for online streaming or download at a reasonable price. Some of them are:</p>
313
- <ul>
314
- <li>Eros Now: This is the official platform of Eros International, the producer of Table No. 21. You can watch the movie on Eros Now with a subscription plan that starts from ₹49 per month. You can also download the movie for offline viewing on your device.</li>
315
- <li>YouTube: This is the most popular and accessible platform for watching movies and shows online. You can rent or buy Table No. 21 on YouTube for ₹25 or ₹50 respectively. You can also download the movie for offline viewing on your device.</li>
316
- <li>Google Play Movies: This is another platform that allows you to rent or buy movies and shows online. You can rent or buy Table No. 21 on Google Play Movies for ₹25 or ₹50 respectively. You can also download the movie for offline viewing on your device.</li>
317
- <li>Amazon Prime Video: This is one of the leading platforms for streaming movies and shows online. You can watch Table No. 21 on Amazon Prime Video with a subscription plan that starts from ₹129 per month or ₹999 per year. You can also download the movie for offline viewing on your device.</li>
318
- </ul>
319
- <p>By choosing these alternatives, you will not only enjoy watching Table No. 21 in high quality and without any interruptions, but also support the film industry and the artists who deserve your respect and admiration.</p>
320
- <h2>Conclusion</h2>
321
- <p>Table No. 21 is a thrilling and engaging movie that will keep you hooked till the end. It is a movie that deserves to be watched legally and safely, not illegally and riskily. Downloading Table No. 21 full movie from Filmyzilla is a bad idea that will expose you to many dangers and troubles, as well as harm the film industry and the artists who work hard to entertain you. Therefore, you should avoid downloading Table No. 21 from Filmyzilla and opt for legal and safe alternatives to watch the movie.</p>
322
- <h2>FAQs</h2>
323
- <p>Here are some frequently asked questions about Table No. 21 and Filmyzilla:</p>
324
- <ol>
325
- <li>Is Table No. 21 based on a true story?</li>
326
- <p>No, Table No. 21 is not based on a true story, but it is inspired by Article 21 of the Indian Constitution, which talks about the protection of life and personal liberty.</p>
327
- <li>What is the meaning of Table No. 21?</li>
328
- <p>Table No. 21 is the name of the game show that Mr. Khan hosts in the movie. It is also a reference to Article 21 of the Indian Constitution, which is violated by Mr. Khan in his quest for revenge.</p>
329
- <li>What is ragging and why is it an issue in India?</li>
330
- <p>Ragging is a form of bullying that involves physical, mental, or sexual abuse of new or junior students by senior students in educational institutions. It is an issue in India because it causes many cases of harassment, humiliation, injury, suicide, and murder among students every year.</p>
331
- <li>How does Filmyzilla get access to new movies?</li>
332
- <p>Filmyzilla gets access to new movies by using various sources such as camcorders, screen recorders, hacked servers, leaked copies, etc. It then uploads them on its website or shares them with other websites.</p>
333
- <li>How can I report or block Filmyzilla?</li>
334
- <p>You can report or block Filmyzilla by contacting your ISP, cybercrime cell, or anti-piracy cell and providing them with the details of the website. You can also use software or extensions that block access to pirated websites.</p>
335
- </ol>
 
spaces/1phancelerku/anime-remove-background/Download Treasure Mathstorm and Join the Super Solvers in an Amazing Adventure.md DELETED
@@ -1,152 +0,0 @@
1
- <br />
2
- <h1>How to Download Treasure Mathstorm: A Fun and Educational Game for Kids</h1>
3
- <p>Do you want to help your kids learn math in a fun and engaging way? Do you want to introduce them to a classic educational game that has entertained and challenged millions of children around the world? If you answered yes, then you should download Treasure Mathstorm, a game that combines math, adventure, and humor in a delightful way.</p>
5
- <p>Treasure Mathstorm is an educational game designed for kids ages 6 to 8. It was developed by The Learning Company in 1992 and it is part of the Super Solvers series. In this game, you have to help the elves restore Treasure Mountain by solving math problems and finding treasures. Along the way, you will encounter various obstacles, puzzles, and surprises that will make your journey more exciting.</p>
6
- <p>In this article, we will tell you everything you need to know about Treasure Mathstorm, including what it is, how to download it, and how to play it. We will also share some tips and tricks to help you get the most out of this game. So, let's get started!</p>
7
- <h2>What is Treasure Mathstorm?</h2>
8
- <p>Treasure Mathstorm is an educational game that teaches kids various math skills and concepts in a fun and interactive way. It is suitable for kids who are in grades 1 to 3 or who have a basic knowledge of arithmetic. The game covers topics such as addition, subtraction, multiplication, division, fractions, decimals, time, money, measurement, geometry, logic, and problem-solving.</p>
9
- <h3>The story and the goal of the game</h3>
10
- <p>The story of Treasure Mathstorm is that the Master of Mischief, a villain who likes to cause trouble, has invented a machine that changes the weather and freezes Treasure Mountain. He has also hidden all the treasures on the mountain and locked them with math problems. Your goal is to restore the mountain by locating different treasures on the mountain and returning them to the castle at the top. When all the treasures have been restored, the king will have his power back and all of the ice will melt.</p>
56
- <h3>The math skills and concepts covered in the game</h3>
57
- <p>The math skills and concepts covered in Treasure Mathstorm are divided into three levels of difficulty: easy, medium, and hard. You can choose which level you want to play at any time during the game. The math skills and concepts covered in each level are as follows:</p>
58
- <ul>
59
- <li>Easy: addition and subtraction up to 18, telling time by hours and half-hours, counting money up to $1.00, identifying shapes and colors.</li>
60
- <li>Medium: addition and subtraction up to 99, telling time by quarter-hours, counting money up to $5.00, identifying fractions (halves, thirds, fourths), measuring length with inches.</li>
61
- <li>Hard: addition and subtraction up to 999, telling time by minutes, counting money up to $10.00, identifying fractions (sixths, eighths), measuring length with feet.</li>
62
- </ul>
63
- <h3>The features and benefits of the game</h3>
64
- <p>Treasure Mathstorm has many features and benefits that make it a great educational game for kids. Some of them are:</p>
65
- <ul>
66
- <li>It adapts to your child's skill level and progress. The game automatically adjusts the difficulty of the math problems based on your child's performance. It also keeps track of your child's scores and achievements.</li>
67
- <li>It provides feedback and encouragement. The game gives your child immediate feedback on whether they answered a math problem correctly or incorrectly. It also provides hints and explanations when needed. It also praises your child for their efforts and achievements.</li>
68
- <li>It offers variety and fun. The game has different types of math problems and activities that keep your child engaged and motivated. It also has colorful graphics, animations, sound effects, and music that make the game more enjoyable.</li>
69
- <li>It fosters creativity and exploration. The game allows your child to explore the mountain and discover different treasures and surprises. It also lets your child customize their character and their backpack with different items and accessories.</li>
70
- </ul>
71
- <h2>How to download Treasure Mathstorm?</h2>
72
- <p>If you want to download Treasure Mathstorm, you need to make sure that your computer meets the system requirements and compatibility of the game. You also need to find a reliable source and link to download the game. Finally, you need to follow the steps and tips to install and run the game on your computer.</p>
73
- <h3>The system requirements and compatibility of the game</h3>
74
- <p>Treasure Mathstorm is an old game that was originally designed for DOS and Windows 3.x operating systems. Therefore, it may not run smoothly on modern computers with newer operating systems such as Windows 10, Mac OS, or Linux. However, there are ways to make the game compatible with your computer by using emulators or virtual machines.</p>
75
- <p>An emulator is a software that mimics the functions of an old operating system or device on your computer. A virtual machine is a software that creates a separate environment on your computer that runs an old operating system or device. Both methods allow you to run old games and programs on your computer without affecting your main system.</p>
76
- <p>Some of the popular emulators and virtual machines that you can use to run Treasure Mathstorm are:</p>
77
- <ul>
78
- <li>DOSBox: an emulator that runs DOS games and programs on Windows, Mac OS, Linux, and other platforms.</li>
79
- <li>ScummVM: an emulator for games built on certain classic adventure-game engines; Treasure Mathstorm is not one of them, so DOSBox is the better fit for this game.</li>
80
- <li>VirtualBox: a virtual machine that runs various operating systems such as Windows 3.x, Windows 95, Windows 98, etc.</li>
81
- <li>VMware: another virtual machine that runs various operating systems such as Windows 3.x, Windows 95, Windows 98, etc.</li>
82
- </ul>
83
- <p>You can download these emulators and virtual machines from their official websites or from other trusted sources. You can also find tutorials and guides on how to use them online.</p>
84
- <h3>The sources and links to download the game</h3>
85
- <p>Once you have chosen an emulator or a virtual machine to run Treasure Mathstorm, you need to find a source and a link to download the game. There are many websites that offer old games for free or for a small fee. However, not all of them are safe and legal. Some of them may contain viruses, malware, or spyware that can harm your computer or steal your personal information. Some of them may also violate the copyright laws or the terms of service of the original developers or publishers of the game.</p>
86
- <p>Therefore, you need to be careful and selective when choosing a source and a link to download Treasure Mathstorm. You need to check the reputation and the reviews of the website before downloading anything from it. You also need to scan the downloaded files with an antivirus program before opening them. You also need to respect the rights and the wishes of the original developers or publishers of the game.</p>
87
- <p>Some of the reputable and legal sources and links to download Treasure Mathstorm are:</p>
88
- <ul>
89
- <li>The Learning Company: the original developer and publisher of Treasure Mathstorm. They offer a digital download of the game for $9.99 on their website.</li>
90
- <li>GOG.com: a digital distribution platform that sells old games that are DRM-free (no copy protection) and compatible with modern systems. They offer Treasure Mathstorm for $5.99 on their website.</li>
91
- <li>Abandonia: a website that hosts old games that are abandoned by their developers or publishers. They offer Treasure Mathstorm for free on their website.</li>
92
- </ul>
93
- <h3>The steps and tips to install and run the game</h3>
94
- <p>After you have downloaded Treasure Mathstorm from a source and a link of your choice, you need to follow these steps and tips to install and run the game on your computer:</p>
95
- <ol>
96
- <li>Extract the downloaded files from the ZIP or RAR archive using a program such as WinZip or WinRAR. </li>
97
- <li>Create a folder on your computer where you want to store the game files.</li>
98
- <li>Copy or move the extracted files to the folder you created in step 2.</li>
99
- <li>Open the emulator or the virtual machine of your choice and configure it according to the instructions and the system requirements of the game.</li>
100
- <li>Mount or load the game folder or the game file (usually a .exe or a .bat file) on the emulator or the virtual machine and start the game; a concrete example is sketched right after this list.</li>
101
- <li>Enjoy playing Treasure Mathstorm!</li>
102
- </ol>
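- <p>To make steps 4 and 5 concrete, here is a minimal sketch that launches the game through DOSBox from Python. It is illustrative only: the folder name, the executable name <code>TREASURE.EXE</code>, and the assumption that <code>dosbox</code> is on your PATH all depend on your own setup.</p>
- <pre><code>import subprocess
- from pathlib import Path
- 
- # Hypothetical install folder from step 2; point this at wherever you copied the files.
- game_dir = Path.home() / "games" / "treasure"
- 
- # DOSBox mounts the executable's folder automatically when given a program to run;
- # "-exit" closes the emulator once the game quits.
- subprocess.run(["dosbox", str(game_dir / "TREASURE.EXE"), "-exit"], check=True)
- </code></pre>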
103
- <p>Some tips and tricks to help you install and run the game are:</p>
104
- <ul>
105
- <li>If you encounter any errors or problems while installing or running the game, you can try to change the settings of the emulator or the virtual machine, such as the memory, the sound, the graphics, etc.</li>
106
- <li>If you want to save your progress and your scores in the game, you need to create a save file on the emulator or the virtual machine. You can also backup your save file on your computer or on a cloud service.</li>
107
- <li>If you want to play Treasure Mathstorm with other players online, you can use a program such as DOSBox Daum or DOSBox-X that supports multiplayer mode. You can also use a program such as Hamachi or Tunngle that creates a virtual network for online gaming.</li>
108
- </ul>
109
- <h2>How to play Treasure Mathstorm?</h2>
110
- <p>Now that you have installed and run Treasure Mathstorm on your computer, you are ready to play it. In this section, we will explain how to play Treasure Mathstorm, including the main screen and the menu options of the game, the levels and the challenges of the game, and the rewards and the achievements of the game.</p>
111
- <h3>The main screen and the menu options of the game</h3>
112
- <p>The main screen of Treasure Mathstorm is where you can see your character, your backpack, your score, your level, and your time. You can also see the mountain and the castle in the background. You can use your mouse or your keyboard to move your character around and interact with different objects and characters on the screen.</p>
113
- <p>The menu options of Treasure Mathstorm are located at the top of the screen. You can access them by clicking on them with your mouse or by pressing a key on your keyboard. The menu options are:</p>
114
- <ul>
115
- <li>File: where you can start a new game, load a saved game, save your current game, quit the game, or change your player name.</li>
116
- <li>Options: where you can change the difficulty level of the math problems, turn on or off the music and sound effects, adjust the volume, or view the credits.</li>
117
- <li>Help: where you can get help on how to play Treasure Mathstorm, how to use DOSBox or ScummVM, or how to contact The Learning Company.</li>
118
- </ul>
119
- <h3>The levels and the challenges of the game</h3>
120
- <p>Treasure Mathstorm has three levels of difficulty: easy, medium, and hard. You can choose which level you want to play at any time during the game, and your choice affects the type and number of math problems you have to solve.</p>
- <p>The game also has 10 levels of challenges that you have to complete in order to restore the mountain. Each level has a different theme and a different number of treasures to find:</p>
- <ul>
- <li>Level 1: Snowy Slopes (10 treasures)</li>
- <li>Level 2: Icy Caves (15 treasures)</li>
- <li>Level 3: Frozen Forest (20 treasures)</li>
- <li>Level 4: Snowman Village (25 treasures)</li>
- <li>Level 5: Ice Castle (30 treasures)</li>
- <li>Level 6: Crystal Caverns (35 treasures)</li>
- <li>Level 7: Blizzard Bluffs (40 treasures)</li>
- <li>Level 8: Polar Peak (45 treasures)</li>
- <li>Level 9: Cloud City (50 treasures)</li>
- <li>Level 10: Treasure Mountain (55 treasures)</li>
- </ul>
- <p>To complete a level, you have to find all the treasures on that level and return them to the castle at the top of the mountain. To find a treasure, you have to solve the math problem attached to it; to return it, you carry it to the castle and drop it in the correct bin.</p>
- <p>The math problems are varied and fun. They include:</p>
- <ul>
- <li>Addition and subtraction problems that involve snowballs, snowflakes, icicles, etc.</li>
- <li>Multiplication and division problems that involve snowmen, penguins, polar bears, etc.</li>
- <li>Fraction problems that involve pies, pizzas, cakes, etc.</li>
- <li>Decimal problems that involve thermometers, clocks, scales, etc.</li>
- <li>Time problems that involve clocks, watches, calendars, etc.</li>
- <li>Money problems that involve coins, bills, wallets, etc.</li>
- <li>Measurement problems that involve rulers, tapes, scales, etc.</li>
- <li>Geometry problems that involve shapes, angles, lines, etc.</li>
- <li>Logic problems that involve patterns, sequences, puzzles, etc.</li>
- <li>Problem-solving problems that involve word problems, equations, graphs, etc.</li>
- </ul>
- <p>The problems are not only educational but also entertaining, with humorous scenarios and characters. For example:</p>
- <ul>
- <li>You have to help a snowman find his missing nose by solving a fraction problem.</li>
- <li>You have to help a penguin buy a hat by solving a money problem.</li>
- <li>You have to help a polar bear catch a fish by solving a geometry problem.</li>
- <li>You have to help a cloud fairy make a rainbow by solving a logic problem.</li>
- </ul>
- <h3>The rewards and the achievements of the game</h3>
121
- <p>Treasure Mathstorm has many rewards and achievements that motivate you to play the game and improve your math skills. Some of them are:</p>
122
- <ul>
123
- <li>You can earn stars for each math problem you solve correctly. The more stars you earn, the higher your score will be.</li>
124
- <li>You can earn medals for each level you complete. The medals are bronze, silver, gold, and platinum. The higher the medal, the better your performance on that level.</li>
125
- <li>You can earn trophies for each level of difficulty you complete. The trophies are easy, medium, and hard. The higher the trophy, the more challenging the math problems you solved.</li>
126
- <li>You can earn badges for special achievements in the game. The badges are explorer, adventurer, mastermind, super solver, etc. The more badges you earn, the more skills and concepts you mastered.</li>
127
- <li>You can customize your character and your backpack with different items and accessories that you find or buy in the game. You can also change your character's name and appearance.</li>
128
- </ul>
129
- <h2>Conclusion</h2>
130
- <p>Treasure Mathstorm is an educational game that teaches kids math skills and concepts in a fun and interactive way. It is suitable for kids in grades 1 to 3 or with a basic knowledge of arithmetic, and it covers topics such as addition, subtraction, multiplication, division, fractions, decimals, time, money, measurement, geometry, logic, and problem-solving.</p>
- <p>It is also a fun and engaging game that combines math, adventure, and humor. Colorful graphics, animations, sound effects, and music make it enjoyable, while obstacles, puzzles, surprises, and varied math activities keep it exciting and interesting.</p>
- <p>Treasure Mathstorm is an old game that was originally designed for DOS and Windows 3.x, so it may not run smoothly on modern computers with Windows 10, Mac OS, or Linux. However, you can make it compatible by using an emulator or a virtual machine.</p>
- <p>You can download the game from various sources online, but be careful and selective: check the reputation of the website, scan the downloaded files with an antivirus program, and respect the rights of the original developers and publishers.</p>
- <p>We hope this article has helped you learn how to download Treasure Mathstorm: a fun and educational game for kids. If you have any questions or comments, please feel free to contact us or leave a comment below. Thank you for reading!</p>
- <h3>FAQs</h3>
131
- <p>Here are some frequently asked questions about Treasure Mathstorm:</p>
132
- <ol>
133
- <li>Q: How long does it take to complete Treasure Mathstorm?</li>
134
- <li>A: It depends on your skill level and your speed. However, it usually takes about 10 to 15 hours to complete all 10 levels of Treasure Mathstorm.</li>
135
- <li>Q: How can I get more stars, medals, trophies, and badges in Treasure Mathstorm?</li>
136
- <li>A: You can get more stars by solving more math problems correctly. You can get more medals by completing more levels with higher scores. You can get more trophies by completing more levels of difficulty. You can get more badges by achieving special goals in the game.</li>
137
- <li>Q: How can I save my progress and my scores in Treasure Mathstorm?</li>
138
- <li>A: You can save your progress and your scores in Treasure Mathstorm by creating a save file on the emulator or the virtual machine that you are using. You can also backup your save file on your computer or on a cloud service.</li>
139
- <li>Q: How can I play Treasure Mathstorm with other players online?</li>
140
- <li>A: You can play Treasure Mathstorm with other players online by using a program such as DOSBox Daum or DOSBox-X that supports multiplayer mode. You can also use a program such as Hamachi or Tunngle that creates a virtual network for online gaming.</li>
141
- <li>Q: Where can I find more information and resources about Treasure Mathstorm?</li>
142
- <li>A: You can find more information and resources about Treasure Mathstorm on these websites:</li>
143
- <ul>
144
- <li>The Learning Company: the original developer and publisher of Treasure Mathstorm. They offer a digital download of the game for $9.99 on their website.</li>
145
- <li>GOG.com: a digital distribution platform that sells old games that are DRM-free (no copy protection) and compatible with modern systems. They offer Treasure Mathstorm for $5.99 on their website.</li>
146
- <li>Abandonia: a website that hosts old games that are abandoned by their developers or publishers. They offer Treasure Mathstorm for free on their website.</li>
147
- <li>MobyGames: a website that provides information and reviews about old games. They have a page dedicated to Treasure Mathstorm on their website.</li>
148
- <li>Wikipedia: a free online encyclopedia that provides information about various topics. They have an article about Treasure Mathstorm on their website.</li>
149
- </ul>
150
- </ol>
 
spaces/30SecondsToMoon/30SecondsToMoon/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: 30SecondsToMoon
3
- emoji: 📉
4
- colorFrom: green
5
- colorTo: pink
6
- sdk: gradio
7
- sdk_version: 3.50.2
8
- app_file: app.py
9
- pinned: false
10
- license: mit
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/AIFILMS/StyleGANEX/models/mtcnn/mtcnn_pytorch/src/visualization_utils.py DELETED
@@ -1,31 +0,0 @@
1
- from PIL import ImageDraw
2
-
3
-
4
- def show_bboxes(img, bounding_boxes, facial_landmarks=()):  # empty tuple avoids a mutable default argument
5
- """Draw bounding boxes and facial landmarks.
6
-
7
- Arguments:
8
- img: an instance of PIL.Image.
9
- bounding_boxes: a float numpy array of shape [n, 5].
10
- facial_landmarks: a float numpy array of shape [n, 10].
11
-
12
- Returns:
13
- an instance of PIL.Image.
14
- """
15
-
16
- img_copy = img.copy()
17
- draw = ImageDraw.Draw(img_copy)
18
-
19
- for b in bounding_boxes:
20
- draw.rectangle([
21
- (b[0], b[1]), (b[2], b[3])
22
- ], outline='white')
23
-
24
- for p in facial_landmarks:
25
- for i in range(5):
26
- draw.ellipse([
27
- (p[i] - 1.0, p[i + 5] - 1.0),
28
- (p[i] + 1.0, p[i + 5] + 1.0)
29
- ], outline='blue')
30
-
31
- return img_copy
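- 
- 
- # A hypothetical usage sketch (illustrative only; the values are made up).
- # It follows the layout documented above: boxes of shape [n, 5] and
- # landmarks of shape [n, 10], i.e. five x-coordinates then five y-coordinates.
- if __name__ == '__main__':
-     import numpy as np
-     from PIL import Image
- 
-     img = Image.new('RGB', (160, 160), 'gray')
-     boxes = np.array([[30.0, 30.0, 130.0, 130.0, 0.99]])
-     landmarks = np.array([[60, 100, 80, 55, 105, 60, 60, 80, 100, 100]])
-     show_bboxes(img, boxes, landmarks).save('demo_bboxes.png')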
 
spaces/AIWaves/Software_Company/app.py DELETED
@@ -1,254 +0,0 @@
1
- import sys
2
-
3
- import os
4
- from gradio_base import WebUI, UIHelper, PORT, HOST, Client
5
- from gradio_config import GradioConfig as gc
6
- from typing import List, Tuple, Any
7
- import gradio as gr
8
- import time
9
-
10
- class CodeUI(WebUI):
11
-
12
- def render_and_register_ui(self):
13
- self.agent_name:list = [self.cache["agents_name"]] if isinstance(self.cache["agents_name"], str) else self.cache['agents_name']
14
- gc.add_agent(self.agent_name)
15
-
16
- def __init__(
17
- self,
18
- client_cmd: list,
19
- socket_host: str = HOST,
20
- socket_port: int = PORT,
21
- bufsize: int = 1024,
22
- ui_name: str = "CodeUI"
23
- ):
24
- super(CodeUI, self).__init__(client_cmd, socket_host, socket_port, bufsize, ui_name)
25
- self.first_recieve_from_client()
26
- self.data_history = list()
27
- self.caller = 0
28
-
29
- def construct_ui(self):
30
- with gr.Blocks(css=gc.CSS) as demo:
31
- gr.Markdown("""# Agents""")
32
- gr.Markdown("""**Agents** is an open-source library/framework for building autonomous language agents.if you want to know more about **Agents**, please check our<a href="https://arxiv.org/pdf/2309.07870.pdf">📄 Paper</a> and<a href="http://www.aiwaves-agents.com/">📦 Github</a>. Here is a demo of **Agents**.""")
33
- gr.Markdown("""<font size=5>If an error occurs or the queue is too long, please create your own demo by clicking <font color=red>Duplicate This Space</font> in the upper right corner. Please be patient with building, thank you! It takes about 3-4 minutes.</font>""")
34
- with gr.Row():
35
- with gr.Column():
36
- self.text_api = gr.Textbox(
37
- value = self.cache["api_key"],
38
- placeholder="openai key",
39
- label="Please input valid openai key for gpt-3.5-turbo-16k."
40
- )
41
- self.radio_mode = gr.Radio(
42
- [Client.SINGLE_MODE],
43
- value=Client.SINGLE_MODE,
44
- interactive=True,
45
- label = Client.MODE_LABEL,
46
- info = Client.MODE_INFO
47
- )
48
- self.chatbot = gr.Chatbot(
49
- elem_id="chatbot1"
50
- )
51
- self.btn_next = gr.Button(
52
- value="Next Agent",
53
- visible=False, elem_id="btn"
54
- )
55
- with gr.Row():
56
- self.text_requirement = gr.Textbox(
57
- value=self.cache['requirement'],
58
- placeholder="Please enter your content",
59
- scale=9,
60
- )
61
- self.btn_start = gr.Button(
62
- value="Start!",
63
- scale=1
64
- )
65
- self.btn_reset = gr.Button(
66
- value="Restart",
67
- visible=False
68
- )
69
-
70
- with gr.Column():
71
- self.file = gr.File(visible=False)
72
- self.chat_code_show = gr.Chatbot(
73
- elem_id="chatbot1",
74
- visible=False
75
- )
76
-
77
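- # Two-phase handler: btn_send_when_click locks the inputs and sends the request to the client process; the chained btn_send_after_click streams the agents' output back into the UI.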
- self.btn_start.click(
78
- fn=self.btn_send_when_click,
79
- inputs=[self.chatbot, self.text_requirement, self.radio_mode, self.text_api],
80
- outputs=[self.chatbot, self.btn_start, self.text_requirement, self.btn_reset]
81
- ).then(
82
- fn=self.btn_send_after_click,
83
- inputs=[self.file, self.chatbot, self.chat_code_show, self.btn_start, self.btn_reset, self.text_requirement],
84
- outputs=[self.file, self.chatbot, self.chat_code_show, self.btn_start, self.btn_reset, self.text_requirement, self.btn_next]
85
- )
86
- self.text_requirement.submit(
87
- fn=self.btn_send_when_click,
88
- inputs=[self.chatbot, self.text_requirement, self.radio_mode, self.text_api],  # btn_send_when_click expects (chatbot, requirement, mode, api)
89
- outputs=[self.chatbot, self.btn_start, self.text_requirement, self.btn_reset]
90
- ).then(
91
- fn=self.btn_send_after_click,
92
- inputs=[self.file, self.chatbot, self.chat_code_show, self.btn_start, self.btn_reset, self.text_requirement],
93
- outputs=[self.file, self.chatbot, self.chat_code_show, self.btn_start, self.btn_reset, self.text_requirement, self.btn_next]
94
- )
95
- self.btn_reset.click(
96
- fn=self.btn_reset_when_click,
97
- inputs=[],
98
- outputs=[self.file, self.chatbot, self.chat_code_show, self.btn_start, self.btn_reset, self.text_requirement, self.btn_next]
99
- ).then(
100
- fn=self.btn_reset_after_click,
101
- inputs=[self.file, self.chatbot, self.chat_code_show, self.btn_start, self.btn_reset, self.text_requirement],
102
- outputs=[self.file, self.chatbot, self.chat_code_show, self.btn_start, self.btn_reset, self.text_requirement, self.btn_next]
103
- )
104
- self.file.select(
105
- fn=self.file_when_select,
106
- inputs=[self.file],
107
- outputs=[self.chat_code_show]
108
- )
109
- self.btn_next.click(
110
- fn = self.btn_next_when_click,
111
- inputs=[],
112
- outputs=[self.btn_next]
113
- ).then(
114
- fn=self.btn_send_after_click,
115
- inputs=[self.file, self.chatbot, self.chat_code_show, self.btn_start, self.btn_reset, self.text_requirement],
116
- outputs=[self.file, self.chatbot, self.chat_code_show, self.btn_start, self.btn_reset, self.text_requirement, self.btn_next]
117
- )
118
-
119
- self.demo = demo
120
-
121
-
122
- def handle_message(self, history:list, state, agent_name, token, node_name):
123
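- # state % 10 selects the render action: 0 = open a new bubble in the current row, 1 = append the token to the current bubble, 2 = start a new chat row.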
- if state % 10 == 0:
124
- self.data_history.append({agent_name: token})
125
- elif state % 10 == 1:
126
- # Same state: append the token to the current bubble.
127
- if len(self.data_history) == 0:
128
- self.data_history.append({agent_name:""})
129
- self.data_history[-1][agent_name] += token
130
- elif state % 10 == 2:
131
- # New state: start a new chat row and reset the bubble history.
132
- history.append([None, ""])
133
- self.data_history.clear()
134
- self.data_history.append({agent_name: token})
135
- else:
136
- assert False, "Invalid state."
137
- render_data = self.render_bubble(history, self.data_history, node_name, render_node_name=True)
138
- return render_data
139
-
140
- def btn_send_when_click(self, chatbot, text_requirement, mode, api):
141
- """
142
- inputs=[self.chatbot, self.text_requirement, radio, text_api],
143
- outputs=[self.chatbot, self.btn_start, self.text_requirement, self.btn_reset]
144
- """
145
- chatbot = [[UIHelper.wrap_css(content=text_requirement, name="User"), None]]
146
- yield chatbot,\
147
- gr.Button.update(visible=True, interactive=False, value="Running"),\
148
- gr.Textbox.update(visible=True, interactive=False, value=""),\
149
- gr.Button.update(visible=False, interactive=False)
150
- self.send_start_cmd({'requirement': text_requirement, "mode": mode, "api_key": api})
151
- return
152
-
153
- def btn_send_after_click(
154
- self,
155
- file,
156
- history,
157
- show_code,
158
- btn_send,
159
- btn_reset,
160
- text_requirement
161
- ):
162
- """
163
- outputs=[self.file, self.chatbot, self.chat_code_show, self.btn_start, self.btn_reset, self.text_requirement, self.btn_next]
164
- """
165
- if self.caller == 0:
166
- self.data_history = list()
167
- self.caller = 0
168
- receive_server = self.receive_server
169
- while True:
170
- data_list: List = receive_server.send(None)
171
- for item in data_list:
172
- data = eval(item)
173
- assert isinstance(data, list)
174
- state, agent_name, token, node_name = data
175
- assert isinstance(state, int)
176
- assert state in [10, 11, 12, 99, 98]
177
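- # 99 = run finished, 98 = paused until the "Next Agent" button is pressed; 10/11/12 carry streaming tokens and are handled by handle_message.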
- if state == 99:
178
- # finish
179
- fs = [self.cache['pwd']+'/output_code/'+_ for _ in os.listdir(self.cache['pwd']+'/output_code')]
180
- yield gr.File.update(value=fs, visible=True, interactive=True),\
181
- history, \
182
- gr.Chatbot.update(visible=True),\
183
- gr.Button.update(visible=True, interactive=True, value="Start"),\
184
- gr.Button.update(visible=True, interactive=True),\
185
- gr.Textbox.update(visible=True, interactive=True, placeholder="Please input your requirement", value=""),\
186
- gr.Button.update(visible=False)
187
- return
188
- elif state == 98:
189
- yield gr.File.update(visible=False),\
190
- history, \
191
- gr.Chatbot.update(visible=False),\
192
- gr.Button.update(visible=True, interactive=False),\
193
- gr.Button.update(visible=True, interactive=True),\
194
- gr.Textbox.update(visible=True, interactive=False),\
195
- gr.Button.update(visible=True, value=f"Next Agent: 🤖{agent_name} | Next Node: ⭕{node_name}")
196
- return
197
- history = self.handle_message(history, state, agent_name, token, node_name)
198
- yield gr.File.update(visible=False),\
199
- history, \
200
- gr.Chatbot.update(visible=False),\
201
- gr.Button.update(visible=True, interactive=False),\
202
- gr.Button.update(visible=False, interactive=False),\
203
- gr.Textbox.update(visible=True, interactive=False),\
204
- gr.Button.update(visible=False)
205
-
206
- def btn_reset_when_click(self):
207
- """
208
- inputs = []
209
- outputs = [self.file, self.chatbot, self.chat_code_show, self.btn_start, self.btn_reset, self.text_requirement, self.btn_next]
210
- """
211
- return gr.File.update(visible=False),\
212
- None, None, gr.Button.update(value="Restarting...", interactive=False),\
213
- gr.Button.update(value="Restarting...", interactive=False),\
214
- gr.Textbox.update(value="Restarting", interactive=False),\
215
- gr.Button.update(visible=False)
216
-
217
- def btn_reset_after_click(
218
- self,
219
- file,
220
- chatbot,
221
- show_code,
222
- btn_send,
223
- btn_reset,
224
- text_requirement
225
- ):
226
- self.reset()
227
- self.first_recieve_from_client(reset_mode=True)
228
- return gr.File.update(value=None, visible=False),\
229
- gr.Chatbot.update(value=None, visible=True),\
230
- gr.Chatbot.update(value=None, visible=False),\
231
- gr.Button.update(value="Start", visible=True, interactive=True),\
232
- gr.Button.update(value="Restart", interactive=False, visible=False),\
233
- gr.Textbox.update(value=self.cache['requirement'], interactive=True, visible=True),\
234
- gr.Button.update(visible=False)
235
-
236
- def file_when_select(self, file):
237
- CODE_PREFIX = "```python\n{}\n```"
238
- with open(file.name, "r", encoding='utf-8') as f:
239
- contents = f.readlines()
240
- codes = "".join(contents)
241
- return [[CODE_PREFIX.format(codes),None]]
242
-
243
- def btn_next_when_click(self):
244
- self.caller = 1  # keep the accumulated self.data_history when streaming resumes
245
- self.send_message("nothing")
246
- time.sleep(0.5)
247
- yield gr.Button.update(visible=False)
248
- return
249
-
250
-
251
- if __name__ == '__main__':
252
- ui = CodeUI(client_cmd=["python","gradio_backend.py"])
253
- ui.construct_ui()
254
- ui.run()
 
spaces/AIZero2Hero4Health/4-ImageSimilaritySearch-SL/app.py DELETED
@@ -1,186 +0,0 @@
1
- from html import escape
2
- import re
3
- import streamlit as st
4
- import pandas as pd, numpy as np
5
- from transformers import CLIPProcessor, CLIPModel
6
- from st_clickable_images import clickable_images
7
-
8
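- # Cache the heavy objects across Streamlit reruns; the hash_funcs below tell st.cache not to try hashing the model, processor, or loaded data.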
- @st.cache(
9
- show_spinner=False,
10
- hash_funcs={
11
- CLIPModel: lambda _: None,
12
- CLIPProcessor: lambda _: None,
13
- dict: lambda _: None,
14
- },
15
- )
16
- def load():
17
- model = CLIPModel.from_pretrained("openai/clip-vit-large-patch14")
18
- processor = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14")
19
- df = {0: pd.read_csv("data.csv"), 1: pd.read_csv("data2.csv")}
20
- embeddings = {0: np.load("embeddings.npy"), 1: np.load("embeddings2.npy")}
21
- for k in [0, 1]:
22
- embeddings[k] = embeddings[k] / np.linalg.norm(
23
- embeddings[k], axis=1, keepdims=True
24
- )
25
- return model, processor, df, embeddings
26
-
27
-
28
- model, processor, df, embeddings = load()
29
- source = {0: "\nSource: Unsplash", 1: "\nSource: The Movie Database (TMDB)"}
30
-
31
-
32
- def compute_text_embeddings(list_of_strings):
33
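- # CLIP text features, L2-normalized so that a plain dot product equals cosine similarity.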
- inputs = processor(text=list_of_strings, return_tensors="pt", padding=True)
34
- result = model.get_text_features(**inputs).detach().numpy()
35
- return result / np.linalg.norm(result, axis=1, keepdims=True)
36
-
37
-
38
- def image_search(query, corpus, n_results=24):
39
- positive_embeddings = None
40
-
41
- def concatenate_embeddings(e1, e2):
42
- if e1 is None:
43
- return e2
44
- else:
45
- return np.concatenate((e1, e2), axis=0)
46
-
47
- splitted_query = query.split("EXCLUDING ")
48
- dot_product = 0
49
- k = 0 if corpus == "Unsplash" else 1
50
- if len(splitted_query[0]) > 0:
51
- positive_queries = splitted_query[0].split(";")
52
- for positive_query in positive_queries:
53
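- # e.g. "[Unsplash:123] beach" reuses image 123's stored embedding plus the extra text "beach".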
- match = re.match(r"\[(Movies|Unsplash):(\d{1,5})\](.*)", positive_query)
54
- if match:
55
- corpus2, idx, remainder = match.groups()
56
- idx, remainder = int(idx), remainder.strip()
57
- k2 = 0 if corpus2 == "Unsplash" else 1
58
- positive_embeddings = concatenate_embeddings(
59
- positive_embeddings, embeddings[k2][idx : idx + 1, :]
60
- )
61
- if len(remainder) > 0:
62
- positive_embeddings = concatenate_embeddings(
63
- positive_embeddings, compute_text_embeddings([remainder])
64
- )
65
- else:
66
- positive_embeddings = concatenate_embeddings(
67
- positive_embeddings, compute_text_embeddings([positive_query])
68
- )
69
- dot_product = embeddings[k] @ positive_embeddings.T
70
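- # Center each query's scores on their median and rescale by the max, then keep the per-image minimum so a result must score well on every positive query.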
- dot_product = dot_product - np.median(dot_product, axis=0)
71
- dot_product = dot_product / np.max(dot_product, axis=0, keepdims=True)
72
- dot_product = np.min(dot_product, axis=1)
73
-
74
- if len(splitted_query) > 1:
75
- negative_queries = (" ".join(splitted_query[1:])).split(";")
76
- negative_embeddings = compute_text_embeddings(negative_queries)
77
- dot_product2 = embeddings[k] @ negative_embeddings.T
78
- dot_product2 = dot_product2 - np.median(dot_product2, axis=0)
79
- dot_product2 = dot_product2 / np.max(dot_product2, axis=0, keepdims=True)
80
- dot_product -= np.max(np.maximum(dot_product2, 0), axis=1)
81
-
82
- results = np.argsort(dot_product)[-1 : -n_results - 1 : -1]
83
- return [
84
- (
85
- df[k].iloc[i]["path"],
86
- df[k].iloc[i]["tooltip"] + source[k],
87
- i,
88
- )
89
- for i in results
90
- ]
91
-
92
-
93
- description = """
94
- # Semantic image search
95
- **Enter your query and hit enter**
96
- """
97
-
98
- howto = """
99
- - Click image to find similar images
100
- - Use "**;**" to combine multiple queries)
101
- - Use "**EXCLUDING**", to exclude a query
102
- """
103
-
104
-
105
- def main():
106
- st.markdown(
107
- """
108
- <style>
109
- .block-container{
110
- max-width: 1200px;
111
- }
112
- div.row-widget.stRadio > div{
113
- flex-direction:row;
114
- display: flex;
115
- justify-content: center;
116
- }
117
- div.row-widget.stRadio > div > label{
118
- margin-left: 5px;
119
- margin-right: 5px;
120
- }
121
- section.main>div:first-child {
122
- padding-top: 0px;
123
- }
124
- section:not(.main)>div:first-child {
125
- padding-top: 30px;
126
- }
127
- div.reportview-container > section:first-child{
128
- max-width: 320px;
129
- }
130
- #MainMenu {
131
- visibility: hidden;
132
- }
133
- footer {
134
- visibility: hidden;
135
- }
136
- </style>""",
137
- unsafe_allow_html=True,
138
- )
139
- st.sidebar.markdown(description)
140
- with st.sidebar.expander("Advanced use"):
141
- st.markdown(howto)
142
-
143
-
144
- st.sidebar.markdown(f"Try these test prompts: orange, blue, beach, lighthouse, mountain, sunset, parade")
145
- st.sidebar.markdown(f"Unsplash has categories that match: backgrounds, photos, nature, iphone, etc")
146
- st.sidebar.markdown(f"Unsplash images contain animals, apps, events, feelings, food, travel, nature, people, religion, sports, things, stock")
147
- st.sidebar.markdown(f"Unsplash things include flag, tree, clock, money, tattoo, arrow, book, car, fireworks, ghost, health, kiss, dance, balloon, crown, eye, house, music, airplane, lighthouse, typewriter, toys")
148
- st.sidebar.markdown(f"unsplash feelings include funny, heart, love, cool, congratulations, love, scary, cute, friendship, inspirational, hug, sad, cursed, beautiful, crazy, respect, transformation, peaceful, happy")
149
- st.sidebar.markdown(f"unsplash people contain baby, life, women, family, girls, pregnancy, society, old people, musician, attractive, bohemian")
150
- st.sidebar.markdown(f"imagenet queries include: photo of, photo of many, sculpture of, rendering of, graffiti of, tattoo of, embroidered, drawing of, plastic, black and white, painting, video game, doodle, origami, sketch, etc")
151
-
152
-
153
- _, c, _ = st.columns((1, 3, 1))
154
- if "query" in st.session_state:
155
- query = c.text_input("", value=st.session_state["query"])
156
- else:
157
-
158
- query = c.text_input("", value="lighthouse")
159
- corpus = st.radio("", ["Unsplash"])
160
- #corpus = st.radio("", ["Unsplash", "Movies"])
161
- if len(query) > 0:
162
- results = image_search(query, corpus)
163
- clicked = clickable_images(
164
- [result[0] for result in results],
165
- titles=[result[1] for result in results],
166
- div_style={
167
- "display": "flex",
168
- "justify-content": "center",
169
- "flex-wrap": "wrap",
170
- },
171
- img_style={"margin": "2px", "height": "200px"},
172
- )
173
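- # Clicking a result turns it into the next query: the "[Corpus:index]" form makes image_search reuse that image's embedding.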
- if clicked >= 0:
174
- change_query = False
175
- if "last_clicked" not in st.session_state:
176
- change_query = True
177
- else:
178
- if clicked != st.session_state["last_clicked"]:
179
- change_query = True
180
- if change_query:
181
- st.session_state["query"] = f"[{corpus}:{results[clicked][2]}]"
182
- st.experimental_rerun()
183
-
184
-
185
- if __name__ == "__main__":
186
- main()
 
spaces/ASJMO/freegpt/client/css/settings.css DELETED
@@ -1,44 +0,0 @@
1
- .settings-container {
2
- color: var(--colour-2);
3
- margin: 24px 0px 8px 0px;
4
- justify-content: center;
5
- }
6
-
7
- .settings-container span {
8
- font-size: 0.875rem;
9
- margin: 0;
10
- }
11
-
12
- .settings-container label {
13
- width: 24px;
14
- height: 16px;
15
- }
16
-
17
- .settings-container .field {
18
- justify-content: space-between;
19
- }
20
-
21
- .settings-container .checkbox input + label,
22
- .settings-container .checkbox input:checked + label:after {
23
- background: var(--colour-1);
24
- }
25
-
26
- .settings-container .checkbox input + label:after,
27
- .settings-container .checkbox input:checked + label {
28
- background: var(--colour-3);
29
- }
30
-
31
- .settings-container .checkbox label:after {
32
- left: 2px;
33
- width: 10px;
34
- height: 10px;
35
- }
36
-
37
- .settings-container .checkbox input:checked + label:after {
38
- left: calc(100% - 2px - 10px);
39
- }
40
-
41
- .settings-container .dropdown {
42
- padding: 4px 8px;
43
- font-size: 0.75rem;
44
- }
 
spaces/AchyuthGamer/OpenGPT/client/css/message.css DELETED
@@ -1,65 +0,0 @@
1
- .message {
2
- width: 100%;
3
- overflow-wrap: break-word;
4
- display: flex;
5
- gap: var(--section-gap);
6
- padding: var(--section-gap);
7
- padding-bottom: 0;
8
- }
9
-
10
- .message:last-child {
11
- animation: 0.6s show_message;
12
- }
13
-
14
- @keyframes show_message {
15
- from {
16
- transform: translateY(10px);
17
- opacity: 0;
18
- }
19
- }
20
-
21
- .message .avatar-container img {
22
- max-width: 48px;
23
- max-height: 48px;
24
- box-shadow: 0.4px 0.5px 0.7px -2px rgba(0, 0, 0, 0.08), 1.1px 1.3px 2px -2px rgba(0, 0, 0, 0.041),
25
- 2.7px 3px 4.8px -2px rgba(0, 0, 0, 0.029), 9px 10px 16px -2px rgba(0, 0, 0, 0.022);
26
- }
27
-
28
- .message .content {
29
- display: flex;
30
- flex-direction: column;
31
- width: 90%;
32
- gap: 18px;
33
- }
34
-
35
- .message .content p,
36
- .message .content li,
37
- .message .content code {
38
- font-size: 1rem;
39
- line-height: 1.3;
40
- }
41
-
42
- @media screen and (max-height: 720px) {
43
- .message {
44
- padding: 12px;
45
- gap: 0;
46
- }
47
-
48
- .message .content {
49
- margin-left: 8px;
50
- width: 80%;
51
- }
52
-
53
- .message .avatar-container img {
54
- max-width: 32px;
55
- max-height: 32px;
56
- }
57
-
58
- .message .content,
59
- .message .content p,
60
- .message .content li,
61
- .message .content code {
62
- font-size: 0.875rem;
63
- line-height: 1.3;
64
- }
65
- }
 
spaces/Adapter/T2I-Adapter/ldm/data/utils.py DELETED
@@ -1,40 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import cv2
-import numpy as np
-from torchvision.transforms import transforms
-from torchvision.transforms.functional import to_tensor
-from transformers import CLIPProcessor
-
-from basicsr.utils import img2tensor
-
-
-class AddCannyFreezeThreshold(object):
-
-    def __init__(self, low_threshold=100, high_threshold=200):
-        self.low_threshold = low_threshold
-        self.high_threshold = high_threshold
-
-    def __call__(self, sample):
-        # sample['jpg'] is PIL image
-        x = sample['jpg']
-        img = cv2.cvtColor(np.array(x), cv2.COLOR_RGB2BGR)
-        canny = cv2.Canny(img, self.low_threshold, self.high_threshold)[..., None]
-        sample['canny'] = img2tensor(canny, bgr2rgb=True, float32=True) / 255.
-        sample['jpg'] = to_tensor(x)
-        return sample
-
-
-class AddStyle(object):
-
-    def __init__(self, version):
-        self.processor = CLIPProcessor.from_pretrained(version)
-        self.pil_to_tensor = transforms.ToTensor()
-
-    def __call__(self, sample):
-        # sample['jpg'] is PIL image
-        x = sample['jpg']
-        style = self.processor(images=x, return_tensors="pt")['pixel_values'][0]
-        sample['style'] = style
-        sample['jpg'] = to_tensor(x)
-        return sample
 
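A hedged usage sketch for the two transforms above (the image path and CLIP checkpoint name are placeholders, not from the original repo). Each transform takes a dict whose 'jpg' entry is a PIL image and replaces it with tensors:

from PIL import Image

image = Image.open('example.jpg').convert('RGB')  # placeholder path

canny_tf = AddCannyFreezeThreshold(low_threshold=100, high_threshold=200)
out = canny_tf({'jpg': image})
print(out['canny'].shape)  # 1 x H x W edge map scaled to [0, 1]

style_tf = AddStyle(version='openai/clip-vit-large-patch14')  # assumed checkpoint
out = style_tf({'jpg': image})
print(out['style'].shape)  # CLIP pixel_values, typically 3 x 224 x 224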
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/spiralcurve-plugin.d.ts DELETED
@@ -1,15 +0,0 @@
-import SpiralCurve from './spiralcurve';
-
-export default class SpiralCurvePlugin extends Phaser.Plugins.BasePlugin {
-    add(
-        config?: SpiralCurve.IConfig
-    ): SpiralCurve;
-
-    add(
-        x?: number, y?: number,
-        startRadius?: number, endRadius?: number,
-        startAngle?: number, endAngle?: number,
-        rotation?: number
-    ): SpiralCurve
-
-}
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/checkbox/Checkbox.js DELETED
@@ -1,2 +0,0 @@
-import Checkbox from '../../../plugins/checkbox.js';
-export default Checkbox;
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/menu/methods/SetTransitCallbackMethods.js DELETED
@@ -1,32 +0,0 @@
-import GetEaseConfig from './GetEaseConfig.js';
-
-var PopUp = function (menu, duration) {
-    menu.popUp(GetEaseConfig(menu.root.easeIn, menu))
-}
-
-var ScaleDown = function (menu, duration) {
-    // Don't destroy here
-    menu.scaleDown(GetEaseConfig(menu.root.easeOut, menu));
-}
-
-export default {
-    setTransitInCallback(callback) {
-        if (callback === undefined) {
-            callback = PopUp;
-        }
-
-        this.transitInCallback = callback;
-        // callback = function(gameObject, duration) {}
-        return this;
-    },
-
-    setTransitOutCallback(callback) {
-        if (callback === undefined) {
-            callback = ScaleDown;
-        }
-
-        this.transitOutCallback = callback;
-        // callback = function(gameObject, duration) {}
-        return this;
-    }
-}
 
spaces/AlexWang/lama/saicinpainting/evaluation/losses/fid/inception.py DELETED
@@ -1,323 +0,0 @@
-import logging
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from torchvision import models
-
-try:
-    from torchvision.models.utils import load_state_dict_from_url
-except ImportError:
-    from torch.utils.model_zoo import load_url as load_state_dict_from_url
-
-# Inception weights ported to Pytorch from
-# http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz
-FID_WEIGHTS_URL = 'https://github.com/mseitzer/pytorch-fid/releases/download/fid_weights/pt_inception-2015-12-05-6726825d.pth'
-
-
-LOGGER = logging.getLogger(__name__)
-
-
-class InceptionV3(nn.Module):
-    """Pretrained InceptionV3 network returning feature maps"""
-
-    # Index of default block of inception to return,
-    # corresponds to output of final average pooling
-    DEFAULT_BLOCK_INDEX = 3
-
-    # Maps feature dimensionality to their output blocks indices
-    BLOCK_INDEX_BY_DIM = {
-        64: 0,   # First max pooling features
-        192: 1,  # Second max pooling featurs
-        768: 2,  # Pre-aux classifier features
-        2048: 3  # Final average pooling features
-    }
-
-    def __init__(self,
-                 output_blocks=[DEFAULT_BLOCK_INDEX],
-                 resize_input=True,
-                 normalize_input=True,
-                 requires_grad=False,
-                 use_fid_inception=True):
-        """Build pretrained InceptionV3
-
-        Parameters
-        ----------
-        output_blocks : list of int
-            Indices of blocks to return features of. Possible values are:
-                - 0: corresponds to output of first max pooling
-                - 1: corresponds to output of second max pooling
-                - 2: corresponds to output which is fed to aux classifier
-                - 3: corresponds to output of final average pooling
-        resize_input : bool
-            If true, bilinearly resizes input to width and height 299 before
-            feeding input to model. As the network without fully connected
-            layers is fully convolutional, it should be able to handle inputs
-            of arbitrary size, so resizing might not be strictly needed
-        normalize_input : bool
-            If true, scales the input from range (0, 1) to the range the
-            pretrained Inception network expects, namely (-1, 1)
-        requires_grad : bool
-            If true, parameters of the model require gradients. Possibly useful
-            for finetuning the network
-        use_fid_inception : bool
-            If true, uses the pretrained Inception model used in Tensorflow's
-            FID implementation. If false, uses the pretrained Inception model
-            available in torchvision. The FID Inception model has different
-            weights and a slightly different structure from torchvision's
-            Inception model. If you want to compute FID scores, you are
-            strongly advised to set this parameter to true to get comparable
-            results.
-        """
-        super(InceptionV3, self).__init__()
-
-        self.resize_input = resize_input
-        self.normalize_input = normalize_input
-        self.output_blocks = sorted(output_blocks)
-        self.last_needed_block = max(output_blocks)
-
-        assert self.last_needed_block <= 3, \
-            'Last possible output block index is 3'
-
-        self.blocks = nn.ModuleList()
-
-        if use_fid_inception:
-            inception = fid_inception_v3()
-        else:
-            inception = models.inception_v3(pretrained=True)
-
-        # Block 0: input to maxpool1
-        block0 = [
-            inception.Conv2d_1a_3x3,
-            inception.Conv2d_2a_3x3,
-            inception.Conv2d_2b_3x3,
-            nn.MaxPool2d(kernel_size=3, stride=2)
-        ]
-        self.blocks.append(nn.Sequential(*block0))
-
-        # Block 1: maxpool1 to maxpool2
-        if self.last_needed_block >= 1:
-            block1 = [
-                inception.Conv2d_3b_1x1,
-                inception.Conv2d_4a_3x3,
-                nn.MaxPool2d(kernel_size=3, stride=2)
-            ]
-            self.blocks.append(nn.Sequential(*block1))
-
-        # Block 2: maxpool2 to aux classifier
-        if self.last_needed_block >= 2:
-            block2 = [
-                inception.Mixed_5b,
-                inception.Mixed_5c,
-                inception.Mixed_5d,
-                inception.Mixed_6a,
-                inception.Mixed_6b,
-                inception.Mixed_6c,
-                inception.Mixed_6d,
-                inception.Mixed_6e,
-            ]
-            self.blocks.append(nn.Sequential(*block2))
-
-        # Block 3: aux classifier to final avgpool
-        if self.last_needed_block >= 3:
-            block3 = [
-                inception.Mixed_7a,
-                inception.Mixed_7b,
-                inception.Mixed_7c,
-                nn.AdaptiveAvgPool2d(output_size=(1, 1))
-            ]
-            self.blocks.append(nn.Sequential(*block3))
-
-        for param in self.parameters():
-            param.requires_grad = requires_grad
-
-    def forward(self, inp):
-        """Get Inception feature maps
-
-        Parameters
-        ----------
-        inp : torch.autograd.Variable
-            Input tensor of shape Bx3xHxW. Values are expected to be in
-            range (0, 1)
-
-        Returns
-        -------
-        List of torch.autograd.Variable, corresponding to the selected output
-        block, sorted ascending by index
-        """
-        outp = []
-        x = inp
-
-        if self.resize_input:
-            x = F.interpolate(x,
-                              size=(299, 299),
-                              mode='bilinear',
-                              align_corners=False)
-
-        if self.normalize_input:
-            x = 2 * x - 1  # Scale from range (0, 1) to range (-1, 1)
-
-        for idx, block in enumerate(self.blocks):
-            x = block(x)
-            if idx in self.output_blocks:
-                outp.append(x)
-
-            if idx == self.last_needed_block:
-                break
-
-        return outp
-
-
-def fid_inception_v3():
-    """Build pretrained Inception model for FID computation
-
-    The Inception model for FID computation uses a different set of weights
-    and has a slightly different structure than torchvision's Inception.
-
-    This method first constructs torchvision's Inception and then patches the
-    necessary parts that are different in the FID Inception model.
-    """
-    LOGGER.info('fid_inception_v3 called')
-    inception = models.inception_v3(num_classes=1008,
-                                    aux_logits=False,
-                                    pretrained=False)
-    LOGGER.info('models.inception_v3 done')
-    inception.Mixed_5b = FIDInceptionA(192, pool_features=32)
-    inception.Mixed_5c = FIDInceptionA(256, pool_features=64)
-    inception.Mixed_5d = FIDInceptionA(288, pool_features=64)
-    inception.Mixed_6b = FIDInceptionC(768, channels_7x7=128)
-    inception.Mixed_6c = FIDInceptionC(768, channels_7x7=160)
-    inception.Mixed_6d = FIDInceptionC(768, channels_7x7=160)
-    inception.Mixed_6e = FIDInceptionC(768, channels_7x7=192)
-    inception.Mixed_7b = FIDInceptionE_1(1280)
-    inception.Mixed_7c = FIDInceptionE_2(2048)
-
-    LOGGER.info('fid_inception_v3 patching done')
-
-    state_dict = load_state_dict_from_url(FID_WEIGHTS_URL, progress=True)
-    LOGGER.info('fid_inception_v3 weights downloaded')
-
-    inception.load_state_dict(state_dict)
-    LOGGER.info('fid_inception_v3 weights loaded into model')
-
-    return inception
-
-
-class FIDInceptionA(models.inception.InceptionA):
-    """InceptionA block patched for FID computation"""
-    def __init__(self, in_channels, pool_features):
-        super(FIDInceptionA, self).__init__(in_channels, pool_features)
-
-    def forward(self, x):
-        branch1x1 = self.branch1x1(x)
-
-        branch5x5 = self.branch5x5_1(x)
-        branch5x5 = self.branch5x5_2(branch5x5)
-
-        branch3x3dbl = self.branch3x3dbl_1(x)
-        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
-        branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
-
-        # Patch: Tensorflow's average pool does not use the padded zero's in
-        # its average calculation
-        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1,
-                                   count_include_pad=False)
-        branch_pool = self.branch_pool(branch_pool)
-
-        outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
-        return torch.cat(outputs, 1)
-
-
-class FIDInceptionC(models.inception.InceptionC):
-    """InceptionC block patched for FID computation"""
-    def __init__(self, in_channels, channels_7x7):
-        super(FIDInceptionC, self).__init__(in_channels, channels_7x7)
-
-    def forward(self, x):
-        branch1x1 = self.branch1x1(x)
-
-        branch7x7 = self.branch7x7_1(x)
-        branch7x7 = self.branch7x7_2(branch7x7)
-        branch7x7 = self.branch7x7_3(branch7x7)
-
-        branch7x7dbl = self.branch7x7dbl_1(x)
-        branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)
-        branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)
-        branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)
-        branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)
-
-        # Patch: Tensorflow's average pool does not use the padded zero's in
-        # its average calculation
-        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1,
-                                   count_include_pad=False)
-        branch_pool = self.branch_pool(branch_pool)
-
-        outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool]
-        return torch.cat(outputs, 1)
-
-
-class FIDInceptionE_1(models.inception.InceptionE):
-    """First InceptionE block patched for FID computation"""
-    def __init__(self, in_channels):
-        super(FIDInceptionE_1, self).__init__(in_channels)
-
-    def forward(self, x):
-        branch1x1 = self.branch1x1(x)
-
-        branch3x3 = self.branch3x3_1(x)
-        branch3x3 = [
-            self.branch3x3_2a(branch3x3),
-            self.branch3x3_2b(branch3x3),
-        ]
-        branch3x3 = torch.cat(branch3x3, 1)
-
-        branch3x3dbl = self.branch3x3dbl_1(x)
-        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
-        branch3x3dbl = [
-            self.branch3x3dbl_3a(branch3x3dbl),
-            self.branch3x3dbl_3b(branch3x3dbl),
-        ]
-        branch3x3dbl = torch.cat(branch3x3dbl, 1)
-
-        # Patch: Tensorflow's average pool does not use the padded zero's in
-        # its average calculation
-        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1,
-                                   count_include_pad=False)
-        branch_pool = self.branch_pool(branch_pool)
-
-        outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
-        return torch.cat(outputs, 1)
-
-
-class FIDInceptionE_2(models.inception.InceptionE):
-    """Second InceptionE block patched for FID computation"""
-    def __init__(self, in_channels):
-        super(FIDInceptionE_2, self).__init__(in_channels)
-
-    def forward(self, x):
-        branch1x1 = self.branch1x1(x)
-
-        branch3x3 = self.branch3x3_1(x)
-        branch3x3 = [
-            self.branch3x3_2a(branch3x3),
-            self.branch3x3_2b(branch3x3),
-        ]
-        branch3x3 = torch.cat(branch3x3, 1)
-
-        branch3x3dbl = self.branch3x3dbl_1(x)
-        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
-        branch3x3dbl = [
-            self.branch3x3dbl_3a(branch3x3dbl),
-            self.branch3x3dbl_3b(branch3x3dbl),
-        ]
-        branch3x3dbl = torch.cat(branch3x3dbl, 1)
-
-        # Patch: The FID Inception model uses max pooling instead of average
-        # pooling. This is likely an error in this specific Inception
-        # implementation, as other Inception models use average pooling here
-        # (which matches the description in the paper).
-        branch_pool = F.max_pool2d(x, kernel_size=3, stride=1, padding=1)
-        branch_pool = self.branch_pool(branch_pool)
-
-        outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
-        return torch.cat(outputs, 1)
 
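A usage sketch for the wrapper above: extracting the 2048-dimensional pool3 features that FID is computed from (the first call downloads the FID weights from FID_WEIGHTS_URL):

import torch

block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[2048]
model = InceptionV3(output_blocks=[block_idx]).eval()

images = torch.rand(4, 3, 299, 299)  # dummy batch in [0, 1], per the docstring
with torch.no_grad():
    features = model(images)[0]  # one output per requested block
features = features.squeeze(-1).squeeze(-1)  # drop the 1x1 spatial dims
print(features.shape)  # torch.Size([4, 2048])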
spaces/AlexWang/lama/saicinpainting/training/visualizers/directory.py DELETED
@@ -1,36 +0,0 @@
-import os
-
-import cv2
-import numpy as np
-
-from saicinpainting.training.visualizers.base import BaseVisualizer, visualize_mask_and_images_batch
-from saicinpainting.utils import check_and_warn_input_range
-
-
-class DirectoryVisualizer(BaseVisualizer):
-    DEFAULT_KEY_ORDER = 'image predicted_image inpainted'.split(' ')
-
-    def __init__(self, outdir, key_order=DEFAULT_KEY_ORDER, max_items_in_batch=10,
-                 last_without_mask=True, rescale_keys=None):
-        self.outdir = outdir
-        os.makedirs(self.outdir, exist_ok=True)
-        self.key_order = key_order
-        self.max_items_in_batch = max_items_in_batch
-        self.last_without_mask = last_without_mask
-        self.rescale_keys = rescale_keys
-
-    def __call__(self, epoch_i, batch_i, batch, suffix='', rank=None):
-        check_and_warn_input_range(batch['image'], 0, 1, 'DirectoryVisualizer target image')
-        vis_img = visualize_mask_and_images_batch(batch, self.key_order, max_items=self.max_items_in_batch,
-                                                  last_without_mask=self.last_without_mask,
-                                                  rescale_keys=self.rescale_keys)
-
-        vis_img = np.clip(vis_img * 255, 0, 255).astype('uint8')
-
-        curoutdir = os.path.join(self.outdir, f'epoch{epoch_i:04d}{suffix}')
-        os.makedirs(curoutdir, exist_ok=True)
-        rank_suffix = f'_r{rank}' if rank is not None else ''
-        out_fname = os.path.join(curoutdir, f'batch{batch_i:07d}{rank_suffix}.jpg')
-
-        vis_img = cv2.cvtColor(vis_img, cv2.COLOR_RGB2BGR)
-        cv2.imwrite(out_fname, vis_img)
 
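A hedged sketch of how this visualizer is driven during training; the 'mask' key is an assumption based on the base visualizer's conventions, and the tensors are dummies:

import torch

batch = {
    'image': torch.rand(2, 3, 256, 256),
    'predicted_image': torch.rand(2, 3, 256, 256),
    'inpainted': torch.rand(2, 3, 256, 256),
    'mask': torch.zeros(2, 1, 256, 256),  # assumed key, consumed by visualize_mask_and_images_batch
}
vis = DirectoryVisualizer(outdir='vis_out')  # placeholder output directory
vis(epoch_i=0, batch_i=0, batch=batch, suffix='_test')
# expected output file: vis_out/epoch0000_test/batch0000000.jpg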
spaces/Aloento/9Nine-PITS/text/frontend/normalizer/numbers.py DELETED
@@ -1,86 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# number expansion is not that easy
-import re
-
-import inflect
-
-_inflect = inflect.engine()
-_comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])')
-_decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)')
-_pounds_re = re.compile(r'£([0-9\,]*[0-9]+)')
-_dollars_re = re.compile(r'\$([0-9\.\,]*[0-9]+)')
-_ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)')
-_number_re = re.compile(r'[0-9]+')
-
-
-def _remove_commas(m):
-    return m.group(1).replace(',', '')
-
-
-def _expand_decimal_point(m):
-    return m.group(1).replace('.', ' point ')
-
-
-def _expand_dollars(m):
-    match = m.group(1)
-    parts = match.split('.')
-    if len(parts) > 2:
-        return match + ' dollars'  # Unexpected format
-    dollars = int(parts[0]) if parts[0] else 0
-    cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
-    if dollars and cents:
-        dollar_unit = 'dollar' if dollars == 1 else 'dollars'
-        cent_unit = 'cent' if cents == 1 else 'cents'
-        return '%s %s, %s %s' % (dollars, dollar_unit, cents, cent_unit)
-    elif dollars:
-        dollar_unit = 'dollar' if dollars == 1 else 'dollars'
-        return '%s %s' % (dollars, dollar_unit)
-    elif cents:
-        cent_unit = 'cent' if cents == 1 else 'cents'
-        return '%s %s' % (cents, cent_unit)
-    else:
-        return 'zero dollars'
-
-
-def _expand_ordinal(m):
-    return _inflect.number_to_words(m.group(0))
-
-
-def _expand_number(m):
-    num = int(m.group(0))
-    if num > 1000 and num < 3000:
-        if num == 2000:
-            return 'two thousand'
-        elif num > 2000 and num < 2010:
-            return 'two thousand ' + _inflect.number_to_words(num % 100)
-        elif num % 100 == 0:
-            return _inflect.number_to_words(num // 100) + ' hundred'
-        else:
-            return _inflect.number_to_words(
-                num, andword='', zero='oh', group=2).replace(', ', ' ')
-    else:
-        return _inflect.number_to_words(num, andword='')
-
-
-def normalize_numbers(text):
-    """ Normalize numbers in English text.
-    """
-    text = re.sub(_comma_number_re, _remove_commas, text)
-    text = re.sub(_pounds_re, r'\1 pounds', text)
-    text = re.sub(_dollars_re, _expand_dollars, text)
-    text = re.sub(_decimal_number_re, _expand_decimal_point, text)
-    text = re.sub(_ordinal_re, _expand_ordinal, text)
-    text = re.sub(_number_re, _expand_number, text)
-    return text
 
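A few worked examples of what the rules above produce, traced through the regexes and inflect by hand:

print(normalize_numbers("I owe you $3.50"))
# -> "I owe you three dollars, fifty cents"
print(normalize_numbers("born in 1984"))
# -> "born in nineteen eighty-four"
print(normalize_numbers("the 2nd of 10,000"))
# -> "the second of ten thousand"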
spaces/Andres99/Tune-A-Video-Training-UI/app.py DELETED
@@ -1,84 +0,0 @@
-#!/usr/bin/env python
-
-from __future__ import annotations
-
-import os
-from subprocess import getoutput
-
-import gradio as gr
-import torch
-
-from app_inference import create_inference_demo
-from app_training import create_training_demo
-from app_upload import create_upload_demo
-from inference import InferencePipeline
-from trainer import Trainer
-
-TITLE = '# [Tune-A-Video](https://tuneavideo.github.io/) UI'
-
-ORIGINAL_SPACE_ID = 'Tune-A-Video-library/Tune-A-Video-Training-UI'
-SPACE_ID = os.getenv('SPACE_ID', ORIGINAL_SPACE_ID)
-GPU_DATA = getoutput('nvidia-smi')
-SHARED_UI_WARNING = f'''## Attention - Training doesn't work in this shared UI. You can duplicate and use it with a paid private T4 GPU.
-
-<center><a class="duplicate-button" style="display:inline-block" target="_blank" href="https://huggingface.co/spaces/{SPACE_ID}?duplicate=true"><img style="margin-top:0;margin-bottom:0" src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a></center>
-'''
-
-if os.getenv('SYSTEM') == 'spaces' and SPACE_ID != ORIGINAL_SPACE_ID:
-    SETTINGS = f'<a href="https://huggingface.co/spaces/{SPACE_ID}/settings">Settings</a>'
-else:
-    SETTINGS = 'Settings'
-
-INVALID_GPU_WARNING = f'''## Attention - the specified GPU is invalid. Training may not work. Make sure you have selected a `T4 GPU` for this task.'''
-
-CUDA_NOT_AVAILABLE_WARNING = f'''## Attention - Running on CPU.
-<center>
-You can assign a GPU in the {SETTINGS} tab if you are running this on HF Spaces.
-You can use "T4 small/medium" to run this demo.
-</center>
-'''
-
-HF_TOKEN_NOT_SPECIFIED_WARNING = f'''The environment variable `HF_TOKEN` is not specified. Feel free to specify your Hugging Face token with write permission if you don't want to manually provide it for every run.
-<center>
-You can check and create your Hugging Face tokens <a href="https://huggingface.co/settings/tokens" target="_blank">here</a>.
-You can specify environment variables in the "Repository secrets" section of the {SETTINGS} tab.
-</center>
-'''
-
-HF_TOKEN = os.getenv('HF_TOKEN')
-
-
-def show_warning(warning_text: str) -> gr.Blocks:
-    with gr.Blocks() as demo:
-        with gr.Box():
-            gr.Markdown(warning_text)
-    return demo
-
-
-pipe = InferencePipeline(HF_TOKEN)
-trainer = Trainer(HF_TOKEN)
-
-with gr.Blocks(css='style.css') as demo:
-    if SPACE_ID == ORIGINAL_SPACE_ID:
-        show_warning(SHARED_UI_WARNING)
-    elif not torch.cuda.is_available():
-        show_warning(CUDA_NOT_AVAILABLE_WARNING)
-    elif (not 'T4' in GPU_DATA):
-        show_warning(INVALID_GPU_WARNING)
-
-    gr.Markdown(TITLE)
-    with gr.Tabs():
-        with gr.TabItem('Train'):
-            create_training_demo(trainer, pipe)
-        with gr.TabItem('Run'):
-            create_inference_demo(pipe, HF_TOKEN)
-        with gr.TabItem('Upload'):
-            gr.Markdown('''
-            - You can use this tab to upload models later if you choose not to upload models in training time or if upload in training time failed.
-            ''')
-            create_upload_demo(HF_TOKEN)
-
-    if not HF_TOKEN:
-        show_warning(HF_TOKEN_NOT_SPECIFIED_WARNING)
-
-demo.queue(max_size=1).launch(share=False)
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/unclip_text_interpolation.py DELETED
@@ -1,573 +0,0 @@
-import inspect
-from typing import List, Optional, Tuple, Union
-
-import torch
-from torch.nn import functional as F
-from transformers import CLIPTextModelWithProjection, CLIPTokenizer
-from transformers.models.clip.modeling_clip import CLIPTextModelOutput
-
-from diffusers import (
-    DiffusionPipeline,
-    ImagePipelineOutput,
-    PriorTransformer,
-    UnCLIPScheduler,
-    UNet2DConditionModel,
-    UNet2DModel,
-)
-from diffusers.pipelines.unclip import UnCLIPTextProjModel
-from diffusers.utils import is_accelerate_available, logging, randn_tensor
-
-
-logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
-
-
-def slerp(val, low, high):
-    """
-    Find the interpolation point between the 'low' and 'high' values for the given 'val'. See https://en.wikipedia.org/wiki/Slerp for more details on the topic.
-    """
-    low_norm = low / torch.norm(low)
-    high_norm = high / torch.norm(high)
-    omega = torch.acos((low_norm * high_norm))
-    so = torch.sin(omega)
-    res = (torch.sin((1.0 - val) * omega) / so) * low + (torch.sin(val * omega) / so) * high
-    return res
-
-
-class UnCLIPTextInterpolationPipeline(DiffusionPipeline):
-
-    """
-    Pipeline for prompt-to-prompt interpolation on CLIP text embeddings and using the UnCLIP / Dall-E to decode them to images.
-
-    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
-    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
-
-    Args:
-        text_encoder ([`CLIPTextModelWithProjection`]):
-            Frozen text-encoder.
-        tokenizer (`CLIPTokenizer`):
-            Tokenizer of class
-            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
-        prior ([`PriorTransformer`]):
-            The canonincal unCLIP prior to approximate the image embedding from the text embedding.
-        text_proj ([`UnCLIPTextProjModel`]):
-            Utility class to prepare and combine the embeddings before they are passed to the decoder.
-        decoder ([`UNet2DConditionModel`]):
-            The decoder to invert the image embedding into an image.
-        super_res_first ([`UNet2DModel`]):
-            Super resolution unet. Used in all but the last step of the super resolution diffusion process.
-        super_res_last ([`UNet2DModel`]):
-            Super resolution unet. Used in the last step of the super resolution diffusion process.
-        prior_scheduler ([`UnCLIPScheduler`]):
-            Scheduler used in the prior denoising process. Just a modified DDPMScheduler.
-        decoder_scheduler ([`UnCLIPScheduler`]):
-            Scheduler used in the decoder denoising process. Just a modified DDPMScheduler.
-        super_res_scheduler ([`UnCLIPScheduler`]):
-            Scheduler used in the super resolution denoising process. Just a modified DDPMScheduler.
-
-    """
-
-    prior: PriorTransformer
-    decoder: UNet2DConditionModel
-    text_proj: UnCLIPTextProjModel
-    text_encoder: CLIPTextModelWithProjection
-    tokenizer: CLIPTokenizer
-    super_res_first: UNet2DModel
-    super_res_last: UNet2DModel
-
-    prior_scheduler: UnCLIPScheduler
-    decoder_scheduler: UnCLIPScheduler
-    super_res_scheduler: UnCLIPScheduler
-
-    # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.__init__
-    def __init__(
-        self,
-        prior: PriorTransformer,
-        decoder: UNet2DConditionModel,
-        text_encoder: CLIPTextModelWithProjection,
-        tokenizer: CLIPTokenizer,
-        text_proj: UnCLIPTextProjModel,
-        super_res_first: UNet2DModel,
-        super_res_last: UNet2DModel,
-        prior_scheduler: UnCLIPScheduler,
-        decoder_scheduler: UnCLIPScheduler,
-        super_res_scheduler: UnCLIPScheduler,
-    ):
-        super().__init__()
-
-        self.register_modules(
-            prior=prior,
-            decoder=decoder,
-            text_encoder=text_encoder,
-            tokenizer=tokenizer,
-            text_proj=text_proj,
-            super_res_first=super_res_first,
-            super_res_last=super_res_last,
-            prior_scheduler=prior_scheduler,
-            decoder_scheduler=decoder_scheduler,
-            super_res_scheduler=super_res_scheduler,
-        )
-
-    # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents
-    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
-        if latents is None:
-            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
-        else:
-            if latents.shape != shape:
-                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
-            latents = latents.to(device)
-
-        latents = latents * scheduler.init_noise_sigma
-        return latents
-
-    # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline._encode_prompt
-    def _encode_prompt(
-        self,
-        prompt,
-        device,
-        num_images_per_prompt,
-        do_classifier_free_guidance,
-        text_model_output: Optional[Union[CLIPTextModelOutput, Tuple]] = None,
-        text_attention_mask: Optional[torch.Tensor] = None,
-    ):
-        if text_model_output is None:
-            batch_size = len(prompt) if isinstance(prompt, list) else 1
-            # get prompt text embeddings
-            text_inputs = self.tokenizer(
-                prompt,
-                padding="max_length",
-                max_length=self.tokenizer.model_max_length,
-                truncation=True,
-                return_tensors="pt",
-            )
-            text_input_ids = text_inputs.input_ids
-            text_mask = text_inputs.attention_mask.bool().to(device)
-
-            untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
-
-            if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
-                text_input_ids, untruncated_ids
-            ):
-                removed_text = self.tokenizer.batch_decode(
-                    untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
-                )
-                logger.warning(
-                    "The following part of your input was truncated because CLIP can only handle sequences up to"
-                    f" {self.tokenizer.model_max_length} tokens: {removed_text}"
-                )
-                text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
-
-            text_encoder_output = self.text_encoder(text_input_ids.to(device))
-
-            prompt_embeds = text_encoder_output.text_embeds
-            text_encoder_hidden_states = text_encoder_output.last_hidden_state
-
-        else:
-            batch_size = text_model_output[0].shape[0]
-            prompt_embeds, text_encoder_hidden_states = text_model_output[0], text_model_output[1]
-            text_mask = text_attention_mask
-
-        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
-        text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
-        text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)
-
-        if do_classifier_free_guidance:
-            uncond_tokens = [""] * batch_size
-
-            uncond_input = self.tokenizer(
-                uncond_tokens,
-                padding="max_length",
-                max_length=self.tokenizer.model_max_length,
-                truncation=True,
-                return_tensors="pt",
-            )
-            uncond_text_mask = uncond_input.attention_mask.bool().to(device)
-            negative_prompt_embeds_text_encoder_output = self.text_encoder(uncond_input.input_ids.to(device))
-
-            negative_prompt_embeds = negative_prompt_embeds_text_encoder_output.text_embeds
-            uncond_text_encoder_hidden_states = negative_prompt_embeds_text_encoder_output.last_hidden_state
-
-            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
-
-            seq_len = negative_prompt_embeds.shape[1]
-            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt)
-            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len)
-
-            seq_len = uncond_text_encoder_hidden_states.shape[1]
-            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)
-            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
-                batch_size * num_images_per_prompt, seq_len, -1
-            )
-            uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)
-
-            # done duplicates
-
-            # For classifier free guidance, we need to do two forward passes.
-            # Here we concatenate the unconditional and text embeddings into a single batch
-            # to avoid doing two forward passes
-            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
-            text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])
-
-            text_mask = torch.cat([uncond_text_mask, text_mask])
-
-        return prompt_embeds, text_encoder_hidden_states, text_mask
-
-    # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.enable_sequential_cpu_offload
-    def enable_sequential_cpu_offload(self, gpu_id=0):
-        r"""
-        Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, the pipeline's
-        models have their state dicts saved to CPU and then are moved to a `torch.device('meta') and loaded to GPU only
-        when their specific submodule has its `forward` method called.
-        """
-        if is_accelerate_available():
-            from accelerate import cpu_offload
-        else:
-            raise ImportError("Please install accelerate via `pip install accelerate`")
-
-        device = torch.device(f"cuda:{gpu_id}")
-
-        # TODO: self.prior.post_process_latents is not covered by the offload hooks, so it fails if added to the list
-        models = [
-            self.decoder,
-            self.text_proj,
-            self.text_encoder,
-            self.super_res_first,
-            self.super_res_last,
-        ]
-        for cpu_offloaded_model in models:
-            if cpu_offloaded_model is not None:
-                cpu_offload(cpu_offloaded_model, device)
-
-    @property
-    # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline._execution_device
-    def _execution_device(self):
-        r"""
-        Returns the device on which the pipeline's models will be executed. After calling
-        `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
-        hooks.
-        """
-        if self.device != torch.device("meta") or not hasattr(self.decoder, "_hf_hook"):
-            return self.device
-        for module in self.decoder.modules():
-            if (
-                hasattr(module, "_hf_hook")
-                and hasattr(module._hf_hook, "execution_device")
-                and module._hf_hook.execution_device is not None
-            ):
-                return torch.device(module._hf_hook.execution_device)
-        return self.device
-
-    @torch.no_grad()
-    def __call__(
-        self,
-        start_prompt: str,
-        end_prompt: str,
-        steps: int = 5,
-        prior_num_inference_steps: int = 25,
-        decoder_num_inference_steps: int = 25,
-        super_res_num_inference_steps: int = 7,
-        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
-        prior_guidance_scale: float = 4.0,
-        decoder_guidance_scale: float = 8.0,
-        enable_sequential_cpu_offload=True,
-        gpu_id=0,
-        output_type: Optional[str] = "pil",
-        return_dict: bool = True,
-    ):
-        """
-        Function invoked when calling the pipeline for generation.
-
-        Args:
-            start_prompt (`str`):
-                The prompt to start the image generation interpolation from.
-            end_prompt (`str`):
-                The prompt to end the image generation interpolation at.
-            steps (`int`, *optional*, defaults to 5):
-                The number of steps over which to interpolate from start_prompt to end_prompt. The pipeline returns
-                the same number of images as this value.
-            prior_num_inference_steps (`int`, *optional*, defaults to 25):
-                The number of denoising steps for the prior. More denoising steps usually lead to a higher quality
-                image at the expense of slower inference.
-            decoder_num_inference_steps (`int`, *optional*, defaults to 25):
-                The number of denoising steps for the decoder. More denoising steps usually lead to a higher quality
-                image at the expense of slower inference.
-            super_res_num_inference_steps (`int`, *optional*, defaults to 7):
-                The number of denoising steps for super resolution. More denoising steps usually lead to a higher
-                quality image at the expense of slower inference.
-            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
-                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
-                to make generation deterministic.
-            prior_guidance_scale (`float`, *optional*, defaults to 4.0):
-                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
-                `guidance_scale` is defined as `w` of equation 2. of [Imagen
-                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
-                1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
-                usually at the expense of lower image quality.
-            decoder_guidance_scale (`float`, *optional*, defaults to 4.0):
-                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
-                `guidance_scale` is defined as `w` of equation 2. of [Imagen
-                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
-                1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
-                usually at the expense of lower image quality.
-            output_type (`str`, *optional*, defaults to `"pil"`):
-                The output format of the generated image. Choose between
-                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
-            enable_sequential_cpu_offload (`bool`, *optional*, defaults to `True`):
-                If True, offloads all models to CPU using accelerate, significantly reducing memory usage. When called, the pipeline's
-                models have their state dicts saved to CPU and then are moved to a `torch.device('meta') and loaded to GPU only
-                when their specific submodule has its `forward` method called.
-            gpu_id (`int`, *optional*, defaults to `0`):
-                The gpu_id to be passed to enable_sequential_cpu_offload. Only works when enable_sequential_cpu_offload is set to True.
-            return_dict (`bool`, *optional*, defaults to `True`):
-                Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
-        """
-
-        if not isinstance(start_prompt, str) or not isinstance(end_prompt, str):
-            raise ValueError(
-                f"`start_prompt` and `end_prompt` should be of type `str` but got {type(start_prompt)} and"
-                f" {type(end_prompt)} instead"
-            )
-
-        if enable_sequential_cpu_offload:
-            self.enable_sequential_cpu_offload(gpu_id=gpu_id)
-
-        device = self._execution_device
-
-        # Turn the prompts into embeddings.
-        inputs = self.tokenizer(
-            [start_prompt, end_prompt],
-            padding="max_length",
-            truncation=True,
-            max_length=self.tokenizer.model_max_length,
-            return_tensors="pt",
-        )
-        inputs.to(device)
-        text_model_output = self.text_encoder(**inputs)
-
-        text_attention_mask = torch.max(inputs.attention_mask[0], inputs.attention_mask[1])
-        text_attention_mask = torch.cat([text_attention_mask.unsqueeze(0)] * steps).to(device)
-
-        # Interpolate from the start to end prompt using slerp and add the generated images to an image output pipeline
-        batch_text_embeds = []
-        batch_last_hidden_state = []
-
-        for interp_val in torch.linspace(0, 1, steps):
-            text_embeds = slerp(interp_val, text_model_output.text_embeds[0], text_model_output.text_embeds[1])
-            last_hidden_state = slerp(
-                interp_val, text_model_output.last_hidden_state[0], text_model_output.last_hidden_state[1]
-            )
-            batch_text_embeds.append(text_embeds.unsqueeze(0))
-            batch_last_hidden_state.append(last_hidden_state.unsqueeze(0))
-
-        batch_text_embeds = torch.cat(batch_text_embeds)
-        batch_last_hidden_state = torch.cat(batch_last_hidden_state)
-
-        text_model_output = CLIPTextModelOutput(
-            text_embeds=batch_text_embeds, last_hidden_state=batch_last_hidden_state
-        )
-
-        batch_size = text_model_output[0].shape[0]
-
-        do_classifier_free_guidance = prior_guidance_scale > 1.0 or decoder_guidance_scale > 1.0
-
-        prompt_embeds, text_encoder_hidden_states, text_mask = self._encode_prompt(
-            prompt=None,
-            device=device,
-            num_images_per_prompt=1,
-            do_classifier_free_guidance=do_classifier_free_guidance,
-            text_model_output=text_model_output,
-            text_attention_mask=text_attention_mask,
-        )
-
-        # prior
-
-        self.prior_scheduler.set_timesteps(prior_num_inference_steps, device=device)
-        prior_timesteps_tensor = self.prior_scheduler.timesteps
-
-        embedding_dim = self.prior.config.embedding_dim
-
-        prior_latents = self.prepare_latents(
-            (batch_size, embedding_dim),
-            prompt_embeds.dtype,
-            device,
-            generator,
-            None,
-            self.prior_scheduler,
-        )
-
-        for i, t in enumerate(self.progress_bar(prior_timesteps_tensor)):
-            # expand the latents if we are doing classifier free guidance
-            latent_model_input = torch.cat([prior_latents] * 2) if do_classifier_free_guidance else prior_latents
-
-            predicted_image_embedding = self.prior(
-                latent_model_input,
-                timestep=t,
-                proj_embedding=prompt_embeds,
-                encoder_hidden_states=text_encoder_hidden_states,
-                attention_mask=text_mask,
-            ).predicted_image_embedding
-
-            if do_classifier_free_guidance:
-                predicted_image_embedding_uncond, predicted_image_embedding_text = predicted_image_embedding.chunk(2)
-                predicted_image_embedding = predicted_image_embedding_uncond + prior_guidance_scale * (
-                    predicted_image_embedding_text - predicted_image_embedding_uncond
-                )
-
-            if i + 1 == prior_timesteps_tensor.shape[0]:
-                prev_timestep = None
-            else:
-                prev_timestep = prior_timesteps_tensor[i + 1]
-
-            prior_latents = self.prior_scheduler.step(
-                predicted_image_embedding,
-                timestep=t,
-                sample=prior_latents,
-                generator=generator,
-                prev_timestep=prev_timestep,
-            ).prev_sample
-
-        prior_latents = self.prior.post_process_latents(prior_latents)
-
-        image_embeddings = prior_latents
-
-        # done prior
-
-        # decoder
-
-        text_encoder_hidden_states, additive_clip_time_embeddings = self.text_proj(
-            image_embeddings=image_embeddings,
-            prompt_embeds=prompt_embeds,
-            text_encoder_hidden_states=text_encoder_hidden_states,
-            do_classifier_free_guidance=do_classifier_free_guidance,
-        )
-
-        if device.type == "mps":
-            # HACK: MPS: There is a panic when padding bool tensors,
-            # so cast to int tensor for the pad and back to bool afterwards
-            text_mask = text_mask.type(torch.int)
-            decoder_text_mask = F.pad(text_mask, (self.text_proj.clip_extra_context_tokens, 0), value=1)
-            decoder_text_mask = decoder_text_mask.type(torch.bool)
-        else:
-            decoder_text_mask = F.pad(text_mask, (self.text_proj.clip_extra_context_tokens, 0), value=True)
-
-        self.decoder_scheduler.set_timesteps(decoder_num_inference_steps, device=device)
-        decoder_timesteps_tensor = self.decoder_scheduler.timesteps
-
-        num_channels_latents = self.decoder.config.in_channels
-        height = self.decoder.config.sample_size
-        width = self.decoder.config.sample_size
-
-        decoder_latents = self.prepare_latents(
-            (batch_size, num_channels_latents, height, width),
-            text_encoder_hidden_states.dtype,
-            device,
-            generator,
-            None,
-            self.decoder_scheduler,
-        )
-
-        for i, t in enumerate(self.progress_bar(decoder_timesteps_tensor)):
-            # expand the latents if we are doing classifier free guidance
-            latent_model_input = torch.cat([decoder_latents] * 2) if do_classifier_free_guidance else decoder_latents
-
-            noise_pred = self.decoder(
-                sample=latent_model_input,
-                timestep=t,
-                encoder_hidden_states=text_encoder_hidden_states,
-                class_labels=additive_clip_time_embeddings,
-                attention_mask=decoder_text_mask,
-            ).sample
-
-            if do_classifier_free_guidance:
-                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
-                noise_pred_uncond, _ = noise_pred_uncond.split(latent_model_input.shape[1], dim=1)
-                noise_pred_text, predicted_variance = noise_pred_text.split(latent_model_input.shape[1], dim=1)
-                noise_pred = noise_pred_uncond + decoder_guidance_scale * (noise_pred_text - noise_pred_uncond)
-                noise_pred = torch.cat([noise_pred, predicted_variance], dim=1)
-
-            if i + 1 == decoder_timesteps_tensor.shape[0]:
-                prev_timestep = None
-            else:
-                prev_timestep = decoder_timesteps_tensor[i + 1]
-
-            # compute the previous noisy sample x_t -> x_t-1
-            decoder_latents = self.decoder_scheduler.step(
-                noise_pred, t, decoder_latents, prev_timestep=prev_timestep, generator=generator
-            ).prev_sample
-
-        decoder_latents = decoder_latents.clamp(-1, 1)
-
-        image_small = decoder_latents
-
-        # done decoder
-
-        # super res
-
-        self.super_res_scheduler.set_timesteps(super_res_num_inference_steps, device=device)
-        super_res_timesteps_tensor = self.super_res_scheduler.timesteps
-
-        channels = self.super_res_first.config.in_channels // 2
-        height = self.super_res_first.config.sample_size
-        width = self.super_res_first.config.sample_size
-
-        super_res_latents = self.prepare_latents(
-            (batch_size, channels, height, width),
-            image_small.dtype,
-            device,
-            generator,
-            None,
-            self.super_res_scheduler,
-        )
-
-        if device.type == "mps":
-            # MPS does not support many interpolations
-            image_upscaled = F.interpolate(image_small, size=[height, width])
-        else:
-            interpolate_antialias = {}
-            if "antialias" in inspect.signature(F.interpolate).parameters:
-                interpolate_antialias["antialias"] = True
-
-            image_upscaled = F.interpolate(
-                image_small, size=[height, width], mode="bicubic", align_corners=False, **interpolate_antialias
-            )
-
-        for i, t in enumerate(self.progress_bar(super_res_timesteps_tensor)):
-            # no classifier free guidance
-
-            if i == super_res_timesteps_tensor.shape[0] - 1:
-                unet = self.super_res_last
-            else:
-                unet = self.super_res_first
-
-            latent_model_input = torch.cat([super_res_latents, image_upscaled], dim=1)
-
-            noise_pred = unet(
-                sample=latent_model_input,
-                timestep=t,
-            ).sample
-
-            if i + 1 == super_res_timesteps_tensor.shape[0]:
-                prev_timestep = None
-            else:
-                prev_timestep = super_res_timesteps_tensor[i + 1]
-
-            # compute the previous noisy sample x_t -> x_t-1
-            super_res_latents = self.super_res_scheduler.step(
-                noise_pred, t, super_res_latents, prev_timestep=prev_timestep, generator=generator
-            ).prev_sample
-
-        image = super_res_latents
-        # done super res
-
-        # post processing
-
-        image = image * 0.5 + 0.5
-        image = image.clamp(0, 1)
-        image = image.cpu().permute(0, 2, 3, 1).float().numpy()
-
-        if output_type == "pil":
-            image = self.numpy_to_pil(image)
-
-        if not return_dict:
-            return (image,)
-
-        return ImagePipelineOutput(images=image)
 
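A hedged usage sketch for the pipeline above, following the diffusers community-pipeline convention; kakaobrain/karlo-v1-alpha is the unCLIP checkpoint this pipeline is normally paired with:

import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "kakaobrain/karlo-v1-alpha",
    torch_dtype=torch.float16,
    custom_pipeline="unclip_text_interpolation",
)
pipe.to("cuda")

# Returns `steps` images morphing from the start prompt to the end prompt.
output = pipe(
    start_prompt="a photo of a cat",
    end_prompt="a photo of a dog",
    steps=5,
    enable_sequential_cpu_offload=False,  # keep everything on the GPU
)
for i, image in enumerate(output.images):
    image.save(f"interpolation_{i}.png")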
spaces/Andy1621/uniformer_image_detection/mmdet/models/detectors/detr.py DELETED
@@ -1,46 +0,0 @@
-from mmdet.core import bbox2result
-from ..builder import DETECTORS
-from .single_stage import SingleStageDetector
-
-
-@DETECTORS.register_module()
-class DETR(SingleStageDetector):
-    r"""Implementation of `DETR: End-to-End Object Detection with
-    Transformers <https://arxiv.org/pdf/2005.12872>`_"""
-
-    def __init__(self,
-                 backbone,
-                 bbox_head,
-                 train_cfg=None,
-                 test_cfg=None,
-                 pretrained=None):
-        super(DETR, self).__init__(backbone, None, bbox_head, train_cfg,
-                                   test_cfg, pretrained)
-
-    def simple_test(self, img, img_metas, rescale=False):
-        """Test function without test time augmentation.
-
-        Args:
-            imgs (list[torch.Tensor]): List of multiple images
-            img_metas (list[dict]): List of image information.
-            rescale (bool, optional): Whether to rescale the results.
-                Defaults to False.
-
-        Returns:
-            list[list[np.ndarray]]: BBox results of each image and classes.
-                The outer list corresponds to each image. The inner list
-                corresponds to each class.
-        """
-        batch_size = len(img_metas)
-        assert batch_size == 1, 'Currently only batch_size 1 for inference ' \
-            f'mode is supported. Found batch_size {batch_size}.'
-        x = self.extract_feat(img)
-        outs = self.bbox_head(x, img_metas)
-        bbox_list = self.bbox_head.get_bboxes(
-            *outs, img_metas, rescale=rescale)
-
-        bbox_results = [
-            bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)
-            for det_bboxes, det_labels in bbox_list
-        ]
-        return bbox_results
 
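A hedged sketch of exercising this detector through mmdetection 2.x's high-level API (the config and checkpoint paths are placeholders; detr_r50_8x2_150e_coco.py is the stock mmdet DETR config name):

from mmdet.apis import inference_detector, init_detector

model = init_detector('configs/detr/detr_r50_8x2_150e_coco.py',
                      'checkpoints/detr_r50.pth', device='cuda:0')
result = inference_detector(model, 'demo.jpg')
# For a single image, `result` is the per-class list that bbox2result builds:
# one (n, 5) array of [x1, y1, x2, y2, score] per class.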
spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_r18-d8_512x1024_80k_cityscapes.py DELETED
@@ -1,9 +0,0 @@
-_base_ = './fcn_r50-d8_512x1024_80k_cityscapes.py'
-model = dict(
-    pretrained='open-mmlab://resnet18_v1c',
-    backbone=dict(depth=18),
-    decode_head=dict(
-        in_channels=512,
-        channels=128,
-    ),
-    auxiliary_head=dict(in_channels=256, channels=64))
 
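For context, a sketch of how mmcv resolves the `_base_` inheritance above: the child config only overrides the fields that differ from the ResNet-50 base (the expected values in the comments are assumptions based on the mmseg Cityscapes base configs):

from mmcv import Config

cfg = Config.fromfile('configs/fcn/fcn_r18-d8_512x1024_80k_cityscapes.py')
print(cfg.model.backbone.depth)           # 18, overridden here
print(cfg.model.decode_head.channels)     # 128, overridden here
print(cfg.model.decode_head.num_classes)  # 19, inherited from the base config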
spaces/Andy1621/uniformerv2_demo/uniformerv2.py DELETED
@@ -1,510 +0,0 @@
-#!/usr/bin/env python
-import os
-from collections import OrderedDict
-
-from timm.models.layers import DropPath
-import torch
-from torch import nn
-from torch.nn import MultiheadAttention
-import torch.nn.functional as F
-import torch.utils.checkpoint as checkpoint
-
-
-MODEL_PATH = './'
-_MODELS = {
-    "ViT-B/16": os.path.join(MODEL_PATH, "vit_b16.pth"),
-    "ViT-L/14": os.path.join(MODEL_PATH, "vit_l14.pth"),
-    "ViT-L/14_336": os.path.join(MODEL_PATH, "vit_l14_336.pth"),
-}
-
-
-class LayerNorm(nn.LayerNorm):
-    """Subclass torch's LayerNorm to handle fp16."""
-
-    def forward(self, x):
-        orig_type = x.dtype
-        ret = super().forward(x.type(torch.float32))
-        return ret.type(orig_type)
-
-
-class QuickGELU(nn.Module):
-    def forward(self, x):
-        return x * torch.sigmoid(1.702 * x)
-
-
-class Local_MHRA(nn.Module):
-    def __init__(self, d_model, dw_reduction=1.5, pos_kernel_size=3):
-        super().__init__()
-
-        padding = pos_kernel_size // 2
-        re_d_model = int(d_model // dw_reduction)
-        self.pos_embed = nn.Sequential(
-            nn.BatchNorm3d(d_model),
-            nn.Conv3d(d_model, re_d_model, kernel_size=1, stride=1, padding=0),
-            nn.Conv3d(re_d_model, re_d_model, kernel_size=(pos_kernel_size, 1, 1), stride=(1, 1, 1), padding=(padding, 0, 0), groups=re_d_model),
-            nn.Conv3d(re_d_model, d_model, kernel_size=1, stride=1, padding=0),
-        )
-
-        # init zero
-        print('Init zero for Conv in pos_emb')
-        nn.init.constant_(self.pos_embed[3].weight, 0)
-        nn.init.constant_(self.pos_embed[3].bias, 0)
-
-    def forward(self, x):
-        return self.pos_embed(x)
-
-
-class ResidualAttentionBlock(nn.Module):
-    def __init__(
-        self, d_model, n_head, attn_mask=None, drop_path=0.0,
-        dw_reduction=1.5, no_lmhra=False, double_lmhra=True
-    ):
-        super().__init__()
-
-        self.n_head = n_head
-        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
-        print(f'Drop path rate: {drop_path}')
-
-        self.no_lmhra = no_lmhra
-        self.double_lmhra = double_lmhra
-        print(f'No L_MHRA: {no_lmhra}')
-        print(f'Double L_MHRA: {double_lmhra}')
-        if not no_lmhra:
-            self.lmhra1 = Local_MHRA(d_model, dw_reduction=dw_reduction)
-            if double_lmhra:
-                self.lmhra2 = Local_MHRA(d_model, dw_reduction=dw_reduction)
-
-        # spatial
-        self.attn = MultiheadAttention(d_model, n_head)
-        self.ln_1 = LayerNorm(d_model)
-        self.mlp = nn.Sequential(OrderedDict([
-            ("c_fc", nn.Linear(d_model, d_model * 4)),
-            ("gelu", QuickGELU()),
-            ("c_proj", nn.Linear(d_model * 4, d_model))
-        ]))
-        self.ln_2 = LayerNorm(d_model)
-        self.attn_mask = attn_mask
-
-    def attention(self, x):
-        self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
-        return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
-
-    def forward(self, x, T=8, use_checkpoint=False):
-        # x: 1+HW, NT, C
-        if not self.no_lmhra:
-            # Local MHRA
-            tmp_x = x[1:, :, :]
-            L, NT, C = tmp_x.shape
-            N = NT // T
-            H = W = int(L ** 0.5)
-            tmp_x = tmp_x.view(H, W, N, T, C).permute(2, 4, 3, 0, 1).contiguous()
-            tmp_x = tmp_x + self.drop_path(self.lmhra1(tmp_x))
-            tmp_x = tmp_x.view(N, C, T, L).permute(3, 0, 2, 1).contiguous().view(L, NT, C)
-            x = torch.cat([x[:1, :, :], tmp_x], dim=0)
-        # MHSA
-        if use_checkpoint:
-            attn_out = checkpoint.checkpoint(self.attention, self.ln_1(x))
-            x = x + self.drop_path(attn_out)
-        else:
-            x = x + self.drop_path(self.attention(self.ln_1(x)))
-        # Local MHRA
-        if not self.no_lmhra and self.double_lmhra:
-            tmp_x = x[1:, :, :]
-            tmp_x = tmp_x.view(H, W, N, T, C).permute(2, 4, 3, 0, 1).contiguous()
-            tmp_x = tmp_x + self.drop_path(self.lmhra2(tmp_x))
-            tmp_x = tmp_x.view(N, C, T, L).permute(3, 0, 2, 1).contiguous().view(L, NT, C)
-            x = torch.cat([x[:1, :, :], tmp_x], dim=0)
-        # FFN
-        if use_checkpoint:
-            mlp_out = checkpoint.checkpoint(self.mlp, self.ln_2(x))
-            x = x + self.drop_path(mlp_out)
-        else:
-            x = x + self.drop_path(self.mlp(self.ln_2(x)))
-        return x
-
-
-class Extractor(nn.Module):
-    def __init__(
-        self, d_model, n_head, attn_mask=None,
-        mlp_factor=4.0, dropout=0.0, drop_path=0.0,
-    ):
-        super().__init__()
-
-        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
-        print(f'Drop path rate: {drop_path}')
-        self.attn = nn.MultiheadAttention(d_model, n_head)
-        self.ln_1 = nn.LayerNorm(d_model)
-        d_mlp = round(mlp_factor * d_model)
-        self.mlp = nn.Sequential(OrderedDict([
-            ("c_fc", nn.Linear(d_model, d_mlp)),
-            ("gelu", QuickGELU()),
-            ("dropout", nn.Dropout(dropout)),
-            ("c_proj", nn.Linear(d_mlp, d_model))
-        ]))
-        self.ln_2 = nn.LayerNorm(d_model)
-        self.ln_3 = nn.LayerNorm(d_model)
-        self.attn_mask = attn_mask
-
-        # zero init
-        nn.init.xavier_uniform_(self.attn.in_proj_weight)
-        nn.init.constant_(self.attn.out_proj.weight, 0.)
-        nn.init.constant_(self.attn.out_proj.bias, 0.)
-        nn.init.xavier_uniform_(self.mlp[0].weight)
-        nn.init.constant_(self.mlp[-1].weight, 0.)
-        nn.init.constant_(self.mlp[-1].bias, 0.)
-
-    def attention(self, x, y):
-        d_model = self.ln_1.weight.size(0)
-        q = (x @ self.attn.in_proj_weight[:d_model].T) + self.attn.in_proj_bias[:d_model]
-
-        k = (y @ self.attn.in_proj_weight[d_model:-d_model].T) + self.attn.in_proj_bias[d_model:-d_model]
-        v = (y @ self.attn.in_proj_weight[-d_model:].T) + self.attn.in_proj_bias[-d_model:]
-        Tx, Ty, N = q.size(0), k.size(0), q.size(1)
-        q = q.view(Tx, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3)
-        k = k.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3)
-        v = v.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3)
-        aff = (q @ k.transpose(-2, -1) / (self.attn.head_dim ** 0.5))
-
-        aff = aff.softmax(dim=-1)
-        out = aff @ v
-        out = out.permute(2, 0, 1, 3).flatten(2)
-        out = self.attn.out_proj(out)
-        return out
-
-    def forward(self, x, y):
-        x = x + self.drop_path(self.attention(self.ln_1(x), self.ln_3(y)))
-        x = x + self.drop_path(self.mlp(self.ln_2(x)))
-        return x
-
-
-class Transformer(nn.Module):
-    def __init__(
-        self, width, layers, heads, attn_mask=None, backbone_drop_path_rate=0.,
-        use_checkpoint=False, checkpoint_num=[0], t_size=8, dw_reduction=2,
-        no_lmhra=False, double_lmhra=True,
-        return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
-        n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
-        mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
-        cls_dropout=0.5, num_classes=400,
-    ):
-        super().__init__()
-        self.T = t_size
-        self.return_list = return_list
-        # backbone
-        b_dpr = [x.item() for x in torch.linspace(0, backbone_drop_path_rate, layers)]
-        self.resblocks = nn.ModuleList([
-            ResidualAttentionBlock(
-                width, heads, attn_mask,
-                drop_path=b_dpr[i],
-                dw_reduction=dw_reduction,
-                no_lmhra=no_lmhra,
-                double_lmhra=double_lmhra,
-            ) for i in range(layers)
-        ])
-        # checkpoint
-        self.use_checkpoint = use_checkpoint
-        self.checkpoint_num = checkpoint_num
-        self.n_layers = n_layers
-        print(f'Use checkpoint: {self.use_checkpoint}')
-        print(f'Checkpoint number: {self.checkpoint_num}')
-
-        # global block
-        assert n_layers == len(return_list)
-        if n_layers > 0:
-            self.temporal_cls_token = nn.Parameter(torch.zeros(1, 1, n_dim))
-            self.dpe = nn.ModuleList([
-                nn.Conv3d(n_dim, n_dim, kernel_size=3, stride=1, padding=1, bias=True, groups=n_dim)
-                for i in range(n_layers)
-            ])
-            for m in self.dpe:
-                nn.init.constant_(m.bias, 0.)
-            dpr = [x.item() for x in torch.linspace(0, drop_path_rate, n_layers)]
-            self.dec = nn.ModuleList([
-                Extractor(
-                    n_dim, n_head, mlp_factor=mlp_factor,
-                    dropout=mlp_dropout[i], drop_path=dpr[i],
-                ) for i in range(n_layers)
-            ])
-            self.balance = nn.Parameter(torch.zeros((n_dim)))
-            self.sigmoid = nn.Sigmoid()
-        # projection
-        self.proj = nn.Sequential(
-            nn.LayerNorm(n_dim),
-            nn.Dropout(cls_dropout),
-            nn.Linear(n_dim, num_classes),
-        )
-
-    def forward(self, x):
-        T_down = self.T
-        L, NT, C = x.shape
-        N = NT // T_down
-        H = W = int((L - 1) ** 0.5)
-
-        if self.n_layers > 0:
244
- cls_token = self.temporal_cls_token.repeat(1, N, 1)
245
-
246
- j = -1
247
- for i, resblock in enumerate(self.resblocks):
248
- if self.use_checkpoint and i < self.checkpoint_num[0]:
249
- x = resblock(x, self.T, use_checkpoint=True)
250
- else:
251
- x = resblock(x, T_down)
252
- if i in self.return_list:
253
- j += 1
254
- tmp_x = x.clone()
255
- tmp_x = tmp_x.view(L, N, T_down, C)
256
- # dpe
257
- _, tmp_feats = tmp_x[:1], tmp_x[1:]
258
- tmp_feats = tmp_feats.permute(1, 3, 2, 0).reshape(N, C, T_down, H, W)
259
- tmp_feats = self.dpe[j](tmp_feats).view(N, C, T_down, L - 1).permute(3, 0, 2, 1).contiguous()
260
- tmp_x[1:] = tmp_x[1:] + tmp_feats
261
- # global block
262
- tmp_x = tmp_x.permute(2, 0, 1, 3).flatten(0, 1) # T * L, N, C
263
- cls_token = self.dec[j](cls_token, tmp_x)
264
-
265
- if self.n_layers > 0:
266
- weight = self.sigmoid(self.balance)
267
- residual = x.view(L, N, T_down, C)[0].mean(1) # L, N, T, C
268
- return self.proj((1 - weight) * cls_token[0, :, :] + weight * residual)
269
- else:
270
- residual = x.view(L, N, T_down, C)[0].mean(1) # L, N, T, C
271
- return self.proj(residual)
272
-
273
-
274
- class VisionTransformer(nn.Module):
275
- def __init__(
276
- self,
277
- # backbone
278
- input_resolution, patch_size, width, layers, heads, output_dim, backbone_drop_path_rate=0.,
279
- use_checkpoint=False, checkpoint_num=[0], t_size=8, kernel_size=3, dw_reduction=1.5,
280
- temporal_downsample=True,
281
- no_lmhra=-False, double_lmhra=True,
282
- # global block
283
- return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
284
- n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
285
- mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
286
- cls_dropout=0.5, num_classes=400,
287
- ):
288
- super().__init__()
289
- self.input_resolution = input_resolution
290
- self.output_dim = output_dim
291
- padding = (kernel_size - 1) // 2
292
- if temporal_downsample:
293
- self.conv1 = nn.Conv3d(3, width, (kernel_size, patch_size, patch_size), (2, patch_size, patch_size), (padding, 0, 0), bias=False)
294
- t_size = t_size // 2
295
- else:
296
- self.conv1 = nn.Conv3d(3, width, (1, patch_size, patch_size), (1, patch_size, patch_size), (0, 0, 0), bias=False)
297
-
298
- scale = width ** -0.5
299
- self.class_embedding = nn.Parameter(scale * torch.randn(width))
300
- self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
301
- self.ln_pre = LayerNorm(width)
302
-
303
- self.transformer = Transformer(
304
- width, layers, heads, dw_reduction=dw_reduction,
305
- backbone_drop_path_rate=backbone_drop_path_rate,
306
- use_checkpoint=use_checkpoint, checkpoint_num=checkpoint_num, t_size=t_size,
307
- no_lmhra=no_lmhra, double_lmhra=double_lmhra,
308
- return_list=return_list, n_layers=n_layers, n_dim=n_dim, n_head=n_head,
309
- mlp_factor=mlp_factor, drop_path_rate=drop_path_rate, mlp_dropout=mlp_dropout,
310
- cls_dropout=cls_dropout, num_classes=num_classes,
311
- )
312
-
313
- def forward(self, x):
314
- x = self.conv1(x) # shape = [*, width, grid, grid]
315
- N, C, T, H, W = x.shape
316
- x = x.permute(0, 2, 3, 4, 1).reshape(N * T, H * W, C)
317
-
318
- x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width]
319
- x = x + self.positional_embedding.to(x.dtype)
320
- x = self.ln_pre(x)
321
-
322
- x = x.permute(1, 0, 2) # NLD -> LND
323
- out = self.transformer(x)
324
- return out
325
-
326
-
327
- def inflate_weight(weight_2d, time_dim, center=True):
328
- print(f'Init center: {center}')
329
- if center:
330
- weight_3d = torch.zeros(*weight_2d.shape)
331
- weight_3d = weight_3d.unsqueeze(2).repeat(1, 1, time_dim, 1, 1)
332
- middle_idx = time_dim // 2
333
- weight_3d[:, :, middle_idx, :, :] = weight_2d
334
- else:
335
- weight_3d = weight_2d.unsqueeze(2).repeat(1, 1, time_dim, 1, 1)
336
- weight_3d = weight_3d / time_dim
337
- return weight_3d
338
-
339
-
340
- def load_state_dict(model, state_dict):
341
- state_dict_3d = model.state_dict()
342
- for k in state_dict.keys():
343
- if state_dict[k].shape != state_dict_3d[k].shape:
344
- if len(state_dict_3d[k].shape) <= 2:
345
- print(f'Ignore: {k}')
346
- continue
347
- print(f'Inflate: {k}, {state_dict[k].shape} => {state_dict_3d[k].shape}')
348
- time_dim = state_dict_3d[k].shape[2]
349
- state_dict[k] = inflate_weight(state_dict[k], time_dim)
350
- model.load_state_dict(state_dict, strict=False)
351
-
352
-
353
- def uniformerv2_b16(
354
- pretrained=True, use_checkpoint=False, checkpoint_num=[0],
355
- t_size=16, dw_reduction=1.5, backbone_drop_path_rate=0.,
356
- temporal_downsample=True,
357
- no_lmhra=False, double_lmhra=True,
358
- return_list=[8, 9, 10, 11],
359
- n_layers=4, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
360
- mlp_dropout=[0.5, 0.5, 0.5, 0.5],
361
- cls_dropout=0.5, num_classes=400,
362
- ):
363
- model = VisionTransformer(
364
- input_resolution=224,
365
- patch_size=16,
366
- width=768,
367
- layers=12,
368
- heads=12,
369
- output_dim=512,
370
- use_checkpoint=use_checkpoint,
371
- checkpoint_num=checkpoint_num,
372
- t_size=t_size,
373
- dw_reduction=dw_reduction,
374
- backbone_drop_path_rate=backbone_drop_path_rate,
375
- temporal_downsample=temporal_downsample,
376
- no_lmhra=no_lmhra,
377
- double_lmhra=double_lmhra,
378
- return_list=return_list,
379
- n_layers=n_layers,
380
- n_dim=n_dim,
381
- n_head=n_head,
382
- mlp_factor=mlp_factor,
383
- drop_path_rate=drop_path_rate,
384
- mlp_dropout=mlp_dropout,
385
- cls_dropout=cls_dropout,
386
- num_classes=num_classes,
387
- )
388
-
389
- if pretrained:
390
- print('load pretrained weights')
391
- state_dict = torch.load(_MODELS["ViT-B/16"], map_location='cpu')
392
- load_state_dict(model, state_dict)
393
- return model.eval()
394
-
395
-
396
- def uniformerv2_l14(
397
- pretrained=True, use_checkpoint=False, checkpoint_num=[0],
398
- t_size=16, dw_reduction=1.5, backbone_drop_path_rate=0.,
399
- temporal_downsample=True,
400
- no_lmhra=False, double_lmhra=True,
401
- return_list=[20, 21, 22, 23],
402
- n_layers=4, n_dim=1024, n_head=16, mlp_factor=4.0, drop_path_rate=0.,
403
- mlp_dropout=[0.5, 0.5, 0.5, 0.5],
404
- cls_dropout=0.5, num_classes=400,
405
- ):
406
- model = VisionTransformer(
407
- input_resolution=224,
408
- patch_size=14,
409
- width=1024,
410
- layers=24,
411
- heads=16,
412
- output_dim=768,
413
- use_checkpoint=use_checkpoint,
414
- checkpoint_num=checkpoint_num,
415
- t_size=t_size,
416
- dw_reduction=dw_reduction,
417
- backbone_drop_path_rate=backbone_drop_path_rate,
418
- temporal_downsample=temporal_downsample,
419
- no_lmhra=no_lmhra,
420
- double_lmhra=double_lmhra,
421
- return_list=return_list,
422
- n_layers=n_layers,
423
- n_dim=n_dim,
424
- n_head=n_head,
425
- mlp_factor=mlp_factor,
426
- drop_path_rate=drop_path_rate,
427
- mlp_dropout=mlp_dropout,
428
- cls_dropout=cls_dropout,
429
- num_classes=num_classes,
430
- )
431
-
432
- if pretrained:
433
- print('load pretrained weights')
434
- state_dict = torch.load(_MODELS["ViT-L/14"], map_location='cpu')
435
- load_state_dict(model, state_dict)
436
- return model.eval()
437
-
438
-
439
- def uniformerv2_l14_336(
440
- pretrained=True, use_checkpoint=False, checkpoint_num=[0],
441
- t_size=16, dw_reduction=1.5, backbone_drop_path_rate=0.,
442
- no_temporal_downsample=True,
443
- no_lmhra=False, double_lmhra=True,
444
- return_list=[20, 21, 22, 23],
445
- n_layers=4, n_dim=1024, n_head=16, mlp_factor=4.0, drop_path_rate=0.,
446
- mlp_dropout=[0.5, 0.5, 0.5, 0.5],
447
- cls_dropout=0.5, num_classes=400,
448
- ):
449
- model = VisionTransformer(
450
- input_resolution=336,
451
- patch_size=14,
452
- width=1024,
453
- layers=24,
454
- heads=16,
455
- output_dim=768,
456
- use_checkpoint=use_checkpoint,
457
- checkpoint_num=checkpoint_num,
458
- t_size=t_size,
459
- dw_reduction=dw_reduction,
460
- backbone_drop_path_rate=backbone_drop_path_rate,
461
- no_temporal_downsample=no_temporal_downsample,
462
- no_lmhra=no_lmhra,
463
- double_lmhra=double_lmhra,
464
- return_list=return_list,
465
- n_layers=n_layers,
466
- n_dim=n_dim,
467
- n_head=n_head,
468
- mlp_factor=mlp_factor,
469
- drop_path_rate=drop_path_rate,
470
- mlp_dropout=mlp_dropout,
471
- cls_dropout=cls_dropout,
472
- num_classes=num_classes,
473
- )
474
-
475
- if pretrained:
476
- print('load pretrained weights')
477
- state_dict = torch.load(_MODELS["ViT-L/14_336"], map_location='cpu')
478
- load_state_dict(model, state_dict)
479
- return model.eval()
480
-
481
-
482
- if __name__ == '__main__':
483
- import time
484
- from fvcore.nn import FlopCountAnalysis
485
- from fvcore.nn import flop_count_table
486
- import numpy as np
487
-
488
- seed = 4217
489
- np.random.seed(seed)
490
- torch.manual_seed(seed)
491
- torch.cuda.manual_seed(seed)
492
- torch.cuda.manual_seed_all(seed)
493
- num_frames = 16
494
-
495
- model = uniformerv2_l14(
496
- pretrained=False,
497
- t_size=num_frames, backbone_drop_path_rate=0., drop_path_rate=0.,
498
- dw_reduction=1.5,
499
- no_lmhra=False,
500
- temporal_downsample=True,
501
- return_list=[8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23],
502
- mlp_dropout=[0.5]*16,
503
- n_layers=16
504
- )
505
- print(model)
506
-
507
- flops = FlopCountAnalysis(model, torch.rand(1, 3, num_frames, 224, 224))
508
- s = time.time()
509
- print(flop_count_table(flops, max_depth=1))
510
- print(time.time()-s)
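A minimal usage sketch for the factory functions in the deleted file above (not part of the original source; it assumes torch is installed and skips the pretrained weights so the _MODELS paths are never touched):

    import torch

    # build UniFormerV2-B/16 without pretrained weights
    model = uniformerv2_b16(pretrained=False, t_size=16, num_classes=400)
    # one clip: (batch, channels, frames, height, width)
    video = torch.rand(1, 3, 16, 224, 224)
    with torch.no_grad():
        logits = model(video)  # -> tensor of shape (1, 400)

With temporal_downsample=True the patch-embedding conv halves the 16 input frames to 8 internally, which is why t_size=16 here matches the defaults of the factory function.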
spaces/AntiUser/DeepDanbooru_string/app.py DELETED
@@ -1,185 +0,0 @@
- #!/usr/bin/env python
-
- from __future__ import annotations
-
- import argparse
- import functools
- import os
- import html
- import pathlib
- import tarfile
-
- import deepdanbooru as dd
- import gradio as gr
- import huggingface_hub
- import numpy as np
- import PIL.Image
- import tensorflow as tf
- import piexif
- import piexif.helper
-
- TITLE = 'DeepDanbooru String'
-
- TOKEN = os.environ['TOKEN']
- MODEL_REPO = 'CikeyQI/DeepDanbooru_string'
- MODEL_FILENAME = 'model-resnet_custom_v3.h5'
- LABEL_FILENAME = 'tags.txt'
-
-
- def parse_args() -> argparse.Namespace:
-     parser = argparse.ArgumentParser()
-     parser.add_argument('--score-slider-step', type=float, default=0.05)
-     parser.add_argument('--score-threshold', type=float, default=0.5)
-     parser.add_argument('--theme', type=str, default='dark-grass')
-     parser.add_argument('--live', action='store_true')
-     parser.add_argument('--share', action='store_true')
-     parser.add_argument('--port', type=int)
-     parser.add_argument('--disable-queue',
-                         dest='enable_queue',
-                         action='store_false')
-     parser.add_argument('--allow-flagging', type=str, default='never')
-     return parser.parse_args()
-
-
- def load_sample_image_paths() -> list[pathlib.Path]:
-     image_dir = pathlib.Path('images')
-     if not image_dir.exists():
-         dataset_repo = 'hysts/sample-images-TADNE'
-         path = huggingface_hub.hf_hub_download(dataset_repo,
-                                                'images.tar.gz',
-                                                repo_type='dataset',
-                                                use_auth_token=TOKEN)
-         with tarfile.open(path) as f:
-             f.extractall()
-     return sorted(image_dir.glob('*'))
-
-
- def load_model() -> tf.keras.Model:
-     path = huggingface_hub.hf_hub_download(MODEL_REPO,
-                                            MODEL_FILENAME,
-                                            use_auth_token=TOKEN)
-     model = tf.keras.models.load_model(path)
-     return model
-
-
- def load_labels() -> list[str]:
-     path = huggingface_hub.hf_hub_download(MODEL_REPO,
-                                            LABEL_FILENAME,
-                                            use_auth_token=TOKEN)
-     with open(path) as f:
-         labels = [line.strip() for line in f.readlines()]
-     return labels
-
-
- def plaintext_to_html(text):
-     text = "<p>" + "<br>\n".join([f"{html.escape(x)}" for x in text.split('\n')]) + "</p>"
-     return text
-
-
- def predict(image: PIL.Image.Image, score_threshold: float,
-             model: tf.keras.Model, labels: list[str]) -> tuple[str, str, dict[str, float], str]:
-     rawimage = image
-     _, height, width, _ = model.input_shape
-     image = np.asarray(image)
-     image = tf.image.resize(image,
-                             size=(height, width),
-                             method=tf.image.ResizeMethod.AREA,
-                             preserve_aspect_ratio=True)
-     image = image.numpy()
-     image = dd.image.transform_and_pad_image(image, width, height)
-     image = image / 255.
-     probs = model.predict(image[None, ...])[0]
-     probs = probs.astype(float)
-     res = dict()
-     for prob, label in zip(probs.tolist(), labels):
-         if prob < score_threshold:
-             continue
-         res[label] = prob
-     b = dict(sorted(res.items(), key=lambda item: item[1], reverse=True))
-     a = ', '.join(list(b.keys())).replace('_', ' ').replace('(', '\(').replace(')', '\)')
-     c = ', '.join(list(b.keys()))
-
-     items = rawimage.info
-     geninfo = ''
-
-     if "exif" in rawimage.info:
-         exif = piexif.load(rawimage.info["exif"])
-         exif_comment = (exif or {}).get("Exif", {}).get(piexif.ExifIFD.UserComment, b'')
-         try:
-             exif_comment = piexif.helper.UserComment.load(exif_comment)
-         except ValueError:
-             exif_comment = exif_comment.decode('utf8', errors="ignore")
-
-         items['exif comment'] = exif_comment
-         geninfo = exif_comment
-
-         for field in ['jfif', 'jfif_version', 'jfif_unit', 'jfif_density', 'dpi', 'exif',
-                       'loop', 'background', 'timestamp', 'duration']:
-             items.pop(field, None)
-
-     geninfo = items.get('parameters', geninfo)
-
-     info = f"""
- <p><h4>PNG Info</h4></p>
- """
-     for key, text in items.items():
-         info += f"""
- <div>
- <p><b>{plaintext_to_html(str(key))}</b></p>
- <p>{plaintext_to_html(str(text))}</p>
- </div>
- """.strip()+"\n"
-
-     if len(info) == 0:
-         message = "Nothing found in the image."
-         info = f"<div><p>{message}<p></div>"
-
-     return (a, c, res, info)
-
-
- def main():
-     args = parse_args()
-     model = load_model()
-     labels = load_labels()
-
-     func = functools.partial(predict, model=model, labels=labels)
-     func = functools.update_wrapper(func, predict)
-
-     gr.Interface(
-         func,
-         [
-             gr.inputs.Image(type='pil', label='Input'),
-             gr.inputs.Slider(0,
-                              1,
-                              step=args.score_slider_step,
-                              default=args.score_threshold,
-                              label='Score Threshold'),
-         ],
-         [
-             gr.outputs.Textbox(label='Output (string)'),
-             gr.outputs.Textbox(label='Output (raw string)'),
-             gr.outputs.Label(label='Output (label)'),
-             gr.outputs.HTML()
-         ],
-         examples=[
-             ['miku.jpg', 0.5],
-             ['miku2.jpg', 0.5]
-         ],
-         title=TITLE,
-         description='''
- Demo for [KichangKim/DeepDanbooru](https://github.com/KichangKim/DeepDanbooru) with "ready to copy" prompt and a prompt analyzer.
-
- Modified from [hysts/DeepDanbooru](https://huggingface.co/spaces/hysts/DeepDanbooru)
-
- PNG Info code forked from [AUTOMATIC1111/stable-diffusion-webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui)
- ''',
-         theme=args.theme,
-         allow_flagging=args.allow_flagging,
-         live=args.live,
-     ).launch(
-         enable_queue=args.enable_queue,
-         server_port=args.port,
-         share=args.share,
-     )
-
-
- if __name__ == '__main__':
-     main()
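A hedged sketch of driving the tagger above without the Gradio UI (an illustration, not part of the original file; it assumes the TOKEN environment variable is set for the hub downloads and that a local miku.jpg exists, mirroring the bundled example):

    import PIL.Image

    model = load_model()
    labels = load_labels()
    tags, raw_tags, scores, info_html = predict(
        PIL.Image.open('miku.jpg'), 0.5, model=model, labels=labels)
    print(raw_tags)     # comma-separated tag string
    print(len(scores))  # number of tags at or above the 0.5 threshold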
spaces/Ariharasudhan/YoloV5/utils/loggers/wandb/wandb_utils.py DELETED
@@ -1,589 +0,0 @@
- """Utilities and tools for tracking runs with Weights & Biases."""
-
- import logging
- import os
- import sys
- from contextlib import contextmanager
- from pathlib import Path
- from typing import Dict
-
- import yaml
- from tqdm import tqdm
-
- FILE = Path(__file__).resolve()
- ROOT = FILE.parents[3]  # YOLOv5 root directory
- if str(ROOT) not in sys.path:
-     sys.path.append(str(ROOT))  # add ROOT to PATH
-
- from utils.dataloaders import LoadImagesAndLabels, img2label_paths
- from utils.general import LOGGER, check_dataset, check_file
-
- try:
-     import wandb
-
-     assert hasattr(wandb, '__version__')  # verify package import not local dir
- except (ImportError, AssertionError):
-     wandb = None
-
- RANK = int(os.getenv('RANK', -1))
- WANDB_ARTIFACT_PREFIX = 'wandb-artifact://'
-
-
- def remove_prefix(from_string, prefix=WANDB_ARTIFACT_PREFIX):
-     return from_string[len(prefix):]
-
-
- def check_wandb_config_file(data_config_file):
-     wandb_config = '_wandb.'.join(data_config_file.rsplit('.', 1))  # updated data.yaml path
-     if Path(wandb_config).is_file():
-         return wandb_config
-     return data_config_file
-
-
- def check_wandb_dataset(data_file):
-     is_trainset_wandb_artifact = False
-     is_valset_wandb_artifact = False
-     if isinstance(data_file, dict):
-         # In that case another dataset manager has already processed it and we don't have to
-         return data_file
-     if check_file(data_file) and data_file.endswith('.yaml'):
-         with open(data_file, errors='ignore') as f:
-             data_dict = yaml.safe_load(f)
-         is_trainset_wandb_artifact = isinstance(data_dict['train'],
-                                                 str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX)
-         is_valset_wandb_artifact = isinstance(data_dict['val'],
-                                               str) and data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX)
-     if is_trainset_wandb_artifact or is_valset_wandb_artifact:
-         return data_dict
-     else:
-         return check_dataset(data_file)
-
-
- def get_run_info(run_path):
-     run_path = Path(remove_prefix(run_path, WANDB_ARTIFACT_PREFIX))
-     run_id = run_path.stem
-     project = run_path.parent.stem
-     entity = run_path.parent.parent.stem
-     model_artifact_name = 'run_' + run_id + '_model'
-     return entity, project, run_id, model_artifact_name
-
-
- def check_wandb_resume(opt):
-     process_wandb_config_ddp_mode(opt) if RANK not in [-1, 0] else None
-     if isinstance(opt.resume, str):
-         if opt.resume.startswith(WANDB_ARTIFACT_PREFIX):
-             if RANK not in [-1, 0]:  # For resuming DDP runs
-                 entity, project, run_id, model_artifact_name = get_run_info(opt.resume)
-                 api = wandb.Api()
-                 artifact = api.artifact(entity + '/' + project + '/' + model_artifact_name + ':latest')
-                 modeldir = artifact.download()
-                 opt.weights = str(Path(modeldir) / "last.pt")
-             return True
-     return None
-
-
- def process_wandb_config_ddp_mode(opt):
-     with open(check_file(opt.data), errors='ignore') as f:
-         data_dict = yaml.safe_load(f)  # data dict
-     train_dir, val_dir = None, None
-     if isinstance(data_dict['train'], str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX):
-         api = wandb.Api()
-         train_artifact = api.artifact(remove_prefix(data_dict['train']) + ':' + opt.artifact_alias)
-         train_dir = train_artifact.download()
-         train_path = Path(train_dir) / 'data/images/'
-         data_dict['train'] = str(train_path)
-
-     if isinstance(data_dict['val'], str) and data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX):
-         api = wandb.Api()
-         val_artifact = api.artifact(remove_prefix(data_dict['val']) + ':' + opt.artifact_alias)
-         val_dir = val_artifact.download()
-         val_path = Path(val_dir) / 'data/images/'
-         data_dict['val'] = str(val_path)
-     if train_dir or val_dir:
-         ddp_data_path = str(Path(val_dir) / 'wandb_local_data.yaml')
-         with open(ddp_data_path, 'w') as f:
-             yaml.safe_dump(data_dict, f)
-         opt.data = ddp_data_path
-
-
- class WandbLogger():
-     """Log training runs, datasets, models, and predictions to Weights & Biases.
-
-     This logger sends information to W&B at wandb.ai. By default, this information
-     includes hyperparameters, system configuration and metrics, model metrics,
-     and basic data metrics and analyses.
-
-     By providing additional command line arguments to train.py, datasets,
-     models and predictions can also be logged.
-
-     For more on how this logger is used, see the Weights & Biases documentation:
-     https://docs.wandb.com/guides/integrations/yolov5
-     """
-
-     def __init__(self, opt, run_id=None, job_type='Training'):
-         """
-         - Initialize WandbLogger instance
-         - Upload dataset if opt.upload_dataset is True
-         - Setup training processes if job_type is 'Training'
-
-         arguments:
-         opt (namespace) -- Commandline arguments for this run
-         run_id (str) -- Run ID of W&B run to be resumed
-         job_type (str) -- To set the job_type for this run
-
-         """
-         # Temporary-fix
-         if opt.upload_dataset:
-             opt.upload_dataset = False
-             # LOGGER.info("Uploading Dataset functionality is not being supported temporarily due to a bug.")
-
-         # Pre-training routine --
-         self.job_type = job_type
-         self.wandb, self.wandb_run = wandb, None if not wandb else wandb.run
-         self.val_artifact, self.train_artifact = None, None
-         self.train_artifact_path, self.val_artifact_path = None, None
-         self.result_artifact = None
-         self.val_table, self.result_table = None, None
-         self.bbox_media_panel_images = []
-         self.val_table_path_map = None
-         self.max_imgs_to_log = 16
-         self.wandb_artifact_data_dict = None
-         self.data_dict = None
-         # It's more elegant to stick to 1 wandb.init call,
-         # but useful config data is overwritten in the WandbLogger's wandb.init call
-         if isinstance(opt.resume, str):  # checks resume from artifact
-             if opt.resume.startswith(WANDB_ARTIFACT_PREFIX):
-                 entity, project, run_id, model_artifact_name = get_run_info(opt.resume)
-                 model_artifact_name = WANDB_ARTIFACT_PREFIX + model_artifact_name
-                 assert wandb, 'install wandb to resume wandb runs'
-                 # Resume wandb-artifact:// runs here| workaround for not overwriting wandb.config
-                 self.wandb_run = wandb.init(id=run_id,
-                                             project=project,
-                                             entity=entity,
-                                             resume='allow',
-                                             allow_val_change=True)
-                 opt.resume = model_artifact_name
-         elif self.wandb:
-             self.wandb_run = wandb.init(config=opt,
-                                         resume="allow",
-                                         project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem,
-                                         entity=opt.entity,
-                                         name=opt.name if opt.name != 'exp' else None,
-                                         job_type=job_type,
-                                         id=run_id,
-                                         allow_val_change=True) if not wandb.run else wandb.run
-         if self.wandb_run:
-             if self.job_type == 'Training':
-                 if opt.upload_dataset:
-                     if not opt.resume:
-                         self.wandb_artifact_data_dict = self.check_and_upload_dataset(opt)
-
-                 if isinstance(opt.data, dict):
-                     # This means another dataset manager has already processed the dataset info (e.g. ClearML)
-                     # and they will have stored the already processed dict in opt.data
-                     self.data_dict = opt.data
-                 elif opt.resume:
-                     # resume from artifact
-                     if isinstance(opt.resume, str) and opt.resume.startswith(WANDB_ARTIFACT_PREFIX):
-                         self.data_dict = dict(self.wandb_run.config.data_dict)
-                     else:  # local resume
-                         self.data_dict = check_wandb_dataset(opt.data)
-                 else:
-                     self.data_dict = check_wandb_dataset(opt.data)
-                     self.wandb_artifact_data_dict = self.wandb_artifact_data_dict or self.data_dict
-
-                     # write data_dict to config. useful for resuming from artifacts. Do this only when not resuming.
-                     self.wandb_run.config.update({'data_dict': self.wandb_artifact_data_dict}, allow_val_change=True)
-                 self.setup_training(opt)
-
-             if self.job_type == 'Dataset Creation':
-                 self.wandb_run.config.update({"upload_dataset": True})
-                 self.data_dict = self.check_and_upload_dataset(opt)
-
-     def check_and_upload_dataset(self, opt):
-         """
-         Check if the dataset format is compatible and upload it as a W&B artifact
-
-         arguments:
-         opt (namespace) -- Commandline arguments for current run
-
-         returns:
-         Updated dataset info dictionary where local dataset paths are replaced by WANDB_ARTIFACT_PREFIX links.
-         """
-         assert wandb, 'Install wandb to upload dataset'
-         config_path = self.log_dataset_artifact(opt.data, opt.single_cls,
-                                                 'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem)
-         with open(config_path, errors='ignore') as f:
-             wandb_data_dict = yaml.safe_load(f)
-         return wandb_data_dict
-
-     def setup_training(self, opt):
-         """
-         Setup the necessary processes for training YOLO models:
-         - Attempt to download model checkpoint and dataset artifacts if opt.resume starts with WANDB_ARTIFACT_PREFIX
-         - Update data_dict, to contain info of previous run if resumed and the paths of dataset artifact if downloaded
-         - Setup log_dict, initialize bbox_interval
-
-         arguments:
-         opt (namespace) -- commandline arguments for this run
-
-         """
-         self.log_dict, self.current_epoch = {}, 0
-         self.bbox_interval = opt.bbox_interval
-         if isinstance(opt.resume, str):
-             modeldir, _ = self.download_model_artifact(opt)
-             if modeldir:
-                 self.weights = Path(modeldir) / "last.pt"
-                 config = self.wandb_run.config
-                 opt.weights, opt.save_period, opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp, opt.imgsz = str(
-                     self.weights), config.save_period, config.batch_size, config.bbox_interval, config.epochs,\
-                     config.hyp, config.imgsz
-         data_dict = self.data_dict
-         if self.val_artifact is None:  # If --upload_dataset is set, use the existing artifact, don't download
-             self.train_artifact_path, self.train_artifact = self.download_dataset_artifact(
-                 data_dict.get('train'), opt.artifact_alias)
-             self.val_artifact_path, self.val_artifact = self.download_dataset_artifact(
-                 data_dict.get('val'), opt.artifact_alias)
-
-         if self.train_artifact_path is not None:
-             train_path = Path(self.train_artifact_path) / 'data/images/'
-             data_dict['train'] = str(train_path)
-         if self.val_artifact_path is not None:
-             val_path = Path(self.val_artifact_path) / 'data/images/'
-             data_dict['val'] = str(val_path)
-
-         if self.val_artifact is not None:
-             self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation")
-             columns = ["epoch", "id", "ground truth", "prediction"]
-             columns.extend(self.data_dict['names'])
-             self.result_table = wandb.Table(columns)
-             self.val_table = self.val_artifact.get("val")
-             if self.val_table_path_map is None:
-                 self.map_val_table_path()
-         if opt.bbox_interval == -1:
-             self.bbox_interval = opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else 1
-             if opt.evolve or opt.noplots:
-                 self.bbox_interval = opt.bbox_interval = opt.epochs + 1  # disable bbox_interval
-         train_from_artifact = self.train_artifact_path is not None and self.val_artifact_path is not None
-         # Update the data_dict to point to local artifacts dir
-         if train_from_artifact:
-             self.data_dict = data_dict
-
-     def download_dataset_artifact(self, path, alias):
-         """
-         download the dataset artifact if the path starts with WANDB_ARTIFACT_PREFIX
-
-         arguments:
-         path -- path of the dataset to be used for training
-         alias (str) -- alias of the artifact to be downloaded/used for training
-
-         returns:
-         (str, wandb.Artifact) -- path of the downloaded dataset and its corresponding artifact object if dataset
-         is found otherwise returns (None, None)
-         """
-         if isinstance(path, str) and path.startswith(WANDB_ARTIFACT_PREFIX):
-             artifact_path = Path(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias)
-             dataset_artifact = wandb.use_artifact(artifact_path.as_posix().replace("\\", "/"))
-             assert dataset_artifact is not None, "'Error: W&B dataset artifact doesn\'t exist'"
-             datadir = dataset_artifact.download()
-             return datadir, dataset_artifact
-         return None, None
-
-     def download_model_artifact(self, opt):
-         """
-         download the model checkpoint artifact if the resume path starts with WANDB_ARTIFACT_PREFIX
-
-         arguments:
-         opt (namespace) -- Commandline arguments for this run
-         """
-         if opt.resume.startswith(WANDB_ARTIFACT_PREFIX):
-             model_artifact = wandb.use_artifact(remove_prefix(opt.resume, WANDB_ARTIFACT_PREFIX) + ":latest")
-             assert model_artifact is not None, 'Error: W&B model artifact doesn\'t exist'
-             modeldir = model_artifact.download()
-             # epochs_trained = model_artifact.metadata.get('epochs_trained')
-             total_epochs = model_artifact.metadata.get('total_epochs')
-             is_finished = total_epochs is None
-             assert not is_finished, 'training is finished, can only resume incomplete runs.'
-             return modeldir, model_artifact
-         return None, None
-
-     def log_model(self, path, opt, epoch, fitness_score, best_model=False):
-         """
-         Log the model checkpoint as W&B artifact
-
-         arguments:
-         path (Path) -- Path of directory containing the checkpoints
-         opt (namespace) -- Command line arguments for this run
-         epoch (int) -- Current epoch number
-         fitness_score (float) -- fitness score for current epoch
-         best_model (boolean) -- Boolean representing if the current checkpoint is the best yet.
-         """
-         model_artifact = wandb.Artifact('run_' + wandb.run.id + '_model',
-                                         type='model',
-                                         metadata={
-                                             'original_url': str(path),
-                                             'epochs_trained': epoch + 1,
-                                             'save period': opt.save_period,
-                                             'project': opt.project,
-                                             'total_epochs': opt.epochs,
-                                             'fitness_score': fitness_score})
-         model_artifact.add_file(str(path / 'last.pt'), name='last.pt')
-         wandb.log_artifact(model_artifact,
-                            aliases=['latest', 'last', 'epoch ' + str(self.current_epoch), 'best' if best_model else ''])
-         LOGGER.info(f"Saving model artifact on epoch {epoch + 1}")
-
-     def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False):
-         """
-         Log the dataset as W&B artifact and return the new data file with W&B links
-
-         arguments:
-         data_file (str) -- the .yaml file with information about the dataset like - path, classes etc.
-         single_cls (boolean) -- train multi-class data as single-class
-         project (str) -- project name. Used to construct the artifact path
-         overwrite_config (boolean) -- overwrites the data.yaml file if set to true otherwise creates a new
-         file with _wandb postfix. Eg -> data_wandb.yaml
-
-         returns:
-         the new .yaml file with artifact links. it can be used to start training directly from artifacts
-         """
-         upload_dataset = self.wandb_run.config.upload_dataset
-         log_val_only = isinstance(upload_dataset, str) and upload_dataset == 'val'
-         self.data_dict = check_dataset(data_file)  # parse and check
-         data = dict(self.data_dict)
-         nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names'])
-         names = {k: v for k, v in enumerate(names)}  # to index dictionary
-
-         # log train set
-         if not log_val_only:
-             self.train_artifact = self.create_dataset_table(LoadImagesAndLabels(data['train'], rect=True, batch_size=1),
-                                                             names,
-                                                             name='train') if data.get('train') else None
-             if data.get('train'):
-                 data['train'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'train')
-
-         self.val_artifact = self.create_dataset_table(
-             LoadImagesAndLabels(data['val'], rect=True, batch_size=1), names, name='val') if data.get('val') else None
-         if data.get('val'):
-             data['val'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'val')
-
-         path = Path(data_file)
-         # create a _wandb.yaml file with artifacts links if both train and test set are logged
-         if not log_val_only:
-             path = (path.stem if overwrite_config else path.stem + '_wandb') + '.yaml'  # updated data.yaml path
-             path = ROOT / 'data' / path
-             data.pop('download', None)
-             data.pop('path', None)
-             with open(path, 'w') as f:
-                 yaml.safe_dump(data, f)
-                 LOGGER.info(f"Created dataset config file {path}")
-
-         if self.job_type == 'Training':  # builds correct artifact pipeline graph
-             if not log_val_only:
-                 self.wandb_run.log_artifact(
-                     self.train_artifact)  # calling use_artifact downloads the dataset. NOT NEEDED!
-             self.wandb_run.use_artifact(self.val_artifact)
-             self.val_artifact.wait()
-             self.val_table = self.val_artifact.get('val')
-             self.map_val_table_path()
-         else:
-             self.wandb_run.log_artifact(self.train_artifact)
-             self.wandb_run.log_artifact(self.val_artifact)
-         return path
-
-     def map_val_table_path(self):
-         """
-         Map the validation dataset Table like name of file -> its id in the W&B Table.
-         Useful for - referencing artifacts for evaluation.
-         """
-         self.val_table_path_map = {}
-         LOGGER.info("Mapping dataset")
-         for i, data in enumerate(tqdm(self.val_table.data)):
-             self.val_table_path_map[data[3]] = data[0]
-
-     def create_dataset_table(self, dataset: LoadImagesAndLabels, class_to_id: Dict[int, str], name: str = 'dataset'):
-         """
-         Create and return W&B artifact containing W&B Table of the dataset.
-
-         arguments:
-         dataset -- instance of LoadImagesAndLabels class used to iterate over the data to build Table
-         class_to_id -- hash map that maps class ids to labels
-         name -- name of the artifact
-
-         returns:
-         dataset artifact to be logged or used
-         """
-         # TODO: Explore multiprocessing to split this loop in parallel. This is essential for speeding up the logging
-         artifact = wandb.Artifact(name=name, type="dataset")
-         img_files = tqdm([dataset.path]) if isinstance(dataset.path, str) and Path(dataset.path).is_dir() else None
-         img_files = tqdm(dataset.im_files) if not img_files else img_files
-         for img_file in img_files:
-             if Path(img_file).is_dir():
-                 artifact.add_dir(img_file, name='data/images')
-                 labels_path = 'labels'.join(dataset.path.rsplit('images', 1))
-                 artifact.add_dir(labels_path, name='data/labels')
-             else:
-                 artifact.add_file(img_file, name='data/images/' + Path(img_file).name)
-                 label_file = Path(img2label_paths([img_file])[0])
-                 artifact.add_file(str(label_file), name='data/labels/' +
-                                   label_file.name) if label_file.exists() else None
-         table = wandb.Table(columns=["id", "train_image", "Classes", "name"])
-         class_set = wandb.Classes([{'id': id, 'name': name} for id, name in class_to_id.items()])
-         for si, (img, labels, paths, shapes) in enumerate(tqdm(dataset)):
-             box_data, img_classes = [], {}
-             for cls, *xywh in labels[:, 1:].tolist():
-                 cls = int(cls)
-                 box_data.append({
-                     "position": {
-                         "middle": [xywh[0], xywh[1]],
-                         "width": xywh[2],
-                         "height": xywh[3]},
-                     "class_id": cls,
-                     "box_caption": "%s" % (class_to_id[cls])})
-                 img_classes[cls] = class_to_id[cls]
-             boxes = {"ground_truth": {"box_data": box_data, "class_labels": class_to_id}}  # inference-space
-             table.add_data(si, wandb.Image(paths, classes=class_set, boxes=boxes), list(img_classes.values()),
-                            Path(paths).name)
-         artifact.add(table, name)
-         return artifact
-
-     def log_training_progress(self, predn, path, names):
-         """
-         Build evaluation Table. Uses reference from validation dataset table.
-
-         arguments:
-         predn (list): list of predictions in the native space in the format - [xmin, ymin, xmax, ymax, confidence, class]
-         path (str): local path of the current evaluation image
-         names (dict(int, str)): hash map that maps class ids to labels
-         """
-         class_set = wandb.Classes([{'id': id, 'name': name} for id, name in names.items()])
-         box_data = []
-         avg_conf_per_class = [0] * len(self.data_dict['names'])
-         pred_class_count = {}
-         for *xyxy, conf, cls in predn.tolist():
-             if conf >= 0.25:
-                 cls = int(cls)
-                 box_data.append({
-                     "position": {
-                         "minX": xyxy[0],
-                         "minY": xyxy[1],
-                         "maxX": xyxy[2],
-                         "maxY": xyxy[3]},
-                     "class_id": cls,
-                     "box_caption": f"{names[cls]} {conf:.3f}",
-                     "scores": {
-                         "class_score": conf},
-                     "domain": "pixel"})
-                 avg_conf_per_class[cls] += conf
-
-                 if cls in pred_class_count:
-                     pred_class_count[cls] += 1
-                 else:
-                     pred_class_count[cls] = 1
-
-         for pred_class in pred_class_count.keys():
-             avg_conf_per_class[pred_class] = avg_conf_per_class[pred_class] / pred_class_count[pred_class]
-
-         boxes = {"predictions": {"box_data": box_data, "class_labels": names}}  # inference-space
-         id = self.val_table_path_map[Path(path).name]
-         self.result_table.add_data(self.current_epoch, id, self.val_table.data[id][1],
-                                    wandb.Image(self.val_table.data[id][1], boxes=boxes, classes=class_set),
-                                    *avg_conf_per_class)
-
-     def val_one_image(self, pred, predn, path, names, im):
-         """
-         Log validation data for one image. Updates the result Table if the validation dataset is uploaded and logs to the bbox media panel
-
-         arguments:
-         pred (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class]
-         predn (list): list of predictions in the native space - [xmin, ymin, xmax, ymax, confidence, class]
-         path (str): local path of the current evaluation image
-         """
-         if self.val_table and self.result_table:  # Log Table if Val dataset is uploaded as artifact
-             self.log_training_progress(predn, path, names)
-
-         if len(self.bbox_media_panel_images) < self.max_imgs_to_log and self.current_epoch > 0:
-             if self.current_epoch % self.bbox_interval == 0:
-                 box_data = [{
-                     "position": {
-                         "minX": xyxy[0],
-                         "minY": xyxy[1],
-                         "maxX": xyxy[2],
-                         "maxY": xyxy[3]},
-                     "class_id": int(cls),
-                     "box_caption": f"{names[int(cls)]} {conf:.3f}",
-                     "scores": {
-                         "class_score": conf},
-                     "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()]
-                 boxes = {"predictions": {"box_data": box_data, "class_labels": names}}  # inference-space
-                 self.bbox_media_panel_images.append(wandb.Image(im, boxes=boxes, caption=path.name))
-
-     def log(self, log_dict):
-         """
-         save the metrics to the logging dictionary
-
-         arguments:
-         log_dict (Dict) -- metrics/media to be logged in current step
-         """
-         if self.wandb_run:
-             for key, value in log_dict.items():
-                 self.log_dict[key] = value
-
-     def end_epoch(self, best_result=False):
-         """
-         commit the log_dict, model artifacts and Tables to W&B and flush the log_dict.
-
-         arguments:
-         best_result (boolean): Boolean representing if the result of this evaluation is best or not
-         """
-         if self.wandb_run:
-             with all_logging_disabled():
-                 if self.bbox_media_panel_images:
-                     self.log_dict["BoundingBoxDebugger"] = self.bbox_media_panel_images
-                 try:
-                     wandb.log(self.log_dict)
-                 except BaseException as e:
-                     LOGGER.info(
-                         f"An error occurred in wandb logger. The training will proceed without interruption. More info\n{e}"
-                     )
-                     self.wandb_run.finish()
-                     self.wandb_run = None
-
-                 self.log_dict = {}
-                 self.bbox_media_panel_images = []
-             if self.result_artifact:
-                 self.result_artifact.add(self.result_table, 'result')
-                 wandb.log_artifact(self.result_artifact,
-                                    aliases=[
-                                        'latest', 'last', 'epoch ' + str(self.current_epoch),
-                                        ('best' if best_result else '')])
-
-                 wandb.log({"evaluation": self.result_table})
-                 columns = ["epoch", "id", "ground truth", "prediction"]
-                 columns.extend(self.data_dict['names'])
-                 self.result_table = wandb.Table(columns)
-                 self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation")
-
-     def finish_run(self):
-         """
-         Log metrics if any and finish the current W&B run
-         """
-         if self.wandb_run:
-             if self.log_dict:
-                 with all_logging_disabled():
-                     wandb.log(self.log_dict)
-             wandb.run.finish()
-
-
- @contextmanager
- def all_logging_disabled(highest_level=logging.CRITICAL):
-     """ source - https://gist.github.com/simon-weber/7853144
-     A context manager that will prevent any logging messages triggered during the body from being processed.
-     :param highest_level: the maximum logging level in use.
-         This would only need to be changed if a custom level greater than CRITICAL is defined.
-     """
-     previous_level = logging.root.manager.disable
-     logging.disable(highest_level)
-     try:
-         yield
-     finally:
-         logging.disable(previous_level)
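A rough sketch of the logger's lifecycle (not from the original file; the SimpleNamespace fields are illustrative stand-ins for YOLOv5's argparse options, a wandb login and a valid data YAML are assumed, and the constructor reads more fields than shown when resuming from an artifact):

    from types import SimpleNamespace

    opt = SimpleNamespace(resume=False, upload_dataset=False, data='data/coco128.yaml',
                          project='runs/train', name='exp', entity=None,
                          bbox_interval=-1, epochs=3, evolve=False, noplots=False,
                          artifact_alias='latest')
    logger = WandbLogger(opt)           # wandb.init happens here
    logger.log({'train/box_loss': 0.05})
    logger.end_epoch()                  # commits the buffered log_dict
    logger.finish_run()                 # closes the W&B run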
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/distlib/util.py DELETED
@@ -1,1932 +0,0 @@
- #
- # Copyright (C) 2012-2021 The Python Software Foundation.
- # See LICENSE.txt and CONTRIBUTORS.txt.
- #
- import codecs
- from collections import deque
- import contextlib
- import csv
- from glob import iglob as std_iglob
- import io
- import json
- import logging
- import os
- import py_compile
- import re
- import socket
- try:
-     import ssl
- except ImportError:  # pragma: no cover
-     ssl = None
- import subprocess
- import sys
- import tarfile
- import tempfile
- import textwrap
-
- try:
-     import threading
- except ImportError:  # pragma: no cover
-     import dummy_threading as threading
- import time
-
- from . import DistlibException
- from .compat import (string_types, text_type, shutil, raw_input, StringIO,
-                      cache_from_source, urlopen, urljoin, httplib, xmlrpclib,
-                      splittype, HTTPHandler, BaseConfigurator, valid_ident,
-                      Container, configparser, URLError, ZipFile, fsdecode,
-                      unquote, urlparse)
-
- logger = logging.getLogger(__name__)
-
- #
- # Requirement parsing code as per PEP 508
- #
-
- IDENTIFIER = re.compile(r'^([\w\.-]+)\s*')
- VERSION_IDENTIFIER = re.compile(r'^([\w\.*+-]+)\s*')
- COMPARE_OP = re.compile(r'^(<=?|>=?|={2,3}|[~!]=)\s*')
- MARKER_OP = re.compile(r'^((<=?)|(>=?)|={2,3}|[~!]=|in|not\s+in)\s*')
- OR = re.compile(r'^or\b\s*')
- AND = re.compile(r'^and\b\s*')
- NON_SPACE = re.compile(r'(\S+)\s*')
- STRING_CHUNK = re.compile(r'([\s\w\.{}()*+#:;,/?!~`@$%^&=|<>\[\]-]+)')
-
-
- def parse_marker(marker_string):
-     """
-     Parse a marker string and return a dictionary containing a marker expression.
-
-     The dictionary will contain keys "op", "lhs" and "rhs" for non-terminals in
-     the expression grammar, or strings. A string contained in quotes is to be
-     interpreted as a literal string, and a string not contained in quotes is a
-     variable (such as os_name).
-     """
-     def marker_var(remaining):
-         # either identifier, or literal string
-         m = IDENTIFIER.match(remaining)
-         if m:
-             result = m.groups()[0]
-             remaining = remaining[m.end():]
-         elif not remaining:
-             raise SyntaxError('unexpected end of input')
-         else:
-             q = remaining[0]
-             if q not in '\'"':
-                 raise SyntaxError('invalid expression: %s' % remaining)
-             oq = '\'"'.replace(q, '')
-             remaining = remaining[1:]
-             parts = [q]
-             while remaining:
-                 # either a string chunk, or oq, or q to terminate
-                 if remaining[0] == q:
-                     break
-                 elif remaining[0] == oq:
-                     parts.append(oq)
-                     remaining = remaining[1:]
-                 else:
-                     m = STRING_CHUNK.match(remaining)
-                     if not m:
-                         raise SyntaxError('error in string literal: %s' % remaining)
-                     parts.append(m.groups()[0])
-                     remaining = remaining[m.end():]
-             else:
-                 s = ''.join(parts)
-                 raise SyntaxError('unterminated string: %s' % s)
-             parts.append(q)
-             result = ''.join(parts)
-             remaining = remaining[1:].lstrip()  # skip past closing quote
-         return result, remaining
-
-     def marker_expr(remaining):
-         if remaining and remaining[0] == '(':
-             result, remaining = marker(remaining[1:].lstrip())
-             if remaining[0] != ')':
-                 raise SyntaxError('unterminated parenthesis: %s' % remaining)
-             remaining = remaining[1:].lstrip()
-         else:
-             lhs, remaining = marker_var(remaining)
-             while remaining:
-                 m = MARKER_OP.match(remaining)
-                 if not m:
-                     break
-                 op = m.groups()[0]
-                 remaining = remaining[m.end():]
-                 rhs, remaining = marker_var(remaining)
-                 lhs = {'op': op, 'lhs': lhs, 'rhs': rhs}
-             result = lhs
-         return result, remaining
-
-     def marker_and(remaining):
-         lhs, remaining = marker_expr(remaining)
-         while remaining:
-             m = AND.match(remaining)
-             if not m:
-                 break
-             remaining = remaining[m.end():]
-             rhs, remaining = marker_expr(remaining)
-             lhs = {'op': 'and', 'lhs': lhs, 'rhs': rhs}
-         return lhs, remaining
-
-     def marker(remaining):
-         lhs, remaining = marker_and(remaining)
-         while remaining:
-             m = OR.match(remaining)
-             if not m:
-                 break
-             remaining = remaining[m.end():]
-             rhs, remaining = marker_and(remaining)
-             lhs = {'op': 'or', 'lhs': lhs, 'rhs': rhs}
-         return lhs, remaining
-
-     return marker(marker_string)
-
-
- def parse_requirement(req):
-     """
-     Parse a requirement passed in as a string. Return a Container
-     whose attributes contain the various parts of the requirement.
-     """
-     remaining = req.strip()
-     if not remaining or remaining.startswith('#'):
-         return None
-     m = IDENTIFIER.match(remaining)
-     if not m:
-         raise SyntaxError('name expected: %s' % remaining)
-     distname = m.groups()[0]
-     remaining = remaining[m.end():]
-     extras = mark_expr = versions = uri = None
-     if remaining and remaining[0] == '[':
-         i = remaining.find(']', 1)
-         if i < 0:
-             raise SyntaxError('unterminated extra: %s' % remaining)
-         s = remaining[1:i]
-         remaining = remaining[i + 1:].lstrip()
-         extras = []
-         while s:
-             m = IDENTIFIER.match(s)
-             if not m:
-                 raise SyntaxError('malformed extra: %s' % s)
-             extras.append(m.groups()[0])
-             s = s[m.end():]
-             if not s:
-                 break
-             if s[0] != ',':
-                 raise SyntaxError('comma expected in extras: %s' % s)
-             s = s[1:].lstrip()
-         if not extras:
-             extras = None
-     if remaining:
-         if remaining[0] == '@':
-             # it's a URI
-             remaining = remaining[1:].lstrip()
-             m = NON_SPACE.match(remaining)
-             if not m:
-                 raise SyntaxError('invalid URI: %s' % remaining)
-             uri = m.groups()[0]
-             t = urlparse(uri)
-             # there are issues with Python and URL parsing, so this test
-             # is a bit crude. See bpo-20271, bpo-23505. Python doesn't
-             # always parse invalid URLs correctly - it should raise
-             # exceptions for malformed URLs
-             if not (t.scheme and t.netloc):
-                 raise SyntaxError('Invalid URL: %s' % uri)
-             remaining = remaining[m.end():].lstrip()
-         else:
-
-             def get_versions(ver_remaining):
-                 """
-                 Return a list of operator, version tuples if any are
-                 specified, else None.
-                 """
-                 m = COMPARE_OP.match(ver_remaining)
-                 versions = None
-                 if m:
-                     versions = []
-                     while True:
-                         op = m.groups()[0]
-                         ver_remaining = ver_remaining[m.end():]
-                         m = VERSION_IDENTIFIER.match(ver_remaining)
-                         if not m:
-                             raise SyntaxError('invalid version: %s' % ver_remaining)
-                         v = m.groups()[0]
-                         versions.append((op, v))
-                         ver_remaining = ver_remaining[m.end():]
-                         if not ver_remaining or ver_remaining[0] != ',':
-                             break
-                         ver_remaining = ver_remaining[1:].lstrip()
-                         # Some packages have a trailing comma which would break things
-                         # See issue #148
-                         if not ver_remaining:
-                             break
-                         m = COMPARE_OP.match(ver_remaining)
-                         if not m:
-                             raise SyntaxError('invalid constraint: %s' % ver_remaining)
-                     if not versions:
-                         versions = None
-                 return versions, ver_remaining
-
-             if remaining[0] != '(':
-                 versions, remaining = get_versions(remaining)
-             else:
-                 i = remaining.find(')', 1)
-                 if i < 0:
-                     raise SyntaxError('unterminated parenthesis: %s' % remaining)
-                 s = remaining[1:i]
-                 remaining = remaining[i + 1:].lstrip()
-                 # As a special diversion from PEP 508, allow a version number
-                 # a.b.c in parentheses as a synonym for ~= a.b.c (because this
-                 # is allowed in earlier PEPs)
-                 if COMPARE_OP.match(s):
-                     versions, _ = get_versions(s)
-                 else:
-                     m = VERSION_IDENTIFIER.match(s)
-                     if not m:
-                         raise SyntaxError('invalid constraint: %s' % s)
-                     v = m.groups()[0]
-                     s = s[m.end():].lstrip()
-                     if s:
-                         raise SyntaxError('invalid constraint: %s' % s)
-                     versions = [('~=', v)]
-
-     if remaining:
-         if remaining[0] != ';':
-             raise SyntaxError('invalid requirement: %s' % remaining)
-         remaining = remaining[1:].lstrip()
-
-         mark_expr, remaining = parse_marker(remaining)
-
-     if remaining and remaining[0] != '#':
-         raise SyntaxError('unexpected trailing data: %s' % remaining)
-
-     if not versions:
-         rs = distname
-     else:
-         rs = '%s %s' % (distname, ', '.join(['%s %s' % con for con in versions]))
-     return Container(name=distname, extras=extras, constraints=versions,
-                      marker=mark_expr, url=uri, requirement=rs)
-
-
- def get_resources_dests(resources_root, rules):
-     """Find destinations for resources files"""
-
-     def get_rel_path(root, path):
-         # normalizes and returns a lstripped-/-separated path
-         root = root.replace(os.path.sep, '/')
-         path = path.replace(os.path.sep, '/')
-         assert path.startswith(root)
-         return path[len(root):].lstrip('/')
-
-     destinations = {}
-     for base, suffix, dest in rules:
-         prefix = os.path.join(resources_root, base)
-         for abs_base in iglob(prefix):
-             abs_glob = os.path.join(abs_base, suffix)
-             for abs_path in iglob(abs_glob):
-                 resource_file = get_rel_path(resources_root, abs_path)
-                 if dest is None:  # remove the entry if it was here
-                     destinations.pop(resource_file, None)
-                 else:
-                     rel_path = get_rel_path(abs_base, abs_path)
-                     rel_dest = dest.replace(os.path.sep, '/').rstrip('/')
-                     destinations[resource_file] = rel_dest + '/' + rel_path
-     return destinations
-
-
- def in_venv():
-     if hasattr(sys, 'real_prefix'):
-         # virtualenv venvs
-         result = True
-     else:
-         # PEP 405 venvs
-         result = sys.prefix != getattr(sys, 'base_prefix', sys.prefix)
-     return result
-
-
- def get_executable():
- # The __PYVENV_LAUNCHER__ dance is apparently no longer needed, as
- # changes to the stub launcher mean that sys.executable always points
- # to the stub on OS X
- #     if sys.platform == 'darwin' and ('__PYVENV_LAUNCHER__'
- #                                      in os.environ):
- #         result = os.environ['__PYVENV_LAUNCHER__']
- #     else:
- #         result = sys.executable
- #     return result
- # Avoid normcasing: see issue #143
- #     result = os.path.normcase(sys.executable)
-     result = sys.executable
-     if not isinstance(result, text_type):
-         result = fsdecode(result)
-     return result
-
-
- def proceed(prompt, allowed_chars, error_prompt=None, default=None):
-     p = prompt
-     while True:
-         s = raw_input(p)
-         p = prompt
-         if not s and default:
-             s = default
-         if s:
-             c = s[0].lower()
-             if c in allowed_chars:
-                 break
-             if error_prompt:
-                 p = '%c: %s\n%s' % (c, error_prompt, prompt)
-     return c
-
-
- def extract_by_key(d, keys):
-     if isinstance(keys, string_types):
-         keys = keys.split()
-     result = {}
-     for key in keys:
-         if key in d:
-             result[key] = d[key]
-     return result
-
- def read_exports(stream):
-     if sys.version_info[0] >= 3:
-         # needs to be a text stream
-         stream = codecs.getreader('utf-8')(stream)
-     # Try to load as JSON, falling back on legacy format
-     data = stream.read()
-     stream = StringIO(data)
-     try:
-         jdata = json.load(stream)
-         result = jdata['extensions']['python.exports']['exports']
-         for group, entries in result.items():
-             for k, v in entries.items():
-                 s = '%s = %s' % (k, v)
-                 entry = get_export_entry(s)
-                 assert entry is not None
-                 entries[k] = entry
-         return result
-     except Exception:
-         stream.seek(0, 0)
-
-     def read_stream(cp, stream):
-         if hasattr(cp, 'read_file'):
-             cp.read_file(stream)
-         else:
-             cp.readfp(stream)
-
-     cp = configparser.ConfigParser()
-     try:
-         read_stream(cp, stream)
-     except configparser.MissingSectionHeaderError:
-         stream.close()
-         data = textwrap.dedent(data)
-         stream = StringIO(data)
-         read_stream(cp, stream)
-
-     result = {}
-     for key in cp.sections():
-         result[key] = entries = {}
-         for name, value in cp.items(key):
-             s = '%s = %s' % (name, value)
-             entry = get_export_entry(s)
-             assert entry is not None
-             # entry.dist = self
-             entries[name] = entry
-     return result
-
-
- def write_exports(exports, stream):
-     if sys.version_info[0] >= 3:
-         # needs to be a text stream
-         stream = codecs.getwriter('utf-8')(stream)
-     cp = configparser.ConfigParser()
-     for k, v in exports.items():
-         # TODO check k, v for valid values
-         cp.add_section(k)
-         for entry in v.values():
-             if entry.suffix is None:
-                 s = entry.prefix
-             else:
-                 s = '%s:%s' % (entry.prefix, entry.suffix)
-             if entry.flags:
-                 s = '%s [%s]' % (s, ', '.join(entry.flags))
-             cp.set(k, entry.name, s)
-     cp.write(stream)
-
-
- @contextlib.contextmanager
- def tempdir():
-     td = tempfile.mkdtemp()
-     try:
-         yield td
-     finally:
-         shutil.rmtree(td)
-
- @contextlib.contextmanager
- def chdir(d):
-     cwd = os.getcwd()
-     try:
-         os.chdir(d)
-         yield
-     finally:
-         os.chdir(cwd)
-
-
- @contextlib.contextmanager
- def socket_timeout(seconds=15):
435
- cto = socket.getdefaulttimeout()
436
- try:
437
- socket.setdefaulttimeout(seconds)
438
- yield
439
- finally:
440
- socket.setdefaulttimeout(cto)
441
-
442
-
443
- class cached_property(object):
444
- def __init__(self, func):
445
- self.func = func
446
- #for attr in ('__name__', '__module__', '__doc__'):
447
- # setattr(self, attr, getattr(func, attr, None))
448
-
449
- def __get__(self, obj, cls=None):
450
- if obj is None:
451
- return self
452
- value = self.func(obj)
453
- object.__setattr__(obj, self.func.__name__, value)
454
- #obj.__dict__[self.func.__name__] = value = self.func(obj)
455
- return value
456
-
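# Minimal sketch of the cached_property descriptor above: the first access
# runs the wrapped function, then the result shadows the descriptor on the
# instance, so later lookups are plain attribute reads.
class Example(object):
    @cached_property
    def answer(self):
        print('computing...')
        return 42

e = Example()
e.answer   # prints 'computing...' and returns 42
e.answer   # returns 42 without recomputing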
457
- def convert_path(pathname):
458
- """Return 'pathname' as a name that will work on the native filesystem.
459
-
460
- The path is split on '/' and put back together again using the current
461
- directory separator. Needed because filenames in the setup script are
462
- always supplied in Unix style, and have to be converted to the local
463
- convention before we can actually use them in the filesystem. Raises
464
- ValueError on non-Unix-ish systems if 'pathname' either starts or
465
- ends with a slash.
466
- """
467
- if os.sep == '/':
468
- return pathname
469
- if not pathname:
470
- return pathname
471
- if pathname[0] == '/':
472
- raise ValueError("path '%s' cannot be absolute" % pathname)
473
- if pathname[-1] == '/':
474
- raise ValueError("path '%s' cannot end with '/'" % pathname)
475
-
476
- paths = pathname.split('/')
477
- while os.curdir in paths:
478
- paths.remove(os.curdir)
479
- if not paths:
480
- return os.curdir
481
- return os.path.join(*paths)
482
-
483
-
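# Illustrative calls for convert_path() above; it is a no-op when
# os.sep == '/', so these show the Windows behaviour:
convert_path('pkg/data/file.txt')   # -> 'pkg\\data\\file.txt'
convert_path('/absolute/path')      # -> raises ValueError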
484
- class FileOperator(object):
485
- def __init__(self, dry_run=False):
486
- self.dry_run = dry_run
487
- self.ensured = set()
488
- self._init_record()
489
-
490
- def _init_record(self):
491
- self.record = False
492
- self.files_written = set()
493
- self.dirs_created = set()
494
-
495
- def record_as_written(self, path):
496
- if self.record:
497
- self.files_written.add(path)
498
-
499
- def newer(self, source, target):
500
- """Tell if the target is newer than the source.
501
-
502
- Returns true if 'source' exists and is more recently modified than
503
- 'target', or if 'source' exists and 'target' doesn't.
504
-
505
- Returns false if both exist and 'target' is the same age or younger
506
- than 'source'. Raise PackagingFileError if 'source' does not exist.
507
-
508
- Note that this test is not very accurate: files created in the same
509
- second will have the same "age".
510
- """
511
- if not os.path.exists(source):
512
- raise DistlibException("file '%r' does not exist" %
513
- os.path.abspath(source))
514
- if not os.path.exists(target):
515
- return True
516
-
517
- return os.stat(source).st_mtime > os.stat(target).st_mtime
518
-
519
- def copy_file(self, infile, outfile, check=True):
520
- """Copy a file respecting dry-run and force flags.
521
- """
522
- self.ensure_dir(os.path.dirname(outfile))
523
- logger.info('Copying %s to %s', infile, outfile)
524
- if not self.dry_run:
525
- msg = None
526
- if check:
527
- if os.path.islink(outfile):
528
- msg = '%s is a symlink' % outfile
529
- elif os.path.exists(outfile) and not os.path.isfile(outfile):
530
- msg = '%s is a non-regular file' % outfile
531
- if msg:
532
- raise ValueError(msg + ' which would be overwritten')
533
- shutil.copyfile(infile, outfile)
534
- self.record_as_written(outfile)
535
-
536
- def copy_stream(self, instream, outfile, encoding=None):
537
- assert not os.path.isdir(outfile)
538
- self.ensure_dir(os.path.dirname(outfile))
539
- logger.info('Copying stream %s to %s', instream, outfile)
540
- if not self.dry_run:
541
- if encoding is None:
542
- outstream = open(outfile, 'wb')
543
- else:
544
- outstream = codecs.open(outfile, 'w', encoding=encoding)
545
- try:
546
- shutil.copyfileobj(instream, outstream)
547
- finally:
548
- outstream.close()
549
- self.record_as_written(outfile)
550
-
551
- def write_binary_file(self, path, data):
552
- self.ensure_dir(os.path.dirname(path))
553
- if not self.dry_run:
554
- if os.path.exists(path):
555
- os.remove(path)
556
- with open(path, 'wb') as f:
557
- f.write(data)
558
- self.record_as_written(path)
559
-
560
- def write_text_file(self, path, data, encoding):
561
- self.write_binary_file(path, data.encode(encoding))
562
-
563
- def set_mode(self, bits, mask, files):
564
- if os.name == 'posix' or (os.name == 'java' and os._name == 'posix'):
565
- # Set the executable bits (owner, group, and world) on
566
- # all the files specified.
567
- for f in files:
568
- if self.dry_run:
569
- logger.info("changing mode of %s", f)
570
- else:
571
- mode = (os.stat(f).st_mode | bits) & mask
572
- logger.info("changing mode of %s to %o", f, mode)
573
- os.chmod(f, mode)
574
-
575
- set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f)
576
-
577
- def ensure_dir(self, path):
578
- path = os.path.abspath(path)
579
- if path not in self.ensured and not os.path.exists(path):
580
- self.ensured.add(path)
581
- d, f = os.path.split(path)
582
- self.ensure_dir(d)
583
- logger.info('Creating %s' % path)
584
- if not self.dry_run:
585
- os.mkdir(path)
586
- if self.record:
587
- self.dirs_created.add(path)
588
-
589
- def byte_compile(self, path, optimize=False, force=False, prefix=None, hashed_invalidation=False):
590
- dpath = cache_from_source(path, not optimize)
591
- logger.info('Byte-compiling %s to %s', path, dpath)
592
- if not self.dry_run:
593
- if force or self.newer(path, dpath):
594
- if not prefix:
595
- diagpath = None
596
- else:
597
- assert path.startswith(prefix)
598
- diagpath = path[len(prefix):]
599
- compile_kwargs = {}
600
- if hashed_invalidation and hasattr(py_compile, 'PycInvalidationMode'):
601
- compile_kwargs['invalidation_mode'] = py_compile.PycInvalidationMode.CHECKED_HASH
602
- py_compile.compile(path, dpath, diagpath, True, **compile_kwargs) # raise error
603
- self.record_as_written(dpath)
604
- return dpath
605
-
606
- def ensure_removed(self, path):
607
- if os.path.exists(path):
608
- if os.path.isdir(path) and not os.path.islink(path):
609
- logger.debug('Removing directory tree at %s', path)
610
- if not self.dry_run:
611
- shutil.rmtree(path)
612
- if self.record:
613
- if path in self.dirs_created:
614
- self.dirs_created.remove(path)
615
- else:
616
- if os.path.islink(path):
617
- s = 'link'
618
- else:
619
- s = 'file'
620
- logger.debug('Removing %s %s', s, path)
621
- if not self.dry_run:
622
- os.remove(path)
623
- if self.record:
624
- if path in self.files_written:
625
- self.files_written.remove(path)
626
-
627
- def is_writable(self, path):
628
- result = False
629
- while not result:
630
- if os.path.exists(path):
631
- result = os.access(path, os.W_OK)
632
- break
633
- parent = os.path.dirname(path)
634
- if parent == path:
635
- break
636
- path = parent
637
- return result
638
-
639
- def commit(self):
640
- """
641
- Commit recorded changes, turn off recording, return
642
- changes.
643
- """
644
- assert self.record
645
- result = self.files_written, self.dirs_created
646
- self._init_record()
647
- return result
648
-
649
- def rollback(self):
650
- if not self.dry_run:
651
- for f in list(self.files_written):
652
- if os.path.exists(f):
653
- os.remove(f)
654
- # dirs should all be empty now, except perhaps for
655
- # __pycache__ subdirs
656
- # reverse so that subdirs appear before their parents
657
- dirs = sorted(self.dirs_created, reverse=True)
658
- for d in dirs:
659
- flist = os.listdir(d)
660
- if flist:
661
- assert flist == ['__pycache__']
662
- sd = os.path.join(d, flist[0])
663
- os.rmdir(sd)
664
- os.rmdir(d) # should fail if non-empty
665
- self._init_record()
666
-
667
- def resolve(module_name, dotted_path):
668
- if module_name in sys.modules:
669
- mod = sys.modules[module_name]
670
- else:
671
- mod = __import__(module_name)
672
- if dotted_path is None:
673
- result = mod
674
- else:
675
- parts = dotted_path.split('.')
676
- result = getattr(mod, parts.pop(0))
677
- for p in parts:
678
- result = getattr(result, p)
679
- return result
680
-
681
-
682
- class ExportEntry(object):
683
- def __init__(self, name, prefix, suffix, flags):
684
- self.name = name
685
- self.prefix = prefix
686
- self.suffix = suffix
687
- self.flags = flags
688
-
689
- @cached_property
690
- def value(self):
691
- return resolve(self.prefix, self.suffix)
692
-
693
- def __repr__(self): # pragma: no cover
694
- return '<ExportEntry %s = %s:%s %s>' % (self.name, self.prefix,
695
- self.suffix, self.flags)
696
-
697
- def __eq__(self, other):
698
- if not isinstance(other, ExportEntry):
699
- result = False
700
- else:
701
- result = (self.name == other.name and
702
- self.prefix == other.prefix and
703
- self.suffix == other.suffix and
704
- self.flags == other.flags)
705
- return result
706
-
707
- __hash__ = object.__hash__
708
-
709
-
710
- ENTRY_RE = re.compile(r'''(?P<name>(\w|[-.+])+)
711
- \s*=\s*(?P<callable>(\w+)([:\.]\w+)*)
712
- \s*(\[\s*(?P<flags>[\w-]+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])?
713
- ''', re.VERBOSE)
714
-
715
- def get_export_entry(specification):
716
- m = ENTRY_RE.search(specification)
717
- if not m:
718
- result = None
719
- if '[' in specification or ']' in specification:
720
- raise DistlibException("Invalid specification "
721
- "'%s'" % specification)
722
- else:
723
- d = m.groupdict()
724
- name = d['name']
725
- path = d['callable']
726
- colons = path.count(':')
727
- if colons == 0:
728
- prefix, suffix = path, None
729
- else:
730
- if colons != 1:
731
- raise DistlibException("Invalid specification "
732
- "'%s'" % specification)
733
- prefix, suffix = path.split(':')
734
- flags = d['flags']
735
- if flags is None:
736
- if '[' in specification or ']' in specification:
737
- raise DistlibException("Invalid specification "
738
- "'%s'" % specification)
739
- flags = []
740
- else:
741
- flags = [f.strip() for f in flags.split(',')]
742
- result = ExportEntry(name, prefix, suffix, flags)
743
- return result
744
-
745
-
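# A small sketch of get_export_entry() above parsing an entry-point style
# specification; the module and flag names are made up.
entry = get_export_entry('main = mypkg.cli:run [gui]')
# entry.name == 'main', entry.prefix == 'mypkg.cli',
# entry.suffix == 'run', entry.flags == ['gui'];
# entry.value would import mypkg.cli and resolve its 'run' attribute.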
746
- def get_cache_base(suffix=None):
747
- """
748
- Return the default base location for distlib caches. If the directory does
749
- not exist, it is created. Use the suffix provided for the base directory,
750
- and default to '.distlib' if it isn't provided.
751
-
752
- On Windows, if LOCALAPPDATA is defined in the environment, then it is
753
- assumed to be a directory, and will be the parent directory of the result.
754
- On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home
755
- directory - using os.path.expanduser('~') - will be the parent directory of
756
- the result.
757
-
758
- The result is just the directory '.distlib' in the parent directory as
759
- determined above, or with the name specified with ``suffix``.
760
- """
761
- if suffix is None:
762
- suffix = '.distlib'
763
- if os.name == 'nt' and 'LOCALAPPDATA' in os.environ:
764
- result = os.path.expandvars('$localappdata')
765
- else:
766
- # Assume posix, or old Windows
767
- result = os.path.expanduser('~')
768
- # we use 'isdir' instead of 'exists', because we want to
769
- # fail if there's a file with that name
770
- if os.path.isdir(result):
771
- usable = os.access(result, os.W_OK)
772
- if not usable:
773
- logger.warning('Directory exists but is not writable: %s', result)
774
- else:
775
- try:
776
- os.makedirs(result)
777
- usable = True
778
- except OSError:
779
- logger.warning('Unable to create %s', result, exc_info=True)
780
- usable = False
781
- if not usable:
782
- result = tempfile.mkdtemp()
783
- logger.warning('Default location unusable, using %s', result)
784
- return os.path.join(result, suffix)
785
-
786
-
787
- def path_to_cache_dir(path):
788
- """
789
- Convert an absolute path to a directory name for use in a cache.
790
-
791
- The algorithm used is:
792
-
793
- #. On Windows, any ``':'`` in the drive is replaced with ``'---'``.
794
- #. Any occurrence of ``os.sep`` is replaced with ``'--'``.
795
- #. ``'.cache'`` is appended.
796
- """
797
- d, p = os.path.splitdrive(os.path.abspath(path))
798
- if d:
799
- d = d.replace(':', '---')
800
- p = p.replace(os.sep, '--')
801
- return d + p + '.cache'
802
-
803
-
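# A worked example for path_to_cache_dir() above, assuming POSIX (os.sep == '/'):
assert path_to_cache_dir('/home/user/project') == '--home--user--project.cache'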
804
- def ensure_slash(s):
805
- if not s.endswith('/'):
806
- return s + '/'
807
- return s
808
-
809
-
810
- def parse_credentials(netloc):
811
- username = password = None
812
- if '@' in netloc:
813
- prefix, netloc = netloc.rsplit('@', 1)
814
- if ':' not in prefix:
815
- username = prefix
816
- else:
817
- username, password = prefix.split(':', 1)
818
- if username:
819
- username = unquote(username)
820
- if password:
821
- password = unquote(password)
822
- return username, password, netloc
823
-
824
-
825
- def get_process_umask():
826
- result = os.umask(0o22)
827
- os.umask(result)
828
- return result
829
-
830
- def is_string_sequence(seq):
831
- result = True
832
- i = None
833
- for i, s in enumerate(seq):
834
- if not isinstance(s, string_types):
835
- result = False
836
- break
837
- assert i is not None
838
- return result
839
-
840
- PROJECT_NAME_AND_VERSION = re.compile('([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-'
841
- '([a-z0-9_.+-]+)', re.I)
842
- PYTHON_VERSION = re.compile(r'-py(\d\.?\d?)')
843
-
844
-
845
- def split_filename(filename, project_name=None):
846
- """
847
- Extract name, version, python version from a filename (no extension)
848
-
849
- Return name, version, pyver or None
850
- """
851
- result = None
852
- pyver = None
853
- filename = unquote(filename).replace(' ', '-')
854
- m = PYTHON_VERSION.search(filename)
855
- if m:
856
- pyver = m.group(1)
857
- filename = filename[:m.start()]
858
- if project_name and len(filename) > len(project_name) + 1:
859
- m = re.match(re.escape(project_name) + r'\b', filename)
860
- if m:
861
- n = m.end()
862
- result = filename[:n], filename[n + 1:], pyver
863
- if result is None:
864
- m = PROJECT_NAME_AND_VERSION.match(filename)
865
- if m:
866
- result = m.group(1), m.group(3), pyver
867
- return result
868
-
869
- # Allow spaces in name because of legacy dists like "Twisted Core"
870
- NAME_VERSION_RE = re.compile(r'(?P<name>[\w .-]+)\s*'
871
- r'\(\s*(?P<ver>[^\s)]+)\)$')
872
-
873
- def parse_name_and_version(p):
874
- """
875
- A utility method used to get name and version from a string.
876
-
877
- From e.g. a Provides-Dist value.
878
-
879
- :param p: A value in a form 'foo (1.0)'
880
- :return: The name and version as a tuple.
881
- """
882
- m = NAME_VERSION_RE.match(p)
883
- if not m:
884
- raise DistlibException('Ill-formed name/version string: \'%s\'' % p)
885
- d = m.groupdict()
886
- return d['name'].strip().lower(), d['ver']
887
-
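# Example round trip for parse_name_and_version() above; note the name is
# lower-cased and stripped:
name, version = parse_name_and_version('Twisted Core (20.3.0)')
# name == 'twisted core', version == '20.3.0'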
888
- def get_extras(requested, available):
889
- result = set()
890
- requested = set(requested or [])
891
- available = set(available or [])
892
- if '*' in requested:
893
- requested.remove('*')
894
- result |= available
895
- for r in requested:
896
- if r == '-':
897
- result.add(r)
898
- elif r.startswith('-'):
899
- unwanted = r[1:]
900
- if unwanted not in available:
901
- logger.warning('undeclared extra: %s' % unwanted)
902
- if unwanted in result:
903
- result.remove(unwanted)
904
- else:
905
- if r not in available:
906
- logger.warning('undeclared extra: %s' % r)
907
- result.add(r)
908
- return result
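# Example of extras resolution with get_extras() above: '*' selects every
# declared extra and a '-' prefix removes one.
get_extras(['*', '-docs'], ['tests', 'docs'])   # -> {'tests'}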
909
- #
910
- # Extended metadata functionality
911
- #
912
-
913
- def _get_external_data(url):
914
- result = {}
915
- try:
916
- # urlopen might fail if it runs into redirections,
917
- # because of Python issue #13696. Fixed in locators
918
- # using a custom redirect handler.
919
- resp = urlopen(url)
920
- headers = resp.info()
921
- ct = headers.get('Content-Type')
922
- if not ct.startswith('application/json'):
923
- logger.debug('Unexpected response for JSON request: %s', ct)
924
- else:
925
- reader = codecs.getreader('utf-8')(resp)
926
- #data = reader.read().decode('utf-8')
927
- #result = json.loads(data)
928
- result = json.load(reader)
929
- except Exception as e:
930
- logger.exception('Failed to get external data for %s: %s', url, e)
931
- return result
932
-
933
- _external_data_base_url = 'https://www.red-dove.com/pypi/projects/'
934
-
935
- def get_project_data(name):
936
- url = '%s/%s/project.json' % (name[0].upper(), name)
937
- url = urljoin(_external_data_base_url, url)
938
- result = _get_external_data(url)
939
- return result
940
-
941
- def get_package_data(name, version):
942
- url = '%s/%s/package-%s.json' % (name[0].upper(), name, version)
943
- url = urljoin(_external_data_base_url, url)
944
- return _get_external_data(url)
945
-
946
-
947
- class Cache(object):
948
- """
949
- A class implementing a cache for resources that need to live in the file system
950
- e.g. shared libraries. This class was moved from resources to here because it
951
- could be used by other modules, e.g. the wheel module.
952
- """
953
-
954
- def __init__(self, base):
955
- """
956
- Initialise an instance.
957
-
958
- :param base: The base directory where the cache should be located.
959
- """
960
- # we use 'isdir' instead of 'exists', because we want to
961
- # fail if there's a file with that name
962
- if not os.path.isdir(base): # pragma: no cover
963
- os.makedirs(base)
964
- if (os.stat(base).st_mode & 0o77) != 0:
965
- logger.warning('Directory \'%s\' is not private', base)
966
- self.base = os.path.abspath(os.path.normpath(base))
967
-
968
- def prefix_to_dir(self, prefix):
969
- """
970
- Converts a resource prefix to a directory name in the cache.
971
- """
972
- return path_to_cache_dir(prefix)
973
-
974
- def clear(self):
975
- """
976
- Clear the cache.
977
- """
978
- not_removed = []
979
- for fn in os.listdir(self.base):
980
- fn = os.path.join(self.base, fn)
981
- try:
982
- if os.path.islink(fn) or os.path.isfile(fn):
983
- os.remove(fn)
984
- elif os.path.isdir(fn):
985
- shutil.rmtree(fn)
986
- except Exception:
987
- not_removed.append(fn)
988
- return not_removed
989
-
990
-
991
- class EventMixin(object):
992
- """
993
- A very simple publish/subscribe system.
994
- """
995
- def __init__(self):
996
- self._subscribers = {}
997
-
998
- def add(self, event, subscriber, append=True):
999
- """
1000
- Add a subscriber for an event.
1001
-
1002
- :param event: The name of an event.
1003
- :param subscriber: The subscriber to be added (and called when the
1004
- event is published).
1005
- :param append: Whether to append or prepend the subscriber to an
1006
- existing subscriber list for the event.
1007
- """
1008
- subs = self._subscribers
1009
- if event not in subs:
1010
- subs[event] = deque([subscriber])
1011
- else:
1012
- sq = subs[event]
1013
- if append:
1014
- sq.append(subscriber)
1015
- else:
1016
- sq.appendleft(subscriber)
1017
-
1018
- def remove(self, event, subscriber):
1019
- """
1020
- Remove a subscriber for an event.
1021
-
1022
- :param event: The name of an event.
1023
- :param subscriber: The subscriber to be removed.
1024
- """
1025
- subs = self._subscribers
1026
- if event not in subs:
1027
- raise ValueError('No subscribers: %r' % event)
1028
- subs[event].remove(subscriber)
1029
-
1030
- def get_subscribers(self, event):
1031
- """
1032
- Return an iterator for the subscribers for an event.
1033
- :param event: The event to return subscribers for.
1034
- """
1035
- return iter(self._subscribers.get(event, ()))
1036
-
1037
- def publish(self, event, *args, **kwargs):
1038
- """
1039
- Publish a event and return a list of values returned by its
1040
- subscribers.
1041
-
1042
- :param event: The event to publish.
1043
- :param args: The positional arguments to pass to the event's
1044
- subscribers.
1045
- :param kwargs: The keyword arguments to pass to the event's
1046
- subscribers.
1047
- """
1048
- result = []
1049
- for subscriber in self.get_subscribers(event):
1050
- try:
1051
- value = subscriber(event, *args, **kwargs)
1052
- except Exception:
1053
- logger.exception('Exception during event publication')
1054
- value = None
1055
- result.append(value)
1056
- logger.debug('publish %s: args = %s, kwargs = %s, result = %s',
1057
- event, args, kwargs, result)
1058
- return result
1059
-
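# Tiny publish/subscribe sketch using the EventMixin above:
class Bus(EventMixin):
    pass

bus = Bus()
bus.add('ping', lambda event, x: x * 2)
bus.publish('ping', 21)   # -> [42]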
1060
- #
1061
- # Simple sequencing
1062
- #
1063
- class Sequencer(object):
1064
- def __init__(self):
1065
- self._preds = {}
1066
- self._succs = {}
1067
- self._nodes = set() # nodes with no preds/succs
1068
-
1069
- def add_node(self, node):
1070
- self._nodes.add(node)
1071
-
1072
- def remove_node(self, node, edges=False):
1073
- if node in self._nodes:
1074
- self._nodes.remove(node)
1075
- if edges:
1076
- for p in set(self._preds.get(node, ())):
1077
- self.remove(p, node)
1078
- for s in set(self._succs.get(node, ())):
1079
- self.remove(node, s)
1080
- # Remove empties
1081
- for k, v in list(self._preds.items()):
1082
- if not v:
1083
- del self._preds[k]
1084
- for k, v in list(self._succs.items()):
1085
- if not v:
1086
- del self._succs[k]
1087
-
1088
- def add(self, pred, succ):
1089
- assert pred != succ
1090
- self._preds.setdefault(succ, set()).add(pred)
1091
- self._succs.setdefault(pred, set()).add(succ)
1092
-
1093
- def remove(self, pred, succ):
1094
- assert pred != succ
1095
- try:
1096
- preds = self._preds[succ]
1097
- succs = self._succs[pred]
1098
- except KeyError: # pragma: no cover
1099
- raise ValueError('%r not a successor of anything' % succ)
1100
- try:
1101
- preds.remove(pred)
1102
- succs.remove(succ)
1103
- except KeyError: # pragma: no cover
1104
- raise ValueError('%r not a successor of %r' % (succ, pred))
1105
-
1106
- def is_step(self, step):
1107
- return (step in self._preds or step in self._succs or
1108
- step in self._nodes)
1109
-
1110
- def get_steps(self, final):
1111
- if not self.is_step(final):
1112
- raise ValueError('Unknown: %r' % final)
1113
- result = []
1114
- todo = []
1115
- seen = set()
1116
- todo.append(final)
1117
- while todo:
1118
- step = todo.pop(0)
1119
- if step in seen:
1120
- # if a step was already seen,
1121
- # move it to the end (so it will appear earlier
1122
- # when reversed on return) ... but not for the
1123
- # final step, as that would be confusing for
1124
- # users
1125
- if step != final:
1126
- result.remove(step)
1127
- result.append(step)
1128
- else:
1129
- seen.add(step)
1130
- result.append(step)
1131
- preds = self._preds.get(step, ())
1132
- todo.extend(preds)
1133
- return reversed(result)
1134
-
1135
- @property
1136
- def strong_connections(self):
1137
- #http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
1138
- index_counter = [0]
1139
- stack = []
1140
- lowlinks = {}
1141
- index = {}
1142
- result = []
1143
-
1144
- graph = self._succs
1145
-
1146
- def strongconnect(node):
1147
- # set the depth index for this node to the smallest unused index
1148
- index[node] = index_counter[0]
1149
- lowlinks[node] = index_counter[0]
1150
- index_counter[0] += 1
1151
- stack.append(node)
1152
-
1153
- # Consider successors
1154
- try:
1155
- successors = graph[node]
1156
- except Exception:
1157
- successors = []
1158
- for successor in successors:
1159
- if successor not in lowlinks:
1160
- # Successor has not yet been visited
1161
- strongconnect(successor)
1162
- lowlinks[node] = min(lowlinks[node],lowlinks[successor])
1163
- elif successor in stack:
1164
- # the successor is in the stack and hence in the current
1165
- # strongly connected component (SCC)
1166
- lowlinks[node] = min(lowlinks[node],index[successor])
1167
-
1168
- # If `node` is a root node, pop the stack and generate an SCC
1169
- if lowlinks[node] == index[node]:
1170
- connected_component = []
1171
-
1172
- while True:
1173
- successor = stack.pop()
1174
- connected_component.append(successor)
1175
- if successor == node: break
1176
- component = tuple(connected_component)
1177
- # storing the result
1178
- result.append(component)
1179
-
1180
- for node in graph:
1181
- if node not in lowlinks:
1182
- strongconnect(node)
1183
-
1184
- return result
1185
-
1186
- @property
1187
- def dot(self):
1188
- result = ['digraph G {']
1189
- for succ in self._preds:
1190
- preds = self._preds[succ]
1191
- for pred in preds:
1192
- result.append(' %s -> %s;' % (pred, succ))
1193
- for node in self._nodes:
1194
- result.append(' %s;' % node)
1195
- result.append('}')
1196
- return '\n'.join(result)
1197
-
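# Sketch of the Sequencer above used for a simple dependency ordering:
seq = Sequencer()
seq.add('build', 'test')      # 'build' must precede 'test'
seq.add('test', 'release')
list(seq.get_steps('release'))   # -> ['build', 'test', 'release']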
1198
- #
1199
- # Unarchiving functionality for zip, tar, tgz, tbz, whl
1200
- #
1201
-
1202
- ARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', '.zip',
1203
- '.tgz', '.tbz', '.whl')
1204
-
1205
- def unarchive(archive_filename, dest_dir, format=None, check=True):
1206
-
1207
- def check_path(path):
1208
- if not isinstance(path, text_type):
1209
- path = path.decode('utf-8')
1210
- p = os.path.abspath(os.path.join(dest_dir, path))
1211
- if not p.startswith(dest_dir) or p[plen] != os.sep:
1212
- raise ValueError('path outside destination: %r' % p)
1213
-
1214
- dest_dir = os.path.abspath(dest_dir)
1215
- plen = len(dest_dir)
1216
- archive = None
1217
- if format is None:
1218
- if archive_filename.endswith(('.zip', '.whl')):
1219
- format = 'zip'
1220
- elif archive_filename.endswith(('.tar.gz', '.tgz')):
1221
- format = 'tgz'
1222
- mode = 'r:gz'
1223
- elif archive_filename.endswith(('.tar.bz2', '.tbz')):
1224
- format = 'tbz'
1225
- mode = 'r:bz2'
1226
- elif archive_filename.endswith('.tar'):
1227
- format = 'tar'
1228
- mode = 'r'
1229
- else: # pragma: no cover
1230
- raise ValueError('Unknown format for %r' % archive_filename)
1231
- try:
1232
- if format == 'zip':
1233
- archive = ZipFile(archive_filename, 'r')
1234
- if check:
1235
- names = archive.namelist()
1236
- for name in names:
1237
- check_path(name)
1238
- else:
1239
- archive = tarfile.open(archive_filename, mode)
1240
- if check:
1241
- names = archive.getnames()
1242
- for name in names:
1243
- check_path(name)
1244
- if format != 'zip' and sys.version_info[0] < 3:
1245
- # See Python issue 17153. If the dest path contains Unicode,
1246
- # tarfile extraction fails on Python 2.x if a member path name
1247
- # contains non-ASCII characters - it leads to an implicit
1248
- # bytes -> unicode conversion using ASCII to decode.
1249
- for tarinfo in archive.getmembers():
1250
- if not isinstance(tarinfo.name, text_type):
1251
- tarinfo.name = tarinfo.name.decode('utf-8')
1252
- archive.extractall(dest_dir)
1253
-
1254
- finally:
1255
- if archive:
1256
- archive.close()
1257
-
1258
-
1259
- def zip_dir(directory):
1260
- """zip a directory tree into a BytesIO object"""
1261
- result = io.BytesIO()
1262
- dlen = len(directory)
1263
- with ZipFile(result, "w") as zf:
1264
- for root, dirs, files in os.walk(directory):
1265
- for name in files:
1266
- full = os.path.join(root, name)
1267
- rel = root[dlen:]
1268
- dest = os.path.join(rel, name)
1269
- zf.write(full, dest)
1270
- return result
1271
-
1272
- #
1273
- # Simple progress bar
1274
- #
1275
-
1276
- UNITS = ('', 'K', 'M', 'G','T','P')
1277
-
1278
-
1279
- class Progress(object):
1280
- unknown = 'UNKNOWN'
1281
-
1282
- def __init__(self, minval=0, maxval=100):
1283
- assert maxval is None or maxval >= minval
1284
- self.min = self.cur = minval
1285
- self.max = maxval
1286
- self.started = None
1287
- self.elapsed = 0
1288
- self.done = False
1289
-
1290
- def update(self, curval):
1291
- assert self.min <= curval
1292
- assert self.max is None or curval <= self.max
1293
- self.cur = curval
1294
- now = time.time()
1295
- if self.started is None:
1296
- self.started = now
1297
- else:
1298
- self.elapsed = now - self.started
1299
-
1300
- def increment(self, incr):
1301
- assert incr >= 0
1302
- self.update(self.cur + incr)
1303
-
1304
- def start(self):
1305
- self.update(self.min)
1306
- return self
1307
-
1308
- def stop(self):
1309
- if self.max is not None:
1310
- self.update(self.max)
1311
- self.done = True
1312
-
1313
- @property
1314
- def maximum(self):
1315
- return self.unknown if self.max is None else self.max
1316
-
1317
- @property
1318
- def percentage(self):
1319
- if self.done:
1320
- result = '100 %'
1321
- elif self.max is None:
1322
- result = ' ?? %'
1323
- else:
1324
- v = 100.0 * (self.cur - self.min) / (self.max - self.min)
1325
- result = '%3d %%' % v
1326
- return result
1327
-
1328
- def format_duration(self, duration):
1329
- if (duration <= 0) and self.max is None or self.cur == self.min:
1330
- result = '??:??:??'
1331
- #elif duration < 1:
1332
- # result = '--:--:--'
1333
- else:
1334
- result = time.strftime('%H:%M:%S', time.gmtime(duration))
1335
- return result
1336
-
1337
- @property
1338
- def ETA(self):
1339
- if self.done:
1340
- prefix = 'Done'
1341
- t = self.elapsed
1342
- #import pdb; pdb.set_trace()
1343
- else:
1344
- prefix = 'ETA '
1345
- if self.max is None:
1346
- t = -1
1347
- elif self.elapsed == 0 or (self.cur == self.min):
1348
- t = 0
1349
- else:
1350
- #import pdb; pdb.set_trace()
1351
- t = float(self.max - self.min)
1352
- t /= self.cur - self.min
1353
- t = (t - 1) * self.elapsed
1354
- return '%s: %s' % (prefix, self.format_duration(t))
1355
-
1356
- @property
1357
- def speed(self):
1358
- if self.elapsed == 0:
1359
- result = 0.0
1360
- else:
1361
- result = (self.cur - self.min) / self.elapsed
1362
- for unit in UNITS:
1363
- if result < 1000:
1364
- break
1365
- result /= 1000.0
1366
- return '%d %sB/s' % (result, unit)
1367
-
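# Minimal usage sketch for the Progress class above:
p = Progress(maxval=1000).start()
p.update(250)   # p.percentage == ' 25 %'
p.stop()        # p.percentage == '100 %', p.done is True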
1368
- #
1369
- # Glob functionality
1370
- #
1371
-
1372
- RICH_GLOB = re.compile(r'\{([^}]*)\}')
1373
- _CHECK_RECURSIVE_GLOB = re.compile(r'[^/\\,{]\*\*|\*\*[^/\\,}]')
1374
- _CHECK_MISMATCH_SET = re.compile(r'^[^{]*\}|\{[^}]*$')
1375
-
1376
-
1377
- def iglob(path_glob):
1378
- """Extended globbing function that supports ** and {opt1,opt2,opt3}."""
1379
- if _CHECK_RECURSIVE_GLOB.search(path_glob):
1380
- msg = """invalid glob %r: recursive glob "**" must be used alone"""
1381
- raise ValueError(msg % path_glob)
1382
- if _CHECK_MISMATCH_SET.search(path_glob):
1383
- msg = """invalid glob %r: mismatching set marker '{' or '}'"""
1384
- raise ValueError(msg % path_glob)
1385
- return _iglob(path_glob)
1386
-
1387
-
1388
- def _iglob(path_glob):
1389
- rich_path_glob = RICH_GLOB.split(path_glob, 1)
1390
- if len(rich_path_glob) > 1:
1391
- assert len(rich_path_glob) == 3, rich_path_glob
1392
- prefix, set, suffix = rich_path_glob
1393
- for item in set.split(','):
1394
- for path in _iglob(''.join((prefix, item, suffix))):
1395
- yield path
1396
- else:
1397
- if '**' not in path_glob:
1398
- for item in std_iglob(path_glob):
1399
- yield item
1400
- else:
1401
- prefix, radical = path_glob.split('**', 1)
1402
- if prefix == '':
1403
- prefix = '.'
1404
- if radical == '':
1405
- radical = '*'
1406
- else:
1407
- # we support both
1408
- radical = radical.lstrip('/')
1409
- radical = radical.lstrip('\\')
1410
- for path, dir, files in os.walk(prefix):
1411
- path = os.path.normpath(path)
1412
- for fn in _iglob(os.path.join(path, radical)):
1413
- yield fn
1414
-
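# Hedged examples of the extended glob syntax accepted by iglob() above;
# '**' recurses into subdirectories and '{a,b}' expands to alternatives:
for path in iglob('src/**/*.{py,txt}'):
    print(path)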
1415
- if ssl:
1416
- from .compat import (HTTPSHandler as BaseHTTPSHandler, match_hostname,
1417
- CertificateError)
1418
-
1419
-
1420
- #
1421
- # HTTPSConnection which verifies certificates/matches domains
1422
- #
1423
-
1424
- class HTTPSConnection(httplib.HTTPSConnection):
1425
- ca_certs = None # set this to the path to the certs file (.pem)
1426
- check_domain = True # only used if ca_certs is not None
1427
-
1428
- # noinspection PyPropertyAccess
1429
- def connect(self):
1430
- sock = socket.create_connection((self.host, self.port), self.timeout)
1431
- if getattr(self, '_tunnel_host', False):
1432
- self.sock = sock
1433
- self._tunnel()
1434
-
1435
- context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
1436
- if hasattr(ssl, 'OP_NO_SSLv2'):
1437
- context.options |= ssl.OP_NO_SSLv2
1438
- if self.cert_file:
1439
- context.load_cert_chain(self.cert_file, self.key_file)
1440
- kwargs = {}
1441
- if self.ca_certs:
1442
- context.verify_mode = ssl.CERT_REQUIRED
1443
- context.load_verify_locations(cafile=self.ca_certs)
1444
- if getattr(ssl, 'HAS_SNI', False):
1445
- kwargs['server_hostname'] = self.host
1446
-
1447
- self.sock = context.wrap_socket(sock, **kwargs)
1448
- if self.ca_certs and self.check_domain:
1449
- try:
1450
- match_hostname(self.sock.getpeercert(), self.host)
1451
- logger.debug('Host verified: %s', self.host)
1452
- except CertificateError: # pragma: no cover
1453
- self.sock.shutdown(socket.SHUT_RDWR)
1454
- self.sock.close()
1455
- raise
1456
-
1457
- class HTTPSHandler(BaseHTTPSHandler):
1458
- def __init__(self, ca_certs, check_domain=True):
1459
- BaseHTTPSHandler.__init__(self)
1460
- self.ca_certs = ca_certs
1461
- self.check_domain = check_domain
1462
-
1463
- def _conn_maker(self, *args, **kwargs):
1464
- """
1465
- This is called to create a connection instance. Normally you'd
1466
- pass a connection class to do_open, but it doesn't actually check for
1467
- a class, and just expects a callable. As long as we behave just as a
1468
- constructor would have, we should be OK. If it ever changes so that
1469
- we *must* pass a class, we'll create an UnsafeHTTPSConnection class
1470
- which just sets check_domain to False in the class definition, and
1471
- choose which one to pass to do_open.
1472
- """
1473
- result = HTTPSConnection(*args, **kwargs)
1474
- if self.ca_certs:
1475
- result.ca_certs = self.ca_certs
1476
- result.check_domain = self.check_domain
1477
- return result
1478
-
1479
- def https_open(self, req):
1480
- try:
1481
- return self.do_open(self._conn_maker, req)
1482
- except URLError as e:
1483
- if 'certificate verify failed' in str(e.reason):
1484
- raise CertificateError('Unable to verify server certificate '
1485
- 'for %s' % req.host)
1486
- else:
1487
- raise
1488
-
1489
- #
1490
- # To protect against mixing HTTP traffic with HTTPS (examples: A Man-In-The-
1491
- # Middle proxy using HTTP listens on port 443, or an index mistakenly serves
1492
- # HTML containing a http://xyz link when it should be https://xyz),
1493
- # you can use the following handler class, which does not allow HTTP traffic.
1494
- #
1495
- # It works by inheriting from HTTPHandler - so build_opener won't add a
1496
- # handler for HTTP itself.
1497
- #
1498
- class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler):
1499
- def http_open(self, req):
1500
- raise URLError('Unexpected HTTP request on what should be a secure '
1501
- 'connection: %s' % req)
1502
-
1503
- #
1504
- # XML-RPC with timeouts
1505
- #
1506
- class Transport(xmlrpclib.Transport):
1507
- def __init__(self, timeout, use_datetime=0):
1508
- self.timeout = timeout
1509
- xmlrpclib.Transport.__init__(self, use_datetime)
1510
-
1511
- def make_connection(self, host):
1512
- h, eh, x509 = self.get_host_info(host)
1513
- if not self._connection or host != self._connection[0]:
1514
- self._extra_headers = eh
1515
- self._connection = host, httplib.HTTPConnection(h)
1516
- return self._connection[1]
1517
-
1518
- if ssl:
1519
- class SafeTransport(xmlrpclib.SafeTransport):
1520
- def __init__(self, timeout, use_datetime=0):
1521
- self.timeout = timeout
1522
- xmlrpclib.SafeTransport.__init__(self, use_datetime)
1523
-
1524
- def make_connection(self, host):
1525
- h, eh, kwargs = self.get_host_info(host)
1526
- if not kwargs:
1527
- kwargs = {}
1528
- kwargs['timeout'] = self.timeout
1529
- if not self._connection or host != self._connection[0]:
1530
- self._extra_headers = eh
1531
- self._connection = host, httplib.HTTPSConnection(h, None,
1532
- **kwargs)
1533
- return self._connection[1]
1534
-
1535
-
1536
- class ServerProxy(xmlrpclib.ServerProxy):
1537
- def __init__(self, uri, **kwargs):
1538
- self.timeout = timeout = kwargs.pop('timeout', None)
1539
- # The above classes only come into play if a timeout
1540
- # is specified
1541
- if timeout is not None:
1542
- # scheme = splittype(uri) # deprecated as of Python 3.8
1543
- scheme = urlparse(uri)[0]
1544
- use_datetime = kwargs.get('use_datetime', 0)
1545
- if scheme == 'https':
1546
- tcls = SafeTransport
1547
- else:
1548
- tcls = Transport
1549
- kwargs['transport'] = t = tcls(timeout, use_datetime=use_datetime)
1550
- self.transport = t
1551
- xmlrpclib.ServerProxy.__init__(self, uri, **kwargs)
1552
-
1553
- #
1554
- # CSV functionality. This is provided because on 2.x, the csv module can't
1555
- # handle Unicode. However, we need to deal with Unicode in e.g. RECORD files.
1556
- #
1557
-
1558
- def _csv_open(fn, mode, **kwargs):
1559
- if sys.version_info[0] < 3:
1560
- mode += 'b'
1561
- else:
1562
- kwargs['newline'] = ''
1563
- # Python 3 determines encoding from locale. Force 'utf-8'
1564
- # file encoding to match other forced utf-8 encoding
1565
- kwargs['encoding'] = 'utf-8'
1566
- return open(fn, mode, **kwargs)
1567
-
1568
-
1569
- class CSVBase(object):
1570
- defaults = {
1571
- 'delimiter': str(','), # The strs are used because we need native
1572
- 'quotechar': str('"'), # str in the csv API (2.x won't take
1573
- 'lineterminator': str('\n') # Unicode)
1574
- }
1575
-
1576
- def __enter__(self):
1577
- return self
1578
-
1579
- def __exit__(self, *exc_info):
1580
- self.stream.close()
1581
-
1582
-
1583
- class CSVReader(CSVBase):
1584
- def __init__(self, **kwargs):
1585
- if 'stream' in kwargs:
1586
- stream = kwargs['stream']
1587
- if sys.version_info[0] >= 3:
1588
- # needs to be a text stream
1589
- stream = codecs.getreader('utf-8')(stream)
1590
- self.stream = stream
1591
- else:
1592
- self.stream = _csv_open(kwargs['path'], 'r')
1593
- self.reader = csv.reader(self.stream, **self.defaults)
1594
-
1595
- def __iter__(self):
1596
- return self
1597
-
1598
- def next(self):
1599
- result = next(self.reader)
1600
- if sys.version_info[0] < 3:
1601
- for i, item in enumerate(result):
1602
- if not isinstance(item, text_type):
1603
- result[i] = item.decode('utf-8')
1604
- return result
1605
-
1606
- __next__ = next
1607
-
1608
- class CSVWriter(CSVBase):
1609
- def __init__(self, fn, **kwargs):
1610
- self.stream = _csv_open(fn, 'w')
1611
- self.writer = csv.writer(self.stream, **self.defaults)
1612
-
1613
- def writerow(self, row):
1614
- if sys.version_info[0] < 3:
1615
- r = []
1616
- for item in row:
1617
- if isinstance(item, text_type):
1618
- item = item.encode('utf-8')
1619
- r.append(item)
1620
- row = r
1621
- self.writer.writerow(row)
1622
-
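# Sketch of writing a wheel-style RECORD file with CSVWriter above; the
# filename and row contents are illustrative only.
with CSVWriter('RECORD') as w:
    w.writerow(('pkg/__init__.py', 'sha256=...', '0'))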
1623
- #
1624
- # Configurator functionality
1625
- #
1626
-
1627
- class Configurator(BaseConfigurator):
1628
-
1629
- value_converters = dict(BaseConfigurator.value_converters)
1630
- value_converters['inc'] = 'inc_convert'
1631
-
1632
- def __init__(self, config, base=None):
1633
- super(Configurator, self).__init__(config)
1634
- self.base = base or os.getcwd()
1635
-
1636
- def configure_custom(self, config):
1637
- def convert(o):
1638
- if isinstance(o, (list, tuple)):
1639
- result = type(o)([convert(i) for i in o])
1640
- elif isinstance(o, dict):
1641
- if '()' in o:
1642
- result = self.configure_custom(o)
1643
- else:
1644
- result = {}
1645
- for k in o:
1646
- result[k] = convert(o[k])
1647
- else:
1648
- result = self.convert(o)
1649
- return result
1650
-
1651
- c = config.pop('()')
1652
- if not callable(c):
1653
- c = self.resolve(c)
1654
- props = config.pop('.', None)
1655
- # Check for valid identifiers
1656
- args = config.pop('[]', ())
1657
- if args:
1658
- args = tuple([convert(o) for o in args])
1659
- items = [(k, convert(config[k])) for k in config if valid_ident(k)]
1660
- kwargs = dict(items)
1661
- result = c(*args, **kwargs)
1662
- if props:
1663
- for n, v in props.items():
1664
- setattr(result, n, convert(v))
1665
- return result
1666
-
1667
- def __getitem__(self, key):
1668
- result = self.config[key]
1669
- if isinstance(result, dict) and '()' in result:
1670
- self.config[key] = result = self.configure_custom(result)
1671
- return result
1672
-
1673
- def inc_convert(self, value):
1674
- """Default converter for the inc:// protocol."""
1675
- if not os.path.isabs(value):
1676
- value = os.path.join(self.base, value)
1677
- with codecs.open(value, 'r', encoding='utf-8') as f:
1678
- result = json.load(f)
1679
- return result
1680
-
1681
-
1682
- class SubprocessMixin(object):
1683
- """
1684
- Mixin for running subprocesses and capturing their output
1685
- """
1686
- def __init__(self, verbose=False, progress=None):
1687
- self.verbose = verbose
1688
- self.progress = progress
1689
-
1690
- def reader(self, stream, context):
1691
- """
1692
- Read lines from a subprocess' output stream and either pass to a progress
1693
- callable (if specified) or write progress information to sys.stderr.
1694
- """
1695
- progress = self.progress
1696
- verbose = self.verbose
1697
- while True:
1698
- s = stream.readline()
1699
- if not s:
1700
- break
1701
- if progress is not None:
1702
- progress(s, context)
1703
- else:
1704
- if not verbose:
1705
- sys.stderr.write('.')
1706
- else:
1707
- sys.stderr.write(s.decode('utf-8'))
1708
- sys.stderr.flush()
1709
- stream.close()
1710
-
1711
- def run_command(self, cmd, **kwargs):
1712
- p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
1713
- stderr=subprocess.PIPE, **kwargs)
1714
- t1 = threading.Thread(target=self.reader, args=(p.stdout, 'stdout'))
1715
- t1.start()
1716
- t2 = threading.Thread(target=self.reader, args=(p.stderr, 'stderr'))
1717
- t2.start()
1718
- p.wait()
1719
- t1.join()
1720
- t2.join()
1721
- if self.progress is not None:
1722
- self.progress('done.', 'main')
1723
- elif self.verbose:
1724
- sys.stderr.write('done.\n')
1725
- return p
1726
-
1727
-
1728
- def normalize_name(name):
1729
- """Normalize a python package name a la PEP 503"""
1730
- # https://www.python.org/dev/peps/pep-0503/#normalized-names
1731
- return re.sub('[-_.]+', '-', name).lower()
1732
-
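# PEP 503 normalization example for normalize_name() above:
normalize_name('Django_REST.framework')   # -> 'django-rest-framework'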
1733
- # def _get_pypirc_command():
1734
- # """
1735
- # Get the distutils command for interacting with PyPI configurations.
1736
- # :return: the command.
1737
- # """
1738
- # from distutils.core import Distribution
1739
- # from distutils.config import PyPIRCCommand
1740
- # d = Distribution()
1741
- # return PyPIRCCommand(d)
1742
-
1743
- class PyPIRCFile(object):
1744
-
1745
- DEFAULT_REPOSITORY = 'https://upload.pypi.org/legacy/'
1746
- DEFAULT_REALM = 'pypi'
1747
-
1748
- def __init__(self, fn=None, url=None):
1749
- if fn is None:
1750
- fn = os.path.join(os.path.expanduser('~'), '.pypirc')
1751
- self.filename = fn
1752
- self.url = url
1753
-
1754
- def read(self):
1755
- result = {}
1756
-
1757
- if os.path.exists(self.filename):
1758
- repository = self.url or self.DEFAULT_REPOSITORY
1759
-
1760
- config = configparser.RawConfigParser()
1761
- config.read(self.filename)
1762
- sections = config.sections()
1763
- if 'distutils' in sections:
1764
- # let's get the list of servers
1765
- index_servers = config.get('distutils', 'index-servers')
1766
- _servers = [server.strip() for server in
1767
- index_servers.split('\n')
1768
- if server.strip() != '']
1769
- if _servers == []:
1770
- # nothing set, let's try to get the default pypi
1771
- if 'pypi' in sections:
1772
- _servers = ['pypi']
1773
- else:
1774
- for server in _servers:
1775
- result = {'server': server}
1776
- result['username'] = config.get(server, 'username')
1777
-
1778
- # optional params
1779
- for key, default in (('repository', self.DEFAULT_REPOSITORY),
1780
- ('realm', self.DEFAULT_REALM),
1781
- ('password', None)):
1782
- if config.has_option(server, key):
1783
- result[key] = config.get(server, key)
1784
- else:
1785
- result[key] = default
1786
-
1787
- # work around people having "repository" for the "pypi"
1788
- # section of their config set to the HTTP (rather than
1789
- # HTTPS) URL
1790
- if (server == 'pypi' and
1791
- repository in (self.DEFAULT_REPOSITORY, 'pypi')):
1792
- result['repository'] = self.DEFAULT_REPOSITORY
1793
- elif (result['server'] != repository and
1794
- result['repository'] != repository):
1795
- result = {}
1796
- elif 'server-login' in sections:
1797
- # old format
1798
- server = 'server-login'
1799
- if config.has_option(server, 'repository'):
1800
- repository = config.get(server, 'repository')
1801
- else:
1802
- repository = self.DEFAULT_REPOSITORY
1803
- result = {
1804
- 'username': config.get(server, 'username'),
1805
- 'password': config.get(server, 'password'),
1806
- 'repository': repository,
1807
- 'server': server,
1808
- 'realm': self.DEFAULT_REALM
1809
- }
1810
- return result
1811
-
1812
- def update(self, username, password):
1813
- # import pdb; pdb.set_trace()
1814
- config = configparser.RawConfigParser()
1815
- fn = self.filename
1816
- config.read(fn)
1817
- if not config.has_section('pypi'):
1818
- config.add_section('pypi')
1819
- config.set('pypi', 'username', username)
1820
- config.set('pypi', 'password', password)
1821
- with open(fn, 'w') as f:
1822
- config.write(f)
1823
-
1824
- def _load_pypirc(index):
1825
- """
1826
- Read the PyPI access configuration as supported by distutils.
1827
- """
1828
- return PyPIRCFile(url=index.url).read()
1829
-
1830
- def _store_pypirc(index):
1831
- PyPIRCFile().update(index.username, index.password)
1832
-
1833
- #
1834
- # get_platform()/get_host_platform() copied from Python 3.10.a0 source, with some minor
1835
- # tweaks
1836
- #
1837
-
1838
- def get_host_platform():
1839
- """Return a string that identifies the current platform. This is used mainly to
1840
- distinguish platform-specific build directories and platform-specific built
1841
- distributions. Typically includes the OS name and version and the
1842
- architecture (as supplied by 'os.uname()'), although the exact information
1843
- included depends on the OS; eg. on Linux, the kernel version isn't
1844
- particularly important.
1845
-
1846
- Examples of returned values:
1847
- linux-i586
1848
- linux-alpha (?)
1849
- solaris-2.6-sun4u
1850
-
1851
- Windows will return one of:
1852
- win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc))
1853
- win32 (all others - specifically, sys.platform is returned)
1854
-
1855
- For other non-POSIX platforms, currently just returns 'sys.platform'.
1856
-
1857
- """
1858
- if os.name == 'nt':
1859
- if 'amd64' in sys.version.lower():
1860
- return 'win-amd64'
1861
- if '(arm)' in sys.version.lower():
1862
- return 'win-arm32'
1863
- if '(arm64)' in sys.version.lower():
1864
- return 'win-arm64'
1865
- return sys.platform
1866
-
1867
- # Set for cross builds explicitly
1868
- if "_PYTHON_HOST_PLATFORM" in os.environ:
1869
- return os.environ["_PYTHON_HOST_PLATFORM"]
1870
-
1871
- if os.name != 'posix' or not hasattr(os, 'uname'):
1872
- # XXX what about the architecture? NT is Intel or Alpha,
1873
- # Mac OS is M68k or PPC, etc.
1874
- return sys.platform
1875
-
1876
- # Try to distinguish various flavours of Unix
1877
-
1878
- (osname, host, release, version, machine) = os.uname()
1879
-
1880
- # Convert the OS name to lowercase, remove '/' characters, and translate
1881
- # spaces (for "Power Macintosh")
1882
- osname = osname.lower().replace('/', '')
1883
- machine = machine.replace(' ', '_').replace('/', '-')
1884
-
1885
- if osname[:5] == 'linux':
1886
- # At least on Linux/Intel, 'machine' is the processor --
1887
- # i386, etc.
1888
- # XXX what about Alpha, SPARC, etc?
1889
- return "%s-%s" % (osname, machine)
1890
-
1891
- elif osname[:5] == 'sunos':
1892
- if release[0] >= '5': # SunOS 5 == Solaris 2
1893
- osname = 'solaris'
1894
- release = '%d.%s' % (int(release[0]) - 3, release[2:])
1895
- # We can't use 'platform.architecture()[0]' because of a
1896
- # bootstrap problem. We use a dict to get an error
1897
- # if something suspicious happens.
1898
- bitness = {2147483647:'32bit', 9223372036854775807:'64bit'}
1899
- machine += '.%s' % bitness[sys.maxsize]
1900
- # fall through to standard osname-release-machine representation
1901
- elif osname[:3] == 'aix':
1902
- from _aix_support import aix_platform
1903
- return aix_platform()
1904
- elif osname[:6] == 'cygwin':
1905
- osname = 'cygwin'
1906
- rel_re = re.compile(r'[\d.]+', re.ASCII)
1907
- m = rel_re.match(release)
1908
- if m:
1909
- release = m.group()
1910
- elif osname[:6] == 'darwin':
1911
- import _osx_support, distutils.sysconfig
1912
- osname, release, machine = _osx_support.get_platform_osx(
1913
- distutils.sysconfig.get_config_vars(),
1914
- osname, release, machine)
1915
-
1916
- return '%s-%s-%s' % (osname, release, machine)
1917
-
1918
-
1919
- _TARGET_TO_PLAT = {
1920
- 'x86' : 'win32',
1921
- 'x64' : 'win-amd64',
1922
- 'arm' : 'win-arm32',
1923
- }
1924
-
1925
-
1926
- def get_platform():
1927
- if os.name != 'nt':
1928
- return get_host_platform()
1929
- cross_compilation_target = os.environ.get('VSCMD_ARG_TGT_ARCH')
1930
- if cross_compilation_target not in _TARGET_TO_PLAT:
1931
- return get_host_platform()
1932
- return _TARGET_TO_PLAT[cross_compilation_target]
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/live.py DELETED
@@ -1,375 +0,0 @@
-import sys
-from threading import Event, RLock, Thread
-from types import TracebackType
-from typing import IO, Any, Callable, List, Optional, TextIO, Type, cast
-
-from . import get_console
-from .console import Console, ConsoleRenderable, RenderableType, RenderHook
-from .control import Control
-from .file_proxy import FileProxy
-from .jupyter import JupyterMixin
-from .live_render import LiveRender, VerticalOverflowMethod
-from .screen import Screen
-from .text import Text
-
-
-class _RefreshThread(Thread):
-    """A thread that calls refresh() at regular intervals."""
-
-    def __init__(self, live: "Live", refresh_per_second: float) -> None:
-        self.live = live
-        self.refresh_per_second = refresh_per_second
-        self.done = Event()
-        super().__init__(daemon=True)
-
-    def stop(self) -> None:
-        self.done.set()
-
-    def run(self) -> None:
-        while not self.done.wait(1 / self.refresh_per_second):
-            with self.live._lock:
-                if not self.done.is_set():
-                    self.live.refresh()
-
-
-class Live(JupyterMixin, RenderHook):
-    """Renders an auto-updating live display of any given renderable.
-
-    Args:
-        renderable (RenderableType, optional): The renderable to live display. Defaults to displaying nothing.
-        console (Console, optional): Optional Console instance. Defaults to an internal Console instance writing to stdout.
-        screen (bool, optional): Enable alternate screen mode. Defaults to False.
-        auto_refresh (bool, optional): Enable auto refresh. If disabled, you will need to call `refresh()` or `update()` with refresh flag. Defaults to True.
-        refresh_per_second (float, optional): Number of times per second to refresh the live display. Defaults to 4.
-        transient (bool, optional): Clear the renderable on exit (has no effect when screen=True). Defaults to False.
-        redirect_stdout (bool, optional): Enable redirection of stdout, so ``print`` may be used. Defaults to True.
-        redirect_stderr (bool, optional): Enable redirection of stderr. Defaults to True.
-        vertical_overflow (VerticalOverflowMethod, optional): How to handle renderable when it is too tall for the console. Defaults to "ellipsis".
-        get_renderable (Callable[[], RenderableType], optional): Optional callable to get renderable. Defaults to None.
-    """
-
-    def __init__(
-        self,
-        renderable: Optional[RenderableType] = None,
-        *,
-        console: Optional[Console] = None,
-        screen: bool = False,
-        auto_refresh: bool = True,
-        refresh_per_second: float = 4,
-        transient: bool = False,
-        redirect_stdout: bool = True,
-        redirect_stderr: bool = True,
-        vertical_overflow: VerticalOverflowMethod = "ellipsis",
-        get_renderable: Optional[Callable[[], RenderableType]] = None,
-    ) -> None:
-        assert refresh_per_second > 0, "refresh_per_second must be > 0"
-        self._renderable = renderable
-        self.console = console if console is not None else get_console()
-        self._screen = screen
-        self._alt_screen = False
-
-        self._redirect_stdout = redirect_stdout
-        self._redirect_stderr = redirect_stderr
-        self._restore_stdout: Optional[IO[str]] = None
-        self._restore_stderr: Optional[IO[str]] = None
-
-        self._lock = RLock()
-        self.ipy_widget: Optional[Any] = None
-        self.auto_refresh = auto_refresh
-        self._started: bool = False
-        self.transient = True if screen else transient
-
-        self._refresh_thread: Optional[_RefreshThread] = None
-        self.refresh_per_second = refresh_per_second
-
-        self.vertical_overflow = vertical_overflow
-        self._get_renderable = get_renderable
-        self._live_render = LiveRender(
-            self.get_renderable(), vertical_overflow=vertical_overflow
-        )
-
-    @property
-    def is_started(self) -> bool:
-        """Check if live display has been started."""
-        return self._started
-
-    def get_renderable(self) -> RenderableType:
-        renderable = (
-            self._get_renderable()
-            if self._get_renderable is not None
-            else self._renderable
-        )
-        return renderable or ""
-
-    def start(self, refresh: bool = False) -> None:
-        """Start live rendering display.
-
-        Args:
-            refresh (bool, optional): Also refresh. Defaults to False.
-        """
-        with self._lock:
-            if self._started:
-                return
-            self.console.set_live(self)
-            self._started = True
-            if self._screen:
-                self._alt_screen = self.console.set_alt_screen(True)
-            self.console.show_cursor(False)
-            self._enable_redirect_io()
-            self.console.push_render_hook(self)
-            if refresh:
-                try:
-                    self.refresh()
-                except Exception:
-                    # If refresh fails, we want to stop the redirection of sys.stderr,
-                    # so the error stacktrace is properly displayed in the terminal.
-                    # (or, if the code that calls Rich captures the exception and wants to display something,
-                    # let this be displayed in the terminal).
-                    self.stop()
-                    raise
-            if self.auto_refresh:
-                self._refresh_thread = _RefreshThread(self, self.refresh_per_second)
-                self._refresh_thread.start()
-
-    def stop(self) -> None:
-        """Stop live rendering display."""
-        with self._lock:
-            if not self._started:
-                return
-            self.console.clear_live()
-            self._started = False
-
-            if self.auto_refresh and self._refresh_thread is not None:
-                self._refresh_thread.stop()
-                self._refresh_thread = None
-            # allow the renderable to fully render on the last refresh, even if it overflows
-            self.vertical_overflow = "visible"
-            with self.console:
-                try:
-                    if not self._alt_screen and not self.console.is_jupyter:
-                        self.refresh()
-                finally:
-                    self._disable_redirect_io()
-                    self.console.pop_render_hook()
-                    if not self._alt_screen and self.console.is_terminal:
-                        self.console.line()
-                    self.console.show_cursor(True)
-                    if self._alt_screen:
-                        self.console.set_alt_screen(False)
-
-                    if self.transient and not self._alt_screen:
-                        self.console.control(self._live_render.restore_cursor())
-                    if self.ipy_widget is not None and self.transient:
-                        self.ipy_widget.close()  # pragma: no cover
-
-    def __enter__(self) -> "Live":
-        self.start(refresh=self._renderable is not None)
-        return self
-
-    def __exit__(
-        self,
-        exc_type: Optional[Type[BaseException]],
-        exc_val: Optional[BaseException],
-        exc_tb: Optional[TracebackType],
-    ) -> None:
-        self.stop()
-
-    def _enable_redirect_io(self) -> None:
-        """Enable redirecting of stdout / stderr."""
-        if self.console.is_terminal or self.console.is_jupyter:
-            if self._redirect_stdout and not isinstance(sys.stdout, FileProxy):
-                self._restore_stdout = sys.stdout
-                sys.stdout = cast("TextIO", FileProxy(self.console, sys.stdout))
-            if self._redirect_stderr and not isinstance(sys.stderr, FileProxy):
-                self._restore_stderr = sys.stderr
-                sys.stderr = cast("TextIO", FileProxy(self.console, sys.stderr))
-
-    def _disable_redirect_io(self) -> None:
-        """Disable redirecting of stdout / stderr."""
-        if self._restore_stdout:
-            sys.stdout = cast("TextIO", self._restore_stdout)
-            self._restore_stdout = None
-        if self._restore_stderr:
-            sys.stderr = cast("TextIO", self._restore_stderr)
-            self._restore_stderr = None
-
-    @property
-    def renderable(self) -> RenderableType:
-        """Get the renderable that is being displayed.
-
-        Returns:
-            RenderableType: Displayed renderable.
-        """
-        renderable = self.get_renderable()
-        return Screen(renderable) if self._alt_screen else renderable
-
-    def update(self, renderable: RenderableType, *, refresh: bool = False) -> None:
-        """Update the renderable that is being displayed.
-
-        Args:
-            renderable (RenderableType): New renderable to use.
-            refresh (bool, optional): Refresh the display. Defaults to False.
-        """
-        if isinstance(renderable, str):
-            renderable = self.console.render_str(renderable)
-        with self._lock:
-            self._renderable = renderable
-            if refresh:
-                self.refresh()
-
-    def refresh(self) -> None:
-        """Update the display of the Live Render."""
-        with self._lock:
-            self._live_render.set_renderable(self.renderable)
-            if self.console.is_jupyter:  # pragma: no cover
-                try:
-                    from IPython.display import display
-                    from ipywidgets import Output
-                except ImportError:
-                    import warnings
-
-                    warnings.warn('install "ipywidgets" for Jupyter support')
-                else:
-                    if self.ipy_widget is None:
-                        self.ipy_widget = Output()
-                        display(self.ipy_widget)
-
-                    with self.ipy_widget:
-                        self.ipy_widget.clear_output(wait=True)
-                        self.console.print(self._live_render.renderable)
-            elif self.console.is_terminal and not self.console.is_dumb_terminal:
-                with self.console:
-                    self.console.print(Control())
-            elif (
-                not self._started and not self.transient
-            ):  # if it is finished allow files or dumb-terminals to see final result
-                with self.console:
-                    self.console.print(Control())
-
-    def process_renderables(
-        self, renderables: List[ConsoleRenderable]
-    ) -> List[ConsoleRenderable]:
-        """Process renderables to restore cursor and display progress."""
-        self._live_render.vertical_overflow = self.vertical_overflow
-        if self.console.is_interactive:
-            # lock needs acquiring as user can modify live_render renderable at any time unlike in Progress.
-            with self._lock:
-                reset = (
-                    Control.home()
-                    if self._alt_screen
-                    else self._live_render.position_cursor()
-                )
-                renderables = [reset, *renderables, self._live_render]
-        elif (
-            not self._started and not self.transient
-        ):  # if it is finished render the final output for files or dumb_terminals
-            renderables = [*renderables, self._live_render]
-
-        return renderables
-
-
-if __name__ == "__main__":  # pragma: no cover
-    import random
-    import time
-    from itertools import cycle
-    from typing import Dict, List, Tuple
-
-    from .align import Align
-    from .console import Console
-    from .live import Live as Live
-    from .panel import Panel
-    from .rule import Rule
-    from .syntax import Syntax
-    from .table import Table
-
-    console = Console()
-
-    syntax = Syntax(
-        '''def loop_last(values: Iterable[T]) -> Iterable[Tuple[bool, T]]:
-    """Iterate and generate a tuple with a flag for last value."""
-    iter_values = iter(values)
-    try:
-        previous_value = next(iter_values)
-    except StopIteration:
-        return
-    for value in iter_values:
-        yield False, previous_value
-        previous_value = value
-    yield True, previous_value''',
-        "python",
-        line_numbers=True,
-    )
-
-    table = Table("foo", "bar", "baz")
-    table.add_row("1", "2", "3")
-
-    progress_renderables = [
-        "You can make the terminal shorter and taller to see the live table hide.",
-        "Text may be printed while the progress bars are rendering.",
-        Panel("In fact, [i]any[/i] renderable will work"),
-        "Such as [magenta]tables[/]...",
-        table,
-        "Pretty printed structures...",
-        {"type": "example", "text": "Pretty printed"},
-        "Syntax...",
-        syntax,
-        Rule("Give it a try!"),
-    ]
-
-    examples = cycle(progress_renderables)
-
-    exchanges = [
-        "SGD",
-        "MYR",
-        "EUR",
-        "USD",
-        "AUD",
-        "JPY",
-        "CNH",
-        "HKD",
-        "CAD",
-        "INR",
-        "DKK",
-        "GBP",
-        "RUB",
-        "NZD",
-        "MXN",
-        "IDR",
-        "TWD",
-        "THB",
-        "VND",
-    ]
-    with Live(console=console) as live_table:
-        exchange_rate_dict: Dict[Tuple[str, str], float] = {}
-
-        for index in range(100):
-            select_exchange = exchanges[index % len(exchanges)]
-
-            for exchange in exchanges:
-                if exchange == select_exchange:
-                    continue
-                time.sleep(0.4)
-                if random.randint(0, 10) < 1:
-                    console.log(next(examples))
-                exchange_rate_dict[(select_exchange, exchange)] = 200 / (
-                    (random.random() * 320) + 1
-                )
-                if len(exchange_rate_dict) > len(exchanges) - 1:
-                    exchange_rate_dict.pop(list(exchange_rate_dict.keys())[0])
-                table = Table(title="Exchange Rates")
-
-                table.add_column("Source Currency")
-                table.add_column("Destination Currency")
-                table.add_column("Exchange Rate")
-
-                for ((source, dest), exchange_rate) in exchange_rate_dict.items():
-                    table.add_row(
-                        source,
-                        dest,
-                        Text(
-                            f"{exchange_rate:.4f}",
-                            style="red" if exchange_rate < 1.0 else "green",
-                        ),
-                    )
-
-                live_table.update(Align.center(table))
 
spaces/Awesimo/jojogan/e4e/models/stylegan2/op/upfirdn2d.py DELETED
@@ -1,184 +0,0 @@
-import os
-
-import torch
-from torch.autograd import Function
-from torch.nn import functional as F  # required by upfirdn2d_native below; this import was missing
-from torch.utils.cpp_extension import load
-
-module_path = os.path.dirname(__file__)
-upfirdn2d_op = load(
-    'upfirdn2d',
-    sources=[
-        os.path.join(module_path, 'upfirdn2d.cpp'),
-        os.path.join(module_path, 'upfirdn2d_kernel.cu'),
-    ],
-)
-
-
-class UpFirDn2dBackward(Function):
-    @staticmethod
-    def forward(
-        ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size
-    ):
-        up_x, up_y = up
-        down_x, down_y = down
-        g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad
-
-        grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1)
-
-        grad_input = upfirdn2d_op.upfirdn2d(
-            grad_output,
-            grad_kernel,
-            down_x,
-            down_y,
-            up_x,
-            up_y,
-            g_pad_x0,
-            g_pad_x1,
-            g_pad_y0,
-            g_pad_y1,
-        )
-        grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3])
-
-        ctx.save_for_backward(kernel)
-
-        pad_x0, pad_x1, pad_y0, pad_y1 = pad
-
-        ctx.up_x = up_x
-        ctx.up_y = up_y
-        ctx.down_x = down_x
-        ctx.down_y = down_y
-        ctx.pad_x0 = pad_x0
-        ctx.pad_x1 = pad_x1
-        ctx.pad_y0 = pad_y0
-        ctx.pad_y1 = pad_y1
-        ctx.in_size = in_size
-        ctx.out_size = out_size
-
-        return grad_input
-
-    @staticmethod
-    def backward(ctx, gradgrad_input):
-        kernel, = ctx.saved_tensors
-
-        gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx.in_size[3], 1)
-
-        gradgrad_out = upfirdn2d_op.upfirdn2d(
-            gradgrad_input,
-            kernel,
-            ctx.up_x,
-            ctx.up_y,
-            ctx.down_x,
-            ctx.down_y,
-            ctx.pad_x0,
-            ctx.pad_x1,
-            ctx.pad_y0,
-            ctx.pad_y1,
-        )
-        # gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.out_size[0], ctx.out_size[1], ctx.in_size[3])
-        gradgrad_out = gradgrad_out.view(
-            ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]
-        )
-
-        return gradgrad_out, None, None, None, None, None, None, None, None
-
-
-class UpFirDn2d(Function):
-    @staticmethod
-    def forward(ctx, input, kernel, up, down, pad):
-        up_x, up_y = up
-        down_x, down_y = down
-        pad_x0, pad_x1, pad_y0, pad_y1 = pad
-
-        kernel_h, kernel_w = kernel.shape
-        batch, channel, in_h, in_w = input.shape
-        ctx.in_size = input.shape
-
-        input = input.reshape(-1, in_h, in_w, 1)
-
-        ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1]))
-
-        out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
-        out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
-        ctx.out_size = (out_h, out_w)
-
-        ctx.up = (up_x, up_y)
-        ctx.down = (down_x, down_y)
-        ctx.pad = (pad_x0, pad_x1, pad_y0, pad_y1)
-
-        g_pad_x0 = kernel_w - pad_x0 - 1
-        g_pad_y0 = kernel_h - pad_y0 - 1
-        g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1
-        g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1
-
-        ctx.g_pad = (g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1)
-
-        out = upfirdn2d_op.upfirdn2d(
-            input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1
-        )
-        # out = out.view(major, out_h, out_w, minor)
-        out = out.view(-1, channel, out_h, out_w)
-
-        return out
-
-    @staticmethod
-    def backward(ctx, grad_output):
-        kernel, grad_kernel = ctx.saved_tensors
-
-        grad_input = UpFirDn2dBackward.apply(
-            grad_output,
-            kernel,
-            grad_kernel,
-            ctx.up,
-            ctx.down,
-            ctx.pad,
-            ctx.g_pad,
-            ctx.in_size,
-            ctx.out_size,
-        )
-
-        return grad_input, None, None, None, None
-
-
-def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
-    out = UpFirDn2d.apply(
-        input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1])
-    )
-
-    return out
-
-
-def upfirdn2d_native(
-    input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1
-):
-    _, in_h, in_w, minor = input.shape
-    kernel_h, kernel_w = kernel.shape
-
-    out = input.view(-1, in_h, 1, in_w, 1, minor)
-    out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1])
-    out = out.view(-1, in_h * up_y, in_w * up_x, minor)
-
-    out = F.pad(
-        out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]
-    )
-    out = out[
-        :,
-        max(-pad_y0, 0): out.shape[1] - max(-pad_y1, 0),
-        max(-pad_x0, 0): out.shape[2] - max(-pad_x1, 0),
-        :,
-    ]
-
-    out = out.permute(0, 3, 1, 2)
-    out = out.reshape(
-        [-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]
-    )
-    w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
-    out = F.conv2d(out, w)
-    out = out.reshape(
-        -1,
-        minor,
-        in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1,
-        in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1,
-    )
-    out = out.permute(0, 2, 3, 1)
-
-    return out[:, ::down_y, ::down_x, :]
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/export/c10.py DELETED
@@ -1,534 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-
-import math
-import torch
-import torch.nn.functional as F
-
-from detectron2.layers import cat
-from detectron2.layers.roi_align_rotated import ROIAlignRotated
-from detectron2.modeling import poolers
-from detectron2.modeling.proposal_generator import rpn
-from detectron2.modeling.roi_heads.mask_head import mask_rcnn_inference
-from detectron2.structures import Boxes, ImageList, Instances, Keypoints
-
-from .shared import alias, to_device
-
-
-"""
-This file contains caffe2-compatible implementation of several detectron2 components.
-"""
-
-
-class Caffe2Boxes(Boxes):
-    """
-    Representing a list of detectron2.structures.Boxes from minibatch, each box
-    is represented by a 5d vector (batch index + 4 coordinates), or a 6d vector
-    (batch index + 5 coordinates) for RotatedBoxes.
-    """
-
-    def __init__(self, tensor):
-        assert isinstance(tensor, torch.Tensor)
-        assert tensor.dim() == 2 and tensor.size(-1) in [4, 5, 6], tensor.size()
-        # TODO: make tensor immutable when dim is Nx5 for Boxes,
-        # and Nx6 for RotatedBoxes?
-        self.tensor = tensor
-
-
-# TODO clean up this class, maybe just extend Instances
-class InstancesList(object):
-    """
-    Tensor representation of a list of Instances object for a batch of images.
-
-    When dealing with a batch of images with Caffe2 ops, a list of bboxes
-    (instances) are usually represented by single Tensor with size
-    (sigma(Ni), 5) or (sigma(Ni), 4) plus a batch split Tensor. This class is
-    for providing common functions to convert between these two representations.
-    """
-
-    def __init__(self, im_info, indices, extra_fields=None):
-        # [N, 3] -> (H, W, Scale)
-        self.im_info = im_info
-        # [N,] -> indice of batch to which the instance belongs
-        self.indices = indices
-        # [N, ...]
-        self.batch_extra_fields = extra_fields or {}
-
-        self.image_size = self.im_info
-
-    def get_fields(self):
-        """like `get_fields` in the Instances object,
-        but return each field in tensor representations"""
-        ret = {}
-        for k, v in self.batch_extra_fields.items():
-            # if isinstance(v, torch.Tensor):
-            #     tensor_rep = v
-            # elif isinstance(v, (Boxes, Keypoints)):
-            #     tensor_rep = v.tensor
-            # else:
-            #     raise ValueError("Can't find tensor representation for: {}".format())
-            ret[k] = v
-        return ret
-
-    def has(self, name):
-        return name in self.batch_extra_fields
-
-    def set(self, name, value):
-        data_len = len(value)
-        if len(self.batch_extra_fields):
-            assert (
-                len(self) == data_len
-            ), "Adding a field of length {} to a Instances of length {}".format(data_len, len(self))
-        self.batch_extra_fields[name] = value
-
-    def __setattr__(self, name, val):
-        if name in ["im_info", "indices", "batch_extra_fields", "image_size"]:
-            super().__setattr__(name, val)
-        else:
-            self.set(name, val)
-
-    def __getattr__(self, name):
-        if name not in self.batch_extra_fields:
-            raise AttributeError("Cannot find field '{}' in the given Instances!".format(name))
-        return self.batch_extra_fields[name]
-
-    def __len__(self):
-        return len(self.indices)
-
-    def flatten(self):
-        ret = []
-        for _, v in self.batch_extra_fields.items():
-            if isinstance(v, (Boxes, Keypoints)):
-                ret.append(v.tensor)
-            else:
-                ret.append(v)
-        return ret
-
-    @staticmethod
-    def to_d2_instances_list(instances_list):
-        """
-        Convert InstancesList to List[Instances]. The input `instances_list` can
-        also be a List[Instances], in this case this method is a non-op.
-        """
-        if not isinstance(instances_list, InstancesList):
-            assert all(isinstance(x, Instances) for x in instances_list)
-            return instances_list
-
-        ret = []
-        for i, info in enumerate(instances_list.im_info):
-            instances = Instances(torch.Size([int(info[0].item()), int(info[1].item())]))
-
-            ids = instances_list.indices == i
-            for k, v in instances_list.batch_extra_fields.items():
-                if isinstance(v, torch.Tensor):
-                    instances.set(k, v[ids])
-                    continue
-                elif isinstance(v, Boxes):
-                    instances.set(k, v[ids, -4:])
-                    continue
-
-                target_type, tensor_source = v
-                assert isinstance(tensor_source, torch.Tensor)
-                assert tensor_source.shape[0] == instances_list.indices.shape[0]
-                tensor_source = tensor_source[ids]
-
-                if issubclass(target_type, Boxes):
-                    instances.set(k, Boxes(tensor_source[:, -4:]))
-                elif issubclass(target_type, Keypoints):
-                    instances.set(k, Keypoints(tensor_source))
-                elif issubclass(target_type, torch.Tensor):
-                    instances.set(k, tensor_source)
-                else:
-                    raise ValueError("Can't handle target type: {}".format(target_type))
-
-            ret.append(instances)
-        return ret
-
-
-class Caffe2Compatible(object):
-    """
-    A model can inherit this class to indicate that it can be traced and deployed with caffe2.
-    """
-
-    def _get_tensor_mode(self):
-        return self._tensor_mode
-
-    def _set_tensor_mode(self, v):
-        self._tensor_mode = v
-
-    tensor_mode = property(_get_tensor_mode, _set_tensor_mode)
-    """
-    If true, the model expects C2-style tensor only inputs/outputs format.
-    """
-
-
-class Caffe2RPN(Caffe2Compatible, rpn.RPN):
-    def _generate_proposals(
-        self, images, objectness_logits_pred, anchor_deltas_pred, gt_instances=None
-    ):
-        assert isinstance(images, ImageList)
-        if self.tensor_mode:
-            im_info = images.image_sizes
-        else:
-            im_info = torch.tensor([[im_sz[0], im_sz[1], 1.0] for im_sz in images.image_sizes]).to(
-                images.tensor.device
-            )
-        assert isinstance(im_info, torch.Tensor)
-
-        rpn_rois_list = []
-        rpn_roi_probs_list = []
-        for scores, bbox_deltas, cell_anchors_tensor, feat_stride in zip(
-            objectness_logits_pred,
-            anchor_deltas_pred,
-            iter(self.anchor_generator.cell_anchors),
-            self.anchor_generator.strides,
-        ):
-            scores = scores.detach()
-            bbox_deltas = bbox_deltas.detach()
-
-            rpn_rois, rpn_roi_probs = torch.ops._caffe2.GenerateProposals(
-                scores,
-                bbox_deltas,
-                im_info,
-                cell_anchors_tensor,
-                spatial_scale=1.0 / feat_stride,
-                pre_nms_topN=self.pre_nms_topk[self.training],
-                post_nms_topN=self.post_nms_topk[self.training],
-                nms_thresh=self.nms_thresh,
-                min_size=self.min_box_size,
-                # correct_transform_coords=True,  # deprecated argument
-                angle_bound_on=True,  # Default
-                angle_bound_lo=-180,
-                angle_bound_hi=180,
-                clip_angle_thresh=1.0,  # Default
-                legacy_plus_one=False,
-            )
-            rpn_rois_list.append(rpn_rois)
-            rpn_roi_probs_list.append(rpn_roi_probs)
-
-        # For FPN in D2, in RPN all proposals from different levels are concated
-        # together, ranked and picked by top post_nms_topk. Then in ROIPooler
-        # it calculates level_assignments and calls the RoIAlign from
-        # the corresponding level.
-
-        if len(objectness_logits_pred) == 1:
-            rpn_rois = rpn_rois_list[0]
-            rpn_roi_probs = rpn_roi_probs_list[0]
-        else:
-            assert len(rpn_rois_list) == len(rpn_roi_probs_list)
-            rpn_post_nms_topN = self.post_nms_topk[self.training]
-
-            device = rpn_rois_list[0].device
-            input_list = [to_device(x, "cpu") for x in (rpn_rois_list + rpn_roi_probs_list)]
-
-            # TODO remove this after confirming rpn_max_level/rpn_min_level
-            # is not needed in CollectRpnProposals.
-            feature_strides = list(self.anchor_generator.strides)
-            rpn_min_level = int(math.log2(feature_strides[0]))
-            rpn_max_level = int(math.log2(feature_strides[-1]))
-            assert (rpn_max_level - rpn_min_level + 1) == len(
-                rpn_rois_list
-            ), "CollectRpnProposals requires continuous levels"
-
-            rpn_rois = torch.ops._caffe2.CollectRpnProposals(
-                input_list,
-                # NOTE: in the current implementation, rpn_max_level and rpn_min_level
-                # are not needed, only the difference between the two matters and it
-                # can be inferred from the number of inputs. Keep them now for
-                # consistency.
-                rpn_max_level=2 + len(rpn_rois_list) - 1,
-                rpn_min_level=2,
-                rpn_post_nms_topN=rpn_post_nms_topN,
-            )
-            rpn_rois = to_device(rpn_rois, device)
-            rpn_roi_probs = []
-
-        proposals = self.c2_postprocess(im_info, rpn_rois, rpn_roi_probs, self.tensor_mode)
-        return proposals, {}
-
-    def forward(self, images, features, gt_instances=None):
-        assert not self.training
-        features = [features[f] for f in self.in_features]
-        objectness_logits_pred, anchor_deltas_pred = self.rpn_head(features)
-        return self._generate_proposals(
-            images,
-            objectness_logits_pred,
-            anchor_deltas_pred,
-            gt_instances,
-        )
-
-    @staticmethod
-    def c2_postprocess(im_info, rpn_rois, rpn_roi_probs, tensor_mode):
-        proposals = InstancesList(
-            im_info=im_info,
-            indices=rpn_rois[:, 0],
-            extra_fields={
-                "proposal_boxes": Caffe2Boxes(rpn_rois),
-                "objectness_logits": (torch.Tensor, rpn_roi_probs),
-            },
-        )
-        if not tensor_mode:
-            proposals = InstancesList.to_d2_instances_list(proposals)
-        else:
-            proposals = [proposals]
-        return proposals
-
-
-class Caffe2ROIPooler(Caffe2Compatible, poolers.ROIPooler):
-    @staticmethod
-    def c2_preprocess(box_lists):
-        assert all(isinstance(x, Boxes) for x in box_lists)
-        if all(isinstance(x, Caffe2Boxes) for x in box_lists):
-            # input is pure-tensor based
-            assert len(box_lists) == 1
-            pooler_fmt_boxes = box_lists[0].tensor
-        else:
-            pooler_fmt_boxes = poolers.convert_boxes_to_pooler_format(box_lists)
-        return pooler_fmt_boxes
-
-    def forward(self, x, box_lists):
-        assert not self.training
-
-        pooler_fmt_boxes = self.c2_preprocess(box_lists)
-        num_level_assignments = len(self.level_poolers)
-
-        if num_level_assignments == 1:
-            if isinstance(self.level_poolers[0], ROIAlignRotated):
-                c2_roi_align = torch.ops._caffe2.RoIAlignRotated
-                aligned = True
-            else:
-                c2_roi_align = torch.ops._caffe2.RoIAlign
-                aligned = self.level_poolers[0].aligned
-
-            x0 = x[0]
-            if x0.is_quantized:
-                x0 = x0.dequantize()
-
-            out = c2_roi_align(
-                x0,
-                pooler_fmt_boxes,
-                order="NCHW",
-                spatial_scale=float(self.level_poolers[0].spatial_scale),
-                pooled_h=int(self.output_size[0]),
-                pooled_w=int(self.output_size[1]),
-                sampling_ratio=int(self.level_poolers[0].sampling_ratio),
-                aligned=aligned,
-            )
-            return out
-
-        device = pooler_fmt_boxes.device
-        assert (
-            self.max_level - self.min_level + 1 == 4
-        ), "Currently DistributeFpnProposals only support 4 levels"
-        fpn_outputs = torch.ops._caffe2.DistributeFpnProposals(
-            to_device(pooler_fmt_boxes, "cpu"),
-            roi_canonical_scale=self.canonical_box_size,
-            roi_canonical_level=self.canonical_level,
-            roi_max_level=self.max_level,
-            roi_min_level=self.min_level,
-            legacy_plus_one=False,
-        )
-        fpn_outputs = [to_device(x, device) for x in fpn_outputs]
-
-        rois_fpn_list = fpn_outputs[:-1]
-        rois_idx_restore_int32 = fpn_outputs[-1]
-
-        roi_feat_fpn_list = []
-        for roi_fpn, x_level, pooler in zip(rois_fpn_list, x, self.level_poolers):
-            if isinstance(pooler, ROIAlignRotated):
-                c2_roi_align = torch.ops._caffe2.RoIAlignRotated
-                aligned = True
-            else:
-                c2_roi_align = torch.ops._caffe2.RoIAlign
-                aligned = bool(pooler.aligned)
-
-            if x_level.is_quantized:
-                x_level = x_level.dequantize()
-
-            roi_feat_fpn = c2_roi_align(
-                x_level,
-                roi_fpn,
-                order="NCHW",
-                spatial_scale=float(pooler.spatial_scale),
-                pooled_h=int(self.output_size[0]),
-                pooled_w=int(self.output_size[1]),
-                sampling_ratio=int(pooler.sampling_ratio),
-                aligned=aligned,
-            )
-            roi_feat_fpn_list.append(roi_feat_fpn)
-
-        roi_feat_shuffled = cat(roi_feat_fpn_list, dim=0)
-        assert roi_feat_shuffled.numel() > 0 and rois_idx_restore_int32.numel() > 0, (
-            "Caffe2 export requires tracing with a model checkpoint + input that can produce valid"
-            " detections. But no detections were obtained with the given checkpoint and input!"
-        )
-        roi_feat = torch.ops._caffe2.BatchPermutation(roi_feat_shuffled, rois_idx_restore_int32)
-        return roi_feat
-
-
-class Caffe2FastRCNNOutputsInference:
-    def __init__(self, tensor_mode):
-        self.tensor_mode = tensor_mode  # whether the output is caffe2 tensor mode
-
-    def __call__(self, box_predictor, predictions, proposals):
-        """equivalent to FastRCNNOutputLayers.inference"""
-        num_classes = box_predictor.num_classes
-        score_thresh = box_predictor.test_score_thresh
-        nms_thresh = box_predictor.test_nms_thresh
-        topk_per_image = box_predictor.test_topk_per_image
-        is_rotated = len(box_predictor.box2box_transform.weights) == 5
-
-        if is_rotated:
-            box_dim = 5
-            assert box_predictor.box2box_transform.weights[4] == 1, (
-                "The weights for Rotated BBoxTransform in C2 have only 4 dimensions,"
-                + " thus enforcing the angle weight to be 1 for now"
-            )
-            box2box_transform_weights = box_predictor.box2box_transform.weights[:4]
-        else:
-            box_dim = 4
-            box2box_transform_weights = box_predictor.box2box_transform.weights
-
-        class_logits, box_regression = predictions
-        if num_classes + 1 == class_logits.shape[1]:
-            class_prob = F.softmax(class_logits, -1)
-        else:
-            assert num_classes == class_logits.shape[1]
-            class_prob = F.sigmoid(class_logits)
-            # BoxWithNMSLimit will infer num_classes from the shape of the class_prob
-            # So append a zero column as placeholder for the background class
-            class_prob = torch.cat((class_prob, torch.zeros(class_prob.shape[0], 1)), dim=1)
-
-        assert box_regression.shape[1] % box_dim == 0
-        cls_agnostic_bbox_reg = box_regression.shape[1] // box_dim == 1
-
-        input_tensor_mode = proposals[0].proposal_boxes.tensor.shape[1] == box_dim + 1
-
-        rois = type(proposals[0].proposal_boxes).cat([p.proposal_boxes for p in proposals])
-        device, dtype = rois.tensor.device, rois.tensor.dtype
-        if input_tensor_mode:
-            im_info = proposals[0].image_size
-            rois = rois.tensor
-        else:
-            im_info = torch.tensor(
-                [[sz[0], sz[1], 1.0] for sz in [x.image_size for x in proposals]]
-            )
-            batch_ids = cat(
-                [
-                    torch.full((b, 1), i, dtype=dtype, device=device)
-                    for i, b in enumerate(len(p) for p in proposals)
-                ],
-                dim=0,
-            )
-            rois = torch.cat([batch_ids, rois.tensor], dim=1)
-
-        roi_pred_bbox, roi_batch_splits = torch.ops._caffe2.BBoxTransform(
-            to_device(rois, "cpu"),
-            to_device(box_regression, "cpu"),
-            to_device(im_info, "cpu"),
-            weights=box2box_transform_weights,
-            apply_scale=True,
-            rotated=is_rotated,
-            angle_bound_on=True,
-            angle_bound_lo=-180,
-            angle_bound_hi=180,
-            clip_angle_thresh=1.0,
-            legacy_plus_one=False,
-        )
-        roi_pred_bbox = to_device(roi_pred_bbox, device)
-        roi_batch_splits = to_device(roi_batch_splits, device)
-
-        nms_outputs = torch.ops._caffe2.BoxWithNMSLimit(
-            to_device(class_prob, "cpu"),
-            to_device(roi_pred_bbox, "cpu"),
-            to_device(roi_batch_splits, "cpu"),
-            score_thresh=float(score_thresh),
-            nms=float(nms_thresh),
-            detections_per_im=int(topk_per_image),
-            soft_nms_enabled=False,
-            soft_nms_method="linear",
-            soft_nms_sigma=0.5,
-            soft_nms_min_score_thres=0.001,
-            rotated=is_rotated,
-            cls_agnostic_bbox_reg=cls_agnostic_bbox_reg,
-            input_boxes_include_bg_cls=False,
-            output_classes_include_bg_cls=False,
-            legacy_plus_one=False,
-        )
-        roi_score_nms = to_device(nms_outputs[0], device)
-        roi_bbox_nms = to_device(nms_outputs[1], device)
-        roi_class_nms = to_device(nms_outputs[2], device)
-        roi_batch_splits_nms = to_device(nms_outputs[3], device)
-        roi_keeps_nms = to_device(nms_outputs[4], device)
-        roi_keeps_size_nms = to_device(nms_outputs[5], device)
-        if not self.tensor_mode:
-            roi_class_nms = roi_class_nms.to(torch.int64)
-
-        roi_batch_ids = cat(
-            [
-                torch.full((b, 1), i, dtype=dtype, device=device)
-                for i, b in enumerate(int(x.item()) for x in roi_batch_splits_nms)
-            ],
-            dim=0,
-        )
-
-        roi_class_nms = alias(roi_class_nms, "class_nms")
-        roi_score_nms = alias(roi_score_nms, "score_nms")
-        roi_bbox_nms = alias(roi_bbox_nms, "bbox_nms")
-        roi_batch_splits_nms = alias(roi_batch_splits_nms, "batch_splits_nms")
-        roi_keeps_nms = alias(roi_keeps_nms, "keeps_nms")
-        roi_keeps_size_nms = alias(roi_keeps_size_nms, "keeps_size_nms")
-
-        results = InstancesList(
-            im_info=im_info,
-            indices=roi_batch_ids[:, 0],
-            extra_fields={
-                "pred_boxes": Caffe2Boxes(roi_bbox_nms),
-                "scores": roi_score_nms,
-                "pred_classes": roi_class_nms,
-            },
-        )
-
-        if not self.tensor_mode:
-            results = InstancesList.to_d2_instances_list(results)
-            batch_splits = roi_batch_splits_nms.int().tolist()
-            kept_indices = list(roi_keeps_nms.to(torch.int64).split(batch_splits))
-        else:
-            results = [results]
-            kept_indices = [roi_keeps_nms]
-
-        return results, kept_indices
-
-
-class Caffe2MaskRCNNInference:
-    def __call__(self, pred_mask_logits, pred_instances):
-        """equivalent to mask_head.mask_rcnn_inference"""
-        if all(isinstance(x, InstancesList) for x in pred_instances):
-            assert len(pred_instances) == 1
-            mask_probs_pred = pred_mask_logits.sigmoid()
-            mask_probs_pred = alias(mask_probs_pred, "mask_fcn_probs")
-            pred_instances[0].pred_masks = mask_probs_pred
-        else:
-            mask_rcnn_inference(pred_mask_logits, pred_instances)
-
-
-class Caffe2KeypointRCNNInference:
-    def __init__(self, use_heatmap_max_keypoint):
-        self.use_heatmap_max_keypoint = use_heatmap_max_keypoint
-
-    def __call__(self, pred_keypoint_logits, pred_instances):
-        # just return the keypoint heatmap for now,
-        # there will be option to call HeatmapMaxKeypointOp
-        output = alias(pred_keypoint_logits, "kps_score")
-        if all(isinstance(x, InstancesList) for x in pred_instances):
-            assert len(pred_instances) == 1
-            if self.use_heatmap_max_keypoint:
-                device = output.device
-                output = torch.ops._caffe2.HeatmapMaxKeypoint(
-                    to_device(output, "cpu"),
-                    pred_instances[0].pred_boxes.tensor,
-                    should_output_softmax=True,  # worth making it configurable?
-                )
-                output = to_device(output, device)
-                output = alias(output, "keypoints_out")
-            pred_instances[0].pred_keypoints = output
-        return pred_keypoint_logits
 
spaces/BAAI/AltDiffusion/README.md DELETED
@@ -1,13 +0,0 @@
----
-title: AltDiffusion
-emoji: ❤️
-colorFrom: purple
-colorTo: gray
-sdk: gradio
-sdk_version: 3.10.1
-app_file: app.py
-pinned: false
-license: creativeml-openrail-m
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Benson/text-generation/Examples/20 Minutos Hasta El Amanecer Descarga Gratuita.md DELETED
@@ -1,61 +0,0 @@
-<br />
-<h1>20 Minutes Till Dawn: A Roguelite Survival Game Review</h1>
-<p>If you are looking for a fast-paced, action-packed, and challenging game that will test your skills and reflexes, you may want to check out 20 Minutes Till Dawn. This is a roguelite survival game where you have to fight endless hordes of Lovecraftian monsters and survive the night. In this article, we will review the game's features, gameplay, graphics, sound, pros, cons, and more.</p>
-<h2>Introduction</h2>
-<p>20 Minutes Till Dawn is a roguelike shoot 'em up video game developed and published by flanne. The game was released in early access on Steam on June 8, 2022, and was ported to Android and iOS by Erabit Studios on September 9, 2022. The game left early access on Steam with version 1.0 on June 8, 2023.</p>
-<h2>20 Minutes Till Dawn free download</h2><br /><p><b><b>Download</b> --->>> <a href="https://bltlly.com/2v6MGV">https://bltlly.com/2v6MGV</a></b></p><br /><br />
-<p>The game belongs to the roguelite survival genre, which means it features permadeath, randomization, and progression across runs. The goal of the game is to survive for 20 minutes until dawn while facing an onslaught of monsters that grow stronger and more numerous as time passes. The game is inspired by Vampire Survivors, but with more active combat and customization options.</p>
-<p>The game is available on Steam for $4.99, as well as for free on Google Play, the App Store, and TapTap. It has received very positive reviews from players and critics alike, with more than 20,000 reviews on Steam and more than 6 million downloads on mobile platforms. The game has also been featured by IGN, TheGamer, Level Winner, and other media outlets.</p>
-<h2>Gameplay</h2>
-<p>The gameplay of 20 Minutes Till Dawn is simple but challenging. You control a character who can move with the WASD keys or a virtual joystick, aim with the mouse or touch screen, and shoot with a left click or a tap. You can also use the right mouse button or a double tap to activate your special ability, which varies depending on your character.</p>
-<p>As you kill monsters, you earn experience points that let you level up. Each time you level up, you can choose one of four randomly generated upgrades that improve your stats or abilities. These upgrades range from increasing your damage or health, to adding effects such as fire, poison, or stun to your attacks, to unlocking new abilities such as dash, shield, or summon. Upgrades are permanent for the current run, but they are lost when you die or restart.</p>
-<p>To survive the night, you have to keep moving and shooting while avoiding enemy attacks and environmental hazards. Enemies come in different shapes and sizes, each with its own behavior and attack pattern. Some are fast and agile, some are slow and hulking, some are ranged and explosive, and some are stealthy and deadly. You will also encounter bosses every few minutes, which are much stronger and tougher than normal enemies. Bosses have unique abilities and weaknesses that you have to exploit to defeat them.</p>
-<p>The game has four different game modes: Normal, Hardcore, Endless, and Custom. Normal mode is the default mode, where you have to survive for 20 minutes with three lives. Hardcore mode is similar to Normal mode, but you only have one life and the enemies are more aggressive. Endless mode lets you play for as long as you want, but the enemies become tougher and more frequent as time passes. Custom mode lets you create your own rules and settings for the game, such as changing the time limit, the enemy spawn rate, the difficulty level, and more.</p>
-<h2>Graphics and Sound</h2>
-<p>The sound of 20 Minutes Till Dawn is immersive and captivating, with a soundtrack that matches the mood and intensity of the game. The game has catchy, energetic synthwave-style music, with different tracks for each environment and boss. It also has realistic and satisfying sound effects, such as gunshots, explosions, screams, footsteps, and more. The game has no voice acting or dialogue, but text messages appear on screen to give you hints or warnings.</p>
-<p></p>
-<p>The game runs well on most devices and platforms, with smooth gameplay and minimal lag or technical issues. It has low system requirements for PC users, as well as options to adjust graphics quality and resolution for mobile users. The game also supports cloud saves, controller support, leaderboards, achievements, and co-op multiplayer.</p>
-<h2>Pros and Cons</h2>
-<p>20 Minutes Till Dawn is a fun and addictive game that will keep you entertained for hours. However, like any other game, it has its pros and cons. Here are some of them:</p>
-<table>
-<tr><th>Pros</th><th>Cons</th></tr>
-<tr><td>- Fast-paced, challenging gameplay that requires skill and strategy</td><td>- Permadeath can be frustrating and discouraging for some players</td></tr>
-<tr><td>- A variety of characters, weapons, upgrades, enemies, bosses, environments, and game modes that offer replay value</td><td>- The randomization can feel unfair or unbalanced at times</td></tr>
-<tr><td>- Retro-style graphics that are colorful and atmospheric</td><td>- The pixelated graphics may not appeal to everyone</td></tr>
-<tr><td>- Catchy, energetic synthwave-style music</td><td>- The music can become repetitive or annoying after a while</td></tr>
-<tr><td>- Low system requirements and cross-platform availability</td><td>- Some occasional bugs or glitches may occur</td></tr>
-</table>
-<h2>Conclusion</h2>
-
-<p>If you are interested in playing 20 Minutes Till Dawn, you can find more information or download the game from the following links:</p>
-<ul>
-<li>Steam: [20 Minutes Till Dawn on Steam]</li>
-<li>Google Play: [20 Minutes Till Dawn - Apps on Google Play]</li>
-<li>App Store: [20 Minutes Till Dawn on the App Store]</li>
-<li>TapTap: [20 Minutes Till Dawn - TapTap]</li>
-</ul>
-<p>You can also watch some gameplay videos or read some reviews from the following sources:</p>
-<ul>
-<li>IGN: [20 Minutes Till Dawn Review - IGN]</li>
-<li>TheGamer: [20 Minutes Till Dawn Review: A Roguelite That Keeps You on Your Toes]</li>
-<li>Level Winner: [20 Minutes Till Dawn Beginner's Guide: Tips, Tricks and Strategies to Survive the Night]</li>
-</ul>
-<h2>Frequently Asked Questions</h2>
-<p>Here are some of the most frequently asked questions about 20 Minutes Till Dawn:</p>
-<ol>
-<li><b>How can I unlock more characters and weapons?</b></li>
-<p>You can unlock more characters and weapons by spending gems, which are earned by killing monsters or completing achievements. You can also find some weapons as loot drops from enemies or chests.</p>
-<li><b>How can I save my progress?</b></li>
-<p>You can save your progress by using the cloud save feature, which is available on all platforms. You can also use the local save feature, which is available on PC and mobile platforms. However, keep in mind that your progress is only saved between runs, not during them. If you die or restart, you will lose your current upgrades and items.</p>
-<li><b>How can I play with my friends?</b></li>
-<p>You can play with your friends by using the co-op multiplayer feature, which is available on all platforms. You can join or host a game with up to four players online or locally. You can also chat with your friends using the voice or text chat feature.</p>
-<li><b>How do I change the game settings?</b></li>
-<p>You can change the game settings from the settings menu, which is available from the main menu or the pause menu. You can adjust the volume, graphics, controls, language, and more.</p>
-<li><b>How can I contact the developers or report a bug?</b></li>
-<p>You can contact the developers or report a bug by using the feedback feature, which is available on all platforms. You can also visit the game's official website, Discord server, Twitter page, or Facebook page.</p>
-</ol>
-<p>I hope you enjoyed this article and found it helpful. If you have any questions or comments, you can leave them below. Thanks for reading and have a great day!</p> 64aa2da5cf<br />
-<br />
-<br />
 
spaces/Billyosoro/ESRGAN/realesrgan/__init__.py DELETED
@@ -1,6 +0,0 @@
-# flake8: noqa
-from .archs import *
-from .data import *
-from .models import *
-from .utils import *
-# from .version import *
 
spaces/Bradjan310/ehartford-Wizard-Vicuna-30B-Uncensored/app.py DELETED
@@ -1,3 +0,0 @@
-import gradio as gr
-
-gr.Interface.load("models/ehartford/Wizard-Vicuna-30B-Uncensored").launch()
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/.github/ISSUE_TEMPLATE/questions-help-support.md DELETED
@@ -1,21 +0,0 @@
----
-name: "❓How to do something?"
-about: How to do X with detectron2? How detectron2 does X?
-
----
-
-## ❓ How to use Detectron2
-
-Questions like:
-
-1. How to do X with detectron2?
-2. How detectron2 does X?
-
-NOTE:
-
-1. If you met any unexpected issue when using detectron2 and wish to know why,
-   please use the "Unexpected Problems / Bugs" issue template.
-
-2. We do not answer general machine learning / computer vision questions that are not specific to
-   detectron2, such as how a model works, how to improve your training/make it converge, or what algorithm/methods can be
-   used to achieve X.
 
spaces/CVPR/LIVE/thrust/thrust/detail/temporary_buffer.h DELETED
@@ -1,76 +0,0 @@
-/*
- *  Copyright 2008-2013 NVIDIA Corporation
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-#include <thrust/detail/execution_policy.h>
-#include <thrust/pair.h>
-#include <thrust/detail/pointer.h>
-#include <thrust/detail/raw_pointer_cast.h>
-#include <thrust/detail/execute_with_allocator.h>
-#include <thrust/system/detail/generic/temporary_buffer.h>
-#include <thrust/system/detail/adl/temporary_buffer.h>
-
-namespace thrust
-{
-namespace detail
-{
-
-
-template<typename T, typename DerivedPolicy, typename Pair>
-__host__ __device__
-  thrust::pair<thrust::pointer<T,DerivedPolicy>, typename thrust::pointer<T,DerivedPolicy>::difference_type>
-    down_cast_pair(Pair p)
-{
-  // XXX should use a hypothetical thrust::static_pointer_cast here
-  thrust::pointer<T,DerivedPolicy> ptr = thrust::pointer<T,DerivedPolicy>(static_cast<T*>(thrust::raw_pointer_cast(p.first)));
-
-  typedef thrust::pair<thrust::pointer<T,DerivedPolicy>, typename thrust::pointer<T,DerivedPolicy>::difference_type> result_type;
-  return result_type(ptr, p.second);
-} // end down_cast_pair()
-
-
-} // end detail
-
-
-__thrust_exec_check_disable__
-template<typename T, typename DerivedPolicy>
-__host__ __device__
-  thrust::pair<thrust::pointer<T,DerivedPolicy>, typename thrust::pointer<T,DerivedPolicy>::difference_type>
-    get_temporary_buffer(const thrust::detail::execution_policy_base<DerivedPolicy> &exec, typename thrust::pointer<T,DerivedPolicy>::difference_type n)
-{
-  using thrust::detail::get_temporary_buffer; // execute_with_allocator
-  using thrust::system::detail::generic::get_temporary_buffer;
-
-  return thrust::detail::down_cast_pair<T,DerivedPolicy>(get_temporary_buffer<T>(thrust::detail::derived_cast(thrust::detail::strip_const(exec)), n));
-} // end get_temporary_buffer()
-
-
-__thrust_exec_check_disable__
-template<typename DerivedPolicy, typename Pointer>
-__host__ __device__
-  void return_temporary_buffer(const thrust::detail::execution_policy_base<DerivedPolicy> &exec, Pointer p, std::ptrdiff_t n)
-{
-  using thrust::detail::return_temporary_buffer; // execute_with_allocator
-  using thrust::system::detail::generic::return_temporary_buffer;
-
-  return return_temporary_buffer(thrust::detail::derived_cast(thrust::detail::strip_const(exec)), p, n);
-} // end return_temporary_buffer()
-
-
-} // end thrust
-
 
spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/generate.h DELETED
@@ -1,57 +0,0 @@
-/*
- *  Copyright 2008-2013 NVIDIA Corporation
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-
-#pragma once
-
-#include <thrust/detail/config.h>
-#include <thrust/system/detail/generic/tag.h>
-
-namespace thrust
-{
-namespace system
-{
-namespace detail
-{
-namespace generic
-{
-
-template<typename ExecutionPolicy,
-         typename ForwardIterator,
-         typename Generator>
-__host__ __device__
-  void generate(thrust::execution_policy<ExecutionPolicy> &exec,
-                ForwardIterator first,
-                ForwardIterator last,
-                Generator gen);
-
-template<typename ExecutionPolicy,
-         typename OutputIterator,
-         typename Size,
-         typename Generator>
-__host__ __device__
-  OutputIterator generate_n(thrust::execution_policy<ExecutionPolicy> &exec,
-                            OutputIterator first,
-                            Size n,
-                            Generator gen);
-
-} // end namespace generic
-} // end namespace detail
-} // end namespace system
-} // end namespace thrust
-
-#include <thrust/system/detail/generic/generate.inl>
-
 
spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/scatter.h DELETED
@@ -1,81 +0,0 @@
-/*
- *  Copyright 2008-2013 NVIDIA Corporation
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-
-#pragma once
-
-#include <thrust/detail/config.h>
-#include <thrust/system/detail/generic/tag.h>
-
-namespace thrust
-{
-namespace system
-{
-namespace detail
-{
-namespace generic
-{
-
-
-template<typename DerivedPolicy,
-         typename InputIterator1,
-         typename InputIterator2,
-         typename RandomAccessIterator>
-__host__ __device__
-  void scatter(thrust::execution_policy<DerivedPolicy> &exec,
-               InputIterator1 first,
-               InputIterator1 last,
-               InputIterator2 map,
-               RandomAccessIterator output);
-
-
-template<typename DerivedPolicy,
-         typename InputIterator1,
-         typename InputIterator2,
-         typename InputIterator3,
-         typename RandomAccessIterator>
-__host__ __device__
-  void scatter_if(thrust::execution_policy<DerivedPolicy> &exec,
-                  InputIterator1 first,
-                  InputIterator1 last,
-                  InputIterator2 map,
-                  InputIterator3 stencil,
-                  RandomAccessIterator output);
-
-
-template<typename DerivedPolicy,
-         typename InputIterator1,
-         typename InputIterator2,
-         typename InputIterator3,
-         typename RandomAccessIterator,
-         typename Predicate>
-__host__ __device__
-  void scatter_if(thrust::execution_policy<DerivedPolicy> &exec,
-                  InputIterator1 first,
-                  InputIterator1 last,
-                  InputIterator2 map,
-                  InputIterator3 stencil,
-                  RandomAccessIterator output,
-                  Predicate pred);
-
-
-} // end namespace generic
-} // end namespace detail
-} // end namespace system
-} // end namespace thrust
-
-#include <thrust/system/detail/generic/scatter.inl>
-
 
 
spaces/CVPR/WALT/walt/datasets/pipelines/transforms.py DELETED
@@ -1,1861 +0,0 @@
1
- import copy
2
- import inspect
3
-
4
- import mmcv
5
- import numpy as np
6
- from numpy import random
7
-
8
- from mmdet.core import PolygonMasks
9
- from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps
10
- from ..builder import PIPELINES
11
-
12
- try:
13
- from imagecorruptions import corrupt
14
- except ImportError:
15
- corrupt = None
16
-
17
- try:
18
- import albumentations
19
- from albumentations import Compose
20
- except ImportError:
21
- albumentations = None
22
- Compose = None
23
-
24
-
25
- @PIPELINES.register_module()
26
- class Resize(object):
27
- """Resize images & bbox & mask.
28
-
29
- This transform resizes the input image to some scale. Bboxes and masks are
30
- then resized with the same scale factor. If the input dict contains the key
31
- "scale", then the scale in the input dict is used, otherwise the specified
32
- scale in the init method is used. If the input dict contains the key
33
- "scale_factor" (if MultiScaleFlipAug does not give img_scale but
34
- scale_factor), the actual scale will be computed by image shape and
35
- scale_factor.
36
-
37
- `img_scale` can either be a tuple (single-scale) or a list of tuple
38
- (multi-scale). There are 3 multiscale modes:
39
-
40
- - ``ratio_range is not None``: randomly sample a ratio from the ratio \
41
- range and multiply it with the image scale.
42
- - ``ratio_range is None`` and ``multiscale_mode == "range"``: randomly \
43
- sample a scale from the multiscale range.
44
- - ``ratio_range is None`` and ``multiscale_mode == "value"``: randomly \
45
- sample a scale from multiple scales.
46
-
47
- Args:
48
- img_scale (tuple or list[tuple]): Image scales for resizing.
49
- multiscale_mode (str): Either "range" or "value".
50
- ratio_range (tuple[float]): (min_ratio, max_ratio)
51
- keep_ratio (bool): Whether to keep the aspect ratio when resizing the
52
- image.
53
- bbox_clip_border (bool, optional): Whether to clip the objects outside
54
- the border of the image. Defaults to True.
55
- backend (str): Image resize backend, choices are 'cv2' and 'pillow'.
56
- These two backends generate slightly different results. Defaults
57
- to 'cv2'.
58
- override (bool, optional): Whether to override `scale` and
59
- `scale_factor` so as to call resize twice. Default False. If True,
60
- after the first resizing, the existed `scale` and `scale_factor`
61
- will be ignored so the second resizing can be allowed.
62
- This option is a work-around for multiple times of resize in DETR.
63
- Defaults to False.
64
- """
65
-
66
- def __init__(self,
67
- img_scale=None,
68
- multiscale_mode='range',
69
- ratio_range=None,
70
- keep_ratio=True,
71
- bbox_clip_border=True,
72
- backend='cv2',
73
- override=False):
74
- if img_scale is None:
75
- self.img_scale = None
76
- else:
77
- if isinstance(img_scale, list):
78
- self.img_scale = img_scale
79
- else:
80
- self.img_scale = [img_scale]
81
- assert mmcv.is_list_of(self.img_scale, tuple)
82
-
83
- if ratio_range is not None:
84
- # mode 1: given a scale and a range of image ratio
85
- assert len(self.img_scale) == 1
86
- else:
87
- # mode 2: given multiple scales or a range of scales
88
- assert multiscale_mode in ['value', 'range']
89
-
90
- self.backend = backend
91
- self.multiscale_mode = multiscale_mode
92
- self.ratio_range = ratio_range
93
- self.keep_ratio = keep_ratio
94
- # TODO: refactor the override option in Resize
95
- self.override = override
96
- self.bbox_clip_border = bbox_clip_border
97
-
98
- @staticmethod
99
- def random_select(img_scales):
100
- """Randomly select an img_scale from given candidates.
101
-
102
- Args:
103
- img_scales (list[tuple]): Image scales for selection.
104
-
105
- Returns:
106
- (tuple, int): Returns a tuple ``(img_scale, scale_idx)``, \
107
- where ``img_scale`` is the selected image scale and \
108
- ``scale_idx`` is the selected index in the given candidates.
109
- """
110
-
111
- assert mmcv.is_list_of(img_scales, tuple)
112
- scale_idx = np.random.randint(len(img_scales))
113
- img_scale = img_scales[scale_idx]
114
- return img_scale, scale_idx
115
-
116
- @staticmethod
117
- def random_sample(img_scales):
118
- """Randomly sample an img_scale when ``multiscale_mode=='range'``.
119
-
120
- Args:
121
- img_scales (list[tuple]): Image scale range for sampling.
122
- There must be two tuples in img_scales, which specify the lower
123
- and upper bound of image scales.
124
-
125
- Returns:
126
- (tuple, None): Returns a tuple ``(img_scale, None)``, where \
127
- ``img_scale`` is sampled scale and None is just a placeholder \
128
- to be consistent with :func:`random_select`.
129
- """
130
-
131
- assert mmcv.is_list_of(img_scales, tuple) and len(img_scales) == 2
132
- img_scale_long = [max(s) for s in img_scales]
133
- img_scale_short = [min(s) for s in img_scales]
134
- long_edge = np.random.randint(
135
- min(img_scale_long),
136
- max(img_scale_long) + 1)
137
- short_edge = np.random.randint(
138
- min(img_scale_short),
139
- max(img_scale_short) + 1)
140
- img_scale = (long_edge, short_edge)
141
- return img_scale, None
142
-
143
- @staticmethod
144
- def random_sample_ratio(img_scale, ratio_range):
145
- """Randomly sample an img_scale when ``ratio_range`` is specified.
146
-
147
- A ratio will be randomly sampled from the range specified by
148
- ``ratio_range``. Then it would be multiplied with ``img_scale`` to
149
- generate sampled scale.
150
-
151
- Args:
152
- img_scale (tuple): Image scale base to multiply with ratio.
153
- ratio_range (tuple[float]): The minimum and maximum ratio to scale
154
- the ``img_scale``.
155
-
156
- Returns:
157
- (tuple, None): Returns a tuple ``(scale, None)``, where \
158
- ``scale`` is sampled ratio multiplied with ``img_scale`` and \
159
- None is just a placeholder to be consistent with \
160
- :func:`random_select`.
161
- """
162
-
163
- assert isinstance(img_scale, tuple) and len(img_scale) == 2
164
- min_ratio, max_ratio = ratio_range
165
- assert min_ratio <= max_ratio
166
- ratio = np.random.random_sample() * (max_ratio - min_ratio) + min_ratio
167
- scale = int(img_scale[0] * ratio), int(img_scale[1] * ratio)
168
- return scale, None
169
-
170
- def _random_scale(self, results):
171
- """Randomly sample an img_scale according to ``ratio_range`` and
172
- ``multiscale_mode``.
173
-
174
- If ``ratio_range`` is specified, a ratio will be sampled and be
175
- multiplied with ``img_scale``.
176
- If multiple scales are specified by ``img_scale``, a scale will be
177
- sampled according to ``multiscale_mode``.
178
- Otherwise, single scale will be used.
179
-
180
- Args:
181
- results (dict): Result dict from :obj:`dataset`.
182
-
183
- Returns:
184
- dict: Two new keys 'scale` and 'scale_idx` are added into \
185
- ``results``, which would be used by subsequent pipelines.
186
- """
187
-
188
- if self.ratio_range is not None:
189
- scale, scale_idx = self.random_sample_ratio(
190
- self.img_scale[0], self.ratio_range)
191
- elif len(self.img_scale) == 1:
192
- scale, scale_idx = self.img_scale[0], 0
193
- elif self.multiscale_mode == 'range':
194
- scale, scale_idx = self.random_sample(self.img_scale)
195
- elif self.multiscale_mode == 'value':
196
- scale, scale_idx = self.random_select(self.img_scale)
197
- else:
198
- raise NotImplementedError
199
-
200
- results['scale'] = scale
201
- results['scale_idx'] = scale_idx
202
-
203
- def _resize_img(self, results):
204
- """Resize images with ``results['scale']``."""
205
- for key in results.get('img_fields', ['img']):
206
- if self.keep_ratio:
207
- img, scale_factor = mmcv.imrescale(
208
- results[key],
209
- results['scale'],
210
- return_scale=True,
211
- backend=self.backend)
212
- # the w_scale and h_scale have a minor difference
213
- # a real fix should be done in the mmcv.imrescale in the future
214
- new_h, new_w = img.shape[:2]
215
- h, w = results[key].shape[:2]
216
- w_scale = new_w / w
217
- h_scale = new_h / h
218
- else:
219
- img, w_scale, h_scale = mmcv.imresize(
220
- results[key],
221
- results['scale'],
222
- return_scale=True,
223
- backend=self.backend)
224
- results[key] = img
225
-
226
- scale_factor = np.array([w_scale, h_scale, w_scale, h_scale],
227
- dtype=np.float32)
228
- results['img_shape'] = img.shape
229
- # in case that there is no padding
230
- results['pad_shape'] = img.shape
231
- results['scale_factor'] = scale_factor
232
- results['keep_ratio'] = self.keep_ratio
233
-
234
- def _resize_bboxes(self, results):
235
- """Resize bounding boxes with ``results['scale_factor']``."""
236
- for key in results.get('bbox_fields', []):
237
- bboxes = results[key] * results['scale_factor']
238
- if self.bbox_clip_border:
239
- img_shape = results['img_shape']
240
- bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1])
241
- bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0])
242
- results[key] = bboxes
243
-
244
- def _resize_bboxes3d(self, results):
245
- """Resize bounding boxes with ``results['scale_factor']``."""
246
- key = 'gt_bboxes_3d_proj'
247
- bboxes3d_proj = results[key][:,:,:2]
248
- img_shape = results['img_shape']
249
- for i in range(results[key].shape[1]):
250
- bboxes3d_proj[:,i,:] = bboxes3d_proj[:,i,:] * results['scale_factor'][:2]
251
- if self.bbox_clip_border:
252
- bboxes3d_proj[:, i, 0] = np.clip(bboxes3d_proj[:, i, 0], 0, img_shape[1])
253
- bboxes3d_proj[:, i, 1] = np.clip(bboxes3d_proj[:, i, 1], 0, img_shape[0])
254
- results[key] = bboxes3d_proj
255
-
256
- def _resize_masks(self, results):
257
- """Resize masks with ``results['scale']``"""
258
- for key in results.get('mask_fields', []):
259
- if results[key] is None:
260
- continue
261
- if self.keep_ratio:
262
- results[key] = results[key].rescale(results['scale'])
263
- else:
264
- results[key] = results[key].resize(results['img_shape'][:2])
265
-
266
- def _resize_seg(self, results):
267
- """Resize semantic segmentation map with ``results['scale']``."""
268
- for key in results.get('seg_fields', []):
269
- if self.keep_ratio:
270
- gt_seg = mmcv.imrescale(
271
- results[key],
272
- results['scale'],
273
- interpolation='nearest',
274
- backend=self.backend)
275
- else:
276
- gt_seg = mmcv.imresize(
277
- results[key],
278
- results['scale'],
279
- interpolation='nearest',
280
- backend=self.backend)
281
- results[key] = gt_seg
282
-
283
- def __call__(self, results):
284
- """Call function to resize images, bounding boxes, masks, semantic
285
- segmentation map.
286
-
287
- Args:
288
- results (dict): Result dict from loading pipeline.
289
-
290
- Returns:
291
- dict: Resized results, 'img_shape', 'pad_shape', 'scale_factor', \
292
- 'keep_ratio' keys are added into result dict.
293
- """
294
-
295
- if 'scale' not in results:
296
- if 'scale_factor' in results:
297
- img_shape = results['img'].shape[:2]
298
- scale_factor = results['scale_factor']
299
- assert isinstance(scale_factor, float)
300
- results['scale'] = tuple(
301
- [int(x * scale_factor) for x in img_shape][::-1])
302
- else:
303
- self._random_scale(results)
304
- else:
305
- if not self.override:
306
- assert 'scale_factor' not in results, (
307
- 'scale and scale_factor cannot be both set.')
308
- else:
309
- results.pop('scale')
310
- if 'scale_factor' in results:
311
- results.pop('scale_factor')
312
- self._random_scale(results)
313
-
314
- self._resize_img(results)
315
- self._resize_bboxes(results)
316
- self._resize_bboxes3d(results)
317
- self._resize_masks(results)
318
- self._resize_seg(results)
319
- return results
320
-
321
- def __repr__(self):
322
- repr_str = self.__class__.__name__
323
- repr_str += f'(img_scale={self.img_scale}, '
324
- repr_str += f'multiscale_mode={self.multiscale_mode}, '
325
- repr_str += f'ratio_range={self.ratio_range}, '
326
- repr_str += f'keep_ratio={self.keep_ratio}, '
327
- repr_str += f'bbox_clip_border={self.bbox_clip_border})'
328
- return repr_str
329
-
330
-
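
A minimal standalone sketch of the three multiscale sampling modes described in the Resize docstring above (scale values are hypothetical and only numpy is assumed; this is not part of the deleted file):

import numpy as np

img_scales = [(1333, 640), (1333, 800)]  # hypothetical (long, short) candidates

# multiscale_mode='value': pick one candidate scale
scale = img_scales[np.random.randint(len(img_scales))]

# multiscale_mode='range': sample long/short edges independently in-bounds
long_edge = np.random.randint(min(max(s) for s in img_scales),
                              max(max(s) for s in img_scales) + 1)
short_edge = np.random.randint(min(min(s) for s in img_scales),
                               max(min(s) for s in img_scales) + 1)
scale = (long_edge, short_edge)

# ratio_range mode: multiply a single base scale by a sampled ratio
min_ratio, max_ratio = 0.8, 1.2
ratio = np.random.random_sample() * (max_ratio - min_ratio) + min_ratio
scale = (int(1333 * ratio), int(800 * ratio))
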
331
- @PIPELINES.register_module()
332
- class RandomFlip(object):
333
- """Flip the image & bbox & mask.
334
-
335
- If the input dict contains the key "flip", then the flag will be used,
336
- otherwise it will be randomly decided by a ratio specified in the init
337
- method.
338
-
339
- When random flip is enabled, ``flip_ratio``/``direction`` can either be a
340
- float/string or tuple of float/string. There are 3 flip modes:
341
-
342
- - ``flip_ratio`` is float, ``direction`` is string: the image will be
343
- ``direction``ly flipped with probability of ``flip_ratio`` .
344
- E.g., ``flip_ratio=0.5``, ``direction='horizontal'``,
345
- then image will be horizontally flipped with probability of 0.5.
346
- ``flip_ratio`` is float, ``direction`` is list of string: the image will
347
- be ``direction[i]``ly flipped with probability of
348
- ``flip_ratio/len(direction)``.
349
- E.g., ``flip_ratio=0.5``, ``direction=['horizontal', 'vertical']``,
350
- then image will be horizontally flipped with probability of 0.25,
351
- vertically with probability of 0.25.
352
- - ``flip_ratio`` is list of float, ``direction`` is list of string:
353
- given ``len(flip_ratio) == len(direction)``, the image will
354
- be ``direction[i]``ly flipped with probability of ``flip_ratio[i]``.
355
- E.g., ``flip_ratio=[0.3, 0.5]``, ``direction=['horizontal',
356
- 'vertical']``, then image will be horizontally flipped with probability
357
- of 0.3, vertically with probability of 0.5
358
-
359
- Args:
360
- flip_ratio (float | list[float], optional): The flipping probability.
361
- Default: None.
362
- direction (str | list[str], optional): The flipping direction. Options
363
- are 'horizontal', 'vertical', 'diagonal'. Default: 'horizontal'.
364
- If input is a list, the length must equal ``flip_ratio``. Each
365
- element in ``flip_ratio`` indicates the flip probability of
366
- corresponding direction.
367
- """
368
-
369
- def __init__(self, flip_ratio=None, direction='horizontal'):
370
- if isinstance(flip_ratio, list):
371
- assert mmcv.is_list_of(flip_ratio, float)
372
- assert 0 <= sum(flip_ratio) <= 1
373
- elif isinstance(flip_ratio, float):
374
- assert 0 <= flip_ratio <= 1
375
- elif flip_ratio is None:
376
- pass
377
- else:
378
- raise ValueError('flip_ratios must be None, float, '
379
- 'or list of float')
380
- self.flip_ratio = flip_ratio
381
-
382
- valid_directions = ['horizontal', 'vertical', 'diagonal']
383
- if isinstance(direction, str):
384
- assert direction in valid_directions
385
- elif isinstance(direction, list):
386
- assert mmcv.is_list_of(direction, str)
387
- assert set(direction).issubset(set(valid_directions))
388
- else:
389
- raise ValueError('direction must be either str or list of str')
390
- self.direction = direction
391
-
392
- if isinstance(flip_ratio, list):
393
- assert len(self.flip_ratio) == len(self.direction)
394
-
395
- def bbox_flip(self, bboxes, img_shape, direction):
396
- """Flip bboxes horizontally.
397
-
398
- Args:
399
- bboxes (numpy.ndarray): Bounding boxes, shape (..., 4*k)
400
- img_shape (tuple[int]): Image shape (height, width)
401
- direction (str): Flip direction. Options are 'horizontal',
402
- 'vertical', 'diagonal'.
403
-
404
- Returns:
405
- numpy.ndarray: Flipped bounding boxes.
406
- """
407
-
408
- assert bboxes.shape[-1] % 4 == 0
409
- flipped = bboxes.copy()
410
- if direction == 'horizontal':
411
- w = img_shape[1]
412
- flipped[..., 0::4] = w - bboxes[..., 2::4]
413
- flipped[..., 2::4] = w - bboxes[..., 0::4]
414
- elif direction == 'vertical':
415
- h = img_shape[0]
416
- flipped[..., 1::4] = h - bboxes[..., 3::4]
417
- flipped[..., 3::4] = h - bboxes[..., 1::4]
418
- elif direction == 'diagonal':
419
- w = img_shape[1]
420
- h = img_shape[0]
421
- flipped[..., 0::4] = w - bboxes[..., 2::4]
422
- flipped[..., 1::4] = h - bboxes[..., 3::4]
423
- flipped[..., 2::4] = w - bboxes[..., 0::4]
424
- flipped[..., 3::4] = h - bboxes[..., 1::4]
425
- else:
426
- raise ValueError(f"Invalid flipping direction '{direction}'")
427
- return flipped
428
-
429
- def bbox3d_proj_flip(self, bboxes, img_shape, direction):
430
- """Flip bboxes horizontally.
431
-
432
- Args:
433
- bboxes (numpy.ndarray): Projected 3D box corners, shape (N, K, 2+).
434
- img_shape (tuple[int]): Image shape (height, width)
435
- direction (str): Flip direction. Options are 'horizontal',
436
- 'vertical', 'diagonal'.
437
-
438
- Returns:
439
- numpy.ndarray: Flipped bounding boxes.
440
- """
441
-
442
- flipped = bboxes.copy()
443
- if direction == 'horizontal':
444
- w = img_shape[1]
445
-
446
- flipped[:,:,0] = w - bboxes[:,:, 0]
447
- elif direction == 'vertical':
448
- h = img_shape[0]
449
- flipped[:,:,1] = h - bboxes[:,:, 1]
450
- elif direction == 'diagonal':
451
- w = img_shape[1]
452
- h = img_shape[0]
453
- flipped[:,:,0] = w - bboxes[:,:, 0]
454
- flipped[:,:,1] = h - bboxes[:,:, 1]
455
- else:
456
- raise ValueError(f"Invalid flipping direction '{direction}'")
457
- flipped[bboxes == -100] = -100
458
- return flipped
459
-
460
-
461
- def __call__(self, results):
462
- """Call function to flip bounding boxes, masks, semantic segmentation
463
- maps.
464
-
465
- Args:
466
- results (dict): Result dict from loading pipeline.
467
-
468
- Returns:
469
- dict: Flipped results, 'flip', 'flip_direction' keys are added \
470
- into result dict.
471
- """
472
-
473
- if 'flip' not in results:
474
- if isinstance(self.direction, list):
475
- # None means non-flip
476
- direction_list = self.direction + [None]
477
- else:
478
- # None means non-flip
479
- direction_list = [self.direction, None]
480
-
481
- if isinstance(self.flip_ratio, list):
482
- non_flip_ratio = 1 - sum(self.flip_ratio)
483
- flip_ratio_list = self.flip_ratio + [non_flip_ratio]
484
- else:
485
- non_flip_ratio = 1 - self.flip_ratio
486
- # exclude non-flip
487
- single_ratio = self.flip_ratio / (len(direction_list) - 1)
488
- flip_ratio_list = [single_ratio] * (len(direction_list) -
489
- 1) + [non_flip_ratio]
490
-
491
- cur_dir = np.random.choice(direction_list, p=flip_ratio_list)
492
-
493
- results['flip'] = cur_dir is not None
494
- if 'flip_direction' not in results:
495
- results['flip_direction'] = cur_dir
496
- if results['flip']:
497
- # flip image
498
- for key in results.get('img_fields', ['img']):
499
- results[key] = mmcv.imflip(
500
- results[key], direction=results['flip_direction'])
501
- # flip bboxes
502
- for key in results.get('bbox_fields', []):
503
- results[key] = self.bbox_flip(results[key],
504
- results['img_shape'],
505
- results['flip_direction'])
506
- for key in results.get('bbox3d_fields', []):
507
- if '_proj' in key:
508
- results[key] = self.bbox3d_proj_flip(results[key],
509
- results['img_shape'],
510
- results['flip_direction'])
511
- # flip masks
512
- for key in results.get('mask_fields', []):
513
- results[key] = results[key].flip(results['flip_direction'])
514
-
515
- # flip segs
516
- for key in results.get('seg_fields', []):
517
- results[key] = mmcv.imflip(
518
- results[key], direction=results['flip_direction'])
519
- return results
520
-
521
- def __repr__(self):
522
- return self.__class__.__name__ + f'(flip_ratio={self.flip_ratio})'
523
-
524
-
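
The horizontal box flip above reduces to x1' = w - x2 and x2' = w - x1; a tiny numpy check with a made-up box and image width (illustrative only):

import numpy as np

w = 100  # hypothetical image width
bboxes = np.array([[10., 20., 40., 60.]])  # (x1, y1, x2, y2)
flipped = bboxes.copy()
flipped[..., 0::4] = w - bboxes[..., 2::4]  # new x1 = w - old x2
flipped[..., 2::4] = w - bboxes[..., 0::4]  # new x2 = w - old x1
print(flipped)  # [[60. 20. 90. 60.]]
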
525
- @PIPELINES.register_module()
526
- class Pad(object):
527
- """Pad the image & mask.
528
-
529
- There are two padding modes: (1) pad to a fixed size and (2) pad to the
530
- minimum size that is divisible by some number.
531
- Added keys are "pad_shape", "pad_fixed_size", "pad_size_divisor".
532
-
533
- Args:
534
- size (tuple, optional): Fixed padding size.
535
- size_divisor (int, optional): The divisor of padded size.
536
- pad_val (float, optional): Padding value, 0 by default.
537
- """
538
-
539
- def __init__(self, size=None, size_divisor=None, pad_val=0):
540
- self.size = size
541
- self.size_divisor = size_divisor
542
- self.pad_val = pad_val
543
- # only one of size and size_divisor should be valid
544
- assert size is not None or size_divisor is not None
545
- assert size is None or size_divisor is None
546
-
547
- def _pad_img(self, results):
548
- """Pad images according to ``self.size``."""
549
- for key in results.get('img_fields', ['img']):
550
- if self.size is not None:
551
- padded_img = mmcv.impad(
552
- results[key], shape=self.size, pad_val=self.pad_val)
553
- elif self.size_divisor is not None:
554
- padded_img = mmcv.impad_to_multiple(
555
- results[key], self.size_divisor, pad_val=self.pad_val)
556
- results[key] = padded_img
557
- results['pad_shape'] = padded_img.shape
558
- results['pad_fixed_size'] = self.size
559
- results['pad_size_divisor'] = self.size_divisor
560
-
561
- def _pad_masks(self, results):
562
- """Pad masks according to ``results['pad_shape']``."""
563
- pad_shape = results['pad_shape'][:2]
564
- for key in results.get('mask_fields', []):
565
- results[key] = results[key].pad(pad_shape, pad_val=self.pad_val)
566
-
567
- def _pad_seg(self, results):
568
- """Pad semantic segmentation map according to
569
- ``results['pad_shape']``."""
570
- for key in results.get('seg_fields', []):
571
- results[key] = mmcv.impad(
572
- results[key], shape=results['pad_shape'][:2])
573
-
574
- def __call__(self, results):
575
- """Call function to pad images, masks, semantic segmentation maps.
576
-
577
- Args:
578
- results (dict): Result dict from loading pipeline.
579
-
580
- Returns:
581
- dict: Updated result dict.
582
- """
583
- self._pad_img(results)
584
- self._pad_masks(results)
585
- self._pad_seg(results)
586
- return results
587
-
588
- def __repr__(self):
589
- repr_str = self.__class__.__name__
590
- repr_str += f'(size={self.size}, '
591
- repr_str += f'size_divisor={self.size_divisor}, '
592
- repr_str += f'pad_val={self.pad_val})'
593
- return repr_str
594
-
595
-
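
For the size_divisor mode, the padded shape is each dimension rounded up to the next multiple of the divisor; a numpy sketch of what the ``mmcv.impad_to_multiple`` call above computes (right/bottom zero padding assumed, values hypothetical):

import numpy as np

img = np.zeros((500, 375, 3), dtype=np.uint8)  # hypothetical input
divisor = 32
pad_h = int(np.ceil(img.shape[0] / divisor)) * divisor  # 512
pad_w = int(np.ceil(img.shape[1] / divisor)) * divisor  # 384
padded = np.zeros((pad_h, pad_w, 3), dtype=img.dtype)   # pad_val = 0
padded[:img.shape[0], :img.shape[1]] = img
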
596
- @PIPELINES.register_module()
597
- class Normalize(object):
598
- """Normalize the image.
599
-
600
- Added key is "img_norm_cfg".
601
-
602
- Args:
603
- mean (sequence): Mean values of 3 channels.
604
- std (sequence): Std values of 3 channels.
605
- to_rgb (bool): Whether to convert the image from BGR to RGB,
606
- default is True.
607
- """
608
-
609
- def __init__(self, mean, std, to_rgb=True):
610
- self.mean = np.array(mean, dtype=np.float32)
611
- self.std = np.array(std, dtype=np.float32)
612
- self.to_rgb = to_rgb
613
-
614
- def __call__(self, results):
615
- """Call function to normalize images.
616
-
617
- Args:
618
- results (dict): Result dict from loading pipeline.
619
-
620
- Returns:
621
- dict: Normalized results, 'img_norm_cfg' key is added into
622
- result dict.
623
- """
624
- for key in results.get('img_fields', ['img']):
625
- results[key] = mmcv.imnormalize(results[key], self.mean, self.std,
626
- self.to_rgb)
627
- results['img_norm_cfg'] = dict(
628
- mean=self.mean, std=self.std, to_rgb=self.to_rgb)
629
- return results
630
-
631
- def __repr__(self):
632
- repr_str = self.__class__.__name__
633
- repr_str += f'(mean={self.mean}, std={self.std}, to_rgb={self.to_rgb})'
634
- return repr_str
635
-
636
-
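
The normalization itself is a per-channel affine transform with an optional BGR-to-RGB swap; a numpy sketch equivalent to the ``mmcv.imnormalize`` call above (ImageNet statistics shown as example values):

import numpy as np

mean = np.array([123.675, 116.28, 103.53], dtype=np.float32)
std = np.array([58.395, 57.12, 57.375], dtype=np.float32)
img = np.random.randint(0, 256, (4, 4, 3)).astype(np.float32)  # BGR input
img = img[..., ::-1]      # to_rgb=True: BGR -> RGB
img = (img - mean) / std  # per-channel normalization
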
637
- @PIPELINES.register_module()
638
- class RandomCrop(object):
639
- """Random crop the image & bboxes & masks.
640
-
641
- The absolute `crop_size` is sampled based on `crop_type` and `image_size`,
642
- then the cropped results are generated.
643
-
644
- Args:
645
- crop_size (tuple): The relative ratio or absolute pixels of
646
- height and width.
647
- crop_type (str, optional): one of "relative_range", "relative",
648
- "absolute", "absolute_range". "relative" randomly crops
649
- (h * crop_size[0], w * crop_size[1]) part from an input of size
650
- (h, w). "relative_range" uniformly samples relative crop size from
651
- range [crop_size[0], 1] and [crop_size[1], 1] for height and width
652
- respectively. "absolute" crops from an input with absolute size
653
- (crop_size[0], crop_size[1]). "absolute_range" uniformly samples
654
- crop_h in range [crop_size[0], min(h, crop_size[1])] and crop_w
655
- in range [crop_size[0], min(w, crop_size[1])]. Default "absolute".
656
- allow_negative_crop (bool, optional): Whether to allow a crop that does
657
- not contain any bbox area. Default False.
658
- bbox_clip_border (bool, optional): Whether to clip the objects outside
659
- the border of the image. Defaults to True.
660
-
661
- Note:
662
- - If the image is smaller than the absolute crop size, return the
663
- original image.
664
- - The keys for bboxes, labels and masks must be aligned. That is,
665
- `gt_bboxes` corresponds to `gt_labels` and `gt_masks`, and
666
- `gt_bboxes_ignore` corresponds to `gt_labels_ignore` and
667
- `gt_masks_ignore`.
668
- - If the crop does not contain any gt-bbox region and
669
- `allow_negative_crop` is set to False, skip this image.
670
- """
671
-
672
- def __init__(self,
673
- crop_size,
674
- crop_type='absolute',
675
- allow_negative_crop=False,
676
- bbox_clip_border=True):
677
- if crop_type not in [
678
- 'relative_range', 'relative', 'absolute', 'absolute_range'
679
- ]:
680
- raise ValueError(f'Invalid crop_type {crop_type}.')
681
- if crop_type in ['absolute', 'absolute_range']:
682
- assert crop_size[0] > 0 and crop_size[1] > 0
683
- assert isinstance(crop_size[0], int) and isinstance(
684
- crop_size[1], int)
685
- else:
686
- assert 0 < crop_size[0] <= 1 and 0 < crop_size[1] <= 1
687
- self.crop_size = crop_size
688
- self.crop_type = crop_type
689
- self.allow_negative_crop = allow_negative_crop
690
- self.bbox_clip_border = bbox_clip_border
691
- # The key correspondence from bboxes to labels and masks.
692
- self.bbox2label = {
693
- 'gt_bboxes': 'gt_labels',
694
- 'gt_bboxes_ignore': 'gt_labels_ignore'
695
- }
696
- self.bbox2mask = {
697
- 'gt_bboxes': 'gt_masks',
698
- 'gt_bboxes_ignore': 'gt_masks_ignore'
699
- }
700
-
701
- def _crop_data(self, results, crop_size, allow_negative_crop):
702
- """Function to randomly crop images, bounding boxes, masks, semantic
703
- segmentation maps.
704
-
705
- Args:
706
- results (dict): Result dict from loading pipeline.
707
- crop_size (tuple): Expected absolute size after cropping, (h, w).
708
- allow_negative_crop (bool): Whether to allow a crop that does not
709
- contain any bbox area. Defaults to False.
710
-
711
- Returns:
712
- dict: Randomly cropped results, 'img_shape' key in result dict is
713
- updated according to crop size.
714
- """
715
- assert crop_size[0] > 0 and crop_size[1] > 0
716
- for key in results.get('img_fields', ['img']):
717
- img = results[key]
718
- margin_h = max(img.shape[0] - crop_size[0], 0)
719
- margin_w = max(img.shape[1] - crop_size[1], 0)
720
- offset_h = np.random.randint(0, margin_h + 1)
721
- offset_w = np.random.randint(0, margin_w + 1)
722
- crop_y1, crop_y2 = offset_h, offset_h + crop_size[0]
723
- crop_x1, crop_x2 = offset_w, offset_w + crop_size[1]
724
-
725
- # crop the image
726
- img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...]
727
- img_shape = img.shape
728
- results[key] = img
729
- results['img_shape'] = img_shape
730
-
731
- # crop bboxes accordingly and clip to the image boundary
732
- for key in results.get('bbox_fields', []):
733
- # e.g. gt_bboxes and gt_bboxes_ignore
734
- bbox_offset = np.array([offset_w, offset_h, offset_w, offset_h],
735
- dtype=np.float32)
736
- bboxes = results[key] - bbox_offset
737
- if self.bbox_clip_border:
738
- bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1])
739
- bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0])
740
- valid_inds = (bboxes[:, 2] > bboxes[:, 0]) & (
741
- bboxes[:, 3] > bboxes[:, 1])
742
- # If the crop does not contain any gt-bbox area and
743
- # allow_negative_crop is False, skip this image.
744
- if (key == 'gt_bboxes' and not valid_inds.any()
745
- and not allow_negative_crop):
746
- return None
747
- results[key] = bboxes[valid_inds, :]
748
- # label fields. e.g. gt_labels and gt_labels_ignore
749
- label_key = self.bbox2label.get(key)
750
- if label_key in results:
751
- results[label_key] = results[label_key][valid_inds]
752
-
753
- # mask fields, e.g. gt_masks and gt_masks_ignore
754
- mask_key = self.bbox2mask.get(key)
755
- if mask_key in results:
756
- results[mask_key] = results[mask_key][
757
- valid_inds.nonzero()[0]].crop(
758
- np.asarray([crop_x1, crop_y1, crop_x2, crop_y2]))
759
-
760
- # crop semantic seg
761
- for key in results.get('seg_fields', []):
762
- results[key] = results[key][crop_y1:crop_y2, crop_x1:crop_x2]
763
-
764
- return results
765
-
766
- def _get_crop_size(self, image_size):
767
- """Randomly generates the absolute crop size based on `crop_type` and
768
- `image_size`.
769
-
770
- Args:
771
- image_size (tuple): (h, w).
772
-
773
- Returns:
774
- crop_size (tuple): (crop_h, crop_w) in absolute pixels.
775
- """
776
- h, w = image_size
777
- if self.crop_type == 'absolute':
778
- return (min(self.crop_size[0], h), min(self.crop_size[1], w))
779
- elif self.crop_type == 'absolute_range':
780
- assert self.crop_size[0] <= self.crop_size[1]
781
- crop_h = np.random.randint(
782
- min(h, self.crop_size[0]),
783
- min(h, self.crop_size[1]) + 1)
784
- crop_w = np.random.randint(
785
- min(w, self.crop_size[0]),
786
- min(w, self.crop_size[1]) + 1)
787
- return crop_h, crop_w
788
- elif self.crop_type == 'relative':
789
- crop_h, crop_w = self.crop_size
790
- return int(h * crop_h + 0.5), int(w * crop_w + 0.5)
791
- elif self.crop_type == 'relative_range':
792
- crop_size = np.asarray(self.crop_size, dtype=np.float32)
793
- crop_h, crop_w = crop_size + np.random.rand(2) * (1 - crop_size)
794
- return int(h * crop_h + 0.5), int(w * crop_w + 0.5)
795
-
796
- def __call__(self, results):
797
- """Call function to randomly crop images, bounding boxes, masks,
798
- semantic segmentation maps.
799
-
800
- Args:
801
- results (dict): Result dict from loading pipeline.
802
-
803
- Returns:
804
- dict: Randomly cropped results, 'img_shape' key in result dict is
805
- updated according to crop size.
806
- """
807
- image_size = results['img'].shape[:2]
808
- crop_size = self._get_crop_size(image_size)
809
- results = self._crop_data(results, crop_size, self.allow_negative_crop)
810
- return results
811
-
812
- def __repr__(self):
813
- repr_str = self.__class__.__name__
814
- repr_str += f'(crop_size={self.crop_size}, '
815
- repr_str += f'crop_type={self.crop_type}, '
816
- repr_str += f'allow_negative_crop={self.allow_negative_crop}, '
817
- repr_str += f'bbox_clip_border={self.bbox_clip_border})'
818
- return repr_str
819
-
820
-
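
A condensed sketch of how ``_get_crop_size`` above maps each ``crop_type`` to absolute pixels (image and crop sizes are hypothetical):

import numpy as np

h, w = 600, 800
crop_size = (0.5, 0.7)

# 'relative': fixed fraction of the input size
crop_h, crop_w = int(h * crop_size[0] + 0.5), int(w * crop_size[1] + 0.5)

# 'relative_range': fractions sampled uniformly from [crop_size, 1]
cs = np.asarray(crop_size, dtype=np.float32)
rh, rw = cs + np.random.rand(2) * (1 - cs)
crop_h, crop_w = int(h * rh + 0.5), int(w * rw + 0.5)

# 'absolute': pixel sizes, clipped to the image
crop_h, crop_w = min(384, h), min(512, w)
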
821
- @PIPELINES.register_module()
822
- class SegRescale(object):
823
- """Rescale semantic segmentation maps.
824
-
825
- Args:
826
- scale_factor (float): The scale factor of the final output.
827
- backend (str): Image rescale backend, choices are 'cv2' and 'pillow'.
828
- These two backends generate slightly different results. Defaults
829
- to 'cv2'.
830
- """
831
-
832
- def __init__(self, scale_factor=1, backend='cv2'):
833
- self.scale_factor = scale_factor
834
- self.backend = backend
835
-
836
- def __call__(self, results):
837
- """Call function to scale the semantic segmentation map.
838
-
839
- Args:
840
- results (dict): Result dict from loading pipeline.
841
-
842
- Returns:
843
- dict: Result dict with semantic segmentation map scaled.
844
- """
845
-
846
- for key in results.get('seg_fields', []):
847
- if self.scale_factor != 1:
848
- results[key] = mmcv.imrescale(
849
- results[key],
850
- self.scale_factor,
851
- interpolation='nearest',
852
- backend=self.backend)
853
- return results
854
-
855
- def __repr__(self):
856
- return self.__class__.__name__ + f'(scale_factor={self.scale_factor})'
857
-
858
-
859
- @PIPELINES.register_module()
860
- class PhotoMetricDistortion(object):
861
- """Apply photometric distortion to image sequentially, every transformation
862
- is applied with a probability of 0.5. The position of random contrast is in
863
- second or second to last.
864
-
865
- 1. random brightness
866
- 2. random contrast (mode 0)
867
- 3. convert color from BGR to HSV
868
- 4. random saturation
869
- 5. random hue
870
- 6. convert color from HSV to BGR
871
- 7. random contrast (mode 1)
872
- 8. randomly swap channels
873
-
874
- Args:
875
- brightness_delta (int): delta of brightness.
876
- contrast_range (tuple): range of contrast.
877
- saturation_range (tuple): range of saturation.
878
- hue_delta (int): delta of hue.
879
- """
880
-
881
- def __init__(self,
882
- brightness_delta=32,
883
- contrast_range=(0.5, 1.5),
884
- saturation_range=(0.5, 1.5),
885
- hue_delta=18):
886
- self.brightness_delta = brightness_delta
887
- self.contrast_lower, self.contrast_upper = contrast_range
888
- self.saturation_lower, self.saturation_upper = saturation_range
889
- self.hue_delta = hue_delta
890
-
891
- def __call__(self, results):
892
- """Call function to perform photometric distortion on images.
893
-
894
- Args:
895
- results (dict): Result dict from loading pipeline.
896
-
897
- Returns:
898
- dict: Result dict with images distorted.
899
- """
900
-
901
- if 'img_fields' in results:
902
- assert results['img_fields'] == ['img'], \
903
- 'Only single img_fields is allowed'
904
- img = results['img']
905
- assert img.dtype == np.float32, \
906
- 'PhotoMetricDistortion needs the input image of dtype np.float32,'\
907
- ' please set "to_float32=True" in "LoadImageFromFile" pipeline'
908
- # random brightness
909
- if random.randint(2):
910
- delta = random.uniform(-self.brightness_delta,
911
- self.brightness_delta)
912
- img += delta
913
-
914
- # mode == 0 --> do random contrast first
915
- # mode == 1 --> do random contrast last
916
- mode = random.randint(2)
917
- if mode == 1:
918
- if random.randint(2):
919
- alpha = random.uniform(self.contrast_lower,
920
- self.contrast_upper)
921
- img *= alpha
922
-
923
- # convert color from BGR to HSV
924
- img = mmcv.bgr2hsv(img)
925
-
926
- # random saturation
927
- if random.randint(2):
928
- img[..., 1] *= random.uniform(self.saturation_lower,
929
- self.saturation_upper)
930
-
931
- # random hue
932
- if random.randint(2):
933
- img[..., 0] += random.uniform(-self.hue_delta, self.hue_delta)
934
- img[..., 0][img[..., 0] > 360] -= 360
935
- img[..., 0][img[..., 0] < 0] += 360
936
-
937
- # convert color from HSV to BGR
938
- img = mmcv.hsv2bgr(img)
939
-
940
- # random contrast
941
- if mode == 0:
942
- if random.randint(2):
943
- alpha = random.uniform(self.contrast_lower,
944
- self.contrast_upper)
945
- img *= alpha
946
-
947
- # randomly swap channels
948
- if random.randint(2):
949
- img = img[..., random.permutation(3)]
950
-
951
- results['img'] = img
952
- return results
953
-
954
- def __repr__(self):
955
- repr_str = self.__class__.__name__
956
- repr_str += f'(\nbrightness_delta={self.brightness_delta},\n'
957
- repr_str += 'contrast_range='
958
- repr_str += f'{(self.contrast_lower, self.contrast_upper)},\n'
959
- repr_str += 'saturation_range='
960
- repr_str += f'{(self.saturation_lower, self.saturation_upper)},\n'
961
- repr_str += f'hue_delta={self.hue_delta})'
962
- return repr_str
963
-
964
-
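
Each step above is gated by the same coin flip; a minimal sketch of the pattern for brightness and contrast (ranges mirror the defaults; illustrative only):

import numpy as np
from numpy import random

img = np.random.rand(4, 4, 3).astype(np.float32) * 255
if random.randint(2):  # applied with probability 0.5
    img += random.uniform(-32, 32)   # random brightness
if random.randint(2):
    img *= random.uniform(0.5, 1.5)  # random contrast
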
965
- @PIPELINES.register_module()
966
- class Expand(object):
967
- """Random expand the image & bboxes.
968
-
969
- Randomly place the original image on a canvas of 'ratio' x original image
970
- size filled with mean values. The ratio is in the range of ratio_range.
971
-
972
- Args:
973
- mean (tuple): mean value of dataset.
974
- to_rgb (bool): whether to convert the order of mean to align with RGB.
975
- ratio_range (tuple): range of expand ratio.
976
- prob (float): probability of applying this transformation.
977
- """
978
-
979
- def __init__(self,
980
- mean=(0, 0, 0),
981
- to_rgb=True,
982
- ratio_range=(1, 4),
983
- seg_ignore_label=None,
984
- prob=0.5):
985
- self.to_rgb = to_rgb
986
- self.ratio_range = ratio_range
987
- if to_rgb:
988
- self.mean = mean[::-1]
989
- else:
990
- self.mean = mean
991
- self.min_ratio, self.max_ratio = ratio_range
992
- self.seg_ignore_label = seg_ignore_label
993
- self.prob = prob
994
-
995
- def __call__(self, results):
996
- """Call function to expand images, bounding boxes.
997
-
998
- Args:
999
- results (dict): Result dict from loading pipeline.
1000
-
1001
- Returns:
1002
- dict: Result dict with images, bounding boxes expanded
1003
- """
1004
-
1005
- if random.uniform(0, 1) > self.prob:
1006
- return results
1007
-
1008
- if 'img_fields' in results:
1009
- assert results['img_fields'] == ['img'], \
1010
- 'Only single img_fields is allowed'
1011
- img = results['img']
1012
-
1013
- h, w, c = img.shape
1014
- ratio = random.uniform(self.min_ratio, self.max_ratio)
1015
- # speedup expand when meets large image
1016
- if np.all(self.mean == self.mean[0]):
1017
- expand_img = np.empty((int(h * ratio), int(w * ratio), c),
1018
- img.dtype)
1019
- expand_img.fill(self.mean[0])
1020
- else:
1021
- expand_img = np.full((int(h * ratio), int(w * ratio), c),
1022
- self.mean,
1023
- dtype=img.dtype)
1024
- left = int(random.uniform(0, w * ratio - w))
1025
- top = int(random.uniform(0, h * ratio - h))
1026
- expand_img[top:top + h, left:left + w] = img
1027
-
1028
- results['img'] = expand_img
1029
- # expand bboxes
1030
- for key in results.get('bbox_fields', []):
1031
- results[key] = results[key] + np.tile(
1032
- (left, top), 2).astype(results[key].dtype)
1033
-
1034
- # expand masks
1035
- for key in results.get('mask_fields', []):
1036
- results[key] = results[key].expand(
1037
- int(h * ratio), int(w * ratio), top, left)
1038
-
1039
- # expand segs
1040
- for key in results.get('seg_fields', []):
1041
- gt_seg = results[key]
1042
- expand_gt_seg = np.full((int(h * ratio), int(w * ratio)),
1043
- self.seg_ignore_label,
1044
- dtype=gt_seg.dtype)
1045
- expand_gt_seg[top:top + h, left:left + w] = gt_seg
1046
- results[key] = expand_gt_seg
1047
- return results
1048
-
1049
- def __repr__(self):
1050
- repr_str = self.__class__.__name__
1051
- repr_str += f'(mean={self.mean}, to_rgb={self.to_rgb}, '
1052
- repr_str += f'ratio_range={self.ratio_range}, '
1053
- repr_str += f'seg_ignore_label={self.seg_ignore_label})'
1054
- return repr_str
1055
-
1056
-
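
A numpy sketch of the expansion above: the image is pasted at a random offset into a ratio-times larger mean-filled canvas, and the boxes shift by the same offset (values are hypothetical):

import numpy as np
from numpy import random

img = np.full((100, 200, 3), 50, dtype=np.uint8)
bboxes = np.array([[10., 10., 50., 60.]])
h, w, c = img.shape
ratio = random.uniform(1, 4)
canvas = np.full((int(h * ratio), int(w * ratio), c), 128, dtype=img.dtype)
left = int(random.uniform(0, w * ratio - w))
top = int(random.uniform(0, h * ratio - h))
canvas[top:top + h, left:left + w] = img
bboxes = bboxes + np.tile((left, top), 2)  # boxes move with the paste
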
1057
- @PIPELINES.register_module()
1058
- class MinIoURandomCrop(object):
1059
- """Random crop the image & bboxes, the cropped patches have minimum IoU
1060
- requirement with original image & bboxes, the IoU threshold is randomly
1061
- selected from min_ious.
1062
-
1063
- Args:
1064
- min_ious (tuple): minimum IoU threshold for all intersections with
1065
- bounding boxes
1066
- min_crop_size (float): minimum crop's size (i.e. h,w := a*h, a*w,
1067
- where a >= min_crop_size).
1068
- bbox_clip_border (bool, optional): Whether to clip the objects outside
1069
- the border of the image. Defaults to True.
1070
-
1071
- Note:
1072
- The keys for bboxes, labels and masks should be paired. That is, \
1073
- `gt_bboxes` corresponds to `gt_labels` and `gt_masks`, and \
1074
- `gt_bboxes_ignore` to `gt_labels_ignore` and `gt_masks_ignore`.
1075
- """
1076
-
1077
- def __init__(self,
1078
- min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
1079
- min_crop_size=0.3,
1080
- bbox_clip_border=True):
1081
- # 1: return ori img
1082
- self.min_ious = min_ious
1083
- self.sample_mode = (1, *min_ious, 0)
1084
- self.min_crop_size = min_crop_size
1085
- self.bbox_clip_border = bbox_clip_border
1086
- self.bbox2label = {
1087
- 'gt_bboxes': 'gt_labels',
1088
- 'gt_bboxes_ignore': 'gt_labels_ignore'
1089
- }
1090
- self.bbox2mask = {
1091
- 'gt_bboxes': 'gt_masks',
1092
- 'gt_bboxes_ignore': 'gt_masks_ignore'
1093
- }
1094
-
1095
- def __call__(self, results):
1096
- """Call function to crop images and bounding boxes with minimum IoU
1097
- constraint.
1098
-
1099
- Args:
1100
- results (dict): Result dict from loading pipeline.
1101
-
1102
- Returns:
1103
- dict: Result dict with images and bounding boxes cropped, \
1104
- 'img_shape' key is updated.
1105
- """
1106
-
1107
- if 'img_fields' in results:
1108
- assert results['img_fields'] == ['img'], \
1109
- 'Only single img_fields is allowed'
1110
- img = results['img']
1111
- assert 'bbox_fields' in results
1112
- boxes = [results[key] for key in results['bbox_fields']]
1113
- boxes = np.concatenate(boxes, 0)
1114
- h, w, c = img.shape
1115
- while True:
1116
- mode = random.choice(self.sample_mode)
1117
- self.mode = mode
1118
- if mode == 1:
1119
- return results
1120
-
1121
- min_iou = mode
1122
- for i in range(50):
1123
- new_w = random.uniform(self.min_crop_size * w, w)
1124
- new_h = random.uniform(self.min_crop_size * h, h)
1125
-
1126
- # h / w in [0.5, 2]
1127
- if new_h / new_w < 0.5 or new_h / new_w > 2:
1128
- continue
1129
-
1130
- left = random.uniform(w - new_w)
1131
- top = random.uniform(h - new_h)
1132
-
1133
- patch = np.array(
1134
- (int(left), int(top), int(left + new_w), int(top + new_h)))
1135
- # Line or point crop is not allowed
1136
- if patch[2] == patch[0] or patch[3] == patch[1]:
1137
- continue
1138
- overlaps = bbox_overlaps(
1139
- patch.reshape(-1, 4), boxes.reshape(-1, 4)).reshape(-1)
1140
- if len(overlaps) > 0 and overlaps.min() < min_iou:
1141
- continue
1142
-
1143
- # center of boxes should inside the crop img
1144
- # only adjust boxes and instance masks when the gt is not empty
1145
- if len(overlaps) > 0:
1146
- # adjust boxes
1147
- def is_center_of_bboxes_in_patch(boxes, patch):
1148
- center = (boxes[:, :2] + boxes[:, 2:]) / 2
1149
- mask = ((center[:, 0] > patch[0]) *
1150
- (center[:, 1] > patch[1]) *
1151
- (center[:, 0] < patch[2]) *
1152
- (center[:, 1] < patch[3]))
1153
- return mask
1154
-
1155
- mask = is_center_of_bboxes_in_patch(boxes, patch)
1156
- if not mask.any():
1157
- continue
1158
- for key in results.get('bbox_fields', []):
1159
- boxes = results[key].copy()
1160
- mask = is_center_of_bboxes_in_patch(boxes, patch)
1161
- boxes = boxes[mask]
1162
- if self.bbox_clip_border:
1163
- boxes[:, 2:] = boxes[:, 2:].clip(max=patch[2:])
1164
- boxes[:, :2] = boxes[:, :2].clip(min=patch[:2])
1165
- boxes -= np.tile(patch[:2], 2)
1166
-
1167
- results[key] = boxes
1168
- # labels
1169
- label_key = self.bbox2label.get(key)
1170
- if label_key in results:
1171
- results[label_key] = results[label_key][mask]
1172
-
1173
- # mask fields
1174
- mask_key = self.bbox2mask.get(key)
1175
- if mask_key in results:
1176
- results[mask_key] = results[mask_key][
1177
- mask.nonzero()[0]].crop(patch)
1178
- # adjust the img no matter whether the gt is empty before crop
1179
- img = img[patch[1]:patch[3], patch[0]:patch[2]]
1180
- results['img'] = img
1181
- results['img_shape'] = img.shape
1182
-
1183
- # seg fields
1184
- for key in results.get('seg_fields', []):
1185
- results[key] = results[key][patch[1]:patch[3],
1186
- patch[0]:patch[2]]
1187
- return results
1188
-
1189
- def __repr__(self):
1190
- repr_str = self.__class__.__name__
1191
- repr_str += f'(min_ious={self.min_ious}, '
1192
- repr_str += f'min_crop_size={self.min_crop_size}, '
1193
- repr_str += f'bbox_clip_border={self.bbox_clip_border})'
1194
- return repr_str
1195
-
1196
-
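
The key filtering step above keeps only boxes whose centers fall inside the sampled patch, then shifts them into patch coordinates; a standalone sketch with made-up values:

import numpy as np

patch = np.array([20, 20, 120, 120])      # (x1, y1, x2, y2)
boxes = np.array([[30., 30., 60., 60.],   # center (45, 45) -> kept
                  [0., 0., 30., 30.]])    # center (15, 15) -> dropped
centers = (boxes[:, :2] + boxes[:, 2:]) / 2
mask = ((centers[:, 0] > patch[0]) & (centers[:, 1] > patch[1]) &
        (centers[:, 0] < patch[2]) & (centers[:, 1] < patch[3]))
boxes = boxes[mask] - np.tile(patch[:2], 2)  # shift into patch coordinates
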
1197
- @PIPELINES.register_module()
1198
- class Corrupt(object):
1199
- """Corruption augmentation.
1200
-
1201
- Corruption transforms implemented based on
1202
- `imagecorruptions <https://github.com/bethgelab/imagecorruptions>`_.
1203
-
1204
- Args:
1205
- corruption (str): Corruption name.
1206
- severity (int, optional): The severity of corruption. Default: 1.
1207
- """
1208
-
1209
- def __init__(self, corruption, severity=1):
1210
- self.corruption = corruption
1211
- self.severity = severity
1212
-
1213
- def __call__(self, results):
1214
- """Call function to corrupt image.
1215
-
1216
- Args:
1217
- results (dict): Result dict from loading pipeline.
1218
-
1219
- Returns:
1220
- dict: Result dict with images corrupted.
1221
- """
1222
-
1223
- if corrupt is None:
1224
- raise RuntimeError('imagecorruptions is not installed')
1225
- if 'img_fields' in results:
1226
- assert results['img_fields'] == ['img'], \
1227
- 'Only single img_fields is allowed'
1228
- results['img'] = corrupt(
1229
- results['img'].astype(np.uint8),
1230
- corruption_name=self.corruption,
1231
- severity=self.severity)
1232
- return results
1233
-
1234
- def __repr__(self):
1235
- repr_str = self.__class__.__name__
1236
- repr_str += f'(corruption={self.corruption}, '
1237
- repr_str += f'severity={self.severity})'
1238
- return repr_str
1239
-
1240
-
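
A minimal usage sketch of the optional ``imagecorruptions`` dependency used above (assumes the package is installed; the corruption name is one example from its catalogue):

import numpy as np
from imagecorruptions import corrupt

img = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)
out = corrupt(img, corruption_name='gaussian_noise', severity=1)
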
1241
- @PIPELINES.register_module()
1242
- class Albu(object):
1243
- """Albumentation augmentation.
1244
-
1245
- Adds custom transformations from the Albumentations library.
1246
- Please visit `https://albumentations.readthedocs.io`
1247
- for more information.
1248
-
1249
- An example of ``transforms`` is as follows:
1250
-
1251
- .. code-block::
1252
-
1253
- [
1254
- dict(
1255
- type='ShiftScaleRotate',
1256
- shift_limit=0.0625,
1257
- scale_limit=0.0,
1258
- rotate_limit=0,
1259
- interpolation=1,
1260
- p=0.5),
1261
- dict(
1262
- type='RandomBrightnessContrast',
1263
- brightness_limit=[0.1, 0.3],
1264
- contrast_limit=[0.1, 0.3],
1265
- p=0.2),
1266
- dict(type='ChannelShuffle', p=0.1),
1267
- dict(
1268
- type='OneOf',
1269
- transforms=[
1270
- dict(type='Blur', blur_limit=3, p=1.0),
1271
- dict(type='MedianBlur', blur_limit=3, p=1.0)
1272
- ],
1273
- p=0.1),
1274
- ]
1275
-
1276
- Args:
1277
- transforms (list[dict]): A list of albu transformations
1278
- bbox_params (dict): Bbox_params for albumentation `Compose`
1279
- keymap (dict): Contains {'input key':'albumentation-style key'}
1280
- skip_img_without_anno (bool): Whether to skip the image if no
1281
- annotations are left after augmentation.
1282
- """
1283
-
1284
- def __init__(self,
1285
- transforms,
1286
- bbox_params=None,
1287
- keymap=None,
1288
- update_pad_shape=False,
1289
- skip_img_without_anno=False):
1290
- if Compose is None:
1291
- raise RuntimeError('albumentations is not installed')
1292
-
1293
- # Args will be modified later, copying it will be safer
1294
- transforms = copy.deepcopy(transforms)
1295
- if bbox_params is not None:
1296
- bbox_params = copy.deepcopy(bbox_params)
1297
- if keymap is not None:
1298
- keymap = copy.deepcopy(keymap)
1299
- self.transforms = transforms
1300
- self.filter_lost_elements = False
1301
- self.update_pad_shape = update_pad_shape
1302
- self.skip_img_without_anno = skip_img_without_anno
1303
-
1304
- # A simple workaround to remove masks without boxes
1305
- if (isinstance(bbox_params, dict) and 'label_fields' in bbox_params
1306
- and 'filter_lost_elements' in bbox_params):
1307
- self.filter_lost_elements = True
1308
- self.origin_label_fields = bbox_params['label_fields']
1309
- bbox_params['label_fields'] = ['idx_mapper']
1310
- del bbox_params['filter_lost_elements']
1311
-
1312
- self.bbox_params = (
1313
- self.albu_builder(bbox_params) if bbox_params else None)
1314
- self.aug = Compose([self.albu_builder(t) for t in self.transforms],
1315
- bbox_params=self.bbox_params)
1316
-
1317
- if not keymap:
1318
- self.keymap_to_albu = {
1319
- 'img': 'image',
1320
- 'gt_masks': 'masks',
1321
- 'gt_bboxes': 'bboxes'
1322
- }
1323
- else:
1324
- self.keymap_to_albu = keymap
1325
- self.keymap_back = {v: k for k, v in self.keymap_to_albu.items()}
1326
-
1327
- def albu_builder(self, cfg):
1328
- """Import a module from albumentations.
1329
-
1330
- It inherits some of :func:`build_from_cfg` logic.
1331
-
1332
- Args:
1333
- cfg (dict): Config dict. It should at least contain the key "type".
1334
-
1335
- Returns:
1336
- obj: The constructed object.
1337
- """
1338
-
1339
- assert isinstance(cfg, dict) and 'type' in cfg
1340
- args = cfg.copy()
1341
-
1342
- obj_type = args.pop('type')
1343
- if mmcv.is_str(obj_type):
1344
- if albumentations is None:
1345
- raise RuntimeError('albumentations is not installed')
1346
- obj_cls = getattr(albumentations, obj_type)
1347
- elif inspect.isclass(obj_type):
1348
- obj_cls = obj_type
1349
- else:
1350
- raise TypeError(
1351
- f'type must be a str or valid type, but got {type(obj_type)}')
1352
-
1353
- if 'transforms' in args:
1354
- args['transforms'] = [
1355
- self.albu_builder(transform)
1356
- for transform in args['transforms']
1357
- ]
1358
-
1359
- return obj_cls(**args)
1360
-
1361
- @staticmethod
1362
- def mapper(d, keymap):
1363
- """Dictionary mapper. Renames keys according to keymap provided.
1364
-
1365
- Args:
1366
- d (dict): old dict
1367
- keymap (dict): {'old_key':'new_key'}
1368
- Returns:
1369
- dict: new dict.
1370
- """
1371
-
1372
- updated_dict = {}
1373
- for k, v in d.items():
1374
- new_k = keymap.get(k, k)
1375
- updated_dict[new_k] = v
1376
- return updated_dict
1377
-
1378
- def __call__(self, results):
1379
- # dict to albumentations format
1380
- results = self.mapper(results, self.keymap_to_albu)
1381
- # TODO: add bbox_fields
1382
- if 'bboxes' in results:
1383
- # to list of boxes
1384
- if isinstance(results['bboxes'], np.ndarray):
1385
- results['bboxes'] = [x for x in results['bboxes']]
1386
- # add pseudo-field for filtration
1387
- if self.filter_lost_elements:
1388
- results['idx_mapper'] = np.arange(len(results['bboxes']))
1389
-
1390
- # TODO: Support mask structure in albu
1391
- if 'masks' in results:
1392
- if isinstance(results['masks'], PolygonMasks):
1393
- raise NotImplementedError(
1394
- 'Albu only supports BitMap masks now')
1395
- ori_masks = results['masks']
1396
- if tuple(int(p) for p in albumentations.__version__.split('.')[:2]) < (0, 5):
1397
- results['masks'] = results['masks'].masks
1398
- else:
1399
- results['masks'] = [mask for mask in results['masks'].masks]
1400
-
1401
- results = self.aug(**results)
1402
-
1403
- if 'bboxes' in results:
1404
- if isinstance(results['bboxes'], list):
1405
- results['bboxes'] = np.array(
1406
- results['bboxes'], dtype=np.float32)
1407
- results['bboxes'] = results['bboxes'].reshape(-1, 4)
1408
-
1409
- # filter label_fields
1410
- if self.filter_lost_elements:
1411
-
1412
- for label in self.origin_label_fields:
1413
- results[label] = np.array(
1414
- [results[label][i] for i in results['idx_mapper']])
1415
- if 'masks' in results:
1416
- results['masks'] = np.array(
1417
- [results['masks'][i] for i in results['idx_mapper']])
1418
- results['masks'] = ori_masks.__class__(
1419
- results['masks'], results['image'].shape[0],
1420
- results['image'].shape[1])
1421
-
1422
- if (not len(results['idx_mapper'])
1423
- and self.skip_img_without_anno):
1424
- return None
1425
-
1426
- if 'gt_labels' in results:
1427
- if isinstance(results['gt_labels'], list):
1428
- results['gt_labels'] = np.array(results['gt_labels'])
1429
- results['gt_labels'] = results['gt_labels'].astype(np.int64)
1430
-
1431
- # back to the original format
1432
- results = self.mapper(results, self.keymap_back)
1433
-
1434
- # update final shape
1435
- if self.update_pad_shape:
1436
- results['pad_shape'] = results['img'].shape
1437
-
1438
- return results
1439
-
1440
- def __repr__(self):
1441
- repr_str = self.__class__.__name__ + f'(transforms={self.transforms})'
1442
- return repr_str
1443
-
1444
-
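
A sketch of wiring the class above into a training pipeline config, following the usual mmdet convention of passing an Albumentations ``BboxParams`` dict plus the default keymap (field values here are illustrative and may vary by config):

albu_transforms = [
    dict(type='RandomBrightnessContrast',
         brightness_limit=[0.1, 0.3], contrast_limit=[0.1, 0.3], p=0.2),
    dict(type='ChannelShuffle', p=0.1),
]
albu_step = dict(
    type='Albu',
    transforms=albu_transforms,
    bbox_params=dict(
        type='BboxParams',
        format='pascal_voc',
        label_fields=['gt_labels'],
        filter_lost_elements=True),
    keymap=dict(img='image', gt_masks='masks', gt_bboxes='bboxes'),
    skip_img_without_anno=True)
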
1445
- @PIPELINES.register_module()
1446
- class RandomCenterCropPad(object):
1447
- """Random center crop and random around padding for CornerNet.
1448
-
1449
- This operation generates a randomly cropped image from the original image and
1450
- pads it simultaneously. Different from :class:`RandomCrop`, the output
1451
- shape may not equal to ``crop_size`` strictly. We choose a random value
1452
- from ``ratios`` and the output shape could be larger or smaller than
1453
- ``crop_size``. The padding operation is also different from :class:`Pad`,
1454
- here we use around padding instead of right-bottom padding.
1455
-
1456
- The relation between output image (padding image) and original image:
1457
-
1458
- .. code:: text
1459
-
1460
- output image
1461
-
1462
- +----------------------------+
1463
- | padded area |
1464
- +------|----------------------------|----------+
1465
- | | cropped area | |
1466
- | | +---------------+ | |
1467
- | | | . center | | | original image
1468
- | | | range | | |
1469
- | | +---------------+ | |
1470
- +------|----------------------------|----------+
1471
- | padded area |
1472
- +----------------------------+
1473
-
1474
- There are 5 main areas in the figure:
1475
-
1476
- - output image: output image of this operation, also called padding
1477
- image in following instruction.
1478
- - original image: input image of this operation.
1479
- - padded area: non-intersect area of output image and original image.
1480
- - cropped area: the overlap of output image and original image.
1481
- - center range: a smaller area the random center is chosen from.
1482
- center range is computed from ``border`` and the original image's shape
1483
- to keep the random center from being too close to the image's border.
1484
-
1485
- This operation also acts differently in train and test modes; the
1486
- pipelines are summarized below.
1487
-
1488
- Train pipeline:
1489
-
1490
- 1. Choose a ``random_ratio`` from ``ratios``, the shape of padding image
1491
- will be ``random_ratio * crop_size``.
1492
- 2. Choose a ``random_center`` in center range.
1493
- 3. Generate the padding image with its center matched to ``random_center``.
1494
- 4. Initialize the padding image with pixel values equal to ``mean``.
1495
- 5. Copy the cropped area to padding image.
1496
- 6. Refine annotations.
1497
-
1498
- Test pipeline:
1499
-
1500
- 1. Compute output shape according to ``test_pad_mode``.
1501
- 2. Generate the padding image with its center matched to the
1502
- original image center.
1503
- 3. Initialize the padding image with pixel values equal to ``mean``.
1504
- 4. Copy the ``cropped area`` to padding image.
1505
-
1506
- Args:
1507
- crop_size (tuple | None): expected size after crop, final size will
1508
- be computed according to ratio. Requires (h, w) in train mode, and
1509
- None in test mode.
1510
- ratios (tuple): randomly select a ratio from the tuple and crop image to
1511
- (crop_size[0] * ratio) * (crop_size[1] * ratio).
1512
- Only available in train mode.
1513
- border (int): max distance from center select area to image border.
1514
- Only available in train mode.
1515
- mean (sequence): Mean values of 3 channels.
1516
- std (sequence): Std values of 3 channels.
1517
- to_rgb (bool): Whether to convert the image from BGR to RGB.
1518
- test_mode (bool): whether to involve random variables in the transform.
1519
- In train mode, crop_size is fixed, and center coords and ratio are
1520
- randomly selected from predefined lists. In test mode, crop_size is
1521
- the image's original shape, and center coords and ratio are fixed.
1522
- test_pad_mode (tuple): padding method and padding shape value, only
1523
- available in test mode. Default is using 'logical_or' with
1524
- 127 as padding shape value.
1525
-
1526
- - 'logical_or': final_shape = input_shape | padding_shape_value
1527
- - 'size_divisor': final_shape = int(
1528
- ceil(input_shape / padding_shape_value) * padding_shape_value)
1529
- bbox_clip_border (bool, optional): Whether to clip the objects outside
1530
- the border of the image. Defaults to True.
1531
- """
1532
-
1533
- def __init__(self,
1534
- crop_size=None,
1535
- ratios=(0.9, 1.0, 1.1),
1536
- border=128,
1537
- mean=None,
1538
- std=None,
1539
- to_rgb=None,
1540
- test_mode=False,
1541
- test_pad_mode=('logical_or', 127),
1542
- bbox_clip_border=True):
1543
- if test_mode:
1544
- assert crop_size is None, 'crop_size must be None in test mode'
1545
- assert ratios is None, 'ratios must be None in test mode'
1546
- assert border is None, 'border must be None in test mode'
1547
- assert isinstance(test_pad_mode, (list, tuple))
1548
- assert test_pad_mode[0] in ['logical_or', 'size_divisor']
1549
- else:
1550
- assert isinstance(crop_size, (list, tuple))
1551
- assert crop_size[0] > 0 and crop_size[1] > 0, (
1552
- 'crop_size must > 0 in train mode')
1553
- assert isinstance(ratios, (list, tuple))
1554
- assert test_pad_mode is None, (
1555
- 'test_pad_mode must be None in train mode')
1556
-
1557
- self.crop_size = crop_size
1558
- self.ratios = ratios
1559
- self.border = border
1560
- # We do not set default value to mean, std and to_rgb because these
1561
- # hyper-parameters are easy to forget but could affect the performance.
1562
- # Please use the same setting as Normalize for performance assurance.
1563
- assert mean is not None and std is not None and to_rgb is not None
1564
- self.to_rgb = to_rgb
1565
- self.input_mean = mean
1566
- self.input_std = std
1567
- if to_rgb:
1568
- self.mean = mean[::-1]
1569
- self.std = std[::-1]
1570
- else:
1571
- self.mean = mean
1572
- self.std = std
1573
- self.test_mode = test_mode
1574
- self.test_pad_mode = test_pad_mode
1575
- self.bbox_clip_border = bbox_clip_border
1576
-
1577
- def _get_border(self, border, size):
1578
- """Get final border for the target size.
1579
-
1580
- This function generates a ``final_border`` according to image's shape.
1581
- The area between ``final_border`` and ``size - final_border`` is the
1582
- ``center range``. We randomly choose center from the ``center range``
1583
- to avoid our random center is too close to original image's border.
1584
- Also ``center range`` should be larger than 0.
1585
-
1586
- Args:
1587
- border (int): The initial border, default is 128.
1588
- size (int): The width or height of original image.
1589
- Returns:
1590
- int: The final border.
1591
- """
1592
- k = 2 * border / size
1593
- i = pow(2, np.ceil(np.log2(np.ceil(k))) + (k == int(k)))
1594
- return border // i
1595
-
1596
- def _filter_boxes(self, patch, boxes):
1597
- """Check whether the center of each box is in the patch.
1598
-
1599
- Args:
1600
- patch (list[int]): The cropped area, [left, top, right, bottom].
1601
- boxes (numpy array, (N x 4)): Ground truth boxes.
1602
-
1603
- Returns:
1604
- mask (numpy array, (N,)): Each box is inside or outside the patch.
1605
- """
1606
- center = (boxes[:, :2] + boxes[:, 2:]) / 2
1607
- mask = (center[:, 0] > patch[0]) * (center[:, 1] > patch[1]) * (
1608
- center[:, 0] < patch[2]) * (
1609
- center[:, 1] < patch[3])
1610
- return mask
1611
-
1612
- def _crop_image_and_paste(self, image, center, size):
1613
- """Crop image with a given center and size, then paste the cropped
1614
- image to a blank image with two centers align.
1615
-
1616
- This function is equivalent to generating a blank image with ``size``
1617
- as its shape. Then cover it on the original image with two centers (
1618
- the center of blank image and the random center of original image)
1619
- aligned. The overlap area is paste from the original image and the
1620
- outside area is filled with ``mean pixel``.
1621
-
1622
- Args:
1623
- image (np array, H x W x C): Original image.
1624
- center (list[int]): Target crop center coord.
1625
- size (list[int]): Target crop size. [target_h, target_w]
1626
-
1627
- Returns:
1628
- cropped_img (np array, target_h x target_w x C): Cropped image.
1629
- border (np array, 4): The distance of four border of
1630
- ``cropped_img`` to the original image area, [top, bottom,
1631
- left, right]
1632
- patch (list[int]): The cropped area, [left, top, right, bottom].
1633
- """
1634
- center_y, center_x = center
1635
- target_h, target_w = size
1636
- img_h, img_w, img_c = image.shape
1637
-
1638
- x0 = max(0, center_x - target_w // 2)
1639
- x1 = min(center_x + target_w // 2, img_w)
1640
- y0 = max(0, center_y - target_h // 2)
1641
- y1 = min(center_y + target_h // 2, img_h)
1642
- patch = np.array((int(x0), int(y0), int(x1), int(y1)))
1643
-
1644
- left, right = center_x - x0, x1 - center_x
1645
- top, bottom = center_y - y0, y1 - center_y
1646
-
1647
- cropped_center_y, cropped_center_x = target_h // 2, target_w // 2
1648
- cropped_img = np.zeros((target_h, target_w, img_c), dtype=image.dtype)
1649
- for i in range(img_c):
1650
- cropped_img[:, :, i] += self.mean[i]
1651
- y_slice = slice(cropped_center_y - top, cropped_center_y + bottom)
1652
- x_slice = slice(cropped_center_x - left, cropped_center_x + right)
1653
- cropped_img[y_slice, x_slice, :] = image[y0:y1, x0:x1, :]
1654
-
1655
- border = np.array([
1656
- cropped_center_y - top, cropped_center_y + bottom,
1657
- cropped_center_x - left, cropped_center_x + right
1658
- ],
1659
- dtype=np.float32)
1660
-
1661
- return cropped_img, border, patch
1662
-
1663
- def _train_aug(self, results):
1664
- """Random crop and around padding the original image.
1665
-
1666
- Args:
1667
- results (dict): Image infomations in the augment pipeline.
1668
-
1669
- Returns:
1670
- results (dict): The updated dict.
1671
- """
1672
- img = results['img']
1673
- h, w, c = img.shape
1674
- boxes = results['gt_bboxes']
1675
- while True:
1676
- scale = random.choice(self.ratios)
1677
- new_h = int(self.crop_size[0] * scale)
1678
- new_w = int(self.crop_size[1] * scale)
1679
- h_border = self._get_border(self.border, h)
1680
- w_border = self._get_border(self.border, w)
1681
-
1682
- for i in range(50):
1683
- center_x = random.randint(low=w_border, high=w - w_border)
1684
- center_y = random.randint(low=h_border, high=h - h_border)
1685
-
1686
- cropped_img, border, patch = self._crop_image_and_paste(
1687
- img, [center_y, center_x], [new_h, new_w])
1688
-
1689
- mask = self._filter_boxes(patch, boxes)
1690
- # if image do not have valid bbox, any crop patch is valid.
1691
- if not mask.any() and len(boxes) > 0:
1692
- continue
1693
-
1694
- results['img'] = cropped_img
1695
- results['img_shape'] = cropped_img.shape
1696
- results['pad_shape'] = cropped_img.shape
1697
-
1698
- x0, y0, x1, y1 = patch
1699
-
1700
- left_w, top_h = center_x - x0, center_y - y0
1701
- cropped_center_x, cropped_center_y = new_w // 2, new_h // 2
1702
-
1703
- # crop bboxes accordingly and clip to the image boundary
1704
- for key in results.get('bbox_fields', []):
1705
- mask = self._filter_boxes(patch, results[key])
1706
- bboxes = results[key][mask]
1707
- bboxes[:, 0:4:2] += cropped_center_x - left_w - x0
1708
- bboxes[:, 1:4:2] += cropped_center_y - top_h - y0
1709
- if self.bbox_clip_border:
1710
- bboxes[:, 0:4:2] = np.clip(bboxes[:, 0:4:2], 0, new_w)
1711
- bboxes[:, 1:4:2] = np.clip(bboxes[:, 1:4:2], 0, new_h)
1712
- keep = (bboxes[:, 2] > bboxes[:, 0]) & (
1713
- bboxes[:, 3] > bboxes[:, 1])
1714
- bboxes = bboxes[keep]
1715
- results[key] = bboxes
1716
- if key in ['gt_bboxes']:
1717
- if 'gt_labels' in results:
1718
- labels = results['gt_labels'][mask]
1719
- labels = labels[keep]
1720
- results['gt_labels'] = labels
1721
- if 'gt_masks' in results:
1722
- raise NotImplementedError(
1723
- 'RandomCenterCropPad only supports bbox.')
1724
-
1725
- # crop semantic seg
1726
- for key in results.get('seg_fields', []):
1727
- raise NotImplementedError(
1728
- 'RandomCenterCropPad only supports bbox.')
1729
- return results
1730
-
1731
- def _test_aug(self, results):
1732
- """Around padding the original image without cropping.
1733
-
1734
- The padding mode and value are from ``test_pad_mode``.
1735
-
1736
- Args:
1737
- results (dict): Image infomations in the augment pipeline.
1738
-
1739
- Returns:
1740
- results (dict): The updated dict.
1741
- """
1742
- img = results['img']
1743
- h, w, c = img.shape
1744
- results['img_shape'] = img.shape
1745
- if self.test_pad_mode[0] in ['logical_or']:
1746
- target_h = h | self.test_pad_mode[1]
1747
- target_w = w | self.test_pad_mode[1]
1748
- elif self.test_pad_mode[0] in ['size_divisor']:
1749
- divisor = self.test_pad_mode[1]
1750
- target_h = int(np.ceil(h / divisor)) * divisor
1751
- target_w = int(np.ceil(w / divisor)) * divisor
1752
- else:
1753
- raise NotImplementedError(
1754
- 'RandomCenterCropPad only support two testing pad mode:'
1755
- 'logical-or and size_divisor.')
1756
-
1757
- cropped_img, border, _ = self._crop_image_and_paste(
1758
- img, [h // 2, w // 2], [target_h, target_w])
1759
- results['img'] = cropped_img
1760
- results['pad_shape'] = cropped_img.shape
1761
- results['border'] = border
1762
- return results
1763
-
1764
- def __call__(self, results):
1765
- img = results['img']
1766
- assert img.dtype == np.float32, (
1767
- 'RandomCenterCropPad needs the input image of dtype np.float32,'
1768
- ' please set "to_float32=True" in "LoadImageFromFile" pipeline')
1769
- h, w, c = img.shape
1770
- assert c == len(self.mean)
1771
- if self.test_mode:
1772
- return self._test_aug(results)
1773
- else:
1774
- return self._train_aug(results)
1775
-
1776
- def __repr__(self):
1777
- repr_str = self.__class__.__name__
1778
- repr_str += f'(crop_size={self.crop_size}, '
1779
- repr_str += f'ratios={self.ratios}, '
1780
- repr_str += f'border={self.border}, '
1781
- repr_str += f'mean={self.input_mean}, '
1782
- repr_str += f'std={self.input_std}, '
1783
- repr_str += f'to_rgb={self.to_rgb}, '
1784
- repr_str += f'test_mode={self.test_mode}, '
1785
- repr_str += f'test_pad_mode={self.test_pad_mode}, '
1786
- repr_str += f'bbox_clip_border={self.bbox_clip_border})'
1787
- return repr_str
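
For orientation, a minimal train-mode pipeline entry using this transform might look like the sketch below. The crop size, ratios, and mean/std values are illustrative assumptions, not values taken from this repository; the mean/std must match whatever Normalize step the config actually uses.

    # Hypothetical mmdetection pipeline snippet; numbers are illustrative.
    train_pipeline = [
        dict(type='LoadImageFromFile', to_float32=True),
        dict(type='LoadAnnotations', with_bbox=True),
        dict(
            type='RandomCenterCropPad',
            crop_size=(511, 511),            # (h, w); required in train mode
            ratios=(0.9, 1.0, 1.1),          # output shape = ratio * crop_size
            border=128,                      # keeps random centers off the edges
            mean=[123.675, 116.28, 103.53],  # must match the Normalize step
            std=[58.395, 57.12, 57.375],
            to_rgb=True,
            test_mode=False,
            test_pad_mode=None),             # must be None in train mode
    ]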
-
-
- @PIPELINES.register_module()
- class CutOut(object):
-     """CutOut operation.
-
-     Randomly drop some regions of the image, as used in
-     `Cutout <https://arxiv.org/abs/1708.04552>`_.
-
-     Args:
-         n_holes (int | tuple[int, int]): Number of regions to be dropped.
-             If it is given as a tuple, the number of holes is randomly
-             selected from the closed interval [`n_holes[0]`, `n_holes[1]`].
-         cutout_shape (tuple[int, int] | list[tuple[int, int]]): The candidate
-             shape of dropped regions. It can be `tuple[int, int]` to use a
-             fixed cutout shape, or `list[tuple[int, int]]` to randomly choose
-             a shape from the list.
-         cutout_ratio (tuple[float, float] | list[tuple[float, float]]): The
-             candidate ratio of dropped regions. It can be
-             `tuple[float, float]` to use a fixed ratio or
-             `list[tuple[float, float]]` to randomly choose a ratio from the
-             list. Please note that `cutout_shape` and `cutout_ratio` cannot
-             both be given at the same time.
-         fill_in (tuple[float, float, float] | tuple[int, int, int]): The
-             pixel value used to fill the dropped regions. Default: (0, 0, 0).
-     """
-
-     def __init__(self,
-                  n_holes,
-                  cutout_shape=None,
-                  cutout_ratio=None,
-                  fill_in=(0, 0, 0)):
-
-         assert (cutout_shape is None) ^ (cutout_ratio is None), \
-             'Either cutout_shape or cutout_ratio should be specified.'
-         assert (isinstance(cutout_shape, (list, tuple))
-                 or isinstance(cutout_ratio, (list, tuple)))
-         if isinstance(n_holes, tuple):
-             assert len(n_holes) == 2 and 0 <= n_holes[0] < n_holes[1]
-         else:
-             n_holes = (n_holes, n_holes)
-         self.n_holes = n_holes
-         self.fill_in = fill_in
-         self.with_ratio = cutout_ratio is not None
-         self.candidates = cutout_ratio if self.with_ratio else cutout_shape
-         if not isinstance(self.candidates, list):
-             self.candidates = [self.candidates]
-
-     def __call__(self, results):
-         """Call function to drop some regions of the image."""
-         h, w, c = results['img'].shape
-         n_holes = np.random.randint(self.n_holes[0], self.n_holes[1] + 1)
-         for _ in range(n_holes):
-             x1 = np.random.randint(0, w)
-             y1 = np.random.randint(0, h)
-             index = np.random.randint(0, len(self.candidates))
-             if not self.with_ratio:
-                 cutout_w, cutout_h = self.candidates[index]
-             else:
-                 cutout_w = int(self.candidates[index][0] * w)
-                 cutout_h = int(self.candidates[index][1] * h)
-
-             x2 = np.clip(x1 + cutout_w, 0, w)
-             y2 = np.clip(y1 + cutout_h, 0, h)
-             results['img'][y1:y2, x1:x2, :] = self.fill_in
-
-         return results
-
-     def __repr__(self):
-         repr_str = self.__class__.__name__
-         repr_str += f'(n_holes={self.n_holes}, '
-         repr_str += (f'cutout_ratio={self.candidates}, ' if self.with_ratio
-                      else f'cutout_shape={self.candidates}, ')
-         repr_str += f'fill_in={self.fill_in})'
-         return repr_str
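
A quick standalone sanity check of CutOut; normally it runs inside a data pipeline, and the image shape and hole parameters below are illustrative assumptions:

    import numpy as np

    cutout = CutOut(n_holes=(1, 3), cutout_shape=(8, 8), fill_in=(0, 0, 0))
    results = {'img': np.ones((64, 64, 3), dtype=np.float32)}
    out = cutout(results)
    assert (out['img'] == 0).any()  # between 1 and 3 random 8x8 regions were zeroed
    print(cutout)
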
spaces/CofAI/chat.b4/g4f/Provider/Providers/Theb.py DELETED
@@ -1,28 +0,0 @@
- import os
- import json
- import time
- import subprocess
-
- from ...typing import sha256, Dict, get_type_hints
-
- url = 'https://theb.ai'
- model = ['gpt-3.5-turbo']
- supports_stream = True
- needs_auth = False
-
- def _create_completion(model: str, messages: list, stream: bool, **kwargs):
-
-     path = os.path.dirname(os.path.realpath(__file__))
-     config = json.dumps({
-         'messages': messages,
-         'model': model}, separators=(',', ':'))
-
-     cmd = ['python3', f'{path}/helpers/theb.py', config]
-
-     p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
-
-     for line in iter(p.stdout.readline, b''):
-         yield line.decode('utf-8')
-
- params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
-     '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
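
For context, the provider is consumed through this streaming generator; a direct call might look like the sketch below. This is an assumption for illustration, since g4f normally dispatches to providers through its ChatCompletion API rather than calling _create_completion directly:

    # Hypothetical direct use of the provider's generator.
    messages = [{'role': 'user', 'content': 'Hello'}]
    for chunk in _create_completion('gpt-3.5-turbo', messages, stream=True):
        print(chunk, end='')
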
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/attr/_config.py DELETED
@@ -1,31 +0,0 @@
- # SPDX-License-Identifier: MIT
-
-
- __all__ = ["set_run_validators", "get_run_validators"]
-
- _run_validators = True
-
-
- def set_run_validators(run):
-     """
-     Set whether or not validators are run. By default, they are run.
-
-     .. deprecated:: 21.3.0 It will not be removed, but it also will not be
-        moved to new ``attrs`` namespace. Use `attrs.validators.set_disabled()`
-        instead.
-     """
-     if not isinstance(run, bool):
-         raise TypeError("'run' must be bool.")
-     global _run_validators
-     _run_validators = run
-
-
- def get_run_validators():
-     """
-     Return whether or not validators are run.
-
-     .. deprecated:: 21.3.0 It will not be removed, but it also will not be
-        moved to new ``attrs`` namespace. Use `attrs.validators.get_disabled()`
-        instead.
-     """
-     return _run_validators
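
A short sketch of how this module-level switch behaves through the public attr API; the Point class is an illustrative assumption:

    import attr

    @attr.s
    class Point(object):
        x = attr.ib(validator=attr.validators.instance_of(int))

    attr.set_run_validators(False)  # deprecated spelling of validators.set_disabled()
    Point(x='not-an-int')           # no TypeError: validators are skipped
    attr.set_run_validators(True)   # re-enable for the rest of the process
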
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/dateutil/parser/_parser.py DELETED
@@ -1,1613 +0,0 @@
- # -*- coding: utf-8 -*-
- """
- This module offers a generic date/time string parser which is able to parse
- most known formats to represent a date and/or time.
-
- This module attempts to be forgiving with regard to unlikely input formats,
- returning a datetime object even for dates which are ambiguous. If an element
- of a date/time stamp is omitted, the following rules are applied:
-
- - If AM or PM is left unspecified, a 24-hour clock is assumed; however, an
-   hour on a 12-hour clock (``0 <= hour <= 12``) *must* be specified if AM
-   or PM is specified.
- - If a time zone is omitted, a timezone-naive datetime is returned.
-
- If any other elements are missing, they are taken from the
- :class:`datetime.datetime` object passed to the parameter ``default``. If this
- results in a day number exceeding the valid number of days per month, the
- value falls back to the end of the month.
-
- Additional resources about date/time string formats can be found below:
-
- - `A summary of the international standard date and time notation
-   <https://www.cl.cam.ac.uk/~mgk25/iso-time.html>`_
- - `W3C Date and Time Formats <https://www.w3.org/TR/NOTE-datetime>`_
- - `Time Formats (Planetary Rings Node) <https://pds-rings.seti.org:443/tools/time_formats.html>`_
- - `CPAN ParseDate module
-   <https://metacpan.org/pod/release/MUIR/Time-modules-2013.0912/lib/Time/ParseDate.pm>`_
- - `Java SimpleDateFormat Class
-   <https://docs.oracle.com/javase/6/docs/api/java/text/SimpleDateFormat.html>`_
- """
- from __future__ import unicode_literals
-
- import datetime
- import re
- import string
- import time
- import warnings
-
- from calendar import monthrange
- from io import StringIO
-
- import six
- from six import integer_types, text_type
-
- from decimal import Decimal
-
- from warnings import warn
-
- from .. import relativedelta
- from .. import tz
-
- __all__ = ["parse", "parserinfo", "ParserError"]
-
-
- # TODO: pandas.core.tools.datetimes imports this explicitly. Might be worth
- # making public and/or figuring out if there is something we can
- # take off their plate.
- class _timelex(object):
-     # Fractional seconds are sometimes split by a comma
-     _split_decimal = re.compile("([.,])")
-
-     def __init__(self, instream):
-         if isinstance(instream, (bytes, bytearray)):
-             instream = instream.decode()
-
-         if isinstance(instream, text_type):
-             instream = StringIO(instream)
-         elif getattr(instream, 'read', None) is None:
-             raise TypeError('Parser must be a string or character stream, not '
-                             '{itype}'.format(itype=instream.__class__.__name__))
-
-         self.instream = instream
-         self.charstack = []
-         self.tokenstack = []
-         self.eof = False
-
-     def get_token(self):
-         """
-         This function breaks the time string into lexical units (tokens),
-         which can be parsed by the parser. Lexical units are demarcated by
-         changes in the character set, so any continuous string of letters is
-         considered one unit, and any continuous string of numbers is
-         considered one unit.
-
-         The main complication arises from the fact that dots ('.') can be
-         used both as separators (e.g. "Sep.20.2009") and as decimal points
-         (e.g. "4:30:21.447"). As such, it is necessary to read the full
-         context of any dot-separated strings before breaking them into
-         tokens; thus, this function maintains a "token stack", for when the
-         ambiguous context demands that multiple tokens be parsed at once.
-         """
-         if self.tokenstack:
-             return self.tokenstack.pop(0)
-
-         seenletters = False
-         token = None
-         state = None
-
-         while not self.eof:
-             # We only realize that we've reached the end of a token when we
-             # find a character that's not part of the current token - since
-             # that character may be part of the next token, it's stored in the
-             # charstack.
-             if self.charstack:
-                 nextchar = self.charstack.pop(0)
-             else:
-                 nextchar = self.instream.read(1)
-                 while nextchar == '\x00':
-                     nextchar = self.instream.read(1)
-
-             if not nextchar:
-                 self.eof = True
-                 break
-             elif not state:
-                 # First character of the token - determines if we're starting
-                 # to parse a word, a number or something else.
-                 token = nextchar
-                 if self.isword(nextchar):
-                     state = 'a'
-                 elif self.isnum(nextchar):
-                     state = '0'
-                 elif self.isspace(nextchar):
-                     token = ' '
-                     break  # emit token
-                 else:
-                     break  # emit token
-             elif state == 'a':
-                 # If we've already started reading a word, we keep reading
-                 # letters until we find something that's not part of a word.
-                 seenletters = True
-                 if self.isword(nextchar):
-                     token += nextchar
-                 elif nextchar == '.':
-                     token += nextchar
-                     state = 'a.'
-                 else:
-                     self.charstack.append(nextchar)
-                     break  # emit token
-             elif state == '0':
-                 # If we've already started reading a number, we keep reading
-                 # numbers until we find something that doesn't fit.
-                 if self.isnum(nextchar):
-                     token += nextchar
-                 elif nextchar == '.' or (nextchar == ',' and len(token) >= 2):
-                     token += nextchar
-                     state = '0.'
-                 else:
-                     self.charstack.append(nextchar)
-                     break  # emit token
-             elif state == 'a.':
-                 # If we've seen some letters and a dot separator, continue
-                 # parsing, and the tokens will be broken up later.
-                 seenletters = True
-                 if nextchar == '.' or self.isword(nextchar):
-                     token += nextchar
-                 elif self.isnum(nextchar) and token[-1] == '.':
-                     token += nextchar
-                     state = '0.'
-                 else:
-                     self.charstack.append(nextchar)
-                     break  # emit token
-             elif state == '0.':
-                 # If we've seen at least one dot separator, keep going, we'll
-                 # break up the tokens later.
-                 if nextchar == '.' or self.isnum(nextchar):
-                     token += nextchar
-                 elif self.isword(nextchar) and token[-1] == '.':
-                     token += nextchar
-                     state = 'a.'
-                 else:
-                     self.charstack.append(nextchar)
-                     break  # emit token
-
-         if (state in ('a.', '0.') and (seenletters or token.count('.') > 1 or
-                                        token[-1] in '.,')):
-             l = self._split_decimal.split(token)
-             token = l[0]
-             for tok in l[1:]:
-                 if tok:
-                     self.tokenstack.append(tok)
-
-         if state == '0.' and token.count('.') == 0:
-             token = token.replace(',', '.')
-
-         return token
-
-     def __iter__(self):
-         return self
-
-     def __next__(self):
-         token = self.get_token()
-         if token is None:
-             raise StopIteration
-
-         return token
-
-     def next(self):
-         return self.__next__()  # Python 2.x support
-
-     @classmethod
-     def split(cls, s):
-         return list(cls(s))
-
-     @classmethod
-     def isword(cls, nextchar):
-         """ Whether or not the next character is part of a word """
-         return nextchar.isalpha()
-
-     @classmethod
-     def isnum(cls, nextchar):
-         """ Whether the next character is part of a number """
-         return nextchar.isdigit()
-
-     @classmethod
-     def isspace(cls, nextchar):
-         """ Whether the next character is whitespace """
-         return nextchar.isspace()
-
-
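To make the dot handling concrete, this is roughly what the lexer emits for a string that mixes dot separators with a decimal point; the sketch uses the private module path, and the expected output is shown as a comment worth verifying against a real dateutil install:

    from dateutil.parser._parser import _timelex  # private API, illustration only

    print(_timelex.split("Sep.20.2009 4:30:21.447"))
    # ['Sep', '.', '20', '.', '2009', ' ', '4', ':', '30', ':', '21.447']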
- class _resultbase(object):
-
-     def __init__(self):
-         for attr in self.__slots__:
-             setattr(self, attr, None)
-
-     def _repr(self, classname):
-         l = []
-         for attr in self.__slots__:
-             value = getattr(self, attr)
-             if value is not None:
-                 l.append("%s=%s" % (attr, repr(value)))
-         return "%s(%s)" % (classname, ", ".join(l))
-
-     def __len__(self):
-         return (sum(getattr(self, attr) is not None
-                     for attr in self.__slots__))
-
-     def __repr__(self):
-         return self._repr(self.__class__.__name__)
-
-
- class parserinfo(object):
-     """
-     Class which handles what inputs are accepted. Subclass this to customize
-     the language and acceptable values for each parameter.
-
-     :param dayfirst:
-         Whether to interpret the first value in an ambiguous 3-integer date
-         (e.g. 01/05/09) as the day (``True``) or month (``False``). If
-         ``yearfirst`` is set to ``True``, this distinguishes between YDM
-         and YMD. Default is ``False``.
-
-     :param yearfirst:
-         Whether to interpret the first value in an ambiguous 3-integer date
-         (e.g. 01/05/09) as the year. If ``True``, the first number is taken
-         to be the year, otherwise the last number is taken to be the year.
-         Default is ``False``.
-     """
-
-     # m from a.m/p.m, t from ISO T separator
-     JUMP = [" ", ".", ",", ";", "-", "/", "'",
-             "at", "on", "and", "ad", "m", "t", "of",
-             "st", "nd", "rd", "th"]
-
-     WEEKDAYS = [("Mon", "Monday"),
-                 ("Tue", "Tuesday"),     # TODO: "Tues"
-                 ("Wed", "Wednesday"),
-                 ("Thu", "Thursday"),    # TODO: "Thurs"
-                 ("Fri", "Friday"),
-                 ("Sat", "Saturday"),
-                 ("Sun", "Sunday")]
-     MONTHS = [("Jan", "January"),
-               ("Feb", "February"),      # TODO: "Febr"
-               ("Mar", "March"),
-               ("Apr", "April"),
-               ("May", "May"),
-               ("Jun", "June"),
-               ("Jul", "July"),
-               ("Aug", "August"),
-               ("Sep", "Sept", "September"),
-               ("Oct", "October"),
-               ("Nov", "November"),
-               ("Dec", "December")]
-     HMS = [("h", "hour", "hours"),
-            ("m", "minute", "minutes"),
-            ("s", "second", "seconds")]
-     AMPM = [("am", "a"),
-             ("pm", "p")]
-     UTCZONE = ["UTC", "GMT", "Z", "z"]
-     PERTAIN = ["of"]
-     TZOFFSET = {}
-     # TODO: ERA = ["AD", "BC", "CE", "BCE", "Stardate",
-     #              "Anno Domini", "Year of Our Lord"]
-
-     def __init__(self, dayfirst=False, yearfirst=False):
-         self._jump = self._convert(self.JUMP)
-         self._weekdays = self._convert(self.WEEKDAYS)
-         self._months = self._convert(self.MONTHS)
-         self._hms = self._convert(self.HMS)
-         self._ampm = self._convert(self.AMPM)
-         self._utczone = self._convert(self.UTCZONE)
-         self._pertain = self._convert(self.PERTAIN)
-
-         self.dayfirst = dayfirst
-         self.yearfirst = yearfirst
-
-         self._year = time.localtime().tm_year
-         self._century = self._year // 100 * 100
-
-     def _convert(self, lst):
-         dct = {}
-         for i, v in enumerate(lst):
-             if isinstance(v, tuple):
-                 for v in v:
-                     dct[v.lower()] = i
-             else:
-                 dct[v.lower()] = i
-         return dct
-
-     def jump(self, name):
-         return name.lower() in self._jump
-
-     def weekday(self, name):
-         try:
-             return self._weekdays[name.lower()]
-         except KeyError:
-             pass
-         return None
-
-     def month(self, name):
-         try:
-             return self._months[name.lower()] + 1
-         except KeyError:
-             pass
-         return None
-
-     def hms(self, name):
-         try:
-             return self._hms[name.lower()]
-         except KeyError:
-             return None
-
-     def ampm(self, name):
-         try:
-             return self._ampm[name.lower()]
-         except KeyError:
-             return None
-
-     def pertain(self, name):
-         return name.lower() in self._pertain
-
-     def utczone(self, name):
-         return name.lower() in self._utczone
-
-     def tzoffset(self, name):
-         if name in self._utczone:
-             return 0
-
-         return self.TZOFFSET.get(name)
-
-     def convertyear(self, year, century_specified=False):
-         """
-         Converts two-digit years to a year within the [-50, +49]
-         range of ``self._year`` (current local time)
-         """
-
-         # Function contract is that the year is always positive
-         assert year >= 0
-
-         if year < 100 and not century_specified:
-             # assume current century to start
-             year += self._century
-
-             if year >= self._year + 50:  # if too far in the future
-                 year -= 100
-             elif year < self._year - 50:  # if too far in the past
-                 year += 100
-
-         return year
-
-     def validate(self, res):
-         # move to info
-         if res.year is not None:
-             res.year = self.convertyear(res.year, res.century_specified)
-
-         if ((res.tzoffset == 0 and not res.tzname) or
-                 (res.tzname == 'Z' or res.tzname == 'z')):
-             res.tzname = "UTC"
-             res.tzoffset = 0
-         elif res.tzoffset != 0 and res.tzname and self.utczone(res.tzname):
-             res.tzoffset = 0
-         return True
-
-
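The effect of the two flags is easiest to see on a fully ambiguous date; these outputs follow dateutil's documented behavior:

    from dateutil import parser

    print(parser.parse("01/05/09"))                  # 2009-01-05 00:00:00 (month first)
    print(parser.parse("01/05/09", dayfirst=True))   # 2009-05-01 00:00:00
    print(parser.parse("01/05/09", yearfirst=True))  # 2001-05-09 00:00:00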
- class _ymd(list):
-     def __init__(self, *args, **kwargs):
-         super(self.__class__, self).__init__(*args, **kwargs)
-         self.century_specified = False
-         self.dstridx = None
-         self.mstridx = None
-         self.ystridx = None
-
-     @property
-     def has_year(self):
-         return self.ystridx is not None
-
-     @property
-     def has_month(self):
-         return self.mstridx is not None
-
-     @property
-     def has_day(self):
-         return self.dstridx is not None
-
-     def could_be_day(self, value):
-         if self.has_day:
-             return False
-         elif not self.has_month:
-             return 1 <= value <= 31
-         elif not self.has_year:
-             # Be permissive, assume a leap year
-             month = self[self.mstridx]
-             return 1 <= value <= monthrange(2000, month)[1]
-         else:
-             month = self[self.mstridx]
-             year = self[self.ystridx]
-             return 1 <= value <= monthrange(year, month)[1]
-
-     def append(self, val, label=None):
-         if hasattr(val, '__len__'):
-             if val.isdigit() and len(val) > 2:
-                 self.century_specified = True
-                 if label not in [None, 'Y']:  # pragma: no cover
-                     raise ValueError(label)
-                 label = 'Y'
-         elif val > 100:
-             self.century_specified = True
-             if label not in [None, 'Y']:  # pragma: no cover
-                 raise ValueError(label)
-             label = 'Y'
-
-         super(self.__class__, self).append(int(val))
-
-         if label == 'M':
-             if self.has_month:
-                 raise ValueError('Month is already set')
-             self.mstridx = len(self) - 1
-         elif label == 'D':
-             if self.has_day:
-                 raise ValueError('Day is already set')
-             self.dstridx = len(self) - 1
-         elif label == 'Y':
-             if self.has_year:
-                 raise ValueError('Year is already set')
-             self.ystridx = len(self) - 1
-
-     def _resolve_from_stridxs(self, strids):
-         """
-         Try to resolve the identities of year/month/day elements using
-         ystridx, mstridx, and dstridx, if enough of these are specified.
-         """
-         if len(self) == 3 and len(strids) == 2:
-             # we can back out the remaining stridx value
-             missing = [x for x in range(3) if x not in strids.values()]
-             key = [x for x in ['y', 'm', 'd'] if x not in strids]
-
-             assert len(missing) == len(key) == 1
-             key = key[0]
-             val = missing[0]
-             strids[key] = val
-
-         assert len(self) == len(strids)  # otherwise this should not be called
-         out = {key: self[strids[key]] for key in strids}
-         return (out.get('y'), out.get('m'), out.get('d'))
-
-     def resolve_ymd(self, yearfirst, dayfirst):
-         len_ymd = len(self)
-         year, month, day = (None, None, None)
-
-         strids = (('y', self.ystridx),
-                   ('m', self.mstridx),
-                   ('d', self.dstridx))
-
-         strids = {key: val for key, val in strids if val is not None}
-         if (len(self) == len(strids) > 0 or
-                 (len(self) == 3 and len(strids) == 2)):
-             return self._resolve_from_stridxs(strids)
-
-         mstridx = self.mstridx
-
-         if len_ymd > 3:
-             raise ValueError("More than three YMD values")
-         elif len_ymd == 1 or (mstridx is not None and len_ymd == 2):
-             # One member, or two members with a month string
-             if mstridx is not None:
-                 month = self[mstridx]
-                 # since mstridx is 0 or 1, self[mstridx-1] always
-                 # looks up the other element
-                 other = self[mstridx - 1]
-             else:
-                 other = self[0]
-
-             if len_ymd > 1 or mstridx is None:
-                 if other > 31:
-                     year = other
-                 else:
-                     day = other
-
-         elif len_ymd == 2:
-             # Two members with numbers
-             if self[0] > 31:
-                 # 99-01
-                 year, month = self
-             elif self[1] > 31:
-                 # 01-99
-                 month, year = self
-             elif dayfirst and self[1] <= 12:
-                 # 13-01
-                 day, month = self
-             else:
-                 # 01-13
-                 month, day = self
-
-         elif len_ymd == 3:
-             # Three members
-             if mstridx == 0:
-                 if self[1] > 31:
-                     # Apr-2003-25
-                     month, year, day = self
-                 else:
-                     month, day, year = self
-             elif mstridx == 1:
-                 if self[0] > 31 or (yearfirst and self[2] <= 31):
-                     # 99-Jan-01
-                     year, month, day = self
-                 else:
-                     # 01-Jan-01
-                     # Give precedence to day-first, since
-                     # two-digit years are usually hand-written.
-                     day, month, year = self
-
-             elif mstridx == 2:
-                 # WTF!?
-                 if self[1] > 31:
-                     # 01-99-Jan
-                     day, year, month = self
-                 else:
-                     # 99-01-Jan
-                     year, day, month = self
-
-             else:
-                 if (self[0] > 31 or
-                         self.ystridx == 0 or
-                         (yearfirst and self[1] <= 12 and self[2] <= 31)):
-                     # 99-01-01
-                     if dayfirst and self[2] <= 12:
-                         year, day, month = self
-                     else:
-                         year, month, day = self
-                 elif self[0] > 12 or (dayfirst and self[1] <= 12):
-                     # 13-01-01
-                     day, month, year = self
-                 else:
-                     # 01-13-01
-                     month, day, year = self
-
-         return year, month, day
-
-
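The two-member branches above drive cases like the following; a default datetime is passed so the missing element is deterministic rather than taken from today's date:

    import datetime
    from dateutil import parser

    default = datetime.datetime(2003, 9, 25)
    print(parser.parse("99-01", default=default))                 # 1999-01-25: 99 > 31, so it is the year
    print(parser.parse("10-09", default=default))                 # 2003-10-09: month first by default
    print(parser.parse("10-09", default=default, dayfirst=True))  # 2003-09-10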
- class parser(object):
-     def __init__(self, info=None):
-         self.info = info or parserinfo()
-
-     def parse(self, timestr, default=None,
-               ignoretz=False, tzinfos=None, **kwargs):
-         """
-         Parse the date/time string into a :class:`datetime.datetime` object.
-
-         :param timestr:
-             Any date/time string using the supported formats.
-
-         :param default:
-             The default datetime object; if this is a datetime object and not
-             ``None``, elements specified in ``timestr`` replace elements in
-             the default object.
-
-         :param ignoretz:
-             If set ``True``, time zones in parsed strings are ignored and a
-             naive :class:`datetime.datetime` object is returned.
-
-         :param tzinfos:
-             Additional time zone names / aliases which may be present in the
-             string. This argument maps time zone names (and optionally
-             offsets from those time zones) to time zones. This parameter can
-             be a dictionary with timezone aliases mapping time zone names to
-             time zones or a function taking two parameters (``tzname`` and
-             ``tzoffset``) and returning a time zone.
-
-             The timezones to which the names are mapped can be an integer
-             offset from UTC in seconds or a :class:`tzinfo` object.
-
-             .. doctest::
-                :options: +NORMALIZE_WHITESPACE
-
-                 >>> from dateutil.parser import parse
-                 >>> from dateutil.tz import gettz
-                 >>> tzinfos = {"BRST": -7200, "CST": gettz("America/Chicago")}
-                 >>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos)
-                 datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -7200))
-                 >>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos)
-                 datetime.datetime(2012, 1, 19, 17, 21,
-                                   tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago'))
-
-             This parameter is ignored if ``ignoretz`` is set.
-
-         :param \\*\\*kwargs:
-             Keyword arguments as passed to ``_parse()``.
-
-         :return:
-             Returns a :class:`datetime.datetime` object or, if the
-             ``fuzzy_with_tokens`` option is ``True``, returns a tuple, the
-             first element being a :class:`datetime.datetime` object, the
-             second a tuple containing the fuzzy tokens.
-
-         :raises ParserError:
-             Raised for invalid or unknown string format, if the provided
-             :class:`tzinfo` is not in a valid format, or if an invalid date
-             would be created.
-
-         :raises TypeError:
-             Raised for non-string or character stream input.
-
-         :raises OverflowError:
-             Raised if the parsed date exceeds the largest valid C integer on
-             your system.
-         """
-
-         if default is None:
-             default = datetime.datetime.now().replace(hour=0, minute=0,
-                                                       second=0, microsecond=0)
-
-         res, skipped_tokens = self._parse(timestr, **kwargs)
-
-         if res is None:
-             raise ParserError("Unknown string format: %s", timestr)
-
-         if len(res) == 0:
-             raise ParserError("String does not contain a date: %s", timestr)
-
-         try:
-             ret = self._build_naive(res, default)
-         except ValueError as e:
-             six.raise_from(ParserError(str(e) + ": %s", timestr), e)
-
-         if not ignoretz:
-             ret = self._build_tzaware(ret, res, tzinfos)
-
-         if kwargs.get('fuzzy_with_tokens', False):
-             return ret, skipped_tokens
-         else:
-             return ret
-
-     class _result(_resultbase):
-         __slots__ = ["year", "month", "day", "weekday",
-                      "hour", "minute", "second", "microsecond",
-                      "tzname", "tzoffset", "ampm", "any_unused_tokens"]
-
-     def _parse(self, timestr, dayfirst=None, yearfirst=None, fuzzy=False,
-                fuzzy_with_tokens=False):
-         """
-         Private method which performs the heavy lifting of parsing, called
-         from ``parse()``, which passes on its ``kwargs`` to this function.
-
-         :param timestr:
-             The string to parse.
-
-         :param dayfirst:
-             Whether to interpret the first value in an ambiguous 3-integer
-             date (e.g. 01/05/09) as the day (``True``) or month (``False``).
-             If ``yearfirst`` is set to ``True``, this distinguishes between
-             YDM and YMD. If set to ``None``, this value is retrieved from
-             the current :class:`parserinfo` object (which itself defaults to
-             ``False``).
-
-         :param yearfirst:
-             Whether to interpret the first value in an ambiguous 3-integer
-             date (e.g. 01/05/09) as the year. If ``True``, the first number
-             is taken to be the year, otherwise the last number is taken to
-             be the year. If this is set to ``None``, the value is retrieved
-             from the current :class:`parserinfo` object (which itself
-             defaults to ``False``).
-
-         :param fuzzy:
-             Whether to allow fuzzy parsing, allowing for strings like "Today
-             is January 1, 2047 at 8:21:00AM".
-
-         :param fuzzy_with_tokens:
-             If ``True``, ``fuzzy`` is automatically set to True, and the
-             parser will return a tuple where the first element is the parsed
-             :class:`datetime.datetime` datetimestamp and the second element
-             is a tuple containing the portions of the string which were
-             ignored:
-
-             .. doctest::
-
-                 >>> from dateutil.parser import parse
-                 >>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True)
-                 (datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at '))
-
-         """
-         if fuzzy_with_tokens:
-             fuzzy = True
-
-         info = self.info
-
-         if dayfirst is None:
-             dayfirst = info.dayfirst
-
-         if yearfirst is None:
-             yearfirst = info.yearfirst
-
-         res = self._result()
-         l = _timelex.split(timestr)  # Splits the timestr into tokens
-
-         skipped_idxs = []
-
-         # year/month/day list
-         ymd = _ymd()
-
-         len_l = len(l)
-         i = 0
-         try:
-             while i < len_l:
-
-                 # Check if it's a number
-                 value_repr = l[i]
-                 try:
-                     value = float(value_repr)
-                 except ValueError:
-                     value = None
-
-                 if value is not None:
-                     # Numeric token
-                     i = self._parse_numeric_token(l, i, info, ymd, res, fuzzy)
-
-                 # Check weekday
-                 elif info.weekday(l[i]) is not None:
-                     value = info.weekday(l[i])
-                     res.weekday = value
-
-                 # Check month name
-                 elif info.month(l[i]) is not None:
-                     value = info.month(l[i])
-                     ymd.append(value, 'M')
-
-                     if i + 1 < len_l:
-                         if l[i + 1] in ('-', '/'):
-                             # Jan-01[-99]
-                             sep = l[i + 1]
-                             ymd.append(l[i + 2])
-
-                             if i + 3 < len_l and l[i + 3] == sep:
-                                 # Jan-01-99
-                                 ymd.append(l[i + 4])
-                                 i += 2
-
-                             i += 2
-
-                         elif (i + 4 < len_l and l[i + 1] == l[i + 3] == ' ' and
-                               info.pertain(l[i + 2])):
-                             # Jan of 01
-                             # In this case, 01 is clearly year
-                             if l[i + 4].isdigit():
-                                 # Convert it here to become unambiguous
-                                 value = int(l[i + 4])
-                                 year = str(info.convertyear(value))
-                                 ymd.append(year, 'Y')
-                             else:
-                                 # Wrong guess
-                                 pass
-                                 # TODO: not hit in tests
-                             i += 4
-
-                 # Check am/pm
-                 elif info.ampm(l[i]) is not None:
-                     value = info.ampm(l[i])
-                     val_is_ampm = self._ampm_valid(res.hour, res.ampm, fuzzy)
-
-                     if val_is_ampm:
-                         res.hour = self._adjust_ampm(res.hour, value)
-                         res.ampm = value
-
-                     elif fuzzy:
-                         skipped_idxs.append(i)
-
-                 # Check for a timezone name
-                 elif self._could_be_tzname(res.hour, res.tzname, res.tzoffset, l[i]):
-                     res.tzname = l[i]
-                     res.tzoffset = info.tzoffset(res.tzname)
-
-                     # Check for something like GMT+3, or BRST+3. Notice
-                     # that it doesn't mean "I am 3 hours after GMT", but
-                     # "my time +3 is GMT". If found, we reverse the
-                     # logic so that timezone parsing code will get it
-                     # right.
-                     if i + 1 < len_l and l[i + 1] in ('+', '-'):
-                         l[i + 1] = ('+', '-')[l[i + 1] == '+']
-                         res.tzoffset = None
-                         if info.utczone(res.tzname):
-                             # With something like GMT+3, the timezone
-                             # is *not* GMT.
-                             res.tzname = None
-
-                 # Check for a numbered timezone
-                 elif res.hour is not None and l[i] in ('+', '-'):
-                     signal = (-1, 1)[l[i] == '+']
-                     len_li = len(l[i + 1])
-
-                     # TODO: check that l[i + 1] is integer?
-                     if len_li == 4:
-                         # -0300
-                         hour_offset = int(l[i + 1][:2])
-                         min_offset = int(l[i + 1][2:])
-                     elif i + 2 < len_l and l[i + 2] == ':':
-                         # -03:00
-                         hour_offset = int(l[i + 1])
-                         min_offset = int(l[i + 3])  # TODO: Check that l[i+3] is minute-like?
-                         i += 2
-                     elif len_li <= 2:
-                         # -[0]3
-                         hour_offset = int(l[i + 1][:2])
-                         min_offset = 0
-                     else:
-                         raise ValueError(timestr)
-
-                     res.tzoffset = signal * (hour_offset * 3600 + min_offset * 60)
-
-                     # Look for a timezone name between parenthesis
-                     if (i + 5 < len_l and
-                             info.jump(l[i + 2]) and l[i + 3] == '(' and
-                             l[i + 5] == ')' and
-                             3 <= len(l[i + 4]) and
-                             self._could_be_tzname(res.hour, res.tzname,
-                                                   None, l[i + 4])):
-                         # -0300 (BRST)
-                         res.tzname = l[i + 4]
-                         i += 4
-
-                     i += 1
-
-                 # Check jumps
-                 elif not (info.jump(l[i]) or fuzzy):
-                     raise ValueError(timestr)
-
-                 else:
-                     skipped_idxs.append(i)
-                 i += 1
-
-             # Process year/month/day
-             year, month, day = ymd.resolve_ymd(yearfirst, dayfirst)
-
-             res.century_specified = ymd.century_specified
-             res.year = year
-             res.month = month
-             res.day = day
-
-         except (IndexError, ValueError):
-             return None, None
-
-         if not info.validate(res):
-             return None, None
-
-         if fuzzy_with_tokens:
-             skipped_tokens = self._recombine_skipped(l, skipped_idxs)
-             return res, tuple(skipped_tokens)
-         else:
-             return res, None
-
-     def _parse_numeric_token(self, tokens, idx, info, ymd, res, fuzzy):
-         # Token is a number
-         value_repr = tokens[idx]
-         try:
-             value = self._to_decimal(value_repr)
-         except Exception as e:
-             six.raise_from(ValueError('Unknown numeric token'), e)
-
-         len_li = len(value_repr)
-
-         len_l = len(tokens)
-
-         if (len(ymd) == 3 and len_li in (2, 4) and
-             res.hour is None and
-                 (idx + 1 >= len_l or
-                  (tokens[idx + 1] != ':' and
-                   info.hms(tokens[idx + 1]) is None))):
-             # 19990101T23[59]
-             s = tokens[idx]
-             res.hour = int(s[:2])
-
-             if len_li == 4:
-                 res.minute = int(s[2:])
-
-         elif len_li == 6 or (len_li > 6 and tokens[idx].find('.') == 6):
-             # YYMMDD or HHMMSS[.ss]
-             s = tokens[idx]
-
-             if not ymd and '.' not in tokens[idx]:
-                 ymd.append(s[:2])
-                 ymd.append(s[2:4])
-                 ymd.append(s[4:])
-             else:
-                 # 19990101T235959[.59]
-
-                 # TODO: Check if res attributes already set.
-                 res.hour = int(s[:2])
-                 res.minute = int(s[2:4])
-                 res.second, res.microsecond = self._parsems(s[4:])
-
-         elif len_li in (8, 12, 14):
-             # YYYYMMDD
-             s = tokens[idx]
-             ymd.append(s[:4], 'Y')
-             ymd.append(s[4:6])
-             ymd.append(s[6:8])
-
-             if len_li > 8:
-                 res.hour = int(s[8:10])
-                 res.minute = int(s[10:12])
-
-                 if len_li > 12:
-                     res.second = int(s[12:])
-
-         elif self._find_hms_idx(idx, tokens, info, allow_jump=True) is not None:
-             # HH[ ]h or MM[ ]m or SS[.ss][ ]s
-             hms_idx = self._find_hms_idx(idx, tokens, info, allow_jump=True)
-             (idx, hms) = self._parse_hms(idx, tokens, info, hms_idx)
-             if hms is not None:
-                 # TODO: checking that hour/minute/second are not
-                 # already set?
-                 self._assign_hms(res, value_repr, hms)
-
-         elif idx + 2 < len_l and tokens[idx + 1] == ':':
-             # HH:MM[:SS[.ss]]
-             res.hour = int(value)
-             value = self._to_decimal(tokens[idx + 2])  # TODO: try/except for this?
-             (res.minute, res.second) = self._parse_min_sec(value)
-
-             if idx + 4 < len_l and tokens[idx + 3] == ':':
-                 res.second, res.microsecond = self._parsems(tokens[idx + 4])
-
-                 idx += 2
-
-             idx += 2
-
-         elif idx + 1 < len_l and tokens[idx + 1] in ('-', '/', '.'):
-             sep = tokens[idx + 1]
-             ymd.append(value_repr)
-
-             if idx + 2 < len_l and not info.jump(tokens[idx + 2]):
-                 if tokens[idx + 2].isdigit():
-                     # 01-01[-01]
-                     ymd.append(tokens[idx + 2])
-                 else:
-                     # 01-Jan[-01]
-                     value = info.month(tokens[idx + 2])
-
-                     if value is not None:
-                         ymd.append(value, 'M')
-                     else:
-                         raise ValueError()
-
-                 if idx + 3 < len_l and tokens[idx + 3] == sep:
-                     # We have three members
-                     value = info.month(tokens[idx + 4])
-
-                     if value is not None:
-                         ymd.append(value, 'M')
-                     else:
-                         ymd.append(tokens[idx + 4])
-                     idx += 2
-
-                 idx += 1
-             idx += 1
-
-         elif idx + 1 >= len_l or info.jump(tokens[idx + 1]):
-             if idx + 2 < len_l and info.ampm(tokens[idx + 2]) is not None:
-                 # 12 am
-                 hour = int(value)
-                 res.hour = self._adjust_ampm(hour, info.ampm(tokens[idx + 2]))
-                 idx += 1
-             else:
-                 # Year, month or day
-                 ymd.append(value)
-             idx += 1
-
-         elif info.ampm(tokens[idx + 1]) is not None and (0 <= value < 24):
-             # 12am
-             hour = int(value)
-             res.hour = self._adjust_ampm(hour, info.ampm(tokens[idx + 1]))
-             idx += 1
-
-         elif ymd.could_be_day(value):
-             ymd.append(value)
-
-         elif not fuzzy:
-             raise ValueError()
-
-         return idx
-
-     def _find_hms_idx(self, idx, tokens, info, allow_jump):
-         len_l = len(tokens)
-
-         if idx + 1 < len_l and info.hms(tokens[idx + 1]) is not None:
-             # There is an "h", "m", or "s" label following this token. We
-             # assign the upcoming label to the current token.
-             # e.g. the "12" in "12h"
-             hms_idx = idx + 1
-
-         elif (allow_jump and idx + 2 < len_l and tokens[idx + 1] == ' ' and
-               info.hms(tokens[idx + 2]) is not None):
-             # There is a space and then an "h", "m", or "s" label.
-             # e.g. the "12" in "12 h"
-             hms_idx = idx + 2
-
-         elif idx > 0 and info.hms(tokens[idx - 1]) is not None:
-             # There is an "h", "m", or "s" preceding this token. Since
-             # neither of the previous cases was hit, there is no label
-             # following this token, so we use the previous label.
-             # e.g. the "04" in "12h04"
-             hms_idx = idx - 1
-
-         elif (1 < idx == len_l - 1 and tokens[idx - 1] == ' ' and
-               info.hms(tokens[idx - 2]) is not None):
-             # If we are looking at the final token, we allow for a
-             # backward-looking check to skip over a space.
-             # TODO: Are we sure this is the right condition here?
-             hms_idx = idx - 2
-
-         else:
-             hms_idx = None
-
-         return hms_idx
-
-     def _assign_hms(self, res, value_repr, hms):
-         # See GH issue #427, fixing float rounding
-         value = self._to_decimal(value_repr)
-
-         if hms == 0:
-             # Hour
-             res.hour = int(value)
-             if value % 1:
-                 res.minute = int(60 * (value % 1))
-
-         elif hms == 1:
-             (res.minute, res.second) = self._parse_min_sec(value)
-
-         elif hms == 2:
-             (res.second, res.microsecond) = self._parsems(value_repr)
-
-     def _could_be_tzname(self, hour, tzname, tzoffset, token):
-         return (hour is not None and
-                 tzname is None and
-                 tzoffset is None and
-                 len(token) <= 5 and
-                 (all(x in string.ascii_uppercase for x in token)
-                  or token in self.info.UTCZONE))
-
-     def _ampm_valid(self, hour, ampm, fuzzy):
-         """
-         For fuzzy parsing, 'a' or 'am' (both valid English words)
-         may erroneously trigger the AM/PM flag. Deal with that
-         here.
-         """
-         val_is_ampm = True
-
-         # If there's already an AM/PM flag, this one isn't one.
-         if fuzzy and ampm is not None:
-             val_is_ampm = False
-
-         # If AM/PM is found and hour is not, raise a ValueError
-         if hour is None:
-             if fuzzy:
-                 val_is_ampm = False
-             else:
-                 raise ValueError('No hour specified with AM or PM flag.')
-         elif not 0 <= hour <= 12:
-             # If AM/PM is found, it's a 12 hour clock, so raise
-             # an error for invalid range
-             if fuzzy:
-                 val_is_ampm = False
-             else:
-                 raise ValueError('Invalid hour specified for 12-hour clock.')
-
-         return val_is_ampm
-
-     def _adjust_ampm(self, hour, ampm):
-         if hour < 12 and ampm == 1:
-             hour += 12
-         elif hour == 12 and ampm == 0:
-             hour = 0
-         return hour
-
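The 12am/12pm edge cases are the ones worth checking in the adjustment above; a small sketch against the private method:

    from dateutil import parser

    p = parser.parser()
    assert p._adjust_ampm(12, 0) == 0    # 12 am -> midnight
    assert p._adjust_ampm(12, 1) == 12   # 12 pm -> noon
    assert p._adjust_ampm(8, 1) == 20    # 8 pm -> 20:00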
-     def _parse_min_sec(self, value):
-         # TODO: Every usage of this function sets res.second to the return
-         # value. Are there any cases where second will be returned as None
-         # and we *don't* want to set res.second = None?
-         minute = int(value)
-         second = None
-
-         sec_remainder = value % 1
-         if sec_remainder:
-             second = int(60 * sec_remainder)
-         return (minute, second)
-
-     def _parse_hms(self, idx, tokens, info, hms_idx):
-         # TODO: Is this going to admit a lot of false-positives for when we
-         # just happen to have digits and "h", "m" or "s" characters in
-         # non-date text? I guess hex hashes won't have that problem, but
-         # there's plenty of random junk out there.
-         if hms_idx is None:
-             hms = None
-             new_idx = idx
-         elif hms_idx > idx:
-             hms = info.hms(tokens[hms_idx])
-             new_idx = hms_idx
-         else:
-             # Looking backwards, increment one.
-             hms = info.hms(tokens[hms_idx]) + 1
-             new_idx = idx
-
-         return (new_idx, hms)
-
-     # ------------------------------------------------------------------
-     # Handling for individual tokens. These are kept as methods instead
-     # of functions for the sake of customizability via subclassing.
-
-     def _parsems(self, value):
-         """Parse an I[.F] seconds value into (seconds, microseconds)."""
-         if "." not in value:
-             return int(value), 0
-         else:
-             i, f = value.split(".")
-             return int(i), int(f.ljust(6, "0")[:6])
-
-     def _to_decimal(self, val):
-         try:
-             decimal_value = Decimal(val)
-             # See GH 662, edge case, infinite value should not be converted
-             # via `_to_decimal`
-             if not decimal_value.is_finite():
-                 raise ValueError("Converted decimal value is infinite or NaN")
-         except Exception as e:
-             msg = "Could not convert %s to decimal" % val
-             six.raise_from(ValueError(msg), e)
-         else:
-             return decimal_value
-
-     # ------------------------------------------------------------------
-     # Post-Parsing construction of datetime output. These are kept as
-     # methods instead of functions for the sake of customizability via
-     # subclassing.
-
-     def _build_tzinfo(self, tzinfos, tzname, tzoffset):
-         if callable(tzinfos):
-             tzdata = tzinfos(tzname, tzoffset)
-         else:
-             tzdata = tzinfos.get(tzname)
-         # handle the case where tzinfos is passed an option that returns None
-         # eg tzinfos = {'BRST': None}
-         if isinstance(tzdata, datetime.tzinfo) or tzdata is None:
-             tzinfo = tzdata
-         elif isinstance(tzdata, text_type):
-             tzinfo = tz.tzstr(tzdata)
-         elif isinstance(tzdata, integer_types):
-             tzinfo = tz.tzoffset(tzname, tzdata)
-         else:
-             raise TypeError("Offset must be tzinfo subclass, tz string, "
-                             "or int offset.")
-         return tzinfo
-
-     def _build_tzaware(self, naive, res, tzinfos):
-         if (callable(tzinfos) or (tzinfos and res.tzname in tzinfos)):
-             tzinfo = self._build_tzinfo(tzinfos, res.tzname, res.tzoffset)
-             aware = naive.replace(tzinfo=tzinfo)
-             aware = self._assign_tzname(aware, res.tzname)
-
-         elif res.tzname and res.tzname in time.tzname:
-             aware = naive.replace(tzinfo=tz.tzlocal())
-
-             # Handle ambiguous local datetime
-             aware = self._assign_tzname(aware, res.tzname)
-
-             # This is mostly relevant for winter GMT zones parsed in the UK
-             if (aware.tzname() != res.tzname and
-                     res.tzname in self.info.UTCZONE):
-                 aware = aware.replace(tzinfo=tz.UTC)
-
-         elif res.tzoffset == 0:
-             aware = naive.replace(tzinfo=tz.UTC)
-
-         elif res.tzoffset:
-             aware = naive.replace(tzinfo=tz.tzoffset(res.tzname, res.tzoffset))
-
-         elif not res.tzname and not res.tzoffset:
-             # i.e. no timezone information was found.
-             aware = naive
-
-         elif res.tzname:
-             # tz-like string was parsed but we don't know what to do
-             # with it
-             warnings.warn("tzname {tzname} identified but not understood. "
-                           "Pass `tzinfos` argument in order to correctly "
-                           "return a timezone-aware datetime. In a future "
-                           "version, this will raise an "
-                           "exception.".format(tzname=res.tzname),
-                           category=UnknownTimezoneWarning)
-             aware = naive
-
-         return aware
-
-     def _build_naive(self, res, default):
-         repl = {}
-         for attr in ("year", "month", "day", "hour",
-                      "minute", "second", "microsecond"):
-             value = getattr(res, attr)
-             if value is not None:
-                 repl[attr] = value
-
-         if 'day' not in repl:
-             # If the default day exceeds the last day of the month, fall back
-             # to the end of the month.
-             cyear = default.year if res.year is None else res.year
-             cmonth = default.month if res.month is None else res.month
-             cday = default.day if res.day is None else res.day
-
-             if cday > monthrange(cyear, cmonth)[1]:
-                 repl['day'] = monthrange(cyear, cmonth)[1]
-
-         naive = default.replace(**repl)
-
-         if res.weekday is not None and not res.day:
-             naive = naive + relativedelta.relativedelta(weekday=res.weekday)
-
-         return naive
-
- def _assign_tzname(self, dt, tzname):
1243
- if dt.tzname() != tzname:
1244
- new_dt = tz.enfold(dt, fold=1)
1245
- if new_dt.tzname() == tzname:
1246
- return new_dt
1247
-
1248
- return dt
1249
-
1250
- def _recombine_skipped(self, tokens, skipped_idxs):
1251
- """
1252
- >>> tokens = ["foo", " ", "bar", " ", "19June2000", "baz"]
1253
- >>> skipped_idxs = [0, 1, 2, 5]
1254
- >>> _recombine_skipped(tokens, skipped_idxs)
1255
- ["foo bar", "baz"]
1256
- """
1257
- skipped_tokens = []
1258
- for i, idx in enumerate(sorted(skipped_idxs)):
1259
- if i > 0 and idx - 1 == skipped_idxs[i - 1]:
1260
- skipped_tokens[-1] = skipped_tokens[-1] + tokens[idx]
1261
- else:
1262
- skipped_tokens.append(tokens[idx])
1263
-
1264
- return skipped_tokens
1265
-
1266
-
1267
- DEFAULTPARSER = parser()
1268
-
1269
-
1270
- def parse(timestr, parserinfo=None, **kwargs):
1271
- """
1272
-
1273
- Parse a string in one of the supported formats, using the
1274
- ``parserinfo`` parameters.
1275
-
1276
- :param timestr:
1277
- A string containing a date/time stamp.
1278
-
1279
- :param parserinfo:
1280
- A :class:`parserinfo` object containing parameters for the parser.
1281
- If ``None``, the default arguments to the :class:`parserinfo`
1282
- constructor are used.
1283
-
1284
- The ``**kwargs`` parameter takes the following keyword arguments:
1285
-
1286
- :param default:
1287
- The default datetime object, if this is a datetime object and not
1288
- ``None``, elements specified in ``timestr`` replace elements in the
1289
- default object.
1290
-
1291
- :param ignoretz:
1292
- If set ``True``, time zones in parsed strings are ignored and a naive
1293
- :class:`datetime` object is returned.
1294
-
1295
- :param tzinfos:
1296
- Additional time zone names / aliases which may be present in the
1297
- string. This argument maps time zone names (and optionally offsets
1298
- from those time zones) to time zones. This parameter can be a
1299
- dictionary with timezone aliases mapping time zone names to time
1300
- zones or a function taking two parameters (``tzname`` and
1301
- ``tzoffset``) and returning a time zone.
1302
-
1303
- The timezones to which the names are mapped can be an integer
1304
- offset from UTC in seconds or a :class:`tzinfo` object.
1305
-
1306
- .. doctest::
1307
- :options: +NORMALIZE_WHITESPACE
1308
-
1309
- >>> from dateutil.parser import parse
1310
- >>> from dateutil.tz import gettz
1311
- >>> tzinfos = {"BRST": -7200, "CST": gettz("America/Chicago")}
1312
- >>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos)
1313
- datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -7200))
1314
- >>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos)
1315
- datetime.datetime(2012, 1, 19, 17, 21,
1316
- tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago'))
1317
-
1318
- This parameter is ignored if ``ignoretz`` is set.
1319
-
1320
- :param dayfirst:
1321
- Whether to interpret the first value in an ambiguous 3-integer date
1322
- (e.g. 01/05/09) as the day (``True``) or month (``False``). If
1323
- ``yearfirst`` is set to ``True``, this distinguishes between YDM and
1324
- YMD. If set to ``None``, this value is retrieved from the current
1325
- :class:`parserinfo` object (which itself defaults to ``False``).
1326
-
1327
- :param yearfirst:
1328
- Whether to interpret the first value in an ambiguous 3-integer date
1329
- (e.g. 01/05/09) as the year. If ``True``, the first number is taken to
1330
- be the year, otherwise the last number is taken to be the year. If
1331
- this is set to ``None``, the value is retrieved from the current
1332
- :class:`parserinfo` object (which itself defaults to ``False``).
1333
-
1334
- :param fuzzy:
1335
- Whether to allow fuzzy parsing, allowing for string like "Today is
1336
- January 1, 2047 at 8:21:00AM".
1337
-
1338
- :param fuzzy_with_tokens:
1339
- If ``True``, ``fuzzy`` is automatically set to True, and the parser
1340
- will return a tuple where the first element is the parsed
1341
- :class:`datetime.datetime` datetimestamp and the second element is
1342
- a tuple containing the portions of the string which were ignored:
1343
-
1344
- .. doctest::
1345
-
1346
- >>> from dateutil.parser import parse
1347
- >>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True)
1348
- (datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at '))
1349
-
1350
- :return:
1351
- Returns a :class:`datetime.datetime` object or, if the
1352
- ``fuzzy_with_tokens`` option is ``True``, returns a tuple, the
1353
- first element being a :class:`datetime.datetime` object, the second
1354
- a tuple containing the fuzzy tokens.
1355
-
1356
- :raises ParserError:
1357
- Raised for invalid or unknown string formats, if the provided
1358
- :class:`tzinfo` is not in a valid format, or if an invalid date would
1359
- be created.
1360
-
1361
- :raises OverflowError:
1362
- Raised if the parsed date exceeds the largest valid C integer on
1363
- your system.
1364
- """
1365
- if parserinfo:
1366
- return parser(parserinfo).parse(timestr, **kwargs)
1367
- else:
1368
- return DEFAULTPARSER.parse(timestr, **kwargs)
1369
-
1370
-
1371
- class _tzparser(object):
1372
-
1373
- class _result(_resultbase):
1374
-
1375
- __slots__ = ["stdabbr", "stdoffset", "dstabbr", "dstoffset",
1376
- "start", "end"]
1377
-
1378
- class _attr(_resultbase):
1379
- __slots__ = ["month", "week", "weekday",
1380
- "yday", "jyday", "day", "time"]
1381
-
1382
- def __repr__(self):
1383
- return self._repr("")
1384
-
1385
- def __init__(self):
1386
- _resultbase.__init__(self)
1387
- self.start = self._attr()
1388
- self.end = self._attr()
1389
-
1390
- def parse(self, tzstr):
1391
- res = self._result()
1392
- l = [x for x in re.split(r'([,:.]|[a-zA-Z]+|[0-9]+)',tzstr) if x]
1393
- used_idxs = list()
1394
- try:
1395
-
1396
- len_l = len(l)
1397
-
1398
- i = 0
1399
- while i < len_l:
1400
- # BRST+3[BRDT[+2]]
1401
- j = i
1402
- while j < len_l and not [x for x in l[j]
1403
- if x in "0123456789:,-+"]:
1404
- j += 1
1405
- if j != i:
1406
- if not res.stdabbr:
1407
- offattr = "stdoffset"
1408
- res.stdabbr = "".join(l[i:j])
1409
- else:
1410
- offattr = "dstoffset"
1411
- res.dstabbr = "".join(l[i:j])
1412
-
1413
- for ii in range(j):
1414
- used_idxs.append(ii)
1415
- i = j
1416
- if (i < len_l and (l[i] in ('+', '-') or l[i][0] in
1417
- "0123456789")):
1418
- if l[i] in ('+', '-'):
1419
- # Yes, that's right. See the TZ variable
1420
- # documentation.
1421
- signal = (1, -1)[l[i] == '+']
1422
- used_idxs.append(i)
1423
- i += 1
1424
- else:
1425
- signal = -1
1426
- len_li = len(l[i])
1427
- if len_li == 4:
1428
- # -0300
1429
- setattr(res, offattr, (int(l[i][:2]) * 3600 +
1430
- int(l[i][2:]) * 60) * signal)
1431
- elif i + 1 < len_l and l[i + 1] == ':':
1432
- # -03:00
1433
- setattr(res, offattr,
1434
- (int(l[i]) * 3600 +
1435
- int(l[i + 2]) * 60) * signal)
1436
- used_idxs.append(i)
1437
- i += 2
1438
- elif len_li <= 2:
1439
- # -[0]3
1440
- setattr(res, offattr,
1441
- int(l[i][:2]) * 3600 * signal)
1442
- else:
1443
- return None
1444
- used_idxs.append(i)
1445
- i += 1
1446
- if res.dstabbr:
1447
- break
1448
- else:
1449
- break
1450
-
1451
-
1452
- if i < len_l:
1453
- for j in range(i, len_l):
1454
- if l[j] == ';':
1455
- l[j] = ','
1456
-
1457
- assert l[i] == ','
1458
-
1459
- i += 1
1460
-
1461
- if i >= len_l:
1462
- pass
1463
- elif (8 <= l.count(',') <= 9 and
1464
- not [y for x in l[i:] if x != ','
1465
- for y in x if y not in "0123456789+-"]):
1466
- # GMT0BST,3,0,30,3600,10,0,26,7200[,3600]
1467
- for x in (res.start, res.end):
1468
- x.month = int(l[i])
1469
- used_idxs.append(i)
1470
- i += 2
1471
- if l[i] == '-':
1472
- value = int(l[i + 1]) * -1
1473
- used_idxs.append(i)
1474
- i += 1
1475
- else:
1476
- value = int(l[i])
1477
- used_idxs.append(i)
1478
- i += 2
1479
- if value:
1480
- x.week = value
1481
- x.weekday = (int(l[i]) - 1) % 7
1482
- else:
1483
- x.day = int(l[i])
1484
- used_idxs.append(i)
1485
- i += 2
1486
- x.time = int(l[i])
1487
- used_idxs.append(i)
1488
- i += 2
1489
- if i < len_l:
1490
- if l[i] in ('-', '+'):
1491
- signal = (-1, 1)[l[i] == "+"]
1492
- used_idxs.append(i)
1493
- i += 1
1494
- else:
1495
- signal = 1
1496
- used_idxs.append(i)
1497
- res.dstoffset = (res.stdoffset + int(l[i]) * signal)
1498
-
1499
- # This was a made-up format that is not in normal use
1500
-                warn(('Parsed time zone "%s" ' % tzstr) +
-                     'is in a non-standard dateutil-specific format, which ' +
-                     'is now deprecated; support for parsing this format ' +
-                     'will be removed in future versions. It is recommended ' +
-                     'that you switch to a standard format like the GNU ' +
-                     'TZ variable format.', tz.DeprecatedTzFormatWarning)
-            elif (l.count(',') == 2 and l[i:].count('/') <= 2 and
-                  not [y for x in l[i:] if x not in (',', '/', 'J', 'M',
-                                                     '.', '-', ':')
-                       for y in x if y not in "0123456789"]):
-                for x in (res.start, res.end):
-                    if l[i] == 'J':
-                        # non-leap year day (1 based)
-                        used_idxs.append(i)
-                        i += 1
-                        x.jyday = int(l[i])
-                    elif l[i] == 'M':
-                        # month[-.]week[-.]weekday
-                        used_idxs.append(i)
-                        i += 1
-                        x.month = int(l[i])
-                        used_idxs.append(i)
-                        i += 1
-                        assert l[i] in ('-', '.')
-                        used_idxs.append(i)
-                        i += 1
-                        x.week = int(l[i])
-                        if x.week == 5:
-                            x.week = -1
-                        used_idxs.append(i)
-                        i += 1
-                        assert l[i] in ('-', '.')
-                        used_idxs.append(i)
-                        i += 1
-                        x.weekday = (int(l[i]) - 1) % 7
-                    else:
-                        # year day (zero based)
-                        x.yday = int(l[i]) + 1
-
-                    used_idxs.append(i)
-                    i += 1
-
-                    if i < len_l and l[i] == '/':
-                        used_idxs.append(i)
-                        i += 1
-                        # start time
-                        len_li = len(l[i])
-                        if len_li == 4:
-                            # -0300
-                            x.time = (int(l[i][:2]) * 3600 +
-                                      int(l[i][2:]) * 60)
-                        elif i + 1 < len_l and l[i + 1] == ':':
-                            # -03:00
-                            x.time = int(l[i]) * 3600 + int(l[i + 2]) * 60
-                            used_idxs.append(i)
-                            i += 2
-                            if i + 1 < len_l and l[i + 1] == ':':
-                                used_idxs.append(i)
-                                i += 2
-                                x.time += int(l[i])
-                        elif len_li <= 2:
-                            # -[0]3
-                            x.time = (int(l[i][:2]) * 3600)
-                        else:
-                            return None
-                        used_idxs.append(i)
-                        i += 1
-
-                    assert i == len_l or l[i] == ','
-
-                    i += 1
-
-                assert i >= len_l
-
-        except (IndexError, ValueError, AssertionError):
-            return None
-
-        unused_idxs = set(range(len_l)).difference(used_idxs)
-        res.any_unused_tokens = not {l[n] for n in unused_idxs}.issubset({",", ":"})
-        return res
-
-
-DEFAULTTZPARSER = _tzparser()
-
-
-def _parsetz(tzstr):
-    return DEFAULTTZPARSER.parse(tzstr)
-
-
-class ParserError(ValueError):
-    """Exception subclass used for any failure to parse a datetime string.
-
-    This is a subclass of :py:exc:`ValueError`, and should be raised any time
-    earlier versions of ``dateutil`` would have raised ``ValueError``.
-
-    .. versionadded:: 2.8.1
-    """
-    def __str__(self):
-        try:
-            return self.args[0] % self.args[1:]
-        except (TypeError, IndexError):
-            return super(ParserError, self).__str__()
-
-    def __repr__(self):
-        args = ", ".join("'%s'" % arg for arg in self.args)
-        return "%s(%s)" % (self.__class__.__name__, args)
-
-
-class UnknownTimezoneWarning(RuntimeWarning):
-    """Raised when the parser finds a timezone it cannot parse into a tzinfo.

-    .. versionadded:: 2.7.0
-    """
-# vim:ts=4:sw=4:et
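
A minimal usage sketch of the `parse()` API documented above, built only from the docstring's own parameters (`tzinfos`, `fuzzy_with_tokens`) and the `ParserError` class; the input strings are illustrative:

    from dateutil import tz
    from dateutil.parser import parse, ParserError

    # Map zone abbreviations to a fixed UTC offset (in seconds) or a tzinfo.
    tzinfos = {"BRST": -7200, "CST": tz.gettz("America/Chicago")}
    dt = parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos)

    # fuzzy_with_tokens returns (datetime, tuple_of_ignored_substrings).
    dt, skipped = parse("Today is January 1, 2047 at 8:21:00AM",
                        fuzzy_with_tokens=True)

    try:
        parse("not a date")
    except ParserError as exc:  # ValueError subclass, added in 2.8.1
        print(exc)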
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/components/radio.py DELETED
@@ -1,193 +0,0 @@
-"""gr.Radio() component."""
-
-from __future__ import annotations
-
-from typing import Any, Callable, Literal
-
-from gradio_client.documentation import document, set_documentation_group
-from gradio_client.serializing import StringSerializable
-
-from gradio.components.base import FormComponent, IOComponent, _Keywords
-from gradio.deprecation import warn_deprecation, warn_style_method_deprecation
-from gradio.events import Changeable, EventListenerMethod, Inputable, Selectable
-from gradio.interpretation import NeighborInterpretable
-
-set_documentation_group("component")
-
-
-@document()
-class Radio(
-    FormComponent,
-    Selectable,
-    Changeable,
-    Inputable,
-    IOComponent,
-    StringSerializable,
-    NeighborInterpretable,
-):
-    """
-    Creates a set of radio buttons of which only one can be selected.
-    Preprocessing: passes the value of the selected radio button as a {str} or its index as an {int} into the function, depending on `type`.
-    Postprocessing: expects a {str} corresponding to the value of the radio button to be selected.
-    Examples-format: a {str} representing the radio option to select.
-
-    Demos: sentence_builder, titanic_survival, blocks_essay
-    """
-
-    def __init__(
-        self,
-        choices: list[str] | None = None,
-        *,
-        value: str | Callable | None = None,
-        type: str = "value",
-        label: str | None = None,
-        info: str | None = None,
-        every: float | None = None,
-        show_label: bool | None = None,
-        container: bool = True,
-        scale: int | None = None,
-        min_width: int = 160,
-        interactive: bool | None = None,
-        visible: bool = True,
-        elem_id: str | None = None,
-        elem_classes: list[str] | str | None = None,
-        **kwargs,
-    ):
-        """
-        Parameters:
-            choices: list of options to select from.
-            value: the button selected by default. If None, no button is selected by default. If callable, the function will be called whenever the app loads to set the initial value of the component.
-            type: Type of value to be returned by component. "value" returns the string of the choice selected, "index" returns the index of the choice selected.
-            label: component name in interface.
-            info: additional component description.
-            every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.
-            show_label: if True, will display label.
-            container: If True, will place the component in a container - providing some extra padding around the border.
-            scale: relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.
-            min_width: minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.
-            interactive: if True, choices in this radio group will be selectable; if False, selection will be disabled. If not provided, this is inferred based on whether the component is used as an input or output.
-            visible: If False, component will be hidden.
-            elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
-            elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.
-        """
-        self.choices = choices or []
-        valid_types = ["value", "index"]
-        if type not in valid_types:
-            raise ValueError(
-                f"Invalid value for parameter `type`: {type}. Please choose from one of: {valid_types}"
-            )
-        self.type = type
-        self.select: EventListenerMethod
-        """
-        Event listener for when the user selects Radio option.
-        Uses event data gradio.SelectData to carry `value` referring to label of selected option, and `index` to refer to index.
-        See EventData documentation on how to use this event data.
-        """
-        IOComponent.__init__(
-            self,
-            label=label,
-            info=info,
-            every=every,
-            show_label=show_label,
-            container=container,
-            scale=scale,
-            min_width=min_width,
-            interactive=interactive,
-            visible=visible,
-            elem_id=elem_id,
-            elem_classes=elem_classes,
-            value=value,
-            **kwargs,
-        )
-        NeighborInterpretable.__init__(self)
-
-    def get_config(self):
-        return {
-            "choices": self.choices,
-            "value": self.value,
-            **IOComponent.get_config(self),
-        }
-
-    def example_inputs(self) -> dict[str, Any]:
-        return {
-            "raw": self.choices[0] if self.choices else None,
-            "serialized": self.choices[0] if self.choices else None,
-        }
-
-    @staticmethod
-    def update(
-        value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE,
-        choices: list[str] | None = None,
-        label: str | None = None,
-        info: str | None = None,
-        show_label: bool | None = None,
-        container: bool | None = None,
-        scale: int | None = None,
-        min_width: int | None = None,
-        interactive: bool | None = None,
-        visible: bool | None = None,
-    ):
-        return {
-            "choices": choices,
-            "label": label,
-            "info": info,
-            "show_label": show_label,
-            "container": container,
-            "scale": scale,
-            "min_width": min_width,
-            "interactive": interactive,
-            "visible": visible,
-            "value": value,
-            "__type__": "update",
-        }
-
-    def preprocess(self, x: str | None) -> str | int | None:
-        """
-        Parameters:
-            x: selected choice
-        Returns:
-            selected choice as string or index within choice list
-        """
-        if self.type == "value":
-            return x
-        elif self.type == "index":
-            if x is None:
-                return None
-            else:
-                return self.choices.index(x)
-        else:
-            raise ValueError(
-                f"Unknown type: {self.type}. Please choose from: 'value', 'index'."
-            )
-
-    def get_interpretation_neighbors(self, x):
-        choices = list(self.choices)
-        choices.remove(x)
-        return choices, {}
-
-    def get_interpretation_scores(
-        self, x, neighbors, scores: list[float | None], **kwargs
-    ) -> list:
-        """
-        Returns:
-            Each value represents the interpretation score corresponding to each choice.
-        """
-        scores.insert(self.choices.index(x), None)
-        return scores
-
-    def style(
-        self,
-        *,
-        item_container: bool | None = None,
-        container: bool | None = None,
-        **kwargs,
-    ):
-        """
-        This method is deprecated. Please set these arguments in the constructor instead.
-        """
-        warn_style_method_deprecation()
-        if item_container is not None:
-            warn_deprecation("The `item_container` parameter is deprecated.")
-        if container is not None:
-            self.container = container
-        return self
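
A short sketch of wiring the `Radio` component above into an app (gradio 3.x-style API as documented in the class docstring; the function and choice labels are illustrative):

    import gradio as gr

    def describe(planet):
        return f"You selected {planet}."

    demo = gr.Interface(
        fn=describe,
        # type="value" (the default) passes the selected label to `fn`;
        # type="index" would pass its position in `choices` instead.
        inputs=gr.Radio(choices=["Mercury", "Venus", "Earth"], label="Planet"),
        outputs="text",
    )

    if __name__ == "__main__":
        demo.launch()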
 
spaces/Defalt-404/Bittensor_Explore/README.md DELETED
@@ -1,12 +0,0 @@
----
-title: Bittensor Explore
-emoji: ⚡
-colorFrom: blue
-colorTo: red
-sdk: gradio
-sdk_version: 3.42.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Detomo/ai-comic-generation/src/app/engine/forbidden.ts DELETED
@@ -1,6 +0,0 @@
-
-// the NSFW has to contain bad words, but doing so might get the code flagged
-// or attract unwanted attention, so we hash them
-export const forbidden = [
-  // TODO implement this
-]
 
spaces/DragGan/DragGan-Inversion/PTI/models/StyleCLIP/models/stylegan2/model.py DELETED
@@ -1,674 +0,0 @@
-import math
-import random
-
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-from models.StyleCLIP.models.stylegan2.op import FusedLeakyReLU, fused_leaky_relu, upfirdn2d
-
-
-class PixelNorm(nn.Module):
-    def __init__(self):
-        super().__init__()
-
-    def forward(self, input):
-        return input * torch.rsqrt(torch.mean(input ** 2, dim=1, keepdim=True) + 1e-8)
-
-
-def make_kernel(k):
-    k = torch.tensor(k, dtype=torch.float32)
-
-    if k.ndim == 1:
-        k = k[None, :] * k[:, None]
-
-    k /= k.sum()
-
-    return k
-
-
-class Upsample(nn.Module):
-    def __init__(self, kernel, factor=2):
-        super().__init__()
-
-        self.factor = factor
-        kernel = make_kernel(kernel) * (factor ** 2)
-        self.register_buffer('kernel', kernel)
-
-        p = kernel.shape[0] - factor
-
-        pad0 = (p + 1) // 2 + factor - 1
-        pad1 = p // 2
-
-        self.pad = (pad0, pad1)
-
-    def forward(self, input):
-        out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad=self.pad)
-
-        return out
-
-
-class Downsample(nn.Module):
-    def __init__(self, kernel, factor=2):
-        super().__init__()
-
-        self.factor = factor
-        kernel = make_kernel(kernel)
-        self.register_buffer('kernel', kernel)
-
-        p = kernel.shape[0] - factor
-
-        pad0 = (p + 1) // 2
-        pad1 = p // 2
-
-        self.pad = (pad0, pad1)
-
-    def forward(self, input):
-        out = upfirdn2d(input, self.kernel, up=1, down=self.factor, pad=self.pad)
-
-        return out
-
-
-class Blur(nn.Module):
-    def __init__(self, kernel, pad, upsample_factor=1):
-        super().__init__()
-
-        kernel = make_kernel(kernel)
-
-        if upsample_factor > 1:
-            kernel = kernel * (upsample_factor ** 2)
-
-        self.register_buffer('kernel', kernel)
-
-        self.pad = pad
-
-    def forward(self, input):
-        out = upfirdn2d(input, self.kernel, pad=self.pad)
-
-        return out
-
-
-class EqualConv2d(nn.Module):
-    def __init__(
-        self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True
-    ):
-        super().__init__()
-
-        self.weight = nn.Parameter(
-            torch.randn(out_channel, in_channel, kernel_size, kernel_size)
-        )
-        self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2)
-
-        self.stride = stride
-        self.padding = padding
-
-        if bias:
-            self.bias = nn.Parameter(torch.zeros(out_channel))
-
-        else:
-            self.bias = None
-
-    def forward(self, input):
-        out = F.conv2d(
-            input,
-            self.weight * self.scale,
-            bias=self.bias,
-            stride=self.stride,
-            padding=self.padding,
-        )
-
-        return out
-
-    def __repr__(self):
-        return (
-            f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]},'
-            f' {self.weight.shape[2]}, stride={self.stride}, padding={self.padding})'
-        )
-
-
-class EqualLinear(nn.Module):
-    def __init__(
-        self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None
-    ):
-        super().__init__()
-
-        self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
-
-        if bias:
-            self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
-
-        else:
-            self.bias = None
-
-        self.activation = activation
-
-        self.scale = (1 / math.sqrt(in_dim)) * lr_mul
-        self.lr_mul = lr_mul
-
-    def forward(self, input):
-        if self.activation:
-            out = F.linear(input, self.weight * self.scale)
-            out = fused_leaky_relu(out, self.bias * self.lr_mul)
-
-        else:
-            out = F.linear(
-                input, self.weight * self.scale, bias=self.bias * self.lr_mul
-            )
-
-        return out
-
-    def __repr__(self):
-        return (
-            f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})'
-        )
-
-
-class ScaledLeakyReLU(nn.Module):
-    def __init__(self, negative_slope=0.2):
-        super().__init__()
-
-        self.negative_slope = negative_slope
-
-    def forward(self, input):
-        out = F.leaky_relu(input, negative_slope=self.negative_slope)
-
-        return out * math.sqrt(2)
-
-
-class ModulatedConv2d(nn.Module):
-    def __init__(
-        self,
-        in_channel,
-        out_channel,
-        kernel_size,
-        style_dim,
-        demodulate=True,
-        upsample=False,
-        downsample=False,
-        blur_kernel=[1, 3, 3, 1],
-    ):
-        super().__init__()
-
-        self.eps = 1e-8
-        self.kernel_size = kernel_size
-        self.in_channel = in_channel
-        self.out_channel = out_channel
-        self.upsample = upsample
-        self.downsample = downsample
-
-        if upsample:
-            factor = 2
-            p = (len(blur_kernel) - factor) - (kernel_size - 1)
-            pad0 = (p + 1) // 2 + factor - 1
-            pad1 = p // 2 + 1
-
-            self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor=factor)
-
-        if downsample:
-            factor = 2
-            p = (len(blur_kernel) - factor) + (kernel_size - 1)
-            pad0 = (p + 1) // 2
-            pad1 = p // 2
-
-            self.blur = Blur(blur_kernel, pad=(pad0, pad1))
-
-        fan_in = in_channel * kernel_size ** 2
-        self.scale = 1 / math.sqrt(fan_in)
-        self.padding = kernel_size // 2
-
-        self.weight = nn.Parameter(
-            torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)
-        )
-
-        self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)
-
-        self.demodulate = demodulate
-
-    def __repr__(self):
-        return (
-            f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, '
-            f'upsample={self.upsample}, downsample={self.downsample})'
-        )
-
-    def forward(self, input, style):
-        batch, in_channel, height, width = input.shape
-
-        style = self.modulation(style).view(batch, 1, in_channel, 1, 1)
-        weight = self.scale * self.weight * style
-
-        if self.demodulate:
-            demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8)
-            weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)
-
-        weight = weight.view(
-            batch * self.out_channel, in_channel, self.kernel_size, self.kernel_size
-        )
-
-        if self.upsample:
-            input = input.view(1, batch * in_channel, height, width)
-            weight = weight.view(
-                batch, self.out_channel, in_channel, self.kernel_size, self.kernel_size
-            )
-            weight = weight.transpose(1, 2).reshape(
-                batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size
-            )
-            out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch)
-            _, _, height, width = out.shape
-            out = out.view(batch, self.out_channel, height, width)
-            out = self.blur(out)
-
-        elif self.downsample:
-            input = self.blur(input)
-            _, _, height, width = input.shape
-            input = input.view(1, batch * in_channel, height, width)
-            out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)
-            _, _, height, width = out.shape
-            out = out.view(batch, self.out_channel, height, width)
-
-        else:
-            input = input.view(1, batch * in_channel, height, width)
-            out = F.conv2d(input, weight, padding=self.padding, groups=batch)
-            _, _, height, width = out.shape
-            out = out.view(batch, self.out_channel, height, width)
-
-        return out
-
-
-class NoiseInjection(nn.Module):
-    def __init__(self):
-        super().__init__()
-
-        self.weight = nn.Parameter(torch.zeros(1))
-
-    def forward(self, image, noise=None):
-        if noise is None:
-            batch, _, height, width = image.shape
-            noise = image.new_empty(batch, 1, height, width).normal_()
-
-        return image + self.weight * noise
-
-
-class ConstantInput(nn.Module):
-    def __init__(self, channel, size=4):
-        super().__init__()
-
-        self.input = nn.Parameter(torch.randn(1, channel, size, size))
-
-    def forward(self, input):
-        batch = input.shape[0]
-        out = self.input.repeat(batch, 1, 1, 1)
-
-        return out
-
-
-class StyledConv(nn.Module):
-    def __init__(
-        self,
-        in_channel,
-        out_channel,
-        kernel_size,
-        style_dim,
-        upsample=False,
-        blur_kernel=[1, 3, 3, 1],
-        demodulate=True,
-    ):
-        super().__init__()
-
-        self.conv = ModulatedConv2d(
-            in_channel,
-            out_channel,
-            kernel_size,
-            style_dim,
-            upsample=upsample,
-            blur_kernel=blur_kernel,
-            demodulate=demodulate,
-        )
-
-        self.noise = NoiseInjection()
-        # self.bias = nn.Parameter(torch.zeros(1, out_channel, 1, 1))
-        # self.activate = ScaledLeakyReLU(0.2)
-        self.activate = FusedLeakyReLU(out_channel)
-
-    def forward(self, input, style, noise=None):
-        out = self.conv(input, style)
-        out = self.noise(out, noise=noise)
-        # out = out + self.bias
-        out = self.activate(out)
-
-        return out
-
-
-class ToRGB(nn.Module):
-    def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]):
-        super().__init__()
-
-        if upsample:
-            self.upsample = Upsample(blur_kernel)
-
-        self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate=False)
-        self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1))
-
-    def forward(self, input, style, skip=None):
-        out = self.conv(input, style)
-        out = out + self.bias
-
-        if skip is not None:
-            skip = self.upsample(skip)
-
-            out = out + skip
-
-        return out
-
-
-class Generator(nn.Module):
-    def __init__(
-        self,
-        size,
-        style_dim,
-        n_mlp,
-        channel_multiplier=2,
-        blur_kernel=[1, 3, 3, 1],
-        lr_mlp=0.01,
-    ):
-        super().__init__()
-
-        self.size = size
-
-        self.style_dim = style_dim
-
-        layers = [PixelNorm()]
-
-        for i in range(n_mlp):
-            layers.append(
-                EqualLinear(
-                    style_dim, style_dim, lr_mul=lr_mlp, activation='fused_lrelu'
-                )
-            )
-
-        self.style = nn.Sequential(*layers)
-
-        self.channels = {
-            4: 512,
-            8: 512,
-            16: 512,
-            32: 512,
-            64: 256 * channel_multiplier,
-            128: 128 * channel_multiplier,
-            256: 64 * channel_multiplier,
-            512: 32 * channel_multiplier,
-            1024: 16 * channel_multiplier,
-        }
-
-        self.input = ConstantInput(self.channels[4])
-        self.conv1 = StyledConv(
-            self.channels[4], self.channels[4], 3, style_dim, blur_kernel=blur_kernel
-        )
-        self.to_rgb1 = ToRGB(self.channels[4], style_dim, upsample=False)
-
-        self.log_size = int(math.log(size, 2))
-        self.num_layers = (self.log_size - 2) * 2 + 1
-
-        self.convs = nn.ModuleList()
-        self.upsamples = nn.ModuleList()
-        self.to_rgbs = nn.ModuleList()
-        self.noises = nn.Module()
-
-        in_channel = self.channels[4]
-
-        for layer_idx in range(self.num_layers):
-            res = (layer_idx + 5) // 2
-            shape = [1, 1, 2 ** res, 2 ** res]
-            self.noises.register_buffer(f'noise_{layer_idx}', torch.randn(*shape))
-
-        for i in range(3, self.log_size + 1):
-            out_channel = self.channels[2 ** i]
-
-            self.convs.append(
-                StyledConv(
-                    in_channel,
-                    out_channel,
-                    3,
-                    style_dim,
-                    upsample=True,
-                    blur_kernel=blur_kernel,
-                )
-            )
-
-            self.convs.append(
-                StyledConv(
-                    out_channel, out_channel, 3, style_dim, blur_kernel=blur_kernel
-                )
-            )
-
-            self.to_rgbs.append(ToRGB(out_channel, style_dim))
-
-            in_channel = out_channel
-
-        self.n_latent = self.log_size * 2 - 2
-
-    def make_noise(self):
-        device = self.input.input.device
-
-        noises = [torch.randn(1, 1, 2 ** 2, 2 ** 2, device=device)]
-
-        for i in range(3, self.log_size + 1):
-            for _ in range(2):
-                noises.append(torch.randn(1, 1, 2 ** i, 2 ** i, device=device))
-
-        return noises
-
-    def mean_latent(self, n_latent):
-        latent_in = torch.randn(
-            n_latent, self.style_dim, device=self.input.input.device
-        )
-        latent = self.style(latent_in).mean(0, keepdim=True)
-
-        return latent
-
-    def get_latent(self, input):
-        return self.style(input)
-
-    def forward(
-        self,
-        styles,
-        return_latents=False,
-        inject_index=None,
-        truncation=1,
-        truncation_latent=None,
-        input_is_latent=False,
-        noise=None,
-        randomize_noise=True,
-    ):
-        if not input_is_latent:
-            styles = [self.style(s) for s in styles]
-
-        if noise is None:
-            if randomize_noise:
-                noise = [None] * self.num_layers
-            else:
-                noise = [
-                    getattr(self.noises, f'noise_{i}') for i in range(self.num_layers)
-                ]
-
-        if truncation < 1:
-            style_t = []
-
-            for style in styles:
-                style_t.append(
-                    truncation_latent + truncation * (style - truncation_latent)
-                )
-
-            styles = style_t
-
-        if len(styles) < 2:
-            inject_index = self.n_latent
-
-            if styles[0].ndim < 3:
-                latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
-
-            else:
-                latent = styles[0]
-
-        else:
-            if inject_index is None:
-                inject_index = random.randint(1, self.n_latent - 1)
-
-            latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
-            latent2 = styles[1].unsqueeze(1).repeat(1, self.n_latent - inject_index, 1)
-
-            latent = torch.cat([latent, latent2], 1)
-
-        out = self.input(latent)
-        out = self.conv1(out, latent[:, 0], noise=noise[0])
-
-        skip = self.to_rgb1(out, latent[:, 1])
-
-        i = 1
-        for conv1, conv2, noise1, noise2, to_rgb in zip(
-            self.convs[::2], self.convs[1::2], noise[1::2], noise[2::2], self.to_rgbs
-        ):
-            out = conv1(out, latent[:, i], noise=noise1)
-            out = conv2(out, latent[:, i + 1], noise=noise2)
-            skip = to_rgb(out, latent[:, i + 2], skip)
-
-            i += 2
-
-        image = skip
-
-        if return_latents:
-            return image, latent
-
-        else:
-            return image, None
-
-
-class ConvLayer(nn.Sequential):
-    def __init__(
-        self,
-        in_channel,
-        out_channel,
-        kernel_size,
-        downsample=False,
-        blur_kernel=[1, 3, 3, 1],
-        bias=True,
-        activate=True,
-    ):
-        layers = []
-
-        if downsample:
-            factor = 2
-            p = (len(blur_kernel) - factor) + (kernel_size - 1)
-            pad0 = (p + 1) // 2
-            pad1 = p // 2
-
-            layers.append(Blur(blur_kernel, pad=(pad0, pad1)))
-
-            stride = 2
-            self.padding = 0
-
-        else:
-            stride = 1
-            self.padding = kernel_size // 2
-
-        layers.append(
-            EqualConv2d(
-                in_channel,
-                out_channel,
-                kernel_size,
-                padding=self.padding,
-                stride=stride,
-                bias=bias and not activate,
-            )
-        )
-
-        if activate:
-            if bias:
-                layers.append(FusedLeakyReLU(out_channel))
-
-            else:
-                layers.append(ScaledLeakyReLU(0.2))
-
-        super().__init__(*layers)
-
-
-class ResBlock(nn.Module):
-    def __init__(self, in_channel, out_channel, blur_kernel=[1, 3, 3, 1]):
-        super().__init__()
-
-        self.conv1 = ConvLayer(in_channel, in_channel, 3)
-        self.conv2 = ConvLayer(in_channel, out_channel, 3, downsample=True)
-
-        self.skip = ConvLayer(
-            in_channel, out_channel, 1, downsample=True, activate=False, bias=False
-        )
-
-    def forward(self, input):
-        out = self.conv1(input)
-        out = self.conv2(out)
-
-        skip = self.skip(input)
-        out = (out + skip) / math.sqrt(2)
-
-        return out
-
-
-class Discriminator(nn.Module):
-    def __init__(self, size, channel_multiplier=2, blur_kernel=[1, 3, 3, 1]):
-        super().__init__()
-
-        channels = {
-            4: 512,
-            8: 512,
-            16: 512,
-            32: 512,
-            64: 256 * channel_multiplier,
-            128: 128 * channel_multiplier,
-            256: 64 * channel_multiplier,
-            512: 32 * channel_multiplier,
-            1024: 16 * channel_multiplier,
-        }
-
-        convs = [ConvLayer(3, channels[size], 1)]
-
-        log_size = int(math.log(size, 2))
-
-        in_channel = channels[size]
-
-        for i in range(log_size, 2, -1):
-            out_channel = channels[2 ** (i - 1)]
-
-            convs.append(ResBlock(in_channel, out_channel, blur_kernel))
-
-            in_channel = out_channel
-
-        self.convs = nn.Sequential(*convs)
-
-        self.stddev_group = 4
-        self.stddev_feat = 1
-
-        self.final_conv = ConvLayer(in_channel + 1, channels[4], 3)
-        self.final_linear = nn.Sequential(
-            EqualLinear(channels[4] * 4 * 4, channels[4], activation='fused_lrelu'),
-            EqualLinear(channels[4], 1),
-        )
-
-    def forward(self, input):
-        out = self.convs(input)
-
-        batch, channel, height, width = out.shape
-        group = min(batch, self.stddev_group)
-        stddev = out.view(
-            group, -1, self.stddev_feat, channel // self.stddev_feat, height, width
-        )
-        stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8)
-        stddev = stddev.mean([2, 3, 4], keepdims=True).squeeze(2)
-        stddev = stddev.repeat(group, 1, height, width)
-        out = torch.cat([out, stddev], 1)
-
-        out = self.final_conv(out)
-
-        out = out.view(batch, -1)
-        out = self.final_linear(out)
-
-        return out
-
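
A minimal sketch of driving the `Generator` and `Discriminator` defined above, assuming the module is importable as `model` (hypothetical path) and the custom CUDA ops in `models.StyleCLIP.models.stylegan2.op` are built:

    import torch
    from model import Generator, Discriminator  # hypothetical import path

    g = Generator(size=256, style_dim=512, n_mlp=8)
    d = Discriminator(size=256)

    z = torch.randn(2, 512)   # a batch of latent codes
    img, _ = g([z])           # styles are passed as a list; returns (image, latents)
    assert img.shape == (2, 3, 256, 256)
    score = d(img)            # -> shape (2, 1)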