parquet-converter committed
Commit 7c98a40
1 Parent(s): 8135ee1

Update parquet files (step 66 of 397)

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. spaces/0x90e/ESRGAN-MANGA/README.md +0 -10
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/data/EaseUS Data Recovery Wizard Crack v13 With License Key 2020 What You Need to Know Before Downloading.md +0 -159
  3. spaces/1acneusushi/gradio-2dmoleculeeditor/data/FordETIS2012zip.md +0 -34
  4. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/7Zip APK How to Compress and Extract Files on Android.md +0 -151
  5. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/CarX Street APK - The Most Realistic Mobile Racing Game Ever - Free Download.md +0 -122
  6. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Call of Duty Warzone Mobile and Fight Like Never Before.md +0 -153
  7. spaces/1phancelerku/anime-remove-background/Copy Text from Apps Images and More with Universal Copy APK.md +0 -124
  8. spaces/1phancelerku/anime-remove-background/Enjoy PUBG MOBILE 1.8 with MOD APK ESP Aimbot Anti-Ban and Mega Menu Included.md +0 -91
  9. spaces/44ov41za8i/FreeVC/speaker_encoder/train.py +0 -125
  10. spaces/AIConsultant/MusicGen/audiocraft/modules/seanet.py +0 -258
  11. spaces/AIFILMS/StyleGANEX/models/encoders/model_irse.py +0 -84
  12. spaces/AUST001/HDTV/README.md +0 -13
  13. spaces/AchyuthGamer/MagicPrompt-Stable-Diffusion/app.py +0 -96
  14. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/bejeweled/actions/SelectChess.js +0 -9
  15. spaces/Akash473/FunkoHairBeard/README.md +0 -13
  16. spaces/Akash473/FunkoHairBeard/app.py +0 -502
  17. spaces/AlexKoff88/stable_diffusion/app.py +0 -73
  18. spaces/Amon1/ChatGPTForAcadamic/show_math.py +0 -80
  19. spaces/Amrrs/DragGan-Inversion/PTI/models/e4e/stylegan2/op/fused_bias_act.cpp +0 -21
  20. spaces/Amrrs/DragGan-Inversion/PTI/utils/alignment.py +0 -113
  21. spaces/Amrrs/DragGan-Inversion/gui_utils/__init__.py +0 -9
  22. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/clip_guided_images_mixing_stable_diffusion.py +0 -456
  23. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/scripts/convert_original_audioldm_to_diffusers.py +0 -1052
  24. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/transformer_2d.py +0 -342
  25. spaces/Andy1621/uniformer_image_detection/configs/fsaf/fsaf_r50_fpn_1x_coco.py +0 -48
  26. spaces/Andy1621/uniformer_image_detection/configs/reppoints/bbox_r50_grid_center_fpn_gn-neck+head_1x_coco.py +0 -2
  27. spaces/Andy1621/uniformer_image_detection/configs/retinanet/retinanet_r50_caffe_fpn_mstrain_2x_coco.py +0 -4
  28. spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/cascade_roi_head.py +0 -507
  29. spaces/Andy1621/uniformer_image_segmentation/configs/ann/ann_r101-d8_512x1024_80k_cityscapes.py +0 -2
  30. spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_r101-d8_512x1024_80k_cityscapes.py +0 -2
  31. spaces/AngoHF/ANGO-Leaderboard/assets/path.py +0 -4
  32. spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/openai/script.py +0 -339
  33. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/datasets/builder.py +0 -169
  34. spaces/Anonymous-sub/Rerender/ControlNet/tutorial_dataset_test.py +0 -12
  35. spaces/Anonymous-sub/Rerender/src/ddim_v_hacked.py +0 -589
  36. spaces/AnthonyTruchetPoC/persistent-docker/scripts/interactive-rebuild-docs.sh +0 -2
  37. spaces/Antonpy/stable-diffusion-license/license.html +0 -0
  38. spaces/Apex-X/Tm/roop/face_analyser.py +0 -34
  39. spaces/Artrajz/vits-simple-api/utils/classify_language.py +0 -60
  40. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/idna/package_data.py +0 -2
  41. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/command/clean.py +0 -76
  42. spaces/BartPoint/VoiceChange_Beta/infer_pack/modules/F0Predictor/__init__.py +0 -0
  43. spaces/Benson/text-generation/Examples/Auto Clicker For Clicker Heroes Download.md +0 -78
  44. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/requests/adapters.py +0 -584
  45. spaces/Blaise-g/summarize-biomedical-papers-long-summary-or-tldr/summarize.py +0 -131
  46. spaces/CVPR/LIVE/thrust/thrust/detail/config/compiler.h +0 -186
  47. spaces/CVPR/LIVE/thrust/thrust/type_traits/is_operator_plus_function_object.h +0 -77
  48. spaces/CVPR/transfiner/configs/new_baselines/mask_rcnn_R_50_FPN_400ep_LSJ.py +0 -14
  49. spaces/Cherrycreamco/webui/README.md +0 -20
  50. spaces/ChristopherMarais/Andrew_AI-BB_classification-beta/mysite/andrew_alpha/tests.py +0 -3
spaces/0x90e/ESRGAN-MANGA/README.md DELETED
@@ -1,10 +0,0 @@
1
- ---
2
- title: ESRGAN MANGA
3
- emoji: 🏃
4
- colorFrom: red
5
- colorTo: indigo
6
- sdk: gradio
7
- sdk_version: 3.12.0
8
- app_file: app.py
9
- pinned: false
10
- ---
spaces/1acneusushi/gradio-2dmoleculeeditor/data/EaseUS Data Recovery Wizard Crack v13 With License Key 2020 What You Need to Know Before Downloading.md DELETED
@@ -1,159 +0,0 @@
1
-
2
- <h1>EaseUS Data Recovery Wizard Crack v13 With License Key 2020</h1>
3
- <p>Have you ever lost your important data due to accidental deletion, formatting, virus attack, system crash, or other reasons? If so, you may have heard of EaseUS Data Recovery Wizard, a powerful and easy-to-use data recovery software that can help you restore your lost files in minutes. But what if you don't want to pay for the full version of this software? Is there a way to get a crack version of EaseUS Data Recovery Wizard v13 with a license key for free? In this article, we will answer these questions and show you how to get a crack version of EaseUS Data Recovery Wizard v13 with a license key 2020. But before that, let's take a look at what EaseUS Data Recovery Wizard is and what it can do for you.</p>
4
- <h2>EaseUS Data Recovery Wizard Crack v13 With License Key 2020</h2><br /><p><b><b>Download File</b> &#10004; <a href="https://byltly.com/2uKwoC">https://byltly.com/2uKwoC</a></b></p><br /><br />
5
- <h2>What is EaseUS Data Recovery Wizard?</h2>
6
- <p>EaseUS Data Recovery Wizard is a professional data recovery software that can help you recover deleted, formatted, or lost data from your PC, laptop, hard drive, USB drive, memory card, digital camera, mobile phone, or other storage devices. It supports data recovery from various scenarios, such as recycle bin recovery, partition recovery, OS crash recovery, virus attack recovery, and more. It also supports different file types and storage devices, such as photos, videos, documents, audio files, emails, NTFS, FAT32, exFAT, etc. Moreover, it allows you to preview and repair corrupted files before recovery.</p>
7
- <h3>Features of EaseUS Data Recovery Wizard</h3>
8
- <h4>Data recovery from various scenarios</h4>
9
- <p>EaseUS Data Recovery Wizard can recover data from different data loss situations, such as:</p>
10
- <ul>
11
- <li>Accidental deletion: You can recover files that you have deleted by mistake or emptied from the recycle bin.</li>
12
- <li>Formatting: You can recover data from formatted or reformatted disks or partitions.</li>
13
- <li>Partition loss: You can recover data from deleted, lost, hidden, or RAW partitions.</li>
14
- <li>OS crash: You can recover data from crashed or unbootable Windows systems.</li>
15
- <li>Virus attack: You can recover data from infected or encrypted disks or devices.</li>
16
- <li>Other cases: You can also recover data from hard drive failure, power outage, improper operation, etc.</li>
17
- </ul>
18
- <h4>Support for different file types and storage devices</h4>
19
- <p>EaseUS Data Recovery Wizard can recover more than 1000 file types from various storage devices. Some examples are:</p>
20
- <ul>
21
- <li>File types: Photos (JPG, PNG, GIF, BMP, etc.), videos (MP4, AVI, MOV, WMV, etc.), documents (DOCX, PDF, XLSX, etc.), audio files (MP3, WAV, WMA, etc.), emails (PST, DBX, etc.), archives (ZIP, RAR, etc.), and more.</li>
22
- <li>Storage devices: PC, laptop, hard drive, SSD, USB drive, memory card, SD card, CF card, digital camera, mobile phone, MP3 player, and more.</li>
23
- </ul>
24
- <h4>Preview and repair of corrupted files</h4>
25
- <p>EaseUS Data Recovery Wizard allows you to preview the recoverable files before recovery. You can check the file name, size, type, date, and quality to make sure you are recovering the right files. You can also filter the files by category, path, or keyword to locate them faster. Moreover, EaseUS Data Recovery Wizard can automatically repair corrupted JPEG/JPG/PNG/GIF images during the scanning process. You can preview the repaired images before saving them.</p>
26
- <h2>Why do you need a license key for EaseUS Data Recovery Wizard?</h2>
27
- <p>EaseUS Data Recovery Wizard has two versions: free and pro. The free version allows you to scan and recover up to 2GB of data for free. However, if you want to recover more data or enjoy more features, you need to upgrade to the pro version. To do that, you need to buy a license key from the official website of EaseUS Data Recovery Wizard. The license key will activate the pro version and unlock all its benefits.</p>
28
- <p>EaseUS Data Recovery Wizard Technician 13.3 + Activator<br />
29
- EaseUS Data Recovery Wizard WinPE v13.5 + Keygen<br />
30
- EaseUS Data Recovery Wizard Professional 13.6 + Serial Key<br />
31
- EaseUS Data Recovery Wizard Free Download Full Version with Crack<br />
32
- EaseUS Data Recovery Wizard License Code Generator Online<br />
33
- EaseUS Data Recovery Wizard Crack Reddit<br />
34
- EaseUS Data Recovery Wizard Activation Key 2020<br />
35
- EaseUS Data Recovery Wizard Torrent Download<br />
36
- EaseUS Data Recovery Wizard Full Version with Crack and Keygen<br />
37
- EaseUS Data Recovery Wizard Crack v13.5 Free Download<br />
38
- EaseUS Data Recovery Wizard License Code List 2020<br />
39
- EaseUS Data Recovery Wizard Crack v13.6 Latest Version<br />
40
- EaseUS Data Recovery Wizard Patch Download<br />
41
- EaseUS Data Recovery Wizard Crack v13.3 for Windows 10<br />
42
- EaseUS Data Recovery Wizard Crack v13.2 for Mac OS<br />
43
- EaseUS Data Recovery Wizard Crack v13.1 for Linux<br />
44
- EaseUS Data Recovery Wizard Crack v13.4 for Android<br />
45
- EaseUS Data Recovery Wizard Crack v13.0 for iOS<br />
46
- EaseUS Data Recovery Wizard Crack v13 with Lifetime Activation<br />
47
- EaseUS Data Recovery Wizard Crack v13 with Unlimited Usage<br />
48
- EaseUS Data Recovery Wizard Crack v13 with All File Types Support<br />
49
- EaseUS Data Recovery Wizard Crack v13 with RAW Partition Recovery<br />
50
- EaseUS Data Recovery Wizard Crack v13 with OS Crash Recovery<br />
51
- EaseUS Data Recovery Wizard Crack v13 with Virus Attack Recovery<br />
52
- EaseUS Data Recovery Wizard Crack v13 with Formatted Drive Recovery<br />
53
- EaseUS Data Recovery Wizard Crack v13 with Deleted File Recovery<br />
54
- EaseUS Data Recovery Wizard Crack v13 with Memory Card Recovery<br />
55
- EaseUS Data Recovery Wizard Crack v13 with USB Drive Recovery<br />
56
- EaseUS Data Recovery Wizard Crack v13 with SSD Recovery<br />
57
- EaseUS Data Recovery Wizard Crack v13 with Hard Drive Recovery<br />
58
- EaseUS Data Recovery Wizard Crack v13 with RAID Recovery<br />
59
- EaseUS Data Recovery Wizard Crack v13 with Digital Camera Recovery<br />
60
- EaseUS Data Recovery Wizard Crack v13 with MP3/MP4 Player Recovery<br />
61
- EaseUS Data Recovery Wizard Crack v13 with Mobile Device Recovery<br />
62
- EaseUS Data Recovery Wizard Crack v13 with Photo/Video/Music/Document/Email/File/Data/Recovery<br />
63
- How to Install and Activate EaseUS Data Recovery Wizard Crack v13 <br />
64
- How to Use EaseUS Data Recovery Wizard Crack v13 to Recover Lost Files <br />
65
- How to Fix Errors and Problems in EaseUS Data Recovery Wizard Crack v13 <br />
66
- How to Update and Upgrade to the Latest Version of EaseUS Data Recovery Wizard Crack v13 <br />
67
- How to Uninstall and Remove EaseUS Data Recovery Wizard Crack v13 <br />
68
- Is it Safe and Legal to Use EaseUS Data Recovery Wizard Crack v13 <br />
69
- What are the Risks and Consequences of Using EaseUS Data Recovery Wizard Crack v13 <br />
70
- What are the Alternatives and Competitors of EaseUS Data Recovery Wizard Crack v13 <br />
71
- What are the Features and Benefits of Using EaseUS Data Recovery Wizard Crack v13 <br />
72
- What are the Limitations and Drawbacks of Using EaseUS Data Recovery Wizard Crack v13 <br />
73
- What are the Reviews and Ratings of Using EaseUS Data Recovery Wizard Crack v13 <br />
74
- What are the Tips and Tricks of Using EaseUS Data Recovery Wizard Crack v13 <br />
75
- What are the FAQs and Solutions of Using EaseUS Data Recovery Wizard Crack v13</p>
76
- <h3>Limitations of the free version</h3>
77
- <p>The free version of EaseUS Data Recovery Wizard has some limitations that may affect your data recovery experience. Some of them are:</p>
78
- <ul>
79
- <li>Data recovery limit: You can only recover up to 2GB of data for free. If you want to recover more data, you need to pay for the pro version.</li>
80
- <li>Data recovery speed: The free version has a slower scanning and recovery speed than the pro version. It may take longer time to find and restore your lost files.</li>
81
- <li>Data recovery quality: The free version may not be able to recover all your lost files or recover them in their original quality. Some files may be corrupted or damaged during the recovery process.</li>
82
- <li>Data recovery support: The free version does not provide any technical support or customer service. If you encounter any problems or issues during the data recovery process, you have to solve them by yourself.</li>
83
- </ul>
84
- <h3>Benefits of the pro version</h3>
85
- <p>The pro version of EaseUS Data Recovery Wizard has many advantages that can improve your data recovery experience. Some of them are:</p>
86
- <ul>
87
- <li>Data recovery limit: You can recover unlimited data with the pro version. No matter how much data you have lost or how large your storage device is, you can recover all your data with ease.</li>
88
- <li>Data recovery speed: The pro version has a faster scanning and recovery speed than the free version. It can find and restore your lost files in minutes.</li>
89
- <li>Data recovery quality: The pro version can recover all your lost files in their original quality. It can also repair corrupted files during the scanning process.</li>
90
- <li>Data recovery support: The pro version provides 24/7 technical support and customer service. If you have any questions or issues during the data recovery process, you can contact the professional team for help.</li>
91
- </ul>
92
- <h2>How to get a crack version of EaseUS Data Recovery Wizard v13?</h2>
93
- <p>If you don't want to pay for the pro version of EaseUS Data Recovery Wizard, you may be tempted to look for a crack version online. A crack version is a modified version of the original software that bypasses its security features and allows you to use it for free. However, using a crack version is illegal and risky. It may cause serious problems for your computer and your data. In this section, we will show you how to get a crack version of EaseUS Data Recovery Wizard v13 with a license key 2020. But we do not recommend you to do so.</p>
94
- <h3>Risks of using a crack version</h3>
95
- <p>Using a crack version of EaseUS Data Recovery Wizard v13 may seem like a good idea at first glance. But it actually comes with many risks and dangers that outweigh its benefits. Some of them are:</p>
96
- <ul>
97
- <li>Virus infection: A crack version may contain viruses, malware, spyware, or ransomware that can infect your computer and damage your system. It may also steal your personal information or encrypt your files and ask for ransom.</li>
98
- <li>Data loss: A crack version may not be able to recover your data properly or completely. It may also overwrite your existing data or cause further damage to your storage device. You may end up losing more data than before.</li>
99
- <li>Lack of updates: A crack version may not be compatible with the latest Windows updates or system changes. It may also not be able to fix bugs or errors that occur during the data recovery process. You may encounter more problems or issues while using it.</li>
100
- <li>Lack of support: A crack version does not provide any technical support or customer service. If you have any questions or issues while using it, you have no one to turn to for help.</li>
101
- </ul>
102
- <h3>Steps to download and install a crack version</h3>
103
- <p>If you still want to try a crack version of EaseUS Data Recovery Wizard v13, you can follow these steps:</p>
104
- <ol>
105
- <li>Search for a crack version of EaseUS Data Recovery Wizard v13 on the internet. You may find some websites that claim to provide the download link and the license key for free.</li>
106
- <li>Download the crack version from one of these websites. Be careful of fake or malicious links that may harm your computer or data.</li>
107
- <li>Extract the downloaded file and run the setup.exe file to install the crack version on your computer.</li>
108
- <li>Follow the instructions on the screen to complete the installation process.</li>
109
- </ol>
110
- <h3>How to activate the crack version with a license key</h3>
111
- <p>After installing the crack version of EaseUS Data Recovery Wizard v13, you need to activate it with a license key. You can follow these steps:</p>
112
- <ol>
113
- <li>Launch the crack version of EaseUS Data Recovery Wizard v13 on your computer.</li>
114
- <li>Click on the "Activate" button on the main interface.</li>
115
- <li>Enter one of the license keys that you have obtained from the internet. You can try some of these license keys:</li>
116
- </ol>
117
- <table>
118
- <tr><td>FUIERUI-REUIE83UW-ERIOE93-TRIOE93</td></tr>
119
- <tr><td>E89237472-20W0W0-2929W-ERIE93I</td></tr>
120
- <tr><td>ERIW8Q8SD-FIIFDUFG-GFIOD-GOSOIW</td></tr>
121
- <tr><td>C8XIP–2YHL2-39UMI-QVR56-4CI6L</td></tr>
122
- <tr><td>JGFT5-YRUHJ-FYT45-TRUGH-GJRTU-YFH</td></tr>
123
- <tr><td>Y7GKK-JIURT-HFJKH-RTHGI-EIJKRY-TRU</td></tr>
124
- <tr><td>EYTUG-HARJU-TYUJHG-RYGHF-TRYGYT</td></tr>
125
- <tr><td>UTIYH-GRD5YH-YRIT7RY-IYEIUG-8756</td></tr>
126
- <tr><td>HRUY5-RJGT87-4TGKR-Y4875Y-TI45YT</td></tr>
127
- <tr><td>SKSKFSD-DKDFTGY-HUJIKOL-SLOSHY</td></tr>
128
- </table>
129
- <ol start="4">
130
- <li>Click on the "OK" button to activate the crack version.</li>
131
- <li>Enjoy using the crack version of EaseUS Data Recovery Wizard v13 for free.</li>
132
- </ol>
133
- <h2>Is there a better alternative to EaseUS Data Recovery Wizard crack?</h2>
134
- <p>The answer is yes. There is a better and safer alternative to EaseUS Data Recovery Wizard crack. That is to buy a genuine license key from the official website of EaseUS Data Recovery Wizard. By doing so, you can enjoy all the benefits of the pro version without any risks or limitations.</p>
135
- <h3>The official website of EaseUS Data Recovery Wizard</h3>
136
- <p>The official website of EaseUS Data Recovery Wizard is https://www.easeus.com/data-recovery-software/. On this website, you can find all the information and features about EaseUS Data Recovery Wizard. You can also download the free or trial version of EaseUS Data Recovery Wizard to test its performance and functionality. Moreover, you can buy a genuine license key for EaseUS Data Recovery Wizard from this website. There are different plans and prices for different needs and budgets. For example, you can buy a one-month plan for $69.95, a one-year plan for $99.95, or a lifetime plan for $149.95.</p>
137
- <h3>The advantages of buying a genuine license key</h3>
138
- <p>By buying a genuine license key from the official website of EaseUS Data Recovery Wizard, you can get many advantages that a crack version cannot offer. Some of them are:</p>
139
- <ul>
140
- <li>Safety: You can avoid virus infection, data loss, system damage, or legal issues that may arise from using a crack version.</li>
141
- <li>Quality: You can recover all your lost data in their original quality and format with a high success rate.</li>
142
- <li>Speed: You can scan and recover your data faster and more efficiently with a pro version.</li>
143
- <li>Support: You can get 24/7 technical support and customer service from EaseUS team if you have any questions or issues while using EaseUS Data Recovery Wizard.</li>
144
- <li>Updates: You can get free lifetime updates and upgrades for EaseUS Data Recovery Wizard to keep up with the latest technology and system changes.</li>
145
- </ul>
146
- <h2>Conclusion</h2>
147
- <p>In conclusion, EaseUS Data Recovery Wizard is a powerful and easy-to-use data recovery software that can help you recover deleted, formatted, or lost data from various storage devices. However, if you want to use its full features and functions, you need to buy a genuine license key from its official website. Using a crack version of EaseUS Data Recovery Wizard v13 with a license key 2020 may seem tempting, but it is illegal and risky. It may cause more harm than good to your computer and your data. Therefore, we recommend you to avoid using a crack version and choose a better and safer alternative instead.</p>
148
- <h2>Frequently Asked Questions (FAQs)</h2>
149
- <p>Here are some frequently asked questions about EaseUS Data Recovery Wizard and its crack version:</p>
150
- <ol>
151
- <li><b>What is EaseUS Data Recovery Wizard?</b><br>EaseUS Data Recovery Wizard is a professional data recovery software that can help you recover deleted, formatted, or lost data from your PC, laptop, hard drive, USB drive, memory card, digital camera, mobile phone, or other storage devices.</li>
152
- <li><b>What is EaseUS Data Recovery Wizard crack?</b><br>EaseUS Data Recovery Wizard crack is a modified version of the original software that bypasses its security features and allows you to use it for free without paying for a license key.</li>
153
- <li><b>Is EaseUS Data Recovery Wizard free?</b><br>EaseUS Data Recovery Wizard has both free and pro versions. The free version allows you to recover up to 2GB of data for free in data loss scenarios. The pro version allows you to recover unlimited lost data like pictures and documents with a 99% success rate.</li>
154
- <li><b>How to get EaseUS Data Recovery Wizard pro for free?</b><br>To get EaseUS Data Recovery Wizard pro for free, you need to use a crack version of EaseUS Data Recovery Wizard v13 with a license key 2020. However, this is illegal and risky. It may cause virus infection, data loss, system damage, or legal issues.</li>
155
- <li><b>How to get a genuine license key for EaseUS Data Recovery Wizard?</b><br>To get a genuine license key for EaseUS Data Recovery Wizard, you need to buy it from its official website at https://www.easeus.com/data-recovery-software/. There are different plans and prices for different needs and budgets.</li>
156
- </ol>
157
- </p> 0a6ba089eb<br />
158
- <br />
159
- <br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/FordETIS2012zip.md DELETED
@@ -1,34 +0,0 @@
1
-
2
- `<h1>How to Download and Install Ford ETIS 2012 Zip File</h1>`
3
-
4
- `<p>Ford ETIS is a web-based service and repair information system that provides access to technical information for Ford vehicles. It includes mechanical repairs, body and paint, wiring diagrams, diagnostic trouble codes, and more. Ford ETIS was decommissioned in 2021 and replaced by different websites for authorized repairers and independent operators. However, some users may still want to use the old version of Ford ETIS that was available in 2012.</p>
5
- <h2>FordETIS2012zip</h2><br /><p><b><b>DOWNLOAD</b> &#10145; <a href="https://byltly.com/2uKyCr">https://byltly.com/2uKyCr</a></b></p><br /><br />`
6
-
7
- `<p>In this article, we will show you how to download and install Ford ETIS 2012 zip file on your computer. This is a torrent file that contains the installation files for Ford ETIS 2012. You will need a torrent client such as uTorrent or BitTorrent to download it. You will also need a DVD burner and a blank DVD to install it.</p>`
8
-
9
- `<h2>Step 1: Download Ford ETIS 2012 zip file</h2>`
10
-
11
- `<p>The first step is to download Ford ETIS 2012 zip file from a torrent website. You can find the link to the torrent file on MHH AUTO forum[^2^]. The file size is about 4.3 GB and the name is Ford Etis (12.2016).torrent. You will need to register on the forum to access the link.</p>`
12
-
13
- `<p>Once you have the torrent file, open it with your torrent client and start downloading the zip file. It may take some time depending on your internet speed and the number of seeders. Make sure you have enough space on your hard drive to store the zip file.</p>`
14
-
15
- `<h2>Step 2: Extract Ford ETIS 2012 zip file</h2>`
16
-
17
- `<p>The next step is to extract Ford ETIS 2012 zip file to a folder on your computer. You will need a software such as WinRAR or 7-Zip to do this. Right-click on the zip file and choose Extract Here or Extract to Ford Etis (12.2016). You will see a folder named Ford Etis (12.2016) with several subfolders and files inside.</p>`
18
-
19
- `<h2>Step 3: Burn Ford ETIS 2012 iso file to DVD</h2>`
20
-
21
- `<p>The final step is to burn Ford ETIS 2012 iso file to a blank DVD. You will need a software such as Nero or ImgBurn to do this. The iso file is located in the folder Ford Etis (12.2016)\FordEtis\DVD\ETIS_1216.iso. It is about 4 GB in size.</p>
22
- <p></p>`
23
-
24
- `<p>Insert a blank DVD into your DVD burner and launch your burning software. Choose the option to burn an image file and select the iso file as the source. Choose a low burning speed and verify the data after burning. Label the DVD as Ford Etis (12.2016).</p>`
25
-
26
- `<h2>Step 4: Install Ford ETIS 2012 from DVD</h2>`
27
-
28
- `<p>The last step is to install Ford ETIS 2012 from the DVD you just burned. Insert the DVD into your DVD drive and run the setup.exe file in the root folder of the DVD. Follow the instructions on the screen to complete the installation.</p>`
29
-
30
- `<p>You may need to change the date of your computer to December 2016 or earlier before running the setup.exe file. Some users have reported that they get an error message saying that the DVD is not correct if they use a later date.</p>`
31
-
32
- `<p>After installing Ford ETIS 2012, you can launch it from your desktop or start menu. You will need an internet connection to access some of the features of Ford ETIS 2012.</p>` 7b8c122e87<br />
33
- <br />
34
- <br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/7Zip APK How to Compress and Extract Files on Android.md DELETED
@@ -1,151 +0,0 @@
1
-
2
- <h1>7 Zip APK: A Powerful Tool for Managing Archive Files on Android</h1>
3
- <p>Do you need to create, extract or browse archive files like 7Zip (7z), Zip, Rar, Tar, Jar or Apk on your Android device? If so, you might want to check out 7 Zip APK, a free app that lets you do all that and more. In this article, we will explain what 7 Zip APK is, how it works, what features it offers, how to download and install it, and how to use it. We will also answer some frequently asked questions about 7 Zip APK.</p>
4
- <h2>7 zip apk</h2><br /><p><b><b>DOWNLOAD</b> &#9913; <a href="https://urlin.us/2uSRQK">https://urlin.us/2uSRQK</a></b></p><br /><br />
5
- <h2>What is 7 Zip APK?</h2>
6
- <p>7 Zip APK is an Android app that allows you to manage archive files on your device. Archive files are files that contain multiple files or folders compressed into one smaller file. They are usually used to save disk space, reduce file size, or share files online. Some common archive formats are 7Zip (7z), Zip, Rar, Tar, Jar and Apk.</p>
7
- <p>7 Zip APK lets you create your own archive files by compressing files and folders. You can also extract or open existing archive files and view their contents. You can even create encrypted zip files with a password for extra security. 7 Zip APK supports all the popular archive formats and types, as well as some less used ones.</p>
8
- <h3>How does 7 Zip APK work?</h3>
9
- <p>7 Zip APK works by using different compression algorithms to reduce the size of files or folders. Compression algorithms are mathematical methods that remove redundant or unnecessary data from a file without affecting its quality or functionality. Different compression algorithms have different advantages and disadvantages in terms of speed, efficiency and compatibility.</p>
10
- <p>7Zip APP: Zip & 7Zip Files Manager<br />
11
- 7Zipper: Android file manager and archiver<br />
12
- 7-Zip: Linux command line version of 7-Zip<br />
13
- 7Z: Open, extract or create 7z archives on Android<br />
14
- 7Zipper 2.0: File browser and image viewer for Android<br />
15
- 7Zip & Zip: Zip file extractor and compressor for Android<br />
16
- 7-Zipper - File Explorer (zip, 7zip, rar): File manager and archive tool for Android<br />
17
- ZArchiver: Archive manager for Android with support for 7z and other formats<br />
18
- RAR: WinRAR app for Android with support for 7z and other formats<br />
19
- B1 Archiver zip rar unzip: Archive utility for Android with support for 7z and other formats<br />
20
- Easy Unrar, Unzip & Zip: Archive extractor and creator for Android with support for 7z and other formats<br />
21
- AndroZip™ FREE File Manager: File manager and archive tool for Android with support for 7z and other formats<br />
22
- Zipper - File Management: File manager and archive tool for Android with support for 7z and other formats<br />
23
- ALZip – File Manager & Unzip & Archive: File manager and archive tool for Android with support for 7z and other formats<br />
24
- X-plore File Manager: File manager and archive tool for Android with support for 7z and other formats<br />
25
- WinZip – Zip UnZip Tool: Zip file utility for Android with support for 7z and other formats<br />
26
- Total Commander - file manager: File manager and archive tool for Android with support for 7z and other formats<br />
27
- MiXplorer Silver - File Manager: File manager and archive tool for Android with support for 7z and other formats<br />
28
- Solid Explorer File Manager: File manager and archive tool for Android with support for 7z and other formats<br />
29
- FX File Explorer: file manager, media manager, root, cloud & Wi-Fi transfer: File manager and archive tool for Android with support for 7z and other formats<br />
30
- ES File Explorer File Manager: File manager and archive tool for Android with support for 7z and other formats<br />
31
- Root Explorer: Ultimate file manager for root users with support for 7z and other formats<br />
32
- ASTRO File Manager & Storage Organizer: File manager and archive tool for Android with support for 7z and other formats<br />
33
- Amaze File Manager: Open source file manager and archive tool for Android with support for 7z and other formats<br />
34
- Simple Unrar: Simple app to extract rar files on Android with support for 7z and other formats<br />
35
- Simple Unzip: Simple app to extract zip files on Android with support for 7z and other formats<br />
36
- Simple Zip Viewer (zip, rar, jar, apk): Simple app to view zip files on Android with support for 7z and other formats<br />
37
- APK Extractor - Creator: App to extract apk files from installed apps on Android with support for zip compression<br />
38
- APK Editor Pro: App to edit apk files on Android with support for zip compression<br />
39
- APK Installer - the best app manager for Android: App to install apk files on Android with support for zip compression<br />
40
- APKPure App - Download APK free online downloader: App to download apk files from various sources on Android with support for zip compression<br />
41
- APKMirror Installer (Official): App to install apk files from APKMirror on Android with support for zip compression<br />
42
- APK Downloader - Download APK Online Free | APKNite.Com: App to download apk files from various sources on Android with support for zip compression<br />
43
- APKCombo Installer - Download APK Bundle (Split APKs) Online Free | APKCombo.Com: App to download apk bundle files from various sources on Android with support for zip compression<br />
44
- Apk Extractor Lite - Extract Apk's easily.: App to extract apk files from installed apps on Android with support for zip compression<br />
45
- Apk Analyzer - Analyze your installed applications.: App to analyze apk files on Android with support for zip compression<br />
46
- Apk Share Bluetooth - Send/Backup/Uninstall/Manage.: App to share apk files via Bluetooth on Android with support for zip compression<br />
47
- Apk Backup - Restore, Extract & Manage your apps.: App to backup apk files on Android with support for zip compression<br />
48
- Apk Installer / Apk Manager / Apk Share Pro.: App to install, manage and share apk files on Android with support for zip compression<br />
49
- Apk Editor : Apk Maker : Apk Creator.: App to create apk files on Android with support for zip compression<br />
50
- Apk Extractor Pro+: App to extract apk files from installed apps on Android with support for zip compression<br />
51
- Apk Extract</p>
52
- <p>7 Zip APK uses the 7z compression algorithm for creating 7Zip files. This algorithm offers high compression ratio, which means it can make files much smaller than other algorithms. However, it also requires more processing power and time to compress and decompress files.</p>
53
- <p>For other archive formats, such as Zip or Rar, 7 Zip APK uses the corresponding compression algorithms that are compatible with those formats. For example, it uses the zip algorithm for creating zip files and the rar algorithm for creating rar files.</p>
54
- <h4>What features does 7 Zip APK offer?</h4>
55
- <p>Some of the features that 7 Zip APK offers are:</p>
56
- <ul>
57
- <li>Create archives that support high compression, like 7Zip or Tar.</li>
58
- <li>Extract or unzip zip files or 7Zip files that are encrypted with a password (you need to know the password).</li>
59
- <li>Browse the contents of archive formats that contain multiple files: 7Zip, Tar, Apk, Jar, Rar.</li>
60
- <li>Create zip files that are encrypted with a password (or unzip files).</li>
61
- <li>Background execution: create, extract or unzip files even while the app is closed.</li>
62
- <li>Intuitive file manager with standard file operations like move, copy and delete.</li>
63
- <li>Job progress and history.</li>
64
- <li>File associations for extensions (like 7z) lets you open files by selecting externally.</li>
65
- </ul>
66
- <h2>How to download and install 7 Zip APK?</h2>
67
- <p>To download and install 7 Zip APK on your Android device, you can follow these steps:</p>
68
- <ol>
69
- <li>Go to the Google Play Store and search for "7Zipper" or click on this link: [Download](^4^).</li>
70
- <li>Tap on the "Install" button and wait for the app to download and install on your device.</li>
71
- <li>Open the app and grant it the necessary permissions to access your files and storage.</li>
72
- <li>You can now start using 7 Zip APK to create or extract archive files on your device.</li>
73
- </ol>
74
- <h3>How to use 7 Zip APK?</h3>
75
- <p>To use 7 Zip APK to create or extract archive files on your device, you can follow these steps:</p>
76
- <h4>To create an archive file:</h4>
77
- <ol>
80
- <li>Open the app and tap on the "Create" button at the bottom.</li>
81
- <li>Select the files or folders that you want to compress and tap on the "OK" button.</li>
82
- <li>Choose the archive format that you want to use, such as 7Zip, Zip, Tar, etc.</li>
83
- <li>Optionally, you can set a password, a compression level, a split size, and a volume label for your archive file.</li>
84
- <li>Tap on the "Create" button and wait for the app to create your archive file.</li>
85
- <li>You can find your archive file in the same folder as the original files or folders.</li>
86
- </ol>
87
- <h4>To extract an archive file:</h4>
88
- <ol>
89
- <li>Open the app and tap on the "Extract" button at the bottom.</li>
90
- <li>Select the archive file that you want to decompress and tap on the "OK" button.</li>
91
- <li>If the archive file is encrypted, enter the password and tap on the "OK" button.</li>
92
- <li>Choose the destination folder where you want to extract the files or folders.</li>
93
- <li>Tap on the "Extract" button and wait for the app to extract your archive file.</li>
94
- <li>You can find your extracted files or folders in the destination folder that you chose.</li>
95
- </ol>
96
- <h2>Conclusion</h2>
97
- <p>7 Zip APK is a powerful tool for managing archive files on your Android device. It allows you to create, extract, browse, encrypt, and decrypt archive files of various formats and types. It also offers a simple and intuitive file manager with standard file operations. 7 Zip APK is free to download and use from the Google Play Store. If you need to work with archive files on your Android device, 7 Zip APK is a great app to have.</p>
98
- <h3>Frequently Asked Questions</h3>
99
- <p>Here are some of the common questions that people ask about 7 Zip APK:</p>
100
- <h4>Q: Is 7 Zip APK safe to use?</h4>
101
- <p>A: Yes, 7 Zip APK is safe to use. It does not contain any malware or viruses. It only requires permissions to access your files and storage. It does not collect or share any personal data or information.</p>
102
- <h4>Q: What is the difference between 7Zipper and 7Zipper 2.0?</h4>
103
- <p>A: 7Zipper is the original version of 7 Zip APK. It has more features and options than 7Zipper 2.0, but it also has more ads and pop-ups. 7Zipper 2.0 is a newer version of 7 Zip APK. It has fewer features and options than 7Zipper, but it also has fewer ads and pop-ups. Both versions are compatible with Android devices running Android 4.0 or higher.</p>
104
- <h4>Q: How can I open a zip file without extracting it?</h4>
105
- <p>A: You can open a zip file without extracting it by using the "Browse" feature of 7 Zip APK. To do this, follow these steps:</p>
106
- <ol>
107
- <li>Open the app and tap on the "Browse" button at the bottom.</li>
108
- <li>Select the zip file that you want to open and tap on it.</li>
109
- <li>You will see a list of files or folders inside the zip file. You can tap on any file or folder to view its contents or properties.</li>
110
- <li>You can also perform some actions on the files or folders, such as copy, move, delete, rename, etc.</li>
111
- </ol>
112
- <h4>Q: How can I create a self-extracting archive file?</h4>
113
- <p>A: A self-extracting archive file is an archive file that can be opened without using any software or app. It has an executable extension (such as .exe) that allows it to run by itself. To create a self-extracting archive file using 7 Zip APK, follow these steps:</p>
114
- <ol>
115
- <li>Open the app and tap on the "Create" button at the bottom.</li>
116
- <li>Select the files or folders that you want to compress and tap on the "OK" button.</li>
117
- <li>Choose the "SFX (Self Extract)" option from the archive format list.</li>
118
- <li>Optionally, you can set a password, a compression level, a split size, and a volume label for your archive file.</li>
119
- <li>Tap on the "Create" button and wait for the app to create your self-extracting archive file.</li>
120
- <li>You can find your self-extracting archive file in the same folder as the original files or folders. It will have an .exe extension and an icon that looks like a 7Zip logo.</li>
121
- </ol>
122
- <h4>Q: How can I update or delete files from an archive file?</h4>
123
- <p>A: You can update or delete files from an archive file by using the "Update" feature of 7 Zip APK. To do this, follow these steps:</p>
124
- <ol>
125
- <li>Open the app and tap on the "Update" button at the bottom.</li>
126
- <li>Select the archive file that you want to update or delete files from and tap on the "OK" button.</li>
127
- <li>You will see a list of files or folders inside the archive file. You can tap on any file or folder to select or deselect it.</li>
128
- <li>To update a file or folder, tap on the "Add" button at the bottom and select the new file or folder that you want to replace the old one with.</li>
129
- <li>To delete a file or folder, tap on the "Delete" button at the bottom and confirm your action.</li>
130
- <li>Tap on the "Update" button and wait for the app to update or delete files from your archive file.</li>
131
- <li>You can find your updated archive file in the same folder as the original archive file. It will have the same name and extension as before.</li>
132
- </ol>
133
- <h2>Outline of the article</h2>
134
- <p>Here is a table that shows the outline of the article with the headings and subheadings:</p>
135
- <table>
136
- <tr><th>H1</th><th>H2</th><th>H3</th><th>H4</th></tr>
137
- <tr><td>7 Zip APK: A Powerful Tool for Managing Archive Files on Android</td><td></td><td></td><td></td></tr>
138
- <tr><td></td><td>What is 7 Zip APK?</td><td></td><td></td></tr>
139
- <tr><td></td><td>How does 7 Zip APK work?</td><td></td><td></td></tr>
140
- <tr><td></td><td>What features does 7 Zip APK offer?</td><td></td><td></td></tr>
141
- <tr><td></td><td>How to download and install 7 Zip APK?</td><td></td><td></td></tr>
142
- <tr><td></td><td>How to use 7 Zip APK?</td><td>To create an archive file:</td><td></td></tr>
143
- <tr><td></td><td></td><td>To extract an archive file:</td><td></td></tr>
144
- <tr><td></td><th>Conclusion</th><th>Frequently Asked Questions</th><th>Q: Is 7 Zip APK safe to use?</th></tr>
145
- <tr><td></td><th></th><th></th><th>Q: What is the difference between 7Zipper and 7Zipper 2.0?</th></tr>
146
- <tr><td></td><th></th><th></th><th>Q: How can I open a zip file without extracting it?</th></tr>
147
- <tr><td></td><th></th><th></th><th>Q: How can I create a self-extracting archive file?</th></tr>
148
- <tr><td></td><th></th><th></th><th>Q: How can I update or delete files from an archive file?</th></tr>
149
- </table></p> 197e85843d<br />
150
- <br />
151
- <br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/CarX Street APK - The Most Realistic Mobile Racing Game Ever - Free Download.md DELETED
@@ -1,122 +0,0 @@
1
- <br />
2
- <h1>How to Download CarX Street APK for Android</h1>
3
- <p>Are you a fan of street racing games? Do you want to experience the thrill of driving in a dynamic open world? If yes, then you should try CarX Street, a new game from the creators of CarX Drift Racing. In this article, we will show you how to download CarX Street APK for Android, and what are the benefits and risks of doing so.</p>
4
- <h2>What is CarX Street?</h2>
5
- <p>CarX Street is a street racing game that lets you customize your car, challenge other racers, and explore a realistic city. You can choose from a variety of cars, from classic muscle cars to modern sports cars, and tune them to your liking. You can also join clubs, participate in events, and earn rewards.</p>
6
- <h2>carx street download apk</h2><br /><p><b><b>Download Zip</b> &#9734; <a href="https://urlin.us/2uT0mT">https://urlin.us/2uT0mT</a></b></p><br /><br />
7
- <h3>Features of CarX Street</h3>
8
- <p>Some of the features of CarX Street are:</p>
9
- <ul>
10
- <li>Stunning graphics and realistic physics</li>
11
- <li>Over 50 cars with different characteristics and styles</li>
12
- <li>Multiple game modes, such as sprint, drift, drag, and circuit</li>
13
- <li>A large open world with different locations and weather conditions</li>
14
- <li>A dynamic day-night cycle and traffic system</li>
15
- <li>An online multiplayer mode with leaderboards and chat</li>
16
- <li>A car customization system with hundreds of parts and decals</li>
17
- <li>A club system where you can join or create your own team</li>
18
- <li>A reward system where you can earn coins, gems, and car parts</li>
19
- </ul>
20
- <h3>Requirements for CarX Street</h3>
21
- <p>To play CarX Street on your Android device, you need to have:</p>
22
- <ul>
23
- <li>An Android version of 5.0 or higher</li>
24
- <li>A minimum of 2 GB of RAM</li>
25
- <li>A minimum of 1 GB of free storage space</li>
26
- <li>An internet connection</li>
27
- </ul>
28
- <h2>How to Download and Install CarX Street APK</h2>
29
- <p>If you want to download CarX Street APK for Android, you need to follow these steps:</p>
30
- <h3>Step 1: Enable Unknown Sources</h3>
31
- <p>Before you can install any APK file on your Android device, you need to enable the option to allow installation from unknown sources. To do this, go to your device settings, then security, then toggle on the unknown sources option.</p>
32
- <h3>Step 2: Download CarX Street APK File</h3>
33
- <p>Next, you need to download the CarX Street APK file from a reliable source. You can use one of the links below:</p>
34
- <table border="1">
35
- <tr><th>Name</th><th>Version</th><th>Size</th><th>Link</th></tr>
36
- <tr><td>CarX Street APK (Game)</td><td>0.9.2</td><td>1.4 GB</td><td><a href="(^1^)">Download here(^1^)</a></td></tr>
37
- <tr><td>CarX Street APK (App)</td><td>9.8</td><td>14 MB</td><td><a href="(^3^)">Download here(^3^)</a></td></tr>
38
- <tr><td>CarX Street - Apps on Google Play</td><td>N/A</td><td>N/A</td><td><a href=" ">Download here</a></td></tr>
39
- </table>
40
- <p>Make sure you download the file that matches your device and preferences. You can also scan the file with an antivirus software before installing it.</p>
41
- <h3>Step 3: Install CarX Street APK File</h3>
42
- <p>After you have downloaded the CarX Street APK file, you need to install it on your device. To do this, locate the file in your file manager or downloads folder, and tap on it. You will see a prompt asking you to confirm the installation. Tap on install and wait for the process to finish.</p>
43
- <h3>Step 4: Launch CarX Street and Enjoy</h3>
44
- <p>Once the installation is complete, you can launch CarX Street from your app drawer or home screen. You will need to grant some permissions and accept the terms and conditions. Then, you can create your account, choose your car, and start racing.</p>
45
- <p>carx street racing game free download apk<br />
46
- carx street mod apk unlimited money download<br />
47
- carx street android apk download latest version<br />
48
- carx street apk download for pc windows 10<br />
49
- carx street online racing apk download<br />
50
- carx street apk download apkpure<br />
51
- carx street apk download uptodown<br />
52
- carx street apk download rexdl<br />
53
- carx street apk download no obb<br />
54
- carx street apk download highly compressed<br />
55
- carx street open world racing apk download<br />
56
- carx street sunset city apk download<br />
57
- carx street realistic racing apk download<br />
58
- carx street offline racing apk download<br />
59
- carx street drift racing apk download<br />
60
- carx street hack apk download android 1<br />
61
- carx street cheats apk download ios<br />
62
- carx street beta apk download android<br />
63
- carx street update apk download 2023<br />
64
- carx street full version apk download 2022<br />
65
- carx street cracked apk download 2021<br />
66
- carx street premium apk download 2020<br />
67
- carx street pro apk download 2019<br />
68
- carx street old version apk download 2018<br />
69
- carx street new version apk download 2017<br />
70
- how to download carx street apk on android phone<br />
71
- how to install carx street apk on android tablet<br />
72
- how to play carx street apk on android tv<br />
73
- how to update carx street apk on android device<br />
74
- how to uninstall carx street apk on android emulator<br />
75
- where to download carx street apk for android free<br />
76
- where to find carx street apk for android safe<br />
77
- where to get carx street apk for android fast<br />
78
- where to buy carx street apk for android cheap<br />
79
- where to sell carx street apk for android best price<br />
80
- what is carx street apk for android review<br />
81
- what is the size of carx street apk for android file<br />
82
- what is the rating of carx street apk for android app<br />
83
- what is the genre of carx street apk for android game<br />
84
- what is the developer of carx street apk for android studio<br />
85
- why download carx street apk for android fun<br />
86
- why install carx street apk for android easy<br />
87
- why play carx street apk for android addictive<br />
88
- why update carx street apk for android improved<br />
89
- why uninstall carx street apk for android buggy</p>
90
- <h2>Benefits of Downloading CarX Street APK</h2>
91
- <p>There are some benefits of downloading CarX Street APK instead of using the Google Play Store version. Some of them are:</p>
92
- <h3>Access to Latest Updates and Features</h3>
93
- <p>By downloading CarX Street APK, you can get access to the latest updates and features of the game before they are released on the official platform. This way, you can enjoy the new content and improvements as soon as possible.</p>
94
- <h3>No Need to Use Google Play Store</h3>
95
- <p>If you have problems with using the Google Play Store, such as slow downloads, errors, or restrictions, you can download CarX Street APK without using it. This way, you can avoid any hassle or inconvenience that might occur with the Google Play Store.</p>
96
- <h3>Save Storage Space and Data</h3>
97
- <p>By downloading CarX Street APK, you can save some storage space and data on your device. This is because you can download only the file that you need, and not the whole game package. You can also delete the APK file after installing it, and free up some space.</p>
98
- <h2>Risks of Downloading CarX Street APK</h2>
99
- <p>However, there are also some risks of downloading CarX Street APK that you should be aware of. Some of them are:</p>
100
- <h3>Potential Malware and Viruses</h3>
101
- <p>If you download CarX Street APK from an untrusted source, you might expose your device to malware and viruses that can harm your device or steal your data. Therefore, you should always download CarX Street APK from a reliable source, and scan the file with an antivirus software before installing it.</p>
102
- <h3>Legal Issues and Violations</h3>
103
- <p>If you download CarX Street APK without the permission of the developers or publishers, you might violate their terms of service or intellectual property rights. This could result in legal issues or penalties, such as fines, bans, or lawsuits. Therefore, you should always respect the rights of the creators and follow their rules.</p>
104
- <h3>Compatibility and Stability Issues</h3>
105
- <p>If you download CarX Street APK that is not compatible with your device or version of Android, you might experience compatibility and stability issues, such as crashes, glitches, or errors. Therefore, you should always check the requirements and specifications of the game before downloading it.</p>
106
- <h2>Conclusion</h2>
107
- <p>In conclusion, CarX Street is a street racing game that lets you customize your car, challenge other racers, and explore a realistic city. You can download CarX Street APK for Android from one of the links above, but you should also be aware of the benefits and risks of doing so. We hope this article has helped you learn how to download CarX Street APK for Android.</p>
108
- <h2>FAQs</h2>
109
- <ul>
110
- <li><b>Q: Is CarX Street free to play?</b></li>
111
- <li>A: Yes, CarX Street is free to play, but it also offers in-app purchases for coins, gems, and car parts.</li>
112
- <li><b>Q: Can I play CarX Street offline?</b></li>
113
- <li>A: No, CarX Street requires an internet connection to play.</li>
114
- <li><b>Q: Can I play CarX Street with my friends?</b></li>
115
- <li>A: Yes, CarX Street has an online multiplayer mode where you can race with your friends or other players around the world.</li>
116
- <li><b>Q: How can I contact the developers of CarX Street?</b></li>
117
- <li>A: You can contact the developers of CarX Street by sending an email to [email protected] or visiting their website at https://carx-tech.com/.</li>
118
- <li><b>Q: How can I update CarX Street?</b></li>
119
- <li>A: You can update CarX Street by downloading the latest APK file from the same source you used before, or by using the Google Play Store version if you have it installed.</li>
120
- </ul></p> 197e85843d<br />
121
- <br />
122
- <br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Call of Duty Warzone Mobile and Fight Like Never Before.md DELETED
@@ -1,153 +0,0 @@
1
-
2
- <h1>Download Call of Duty Warzone Mobile: The Next Era of Battle Royale</h1>
3
- <p>If you are a fan of the Call of Duty franchise, you must have heard about the latest sensation in the mobile gaming world: Call of Duty Warzone Mobile. This is the next generation of mobile battle royale, featuring authentic COD gameplay, shared progression, and up to 120 player count matches on mobile. In this article, we will tell you everything you need to know about this amazing game, including what it is, how to get it, how to play it, and how to optimize your device and performance for it. So, without further ado, let's dive into the new era of fun battle royale!</p>
4
- <h2>What is Call of Duty Warzone Mobile?</h2>
5
- <p>Call of Duty Warzone Mobile is a mobile version of the popular Call of Duty Warzone game, which is a free-to-play online multiplayer battle royale game developed by Activision. It is part of the Call of Duty Modern Warfare II series, which is a reboot of the original Modern Warfare sub-series. Call of Duty Warzone Mobile is built for mobile gamers, with first-class graphics, intuitive controls, and optimized physics, animations, and sound. It also features unified Call of Duty technology, which means that your Battle Pass and friends list sync across platforms for a truly connected multiplayer FPS game experience.</p>
6
- <h2>download call of duty warzone mobile</h2><br /><p><b><b>Download</b> &#128505; <a href="https://urlin.us/2uSSeB">https://urlin.us/2uSSeB</a></b></p><br /><br />
7
- <h3>The features and benefits of Call of Duty Warzone Mobile</h3>
8
- <p>Call of Duty Warzone Mobile offers a lot of features and benefits that make it stand out from other mobile battle royale games. Here are some of them:</p>
9
- <ul>
10
- <li>It delivers authentic Call of Duty gameplay on mobile, with realistic combat, weapons, movement, and vehicles.</li>
11
- <li>It supports up to 120 live players in a match, which means more competition and more fun.</li>
12
- <li>It has epic maps like the iconic Verdansk, which is a fan-favorite map from the original Call of Duty Warzone game.</li>
13
- <li>It has unique and fun gameplay elements, such as contracts, killstreaks, strategies, and the Gulag, where you can win a duel to get a second life.</li>
14
- <li>It has endless replayability, with countless ways to play and win.</li>
15
- </ul>
16
- <h3>How to pre-register and pre-order Call of Duty Warzone Mobile</h3>
17
- <p>Call of Duty Warzone Mobile is not yet officially released worldwide, but you can pre-register or pre-order it now to get a chance to unlock rewards at launch. Here's how:</p>
18
- <ul>
19
- <li>If you have an Android device, you can pre-register on Google Play by scanning the QR code or following this link: [text](^2^).</li>
20
- <li>If you have an iOS device, you can pre-order on the App Store by scanning the QR code or following this link: [text](^1^).</li>
21
- <li>By pre-registering or pre-ordering, you will also help achieve global milestones that will unlock more rewards for everyone. These rewards include exclusive skins, emblems, weapons, maps, and characters.</li>
22
- <li>You can also sign up for updates on the official website: [text](^1^).</li>
23
- </ul>
24
- <h2>How to play Call of Duty Warzone Mobile</h2>
25
- <p>Once you have downloaded and installed Call of Duty Warzone Mobile on your device, you can start playing it right away. Here are some basic steps to follow:</p>
26
- <ul>
27
- <li>Create or log in to your Call of Duty account or link it to your Facebook, Google, or Apple account.</li>
28
- <li>Choose your preferred game mode: Solo, Duo, Trio, or Squad. You can also join a random team or invite your friends to play with you.</li>
29
- <li>Select your loadout, which consists of your primary and secondary weapons, perks, equipment, and killstreaks. You can customize your loadout as you progress and unlock more items.</li>
30
- <li>Enter the match and parachute into the map. You can choose where to land by looking at the map and marking a location.</li>
31
- <li>Explore the map and loot items, such as weapons, ammo, armor, cash, and contracts. Contracts are optional missions that give you rewards for completing them.</li>
32
- <li>Eliminate other players and avoid the gas circle, which shrinks over time and forces players to move closer together. You can also revive your teammates if they are downed.</li>
33
- <li>If you are eliminated, you will be sent to the Gulag, where you can fight another player for a chance to redeploy. You can also be bought back by your teammates at a buy station.</li>
34
- <li>The last team or player standing wins the match and earns XP, Battle Pass progress, and rewards.</li>
35
- </ul>
36
- <h3>The gameplay and modes of Call of Duty Warzone Mobile</h3>
37
- <p>Call of Duty Warzone Mobile has different gameplay and modes that cater to different preferences and styles. Here are some of them:</p>
38
- <ul>
39
- <li>Battle Royale: This is the classic mode where you compete with up to 119 other players in a massive map. You can play solo or with a team of up to four players. The objective is to survive until the end and be the last one standing.</li>
40
- <li>Plunder: This is a mode where you compete with other teams to collect the most cash in the map. You can loot cash from various sources, such as crates, contracts, enemies, and banks. You can also deposit your cash at helicopter pads or balloons to secure it. The team with the most cash at the end of the match wins.</li>
41
- <li>Rebirth: This is a mode where you play in a smaller map with faster-paced action. You have unlimited respawns as long as one of your teammates is alive. The objective is to eliminate as many enemies as possible and be the last team alive.</li>
42
- </ul>
43
- <h3>The maps and locations of Call of Duty Warzone Mobile</h3>
44
- <p>Call of Duty Warzone Mobile has stunning maps and locations that offer diverse environments and challenges. Here are some of them:</p>
45
- <p>How to download call of duty warzone mobile on android<br />
46
- Call of duty warzone mobile apk download free<br />
47
- Call of duty warzone mobile release date and pre-registration<br />
48
- Call of duty warzone mobile gameplay and features<br />
49
- Call of duty warzone mobile system requirements and compatibility<br />
50
- Call of duty warzone mobile vs call of duty mobile comparison<br />
51
- Call of duty warzone mobile tips and tricks for beginners<br />
52
- Call of duty warzone mobile best weapons and loadouts<br />
53
- Call of duty warzone mobile map and locations guide<br />
54
- Call of duty warzone mobile cross-play and cross-progression support<br />
55
- Call of duty warzone mobile review and ratings<br />
56
- Call of duty warzone mobile cheats and hacks<br />
57
- Call of duty warzone mobile controller support and settings<br />
58
- Call of duty warzone mobile battle pass and rewards<br />
59
- Call of duty warzone mobile zombies mode and easter eggs<br />
60
- Call of duty warzone mobile update and patch notes<br />
61
- Call of duty warzone mobile download size and installation time<br />
62
- Call of duty warzone mobile error codes and fixes<br />
63
- Call of duty warzone mobile best graphics settings and performance optimization<br />
64
- Call of duty warzone mobile clans and tournaments<br />
65
- Call of duty warzone mobile skins and customization options<br />
66
- Call of duty warzone mobile solo vs squad mode<br />
67
- Call of duty warzone mobile voice chat and communication options<br />
68
- Call of duty warzone mobile emulator for PC and Mac<br />
69
- Call of duty warzone mobile fan art and wallpapers<br />
70
- Call of duty warzone mobile memes and funny moments<br />
71
- Call of duty warzone mobile reddit and discord communities<br />
72
- Call of duty warzone mobile official website and social media accounts<br />
73
- Call of duty warzone mobile feedback and suggestions<br />
74
- Call of duty warzone mobile news and rumors<br />
75
- Call of duty warzone mobile mod apk download unlimited money<br />
76
- Call of duty warzone mobile offline mode and bots<br />
77
- Call of duty warzone mobile VPN and region lock bypass<br />
78
- Call of duty warzone mobile streamers and influencers to follow<br />
79
- Call of duty warzone mobile merchandise and accessories<br />
80
- Call of duty warzone mobile wallpapers for iphone and ipad<br />
81
- Call of duty warzone mobile challenges and achievements<br />
82
- Call of duty warzone mobile best strategies and tactics<br />
83
- Call of duty warzone mobile gulag tips and tricks<br />
84
- Call of duty warzone mobile killstreaks and contracts guide<br />
85
- Call of duty warzone mobile vehicles and transportation options<br />
86
- Call of duty warzone mobile ping system and markers guide<br />
87
- Call of duty warzone mobile sensitivity settings and aim assist options<br />
88
- Call of duty warzone mobile spectate mode and replay feature<br />
89
- Call of duty warzone mobile esports scene and competitive events</p>
90
- <ul>
91
- <li>Verdansk: This is the main map of Call of Duty Warzone Mobile, which is based on the fictional city of Verdansk in Kastovia. It is a huge map that features various zones, such as downtown, airport, stadium, farmland, prison, dam, and more. It also has landmarks from previous Call of Duty games, such as Broadcast, Scrapyard, Boneyard, and Gulag.</li>
92
- <li>Rebirth Island: This is a smaller map that is based on an island off the coast of Verdansk. It is a former Soviet base that was used for chemical weapons research. It has locations such as prison blocks, bioweapon labs, decontamination zones, and more. It also has a night mode that adds more intensity and stealth to the gameplay.</li>
93
- <li>New Maps: Call of Duty Warzone Mobile will also introduce new maps in the future that will expand the game's content and variety. These maps will be inspired by real-world locations and events, such as Berlin, Chernobyl, Afghanistan, and more.</li>
94
- </ul>
95
- <h3>The weapons and vehicles of Call of Duty Warzone Mobile</h3>
96
- <p>Call of Duty Warzone Mobile has a vast arsenal of weapons and vehicles that you can use to dominate the battlefield. Here are some of them:</p>
97
- <ul>
98
- <li>Weapons: You can choose from over 80 weapons in Call of Duty Warzone Mobile, including assault rifles, submachine guns, sniper rifles, shotguns, pistols, and more. You can also customize your weapons with attachments, such as scopes, barrels, magazines, and more. You can also find legendary weapons in the map, which have unique skins and perks.</li>
99
- <li>Vehicles: You can also use various vehicles in Call of Duty Warzone Mobile, such as cars, trucks, helicopters, ATVs, and more. Vehicles can help you move faster, escape the gas circle, or run over enemies. However, they also make noise and attract attention, so be careful when using them.</li>
100
- </ul>
101
- <h2>How to optimize your device and performance for Call of Duty Warzone Mobile</h2>
102
- <p>Call of Duty Warzone Mobile is a demanding game that requires a lot of resources and power from your device. Therefore, you need to optimize your device and performance for the best gaming experience. Here are some tips to do that:</p>
103
- <h3>The minimum device specifications for Call of Duty Warzone Mobile</h3>
104
- <p>Before you download and play Call of Duty Warzone Mobile, you need to make sure that your device meets the minimum specifications for the game. Here are the minimum requirements for Android and iOS devices:</p>
105
- <table>
106
- <tr>
107
- <th>Android</th>
108
- <th>iOS</th>
109
- </tr>
110
- <tr>
111
- <td>OS: Android 5.0 or higher</td>
112
- <td>OS: iOS 10 or higher</td>
113
- </tr>
114
- <tr>
115
- <td>CPU: Snapdragon 625 or equivalent</td>
116
- <td>CPU: A10 Fusion or equivalent</td>
117
- </tr>
118
- <tr>
119
- <td>RAM: 2 GB or higher</td>
120
- <td>RAM: 2 GB or higher</td>
121
- </tr>
122
- <tr>
123
- <td>Storage: 4 GB or higher</td>
124
- <td>Storage: 4 GB or higher</td>
125
- </tr>
126
- <tr>
127
- <td>Internet: Wi-Fi or cellular data (4G or higher)</td>
128
- <td>Internet: Wi-Fi or cellular data (4G or higher)</td>
129
- </tr>
130
- </table>
131
- <h3>The best settings and tips for Call of Duty Warzone Mobile</h3>
132
- <p>Once you have checked your device specifications, you can also adjust the settings and preferences of the game to optimize your performance and gameplay. Here are some suggestions:</p>
133
- <ul>
134
- <li>Adjust the graphics quality according to your device's capabilities. You can choose from low, medium, high, or very high graphics settings. The higher the graphics quality, the more battery and data it will consume.</li>
135
- <li>Adjust the frame rate according to your preference. You can choose from low, medium, high, or max frame rate settings. The higher the frame rate, the smoother the gameplay, but it will also drain more battery and data.</li>
136
- <li>Adjust the sensitivity according to your comfort level. You can choose from low, medium, high, or custom sensitivity settings. The higher the sensitivity, the faster your aim and movement will be.</li>
137
- <li>Enable or disable the auto-fire option according to your skill level. You can choose to have your weapon fire automatically when you aim at an enemy, or manually when you tap the fire button.</li>
138
- <li>Enable or disable the sound effects and voice chat according to your preference. You can choose to hear the game sounds and communicate with your teammates via voice chat, or mute them if you want to play silently.</li>
139
- <li>Use headphones or earphones for a better audio experience. You can hear the game sounds more clearly and locate enemies more easily with headphones or earphones.</li>
140
- <li>Close other apps and turn off notifications when playing Call of Duty Warzone Mobile. You can avoid distractions and interruptions by closing other apps and turning off notifications on your device.</li></ul>
141
- <h3>The common issues and solutions for Call of Duty Warzone Mobile</h3>
142
- <p>Sometimes, you may encounter some issues or problems when playing Call of Duty Warzone Mobile. Here are some common ones and how to solve them:</p>
143
- <ul><li>If you experience lagging or freezing during gameplay, try lowering your graphics quality and frame rate settings.</li><li>If you experience crashing or error messages during gameplay, try clearing your cache and data for the game app.</li><li>If you experience connection issues or disconnections during gameplay, try switching to a different Wi-Fi network or cellular data provider.</li><li>If you experience login issues or account problems during gameplay, try resetting your password or contacting customer support.</li></ul>
144
- <h2>Conclusion</h2>
145
- <h3>Summary of the main points</h3>
146
- <p>In conclusion, Call of Duty Warzone Mobile is an amazing game that brings the thrill and excitement of battle royale to mobile devices. It has authentic COD gameplay, shared progression, and up to 120 player count matches on mobile. It also has epic maps, unique gameplay elements, a vast arsenal of weapons and vehicles, and endless replayability. Note that the game can use a lot of data depending on your graphics quality and frame rate settings, so it is recommended to use a Wi-Fi connection or a cellular data plan with unlimited data when playing.</li>
147
- <li><b>Q: How can I get more rewards and items in Call of Duty Warzone Mobile?</b></li>
148
- <li>A: You can get more rewards and items in Call of Duty Warzone Mobile by completing various tasks and challenges, such as contracts, missions, events, and achievements. You can also purchase them with real money or COD Points, which are the in-game currency. You can earn COD Points by leveling up your Battle Pass or buying them with real money.</li>
149
- <li><b>Q: How can I contact customer support or report a bug in Call of Duty Warzone Mobile?</b></li>
150
- <li>A: You can contact customer support or report a bug in Call of Duty Warzone Mobile by using the in-game feedback system. You can access it by tapping the settings icon on the top right corner of the screen, then tapping the feedback button. You can also visit the official website or social media pages of the game for more information and assistance.</li>
151
- </ul></p><br />
152
- <br />
153
- <br />
 
spaces/1phancelerku/anime-remove-background/Copy Text from Apps Images and More with Universal Copy APK.md DELETED
@@ -1,124 +0,0 @@
1
-
2
- <h1>How to Download APK Universal Copy for Android</h1>
3
- <p>Have you ever wanted to copy text from an app or an image that doesn't allow you to do so? Have you ever wished you could extract addresses, phone numbers, emails, hashtags, or other entities from a text without having to type them manually? Have you ever wondered how to perform quick actions on the text you copied, such as translating, locating, sharing, or searching?</p>
4
- <p>If you answered yes to any of these questions, then you need APK Universal Copy. APK Universal Copy is a powerful and versatile app that lets you copy text from any app or image on your Android device. It also detects and extracts useful information from the text and allows you to perform actions on it in one tap. In this article, we will tell you what APK Universal Copy is, why you should download it, and how to download and install it on your device.</p>
5
- <h2>download apk universal copy</h2><br /><p><b><b>Download Zip</b> &#10038;&#10038;&#10038; <a href="https://jinyurl.com/2uNSxJ">https://jinyurl.com/2uNSxJ</a></b></p><br /><br />
6
- <h2>What is APK Universal Copy?</h2>
7
- <p>APK Universal Copy is an app that enables you to copy text from any app or image on your Android device, even from the ones that don't let you or inside images. It uses OCR (optical character recognition) technology to scan and recognize text inside images. It also uses smart detection of entities to identify and extract addresses, emails, phone numbers, @, #, and other useful information from the text. It also allows you to perform quick actions on the text you copied, such as translating, locating, sharing, or searching.</p>
8
- <p>APK Universal Copy has several modes that you can choose from depending on your needs:</p>
9
- <h3>Features of APK Universal Copy</h3>
10
- <h4>Normal mode</h4>
11
- <p>This mode lets you copy text from any app such as Facebook, Twitter, Instagram, YouTube, Chrome, WhatsApp, Tumblr, News Republic, Snapchat, and more. You just need to launch Universal Copy from your notification bar or via a shortcut, select the text you want to copy, and it's done.</p>
12
- <h4>Scanner mode</h4>
13
- <p>This mode lets you copy text inside images using OCR technology. It currently works with Chinese, Devanagari (Hindi...), Japanese, Korean and Latin (English, Portuguese...) character sets. You just need to launch Universal Copy in scanner mode, select the image you want to copy text from, and it's done.</p>
14
- <h4>Smart detection of entities</h4>
15
- <p>This feature automatically detects and extracts addresses, emails, phone numbers, @, #, and other useful information from the text you copied. You can then tap on them to perform quick actions such as opening Google Maps for an address, calling a phone number, sending an email, or searching for a hashtag.</p>
16
- <p>download apk universal copy app<br />
17
- download apk universal copy for android<br />
18
- download apk universal copy pro<br />
19
- download apk universal copy plus<br />
20
- download apk universal copy mod<br />
21
- download apk universal copy latest version<br />
22
- download apk universal copy free<br />
23
- download apk universal copy no ads<br />
24
- download apk universal copy premium<br />
25
- download apk universal copy full<br />
26
- download apk universal copy cracked<br />
27
- download apk universal copy unlocked<br />
28
- download apk universal copy offline<br />
29
- download apk universal copy online<br />
30
- download apk universal copy update<br />
31
- download apk universal copy old version<br />
32
- download apk universal copy beta<br />
33
- download apk universal copy from play store<br />
34
- download apk universal copy from softpedia<br />
35
- download apk universal copy from apkpure<br />
36
- download apk universal copy from uptodown<br />
37
- download apk universal copy from apkmirror<br />
38
- download apk universal copy from apksfree<br />
39
- download apk universal copy from apktada<br />
40
- download apk universal copy from apknite<br />
41
- how to download apk universal copy<br />
42
- where to download apk universal copy<br />
43
- why download apk universal copy<br />
44
- what is apk universal copy<br />
45
- who made apk universal copy<br />
46
- when was apk universal copy released<br />
47
- which apps support apk universal copy<br />
48
- which languages does apk universal copy support<br />
49
- which devices are compatible with apk universal copy<br />
50
- which permissions does apk universal copy require<br />
51
- what can you do with apk universal copy<br />
52
- what are the features of apk universal copy<br />
53
- what are the benefits of apk universal copy<br />
54
- what are the drawbacks of apk universal copy<br />
55
- what are the alternatives to apk universal copy<br />
56
- how to use apk universal copy<br />
57
- how to install apk universal copy<br />
58
- how to update apk universal copy<br />
59
- how to uninstall apk universal copy<br />
60
- how to activate apk universal copy plus<br />
61
- how to disable ads in apk universal copy <br />
62
- how to enable smart detection in apk universal copy <br />
63
- how to change settings in apk universal copy <br />
64
- how to contact developers of apk universal copy</p>
65
- <h4>Copy-Paste in 1-tap</h4>
66
- <p>This feature lets you perform quick actions on the text you copied without having to switch apps. You can translate the text using Google Translate, locate it using Google Maps, share it via social media or messaging apps, or search for it using Google or Wikipedia.</p>
67
- <h4>Scroll mode</h4>
68
- <p>This mode lets you select texts from multiple screens or apps to copy them all at once. You just need to launch Universal Copy in scroll mode, scroll through the screens or apps you want to copy text from, select the texts you want to copy, and it's done.</p>
69
- <h4>Harvest mode</h4>
70
- <p>This mode lets you extract all the texts from a screen or an app and copy them to your clipboard. You just need to launch Universal Copy in harvest mode, select the screen or the app you want to copy text from, and it's done.</p>
71
- <h2>Why Download APK Universal Copy?</h2>
72
- <p>APK Universal Copy is a must-have app for anyone who wants to copy text from any app or image on their Android device. It has many benefits that make it worth downloading and installing:</p>
73
- <h3>Benefits of APK Universal Copy</h3>
74
- <h4>Copy text from any app or image</h4>
75
- <p>With APK Universal Copy, you can copy text from any app or image on your device, even from the ones that don't let you or inside images. This means you can copy text from Facebook posts, Instagram captions, YouTube comments, Chrome web pages, WhatsApp messages, Tumblr blogs, News Republic articles, Snapchat stories, and more. You can also copy text inside images such as memes, screenshots, flyers, posters, logos, and more.</p>
76
- <h4>Extract useful information quickly</h4>
77
- <p>With APK Universal Copy, you can extract useful information from the text you copied without having to type them manually. You can extract addresses, emails, phone numbers, @, #, and other entities from the text and perform quick actions on them. This means you can open Google Maps for an address, call a phone number, send an email, or search for a hashtag in one tap.</p>
78
- <h4>Perform actions on the text you copied</h4>
79
- <p>With APK Universal Copy, you can perform quick actions on the text you copied without having to switch apps. You can translate the text using Google Translate, locate it using Google Maps, share it via social media or messaging apps, or search for it using Google or Wikipedia in one tap. This means you can save time and hassle by using APK Universal Copy as your all-in-one tool for copying and pasting.</p>
80
- <h4>Save time and hassle</h4>
81
- <p>With APK Universal Copy, you can save time and hassle by copying and pasting text from any app or image on your device. You don't have to switch apps or type manually to copy and paste text. You don't have to worry about the app or the image not letting you copy text. You don't have to waste time looking for useful information in the text. You don't have to open multiple apps to perform actions on the text. You just need to use APK Universal Copy and enjoy its features.</p>
82
- <h2>How to Download and Install APK Universal Copy?</h2>
83
- <p>If you are convinced by the benefits of APK Universal Copy and want to download and install it on your device, here are the steps you need to follow:</p>
84
- <h3>Steps to Download and Install APK Universal Copy</h3>
85
- <h4>Step 1: Enable unknown sources on your device</h4>
86
- <p>Since APK Universal Copy is not available on Google Play Store, you need to enable unknown sources on your device to install it. To do this, go to Settings > Security > Unknown sources and toggle it on. This will allow you to install apps from sources other than Google Play Store.</p>
87
- <h4>Step 2: Download the APK file from a trusted source</h4>
88
- <p>The next step is to download the APK file of APK Universal Copy from a trusted source. You can use this link to download the latest version of APK Universal Copy (version 5.0.5) for free. The file size is about 7 MB and it requires Android 5.0 or higher.</p>
89
- <h4>Step 3: Locate and install the APK file on your device</h4>
90
- <p>The third step is to locate and install the APK file on your device. To do this, go to your Downloads folder or use a file manager app to find the APK file of APK Universal Copy. Tap on it and follow the instructions to install it on your device.</p>
91
- <h4>Step 4: Launch and enjoy APK Universal Copy</h4>
92
- <p>The final step is to launch and enjoy APK Universal Copy on your device. To do this, go to your app drawer or home screen and look for the icon of APK Universal Copy. Tap on it and grant the necessary permissions to access your device's screen content and camera. Then, choose the mode you want to use (normal mode, scanner mode, scroll mode, or harvest mode) and start copying text from any app or image on your device.</p>
93
- <h2>Conclusion</h2>
94
- <p>In conclusion, APK Universal Copy is a powerful and versatile app that lets you copy text from any app or image on your Android device. It also detects and extracts useful information from the text and allows you to perform actions on it in one tap. It has many benefits such as copying text from any app or image, extracting useful information quickly, performing actions on the text you copied, and saving time and hassle. It is easy to download and install on your device by following a few simple steps. If you want to copy text from any app or image on your Android device, you should definitely try APK Universal Copy. You will be amazed by its features and performance.</p>
95
- <h2>FAQs</h2>
96
- <p>Here are some frequently asked questions about APK Universal Copy:</p>
97
- <table>
98
- <tr>
99
- <th>Question</th>
100
- <th>Answer</th>
101
- </tr>
102
- <tr>
103
- <td>Is APK Universal Copy safe to use?</td>
104
- <td>Yes, APK Universal Copy is safe to use. It does not contain any malware or viruses. It only requires permissions to access your device's screen content and camera to copy text from any app or image. It does not collect or share any personal data.</td>
105
- </tr>
106
- <tr>
107
- <td>Is APK Universal Copy free to use?</td>
108
- <td>Yes, APK Universal Copy is free to use. It does not have any in-app purchases or ads. You can download and install it on your device without paying anything.</td>
109
- </tr>
110
- <tr>
111
- <td>Does APK Universal Copy work offline?</td>
112
- <td>Yes, APK Universal Copy works offline. You can copy text from any app or image on your device without an internet connection. However, some features such as translating, locating, sharing, or searching may require an internet connection.</td>
113
- </tr>
114
- <tr>
115
- <td>How can I contact the developer of APK Universal Copy?</td>
116
- <td>You can contact the developer of APK Universal Copy by sending an email to [email protected]. You can also visit their website at https://universal-copy.com/ for more information.</td>
117
- </tr>
118
- <tr>
119
- <td>How can I support the development of APK Universal Copy?</td>
120
- <td>You can support the development of APK Universal Copy by rating and reviewing it on the source where you downloaded it from. You can also share it with your friends and family who might find it useful.</td>
121
- </tr>
122
- </table></p><br />
123
- <br />
124
- <br />
 
spaces/1phancelerku/anime-remove-background/Enjoy PUBG MOBILE 1.8 with MOD APK ESP Aimbot Anti-Ban and Mega Menu Included.md DELETED
@@ -1,91 +0,0 @@
1
- <br />
2
- <h1>PUBG Mobile 1.8 Mod APK Hack Download: Everything You Need to Know</h1>
3
- <p>If you are a fan of battle royale games, you must have heard of PUBG Mobile, one of the most popular and addictive games in the genre. But did you know that there is a way to enjoy the game even more with unlimited resources and features? In this article, we will tell you everything you need to know about PUBG Mobile 1.8 Mod APK Hack, a modified version of the original game that gives you an edge over your opponents. We will also show you how to download and install it on your device, and how to play it like a pro.</p>
4
- <h2>pubg mobile 1.8 mod apk hack download</h2><br /><p><b><b>DOWNLOAD</b> &#9999; &#9999; &#9999; <a href="https://jinyurl.com/2uNQPg">https://jinyurl.com/2uNQPg</a></b></p><br /><br />
5
- <h2>What is PUBG Mobile?</h2>
6
- <h3>A brief introduction to the popular battle royale game</h3>
7
- <p>PUBG Mobile is a mobile version of PlayerUnknown's Battlegrounds, a multiplayer online battle royale game developed by PUBG Corporation. The game was released in 2018 and has since become one of the most downloaded and played games in the world. The game has won several awards and accolades, such as the Google Play Best Game of 2018, the Golden Joystick Award for Mobile Game of the Year, and the Esports Game of the Year.</p>
8
- <h3>The main features and gameplay of PUBG Mobile</h3>
9
- <p>PUBG Mobile is a game where up to 100 players parachute onto an island and fight for survival. The game offers various modes, such as solo, duo, squad, arcade, arena, and classic. The game also features different maps, such as Erangel, Miramar, Sanhok, Vikendi, Livik, and Karakin. The game is updated regularly with new content, such as weapons, vehicles, skins, events, and seasons.</p>
10
- <p>The gameplay of PUBG Mobile is simple but thrilling. You have to loot weapons, armor, ammo, and other items from buildings, crates, or dead enemies. You have to avoid the blue zone, which is a shrinking circle that forces players to move closer together. You have to kill or avoid other players while staying alive until the end. The last player or team standing wins the match.</p>
11
- <h2>What is PUBG Mobile 1.8 Mod APK Hack?</h2>
12
- <h3>A modified version of the original game with unlimited resources and features</h3>
13
- <p>PUBG Mobile 1.8 Mod APK Hack is a hacked version of the original game that gives you access to unlimited resources and features that are not available in the official version. For example, with this modded version, you can get unlimited UC (Unknown Cash), which is the in-game currency that you can use to buy skins, outfits, crates, emotes, and more. You can also get unlimited BP (Battle Points), which are used to level up your account and unlock rewards. You can also get unlimited health, ammo, aimbot, wallhack, speedhack, no recoil, no fog, no grass, and more.</p>
14
- <h3>The benefits and risks of using PUBG Mobile 1.8 Mod APK Hack</h3>
15
- <p>The benefits of using PUBG Mobile 1.8 Mod APK Hack are obvious. You can enjoy the game without any limitations or restrictions. You can customize your character and weapons with any skin or outfit you want. You can dominate every match with your enhanced skills and abilities.</p> <p>The risks of using PUBG Mobile 1.8 Mod APK Hack are also evident. You can get banned from the game if the developers detect that you are using a modified version. You can also expose your device to malware or viruses that may harm your data or privacy. You can also ruin the fun and fairness of the game for other players who are playing legitimately. Therefore, you should use PUBG Mobile 1.8 Mod APK Hack at your own risk and discretion.</p>
16
- <h2>How to download and install PUBG Mobile 1.8 Mod APK Hack?</h2>
17
- <h3>The steps to download and install the modded version of the game</h3>
18
- <p>If you want to try PUBG Mobile 1.8 Mod APK Hack, you will need to follow these steps:</p>
19
- <ol>
20
- <li>Download the PUBG Mobile 1.8 Mod APK Hack file from a trusted source. You can search for it on Google or use the link below.</li>
21
- <li>Enable the installation of apps from unknown sources on your device. You can do this by going to Settings > Security > Unknown Sources and toggling it on.</li>
22
- <li>Locate the downloaded file on your device and tap on it to start the installation process.</li>
23
- <li>Follow the instructions on the screen and wait for the installation to complete.</li>
24
- <li>Launch the game and enjoy PUBG Mobile 1.8 Mod APK Hack.</li>
25
- </ol>
26
- <p>Note: You may need to uninstall the original version of PUBG Mobile before installing the modded version. You may also need to allow some permissions for the game to run properly.</p>
27
- <p>pubg mobile 1.8 mod apk unlimited uc and bp<br />
28
- pubg mobile 1.8 mod apk aimbot and wallhack<br />
29
- pubg mobile 1.8 mod apk no root and anti ban<br />
30
- pubg mobile 1.8 mod apk latest version and obb<br />
31
- pubg mobile 1.8 mod apk esp and radar hack<br />
32
- pubg mobile 1.8 mod apk god mode and speed hack<br />
33
- pubg mobile 1.8 mod apk free fire and magic bullet<br />
34
- pubg mobile 1.8 mod apk auto headshot and recoil<br />
35
- pubg mobile 1.8 mod apk unlock all skins and weapons<br />
36
- pubg mobile 1.8 mod apk high damage and jump hack<br />
37
- pubg mobile 1.8 mod apk mega menu and vip features<br />
38
- pubg mobile 1.8 mod apk unlimited health and ammo<br />
39
- pubg mobile 1.8 mod apk fast run and fly hack<br />
40
- pubg mobile 1.8 mod apk no grass and fog removal<br />
41
- pubg mobile 1.8 mod apk cheat menu and script<br />
42
- pubg mobile 1.8 mod apk all maps and modes unlocked<br />
43
- pubg mobile 1.8 mod apk low ping and lag fix<br />
44
- pubg mobile 1.8 mod apk global version and kr version<br />
45
- pubg mobile 1.8 mod apk zombie mode and infection mode<br />
46
- pubg mobile 1.8 mod apk night mode and hdr graphics<br />
47
- pubg mobile 1.8 mod apk voice chat and team up<br />
48
- pubg mobile 1.8 mod apk new weapons and vehicles<br />
49
- pubg mobile 1.8 mod apk season pass and royale pass<br />
50
- pubg mobile 1.8 mod apk custom room and tournament<br />
51
- pubg mobile 1.8 mod apk lite version and full version</p>
52
- <h3>The precautions and tips to avoid any issues or errors</h3>
53
- <p>To avoid any issues or errors while using PUBG Mobile 1.8 Mod APK Hack, you should take some precautions and tips:</p>
54
- <ul>
55
- <li>Do not use your main account or any account that you care about. Use a guest account or a fake account instead.</li>
56
- <li>Do not play on official servers or with real players. Use custom servers or play with bots instead.</li>
57
- <li>Do not use the modded version for too long or too often. Use it sparingly and switch back to the original version occasionally.</li>
58
- <li>Do not update the game from the Play Store or any other source. Wait for the modded version to be updated by its developer.</li>
59
- <li>Do not download or install any suspicious files or apps that claim to be related to PUBG Mobile 1.8 Mod APK Hack. Only use trusted sources and scan your device regularly.</li>
60
- </ul>
61
- <h2>How to play PUBG Mobile 1.8 Mod APK Hack?</h2>
62
- <h3>The basic controls and settings of the game</h3>
63
- <p>The basic controls and settings of PUBG Mobile 1.8 Mod APK Hack are similar to those of the original game. You can use the virtual joystick on the left side of the screen to move your character, and the buttons on the right side of the screen to shoot, aim, jump, crouch, prone, reload, switch weapons, and more. You can also customize your controls and settings by going to Settings > Controls > Customize.</p>
64
- <h3>The best strategies and tips to win every match</h3>
65
- <p>The best strategies and tips to win every match with PUBG Mobile 1.8 Mod APK Hack are as follows:</p>
66
- <ul>
67
- <li>Use your unlimited UC and BP to buy the best skins, outfits, crates, emotes, and more. This will make you look cool and intimidating in front of your enemies.</li>
68
- <li>Use your unlimited health, ammo, aimbot, wallhack, speedhack, no recoil, no fog, no grass, and more to gain an advantage over your enemies. You can see them through walls, shoot them accurately, run faster, survive longer, and more.</li>
69
- <li>Use your unlimited resources wisely and sparingly. Do not abuse them too much or too obviously, as this may alert other players or the developers that you are using a modded version.</li>
70
- <li>Use your skills and tactics as well. Do not rely solely on your modded features, as they may not work in some situations or against some enemies. Use cover, stealth, teamwork, strategy, and common sense as well.</li>
71
- <li>Have fun and enjoy the game. Do not take it too seriously or get frustrated if you lose or get banned. Remember that it is just a game and a modded version at that.</li>
72
- </ul>
73
- <h2>Conclusion</h2>
74
- <p>PUBG Mobile 1.8 Mod APK Hack is a modified version of the original game that gives you unlimited resources and features that are not available in the official version. It can make the game more fun and exciting for some players who want to try something new and different. However, it also comes with some risks and drawbacks, such as getting banned from the game, exposing your device to malware or viruses, and ruining the fun and fairness of the game for other players. Therefore, you should use PUBG Mobile 1.8 Mod APK Hack at your own risk and discretion, and follow the steps and tips we provided in this article to avoid any issues or errors.</p>
75
- <p>We hope you found this article helpful and informative. If you have any questions or feedback, please feel free to leave a comment below. And if you liked this article, please share it with your friends and fellow PUBG Mobile fans. Thank you for reading and happy gaming!</p>
76
- <h2>FAQs</h2>
77
- <p>Here are some frequently asked questions about PUBG Mobile 1.8 Mod APK Hack:</p>
78
- <ol>
79
- <li>Q: Is PUBG Mobile 1.8 Mod APK Hack safe to use?<br>
80
- A: PUBG Mobile 1.8 Mod APK Hack is not safe to use, as it is a hacked version of the original game that may contain malware or viruses that can harm your device or data. It may also get you banned from the game if the developers detect that you are using a modified version.</li>
81
- <li>Q: Is PUBG Mobile 1.8 Mod APK Hack legal to use?<br>
82
- A: PUBG Mobile 1.8 Mod APK Hack is not legal to use, as it violates the terms of service and the intellectual property rights of PUBG Corporation, the developer of the game. It may also infringe on the rights of other players who are playing legitimately.</li>
83
- <li>Q: Where can I download PUBG Mobile 1.8 Mod APK Hack?<br>
84
- A: You can download PUBG Mobile 1.8 Mod APK Hack from various sources on the internet, such as websites, blogs, forums, or social media. However, you should be careful and cautious when downloading any file or app from unknown sources, as they may be fake, corrupted, or malicious.</li>
85
- <li>Q: How can I update PUBG Mobile 1.8 Mod APK Hack?<br>
86
- A: You cannot update PUBG Mobile 1.8 Mod APK Hack from the Play Store or any other source, as it is a modded version of the game that is not compatible with the official version. You will have to wait for the developer of the modded version to release a new update that matches the original version.</li>
87
- <li>Q: Can I play PUBG Mobile 1.8 Mod APK Hack with my friends?<br>
88
- A: You can play PUBG Mobile 1.8 Mod APK Hack with your friends if they are also using the same modded version of the game. However, you cannot play with your friends who are using the official version of the game, as they will not be able to join your server or match.</li>
89
- </ol></p><br />
90
- <br />
91
- <br />
 
spaces/44ov41za8i/FreeVC/speaker_encoder/train.py DELETED
@@ -1,125 +0,0 @@
1
- from speaker_encoder.visualizations import Visualizations
2
- from speaker_encoder.data_objects import SpeakerVerificationDataLoader, SpeakerVerificationDataset
3
- from speaker_encoder.params_model import *
4
- from speaker_encoder.model import SpeakerEncoder
5
- from utils.profiler import Profiler
6
- from pathlib import Path
7
- import torch
8
-
9
- def sync(device: torch.device):
10
- # FIXME
11
- return
12
- # For correct profiling (cuda operations are async)
13
- if device.type == "cuda":
14
- torch.cuda.synchronize(device)
15
-
16
- def train(run_id: str, clean_data_root: Path, models_dir: Path, umap_every: int, save_every: int,
17
- backup_every: int, vis_every: int, force_restart: bool, visdom_server: str,
18
- no_visdom: bool):
19
- # Create a dataset and a dataloader
20
- dataset = SpeakerVerificationDataset(clean_data_root)
21
- loader = SpeakerVerificationDataLoader(
22
- dataset,
23
- speakers_per_batch, # 64
24
- utterances_per_speaker, # 10
25
- num_workers=8,
26
- )
27
-
28
- # Setup the device on which to run the forward pass and the loss. These can be different,
29
- # because the forward pass is faster on the GPU whereas the loss is often (depending on your
30
- # hyperparameters) faster on the CPU.
31
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
32
- # FIXME: currently, the gradient is None if loss_device is cuda
33
- loss_device = torch.device("cpu")
34
-
35
- # Create the model and the optimizer
36
- model = SpeakerEncoder(device, loss_device)
37
- optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate_init)
38
- init_step = 1
39
-
40
- # Configure file path for the model
41
- state_fpath = models_dir.joinpath(run_id + ".pt")
42
- backup_dir = models_dir.joinpath(run_id + "_backups")
43
-
44
- # Load any existing model
45
- if not force_restart:
46
- if state_fpath.exists():
47
- print("Found existing model \"%s\", loading it and resuming training." % run_id)
48
- checkpoint = torch.load(state_fpath)
49
- init_step = checkpoint["step"]
50
- model.load_state_dict(checkpoint["model_state"])
51
- optimizer.load_state_dict(checkpoint["optimizer_state"])
52
- optimizer.param_groups[0]["lr"] = learning_rate_init
53
- else:
54
- print("No model \"%s\" found, starting training from scratch." % run_id)
55
- else:
56
- print("Starting the training from scratch.")
57
- model.train()
58
-
59
- # Initialize the visualization environment
60
- vis = Visualizations(run_id, vis_every, server=visdom_server, disabled=no_visdom)
61
- vis.log_dataset(dataset)
62
- vis.log_params()
63
- device_name = str(torch.cuda.get_device_name(0) if torch.cuda.is_available() else "CPU")
64
- vis.log_implementation({"Device": device_name})
65
-
66
- # Training loop
67
- profiler = Profiler(summarize_every=10, disabled=False)
68
- for step, speaker_batch in enumerate(loader, init_step):
69
- profiler.tick("Blocking, waiting for batch (threaded)")
70
-
71
- # Forward pass
72
- inputs = torch.from_numpy(speaker_batch.data).to(device)
73
- sync(device)
74
- profiler.tick("Data to %s" % device)
75
- embeds = model(inputs)
76
- sync(device)
77
- profiler.tick("Forward pass")
78
- embeds_loss = embeds.view((speakers_per_batch, utterances_per_speaker, -1)).to(loss_device)
79
- loss, eer = model.loss(embeds_loss)
80
- sync(loss_device)
81
- profiler.tick("Loss")
82
-
83
- # Backward pass
84
- model.zero_grad()
85
- loss.backward()
86
- profiler.tick("Backward pass")
87
- model.do_gradient_ops()
88
- optimizer.step()
89
- profiler.tick("Parameter update")
90
-
91
- # Update visualizations
92
- # learning_rate = optimizer.param_groups[0]["lr"]
93
- vis.update(loss.item(), eer, step)
94
-
95
- # Draw projections and save them to the backup folder
96
- if umap_every != 0 and step % umap_every == 0:
97
- print("Drawing and saving projections (step %d)" % step)
98
- backup_dir.mkdir(exist_ok=True)
99
- projection_fpath = backup_dir.joinpath("%s_umap_%06d.png" % (run_id, step))
100
- embeds = embeds.detach().cpu().numpy()
101
- vis.draw_projections(embeds, utterances_per_speaker, step, projection_fpath)
102
- vis.save()
103
-
104
- # Overwrite the latest version of the model
105
- if save_every != 0 and step % save_every == 0:
106
- print("Saving the model (step %d)" % step)
107
- torch.save({
108
- "step": step + 1,
109
- "model_state": model.state_dict(),
110
- "optimizer_state": optimizer.state_dict(),
111
- }, state_fpath)
112
-
113
- # Make a backup
114
- if backup_every != 0 and step % backup_every == 0:
115
- print("Making a backup (step %d)" % step)
116
- backup_dir.mkdir(exist_ok=True)
117
- backup_fpath = backup_dir.joinpath("%s_bak_%06d.pt" % (run_id, step))
118
- torch.save({
119
- "step": step + 1,
120
- "model_state": model.state_dict(),
121
- "optimizer_state": optimizer.state_dict(),
122
- }, backup_fpath)
123
-
124
- profiler.tick("Extras (visualizations, saving)")
125
-
 
 
spaces/AIConsultant/MusicGen/audiocraft/modules/seanet.py DELETED
@@ -1,258 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- import typing as tp
8
-
9
- import numpy as np
10
- import torch.nn as nn
11
-
12
- from .conv import StreamableConv1d, StreamableConvTranspose1d
13
- from .lstm import StreamableLSTM
14
-
15
-
16
- class SEANetResnetBlock(nn.Module):
17
- """Residual block from SEANet model.
18
-
19
- Args:
20
- dim (int): Dimension of the input/output.
21
- kernel_sizes (list): List of kernel sizes for the convolutions.
22
- dilations (list): List of dilations for the convolutions.
23
- activation (str): Activation function.
24
- activation_params (dict): Parameters to provide to the activation function.
25
- norm (str): Normalization method.
26
- norm_params (dict): Parameters to provide to the underlying normalization used along with the convolution.
27
- causal (bool): Whether to use fully causal convolution.
28
- pad_mode (str): Padding mode for the convolutions.
29
- compress (int): Reduced dimensionality in residual branches (from Demucs v3).
30
- true_skip (bool): Whether to use true skip connection or a simple
31
- (streamable) convolution as the skip connection.
32
- """
33
- def __init__(self, dim: int, kernel_sizes: tp.List[int] = [3, 1], dilations: tp.List[int] = [1, 1],
34
- activation: str = 'ELU', activation_params: dict = {'alpha': 1.0},
35
- norm: str = 'none', norm_params: tp.Dict[str, tp.Any] = {}, causal: bool = False,
36
- pad_mode: str = 'reflect', compress: int = 2, true_skip: bool = True):
37
- super().__init__()
38
- assert len(kernel_sizes) == len(dilations), 'Number of kernel sizes should match number of dilations'
39
- act = getattr(nn, activation)
40
- hidden = dim // compress
41
- block = []
42
- for i, (kernel_size, dilation) in enumerate(zip(kernel_sizes, dilations)):
43
- in_chs = dim if i == 0 else hidden
44
- out_chs = dim if i == len(kernel_sizes) - 1 else hidden
45
- block += [
46
- act(**activation_params),
47
- StreamableConv1d(in_chs, out_chs, kernel_size=kernel_size, dilation=dilation,
48
- norm=norm, norm_kwargs=norm_params,
49
- causal=causal, pad_mode=pad_mode),
50
- ]
51
- self.block = nn.Sequential(*block)
52
- self.shortcut: nn.Module
53
- if true_skip:
54
- self.shortcut = nn.Identity()
55
- else:
56
- self.shortcut = StreamableConv1d(dim, dim, kernel_size=1, norm=norm, norm_kwargs=norm_params,
57
- causal=causal, pad_mode=pad_mode)
58
-
59
- def forward(self, x):
60
- return self.shortcut(x) + self.block(x)
61
-
62
-
63
- class SEANetEncoder(nn.Module):
64
- """SEANet encoder.
65
-
66
- Args:
67
- channels (int): Audio channels.
68
- dimension (int): Intermediate representation dimension.
69
- n_filters (int): Base width for the model.
70
- n_residual_layers (int): nb of residual layers.
71
- ratios (Sequence[int]): kernel size and stride ratios. The encoder uses downsampling ratios instead of
72
- upsampling ratios, hence it will use the ratios in the reverse order to the ones specified here
73
- that must match the decoder order. We use the decoder order as some models may only employ the decoder.
74
- activation (str): Activation function.
75
- activation_params (dict): Parameters to provide to the activation function.
76
- norm (str): Normalization method.
77
- norm_params (dict): Parameters to provide to the underlying normalization used along with the convolution.
78
- kernel_size (int): Kernel size for the initial convolution.
79
-         last_kernel_size (int): Kernel size for the last convolution.
80
- residual_kernel_size (int): Kernel size for the residual layers.
81
- dilation_base (int): How much to increase the dilation with each layer.
82
- causal (bool): Whether to use fully causal convolution.
83
- pad_mode (str): Padding mode for the convolutions.
84
- true_skip (bool): Whether to use true skip connection or a simple
85
- (streamable) convolution as the skip connection in the residual network blocks.
86
- compress (int): Reduced dimensionality in residual branches (from Demucs v3).
87
- lstm (int): Number of LSTM layers at the end of the encoder.
88
- disable_norm_outer_blocks (int): Number of blocks for which we don't apply norm.
89
- For the encoder, it corresponds to the N first blocks.
90
- """
91
- def __init__(self, channels: int = 1, dimension: int = 128, n_filters: int = 32, n_residual_layers: int = 3,
92
- ratios: tp.List[int] = [8, 5, 4, 2], activation: str = 'ELU', activation_params: dict = {'alpha': 1.0},
93
- norm: str = 'none', norm_params: tp.Dict[str, tp.Any] = {}, kernel_size: int = 7,
94
- last_kernel_size: int = 7, residual_kernel_size: int = 3, dilation_base: int = 2, causal: bool = False,
95
- pad_mode: str = 'reflect', true_skip: bool = True, compress: int = 2, lstm: int = 0,
96
- disable_norm_outer_blocks: int = 0):
97
- super().__init__()
98
- self.channels = channels
99
- self.dimension = dimension
100
- self.n_filters = n_filters
101
- self.ratios = list(reversed(ratios))
102
- del ratios
103
- self.n_residual_layers = n_residual_layers
104
- self.hop_length = np.prod(self.ratios)
105
- self.n_blocks = len(self.ratios) + 2 # first and last conv + residual blocks
106
- self.disable_norm_outer_blocks = disable_norm_outer_blocks
107
- assert self.disable_norm_outer_blocks >= 0 and self.disable_norm_outer_blocks <= self.n_blocks, \
108
- "Number of blocks for which to disable norm is invalid." \
109
- "It should be lower or equal to the actual number of blocks in the network and greater or equal to 0."
110
-
111
- act = getattr(nn, activation)
112
- mult = 1
113
- model: tp.List[nn.Module] = [
114
- StreamableConv1d(channels, mult * n_filters, kernel_size,
115
- norm='none' if self.disable_norm_outer_blocks >= 1 else norm,
116
- norm_kwargs=norm_params, causal=causal, pad_mode=pad_mode)
117
- ]
118
- # Downsample to raw audio scale
119
- for i, ratio in enumerate(self.ratios):
120
- block_norm = 'none' if self.disable_norm_outer_blocks >= i + 2 else norm
121
- # Add residual layers
122
- for j in range(n_residual_layers):
123
- model += [
124
- SEANetResnetBlock(mult * n_filters, kernel_sizes=[residual_kernel_size, 1],
125
- dilations=[dilation_base ** j, 1],
126
- norm=block_norm, norm_params=norm_params,
127
- activation=activation, activation_params=activation_params,
128
- causal=causal, pad_mode=pad_mode, compress=compress, true_skip=true_skip)]
129
-
130
- # Add downsampling layers
131
- model += [
132
- act(**activation_params),
133
- StreamableConv1d(mult * n_filters, mult * n_filters * 2,
134
- kernel_size=ratio * 2, stride=ratio,
135
- norm=block_norm, norm_kwargs=norm_params,
136
- causal=causal, pad_mode=pad_mode),
137
- ]
138
- mult *= 2
139
-
140
- if lstm:
141
- model += [StreamableLSTM(mult * n_filters, num_layers=lstm)]
142
-
143
- model += [
144
- act(**activation_params),
145
- StreamableConv1d(mult * n_filters, dimension, last_kernel_size,
146
- norm='none' if self.disable_norm_outer_blocks == self.n_blocks else norm,
147
- norm_kwargs=norm_params, causal=causal, pad_mode=pad_mode)
148
- ]
149
-
150
- self.model = nn.Sequential(*model)
151
-
152
- def forward(self, x):
153
- return self.model(x)
154
-
155
-
156
- class SEANetDecoder(nn.Module):
157
- """SEANet decoder.
158
-
159
- Args:
160
- channels (int): Audio channels.
161
- dimension (int): Intermediate representation dimension.
162
- n_filters (int): Base width for the model.
163
- n_residual_layers (int): nb of residual layers.
164
- ratios (Sequence[int]): kernel size and stride ratios.
165
- activation (str): Activation function.
166
- activation_params (dict): Parameters to provide to the activation function.
167
- final_activation (str): Final activation function after all convolutions.
168
- final_activation_params (dict): Parameters to provide to the activation function.
169
- norm (str): Normalization method.
170
- norm_params (dict): Parameters to provide to the underlying normalization used along with the convolution.
171
- kernel_size (int): Kernel size for the initial convolution.
172
-         last_kernel_size (int): Kernel size for the last convolution.
173
- residual_kernel_size (int): Kernel size for the residual layers.
174
- dilation_base (int): How much to increase the dilation with each layer.
175
- causal (bool): Whether to use fully causal convolution.
176
- pad_mode (str): Padding mode for the convolutions.
177
- true_skip (bool): Whether to use true skip connection or a simple.
178
- (streamable) convolution as the skip connection in the residual network blocks.
179
- compress (int): Reduced dimensionality in residual branches (from Demucs v3).
180
- lstm (int): Number of LSTM layers at the end of the encoder.
181
- disable_norm_outer_blocks (int): Number of blocks for which we don't apply norm.
182
- For the decoder, it corresponds to the N last blocks.
183
- trim_right_ratio (float): Ratio for trimming at the right of the transposed convolution under the causal setup.
184
- If equal to 1.0, it means that all the trimming is done at the right.
185
- """
186
- def __init__(self, channels: int = 1, dimension: int = 128, n_filters: int = 32, n_residual_layers: int = 3,
187
- ratios: tp.List[int] = [8, 5, 4, 2], activation: str = 'ELU', activation_params: dict = {'alpha': 1.0},
188
- final_activation: tp.Optional[str] = None, final_activation_params: tp.Optional[dict] = None,
189
- norm: str = 'none', norm_params: tp.Dict[str, tp.Any] = {}, kernel_size: int = 7,
190
- last_kernel_size: int = 7, residual_kernel_size: int = 3, dilation_base: int = 2, causal: bool = False,
191
- pad_mode: str = 'reflect', true_skip: bool = True, compress: int = 2, lstm: int = 0,
192
- disable_norm_outer_blocks: int = 0, trim_right_ratio: float = 1.0):
193
- super().__init__()
194
- self.dimension = dimension
195
- self.channels = channels
196
- self.n_filters = n_filters
197
- self.ratios = ratios
198
- del ratios
199
- self.n_residual_layers = n_residual_layers
200
- self.hop_length = np.prod(self.ratios)
201
- self.n_blocks = len(self.ratios) + 2 # first and last conv + residual blocks
202
- self.disable_norm_outer_blocks = disable_norm_outer_blocks
203
- assert self.disable_norm_outer_blocks >= 0 and self.disable_norm_outer_blocks <= self.n_blocks, \
204
- "Number of blocks for which to disable norm is invalid." \
205
- "It should be lower or equal to the actual number of blocks in the network and greater or equal to 0."
206
-
207
- act = getattr(nn, activation)
208
- mult = int(2 ** len(self.ratios))
209
- model: tp.List[nn.Module] = [
210
- StreamableConv1d(dimension, mult * n_filters, kernel_size,
211
- norm='none' if self.disable_norm_outer_blocks == self.n_blocks else norm,
212
- norm_kwargs=norm_params, causal=causal, pad_mode=pad_mode)
213
- ]
214
-
215
- if lstm:
216
- model += [StreamableLSTM(mult * n_filters, num_layers=lstm)]
217
-
218
- # Upsample to raw audio scale
219
- for i, ratio in enumerate(self.ratios):
220
- block_norm = 'none' if self.disable_norm_outer_blocks >= self.n_blocks - (i + 1) else norm
221
- # Add upsampling layers
222
- model += [
223
- act(**activation_params),
224
- StreamableConvTranspose1d(mult * n_filters, mult * n_filters // 2,
225
- kernel_size=ratio * 2, stride=ratio,
226
- norm=block_norm, norm_kwargs=norm_params,
227
- causal=causal, trim_right_ratio=trim_right_ratio),
228
- ]
229
- # Add residual layers
230
- for j in range(n_residual_layers):
231
- model += [
232
- SEANetResnetBlock(mult * n_filters // 2, kernel_sizes=[residual_kernel_size, 1],
233
- dilations=[dilation_base ** j, 1],
234
- activation=activation, activation_params=activation_params,
235
- norm=block_norm, norm_params=norm_params, causal=causal,
236
- pad_mode=pad_mode, compress=compress, true_skip=true_skip)]
237
-
238
- mult //= 2
239
-
240
- # Add final layers
241
- model += [
242
- act(**activation_params),
243
- StreamableConv1d(n_filters, channels, last_kernel_size,
244
- norm='none' if self.disable_norm_outer_blocks >= 1 else norm,
245
- norm_kwargs=norm_params, causal=causal, pad_mode=pad_mode)
246
- ]
247
- # Add optional final activation to decoder (eg. tanh)
248
- if final_activation is not None:
249
- final_act = getattr(nn, final_activation)
250
- final_activation_params = final_activation_params or {}
251
- model += [
252
- final_act(**final_activation_params)
253
- ]
254
- self.model = nn.Sequential(*model)
255
-
256
- def forward(self, z):
257
- y = self.model(z)
258
- return y
 
spaces/AIFILMS/StyleGANEX/models/encoders/model_irse.py DELETED
@@ -1,84 +0,0 @@
1
- from torch.nn import Linear, Conv2d, BatchNorm1d, BatchNorm2d, PReLU, Dropout, Sequential, Module
2
- from models.encoders.helpers import get_blocks, Flatten, bottleneck_IR, bottleneck_IR_SE, l2_norm
3
-
4
- """
5
- Modified Backbone implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch)
6
- """
7
-
8
-
9
- class Backbone(Module):
10
- def __init__(self, input_size, num_layers, mode='ir', drop_ratio=0.4, affine=True):
11
- super(Backbone, self).__init__()
12
- assert input_size in [112, 224], "input_size should be 112 or 224"
13
- assert num_layers in [50, 100, 152], "num_layers should be 50, 100 or 152"
14
- assert mode in ['ir', 'ir_se'], "mode should be ir or ir_se"
15
- blocks = get_blocks(num_layers)
16
- if mode == 'ir':
17
- unit_module = bottleneck_IR
18
- elif mode == 'ir_se':
19
- unit_module = bottleneck_IR_SE
20
- self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
21
- BatchNorm2d(64),
22
- PReLU(64))
23
- if input_size == 112:
24
- self.output_layer = Sequential(BatchNorm2d(512),
25
- Dropout(drop_ratio),
26
- Flatten(),
27
- Linear(512 * 7 * 7, 512),
28
- BatchNorm1d(512, affine=affine))
29
- else:
30
- self.output_layer = Sequential(BatchNorm2d(512),
31
- Dropout(drop_ratio),
32
- Flatten(),
33
- Linear(512 * 14 * 14, 512),
34
- BatchNorm1d(512, affine=affine))
35
-
36
- modules = []
37
- for block in blocks:
38
- for bottleneck in block:
39
- modules.append(unit_module(bottleneck.in_channel,
40
- bottleneck.depth,
41
- bottleneck.stride))
42
- self.body = Sequential(*modules)
43
-
44
- def forward(self, x):
45
- x = self.input_layer(x)
46
- x = self.body(x)
47
- x = self.output_layer(x)
48
- return l2_norm(x)
49
-
50
-
51
- def IR_50(input_size):
52
- """Constructs a ir-50 model."""
53
- model = Backbone(input_size, num_layers=50, mode='ir', drop_ratio=0.4, affine=False)
54
- return model
55
-
56
-
57
- def IR_101(input_size):
58
- """Constructs a ir-101 model."""
59
- model = Backbone(input_size, num_layers=100, mode='ir', drop_ratio=0.4, affine=False)
60
- return model
61
-
62
-
63
- def IR_152(input_size):
64
- """Constructs a ir-152 model."""
65
- model = Backbone(input_size, num_layers=152, mode='ir', drop_ratio=0.4, affine=False)
66
- return model
67
-
68
-
69
- def IR_SE_50(input_size):
70
- """Constructs a ir_se-50 model."""
71
- model = Backbone(input_size, num_layers=50, mode='ir_se', drop_ratio=0.4, affine=False)
72
- return model
73
-
74
-
75
- def IR_SE_101(input_size):
76
- """Constructs a ir_se-101 model."""
77
- model = Backbone(input_size, num_layers=100, mode='ir_se', drop_ratio=0.4, affine=False)
78
- return model
79
-
80
-
81
- def IR_SE_152(input_size):
82
- """Constructs a ir_se-152 model."""
83
- model = Backbone(input_size, num_layers=152, mode='ir_se', drop_ratio=0.4, affine=False)
84
- return model
 
spaces/AUST001/HDTV/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: HDTV
3
- emoji: 👁
4
- colorFrom: blue
5
- colorTo: purple
6
- sdk: gradio
7
- sdk_version: 3.23.0
8
- app_file: app.py
9
- pinned: false
10
- license: cc-by-nc-nd-4.0
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/AchyuthGamer/MagicPrompt-Stable-Diffusion/app.py DELETED
@@ -1,96 +0,0 @@
1
- from transformers import pipeline, set_seed
2
- import gradio as grad, random, re
3
- import os
4
- import sys
5
-
6
- gpt2_pipe = pipeline('text-generation', model='Gustavosta/MagicPrompt-Stable-Diffusion', tokenizer='gpt2')
7
-
8
- def generate(starting_text):
9
- with open("ideas.txt", "r") as f:
10
- line = f.readlines()
11
- seed = random.randint(100, 1000000)
12
- set_seed(seed)
13
-
14
- if starting_text == "":
15
- starting_text: str = line[random.randrange(0, len(line))].replace("\n", "").capitalize()
16
- starting_text: str = re.sub(r"\.", '', starting_text)
17
-
18
- response = gpt2_pipe(starting_text, max_length=(len(starting_text) + random.randint(60, 80)), num_return_sequences=1)
19
- response_list = []
20
- for x in response:
21
- resp = x['generated_text'].strip()
22
- if resp != starting_text and len(resp) > (len(starting_text) + 4) and resp.endswith((":", "-", "—")) is False:
23
- response_list.append(resp)
24
-
25
- response_end = "\n".join(response_list)
26
- response_end = re.sub('[^ ]+\.[^ ]+','', response_end)
27
- response_end = response_end.replace("<", "").replace(">", "")
28
-
29
- if response_end != "":
30
- return response_end
31
-
32
- with grad.Blocks(css='style.css') as demo:
33
- grad.HTML(
34
- """
35
- <div style="text-align: center; max-width: 650px; margin: 0 auto;">
36
- <div>
37
- <h1 style="font-weight: 900; font-size: 3rem; margin-bottom:20px;">
38
- The Stable Diffusion Prompt Generator - because your text needs a little more visual spice.
39
- </h1>
40
- </div>
41
- <p style="margin-bottom: 10px; font-size: 96%">
42
- Ready to see some magic happen? Simply type in your basic idea. Feeling lazy? No problem, just hit the "Magic Prompt" button and it will randomly pull from a list of thousands of ideas for you.
43
- </p>
44
- <p style="margin-bottom: 10px; font-size: 98%">
45
- ❤️ Press the Like Button if you enjoy my space! ❤️</a>
46
- </p>
47
- </div>
48
- """
49
- )
50
- with grad.Column(elem_id="col-container"):
51
- with grad.Row(variant="compact"):
52
- txt = grad.Textbox(
53
- label="Initial Text",
54
- show_label=False,
55
- max_lines=1,
56
- placeholder="Enter a basic idea",
57
- ).style(
58
- container=False,
59
- )
60
- run = grad.Button("✨ Magic Prompt ✨").style(full_width=False)
61
-
62
-
63
-
64
- with grad.Row(variant="compact"):
65
- out = grad.Textbox(
66
- label="Generated Text",
67
- show_label=False,
68
- lines=5,
69
- ).style(
70
- container=False,
71
- )
72
-
73
- run.click(generate, inputs=[txt], outputs=[out])
74
-
75
-
76
-
77
- with grad.Row():
78
- grad.HTML(
79
- """
80
- <div class="footer">
81
- <p> Powered by <a href="https://huggingface.co/Gustavosta">Gustavosta</a> Stable Diffusion model
82
- </p>
83
- </div>
84
- <div class="acknowledgments" style="font-size: 115%">
85
- <p> Transform your boring ideas into creative masterpieces with just one click! Enter a spark of inspiration and let the "Magic Prompt" button work its magic.
86
- </p>
87
- </div>
88
- """
89
- )
90
-
91
-
92
- fn=generate,
93
- run=generate,
94
- inputs=txt,
95
- outputs=out
96
- demo.launch(enable_queue=False, inline=True)
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/bejeweled/actions/SelectChess.js DELETED
@@ -1,9 +0,0 @@
1
- /*
2
- Do nothing
3
- */
4
-
5
- var SelectChess = function (chess, board, bejeweled) {
6
- // Do nothing
7
- }
8
-
9
- export default SelectChess;
 
spaces/Akash473/FunkoHairBeard/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: FunkoHairBeard
3
- emoji: 🏢
4
- colorFrom: red
5
- colorTo: yellow
6
- sdk: gradio
7
- sdk_version: 3.44.2
8
- app_file: app.py
9
- pinned: false
10
- license: openrail
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Akash473/FunkoHairBeard/app.py DELETED
@@ -1,502 +0,0 @@
1
- from io import BytesIO
2
- import base64
3
-
4
- import numpy as np
5
- import torch
6
- import torch.nn as nn
7
- import torch.optim as optim
8
- from torchvision import transforms, models
9
- from PIL import Image
10
- import gradio as gr
11
-
12
- # Combined Code for Beard and Hairstyle Detection and Styling
13
-
14
- male_background_image_paths = [
15
- "Data/AdobeColorFunko/Outfits/MenOutfits/DummyDress1.png",
16
- "Data/AdobeColorFunko/Outfits/MenOutfits/GlassesDummy.png",
17
- "Data/AdobeColorFunko/Outfits/MenOutfits/DummyDress3.png"
18
- ]
19
-
20
- female_background_image_paths = [
21
- "Data/AdobeColorFunko/Outfits/WomenOutfits/WomenOne.png",
22
- "Data/AdobeColorFunko/Outfits/WomenOutfits/WomenTwo.png",
23
- "Data/AdobeColorFunko/Outfits/WomenOutfits/WomenThree.png"
24
- ]
25
-
26
-
27
- class GenderClassifier:
28
- def __init__(self, model_path, class_names):
29
- self.model = models.resnet18(pretrained=False)
30
- num_ftrs = self.model.fc.in_features
31
- self.model.fc = nn.Linear(num_ftrs, len(class_names))
32
- self.load_model(model_path)
33
- self.model.eval()
34
- self.data_transforms = transforms.Compose([
35
- transforms.Resize((224, 224)),
36
- transforms.ToTensor(),
37
- transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
38
- ])
39
- self.class_names = class_names
40
-
41
- def preprocess_image(self, image_path):
42
- image = Image.open(image_path).convert("RGB")
43
- image = self.data_transforms(image)
44
- image = image.unsqueeze(0)
45
- return image
46
-
47
- def load_model(self, model_path):
48
- if torch.cuda.is_available():
49
- self.model.load_state_dict(torch.load(model_path))
50
- else:
51
- self.model.load_state_dict(torch.load(model_path, map_location=torch.device('cpu')))
52
-
53
- def classify_gender(self, image_path):
54
- input_image = self.preprocess_image(image_path)
55
-
56
- with torch.no_grad():
57
- predictions = self.model(input_image)
58
-
59
- probabilities = torch.nn.functional.softmax(predictions[0], dim=0)
60
- predicted_class = torch.argmax(probabilities).item()
61
- predicted_label = self.class_names[predicted_class]
62
-
63
- return predicted_label
64
-
65
- class WomenHairStyleClassifier:
66
- def __init__(self, model_path, class_names):
67
- self.model = models.resnet18(pretrained=False)
68
- num_ftrs = self.model.fc.in_features
69
- self.model.fc = nn.Linear(num_ftrs, len(class_names))
70
- self.load_model(model_path)
71
- self.model.eval()
72
- self.data_transforms = transforms.Compose([
73
- transforms.Resize((224, 224)),
74
- transforms.ToTensor(),
75
- transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
76
- ])
77
- self.class_names = class_names
78
-
79
- def preprocess_image(self, image_path):
80
- image = Image.open(image_path).convert("RGB")
81
- image = self.data_transforms(image)
82
- image = image.unsqueeze(0)
83
- return image
84
-
85
- def load_model(self, model_path):
86
- if torch.cuda.is_available():
87
- self.model.load_state_dict(torch.load(model_path))
88
- else:
89
- self.model.load_state_dict(torch.load(model_path, map_location=torch.device('cpu')))
90
-
91
- def classify_hairStyle(self, image_path):
92
- input_image = self.preprocess_image(image_path)
93
-
94
- with torch.no_grad():
95
- predictions = self.model(input_image)
96
-
97
- probabilities = torch.nn.functional.softmax(predictions[0], dim=0)
98
- predicted_class = torch.argmax(probabilities).item()
99
- predicted_label = self.class_names[predicted_class]
100
-
101
- return predicted_label
102
-
103
- class WomenHairColorClassifier:
104
- def __init__(self, model_path, class_names):
105
- self.model = models.resnet18(pretrained=False)
106
- num_ftrs = self.model.fc.in_features
107
- self.model.fc = nn.Linear(num_ftrs, len(class_names))
108
- self.load_model(model_path)
109
- self.model.eval()
110
- self.data_transforms = transforms.Compose([
111
- transforms.Resize((224, 224)),
112
- transforms.ToTensor(),
113
- transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
114
- ])
115
- self.class_names = class_names
116
-
117
- def preprocess_image(self, image_path):
118
- image = Image.open(image_path).convert("RGB")
119
- image = self.data_transforms(image)
120
- image = image.unsqueeze(0)
121
- return image
122
-
123
- def load_model(self, model_path):
124
- if torch.cuda.is_available():
125
- self.model.load_state_dict(torch.load(model_path))
126
- else:
127
- self.model.load_state_dict(torch.load(model_path, map_location=torch.device('cpu')))
128
-
129
- def classify_hairColor(self, image_path):
130
- input_image = self.preprocess_image(image_path)
131
-
132
- with torch.no_grad():
133
- predictions = self.model(input_image)
134
-
135
- probabilities = torch.nn.functional.softmax(predictions[0], dim=0)
136
- predicted_class = torch.argmax(probabilities).item()
137
- predicted_label = self.class_names[predicted_class]
138
-
139
- return predicted_label
140
- # Function to classify beard style
141
- class BeardClassifier:
142
- def __init__(self, model_path, class_names):
143
- self.model = torch.hub.load('pytorch/vision', 'resnet18', pretrained=False)
144
- num_ftrs = self.model.fc.in_features
145
- self.model.fc = torch.nn.Linear(num_ftrs, len(class_names))
146
- self.load_model(model_path)
147
- self.model.eval()
148
- self.data_transforms = transforms.Compose([
149
- transforms.Resize((224, 224)),
150
- transforms.ToTensor(),
151
- transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
152
- ])
153
- self.class_names = class_names
154
-
155
- def preprocess_image(self, image):
156
- image = Image.open(image).convert("RGB")
157
- image = self.data_transforms(image)
158
- image = image.unsqueeze(0)
159
- return image
160
-
161
- def load_model(self, model_path):
162
- if torch.cuda.is_available():
163
- self.model.load_state_dict(torch.load(model_path))
164
- else:
165
- self.model.load_state_dict(torch.load(model_path, map_location=torch.device('cpu')))
166
-
167
- def classify_beard(self, image):
168
- input_image = self.preprocess_image(image)
169
- with torch.no_grad():
170
- predictions = self.model(input_image)
171
- probabilities = torch.nn.functional.softmax(predictions[0], dim=0)
172
- predicted_class = torch.argmax(probabilities).item()
173
- predicted_label = self.class_names[predicted_class]
174
- return predicted_label
175
-
176
- # Function to classify beard color
177
- class BeardColorClassifier:
178
- def __init__(self, model_path, class_names):
179
- self.model = torch.hub.load('pytorch/vision', 'resnet18', pretrained=False)
180
- num_ftrs = self.model.fc.in_features
181
- self.model.fc = torch.nn.Linear(num_ftrs, len(class_names))
182
- self.load_model(model_path)
183
- self.model.eval()
184
- self.data_transforms = transforms.Compose([
185
- transforms.Resize((224, 224)),
186
- transforms.ToTensor(),
187
- transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
188
- ])
189
- self.class_names = class_names
190
-
191
- def preprocess_image(self, image):
192
- image = Image.open(image).convert("RGB")
193
- image = self.data_transforms(image)
194
- image = image.unsqueeze(0)
195
- return image
196
-
197
- def load_model(self, model_path):
198
- if torch.cuda.is_available():
199
- self.model.load_state_dict(torch.load(model_path))
200
- else:
201
- self.model.load_state_dict(torch.load(model_path, map_location=torch.device('cpu')))
202
-
203
- def classify_beard_color(self, image):
204
- input_image = self.preprocess_image(image)
205
- with torch.no_grad():
206
- predictions = self.model(input_image)
207
- probabilities = torch.nn.functional.softmax(predictions[0], dim=0)
208
- predicted_class = torch.argmax(probabilities).item()
209
- predicted_label = self.class_names[predicted_class]
210
- return predicted_label
211
-
212
-
213
- # Function to classify hairstyle
214
- class HairStyleClassifier:
215
- def __init__(self, model_path, class_names):
216
- self.model = torch.hub.load('pytorch/vision', 'resnet18', pretrained=False)
217
- num_ftrs = self.model.fc.in_features
218
- self.model.fc = torch.nn.Linear(num_ftrs, len(class_names))
219
- self.load_model(model_path)
220
- self.model.eval()
221
- self.data_transforms = transforms.Compose([
222
- transforms.Resize((224, 224)),
223
- transforms.ToTensor(),
224
- transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
225
- ])
226
- self.class_names = class_names
227
-
228
- def preprocess_image(self, image):
229
- image = Image.open(image).convert("RGB")
230
- image = self.data_transforms(image)
231
- image = image.unsqueeze(0)
232
- return image
233
-
234
- def load_model(self, model_path):
235
- if torch.cuda.is_available():
236
- self.model.load_state_dict(torch.load(model_path))
237
- else:
238
- self.model.load_state_dict(torch.load(model_path, map_location=torch.device('cpu')))
239
-
240
- def classify_hair(self, image):
241
- input_image = self.preprocess_image(image)
242
- with torch.no_grad():
243
- predictions = self.model(input_image)
244
- probabilities = torch.nn.functional.softmax(predictions[0], dim=0)
245
- predicted_class = torch.argmax(probabilities).item()
246
- predicted_label = self.class_names[predicted_class]
247
- return predicted_label
248
-
249
- class MenHairColorClassifier:
250
- def __init__(self, model_path, class_names):
251
- self.model = torch.hub.load('pytorch/vision', 'resnet18', pretrained=False)
252
- num_ftrs = self.model.fc.in_features
253
- self.model.fc = torch.nn.Linear(num_ftrs, len(class_names))
254
- self.load_model(model_path)
255
- self.model.eval()
256
- self.data_transforms = transforms.Compose([
257
- transforms.Resize((224, 224)),
258
- transforms.ToTensor(),
259
- transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
260
- ])
261
- self.class_names = class_names
262
-
263
- def preprocess_image(self, image):
264
- image = Image.open(image).convert("RGB")
265
- image = self.data_transforms(image)
266
- image = image.unsqueeze(0)
267
- return image
268
-
269
- def load_model(self, model_path):
270
- if torch.cuda.is_available():
271
- self.model.load_state_dict(torch.load(model_path))
272
- else:
273
- self.model.load_state_dict(torch.load(model_path, map_location=torch.device('cpu')))
274
-
275
- def classify_menHair_color(self, image):
276
- input_image = self.preprocess_image(image)
277
- with torch.no_grad():
278
- predictions = self.model(input_image)
279
- probabilities = torch.nn.functional.softmax(predictions[0], dim=0)
280
- predicted_class = torch.argmax(probabilities).item()
281
- predicted_label = self.class_names[predicted_class]
282
- return predicted_label
283
-
284
-
285
- def dummy_eye(background_image, x, y, placeholder_image_path, x_coordinate, y_coordinate):
286
- placeholder_image = Image.open(placeholder_image_path)
287
- target_size = (x, y)
288
- placeholder_image = placeholder_image.resize(target_size, Image.LANCZOS)
289
- placeholder_array = np.array(placeholder_image)
290
- placeholder_width, placeholder_height = placeholder_image.size
291
- region_box = (x_coordinate, y_coordinate, x_coordinate + placeholder_width, y_coordinate + placeholder_height)
292
- placeholder_mask = placeholder_image.split()[3] if placeholder_image.mode == 'RGBA' else None
293
- background_image.paste(placeholder_image, region_box, mask=placeholder_mask)
294
- background_array = np.array(background_image)
295
-
296
- # Function to overlay a beard on a background image
297
- def process_image_Beard(background_image, x, placeholder_image_path, x_coordinate, y_coordinate):
298
- placeholder_image = Image.open(placeholder_image_path)
299
- target_size = (x, x)
300
- placeholder_image = placeholder_image.resize(target_size, Image.LANCZOS)
301
- placeholder_array = np.array(placeholder_image)
302
- placeholder_width, placeholder_height = placeholder_image.size
303
- region_box = (x_coordinate, y_coordinate, x_coordinate + placeholder_width, y_coordinate + placeholder_height)
304
- placeholder_mask = placeholder_image.split()[3] if placeholder_image.mode == 'RGBA' else None
305
- background_image.paste(placeholder_image, region_box, mask=placeholder_mask)
306
- background_array = np.array(background_image)
307
- placeholder_alpha = placeholder_image.split()[3] if placeholder_image.mode == 'RGBA' else None
308
-
309
- def process_image_WomanHair(background_image, x, y, placeholder_image_path, x_coordinate, y_coordinate):
310
- placeholder_image = Image.open(placeholder_image_path)
311
- target_size = (x, y)
312
- placeholder_image = placeholder_image.resize(target_size, Image.LANCZOS)
313
- placeholder_array = np.array(placeholder_image)
314
- placeholder_width, placeholder_height = placeholder_image.size
315
- region_box = (x_coordinate, y_coordinate, x_coordinate + placeholder_width, y_coordinate + placeholder_height)
316
- placeholder_mask = placeholder_image.split()[3] if placeholder_image.mode == 'RGBA' else None
317
- background_image.paste(placeholder_image, region_box, mask=placeholder_mask)
318
- background_array = np.array(background_image)
319
- placeholder_alpha = placeholder_image.split()[3] if placeholder_image.mode == 'RGBA' else None
320
-
321
-
322
- def add_eyebrow(background_image, x_coordinate, y_coordinate, eyebrow_image_path):
323
- eyebrow_image = Image.open(eyebrow_image_path)
324
- target_size = (200, 200) # Adjust the size as needed
325
- eyebrow_image = eyebrow_image.resize(target_size, Image.LANCZOS)
326
- region_box = (x_coordinate, y_coordinate, x_coordinate + eyebrow_image.width, y_coordinate + eyebrow_image.height)
327
- eyebrow_mask = eyebrow_image.split()[3] if eyebrow_image.mode == 'RGBA' else None
328
- background_image.paste(eyebrow_image, region_box, mask=eyebrow_mask)
329
- background_array = np.array(background_image)
330
-
331
-
332
-
333
-
334
- # Function to overlay a hairstyle on a background image
335
- def process_image_menHair(background_image, x, y, placeholder_image_path, x_coordinate, y_coordinate):
336
- placeholder_image = Image.open(placeholder_image_path)
337
- target_size = (x, y)
338
- placeholder_image = placeholder_image.resize(target_size, Image.LANCZOS)
339
- placeholder_array = np.array(placeholder_image)
340
- placeholder_width, placeholder_height = placeholder_image.size
341
- region_box = (x_coordinate, y_coordinate, x_coordinate + placeholder_width, y_coordinate + placeholder_height)
342
- placeholder_mask = placeholder_image.split()[3] if placeholder_image.mode == 'RGBA' else None
343
- background_image.paste(placeholder_image, region_box, mask=placeholder_mask)
344
- background_array = np.array(background_image)
345
- placeholder_alpha = placeholder_image.split()[3] if placeholder_image.mode == 'RGBA' else None
346
-
347
- # Function to generate Funko figurines
348
- def Igenerate_funko_figurines(input_image):
349
-
350
- WomenHairStyle_classifier = WomenHairStyleClassifier('Data/FunkoSavedModels/WomenHairStyle.pt', ['MediumLength', 'ShortHair', 'SidePlait'])
351
- predicted_WomenHairStyle = WomenHairStyle_classifier.classify_hairStyle(input_image)
352
-
353
- WomenHairColor_classifier = WomenHairColorClassifier('Data/FunkoSavedModels/WomenHairColor.pt', ['Black', 'Brown', 'Ginger', 'White'])
354
- predicted_WomenHairColor = WomenHairColor_classifier.classify_hairColor(input_image)
355
- # Detect and classify gender
356
- gender_classifier = GenderClassifier('Data/FunkoSavedModels/Gender.pt', ['Female', 'Male'])
357
- predicted_gender = gender_classifier.classify_gender(input_image)
358
-
359
- # Detect and classify beard style
360
- beard_classifier = BeardClassifier('Data/FunkoSavedModels/FunkoResnet18BeardStyle.pt', ['Bandholz', 'CleanShave', 'FullGoatee', 'Moustache', 'RapIndustryStandards', 'ShortBeard'])
361
- predicted_style_label = beard_classifier.classify_beard(input_image)
362
-
363
- # Detect and classify beard color
364
- beard_color_classifier = BeardColorClassifier('Data/FunkoSavedModels/FunkoResnet18BeardColor.pt', ['Black', 'DarkBrown', 'Ginger', 'LightBrown', 'SaltAndPepper', 'White'])
365
- predicted_color_label = beard_color_classifier.classify_beard_color(input_image)
366
-
367
- # Classify hairstyle
368
- hair_style_classifier = HairStyleClassifier('Data/FunkoSavedModels/FunkoResnet18HairStyle.pt', ['Afro', 'Bald', 'Puff', 'Spike'])
369
- predicted_hairStyle_label = hair_style_classifier.classify_hair(input_image)
370
-
371
- #classify menHairColor
372
- menhair_color_classifier = MenHairColorClassifier('Data/FunkoSavedModels/FunkoResnet18MenHairColor.pt', ['Black', 'DarkBrown', 'Ginger', 'LightBrown', 'SaltAndPepper', 'White'])
373
- predicted_menhairColor_label = menhair_color_classifier.classify_menHair_color(input_image)
374
- # Process background images and apply beard style and color along with hair style and color
375
- final_images = []
376
-
377
- if predicted_gender == 'Male':
378
- background_image_paths = male_background_image_paths
379
- if predicted_gender == 'Female':
380
- background_image_paths = female_background_image_paths
381
-
382
- for background_image_paths in background_image_paths:
383
- background_image = Image.open(background_image_paths)
384
- x_coordinate = 90
385
- y_coordinate = 50
386
- add_eyebrow(background_image, 115, 80, "Data/AdobeColorFunko/EyezBrowz/Eyebrow.png")
387
- #dummy_eye(background_image, 245, 345, 'Data/AdobeColorFunko/EyezBrowz/MaleEye.png', x_coordinate, y_coordinate)
388
- if predicted_gender == 'Male':
389
- x = 245
390
- y = 345
391
- placeholder_image_path = f"Data/AdobeColorFunko/EyezBrowz/{predicted_gender}Eye.png"
392
- x_coordinate = 90
393
- y_coordinate = 50
394
- dummy_eye(background_image, x, y, placeholder_image_path, x_coordinate, y_coordinate)
395
-
396
- if predicted_style_label == 'Bandholz':
397
- process_image_Beard(background_image, 320,
398
- f"Data/AdobeColorFunko/Beard/Bandholz/{predicted_color_label}.png",
399
- 50, 142)
400
-
401
- if predicted_style_label == 'ShortBeard':
402
- process_image_Beard(background_image, 300,
403
- f"Data/AdobeColorFunko/Beard/ShortBeard/{predicted_color_label}.png",
404
- 62, 118)
405
-
406
- if predicted_style_label == 'FullGoatee':
407
- process_image_Beard(background_image, 230,
408
- f"Data/AdobeColorFunko/Beard/Goatee/{predicted_color_label}.png",
409
- 96, 168)
410
-
411
- if predicted_style_label == 'RapIndustryStandards':
412
- process_image_Beard(background_image, 290,
413
- f"Data/AdobeColorFunko/Beard/RapIndustry/{predicted_color_label}.png",
414
- 67, 120)
415
-
416
- if predicted_style_label == 'Moustache':
417
- process_image_Beard(background_image, 220,
418
- f"Data/AdobeColorFunko/Beard/Moustache/{predicted_color_label}.png",
419
- 100, 160)
420
-
421
- if predicted_style_label == 'CleanShave':
422
- process_image_Beard(background_image, 220,
423
- f"Data/AdobeColorFunko/Beard/CleanShave/{predicted_color_label}.png",
424
- 100, 160)
425
-
426
- # Add other conditions for different beard styles
427
-
428
- # Overlay hairstyle
429
- if predicted_hairStyle_label == 'Afro':
430
- process_image_menHair(background_image, 336, 420,
431
- f"Data/AdobeColorFunko/MenHairstyle/Afro/{predicted_menhairColor_label}.png",
432
- 41, 76)
433
-
434
- if predicted_hairStyle_label == 'Puff':
435
- process_image_menHair(background_image, 305, 420,
436
- f"Data/AdobeColorFunko/MenHairstyle/Puff/{predicted_menhairColor_label}.png",
437
- 56, 68)
438
-
439
- if predicted_hairStyle_label == 'Spike':
440
- process_image_menHair(background_image, 310, 420,
441
- f"Data/AdobeColorFunko/MenHairstyle/Spike/{predicted_menhairColor_label}.png",
442
- 52, 70)
443
-
444
- if predicted_hairStyle_label == 'Bald':
445
- process_image_menHair(background_image, 310, 420,
446
- f"Data/AdobeColorFunko/MenHairstyle/Bald/{predicted_menhairColor_label}.png",
447
- 67, 120)
448
-
449
-
450
- if predicted_gender == 'Female':
451
- x = 245
452
- y = 345
453
- placeholder_image_path = f"Data/AdobeColorFunko/EyezBrowz/{predicted_gender}Eye.png"
454
- x_coordinate = 90
455
- y_coordinate = 50
456
- dummy_eye(background_image, x, y, placeholder_image_path, x_coordinate, y_coordinate)
457
- if predicted_WomenHairStyle == 'MediumLength':
458
- process_image_WomanHair(background_image, 300,460,
459
- f"Data/AdobeColorFunko/WomenHairstyle/MediumLength/{predicted_WomenHairColor}.png",
460
- 56, 50)
461
-
462
- if predicted_WomenHairStyle == 'ShortHair':
463
- process_image_WomanHair(background_image, 270,460,
464
- f"Data/AdobeColorFunko/WomenHairstyle/ShortHair/{predicted_WomenHairColor}.png",
465
- 61, 49)
466
-
467
- if predicted_WomenHairStyle == 'SidePlait':
468
- process_image_WomanHair(background_image, 300,450,
469
- f"Data/AdobeColorFunko/WomenHairstyle/SidePlait/{predicted_WomenHairColor}.png",
470
- 54, 56)
471
-
472
-
473
- # Convert the resulting image to base64
474
- buffered = BytesIO()
475
- background_image.save(buffered, format="PNG")
476
- #base64_image = base64.b64encode(buffered.getvalue()).decode("utf-8")
477
- final_images.append(background_image)
478
-
479
- return final_images
480
- imageComponent = gr.Image(type="filepath")
481
-
482
- # Define Gradio input components
483
- input_image = gr.inputs.Image(type="pil", label="Upload your image")
484
-
485
-
486
- with gr.Blocks() as demo:
487
- gr.Markdown(
488
- """
489
- # Funko POP! Figurine Creation
490
- Enabling Streamlined Automation with Generative Artificial Intelligence
491
- """)
492
- imageComponent = gr.Image(type="filepath").style(height=300, width=300)
493
- #MyOutputs=[gr.Image(type="pil", label="Generated Image " + str(i + 1)) for i in range(3)]
494
- with gr.Row():
495
- MyOutputs = [gr.Image(type="pil", label="Generated Image " + str(i + 1)).style(height=300, width=300) for i in range(3)]
496
- submitButton = gr.Button(value="Submit")
497
- submitButton.click(Igenerate_funko_figurines, inputs=imageComponent, outputs=MyOutputs)
498
-
499
-
500
- if __name__ == "__main__":
501
- demo.launch()
502
-
 
spaces/AlexKoff88/stable_diffusion/app.py DELETED
@@ -1,73 +0,0 @@
1
- import gradio as gr
2
- from optimum.intel.openvino import OVStableDiffusionPipeline
3
- from diffusers.training_utils import set_seed
4
- from diffusers import DDPMScheduler, StableDiffusionPipeline
5
- import gc
6
-
7
- import subprocess
8
-
9
- import time
10
-
11
-
12
- def create_pipeline(name):
13
- if name == "svjack/Stable-Diffusion-Pokemon-en": #"valhalla/sd-pokemon-model":
14
- scheduler = DDPMScheduler(beta_start=0.00085, beta_end=0.012,
15
- beta_schedule="scaled_linear", num_train_timesteps=1000)
16
- pipe = StableDiffusionPipeline.from_pretrained(name, scheduler=scheduler)
17
- pipe.safety_checker = lambda images, clip_input: (images, False)
18
- elif name == "OpenVINO/stable-diffusion-pokemons-fp32": #"stable-diffusion-pokemons-valhalla-fp32":
19
- scheduler = DDPMScheduler(beta_start=0.00085, beta_end=0.012,
20
- beta_schedule="scaled_linear", num_train_timesteps=1000)
21
- pipe = OVStableDiffusionPipeline.from_pretrained(name, compile=False, scheduler=scheduler)
22
- pipe.reshape(batch_size=1, height=512, width=512, num_images_per_prompt=1)
23
- pipe.compile()
24
- else:
25
- pipe = OVStableDiffusionPipeline.from_pretrained(name, compile=False)
26
- pipe.reshape(batch_size=1, height=512, width=512, num_images_per_prompt=1)
27
- pipe.compile()
28
- return pipe
29
-
30
- pipes = {
31
- "Torch fp32": "svjack/Stable-Diffusion-Pokemon-en", #"valhalla/sd-pokemon-model"
32
- "OpenVINO fp32": "OpenVINO/stable-diffusion-pokemons-fp32", #"OpenVINO/stable-diffusion-pokemons-valhalla-fp32"
33
- "OpenVINO 8-bit quantized": "OpenVINO/stable-diffusion-pokemons-quantized-aggressive", #"OpenVINO/stable-diffusion-pokemons-valhalla-quantized-agressive"
34
- "OpenVINO merged and quantized": "OpenVINO/stable-diffusion-pokemons-tome-quantized-aggressive" #"OpenVINO/stable-diffusion-pokemons-valhalla-tome-quantized-agressive"
35
- }
36
-
37
- # prefetch pipelines on start
38
- for v in pipes.values():
39
- pipe = create_pipeline(v)
40
- del pipe
41
- gc.collect()
42
-
43
- print((subprocess.check_output("lscpu", shell=True).strip()).decode())
44
-
45
- def generate(prompt, option, seed):
46
- pipe = create_pipeline(pipes[option])
47
- set_seed(int(seed))
48
- start_time = time.time()
49
- if "Torch" in option:
50
- output = pipe(prompt, num_inference_steps=50, output_type="pil", height=512, width=512)
51
- else:
52
- output = pipe(prompt, num_inference_steps=50, output_type="pil")
53
- elapsed_time = time.time() - start_time
54
- return (output.images[0], "{:10.4f}".format(elapsed_time))
55
-
56
- examples = ["cartoon bird",
57
- "a drawing of a green pokemon with red eyes",
58
- "plant pokemon in jungle"]
59
-
60
- model_options = [option for option in pipes.keys()]
61
-
62
- gr.Interface(
63
- fn=generate,
64
- inputs=[gr.inputs.Textbox(default="cartoon bird", label="Prompt", lines=1),
65
- gr.inputs.Dropdown(choices=model_options, default=model_options[-1], label="Model version"),
66
- gr.inputs.Textbox(default="42", label="Seed", lines=1)
67
- ],
68
- outputs=[gr.outputs.Image(type="pil", label="Generated Image"), gr.outputs.Textbox(label="Inference time")],
69
- title="OpenVINO-optimized Stable Diffusion",
70
- description="This is the Optimum-based demo for NNCF-optimized Stable Diffusion pipeline trained on 'lambdalabs/pokemon-blip-captions' dataset and running with OpenVINO.\n"
71
- "The pipeline is run using 8 vCPUs (4 cores) only.",
72
- theme="huggingface",
73
- ).launch()
 
spaces/Amon1/ChatGPTForAcadamic/show_math.py DELETED
@@ -1,80 +0,0 @@
1
- # This program is written by: https://github.com/polarwinkel/mdtex2html
2
-
3
- from latex2mathml.converter import convert as tex2mathml
4
- import re
5
-
6
- incomplete = '<font style="color:orange;" class="tooltip">&#9888;<span class="tooltiptext">formula incomplete</span></font>'
7
- convError = '<font style="color:red" class="tooltip">&#9888;<span class="tooltiptext">LaTeX-convert-error</span></font>'
8
-
9
- def convert(mdtex, extensions=[], splitParagraphs=True):
10
- ''' converts recursively the Markdown-LaTeX-mixture to HTML with MathML '''
11
- found = False
12
- # handle all paragraphs separately (prevents aftereffects)
13
- if splitParagraphs:
14
- parts = re.split("\n\n", mdtex)
15
- result = ''
16
- for part in parts:
17
- result += convert(part, extensions, splitParagraphs=False)
18
- return result
19
- # find first $$-formula:
20
- parts = re.split('\${2}', mdtex, 2)
21
- if len(parts)>1:
22
- found = True
23
- result = convert(parts[0], extensions, splitParagraphs=False)+'\n'
24
- try:
25
- result += '<div class="blockformula">'+tex2mathml(parts[1])+'</div>\n'
26
- except:
27
- result += '<div class="blockformula">'+convError+'</div>'
28
- if len(parts)==3:
29
- result += convert(parts[2], extensions, splitParagraphs=False)
30
- else:
31
- result += '<div class="blockformula">'+incomplete+'</div>'
32
- # else find first $-formulas:
33
- else:
34
- parts = re.split('\${1}', mdtex, 2)
35
- if len(parts)>1 and not found:
36
- found = True
37
- try:
38
- mathml = tex2mathml(parts[1])
39
- except:
40
- mathml = convError
41
- if parts[0].endswith('\n\n') or parts[0]=='': # make sure textblock starts before formula!
42
- parts[0]=parts[0]+'&#x200b;'
43
- if len(parts)==3:
44
- result = convert(parts[0]+mathml+parts[2], extensions, splitParagraphs=False)
45
- else:
46
- result = convert(parts[0]+mathml+incomplete, extensions, splitParagraphs=False)
47
- # else find first \[..\]-equation:
48
- else:
49
- parts = re.split(r'\\\[', mdtex, 1)
50
- if len(parts)>1 and not found:
51
- found = True
52
- result = convert(parts[0], extensions, splitParagraphs=False)+'\n'
53
- parts = re.split(r'\\\]', parts[1], 1)
54
- try:
55
- result += '<div class="blockformula">'+tex2mathml(parts[0])+'</div>\n'
56
- except:
57
- result += '<div class="blockformula">'+convError+'</div>'
58
- if len(parts)==2:
59
- result += convert(parts[1], extensions, splitParagraphs=False)
60
- else:
61
- result += '<div class="blockformula">'+incomplete+'</div>'
62
- # else find first \(..\)-equation:
63
- else:
64
- parts = re.split(r'\\\(', mdtex, 1)
65
- if len(parts)>1 and not found:
66
- found = True
67
- subp = re.split(r'\\\)', parts[1], 1)
68
- try:
69
- mathml = tex2mathml(subp[0])
70
- except:
71
- mathml = convError
72
- if parts[0].endswith('\n\n') or parts[0]=='': # make sure textblock starts before formula!
73
- parts[0]=parts[0]+'&#x200b;'
74
- if len(subp)==2:
75
- result = convert(parts[0]+mathml+subp[1], extensions, splitParagraphs=False)
76
- else:
77
- result = convert(parts[0]+mathml+incomplete, extensions, splitParagraphs=False)
78
- if not found:
79
- result = mdtex
80
- return result
 
spaces/Amrrs/DragGan-Inversion/PTI/models/e4e/stylegan2/op/fused_bias_act.cpp DELETED
@@ -1,21 +0,0 @@
1
- #include <torch/extension.h>
2
-
3
-
4
- torch::Tensor fused_bias_act_op(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer,
5
- int act, int grad, float alpha, float scale);
6
-
7
- #define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor")
8
- #define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
9
- #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)
10
-
11
- torch::Tensor fused_bias_act(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer,
12
- int act, int grad, float alpha, float scale) {
13
- CHECK_CUDA(input);
14
- CHECK_CUDA(bias);
15
-
16
- return fused_bias_act_op(input, bias, refer, act, grad, alpha, scale);
17
- }
18
-
19
- PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
20
- m.def("fused_bias_act", &fused_bias_act, "fused bias act (CUDA)");
21
- }
 
spaces/Amrrs/DragGan-Inversion/PTI/utils/alignment.py DELETED
@@ -1,113 +0,0 @@
1
- import numpy as np
2
- import PIL
3
- import PIL.Image
4
- import scipy
5
- import scipy.ndimage
6
- import dlib
7
-
8
- def get_landmark(img, predictor):
9
- """get landmark with dlib
10
- :return: np.array shape=(68, 2)
11
- """
12
- detector = dlib.get_frontal_face_detector()
13
-
14
- img = np.array(img)
15
- dets = detector(img, 1)
16
-
17
- for k, d in enumerate(dets):
18
- shape = predictor(img, d)
19
-
20
- t = list(shape.parts())
21
- a = []
22
- for tt in t:
23
- a.append([tt.x, tt.y])
24
- lm = np.array(a)
25
- return lm
26
-
27
-
28
- def align_face(img, predictor, output_size):
29
- """
30
- :param img: PIL Image
31
- :return: PIL Image
32
- """
33
-
34
- lm = get_landmark(img, predictor)
35
-
36
- lm_chin = lm[0: 17] # left-right
37
- lm_eyebrow_left = lm[17: 22] # left-right
38
- lm_eyebrow_right = lm[22: 27] # left-right
39
- lm_nose = lm[27: 31] # top-down
40
- lm_nostrils = lm[31: 36] # top-down
41
- lm_eye_left = lm[36: 42] # left-clockwise
42
- lm_eye_right = lm[42: 48] # left-clockwise
43
- lm_mouth_outer = lm[48: 60] # left-clockwise
44
- lm_mouth_inner = lm[60: 68] # left-clockwise
45
-
46
- # Calculate auxiliary vectors.
47
- eye_left = np.mean(lm_eye_left, axis=0)
48
- eye_right = np.mean(lm_eye_right, axis=0)
49
- eye_avg = (eye_left + eye_right) * 0.5
50
- eye_to_eye = eye_right - eye_left
51
- mouth_left = lm_mouth_outer[0]
52
- mouth_right = lm_mouth_outer[6]
53
- mouth_avg = (mouth_left + mouth_right) * 0.5
54
- eye_to_mouth = mouth_avg - eye_avg
55
-
56
- # Choose oriented crop rectangle.
57
- x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1]
58
- x /= np.hypot(*x)
59
- x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8)
60
- y = np.flipud(x) * [-1, 1]
61
- c = eye_avg + eye_to_mouth * 0.1
62
- quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
63
- qsize = np.hypot(*x) * 2
64
-
65
- # read image
66
- # img = img
67
-
68
- transform_size = output_size
69
- enable_padding = True
70
-
71
- # Shrink.
72
- shrink = int(np.floor(qsize / output_size * 0.5))
73
- if shrink > 1:
74
- rsize = (int(np.rint(float(img.size[0]) / shrink)), int(np.rint(float(img.size[1]) / shrink)))
75
- img = img.resize(rsize, PIL.Image.ANTIALIAS)
76
- quad /= shrink
77
- qsize /= shrink
78
-
79
- # Crop.
80
- border = max(int(np.rint(qsize * 0.1)), 3)
81
- crop = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))),
82
- int(np.ceil(max(quad[:, 1]))))
83
- crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]),
84
- min(crop[3] + border, img.size[1]))
85
- if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]:
86
- img = img.crop(crop)
87
- quad -= crop[0:2]
88
-
89
- # Pad.
90
- pad = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))),
91
- int(np.ceil(max(quad[:, 1]))))
92
- pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0),
93
- max(pad[3] - img.size[1] + border, 0))
94
- if enable_padding and max(pad) > border - 4:
95
- pad = np.maximum(pad, int(np.rint(qsize * 0.3)))
96
- img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
97
- h, w, _ = img.shape
98
- y, x, _ = np.ogrid[:h, :w, :1]
99
- mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0], np.float32(w - 1 - x) / pad[2]),
100
- 1.0 - np.minimum(np.float32(y) / pad[1], np.float32(h - 1 - y) / pad[3]))
101
- blur = qsize * 0.02
102
- img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
103
- img += (np.median(img, axis=(0, 1)) - img) * np.clip(mask, 0.0, 1.0)
104
- img = PIL.Image.fromarray(np.uint8(np.clip(np.rint(img), 0, 255)), 'RGB')
105
- quad += pad[:2]
106
-
107
- # Transform.
108
- img = img.transform((transform_size, transform_size), PIL.Image.QUAD, (quad + 0.5).flatten(), PIL.Image.BILINEAR)
109
- if output_size < transform_size:
110
- img = img.resize((output_size, output_size), PIL.Image.ANTIALIAS)
111
-
112
- # Return aligned image.
113
- return img
 
spaces/Amrrs/DragGan-Inversion/gui_utils/__init__.py DELETED
@@ -1,9 +0,0 @@
1
- # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2
- #
3
- # NVIDIA CORPORATION and its licensors retain all intellectual property
4
- # and proprietary rights in and to this software, related documentation
5
- # and any modifications thereto. Any use, reproduction, disclosure or
6
- # distribution of this software and related documentation without an express
7
- # license agreement from NVIDIA CORPORATION is strictly prohibited.
8
-
9
- # empty
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/clip_guided_images_mixing_stable_diffusion.py DELETED
@@ -1,456 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- import inspect
3
- from typing import Optional, Union
4
-
5
- import numpy as np
6
- import PIL
7
- import torch
8
- from torch.nn import functional as F
9
- from torchvision import transforms
10
- from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
11
-
12
- from diffusers import (
13
- AutoencoderKL,
14
- DDIMScheduler,
15
- DiffusionPipeline,
16
- DPMSolverMultistepScheduler,
17
- LMSDiscreteScheduler,
18
- PNDMScheduler,
19
- UNet2DConditionModel,
20
- )
21
- from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
22
- from diffusers.utils import (
23
- PIL_INTERPOLATION,
24
- randn_tensor,
25
- )
26
-
27
-
28
- def preprocess(image, w, h):
29
- if isinstance(image, torch.Tensor):
30
- return image
31
- elif isinstance(image, PIL.Image.Image):
32
- image = [image]
33
-
34
- if isinstance(image[0], PIL.Image.Image):
35
- image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
36
- image = np.concatenate(image, axis=0)
37
- image = np.array(image).astype(np.float32) / 255.0
38
- image = image.transpose(0, 3, 1, 2)
39
- image = 2.0 * image - 1.0
40
- image = torch.from_numpy(image)
41
- elif isinstance(image[0], torch.Tensor):
42
- image = torch.cat(image, dim=0)
43
- return image
44
-
45
-
46
- def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
47
- if not isinstance(v0, np.ndarray):
48
- inputs_are_torch = True
49
- input_device = v0.device
50
- v0 = v0.cpu().numpy()
51
- v1 = v1.cpu().numpy()
52
-
53
- dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
54
- if np.abs(dot) > DOT_THRESHOLD:
55
- v2 = (1 - t) * v0 + t * v1
56
- else:
57
- theta_0 = np.arccos(dot)
58
- sin_theta_0 = np.sin(theta_0)
59
- theta_t = theta_0 * t
60
- sin_theta_t = np.sin(theta_t)
61
- s0 = np.sin(theta_0 - theta_t) / sin_theta_0
62
- s1 = sin_theta_t / sin_theta_0
63
- v2 = s0 * v0 + s1 * v1
64
-
65
- if inputs_are_torch:
66
- v2 = torch.from_numpy(v2).to(input_device)
67
-
68
- return v2
69
-
70
-
71
- def spherical_dist_loss(x, y):
72
- x = F.normalize(x, dim=-1)
73
- y = F.normalize(y, dim=-1)
74
- return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
75
-
76
-
77
- def set_requires_grad(model, value):
78
- for param in model.parameters():
79
- param.requires_grad = value
80
-
81
-
82
- class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
83
- def __init__(
84
- self,
85
- vae: AutoencoderKL,
86
- text_encoder: CLIPTextModel,
87
- clip_model: CLIPModel,
88
- tokenizer: CLIPTokenizer,
89
- unet: UNet2DConditionModel,
90
- scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
91
- feature_extractor: CLIPFeatureExtractor,
92
- coca_model=None,
93
- coca_tokenizer=None,
94
- coca_transform=None,
95
- ):
96
- super().__init__()
97
- self.register_modules(
98
- vae=vae,
99
- text_encoder=text_encoder,
100
- clip_model=clip_model,
101
- tokenizer=tokenizer,
102
- unet=unet,
103
- scheduler=scheduler,
104
- feature_extractor=feature_extractor,
105
- coca_model=coca_model,
106
- coca_tokenizer=coca_tokenizer,
107
- coca_transform=coca_transform,
108
- )
109
- self.feature_extractor_size = (
110
- feature_extractor.size
111
- if isinstance(feature_extractor.size, int)
112
- else feature_extractor.size["shortest_edge"]
113
- )
114
- self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
115
- set_requires_grad(self.text_encoder, False)
116
- set_requires_grad(self.clip_model, False)
117
-
118
- def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
119
- if slice_size == "auto":
120
- # half the attention head size is usually a good trade-off between
121
- # speed and memory
122
- slice_size = self.unet.config.attention_head_dim // 2
123
- self.unet.set_attention_slice(slice_size)
124
-
125
- def disable_attention_slicing(self):
126
- self.enable_attention_slicing(None)
127
-
128
- def freeze_vae(self):
129
- set_requires_grad(self.vae, False)
130
-
131
- def unfreeze_vae(self):
132
- set_requires_grad(self.vae, True)
133
-
134
- def freeze_unet(self):
135
- set_requires_grad(self.unet, False)
136
-
137
- def unfreeze_unet(self):
138
- set_requires_grad(self.unet, True)
139
-
140
- def get_timesteps(self, num_inference_steps, strength, device):
141
- # get the original timestep using init_timestep
142
- init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
143
-
144
- t_start = max(num_inference_steps - init_timestep, 0)
145
- timesteps = self.scheduler.timesteps[t_start:]
146
-
147
- return timesteps, num_inference_steps - t_start
148
-
149
- def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
150
- if not isinstance(image, torch.Tensor):
151
- raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")
152
-
153
- image = image.to(device=device, dtype=dtype)
154
-
155
- if isinstance(generator, list):
156
- init_latents = [
157
- self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
158
- ]
159
- init_latents = torch.cat(init_latents, dim=0)
160
- else:
161
- init_latents = self.vae.encode(image).latent_dist.sample(generator)
162
-
163
- # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
164
- init_latents = 0.18215 * init_latents
165
- init_latents = init_latents.repeat_interleave(batch_size, dim=0)
166
-
167
- noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)
168
-
169
- # get latents
170
- init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
171
- latents = init_latents
172
-
173
- return latents
174
-
175
- def get_image_description(self, image):
176
- transformed_image = self.coca_transform(image).unsqueeze(0)
177
- with torch.no_grad(), torch.cuda.amp.autocast():
178
- generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
179
- generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
180
- return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")
181
-
182
- def get_clip_image_embeddings(self, image, batch_size):
183
- clip_image_input = self.feature_extractor.preprocess(image)
184
- clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
185
- image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
186
- image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
187
- image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
188
- return image_embeddings_clip
189
-
190
- @torch.enable_grad()
191
- def cond_fn(
192
- self,
193
- latents,
194
- timestep,
195
- index,
196
- text_embeddings,
197
- noise_pred_original,
198
- original_image_embeddings_clip,
199
- clip_guidance_scale,
200
- ):
201
- latents = latents.detach().requires_grad_()
202
-
203
- latent_model_input = self.scheduler.scale_model_input(latents, timestep)
204
-
205
- # predict the noise residual
206
- noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample
207
-
208
- if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
209
- alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
210
- beta_prod_t = 1 - alpha_prod_t
211
- # compute predicted original sample from predicted noise also called
212
- # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
213
- pred_original_sample = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5)
214
-
215
- fac = torch.sqrt(beta_prod_t)
216
- sample = pred_original_sample * (fac) + latents * (1 - fac)
217
- elif isinstance(self.scheduler, LMSDiscreteScheduler):
218
- sigma = self.scheduler.sigmas[index]
219
- sample = latents - sigma * noise_pred
220
- else:
221
- raise ValueError(f"scheduler type {type(self.scheduler)} not supported")
222
-
223
- # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
224
- sample = 1 / 0.18215 * sample
225
- image = self.vae.decode(sample).sample
226
- image = (image / 2 + 0.5).clamp(0, 1)
227
-
228
- image = transforms.Resize(self.feature_extractor_size)(image)
229
- image = self.normalize(image).to(latents.dtype)
230
-
231
- image_embeddings_clip = self.clip_model.get_image_features(image)
232
- image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
233
-
234
- loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale
235
-
236
- grads = -torch.autograd.grad(loss, latents)[0]
237
-
238
- if isinstance(self.scheduler, LMSDiscreteScheduler):
239
- latents = latents.detach() + grads * (sigma**2)
240
- noise_pred = noise_pred_original
241
- else:
242
- noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
243
- return noise_pred, latents
244
-
245
- @torch.no_grad()
246
- def __call__(
247
- self,
248
- style_image: Union[torch.FloatTensor, PIL.Image.Image],
249
- content_image: Union[torch.FloatTensor, PIL.Image.Image],
250
- style_prompt: Optional[str] = None,
251
- content_prompt: Optional[str] = None,
252
- height: Optional[int] = 512,
253
- width: Optional[int] = 512,
254
- noise_strength: float = 0.6,
255
- num_inference_steps: Optional[int] = 50,
256
- guidance_scale: Optional[float] = 7.5,
257
- batch_size: Optional[int] = 1,
258
- eta: float = 0.0,
259
- clip_guidance_scale: Optional[float] = 100,
260
- generator: Optional[torch.Generator] = None,
261
- output_type: Optional[str] = "pil",
262
- return_dict: bool = True,
263
- slerp_latent_style_strength: float = 0.8,
264
- slerp_prompt_style_strength: float = 0.1,
265
- slerp_clip_image_style_strength: float = 0.1,
266
- ):
267
- if isinstance(generator, list) and len(generator) != batch_size:
268
- raise ValueError(f"You have passed {batch_size} batch_size, but only {len(generator)} generators.")
269
-
270
- if height % 8 != 0 or width % 8 != 0:
271
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
272
-
273
- if isinstance(generator, torch.Generator) and batch_size > 1:
274
- generator = [generator] + [None] * (batch_size - 1)
275
-
276
- coca_is_none = [
277
- ("model", self.coca_model is None),
278
- ("tokenizer", self.coca_tokenizer is None),
279
- ("transform", self.coca_transform is None),
280
- ]
281
- coca_is_none = [x[0] for x in coca_is_none if x[1]]
282
- coca_is_none_str = ", ".join(coca_is_none)
283
- # generate prompts with coca model if prompt is None
284
- if content_prompt is None:
285
- if len(coca_is_none):
286
- raise ValueError(
287
- f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
288
- f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
289
- )
290
- content_prompt = self.get_image_description(content_image)
291
- if style_prompt is None:
292
- if len(coca_is_none):
293
- raise ValueError(
294
- f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
295
- f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
296
- )
297
- style_prompt = self.get_image_description(style_image)
298
-
299
- # get prompt text embeddings for content and style
300
- content_text_input = self.tokenizer(
301
- content_prompt,
302
- padding="max_length",
303
- max_length=self.tokenizer.model_max_length,
304
- truncation=True,
305
- return_tensors="pt",
306
- )
307
- content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device))[0]
308
-
309
- style_text_input = self.tokenizer(
310
- style_prompt,
311
- padding="max_length",
312
- max_length=self.tokenizer.model_max_length,
313
- truncation=True,
314
- return_tensors="pt",
315
- )
316
- style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device))[0]
317
-
318
- text_embeddings = slerp(slerp_prompt_style_strength, content_text_embeddings, style_text_embeddings)
319
-
320
- # duplicate text embeddings for each generation per prompt
321
- text_embeddings = text_embeddings.repeat_interleave(batch_size, dim=0)
322
-
323
- # set timesteps
324
- accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
325
- extra_set_kwargs = {}
326
- if accepts_offset:
327
- extra_set_kwargs["offset"] = 1
328
-
329
- self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
330
- # Some schedulers like PNDM have timesteps as arrays
331
- # It's more optimized to move all timesteps to correct device beforehand
332
- self.scheduler.timesteps.to(self.device)
333
-
334
- timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, noise_strength, self.device)
335
- latent_timestep = timesteps[:1].repeat(batch_size)
336
-
337
- # Preprocess image
338
- preprocessed_content_image = preprocess(content_image, width, height)
339
- content_latents = self.prepare_latents(
340
- preprocessed_content_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
341
- )
342
-
343
- preprocessed_style_image = preprocess(style_image, width, height)
344
- style_latents = self.prepare_latents(
345
- preprocessed_style_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
346
- )
347
-
348
- latents = slerp(slerp_latent_style_strength, content_latents, style_latents)
349
-
350
- if clip_guidance_scale > 0:
351
- content_clip_image_embedding = self.get_clip_image_embeddings(content_image, batch_size)
352
- style_clip_image_embedding = self.get_clip_image_embeddings(style_image, batch_size)
353
- clip_image_embeddings = slerp(
354
- slerp_clip_image_style_strength, content_clip_image_embedding, style_clip_image_embedding
355
- )
356
-
357
- # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
358
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
359
- # corresponds to doing no classifier free guidance.
360
- do_classifier_free_guidance = guidance_scale > 1.0
361
- # get unconditional embeddings for classifier free guidance
362
- if do_classifier_free_guidance:
363
- max_length = content_text_input.input_ids.shape[-1]
364
- uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
365
- uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
366
- # duplicate unconditional embeddings for each generation per prompt
367
- uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size, dim=0)
368
-
369
- # For classifier free guidance, we need to do two forward passes.
370
- # Here we concatenate the unconditional and text embeddings into a single batch
371
- # to avoid doing two forward passes
372
- text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
373
-
374
- # get the initial random noise unless the user supplied it
375
-
376
- # Unlike in other pipelines, latents need to be generated in the target device
377
- # for 1-to-1 results reproducibility with the CompVis implementation.
378
- # However this currently doesn't work in `mps`.
379
- latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
380
- latents_dtype = text_embeddings.dtype
381
- if latents is None:
382
- if self.device.type == "mps":
383
- # randn does not work reproducibly on mps
384
- latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
385
- self.device
386
- )
387
- else:
388
- latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
389
- else:
390
- if latents.shape != latents_shape:
391
- raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
392
- latents = latents.to(self.device)
393
-
394
- # scale the initial noise by the standard deviation required by the scheduler
395
- latents = latents * self.scheduler.init_noise_sigma
396
-
397
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
398
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
399
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
400
- # and should be between [0, 1]
401
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
402
- extra_step_kwargs = {}
403
- if accepts_eta:
404
- extra_step_kwargs["eta"] = eta
405
-
406
- # check if the scheduler accepts generator
407
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
408
- if accepts_generator:
409
- extra_step_kwargs["generator"] = generator
410
-
411
- with self.progress_bar(total=num_inference_steps):
412
- for i, t in enumerate(timesteps):
413
- # expand the latents if we are doing classifier free guidance
414
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
415
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
416
-
417
- # predict the noise residual
418
- noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
419
-
420
- # perform classifier free guidance
421
- if do_classifier_free_guidance:
422
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
423
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
424
-
425
- # perform clip guidance
426
- if clip_guidance_scale > 0:
427
- text_embeddings_for_guidance = (
428
- text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
429
- )
430
- noise_pred, latents = self.cond_fn(
431
- latents,
432
- t,
433
- i,
434
- text_embeddings_for_guidance,
435
- noise_pred,
436
- clip_image_embeddings,
437
- clip_guidance_scale,
438
- )
439
-
440
- # compute the previous noisy sample x_t -> x_t-1
441
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
442
-
443
- # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
444
- latents = 1 / 0.18215 * latents
445
- image = self.vae.decode(latents).sample
446
-
447
- image = (image / 2 + 0.5).clamp(0, 1)
448
- image = image.cpu().permute(0, 2, 3, 1).numpy()
449
-
450
- if output_type == "pil":
451
- image = self.numpy_to_pil(image)
452
-
453
- if not return_dict:
454
- return (image, None)
455
-
456
- return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
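A minimal usage sketch for the `__call__` signature above, assuming `pipe` is an already-constructed instance of this CLIP-guided image-mixing pipeline (its loading is not shown in this file, e.g. it could be obtained via `DiffusionPipeline.from_pretrained(..., custom_pipeline=...)`) and that the two image paths below are placeholders:

    import torch
    from PIL import Image

    # `pipe` is assumed to be a loaded instance of the pipeline defined above.
    style = Image.open("style.png").convert("RGB")      # placeholder path
    content = Image.open("content.png").convert("RGB")  # placeholder path

    generator = torch.Generator().manual_seed(0)
    result = pipe(
        style_image=style,
        content_image=content,
        num_inference_steps=50,
        clip_guidance_scale=100,
        slerp_latent_style_strength=0.8,
        slerp_prompt_style_strength=0.1,
        slerp_clip_image_style_strength=0.1,
        generator=generator,
    )
    mixed_image = result.images[0]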
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/scripts/convert_original_audioldm_to_diffusers.py DELETED
@@ -1,1052 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2023 The HuggingFace Inc. team.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
- """ Conversion script for the AudioLDM checkpoints."""
16
-
17
- import argparse
18
- import re
19
-
20
- import torch
21
- from transformers import (
22
- AutoTokenizer,
23
- ClapTextConfig,
24
- ClapTextModelWithProjection,
25
- SpeechT5HifiGan,
26
- SpeechT5HifiGanConfig,
27
- )
28
-
29
- from diffusers import (
30
- AudioLDMPipeline,
31
- AutoencoderKL,
32
- DDIMScheduler,
33
- DPMSolverMultistepScheduler,
34
- EulerAncestralDiscreteScheduler,
35
- EulerDiscreteScheduler,
36
- HeunDiscreteScheduler,
37
- LMSDiscreteScheduler,
38
- PNDMScheduler,
39
- UNet2DConditionModel,
40
- )
41
- from diffusers.utils import is_omegaconf_available, is_safetensors_available
42
- from diffusers.utils.import_utils import BACKENDS_MAPPING
43
-
44
-
45
- # Copied from diffusers.pipelines.stable_diffusion.convert_from_ckpt.shave_segments
46
- def shave_segments(path, n_shave_prefix_segments=1):
47
- """
48
- Removes segments. Positive values shave the first segments, negative shave the last segments.
49
- """
50
- if n_shave_prefix_segments >= 0:
51
- return ".".join(path.split(".")[n_shave_prefix_segments:])
52
- else:
53
- return ".".join(path.split(".")[:n_shave_prefix_segments])
54
-
55
-
56
- # Copied from diffusers.pipelines.stable_diffusion.convert_from_ckpt.renew_resnet_paths
57
- def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
58
- """
59
- Updates paths inside resnets to the new naming scheme (local renaming)
60
- """
61
- mapping = []
62
- for old_item in old_list:
63
- new_item = old_item.replace("in_layers.0", "norm1")
64
- new_item = new_item.replace("in_layers.2", "conv1")
65
-
66
- new_item = new_item.replace("out_layers.0", "norm2")
67
- new_item = new_item.replace("out_layers.3", "conv2")
68
-
69
- new_item = new_item.replace("emb_layers.1", "time_emb_proj")
70
- new_item = new_item.replace("skip_connection", "conv_shortcut")
71
-
72
- new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
73
-
74
- mapping.append({"old": old_item, "new": new_item})
75
-
76
- return mapping
77
-
78
-
79
- # Copied from diffusers.pipelines.stable_diffusion.convert_from_ckpt.renew_vae_resnet_paths
80
- def renew_vae_resnet_paths(old_list, n_shave_prefix_segments=0):
81
- """
82
- Updates paths inside resnets to the new naming scheme (local renaming)
83
- """
84
- mapping = []
85
- for old_item in old_list:
86
- new_item = old_item
87
-
88
- new_item = new_item.replace("nin_shortcut", "conv_shortcut")
89
- new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
90
-
91
- mapping.append({"old": old_item, "new": new_item})
92
-
93
- return mapping
94
-
95
-
96
- # Copied from diffusers.pipelines.stable_diffusion.convert_from_ckpt.renew_attention_paths
97
- def renew_attention_paths(old_list):
98
- """
99
- Updates paths inside attentions to the new naming scheme (local renaming)
100
- """
101
- mapping = []
102
- for old_item in old_list:
103
- new_item = old_item
104
-
105
- # new_item = new_item.replace('norm.weight', 'group_norm.weight')
106
- # new_item = new_item.replace('norm.bias', 'group_norm.bias')
107
-
108
- # new_item = new_item.replace('proj_out.weight', 'proj_attn.weight')
109
- # new_item = new_item.replace('proj_out.bias', 'proj_attn.bias')
110
-
111
- # new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
112
-
113
- mapping.append({"old": old_item, "new": new_item})
114
-
115
- return mapping
116
-
117
-
118
- # Copied from diffusers.pipelines.stable_diffusion.convert_from_ckpt.renew_vae_attention_paths
119
- def renew_vae_attention_paths(old_list, n_shave_prefix_segments=0):
120
- """
121
- Updates paths inside attentions to the new naming scheme (local renaming)
122
- """
123
- mapping = []
124
- for old_item in old_list:
125
- new_item = old_item
126
-
127
- new_item = new_item.replace("norm.weight", "group_norm.weight")
128
- new_item = new_item.replace("norm.bias", "group_norm.bias")
129
-
130
- new_item = new_item.replace("q.weight", "query.weight")
131
- new_item = new_item.replace("q.bias", "query.bias")
132
-
133
- new_item = new_item.replace("k.weight", "key.weight")
134
- new_item = new_item.replace("k.bias", "key.bias")
135
-
136
- new_item = new_item.replace("v.weight", "value.weight")
137
- new_item = new_item.replace("v.bias", "value.bias")
138
-
139
- new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
140
- new_item = new_item.replace("proj_out.bias", "proj_attn.bias")
141
-
142
- new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
143
-
144
- mapping.append({"old": old_item, "new": new_item})
145
-
146
- return mapping
147
-
148
-
149
- # Copied from diffusers.pipelines.stable_diffusion.convert_from_ckpt.assign_to_checkpoint
150
- def assign_to_checkpoint(
151
- paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None
152
- ):
153
- """
154
- This does the final conversion step: take locally converted weights and apply a global renaming to them. It splits
155
- attention layers, and takes into account additional replacements that may arise.
156
-
157
- Assigns the weights to the new checkpoint.
158
- """
159
- assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."
160
-
161
- # Splits the attention layers into three variables.
162
- if attention_paths_to_split is not None:
163
- for path, path_map in attention_paths_to_split.items():
164
- old_tensor = old_checkpoint[path]
165
- channels = old_tensor.shape[0] // 3
166
-
167
- target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)
168
-
169
- num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3
170
-
171
- old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
172
- query, key, value = old_tensor.split(channels // num_heads, dim=1)
173
-
174
- checkpoint[path_map["query"]] = query.reshape(target_shape)
175
- checkpoint[path_map["key"]] = key.reshape(target_shape)
176
- checkpoint[path_map["value"]] = value.reshape(target_shape)
177
-
178
- for path in paths:
179
- new_path = path["new"]
180
-
181
- # These have already been assigned
182
- if attention_paths_to_split is not None and new_path in attention_paths_to_split:
183
- continue
184
-
185
- # Global renaming happens here
186
- new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
187
- new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
188
- new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")
189
-
190
- if additional_replacements is not None:
191
- for replacement in additional_replacements:
192
- new_path = new_path.replace(replacement["old"], replacement["new"])
193
-
194
- # proj_attn.weight has to be converted from conv 1D to linear
195
- if "proj_attn.weight" in new_path:
196
- checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
197
- else:
198
- checkpoint[new_path] = old_checkpoint[path["old"]]
199
-
200
-
201
- # Copied from diffusers.pipelines.stable_diffusion.convert_from_ckpt.conv_attn_to_linear
202
- def conv_attn_to_linear(checkpoint):
203
- keys = list(checkpoint.keys())
204
- attn_keys = ["query.weight", "key.weight", "value.weight"]
205
- for key in keys:
206
- if ".".join(key.split(".")[-2:]) in attn_keys:
207
- if checkpoint[key].ndim > 2:
208
- checkpoint[key] = checkpoint[key][:, :, 0, 0]
209
- elif "proj_attn.weight" in key:
210
- if checkpoint[key].ndim > 2:
211
- checkpoint[key] = checkpoint[key][:, :, 0]
212
-
213
-
214
- def create_unet_diffusers_config(original_config, image_size: int):
215
- """
216
- Creates a UNet config for diffusers based on the config of the original AudioLDM model.
217
- """
218
- unet_params = original_config.model.params.unet_config.params
219
- vae_params = original_config.model.params.first_stage_config.params.ddconfig
220
-
221
- block_out_channels = [unet_params.model_channels * mult for mult in unet_params.channel_mult]
222
-
223
- down_block_types = []
224
- resolution = 1
225
- for i in range(len(block_out_channels)):
226
- block_type = "CrossAttnDownBlock2D" if resolution in unet_params.attention_resolutions else "DownBlock2D"
227
- down_block_types.append(block_type)
228
- if i != len(block_out_channels) - 1:
229
- resolution *= 2
230
-
231
- up_block_types = []
232
- for i in range(len(block_out_channels)):
233
- block_type = "CrossAttnUpBlock2D" if resolution in unet_params.attention_resolutions else "UpBlock2D"
234
- up_block_types.append(block_type)
235
- resolution //= 2
236
-
237
- vae_scale_factor = 2 ** (len(vae_params.ch_mult) - 1)
238
-
239
- cross_attention_dim = (
240
- unet_params.cross_attention_dim if "cross_attention_dim" in unet_params else block_out_channels
241
- )
242
-
243
- class_embed_type = "simple_projection" if "extra_film_condition_dim" in unet_params else None
244
- projection_class_embeddings_input_dim = (
245
- unet_params.extra_film_condition_dim if "extra_film_condition_dim" in unet_params else None
246
- )
247
- class_embeddings_concat = unet_params.extra_film_use_concat if "extra_film_use_concat" in unet_params else None
248
-
249
- config = {
250
- "sample_size": image_size // vae_scale_factor,
251
- "in_channels": unet_params.in_channels,
252
- "out_channels": unet_params.out_channels,
253
- "down_block_types": tuple(down_block_types),
254
- "up_block_types": tuple(up_block_types),
255
- "block_out_channels": tuple(block_out_channels),
256
- "layers_per_block": unet_params.num_res_blocks,
257
- "cross_attention_dim": cross_attention_dim,
258
- "class_embed_type": class_embed_type,
259
- "projection_class_embeddings_input_dim": projection_class_embeddings_input_dim,
260
- "class_embeddings_concat": class_embeddings_concat,
261
- }
262
-
263
- return config
264
-
265
-
266
- # Adapted from diffusers.pipelines.stable_diffusion.convert_from_ckpt.create_vae_diffusers_config
267
- def create_vae_diffusers_config(original_config, checkpoint, image_size: int):
268
- """
269
- Creates a VAE config for diffusers based on the config of the original AudioLDM model. Compared to the original
270
- Stable Diffusion conversion, this function passes a *learnt* VAE scaling factor to the diffusers VAE.
271
- """
272
- vae_params = original_config.model.params.first_stage_config.params.ddconfig
273
- _ = original_config.model.params.first_stage_config.params.embed_dim
274
-
275
- block_out_channels = [vae_params.ch * mult for mult in vae_params.ch_mult]
276
- down_block_types = ["DownEncoderBlock2D"] * len(block_out_channels)
277
- up_block_types = ["UpDecoderBlock2D"] * len(block_out_channels)
278
-
279
- scaling_factor = checkpoint["scale_factor"] if "scale_by_std" in original_config.model.params else 0.18215
280
-
281
- config = {
282
- "sample_size": image_size,
283
- "in_channels": vae_params.in_channels,
284
- "out_channels": vae_params.out_ch,
285
- "down_block_types": tuple(down_block_types),
286
- "up_block_types": tuple(up_block_types),
287
- "block_out_channels": tuple(block_out_channels),
288
- "latent_channels": vae_params.z_channels,
289
- "layers_per_block": vae_params.num_res_blocks,
290
- "scaling_factor": float(scaling_factor),
291
- }
292
- return config
293
-
294
-
295
- # Copied from diffusers.pipelines.stable_diffusion.convert_from_ckpt.create_diffusers_schedular
296
- def create_diffusers_schedular(original_config):
297
- schedular = DDIMScheduler(
298
- num_train_timesteps=original_config.model.params.timesteps,
299
- beta_start=original_config.model.params.linear_start,
300
- beta_end=original_config.model.params.linear_end,
301
- beta_schedule="scaled_linear",
302
- )
303
- return schedular
304
-
305
-
306
- # Adapted from diffusers.pipelines.stable_diffusion.convert_from_ckpt.convert_ldm_unet_checkpoint
307
- def convert_ldm_unet_checkpoint(checkpoint, config, path=None, extract_ema=False):
308
- """
309
- Takes a state dict and a config, and returns a converted checkpoint. Compared to the original Stable Diffusion
310
- conversion, this function additionally converts the learnt film embedding linear layer.
311
- """
312
-
313
- # extract state_dict for UNet
314
- unet_state_dict = {}
315
- keys = list(checkpoint.keys())
316
-
317
- unet_key = "model.diffusion_model."
318
- # at least 100 parameters have to start with `model_ema` in order for the checkpoint to be EMA
319
- if sum(k.startswith("model_ema") for k in keys) > 100 and extract_ema:
320
- print(f"Checkpoint {path} has both EMA and non-EMA weights.")
321
- print(
322
- "In this conversion only the EMA weights are extracted. If you want to instead extract the non-EMA"
323
- " weights (useful to continue fine-tuning), please make sure to remove the `--extract_ema` flag."
324
- )
325
- for key in keys:
326
- if key.startswith("model.diffusion_model"):
327
- flat_ema_key = "model_ema." + "".join(key.split(".")[1:])
328
- unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(flat_ema_key)
329
- else:
330
- if sum(k.startswith("model_ema") for k in keys) > 100:
331
- print(
332
- "In this conversion only the non-EMA weights are extracted. If you want to instead extract the EMA"
333
- " weights (usually better for inference), please make sure to add the `--extract_ema` flag."
334
- )
335
-
336
- for key in keys:
337
- if key.startswith(unet_key):
338
- unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(key)
339
-
340
- new_checkpoint = {}
341
-
342
- new_checkpoint["time_embedding.linear_1.weight"] = unet_state_dict["time_embed.0.weight"]
343
- new_checkpoint["time_embedding.linear_1.bias"] = unet_state_dict["time_embed.0.bias"]
344
- new_checkpoint["time_embedding.linear_2.weight"] = unet_state_dict["time_embed.2.weight"]
345
- new_checkpoint["time_embedding.linear_2.bias"] = unet_state_dict["time_embed.2.bias"]
346
-
347
- new_checkpoint["class_embedding.weight"] = unet_state_dict["film_emb.weight"]
348
- new_checkpoint["class_embedding.bias"] = unet_state_dict["film_emb.bias"]
349
-
350
- new_checkpoint["conv_in.weight"] = unet_state_dict["input_blocks.0.0.weight"]
351
- new_checkpoint["conv_in.bias"] = unet_state_dict["input_blocks.0.0.bias"]
352
-
353
- new_checkpoint["conv_norm_out.weight"] = unet_state_dict["out.0.weight"]
354
- new_checkpoint["conv_norm_out.bias"] = unet_state_dict["out.0.bias"]
355
- new_checkpoint["conv_out.weight"] = unet_state_dict["out.2.weight"]
356
- new_checkpoint["conv_out.bias"] = unet_state_dict["out.2.bias"]
357
-
358
- # Retrieves the keys for the input blocks only
359
- num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "input_blocks" in layer})
360
- input_blocks = {
361
- layer_id: [key for key in unet_state_dict if f"input_blocks.{layer_id}" in key]
362
- for layer_id in range(num_input_blocks)
363
- }
364
-
365
- # Retrieves the keys for the middle blocks only
366
- num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "middle_block" in layer})
367
- middle_blocks = {
368
- layer_id: [key for key in unet_state_dict if f"middle_block.{layer_id}" in key]
369
- for layer_id in range(num_middle_blocks)
370
- }
371
-
372
- # Retrieves the keys for the output blocks only
373
- num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "output_blocks" in layer})
374
- output_blocks = {
375
- layer_id: [key for key in unet_state_dict if f"output_blocks.{layer_id}" in key]
376
- for layer_id in range(num_output_blocks)
377
- }
378
-
379
- for i in range(1, num_input_blocks):
380
- block_id = (i - 1) // (config["layers_per_block"] + 1)
381
- layer_in_block_id = (i - 1) % (config["layers_per_block"] + 1)
382
-
383
- resnets = [
384
- key for key in input_blocks[i] if f"input_blocks.{i}.0" in key and f"input_blocks.{i}.0.op" not in key
385
- ]
386
- attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]
387
-
388
- if f"input_blocks.{i}.0.op.weight" in unet_state_dict:
389
- new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = unet_state_dict.pop(
390
- f"input_blocks.{i}.0.op.weight"
391
- )
392
- new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = unet_state_dict.pop(
393
- f"input_blocks.{i}.0.op.bias"
394
- )
395
-
396
- paths = renew_resnet_paths(resnets)
397
- meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
398
- assign_to_checkpoint(
399
- paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
400
- )
401
-
402
- if len(attentions):
403
- paths = renew_attention_paths(attentions)
404
- meta_path = {"old": f"input_blocks.{i}.1", "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}"}
405
- assign_to_checkpoint(
406
- paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
407
- )
408
-
409
- resnet_0 = middle_blocks[0]
410
- attentions = middle_blocks[1]
411
- resnet_1 = middle_blocks[2]
412
-
413
- resnet_0_paths = renew_resnet_paths(resnet_0)
414
- assign_to_checkpoint(resnet_0_paths, new_checkpoint, unet_state_dict, config=config)
415
-
416
- resnet_1_paths = renew_resnet_paths(resnet_1)
417
- assign_to_checkpoint(resnet_1_paths, new_checkpoint, unet_state_dict, config=config)
418
-
419
- attentions_paths = renew_attention_paths(attentions)
420
- meta_path = {"old": "middle_block.1", "new": "mid_block.attentions.0"}
421
- assign_to_checkpoint(
422
- attentions_paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
423
- )
424
-
425
- for i in range(num_output_blocks):
426
- block_id = i // (config["layers_per_block"] + 1)
427
- layer_in_block_id = i % (config["layers_per_block"] + 1)
428
- output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
429
- output_block_list = {}
430
-
431
- for layer in output_block_layers:
432
- layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
433
- if layer_id in output_block_list:
434
- output_block_list[layer_id].append(layer_name)
435
- else:
436
- output_block_list[layer_id] = [layer_name]
437
-
438
- if len(output_block_list) > 1:
439
- resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
440
- attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]
441
-
442
- resnet_0_paths = renew_resnet_paths(resnets)
443
- paths = renew_resnet_paths(resnets)
444
-
445
- meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
446
- assign_to_checkpoint(
447
- paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
448
- )
449
-
450
- output_block_list = {k: sorted(v) for k, v in output_block_list.items()}
451
- if ["conv.bias", "conv.weight"] in output_block_list.values():
452
- index = list(output_block_list.values()).index(["conv.bias", "conv.weight"])
453
- new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = unet_state_dict[
454
- f"output_blocks.{i}.{index}.conv.weight"
455
- ]
456
- new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = unet_state_dict[
457
- f"output_blocks.{i}.{index}.conv.bias"
458
- ]
459
-
460
- # Clear attentions as they have been attributed above.
461
- if len(attentions) == 2:
462
- attentions = []
463
-
464
- if len(attentions):
465
- paths = renew_attention_paths(attentions)
466
- meta_path = {
467
- "old": f"output_blocks.{i}.1",
468
- "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
469
- }
470
- assign_to_checkpoint(
471
- paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
472
- )
473
- else:
474
- resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)
475
- for path in resnet_0_paths:
476
- old_path = ".".join(["output_blocks", str(i), path["old"]])
477
- new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])
478
-
479
- new_checkpoint[new_path] = unet_state_dict[old_path]
480
-
481
- return new_checkpoint
482
-
483
-
484
- # Copied from diffusers.pipelines.stable_diffusion.convert_from_ckpt.convert_ldm_vae_checkpoint
485
- def convert_ldm_vae_checkpoint(checkpoint, config):
486
- # extract state dict for VAE
487
- vae_state_dict = {}
488
- vae_key = "first_stage_model."
489
- keys = list(checkpoint.keys())
490
- for key in keys:
491
- if key.startswith(vae_key):
492
- vae_state_dict[key.replace(vae_key, "")] = checkpoint.get(key)
493
-
494
- new_checkpoint = {}
495
-
496
- new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
497
- new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
498
- new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
499
- new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
500
- new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
501
- new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]
502
-
503
- new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
504
- new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
505
- new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
506
- new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
507
- new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
508
- new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]
509
-
510
- new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
511
- new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
512
- new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
513
- new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]
514
-
515
- # Retrieves the keys for the encoder down blocks only
516
- num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
517
- down_blocks = {
518
- layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
519
- }
520
-
521
- # Retrieves the keys for the decoder up blocks only
522
- num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
523
- up_blocks = {
524
- layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
525
- }
526
-
527
- for i in range(num_down_blocks):
528
- resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]
529
-
530
- if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
531
- new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
532
- f"encoder.down.{i}.downsample.conv.weight"
533
- )
534
- new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
535
- f"encoder.down.{i}.downsample.conv.bias"
536
- )
537
-
538
- paths = renew_vae_resnet_paths(resnets)
539
- meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
540
- assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
541
-
542
- mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
543
- num_mid_res_blocks = 2
544
- for i in range(1, num_mid_res_blocks + 1):
545
- resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]
546
-
547
- paths = renew_vae_resnet_paths(resnets)
548
- meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
549
- assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
550
-
551
- mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
552
- paths = renew_vae_attention_paths(mid_attentions)
553
- meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
554
- assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
555
- conv_attn_to_linear(new_checkpoint)
556
-
557
- for i in range(num_up_blocks):
558
- block_id = num_up_blocks - 1 - i
559
- resnets = [
560
- key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
561
- ]
562
-
563
- if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
564
- new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
565
- f"decoder.up.{block_id}.upsample.conv.weight"
566
- ]
567
- new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
568
- f"decoder.up.{block_id}.upsample.conv.bias"
569
- ]
570
-
571
- paths = renew_vae_resnet_paths(resnets)
572
- meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
573
- assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
574
-
575
- mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
576
- num_mid_res_blocks = 2
577
- for i in range(1, num_mid_res_blocks + 1):
578
- resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]
579
-
580
- paths = renew_vae_resnet_paths(resnets)
581
- meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
582
- assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
583
-
584
- mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
585
- paths = renew_vae_attention_paths(mid_attentions)
586
- meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
587
- assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
588
- conv_attn_to_linear(new_checkpoint)
589
- return new_checkpoint
590
-
591
-
592
- CLAP_KEYS_TO_MODIFY_MAPPING = {
593
- "text_branch": "text_model",
594
- "attn": "attention.self",
595
- "self.proj": "output.dense",
596
- "attention.self_mask": "attn_mask",
597
- "mlp.fc1": "intermediate.dense",
598
- "mlp.fc2": "output.dense",
599
- "norm1": "layernorm_before",
600
- "norm2": "layernorm_after",
601
- "bn0": "batch_norm",
602
- }
603
-
604
- CLAP_KEYS_TO_IGNORE = ["text_transform"]
605
-
606
- CLAP_EXPECTED_MISSING_KEYS = ["text_model.embeddings.token_type_ids"]
607
-
608
-
609
- def convert_open_clap_checkpoint(checkpoint):
610
- """
611
- Takes a state dict and returns a converted CLAP checkpoint.
612
- """
613
- # extract state dict for CLAP text embedding model, discarding the audio component
614
- model_state_dict = {}
615
- model_key = "cond_stage_model.model.text_"
616
- keys = list(checkpoint.keys())
617
- for key in keys:
618
- if key.startswith(model_key):
619
- model_state_dict[key.replace(model_key, "text_")] = checkpoint.get(key)
620
-
621
- new_checkpoint = {}
622
-
623
- sequential_layers_pattern = r".*sequential.(\d+).*"
624
- text_projection_pattern = r".*_projection.(\d+).*"
625
-
626
- for key, value in model_state_dict.items():
627
- # check if key should be ignored in mapping
628
- if key.split(".")[0] in CLAP_KEYS_TO_IGNORE:
629
- continue
630
-
631
- # check if any key needs to be modified
632
- for key_to_modify, new_key in CLAP_KEYS_TO_MODIFY_MAPPING.items():
633
- if key_to_modify in key:
634
- key = key.replace(key_to_modify, new_key)
635
-
636
- if re.match(sequential_layers_pattern, key):
637
- # replace sequential layers with list
638
- sequential_layer = re.match(sequential_layers_pattern, key).group(1)
639
-
640
- key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
641
- elif re.match(text_projection_pattern, key):
642
- projecton_layer = int(re.match(text_projection_pattern, key).group(1))
643
-
644
- # Because in CLAP they use `nn.Sequential`...
645
- transformers_projection_layer = 1 if projecton_layer == 0 else 2
646
-
647
- key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")
648
-
649
- if "audio" and "qkv" in key:
650
- # split qkv into query key and value
651
- mixed_qkv = value
652
- qkv_dim = mixed_qkv.size(0) // 3
653
-
654
- query_layer = mixed_qkv[:qkv_dim]
655
- key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
656
- value_layer = mixed_qkv[qkv_dim * 2 :]
657
-
658
- new_checkpoint[key.replace("qkv", "query")] = query_layer
659
- new_checkpoint[key.replace("qkv", "key")] = key_layer
660
- new_checkpoint[key.replace("qkv", "value")] = value_layer
661
- else:
662
- new_checkpoint[key] = value
663
-
664
- return new_checkpoint
665
-
666
-
667
- def create_transformers_vocoder_config(original_config):
668
- """
669
- Creates a config for transformers SpeechT5HifiGan based on the config of the vocoder model.
670
- """
671
- vocoder_params = original_config.model.params.vocoder_config.params
672
-
673
- config = {
674
- "model_in_dim": vocoder_params.num_mels,
675
- "sampling_rate": vocoder_params.sampling_rate,
676
- "upsample_initial_channel": vocoder_params.upsample_initial_channel,
677
- "upsample_rates": list(vocoder_params.upsample_rates),
678
- "upsample_kernel_sizes": list(vocoder_params.upsample_kernel_sizes),
679
- "resblock_kernel_sizes": list(vocoder_params.resblock_kernel_sizes),
680
- "resblock_dilation_sizes": [
681
- list(resblock_dilation) for resblock_dilation in vocoder_params.resblock_dilation_sizes
682
- ],
683
- "normalize_before": False,
684
- }
685
-
686
- return config
687
-
688
-
689
- def convert_hifigan_checkpoint(checkpoint, config):
690
- """
691
- Takes a state dict and config, and returns a converted HiFiGAN vocoder checkpoint.
692
- """
693
- # extract state dict for vocoder
694
- vocoder_state_dict = {}
695
- vocoder_key = "first_stage_model.vocoder."
696
- keys = list(checkpoint.keys())
697
- for key in keys:
698
- if key.startswith(vocoder_key):
699
- vocoder_state_dict[key.replace(vocoder_key, "")] = checkpoint.get(key)
700
-
701
- # fix upsampler keys, everything else is correct already
702
- for i in range(len(config.upsample_rates)):
703
- vocoder_state_dict[f"upsampler.{i}.weight"] = vocoder_state_dict.pop(f"ups.{i}.weight")
704
- vocoder_state_dict[f"upsampler.{i}.bias"] = vocoder_state_dict.pop(f"ups.{i}.bias")
705
-
706
- if not config.normalize_before:
707
- # if we don't set normalize_before then these variables are unused, so we set them to their initialised values
708
- vocoder_state_dict["mean"] = torch.zeros(config.model_in_dim)
709
- vocoder_state_dict["scale"] = torch.ones(config.model_in_dim)
710
-
711
- return vocoder_state_dict
712
-
713
-
714
- # Adapted from https://huggingface.co/spaces/haoheliu/audioldm-text-to-audio-generation/blob/84a0384742a22bd80c44e903e241f0623e874f1d/audioldm/utils.py#L72-L73
715
- DEFAULT_CONFIG = {
716
- "model": {
717
- "params": {
718
- "linear_start": 0.0015,
719
- "linear_end": 0.0195,
720
- "timesteps": 1000,
721
- "channels": 8,
722
- "scale_by_std": True,
723
- "unet_config": {
724
- "target": "audioldm.latent_diffusion.openaimodel.UNetModel",
725
- "params": {
726
- "extra_film_condition_dim": 512,
727
- "extra_film_use_concat": True,
728
- "in_channels": 8,
729
- "out_channels": 8,
730
- "model_channels": 128,
731
- "attention_resolutions": [8, 4, 2],
732
- "num_res_blocks": 2,
733
- "channel_mult": [1, 2, 3, 5],
734
- "num_head_channels": 32,
735
- },
736
- },
737
- "first_stage_config": {
738
- "target": "audioldm.variational_autoencoder.autoencoder.AutoencoderKL",
739
- "params": {
740
- "embed_dim": 8,
741
- "ddconfig": {
742
- "z_channels": 8,
743
- "resolution": 256,
744
- "in_channels": 1,
745
- "out_ch": 1,
746
- "ch": 128,
747
- "ch_mult": [1, 2, 4],
748
- "num_res_blocks": 2,
749
- },
750
- },
751
- },
752
- "vocoder_config": {
753
- "target": "audioldm.first_stage_model.vocoder",
754
- "params": {
755
- "upsample_rates": [5, 4, 2, 2, 2],
756
- "upsample_kernel_sizes": [16, 16, 8, 4, 4],
757
- "upsample_initial_channel": 1024,
758
- "resblock_kernel_sizes": [3, 7, 11],
759
- "resblock_dilation_sizes": [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
760
- "num_mels": 64,
761
- "sampling_rate": 16000,
762
- },
763
- },
764
- },
765
- },
766
- }
767
-
768
-
769
- def load_pipeline_from_original_audioldm_ckpt(
770
- checkpoint_path: str,
771
- original_config_file: str = None,
772
- image_size: int = 512,
773
- prediction_type: str = None,
774
- extract_ema: bool = False,
775
- scheduler_type: str = "ddim",
776
- num_in_channels: int = None,
777
- model_channels: int = None,
778
- num_head_channels: int = None,
779
- device: str = None,
780
- from_safetensors: bool = False,
781
- ) -> AudioLDMPipeline:
782
- """
783
- Load an AudioLDM pipeline object from a `.ckpt`/`.safetensors` file and (ideally) a `.yaml` config file.
784
-
785
- Although many of the arguments can be automatically inferred, some of these rely on brittle checks against the
786
- global step count, which will likely fail for models that have undergone further fine-tuning. Therefore, it is
787
- recommended that you override the default values and/or supply an `original_config_file` wherever possible.
788
-
789
- Args:
790
- checkpoint_path (`str`): Path to `.ckpt` file.
791
- original_config_file (`str`):
792
- Path to `.yaml` config file corresponding to the original architecture. If `None`, will be automatically
793
- set to the audioldm-s-full-v2 config.
794
- image_size (`int`, *optional*, defaults to 512):
795
- The image size that the model was trained on.
796
- prediction_type (`str`, *optional*):
797
- The prediction type that the model was trained on. If `None`, will be automatically
798
- inferred by looking for a key in the config. For the default config, the prediction type is `'epsilon'`.
799
- num_in_channels (`int`, *optional*, defaults to None):
800
- The number of UNet input channels. If `None`, it will be automatically inferred from the config.
801
- model_channels (`int`, *optional*, defaults to None):
802
- The number of UNet model channels. If `None`, it will be automatically inferred from the config. Override
803
- to 128 for the small checkpoints, 192 for the medium checkpoints and 256 for the large.
804
- num_head_channels (`int`, *optional*, defaults to None):
805
- The number of UNet head channels. If `None`, it will be automatically inferred from the config. Override
806
- to 32 for the small and medium checkpoints, and 64 for the large.
807
- scheduler_type (`str`, *optional*, defaults to 'pndm'):
808
- Type of scheduler to use. Should be one of `["pndm", "lms", "heun", "euler", "euler-ancestral", "dpm",
809
- "ddim"]`.
810
- extract_ema (`bool`, *optional*, defaults to `False`): Only relevant for
811
- checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights or not. Defaults to
812
- `False`. Pass `True` to extract the EMA weights. EMA weights usually yield higher quality images for
813
- inference. Non-EMA weights are usually better to continue fine-tuning.
814
- device (`str`, *optional*, defaults to `None`):
815
- The device to use. Pass `None` to determine automatically.
816
- from_safetensors (`str`, *optional*, defaults to `False`):
817
- If `checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.
818
- return: An AudioLDMPipeline object representing the passed-in `.ckpt`/`.safetensors` file.
819
- """
820
-
821
- if not is_omegaconf_available():
822
- raise ValueError(BACKENDS_MAPPING["omegaconf"][1])
823
-
824
- from omegaconf import OmegaConf
825
-
826
- if from_safetensors:
827
- if not is_safetensors_available():
828
- raise ValueError(BACKENDS_MAPPING["safetensors"][1])
829
-
830
- from safetensors import safe_open
831
-
832
- checkpoint = {}
833
- with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
834
- for key in f.keys():
835
- checkpoint[key] = f.get_tensor(key)
836
- else:
837
- if device is None:
838
- device = "cuda" if torch.cuda.is_available() else "cpu"
839
- checkpoint = torch.load(checkpoint_path, map_location=device)
840
- else:
841
- checkpoint = torch.load(checkpoint_path, map_location=device)
842
-
843
- if "state_dict" in checkpoint:
844
- checkpoint = checkpoint["state_dict"]
845
-
846
- if original_config_file is None:
847
- original_config = DEFAULT_CONFIG
848
- original_config = OmegaConf.create(original_config)
849
- else:
850
- original_config = OmegaConf.load(original_config_file)
851
-
852
- if num_in_channels is not None:
853
- original_config["model"]["params"]["unet_config"]["params"]["in_channels"] = num_in_channels
854
-
855
- if model_channels is not None:
856
- original_config["model"]["params"]["unet_config"]["params"]["model_channels"] = model_channels
857
-
858
- if num_head_channels is not None:
859
- original_config["model"]["params"]["unet_config"]["params"]["num_head_channels"] = num_head_channels
860
-
861
- if (
862
- "parameterization" in original_config["model"]["params"]
863
- and original_config["model"]["params"]["parameterization"] == "v"
864
- ):
865
- if prediction_type is None:
866
- prediction_type = "v_prediction"
867
- else:
868
- if prediction_type is None:
869
- prediction_type = "epsilon"
870
-
871
- if image_size is None:
872
- image_size = 512
873
-
874
- num_train_timesteps = original_config.model.params.timesteps
875
- beta_start = original_config.model.params.linear_start
876
- beta_end = original_config.model.params.linear_end
877
-
878
- scheduler = DDIMScheduler(
879
- beta_end=beta_end,
880
- beta_schedule="scaled_linear",
881
- beta_start=beta_start,
882
- num_train_timesteps=num_train_timesteps,
883
- steps_offset=1,
884
- clip_sample=False,
885
- set_alpha_to_one=False,
886
- prediction_type=prediction_type,
887
- )
888
- # make sure scheduler works correctly with DDIM
889
- scheduler.register_to_config(clip_sample=False)
890
-
891
- if scheduler_type == "pndm":
892
- config = dict(scheduler.config)
893
- config["skip_prk_steps"] = True
894
- scheduler = PNDMScheduler.from_config(config)
895
- elif scheduler_type == "lms":
896
- scheduler = LMSDiscreteScheduler.from_config(scheduler.config)
897
- elif scheduler_type == "heun":
898
- scheduler = HeunDiscreteScheduler.from_config(scheduler.config)
899
- elif scheduler_type == "euler":
900
- scheduler = EulerDiscreteScheduler.from_config(scheduler.config)
901
- elif scheduler_type == "euler-ancestral":
902
- scheduler = EulerAncestralDiscreteScheduler.from_config(scheduler.config)
903
- elif scheduler_type == "dpm":
904
- scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
905
- elif scheduler_type == "ddim":
906
- scheduler = scheduler
907
- else:
908
- raise ValueError(f"Scheduler of type {scheduler_type} doesn't exist!")
909
-
910
- # Convert the UNet2DModel
911
- unet_config = create_unet_diffusers_config(original_config, image_size=image_size)
912
- unet = UNet2DConditionModel(**unet_config)
913
-
914
- converted_unet_checkpoint = convert_ldm_unet_checkpoint(
915
- checkpoint, unet_config, path=checkpoint_path, extract_ema=extract_ema
916
- )
917
-
918
- unet.load_state_dict(converted_unet_checkpoint)
919
-
920
- # Convert the VAE model
921
- vae_config = create_vae_diffusers_config(original_config, checkpoint=checkpoint, image_size=image_size)
922
- converted_vae_checkpoint = convert_ldm_vae_checkpoint(checkpoint, vae_config)
923
-
924
- vae = AutoencoderKL(**vae_config)
925
- vae.load_state_dict(converted_vae_checkpoint)
926
-
927
- # Convert the text model
928
- # AudioLDM uses the same configuration and tokenizer as the original CLAP model
929
- config = ClapTextConfig.from_pretrained("laion/clap-htsat-unfused")
930
- tokenizer = AutoTokenizer.from_pretrained("laion/clap-htsat-unfused")
931
-
932
- converted_text_model = convert_open_clap_checkpoint(checkpoint)
933
- text_model = ClapTextModelWithProjection(config)
934
-
935
- missing_keys, unexpected_keys = text_model.load_state_dict(converted_text_model, strict=False)
936
- # we expect not to have token_type_ids in our original state dict so let's ignore them
937
- missing_keys = list(set(missing_keys) - set(CLAP_EXPECTED_MISSING_KEYS))
938
-
939
- if len(unexpected_keys) > 0:
940
- raise ValueError(f"Unexpected keys when loading CLAP model: {unexpected_keys}")
941
-
942
- if len(missing_keys) > 0:
943
- raise ValueError(f"Missing keys when loading CLAP model: {missing_keys}")
944
-
945
- # Convert the vocoder model
946
- vocoder_config = create_transformers_vocoder_config(original_config)
947
- vocoder_config = SpeechT5HifiGanConfig(**vocoder_config)
948
- converted_vocoder_checkpoint = convert_hifigan_checkpoint(checkpoint, vocoder_config)
949
-
950
- vocoder = SpeechT5HifiGan(vocoder_config)
951
- vocoder.load_state_dict(converted_vocoder_checkpoint)
952
-
953
- # Instantiate the diffusers pipeline
954
- pipe = AudioLDMPipeline(
955
- vae=vae,
956
- text_encoder=text_model,
957
- tokenizer=tokenizer,
958
- unet=unet,
959
- scheduler=scheduler,
960
- vocoder=vocoder,
961
- )
962
-
963
- return pipe
964
-
965
-
966
- if __name__ == "__main__":
967
- parser = argparse.ArgumentParser()
968
-
969
- parser.add_argument(
970
- "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
971
- )
972
- parser.add_argument(
973
- "--original_config_file",
974
- default=None,
975
- type=str,
976
- help="The YAML config file corresponding to the original architecture.",
977
- )
978
- parser.add_argument(
979
- "--num_in_channels",
980
- default=None,
981
- type=int,
982
- help="The number of input channels. If `None` number of input channels will be automatically inferred.",
983
- )
984
- parser.add_argument(
985
- "--model_channels",
986
- default=None,
987
- type=int,
988
- help="The number of UNet model channels. If `None`, it will be automatically inferred from the config. Override"
989
- " to 128 for the small checkpoints, 192 for the medium checkpoints and 256 for the large.",
990
- )
991
- parser.add_argument(
992
- "--num_head_channels",
993
- default=None,
994
- type=int,
995
- help="The number of UNet head channels. If `None`, it will be automatically inferred from the config. Override"
996
- " to 32 for the small and medium checkpoints, and 64 for the large.",
997
- )
998
- parser.add_argument(
999
- "--scheduler_type",
1000
- default="ddim",
1001
- type=str,
1002
- help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']",
1003
- )
1004
- parser.add_argument(
1005
- "--image_size",
1006
- default=None,
1007
- type=int,
1008
- help=("The image size that the model was trained on."),
1009
- )
1010
- parser.add_argument(
1011
- "--prediction_type",
1012
- default=None,
1013
- type=str,
1014
- help=("The prediction type that the model was trained on."),
1015
- )
1016
- parser.add_argument(
1017
- "--extract_ema",
1018
- action="store_true",
1019
- help=(
1020
- "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
1021
- " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
1022
- " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
1023
- ),
1024
- )
1025
- parser.add_argument(
1026
- "--from_safetensors",
1027
- action="store_true",
1028
- help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
1029
- )
1030
- parser.add_argument(
1031
- "--to_safetensors",
1032
- action="store_true",
1033
- help="Whether to store pipeline in safetensors format or not.",
1034
- )
1035
- parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
1036
- parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
1037
- args = parser.parse_args()
1038
-
1039
- pipe = load_pipeline_from_original_audioldm_ckpt(
1040
- checkpoint_path=args.checkpoint_path,
1041
- original_config_file=args.original_config_file,
1042
- image_size=args.image_size,
1043
- prediction_type=args.prediction_type,
1044
- extract_ema=args.extract_ema,
1045
- scheduler_type=args.scheduler_type,
1046
- num_in_channels=args.num_in_channels,
1047
- model_channels=args.model_channels,
1048
- num_head_channels=args.num_head_channels,
1049
- from_safetensors=args.from_safetensors,
1050
- device=args.device,
1051
- )
1052
- pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
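A minimal sketch of driving the conversion entry point above from Python instead of the CLI; the import assumes the script is importable as a module on the Python path, and the checkpoint and output paths are placeholders:

    from convert_original_audioldm_to_diffusers import load_pipeline_from_original_audioldm_ckpt

    # Placeholder paths; point these at a real AudioLDM .ckpt and an output directory.
    pipe = load_pipeline_from_original_audioldm_ckpt(
        checkpoint_path="audioldm-s-full-v2.ckpt",
        original_config_file=None,  # falls back to the bundled DEFAULT_CONFIG
        scheduler_type="ddim",
        extract_ema=False,
    )
    pipe.save_pretrained("audioldm-s-full-v2-diffusers")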
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/transformer_2d.py DELETED
@@ -1,342 +0,0 @@
1
- # Copyright 2023 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
- from dataclasses import dataclass
15
- from typing import Any, Dict, Optional
16
-
17
- import torch
18
- import torch.nn.functional as F
19
- from torch import nn
20
-
21
- from ..configuration_utils import ConfigMixin, register_to_config
22
- from ..models.embeddings import ImagePositionalEmbeddings
23
- from ..utils import BaseOutput, deprecate
24
- from .attention import BasicTransformerBlock
25
- from .embeddings import PatchEmbed
26
- from .lora import LoRACompatibleConv, LoRACompatibleLinear
27
- from .modeling_utils import ModelMixin
28
-
29
-
30
- @dataclass
31
- class Transformer2DModelOutput(BaseOutput):
32
- """
33
- The output of [`Transformer2DModel`].
34
-
35
- Args:
36
- sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` or `(batch size, num_vector_embeds - 1, num_latent_pixels)` if [`Transformer2DModel`] is discrete):
37
- The hidden states output conditioned on the `encoder_hidden_states` input. If discrete, returns probability
38
- distributions for the unnoised latent pixels.
39
- """
40
-
41
- sample: torch.FloatTensor
42
-
43
-
44
- class Transformer2DModel(ModelMixin, ConfigMixin):
45
- """
46
- A 2D Transformer model for image-like data.
47
-
48
- Parameters:
49
- num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention.
50
- attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head.
51
- in_channels (`int`, *optional*):
52
- The number of channels in the input and output (specify if the input is **continuous**).
53
- num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use.
54
- dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
55
- cross_attention_dim (`int`, *optional*): The number of `encoder_hidden_states` dimensions to use.
56
- sample_size (`int`, *optional*): The width of the latent images (specify if the input is **discrete**).
57
- This is fixed during training since it is used to learn a number of position embeddings.
58
- num_vector_embeds (`int`, *optional*):
59
- The number of classes of the vector embeddings of the latent pixels (specify if the input is **discrete**).
60
- Includes the class for the masked latent pixel.
61
- activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to use in feed-forward.
62
- num_embeds_ada_norm ( `int`, *optional*):
63
- The number of diffusion steps used during training. Pass if at least one of the norm_layers is
64
- `AdaLayerNorm`. This is fixed during training since it is used to learn a number of embeddings that are
65
- added to the hidden states.
66
-
67
- During inference, you can denoise for up to but not more steps than `num_embeds_ada_norm`.
68
- attention_bias (`bool`, *optional*):
69
- Configure if the `TransformerBlocks` attention should contain a bias parameter.
70
- """
71
-
72
- @register_to_config
73
- def __init__(
74
- self,
75
- num_attention_heads: int = 16,
76
- attention_head_dim: int = 88,
77
- in_channels: Optional[int] = None,
78
- out_channels: Optional[int] = None,
79
- num_layers: int = 1,
80
- dropout: float = 0.0,
81
- norm_num_groups: int = 32,
82
- cross_attention_dim: Optional[int] = None,
83
- attention_bias: bool = False,
84
- sample_size: Optional[int] = None,
85
- num_vector_embeds: Optional[int] = None,
86
- patch_size: Optional[int] = None,
87
- activation_fn: str = "geglu",
88
- num_embeds_ada_norm: Optional[int] = None,
89
- use_linear_projection: bool = False,
90
- only_cross_attention: bool = False,
91
- upcast_attention: bool = False,
92
- norm_type: str = "layer_norm",
93
- norm_elementwise_affine: bool = True,
94
- ):
95
- super().__init__()
96
- self.use_linear_projection = use_linear_projection
97
- self.num_attention_heads = num_attention_heads
98
- self.attention_head_dim = attention_head_dim
99
- inner_dim = num_attention_heads * attention_head_dim
100
-
101
- # 1. Transformer2DModel can process both standard continuous images of shape `(batch_size, num_channels, width, height)` as well as quantized image embeddings of shape `(batch_size, num_image_vectors)`
102
- # Define whether input is continuous or discrete depending on configuration
103
- self.is_input_continuous = (in_channels is not None) and (patch_size is None)
104
- self.is_input_vectorized = num_vector_embeds is not None
105
- self.is_input_patches = in_channels is not None and patch_size is not None
106
-
107
- if norm_type == "layer_norm" and num_embeds_ada_norm is not None:
108
- deprecation_message = (
109
- f"The configuration file of this model: {self.__class__} is outdated. `norm_type` is either not set or"
110
- " incorrectly set to `'layer_norm'`.Make sure to set `norm_type` to `'ada_norm'` in the config."
111
- " Please make sure to update the config accordingly as leaving `norm_type` might led to incorrect"
112
- " results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it"
113
- " would be very nice if you could open a Pull request for the `transformer/config.json` file"
114
- )
115
- deprecate("norm_type!=num_embeds_ada_norm", "1.0.0", deprecation_message, standard_warn=False)
116
- norm_type = "ada_norm"
117
-
118
- if self.is_input_continuous and self.is_input_vectorized:
119
- raise ValueError(
120
- f"Cannot define both `in_channels`: {in_channels} and `num_vector_embeds`: {num_vector_embeds}. Make"
121
- " sure that either `in_channels` or `num_vector_embeds` is None."
122
- )
123
- elif self.is_input_vectorized and self.is_input_patches:
124
- raise ValueError(
125
- f"Cannot define both `num_vector_embeds`: {num_vector_embeds} and `patch_size`: {patch_size}. Make"
126
- " sure that either `num_vector_embeds` or `num_patches` is None."
127
- )
128
- elif not self.is_input_continuous and not self.is_input_vectorized and not self.is_input_patches:
129
- raise ValueError(
130
- f"Has to define `in_channels`: {in_channels}, `num_vector_embeds`: {num_vector_embeds}, or patch_size:"
131
- f" {patch_size}. Make sure that `in_channels`, `num_vector_embeds` or `num_patches` is not None."
132
- )
133
-
134
- # 2. Define input layers
135
- if self.is_input_continuous:
136
- self.in_channels = in_channels
137
-
138
- self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
139
- if use_linear_projection:
140
- self.proj_in = LoRACompatibleLinear(in_channels, inner_dim)
141
- else:
142
- self.proj_in = LoRACompatibleConv(in_channels, inner_dim, kernel_size=1, stride=1, padding=0)
143
- elif self.is_input_vectorized:
144
- assert sample_size is not None, "Transformer2DModel over discrete input must provide sample_size"
145
- assert num_vector_embeds is not None, "Transformer2DModel over discrete input must provide num_embed"
146
-
147
- self.height = sample_size
148
- self.width = sample_size
149
- self.num_vector_embeds = num_vector_embeds
150
- self.num_latent_pixels = self.height * self.width
151
-
152
- self.latent_image_embedding = ImagePositionalEmbeddings(
153
- num_embed=num_vector_embeds, embed_dim=inner_dim, height=self.height, width=self.width
154
- )
155
- elif self.is_input_patches:
156
- assert sample_size is not None, "Transformer2DModel over patched input must provide sample_size"
157
-
158
- self.height = sample_size
159
- self.width = sample_size
160
-
161
- self.patch_size = patch_size
162
- self.pos_embed = PatchEmbed(
163
- height=sample_size,
164
- width=sample_size,
165
- patch_size=patch_size,
166
- in_channels=in_channels,
167
- embed_dim=inner_dim,
168
- )
169
-
170
- # 3. Define transformers blocks
171
- self.transformer_blocks = nn.ModuleList(
172
- [
173
- BasicTransformerBlock(
174
- inner_dim,
175
- num_attention_heads,
176
- attention_head_dim,
177
- dropout=dropout,
178
- cross_attention_dim=cross_attention_dim,
179
- activation_fn=activation_fn,
180
- num_embeds_ada_norm=num_embeds_ada_norm,
181
- attention_bias=attention_bias,
182
- only_cross_attention=only_cross_attention,
183
- upcast_attention=upcast_attention,
184
- norm_type=norm_type,
185
- norm_elementwise_affine=norm_elementwise_affine,
186
- )
187
- for d in range(num_layers)
188
- ]
189
- )
190
-
191
- # 4. Define output layers
192
- self.out_channels = in_channels if out_channels is None else out_channels
193
- if self.is_input_continuous:
194
- # TODO: should use out_channels for continuous projections
195
- if use_linear_projection:
196
- self.proj_out = LoRACompatibleLinear(inner_dim, in_channels)
197
- else:
198
- self.proj_out = LoRACompatibleConv(inner_dim, in_channels, kernel_size=1, stride=1, padding=0)
199
- elif self.is_input_vectorized:
200
- self.norm_out = nn.LayerNorm(inner_dim)
201
- self.out = nn.Linear(inner_dim, self.num_vector_embeds - 1)
202
- elif self.is_input_patches:
203
- self.norm_out = nn.LayerNorm(inner_dim, elementwise_affine=False, eps=1e-6)
204
- self.proj_out_1 = nn.Linear(inner_dim, 2 * inner_dim)
205
- self.proj_out_2 = nn.Linear(inner_dim, patch_size * patch_size * self.out_channels)
206
-
207
- def forward(
208
- self,
209
- hidden_states: torch.Tensor,
210
- encoder_hidden_states: Optional[torch.Tensor] = None,
211
- timestep: Optional[torch.LongTensor] = None,
212
- class_labels: Optional[torch.LongTensor] = None,
213
- cross_attention_kwargs: Dict[str, Any] = None,
214
- attention_mask: Optional[torch.Tensor] = None,
215
- encoder_attention_mask: Optional[torch.Tensor] = None,
216
- return_dict: bool = True,
217
- ):
218
- """
219
- The [`Transformer2DModel`] forward method.
220
-
221
- Args:
222
- hidden_states (`torch.LongTensor` of shape `(batch size, num latent pixels)` if discrete, `torch.FloatTensor` of shape `(batch size, channel, height, width)` if continuous):
223
- Input `hidden_states`.
224
- encoder_hidden_states ( `torch.FloatTensor` of shape `(batch size, sequence len, embed dims)`, *optional*):
225
- Conditional embeddings for cross attention layer. If not given, cross-attention defaults to
226
- self-attention.
227
- timestep ( `torch.LongTensor`, *optional*):
228
- Used to indicate denoising step. Optional timestep to be applied as an embedding in `AdaLayerNorm`.
229
- class_labels ( `torch.LongTensor` of shape `(batch size, num classes)`, *optional*):
230
- Used to indicate class labels conditioning. Optional class labels to be applied as an embedding in
231
- `AdaLayerZeroNorm`.
232
- encoder_attention_mask ( `torch.Tensor`, *optional*):
233
- Cross-attention mask applied to `encoder_hidden_states`. Two formats supported:
234
-
235
- * Mask `(batch, sequence_length)` True = keep, False = discard.
236
- * Bias `(batch, 1, sequence_length)` 0 = keep, -10000 = discard.
237
-
238
- If `ndim == 2`: will be interpreted as a mask, then converted into a bias consistent with the format
239
- above. This bias will be added to the cross-attention scores.
240
- return_dict (`bool`, *optional*, defaults to `True`):
241
- Whether or not to return a [`~models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain
242
- tuple.
243
-
244
- Returns:
245
- If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a
246
- `tuple` where the first element is the sample tensor.
247
- """
248
- # ensure attention_mask is a bias, and give it a singleton query_tokens dimension.
249
- # we may have done this conversion already, e.g. if we came here via UNet2DConditionModel#forward.
250
- # we can tell by counting dims; if ndim == 2: it's a mask rather than a bias.
251
- # expects mask of shape:
252
- # [batch, key_tokens]
253
- # adds singleton query_tokens dimension:
254
- # [batch, 1, key_tokens]
255
- # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:
256
- # [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn)
257
- # [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)
258
- if attention_mask is not None and attention_mask.ndim == 2:
259
- # assume that mask is expressed as:
260
- # (1 = keep, 0 = discard)
261
- # convert mask into a bias that can be added to attention scores:
262
- # (keep = +0, discard = -10000.0)
263
- attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
264
- attention_mask = attention_mask.unsqueeze(1)
265
-
266
- # convert encoder_attention_mask to a bias the same way we do for attention_mask
267
- if encoder_attention_mask is not None and encoder_attention_mask.ndim == 2:
268
- encoder_attention_mask = (1 - encoder_attention_mask.to(hidden_states.dtype)) * -10000.0
269
- encoder_attention_mask = encoder_attention_mask.unsqueeze(1)
270
-
271
- # 1. Input
272
- if self.is_input_continuous:
273
- batch, _, height, width = hidden_states.shape
274
- residual = hidden_states
275
-
276
- hidden_states = self.norm(hidden_states)
277
- if not self.use_linear_projection:
278
- hidden_states = self.proj_in(hidden_states)
279
- inner_dim = hidden_states.shape[1]
280
- hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * width, inner_dim)
281
- else:
282
- inner_dim = hidden_states.shape[1]
283
- hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * width, inner_dim)
284
- hidden_states = self.proj_in(hidden_states)
285
- elif self.is_input_vectorized:
286
- hidden_states = self.latent_image_embedding(hidden_states)
287
- elif self.is_input_patches:
288
- hidden_states = self.pos_embed(hidden_states)
289
-
290
- # 2. Blocks
291
- for block in self.transformer_blocks:
292
- hidden_states = block(
293
- hidden_states,
294
- attention_mask=attention_mask,
295
- encoder_hidden_states=encoder_hidden_states,
296
- encoder_attention_mask=encoder_attention_mask,
297
- timestep=timestep,
298
- cross_attention_kwargs=cross_attention_kwargs,
299
- class_labels=class_labels,
300
- )
301
-
302
- # 3. Output
303
- if self.is_input_continuous:
304
- if not self.use_linear_projection:
305
- hidden_states = hidden_states.reshape(batch, height, width, inner_dim).permute(0, 3, 1, 2).contiguous()
306
- hidden_states = self.proj_out(hidden_states)
307
- else:
308
- hidden_states = self.proj_out(hidden_states)
309
- hidden_states = hidden_states.reshape(batch, height, width, inner_dim).permute(0, 3, 1, 2).contiguous()
310
-
311
- output = hidden_states + residual
312
- elif self.is_input_vectorized:
313
- hidden_states = self.norm_out(hidden_states)
314
- logits = self.out(hidden_states)
315
- # (batch, self.num_vector_embeds - 1, self.num_latent_pixels)
316
- logits = logits.permute(0, 2, 1)
317
-
318
- # log(p(x_0))
319
- output = F.log_softmax(logits.double(), dim=1).float()
320
- elif self.is_input_patches:
321
- # TODO: cleanup!
322
- conditioning = self.transformer_blocks[0].norm1.emb(
323
- timestep, class_labels, hidden_dtype=hidden_states.dtype
324
- )
325
- shift, scale = self.proj_out_1(F.silu(conditioning)).chunk(2, dim=1)
326
- hidden_states = self.norm_out(hidden_states) * (1 + scale[:, None]) + shift[:, None]
327
- hidden_states = self.proj_out_2(hidden_states)
328
-
329
- # unpatchify
330
- height = width = int(hidden_states.shape[1] ** 0.5)
331
- hidden_states = hidden_states.reshape(
332
- shape=(-1, height, width, self.patch_size, self.patch_size, self.out_channels)
333
- )
334
- hidden_states = torch.einsum("nhwpqc->nchpwq", hidden_states)
335
- output = hidden_states.reshape(
336
- shape=(-1, self.out_channels, height * self.patch_size, width * self.patch_size)
337
- )
338
-
339
- if not return_dict:
340
- return (output,)
341
-
342
- return Transformer2DModelOutput(sample=output)
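For reference, a minimal usage sketch of the deleted Transformer2DModel on its continuous-input path. The constructor arguments and forward signature follow the file above; the concrete channel counts and tensor shapes are illustrative assumptions, not values taken from any checkpoint or config in this repo.

import torch
from diffusers.models import Transformer2DModel  # assumes the diffusers package providing this class is installed

# Continuous (image-like) input: in_channels is set and patch_size is left as None.
model = Transformer2DModel(
    num_attention_heads=8,
    attention_head_dim=40,    # inner_dim = 8 * 40 = 320
    in_channels=320,          # must be divisible by norm_num_groups (default 32)
    cross_attention_dim=768,  # width of the conditioning (text-encoder) hidden states
)

hidden_states = torch.randn(1, 320, 16, 16)       # (batch, channels, height, width)
encoder_hidden_states = torch.randn(1, 77, 768)   # (batch, seq_len, cross_attention_dim)

out = model(hidden_states, encoder_hidden_states=encoder_hidden_states)
print(out.sample.shape)  # torch.Size([1, 320, 16, 16]); the input residual is added back, so the shape is preserved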
 
spaces/Andy1621/uniformer_image_detection/configs/fsaf/fsaf_r50_fpn_1x_coco.py DELETED
@@ -1,48 +0,0 @@
1
- _base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py'
2
- # model settings
3
- model = dict(
4
- type='FSAF',
5
- bbox_head=dict(
6
- type='FSAFHead',
7
- num_classes=80,
8
- in_channels=256,
9
- stacked_convs=4,
10
- feat_channels=256,
11
- reg_decoded_bbox=True,
12
- # Only anchor-free branch is implemented. The anchor generator only
13
- # generates 1 anchor at each feature point, as a substitute of the
14
- # grid of features.
15
- anchor_generator=dict(
16
- type='AnchorGenerator',
17
- octave_base_scale=1,
18
- scales_per_octave=1,
19
- ratios=[1.0],
20
- strides=[8, 16, 32, 64, 128]),
21
- bbox_coder=dict(_delete_=True, type='TBLRBBoxCoder', normalizer=4.0),
22
- loss_cls=dict(
23
- type='FocalLoss',
24
- use_sigmoid=True,
25
- gamma=2.0,
26
- alpha=0.25,
27
- loss_weight=1.0,
28
- reduction='none'),
29
- loss_bbox=dict(
30
- _delete_=True,
31
- type='IoULoss',
32
- eps=1e-6,
33
- loss_weight=1.0,
34
- reduction='none')),
35
- # training and testing settings
36
- train_cfg=dict(
37
- assigner=dict(
38
- _delete_=True,
39
- type='CenterRegionAssigner',
40
- pos_scale=0.2,
41
- neg_scale=0.2,
42
- min_pos_iof=0.01),
43
- allowed_border=-1,
44
- pos_weight=-1,
45
- debug=False))
46
- optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
47
- optimizer_config = dict(
48
- _delete_=True, grad_clip=dict(max_norm=10, norm_type=2))
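A small sketch of how a config file like the one above is typically consumed. It assumes an MMDetection checkout (with mmcv installed) where the deleted path and its `_base_` config still exist.

from mmcv import Config

# Load the FSAF config and inspect the resolved model settings.
cfg = Config.fromfile('configs/fsaf/fsaf_r50_fpn_1x_coco.py')
print(cfg.model.type)                    # 'FSAF'
print(cfg.model.bbox_head.num_classes)   # 80
print(cfg.optimizer)                     # SGD with lr=0.01, as set above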
 
spaces/Andy1621/uniformer_image_detection/configs/reppoints/bbox_r50_grid_center_fpn_gn-neck+head_1x_coco.py DELETED
@@ -1,2 +0,0 @@
1
- _base_ = './reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py'
2
- model = dict(bbox_head=dict(transform_method='minmax', use_grid_points=True))
 
 
 
spaces/Andy1621/uniformer_image_detection/configs/retinanet/retinanet_r50_caffe_fpn_mstrain_2x_coco.py DELETED
@@ -1,4 +0,0 @@
1
- _base_ = './retinanet_r50_caffe_fpn_mstrain_1x_coco.py'
2
- # learning policy
3
- lr_config = dict(step=[16, 23])
4
- runner = dict(type='EpochBasedRunner', max_epochs=24)
 
 
 
 
 
spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/cascade_roi_head.py DELETED
@@ -1,507 +0,0 @@
1
- import torch
2
- import torch.nn as nn
3
-
4
- from mmdet.core import (bbox2result, bbox2roi, bbox_mapping, build_assigner,
5
- build_sampler, merge_aug_bboxes, merge_aug_masks,
6
- multiclass_nms)
7
- from ..builder import HEADS, build_head, build_roi_extractor
8
- from .base_roi_head import BaseRoIHead
9
- from .test_mixins import BBoxTestMixin, MaskTestMixin
10
-
11
-
12
- @HEADS.register_module()
13
- class CascadeRoIHead(BaseRoIHead, BBoxTestMixin, MaskTestMixin):
14
- """Cascade roi head including one bbox head and one mask head.
15
-
16
- https://arxiv.org/abs/1712.00726
17
- """
18
-
19
- def __init__(self,
20
- num_stages,
21
- stage_loss_weights,
22
- bbox_roi_extractor=None,
23
- bbox_head=None,
24
- mask_roi_extractor=None,
25
- mask_head=None,
26
- shared_head=None,
27
- train_cfg=None,
28
- test_cfg=None):
29
- assert bbox_roi_extractor is not None
30
- assert bbox_head is not None
31
- assert shared_head is None, \
32
- 'Shared head is not supported in Cascade RCNN anymore'
33
- self.num_stages = num_stages
34
- self.stage_loss_weights = stage_loss_weights
35
- super(CascadeRoIHead, self).__init__(
36
- bbox_roi_extractor=bbox_roi_extractor,
37
- bbox_head=bbox_head,
38
- mask_roi_extractor=mask_roi_extractor,
39
- mask_head=mask_head,
40
- shared_head=shared_head,
41
- train_cfg=train_cfg,
42
- test_cfg=test_cfg)
43
-
44
- def init_bbox_head(self, bbox_roi_extractor, bbox_head):
45
- """Initialize box head and box roi extractor.
46
-
47
- Args:
48
- bbox_roi_extractor (dict): Config of box roi extractor.
49
- bbox_head (dict): Config of box in box head.
50
- """
51
- self.bbox_roi_extractor = nn.ModuleList()
52
- self.bbox_head = nn.ModuleList()
53
- if not isinstance(bbox_roi_extractor, list):
54
- bbox_roi_extractor = [
55
- bbox_roi_extractor for _ in range(self.num_stages)
56
- ]
57
- if not isinstance(bbox_head, list):
58
- bbox_head = [bbox_head for _ in range(self.num_stages)]
59
- assert len(bbox_roi_extractor) == len(bbox_head) == self.num_stages
60
- for roi_extractor, head in zip(bbox_roi_extractor, bbox_head):
61
- self.bbox_roi_extractor.append(build_roi_extractor(roi_extractor))
62
- self.bbox_head.append(build_head(head))
63
-
64
- def init_mask_head(self, mask_roi_extractor, mask_head):
65
- """Initialize mask head and mask roi extractor.
66
-
67
- Args:
68
- mask_roi_extractor (dict): Config of mask roi extractor.
69
- mask_head (dict): Config of mask in mask head.
70
- """
71
- self.mask_head = nn.ModuleList()
72
- if not isinstance(mask_head, list):
73
- mask_head = [mask_head for _ in range(self.num_stages)]
74
- assert len(mask_head) == self.num_stages
75
- for head in mask_head:
76
- self.mask_head.append(build_head(head))
77
- if mask_roi_extractor is not None:
78
- self.share_roi_extractor = False
79
- self.mask_roi_extractor = nn.ModuleList()
80
- if not isinstance(mask_roi_extractor, list):
81
- mask_roi_extractor = [
82
- mask_roi_extractor for _ in range(self.num_stages)
83
- ]
84
- assert len(mask_roi_extractor) == self.num_stages
85
- for roi_extractor in mask_roi_extractor:
86
- self.mask_roi_extractor.append(
87
- build_roi_extractor(roi_extractor))
88
- else:
89
- self.share_roi_extractor = True
90
- self.mask_roi_extractor = self.bbox_roi_extractor
91
-
92
- def init_assigner_sampler(self):
93
- """Initialize assigner and sampler for each stage."""
94
- self.bbox_assigner = []
95
- self.bbox_sampler = []
96
- if self.train_cfg is not None:
97
- for idx, rcnn_train_cfg in enumerate(self.train_cfg):
98
- self.bbox_assigner.append(
99
- build_assigner(rcnn_train_cfg.assigner))
100
- self.current_stage = idx
101
- self.bbox_sampler.append(
102
- build_sampler(rcnn_train_cfg.sampler, context=self))
103
-
104
- def init_weights(self, pretrained):
105
- """Initialize the weights in head.
106
-
107
- Args:
108
- pretrained (str, optional): Path to pre-trained weights.
109
- Defaults to None.
110
- """
111
- if self.with_shared_head:
112
- self.shared_head.init_weights(pretrained=pretrained)
113
- for i in range(self.num_stages):
114
- if self.with_bbox:
115
- self.bbox_roi_extractor[i].init_weights()
116
- self.bbox_head[i].init_weights()
117
- if self.with_mask:
118
- if not self.share_roi_extractor:
119
- self.mask_roi_extractor[i].init_weights()
120
- self.mask_head[i].init_weights()
121
-
122
- def forward_dummy(self, x, proposals):
123
- """Dummy forward function."""
124
- # bbox head
125
- outs = ()
126
- rois = bbox2roi([proposals])
127
- if self.with_bbox:
128
- for i in range(self.num_stages):
129
- bbox_results = self._bbox_forward(i, x, rois)
130
- outs = outs + (bbox_results['cls_score'],
131
- bbox_results['bbox_pred'])
132
- # mask heads
133
- if self.with_mask:
134
- mask_rois = rois[:100]
135
- for i in range(self.num_stages):
136
- mask_results = self._mask_forward(i, x, mask_rois)
137
- outs = outs + (mask_results['mask_pred'], )
138
- return outs
139
-
140
- def _bbox_forward(self, stage, x, rois):
141
- """Box head forward function used in both training and testing."""
142
- bbox_roi_extractor = self.bbox_roi_extractor[stage]
143
- bbox_head = self.bbox_head[stage]
144
- bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs],
145
- rois)
146
- # do not support caffe_c4 model anymore
147
- cls_score, bbox_pred = bbox_head(bbox_feats)
148
-
149
- bbox_results = dict(
150
- cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats)
151
- return bbox_results
152
-
153
- def _bbox_forward_train(self, stage, x, sampling_results, gt_bboxes,
154
- gt_labels, rcnn_train_cfg):
155
- """Run forward function and calculate loss for box head in training."""
156
- rois = bbox2roi([res.bboxes for res in sampling_results])
157
- bbox_results = self._bbox_forward(stage, x, rois)
158
- bbox_targets = self.bbox_head[stage].get_targets(
159
- sampling_results, gt_bboxes, gt_labels, rcnn_train_cfg)
160
- loss_bbox = self.bbox_head[stage].loss(bbox_results['cls_score'],
161
- bbox_results['bbox_pred'], rois,
162
- *bbox_targets)
163
-
164
- bbox_results.update(
165
- loss_bbox=loss_bbox, rois=rois, bbox_targets=bbox_targets)
166
- return bbox_results
167
-
168
- def _mask_forward(self, stage, x, rois):
169
- """Mask head forward function used in both training and testing."""
170
- mask_roi_extractor = self.mask_roi_extractor[stage]
171
- mask_head = self.mask_head[stage]
172
- mask_feats = mask_roi_extractor(x[:mask_roi_extractor.num_inputs],
173
- rois)
174
- # do not support caffe_c4 model anymore
175
- mask_pred = mask_head(mask_feats)
176
-
177
- mask_results = dict(mask_pred=mask_pred)
178
- return mask_results
179
-
180
- def _mask_forward_train(self,
181
- stage,
182
- x,
183
- sampling_results,
184
- gt_masks,
185
- rcnn_train_cfg,
186
- bbox_feats=None):
187
- """Run forward function and calculate loss for mask head in
188
- training."""
189
- pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])
190
- mask_results = self._mask_forward(stage, x, pos_rois)
191
-
192
- mask_targets = self.mask_head[stage].get_targets(
193
- sampling_results, gt_masks, rcnn_train_cfg)
194
- pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
195
- loss_mask = self.mask_head[stage].loss(mask_results['mask_pred'],
196
- mask_targets, pos_labels)
197
-
198
- mask_results.update(loss_mask=loss_mask)
199
- return mask_results
200
-
201
- def forward_train(self,
202
- x,
203
- img_metas,
204
- proposal_list,
205
- gt_bboxes,
206
- gt_labels,
207
- gt_bboxes_ignore=None,
208
- gt_masks=None):
209
- """
210
- Args:
211
- x (list[Tensor]): list of multi-level img features.
212
- img_metas (list[dict]): list of image info dict where each dict
213
- has: 'img_shape', 'scale_factor', 'flip', and may also contain
214
- 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
215
- For details on the values of these keys see
216
- `mmdet/datasets/pipelines/formatting.py:Collect`.
217
- proposals (list[Tensors]): list of region proposals.
218
- gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
219
- shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
220
- gt_labels (list[Tensor]): class indices corresponding to each box
221
- gt_bboxes_ignore (None | list[Tensor]): specify which bounding
222
- boxes can be ignored when computing the loss.
223
- gt_masks (None | Tensor) : true segmentation masks for each box
224
- used if the architecture supports a segmentation task.
225
-
226
- Returns:
227
- dict[str, Tensor]: a dictionary of loss components
228
- """
229
- losses = dict()
230
- for i in range(self.num_stages):
231
- self.current_stage = i
232
- rcnn_train_cfg = self.train_cfg[i]
233
- lw = self.stage_loss_weights[i]
234
-
235
- # assign gts and sample proposals
236
- sampling_results = []
237
- if self.with_bbox or self.with_mask:
238
- bbox_assigner = self.bbox_assigner[i]
239
- bbox_sampler = self.bbox_sampler[i]
240
- num_imgs = len(img_metas)
241
- if gt_bboxes_ignore is None:
242
- gt_bboxes_ignore = [None for _ in range(num_imgs)]
243
-
244
- for j in range(num_imgs):
245
- assign_result = bbox_assigner.assign(
246
- proposal_list[j], gt_bboxes[j], gt_bboxes_ignore[j],
247
- gt_labels[j])
248
- sampling_result = bbox_sampler.sample(
249
- assign_result,
250
- proposal_list[j],
251
- gt_bboxes[j],
252
- gt_labels[j],
253
- feats=[lvl_feat[j][None] for lvl_feat in x])
254
- sampling_results.append(sampling_result)
255
-
256
- # bbox head forward and loss
257
- bbox_results = self._bbox_forward_train(i, x, sampling_results,
258
- gt_bboxes, gt_labels,
259
- rcnn_train_cfg)
260
-
261
- for name, value in bbox_results['loss_bbox'].items():
262
- losses[f's{i}.{name}'] = (
263
- value * lw if 'loss' in name else value)
264
-
265
- # mask head forward and loss
266
- if self.with_mask:
267
- mask_results = self._mask_forward_train(
268
- i, x, sampling_results, gt_masks, rcnn_train_cfg,
269
- bbox_results['bbox_feats'])
270
- for name, value in mask_results['loss_mask'].items():
271
- losses[f's{i}.{name}'] = (
272
- value * lw if 'loss' in name else value)
273
-
274
- # refine bboxes
275
- if i < self.num_stages - 1:
276
- pos_is_gts = [res.pos_is_gt for res in sampling_results]
277
- # bbox_targets is a tuple
278
- roi_labels = bbox_results['bbox_targets'][0]
279
- with torch.no_grad():
280
- roi_labels = torch.where(
281
- roi_labels == self.bbox_head[i].num_classes,
282
- bbox_results['cls_score'][:, :-1].argmax(1),
283
- roi_labels)
284
- proposal_list = self.bbox_head[i].refine_bboxes(
285
- bbox_results['rois'], roi_labels,
286
- bbox_results['bbox_pred'], pos_is_gts, img_metas)
287
-
288
- return losses
289
-
290
- def simple_test(self, x, proposal_list, img_metas, rescale=False):
291
- """Test without augmentation."""
292
- assert self.with_bbox, 'Bbox head must be implemented.'
293
- num_imgs = len(proposal_list)
294
- img_shapes = tuple(meta['img_shape'] for meta in img_metas)
295
- ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)
296
- scale_factors = tuple(meta['scale_factor'] for meta in img_metas)
297
-
298
- # "ms" in variable names means multi-stage
299
- ms_bbox_result = {}
300
- ms_segm_result = {}
301
- ms_scores = []
302
- rcnn_test_cfg = self.test_cfg
303
-
304
- rois = bbox2roi(proposal_list)
305
- for i in range(self.num_stages):
306
- bbox_results = self._bbox_forward(i, x, rois)
307
-
308
- # split batch bbox prediction back to each image
309
- cls_score = bbox_results['cls_score']
310
- bbox_pred = bbox_results['bbox_pred']
311
- num_proposals_per_img = tuple(
312
- len(proposals) for proposals in proposal_list)
313
- rois = rois.split(num_proposals_per_img, 0)
314
- cls_score = cls_score.split(num_proposals_per_img, 0)
315
- if isinstance(bbox_pred, torch.Tensor):
316
- bbox_pred = bbox_pred.split(num_proposals_per_img, 0)
317
- else:
318
- bbox_pred = self.bbox_head[i].bbox_pred_split(
319
- bbox_pred, num_proposals_per_img)
320
- ms_scores.append(cls_score)
321
-
322
- if i < self.num_stages - 1:
323
- bbox_label = [s[:, :-1].argmax(dim=1) for s in cls_score]
324
- rois = torch.cat([
325
- self.bbox_head[i].regress_by_class(rois[j], bbox_label[j],
326
- bbox_pred[j],
327
- img_metas[j])
328
- for j in range(num_imgs)
329
- ])
330
-
331
- # average scores of each image by stages
332
- cls_score = [
333
- sum([score[i] for score in ms_scores]) / float(len(ms_scores))
334
- for i in range(num_imgs)
335
- ]
336
-
337
- # apply bbox post-processing to each image individually
338
- det_bboxes = []
339
- det_labels = []
340
- for i in range(num_imgs):
341
- det_bbox, det_label = self.bbox_head[-1].get_bboxes(
342
- rois[i],
343
- cls_score[i],
344
- bbox_pred[i],
345
- img_shapes[i],
346
- scale_factors[i],
347
- rescale=rescale,
348
- cfg=rcnn_test_cfg)
349
- det_bboxes.append(det_bbox)
350
- det_labels.append(det_label)
351
-
352
- if torch.onnx.is_in_onnx_export():
353
- return det_bboxes, det_labels
354
- bbox_results = [
355
- bbox2result(det_bboxes[i], det_labels[i],
356
- self.bbox_head[-1].num_classes)
357
- for i in range(num_imgs)
358
- ]
359
- ms_bbox_result['ensemble'] = bbox_results
360
-
361
- if self.with_mask:
362
- if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):
363
- mask_classes = self.mask_head[-1].num_classes
364
- segm_results = [[[] for _ in range(mask_classes)]
365
- for _ in range(num_imgs)]
366
- else:
367
- if rescale and not isinstance(scale_factors[0], float):
368
- scale_factors = [
369
- torch.from_numpy(scale_factor).to(det_bboxes[0].device)
370
- for scale_factor in scale_factors
371
- ]
372
- _bboxes = [
373
- det_bboxes[i][:, :4] *
374
- scale_factors[i] if rescale else det_bboxes[i][:, :4]
375
- for i in range(len(det_bboxes))
376
- ]
377
- mask_rois = bbox2roi(_bboxes)
378
- num_mask_rois_per_img = tuple(
379
- _bbox.size(0) for _bbox in _bboxes)
380
- aug_masks = []
381
- for i in range(self.num_stages):
382
- mask_results = self._mask_forward(i, x, mask_rois)
383
- mask_pred = mask_results['mask_pred']
384
- # split batch mask prediction back to each image
385
- mask_pred = mask_pred.split(num_mask_rois_per_img, 0)
386
- aug_masks.append(
387
- [m.sigmoid().cpu().numpy() for m in mask_pred])
388
-
389
- # apply mask post-processing to each image individually
390
- segm_results = []
391
- for i in range(num_imgs):
392
- if det_bboxes[i].shape[0] == 0:
393
- segm_results.append(
394
- [[]
395
- for _ in range(self.mask_head[-1].num_classes)])
396
- else:
397
- aug_mask = [mask[i] for mask in aug_masks]
398
- merged_masks = merge_aug_masks(
399
- aug_mask, [[img_metas[i]]] * self.num_stages,
400
- rcnn_test_cfg)
401
- segm_result = self.mask_head[-1].get_seg_masks(
402
- merged_masks, _bboxes[i], det_labels[i],
403
- rcnn_test_cfg, ori_shapes[i], scale_factors[i],
404
- rescale)
405
- segm_results.append(segm_result)
406
- ms_segm_result['ensemble'] = segm_results
407
-
408
- if self.with_mask:
409
- results = list(
410
- zip(ms_bbox_result['ensemble'], ms_segm_result['ensemble']))
411
- else:
412
- results = ms_bbox_result['ensemble']
413
-
414
- return results
415
-
416
- def aug_test(self, features, proposal_list, img_metas, rescale=False):
417
- """Test with augmentations.
418
-
419
- If rescale is False, then returned bboxes and masks will fit the scale
420
- of imgs[0].
421
- """
422
- rcnn_test_cfg = self.test_cfg
423
- aug_bboxes = []
424
- aug_scores = []
425
- for x, img_meta in zip(features, img_metas):
426
- # only one image in the batch
427
- img_shape = img_meta[0]['img_shape']
428
- scale_factor = img_meta[0]['scale_factor']
429
- flip = img_meta[0]['flip']
430
- flip_direction = img_meta[0]['flip_direction']
431
-
432
- proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,
433
- scale_factor, flip, flip_direction)
434
- # "ms" in variable names means multi-stage
435
- ms_scores = []
436
-
437
- rois = bbox2roi([proposals])
438
- for i in range(self.num_stages):
439
- bbox_results = self._bbox_forward(i, x, rois)
440
- ms_scores.append(bbox_results['cls_score'])
441
-
442
- if i < self.num_stages - 1:
443
- bbox_label = bbox_results['cls_score'][:, :-1].argmax(
444
- dim=1)
445
- rois = self.bbox_head[i].regress_by_class(
446
- rois, bbox_label, bbox_results['bbox_pred'],
447
- img_meta[0])
448
-
449
- cls_score = sum(ms_scores) / float(len(ms_scores))
450
- bboxes, scores = self.bbox_head[-1].get_bboxes(
451
- rois,
452
- cls_score,
453
- bbox_results['bbox_pred'],
454
- img_shape,
455
- scale_factor,
456
- rescale=False,
457
- cfg=None)
458
- aug_bboxes.append(bboxes)
459
- aug_scores.append(scores)
460
-
461
- # after merging, bboxes will be rescaled to the original image size
462
- merged_bboxes, merged_scores = merge_aug_bboxes(
463
- aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)
464
- det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores,
465
- rcnn_test_cfg.score_thr,
466
- rcnn_test_cfg.nms,
467
- rcnn_test_cfg.max_per_img)
468
-
469
- bbox_result = bbox2result(det_bboxes, det_labels,
470
- self.bbox_head[-1].num_classes)
471
-
472
- if self.with_mask:
473
- if det_bboxes.shape[0] == 0:
474
- segm_result = [[[]
475
- for _ in range(self.mask_head[-1].num_classes)]
476
- ]
477
- else:
478
- aug_masks = []
479
- aug_img_metas = []
480
- for x, img_meta in zip(features, img_metas):
481
- img_shape = img_meta[0]['img_shape']
482
- scale_factor = img_meta[0]['scale_factor']
483
- flip = img_meta[0]['flip']
484
- flip_direction = img_meta[0]['flip_direction']
485
- _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,
486
- scale_factor, flip, flip_direction)
487
- mask_rois = bbox2roi([_bboxes])
488
- for i in range(self.num_stages):
489
- mask_results = self._mask_forward(i, x, mask_rois)
490
- aug_masks.append(
491
- mask_results['mask_pred'].sigmoid().cpu().numpy())
492
- aug_img_metas.append(img_meta)
493
- merged_masks = merge_aug_masks(aug_masks, aug_img_metas,
494
- self.test_cfg)
495
-
496
- ori_shape = img_metas[0][0]['ori_shape']
497
- segm_result = self.mask_head[-1].get_seg_masks(
498
- merged_masks,
499
- det_bboxes,
500
- det_labels,
501
- rcnn_test_cfg,
502
- ori_shape,
503
- scale_factor=1.0,
504
- rescale=False)
505
- return [(bbox_result, segm_result)]
506
- else:
507
- return [bbox_result]
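For context, an illustrative fragment of the kind of `roi_head` config dict that instantiates this class through the HEADS registry. The three-stage loss weights mirror the common Cascade R-CNN setup; the nested extractor and head configs are plausible placeholders, not copied from a specific config in this repo.

# Illustrative config fragment (a plain dict, built later by the registry).
roi_head = dict(
    type='CascadeRoIHead',
    num_stages=3,
    stage_loss_weights=[1, 0.5, 0.25],
    bbox_roi_extractor=dict(
        type='SingleRoIExtractor',
        roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
        out_channels=256,
        featmap_strides=[4, 8, 16, 32]),
    # One bbox head per stage; a real config would vary the regression stds per stage.
    bbox_head=[
        dict(type='Shared2FCBBoxHead', in_channels=256, fc_out_channels=1024,
             roi_feat_size=7, num_classes=80)
        for _ in range(3)
    ])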
 
spaces/Andy1621/uniformer_image_segmentation/configs/ann/ann_r101-d8_512x1024_80k_cityscapes.py DELETED
@@ -1,2 +0,0 @@
1
- _base_ = './ann_r50-d8_512x1024_80k_cityscapes.py'
2
- model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
 
 
 
spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_r101-d8_512x1024_80k_cityscapes.py DELETED
@@ -1,2 +0,0 @@
1
- _base_ = './fcn_r50-d8_512x1024_80k_cityscapes.py'
2
- model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
 
 
 
spaces/AngoHF/ANGO-Leaderboard/assets/path.py DELETED
@@ -1,4 +0,0 @@
1
- SEASON = {
2
- "latest": "202309",
3
- "2023-09": "202309"
4
- }
 
 
 
 
 
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/openai/script.py DELETED
@@ -1,339 +0,0 @@
1
- import json
2
- import os
3
- import traceback
4
- from http.server import BaseHTTPRequestHandler, ThreadingHTTPServer
5
- from threading import Thread
6
-
7
- import extensions.openai.completions as OAIcompletions
8
- import extensions.openai.edits as OAIedits
9
- import extensions.openai.embeddings as OAIembeddings
10
- import extensions.openai.images as OAIimages
11
- import extensions.openai.models as OAImodels
12
- import extensions.openai.moderations as OAImoderations
13
- from extensions.openai.defaults import clamp, default, get_default_req_params
14
- from extensions.openai.errors import (
15
- InvalidRequestError,
16
- OpenAIError,
17
- ServiceUnavailableError
18
- )
19
- from extensions.openai.tokens import token_count, token_decode, token_encode
20
- from extensions.openai.utils import debug_msg
21
- from modules import shared
22
-
23
- import cgi
24
- import speech_recognition as sr
25
- from pydub import AudioSegment
26
-
27
- params = {
28
- # default params
29
- 'port': 5001,
30
- 'embedding_device': 'cpu',
31
- 'embedding_model': 'all-mpnet-base-v2',
32
-
33
- # optional params
34
- 'sd_webui_url': '',
35
- 'debug': 0
36
- }
37
-
38
- class Handler(BaseHTTPRequestHandler):
39
- def send_access_control_headers(self):
40
- self.send_header("Access-Control-Allow-Origin", "*")
41
- self.send_header("Access-Control-Allow-Credentials", "true")
42
- self.send_header(
43
- "Access-Control-Allow-Methods",
44
- "GET,HEAD,OPTIONS,POST,PUT"
45
- )
46
- self.send_header(
47
- "Access-Control-Allow-Headers",
48
- "Origin, Accept, X-Requested-With, Content-Type, "
49
- "Access-Control-Request-Method, Access-Control-Request-Headers, "
50
- "Authorization"
51
- )
52
-
53
- def do_OPTIONS(self):
54
- self.send_response(200)
55
- self.send_access_control_headers()
56
- self.send_header('Content-Type', 'application/json')
57
- self.end_headers()
58
- self.wfile.write("OK".encode('utf-8'))
59
-
60
- def start_sse(self):
61
- self.send_response(200)
62
- self.send_access_control_headers()
63
- self.send_header('Content-Type', 'text/event-stream')
64
- self.send_header('Cache-Control', 'no-cache')
65
- # self.send_header('Connection', 'keep-alive')
66
- self.end_headers()
67
-
68
- def send_sse(self, chunk: dict):
69
- response = 'data: ' + json.dumps(chunk) + '\r\n\r\n'
70
- debug_msg(response[:-4])
71
- self.wfile.write(response.encode('utf-8'))
72
-
73
- def end_sse(self):
74
- response = 'data: [DONE]\r\n\r\n'
75
- debug_msg(response[:-4])
76
- self.wfile.write(response.encode('utf-8'))
77
-
78
- def return_json(self, ret: dict, code: int = 200, no_debug=False):
79
- self.send_response(code)
80
- self.send_access_control_headers()
81
- self.send_header('Content-Type', 'application/json')
82
-
83
- response = json.dumps(ret)
84
- r_utf8 = response.encode('utf-8')
85
-
86
- self.send_header('Content-Length', str(len(r_utf8)))
87
- self.end_headers()
88
-
89
- self.wfile.write(r_utf8)
90
- if not no_debug:
91
- debug_msg(r_utf8)
92
-
93
- def openai_error(self, message, code=500, error_type='APIError', param='', internal_message=''):
94
-
95
- error_resp = {
96
- 'error': {
97
- 'message': message,
98
- 'code': code,
99
- 'type': error_type,
100
- 'param': param,
101
- }
102
- }
103
- if internal_message:
104
- print(error_type, message)
105
- print(internal_message)
106
- # error_resp['internal_message'] = internal_message
107
-
108
- self.return_json(error_resp, code)
109
-
110
- def openai_error_handler(func):
111
- def wrapper(self):
112
- try:
113
- func(self)
114
- except InvalidRequestError as e:
115
- self.openai_error(e.message, e.code, e.__class__.__name__, e.param, internal_message=e.internal_message)
116
- except OpenAIError as e:
117
- self.openai_error(e.message, e.code, e.__class__.__name__, internal_message=e.internal_message)
118
- except Exception as e:
119
- self.openai_error(repr(e), 500, 'OpenAIError', internal_message=traceback.format_exc())
120
-
121
- return wrapper
122
-
123
- @openai_error_handler
124
- def do_GET(self):
125
- debug_msg(self.requestline)
126
- debug_msg(self.headers)
127
-
128
- if self.path.startswith('/v1/engines') or self.path.startswith('/v1/models'):
129
- is_legacy = 'engines' in self.path
130
- is_list = self.path in ['/v1/engines', '/v1/models']
131
- if is_legacy and not is_list:
132
- model_name = self.path[self.path.find('/v1/engines/') + len('/v1/engines/'):]
133
- resp = OAImodels.load_model(model_name)
134
- elif is_list:
135
- resp = OAImodels.list_models(is_legacy)
136
- else:
137
- model_name = self.path[len('/v1/models/'):]
138
- resp = OAImodels.model_info(model_name)
139
-
140
- self.return_json(resp)
141
-
142
- elif '/billing/usage' in self.path:
143
- # Ex. /v1/dashboard/billing/usage?start_date=2023-05-01&end_date=2023-05-31
144
- self.return_json({"total_usage": 0}, no_debug=True)
145
-
146
- else:
147
- self.send_error(404)
148
-
149
- @openai_error_handler
150
- def do_POST(self):
151
-
152
- if '/v1/audio/transcriptions' in self.path:
153
- r = sr.Recognizer()
154
-
155
- # Parse the form data
156
- form = cgi.FieldStorage(
157
- fp=self.rfile,
158
- headers=self.headers,
159
- environ={'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': self.headers['Content-Type']}
160
- )
161
-
162
- audio_file = form['file'].file
163
- audio_data = AudioSegment.from_file(audio_file)
164
-
165
- # Convert AudioSegment to raw data
166
- raw_data = audio_data.raw_data
167
-
168
- # Create AudioData object
169
- audio_data = sr.AudioData(raw_data, audio_data.frame_rate, audio_data.sample_width)
170
- whipser_language = form.getvalue('language', None)
171
- whipser_model = form.getvalue('model', 'tiny') # Use the model from the form data if it exists, otherwise default to tiny
172
-
173
- transcription = {"text": ""}
174
-
175
- try:
176
- transcription["text"] = r.recognize_whisper(audio_data, language=whipser_language, model=whipser_model)
177
- except sr.UnknownValueError:
178
- print("Whisper could not understand audio")
179
- transcription["text"] = "Whisper could not understand audio UnknownValueError"
180
- except sr.RequestError as e:
181
- print("Could not request results from Whisper", e)
182
- transcription["text"] = "Whisper could not understand audio RequestError"
183
-
184
- self.return_json(transcription, no_debug=True)
185
- return
186
-
187
- debug_msg(self.requestline)
188
- debug_msg(self.headers)
189
-
190
- content_length = self.headers.get('Content-Length')
191
- transfer_encoding = self.headers.get('Transfer-Encoding')
192
-
193
- if content_length:
194
- body = json.loads(self.rfile.read(int(content_length)).decode('utf-8'))
195
- elif transfer_encoding == 'chunked':
196
- chunks = []
197
- while True:
198
- chunk_size = int(self.rfile.readline(), 16) # Read the chunk size
199
- if chunk_size == 0:
200
- break # End of chunks
201
- chunks.append(self.rfile.read(chunk_size))
202
- self.rfile.readline() # Consume the trailing newline after each chunk
203
- body = json.loads(b''.join(chunks).decode('utf-8'))
204
- else:
205
- self.send_response(400, "Bad Request: Either Content-Length or Transfer-Encoding header expected.")
206
- self.end_headers()
207
- return
208
-
209
- debug_msg(body)
210
-
211
- if '/completions' in self.path or '/generate' in self.path:
212
-
213
- if not shared.model:
214
- raise ServiceUnavailableError("No model loaded.")
215
-
216
- is_legacy = '/generate' in self.path
217
- is_streaming = body.get('stream', False)
218
-
219
- if is_streaming:
220
- self.start_sse()
221
-
222
- response = []
223
- if 'chat' in self.path:
224
- response = OAIcompletions.stream_chat_completions(body, is_legacy=is_legacy)
225
- else:
226
- response = OAIcompletions.stream_completions(body, is_legacy=is_legacy)
227
-
228
- for resp in response:
229
- self.send_sse(resp)
230
-
231
- self.end_sse()
232
-
233
- else:
234
- response = ''
235
- if 'chat' in self.path:
236
- response = OAIcompletions.chat_completions(body, is_legacy=is_legacy)
237
- else:
238
- response = OAIcompletions.completions(body, is_legacy=is_legacy)
239
-
240
- self.return_json(response)
241
-
242
- elif '/edits' in self.path:
243
- # deprecated
244
-
245
- if not shared.model:
246
- raise ServiceUnavailableError("No model loaded.")
247
-
248
- req_params = get_default_req_params()
249
-
250
- instruction = body['instruction']
251
- input = body.get('input', '')
252
- temperature = clamp(default(body, 'temperature', req_params['temperature']), 0.001, 1.999) # fixup absolute 0.0
253
- top_p = clamp(default(body, 'top_p', req_params['top_p']), 0.001, 1.0)
254
-
255
- response = OAIedits.edits(instruction, input, temperature, top_p)
256
-
257
- self.return_json(response)
258
-
259
- elif '/images/generations' in self.path:
260
- if not os.environ.get('SD_WEBUI_URL', params.get('sd_webui_url', '')):
261
- raise ServiceUnavailableError("Stable Diffusion not available. SD_WEBUI_URL not set.")
262
-
263
- prompt = body['prompt']
264
- size = default(body, 'size', '1024x1024')
265
- response_format = default(body, 'response_format', 'url') # or b64_json
266
- n = default(body, 'n', 1) # ignore the batch limits of max 10
267
-
268
- response = OAIimages.generations(prompt=prompt, size=size, response_format=response_format, n=n)
269
-
270
- self.return_json(response, no_debug=True)
271
-
272
- elif '/embeddings' in self.path:
273
- encoding_format = body.get('encoding_format', '')
274
-
275
- input = body.get('input', body.get('text', ''))
276
- if not input:
277
- raise InvalidRequestError("Missing required argument input", params='input')
278
-
279
- if type(input) is str:
280
- input = [input]
281
-
282
- response = OAIembeddings.embeddings(input, encoding_format)
283
-
284
- self.return_json(response, no_debug=True)
285
-
286
- elif '/moderations' in self.path:
287
- input = body['input']
288
- if not input:
289
- raise InvalidRequestError("Missing required argument input", params='input')
290
-
291
- response = OAImoderations.moderations(input)
292
-
293
- self.return_json(response, no_debug=True)
294
-
295
- elif self.path == '/api/v1/token-count':
296
- # NOT STANDARD. lifted from the api extension, but it's still very useful to calculate tokenized length client side.
297
- response = token_count(body['prompt'])
298
-
299
- self.return_json(response, no_debug=True)
300
-
301
- elif self.path == '/api/v1/token/encode':
302
- # NOT STANDARD. needed to support logit_bias, logprobs and token arrays for native models
303
- encoding_format = body.get('encoding_format', '')
304
-
305
- response = token_encode(body['input'], encoding_format)
306
-
307
- self.return_json(response, no_debug=True)
308
-
309
- elif self.path == '/api/v1/token/decode':
310
- # NOT STANDARD. needed to support logit_bias, logprobs and token arrays for native models
311
- encoding_format = body.get('encoding_format', '')
312
-
313
- response = token_decode(body['input'], encoding_format)
314
-
315
- self.return_json(response, no_debug=True)
316
-
317
- else:
318
- self.send_error(404)
319
-
320
-
321
- def run_server():
322
- port = int(os.environ.get('OPENEDAI_PORT', params.get('port', 5001)))
323
- server_addr = ('0.0.0.0' if shared.args.listen else '127.0.0.1', port)
324
- server = ThreadingHTTPServer(server_addr, Handler)
325
- if shared.args.share:
326
- try:
327
- from flask_cloudflared import _run_cloudflared
328
- public_url = _run_cloudflared(port, port + 1)
329
- print(f'OpenAI compatible API ready at: OPENAI_API_BASE={public_url}/v1')
330
- except ImportError:
331
- print('You should install flask_cloudflared manually')
332
- else:
333
- print(f'OpenAI compatible API ready at: OPENAI_API_BASE=http://{server_addr[0]}:{server_addr[1]}/v1')
334
-
335
- server.serve_forever()
336
-
337
-
338
- def setup():
339
- Thread(target=run_server, daemon=True).start()
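A quick sketch of exercising the server above once setup() has started it. It assumes the default port of 5001, a model already loaded in the web UI, and the requests package; the prompt text is arbitrary.

import requests

BASE = 'http://127.0.0.1:5001/v1'

# Chat-style completion against the OpenAI-compatible endpoint served above.
resp = requests.post(
    f'{BASE}/chat/completions',
    json={
        'messages': [{'role': 'user', 'content': 'Say hello in one sentence.'}],
        'max_tokens': 64,
        'stream': False,
    },
    timeout=120,
)
print(resp.json())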
 
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/datasets/builder.py DELETED
@@ -1,169 +0,0 @@
1
- import copy
2
- import platform
3
- import random
4
- from functools import partial
5
-
6
- import numpy as np
7
- from annotator.uniformer.mmcv.parallel import collate
8
- from annotator.uniformer.mmcv.runner import get_dist_info
9
- from annotator.uniformer.mmcv.utils import Registry, build_from_cfg
10
- from annotator.uniformer.mmcv.utils.parrots_wrapper import DataLoader, PoolDataLoader
11
- from torch.utils.data import DistributedSampler
12
-
13
- if platform.system() != 'Windows':
14
- # https://github.com/pytorch/pytorch/issues/973
15
- import resource
16
- rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
17
- hard_limit = rlimit[1]
18
- soft_limit = min(4096, hard_limit)
19
- resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))
20
-
21
- DATASETS = Registry('dataset')
22
- PIPELINES = Registry('pipeline')
23
-
24
-
25
- def _concat_dataset(cfg, default_args=None):
26
- """Build :obj:`ConcatDataset by."""
27
- from .dataset_wrappers import ConcatDataset
28
- img_dir = cfg['img_dir']
29
- ann_dir = cfg.get('ann_dir', None)
30
- split = cfg.get('split', None)
31
- num_img_dir = len(img_dir) if isinstance(img_dir, (list, tuple)) else 1
32
- if ann_dir is not None:
33
- num_ann_dir = len(ann_dir) if isinstance(ann_dir, (list, tuple)) else 1
34
- else:
35
- num_ann_dir = 0
36
- if split is not None:
37
- num_split = len(split) if isinstance(split, (list, tuple)) else 1
38
- else:
39
- num_split = 0
40
- if num_img_dir > 1:
41
- assert num_img_dir == num_ann_dir or num_ann_dir == 0
42
- assert num_img_dir == num_split or num_split == 0
43
- else:
44
- assert num_split == num_ann_dir or num_ann_dir <= 1
45
- num_dset = max(num_split, num_img_dir)
46
-
47
- datasets = []
48
- for i in range(num_dset):
49
- data_cfg = copy.deepcopy(cfg)
50
- if isinstance(img_dir, (list, tuple)):
51
- data_cfg['img_dir'] = img_dir[i]
52
- if isinstance(ann_dir, (list, tuple)):
53
- data_cfg['ann_dir'] = ann_dir[i]
54
- if isinstance(split, (list, tuple)):
55
- data_cfg['split'] = split[i]
56
- datasets.append(build_dataset(data_cfg, default_args))
57
-
58
- return ConcatDataset(datasets)
59
-
60
-
61
- def build_dataset(cfg, default_args=None):
62
- """Build datasets."""
63
- from .dataset_wrappers import ConcatDataset, RepeatDataset
64
- if isinstance(cfg, (list, tuple)):
65
- dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg])
66
- elif cfg['type'] == 'RepeatDataset':
67
- dataset = RepeatDataset(
68
- build_dataset(cfg['dataset'], default_args), cfg['times'])
69
- elif isinstance(cfg.get('img_dir'), (list, tuple)) or isinstance(
70
- cfg.get('split', None), (list, tuple)):
71
- dataset = _concat_dataset(cfg, default_args)
72
- else:
73
- dataset = build_from_cfg(cfg, DATASETS, default_args)
74
-
75
- return dataset
76
-
77
-
78
- def build_dataloader(dataset,
79
- samples_per_gpu,
80
- workers_per_gpu,
81
- num_gpus=1,
82
- dist=True,
83
- shuffle=True,
84
- seed=None,
85
- drop_last=False,
86
- pin_memory=True,
87
- dataloader_type='PoolDataLoader',
88
- **kwargs):
89
- """Build PyTorch DataLoader.
90
-
91
- In distributed training, each GPU/process has a dataloader.
92
- In non-distributed training, there is only one dataloader for all GPUs.
93
-
94
- Args:
95
- dataset (Dataset): A PyTorch dataset.
96
- samples_per_gpu (int): Number of training samples on each GPU, i.e.,
97
- batch size of each GPU.
98
- workers_per_gpu (int): How many subprocesses to use for data loading
99
- for each GPU.
100
- num_gpus (int): Number of GPUs. Only used in non-distributed training.
101
- dist (bool): Distributed training/test or not. Default: True.
102
- shuffle (bool): Whether to shuffle the data at every epoch.
103
- Default: True.
104
- seed (int | None): Seed to be used. Default: None.
105
- drop_last (bool): Whether to drop the last incomplete batch in epoch.
106
- Default: False
107
- pin_memory (bool): Whether to use pin_memory in DataLoader.
108
- Default: True
109
- dataloader_type (str): Type of dataloader. Default: 'PoolDataLoader'
110
- kwargs: any keyword argument to be used to initialize DataLoader
111
-
112
- Returns:
113
- DataLoader: A PyTorch dataloader.
114
- """
115
- rank, world_size = get_dist_info()
116
- if dist:
117
- sampler = DistributedSampler(
118
- dataset, world_size, rank, shuffle=shuffle)
119
- shuffle = False
120
- batch_size = samples_per_gpu
121
- num_workers = workers_per_gpu
122
- else:
123
- sampler = None
124
- batch_size = num_gpus * samples_per_gpu
125
- num_workers = num_gpus * workers_per_gpu
126
-
127
- init_fn = partial(
128
- worker_init_fn, num_workers=num_workers, rank=rank,
129
- seed=seed) if seed is not None else None
130
-
131
- assert dataloader_type in (
132
- 'DataLoader',
133
- 'PoolDataLoader'), f'unsupported dataloader {dataloader_type}'
134
-
135
- if dataloader_type == 'PoolDataLoader':
136
- dataloader = PoolDataLoader
137
- elif dataloader_type == 'DataLoader':
138
- dataloader = DataLoader
139
-
140
- data_loader = dataloader(
141
- dataset,
142
- batch_size=batch_size,
143
- sampler=sampler,
144
- num_workers=num_workers,
145
- collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),
146
- pin_memory=pin_memory,
147
- shuffle=shuffle,
148
- worker_init_fn=init_fn,
149
- drop_last=drop_last,
150
- **kwargs)
151
-
152
- return data_loader
153
-
154
-
155
- def worker_init_fn(worker_id, num_workers, rank, seed):
156
- """Worker init func for dataloader.
157
-
158
- The seed of each worker equals to num_worker * rank + worker_id + user_seed
159
-
160
- Args:
161
- worker_id (int): Worker id.
162
- num_workers (int): Number of workers.
163
- rank (int): The rank of current process.
164
- seed (int): The random seed to use.
165
- """
166
-
167
- worker_seed = num_workers * rank + worker_id + seed
168
- np.random.seed(worker_seed)
169
- random.seed(worker_seed)
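A minimal sketch of the intended call pattern for the two builders above. The import path assumes the ControlNet repo root as the working directory; the dataset type, paths, and empty pipeline are placeholder assumptions, not values from this repo.

from annotator.uniformer.mmseg.datasets.builder import build_dataset, build_dataloader

# Placeholder dataset config -- any registered dataset type with its own
# img_dir/ann_dir/pipeline would do here.
dataset_cfg = dict(
    type='CityscapesDataset',
    data_root='data/cityscapes',
    img_dir='leftImg8bit/train',
    ann_dir='gtFine/train',
    pipeline=[])

dataset = build_dataset(dataset_cfg)
loader = build_dataloader(
    dataset,
    samples_per_gpu=2,
    workers_per_gpu=2,
    dist=False,          # single process: plain sampler, batch = num_gpus * samples_per_gpu
    shuffle=True,
    dataloader_type='DataLoader')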
 
spaces/Anonymous-sub/Rerender/ControlNet/tutorial_dataset_test.py DELETED
@@ -1,12 +0,0 @@
1
- from tutorial_dataset import MyDataset
2
-
3
- dataset = MyDataset()
4
- print(len(dataset))
5
-
6
- item = dataset[1234]
7
- jpg = item['jpg']
8
- txt = item['txt']
9
- hint = item['hint']
10
- print(txt)
11
- print(jpg.shape)
12
- print(hint.shape)
 
spaces/Anonymous-sub/Rerender/src/ddim_v_hacked.py DELETED
@@ -1,589 +0,0 @@
1
- """SAMPLING ONLY."""
2
-
3
- # CrossAttn precision handling
4
- import os
5
-
6
- import einops
7
- import numpy as np
8
- import torch
9
- from tqdm import tqdm
10
-
11
- from ControlNet.ldm.modules.diffusionmodules.util import (
12
- extract_into_tensor, make_ddim_sampling_parameters, make_ddim_timesteps,
13
- noise_like)
14
-
15
- _ATTN_PRECISION = os.environ.get('ATTN_PRECISION', 'fp32')
16
-
17
- device = 'cuda' if torch.cuda.is_available() else 'cpu'
18
-
19
-
20
- def register_attention_control(model, controller=None):
21
-
22
- def ca_forward(self, place_in_unet):
23
-
24
- def forward(x, context=None, mask=None):
25
- h = self.heads
26
-
27
- q = self.to_q(x)
28
- is_cross = context is not None
29
- context = context if is_cross else x
30
- context = controller(context, is_cross, place_in_unet)
31
-
32
- k = self.to_k(context)
33
- v = self.to_v(context)
34
-
35
- q, k, v = map(
36
- lambda t: einops.rearrange(t, 'b n (h d) -> (b h) n d', h=h),
37
- (q, k, v))
38
-
39
- # force cast to fp32 to avoid overflowing
40
- if _ATTN_PRECISION == 'fp32':
41
- with torch.autocast(enabled=False, device_type=device):
42
- q, k = q.float(), k.float()
43
- sim = torch.einsum('b i d, b j d -> b i j', q,
44
- k) * self.scale
45
- else:
46
- sim = torch.einsum('b i d, b j d -> b i j', q, k) * self.scale
47
-
48
- del q, k
49
-
50
- if mask is not None:
51
- mask = einops.rearrange(mask, 'b ... -> b (...)')
52
- max_neg_value = -torch.finfo(sim.dtype).max
53
- mask = einops.repeat(mask, 'b j -> (b h) () j', h=h)
54
- sim.masked_fill_(~mask, max_neg_value)
55
-
56
- # attention, what we cannot get enough of
57
- sim = sim.softmax(dim=-1)
58
-
59
- out = torch.einsum('b i j, b j d -> b i d', sim, v)
60
- out = einops.rearrange(out, '(b h) n d -> b n (h d)', h=h)
61
- return self.to_out(out)
62
-
63
- return forward
64
-
65
- class DummyController:
66
-
67
- def __call__(self, *args):
68
- return args[0]
69
-
70
- def __init__(self):
71
- self.cur_step = 0
72
-
73
- if controller is None:
74
- controller = DummyController()
75
-
76
- def register_recr(net_, place_in_unet):
77
- if net_.__class__.__name__ == 'CrossAttention':
78
- net_.forward = ca_forward(net_, place_in_unet)
79
- elif hasattr(net_, 'children'):
80
- for net__ in net_.children():
81
- register_recr(net__, place_in_unet)
82
-
83
- sub_nets = model.named_children()
84
- for net in sub_nets:
85
- if 'input_blocks' in net[0]:
86
- register_recr(net[1], 'down')
87
- elif 'output_blocks' in net[0]:
88
- register_recr(net[1], 'up')
89
- elif 'middle_block' in net[0]:
90
- register_recr(net[1], 'mid')
91
-
92
-
93
- class DDIMVSampler(object):
94
-
95
- def __init__(self, model, schedule='linear', **kwargs):
96
- super().__init__()
97
- self.model = model
98
- self.ddpm_num_timesteps = model.num_timesteps
99
- self.schedule = schedule
100
-
101
- def register_buffer(self, name, attr):
102
- if type(attr) == torch.Tensor:
103
- if attr.device != torch.device(device):
104
- attr = attr.to(torch.device(device))
105
- setattr(self, name, attr)
106
-
107
- def make_schedule(self,
108
- ddim_num_steps,
109
- ddim_discretize='uniform',
110
- ddim_eta=0.,
111
- verbose=True):
112
- self.ddim_timesteps = make_ddim_timesteps(
113
- ddim_discr_method=ddim_discretize,
114
- num_ddim_timesteps=ddim_num_steps,
115
- num_ddpm_timesteps=self.ddpm_num_timesteps,
116
- verbose=verbose)
117
- alphas_cumprod = self.model.alphas_cumprod
118
- assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, \
119
- 'alphas have to be defined for each timestep'
120
-
121
- def to_torch(x):
122
- return x.clone().detach().to(torch.float32).to(self.model.device)
123
-
124
- self.register_buffer('betas', to_torch(self.model.betas))
125
- self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
126
- self.register_buffer('alphas_cumprod_prev',
127
- to_torch(self.model.alphas_cumprod_prev))
128
-
129
- # calculations for diffusion q(x_t | x_{t-1}) and others
130
- self.register_buffer('sqrt_alphas_cumprod',
131
- to_torch(np.sqrt(alphas_cumprod.cpu())))
132
- self.register_buffer('sqrt_one_minus_alphas_cumprod',
133
- to_torch(np.sqrt(1. - alphas_cumprod.cpu())))
134
- self.register_buffer('log_one_minus_alphas_cumprod',
135
- to_torch(np.log(1. - alphas_cumprod.cpu())))
136
- self.register_buffer('sqrt_recip_alphas_cumprod',
137
- to_torch(np.sqrt(1. / alphas_cumprod.cpu())))
138
- self.register_buffer('sqrt_recipm1_alphas_cumprod',
139
- to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))
140
-
141
- # ddim sampling parameters
142
- ddim_sigmas, ddim_alphas, ddim_alphas_prev = \
143
- make_ddim_sampling_parameters(
144
- alphacums=alphas_cumprod.cpu(),
145
- ddim_timesteps=self.ddim_timesteps,
146
- eta=ddim_eta,
147
- verbose=verbose)
148
- self.register_buffer('ddim_sigmas', ddim_sigmas)
149
- self.register_buffer('ddim_alphas', ddim_alphas)
150
- self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
151
- self.register_buffer('ddim_sqrt_one_minus_alphas',
152
- np.sqrt(1. - ddim_alphas))
153
- sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(
154
- (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) *
155
- (1 - self.alphas_cumprod / self.alphas_cumprod_prev))
156
- self.register_buffer('ddim_sigmas_for_original_num_steps',
157
- sigmas_for_original_sampling_steps)
158
-
159
- @torch.no_grad()
160
- def sample(self,
161
- S,
162
- batch_size,
163
- shape,
164
- conditioning=None,
165
- callback=None,
166
- img_callback=None,
167
- quantize_x0=False,
168
- eta=0.,
169
- mask=None,
170
- x0=None,
171
- xtrg=None,
172
- noise_rescale=None,
173
- temperature=1.,
174
- noise_dropout=0.,
175
- score_corrector=None,
176
- corrector_kwargs=None,
177
- verbose=True,
178
- x_T=None,
179
- log_every_t=100,
180
- unconditional_guidance_scale=1.,
181
- unconditional_conditioning=None,
182
- dynamic_threshold=None,
183
- ucg_schedule=None,
184
- controller=None,
185
- strength=0.0,
186
- **kwargs):
187
- if conditioning is not None:
188
- if isinstance(conditioning, dict):
189
- ctmp = conditioning[list(conditioning.keys())[0]]
190
- while isinstance(ctmp, list):
191
- ctmp = ctmp[0]
192
- cbs = ctmp.shape[0]
193
- if cbs != batch_size:
194
- print(f'Warning: Got {cbs} conditionings'
195
- f'but batch-size is {batch_size}')
196
-
197
- elif isinstance(conditioning, list):
198
- for ctmp in conditioning:
199
- if ctmp.shape[0] != batch_size:
200
- print(f'Warning: Got {cbs} conditionings'
201
- f'but batch-size is {batch_size}')
202
-
203
- else:
204
- if conditioning.shape[0] != batch_size:
205
- print(f'Warning: Got {conditioning.shape[0]}'
206
- f'conditionings but batch-size is {batch_size}')
207
-
208
- self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
209
- # sampling
210
- C, H, W = shape
211
- size = (batch_size, C, H, W)
212
- print(f'Data shape for DDIM sampling is {size}, eta {eta}')
213
-
214
- samples, intermediates = self.ddim_sampling(
215
- conditioning,
216
- size,
217
- callback=callback,
218
- img_callback=img_callback,
219
- quantize_denoised=quantize_x0,
220
- mask=mask,
221
- x0=x0,
222
- xtrg=xtrg,
223
- noise_rescale=noise_rescale,
224
- ddim_use_original_steps=False,
225
- noise_dropout=noise_dropout,
226
- temperature=temperature,
227
- score_corrector=score_corrector,
228
- corrector_kwargs=corrector_kwargs,
229
- x_T=x_T,
230
- log_every_t=log_every_t,
231
- unconditional_guidance_scale=unconditional_guidance_scale,
232
- unconditional_conditioning=unconditional_conditioning,
233
- dynamic_threshold=dynamic_threshold,
234
- ucg_schedule=ucg_schedule,
235
- controller=controller,
236
- strength=strength,
237
- )
238
- return samples, intermediates
239
-
240
- @torch.no_grad()
241
- def ddim_sampling(self,
242
- cond,
243
- shape,
244
- x_T=None,
245
- ddim_use_original_steps=False,
246
- callback=None,
247
- timesteps=None,
248
- quantize_denoised=False,
249
- mask=None,
250
- x0=None,
251
- xtrg=None,
252
- noise_rescale=None,
253
- img_callback=None,
254
- log_every_t=100,
255
- temperature=1.,
256
- noise_dropout=0.,
257
- score_corrector=None,
258
- corrector_kwargs=None,
259
- unconditional_guidance_scale=1.,
260
- unconditional_conditioning=None,
261
- dynamic_threshold=None,
262
- ucg_schedule=None,
263
- controller=None,
264
- strength=0.0):
265
-
266
- if strength == 1 and x0 is not None:
267
- return x0, None
268
-
269
- register_attention_control(self.model.model.diffusion_model,
270
- controller)
271
-
272
- device = self.model.betas.device
273
- b = shape[0]
274
- if x_T is None:
275
- img = torch.randn(shape, device=device)
276
- else:
277
- img = x_T
278
-
279
- if timesteps is None:
280
- timesteps = self.ddpm_num_timesteps if ddim_use_original_steps \
281
- else self.ddim_timesteps
282
- elif timesteps is not None and not ddim_use_original_steps:
283
- subset_end = int(
284
- min(timesteps / self.ddim_timesteps.shape[0], 1) *
285
- self.ddim_timesteps.shape[0]) - 1
286
- timesteps = self.ddim_timesteps[:subset_end]
287
-
288
- intermediates = {'x_inter': [img], 'pred_x0': [img]}
289
- time_range = reversed(range(
290
- 0, timesteps)) if ddim_use_original_steps else np.flip(timesteps)
291
- total_steps = timesteps if ddim_use_original_steps \
292
- else timesteps.shape[0]
293
- print(f'Running DDIM Sampling with {total_steps} timesteps')
294
-
295
- iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)
296
- if controller is not None:
297
- controller.set_total_step(total_steps)
298
- if mask is None:
299
- mask = [None] * total_steps
300
-
301
- dir_xt = 0
302
- for i, step in enumerate(iterator):
303
- if controller is not None:
304
- controller.set_step(i)
305
- index = total_steps - i - 1
306
- ts = torch.full((b, ), step, device=device, dtype=torch.long)
307
-
308
- if strength >= 0 and i == int(
309
- total_steps * strength) and x0 is not None:
310
- img = self.model.q_sample(x0, ts)
311
- if mask is not None and xtrg is not None:
312
- # TODO: deterministic forward pass?
313
- if type(mask) == list:
314
- weight = mask[i]
315
- else:
316
- weight = mask
317
- if weight is not None:
318
- rescale = torch.maximum(1. - weight, (1 - weight**2)**0.5 *
319
- controller.inner_strength)
320
- if noise_rescale is not None:
321
- rescale = (1. - weight) * (
322
- 1 - noise_rescale) + rescale * noise_rescale
323
- img_ref = self.model.q_sample(xtrg, ts)
324
- img = img_ref * weight + (1. - weight) * (
325
- img - dir_xt) + rescale * dir_xt
326
-
327
- if ucg_schedule is not None:
328
- assert len(ucg_schedule) == len(time_range)
329
- unconditional_guidance_scale = ucg_schedule[i]
330
-
331
- outs = self.p_sample_ddim(
332
- img,
333
- cond,
334
- ts,
335
- index=index,
336
- use_original_steps=ddim_use_original_steps,
337
- quantize_denoised=quantize_denoised,
338
- temperature=temperature,
339
- noise_dropout=noise_dropout,
340
- score_corrector=score_corrector,
341
- corrector_kwargs=corrector_kwargs,
342
- unconditional_guidance_scale=unconditional_guidance_scale,
343
- unconditional_conditioning=unconditional_conditioning,
344
- dynamic_threshold=dynamic_threshold,
345
- controller=controller,
346
- return_dir=True)
347
- img, pred_x0, dir_xt = outs
348
- if callback:
349
- callback(i)
350
- if img_callback:
351
- img_callback(pred_x0, i)
352
-
353
- if index % log_every_t == 0 or index == total_steps - 1:
354
- intermediates['x_inter'].append(img)
355
- intermediates['pred_x0'].append(pred_x0)
356
-
357
- return img, intermediates
358
-
359
- @torch.no_grad()
360
- def p_sample_ddim(self,
361
- x,
362
- c,
363
- t,
364
- index,
365
- repeat_noise=False,
366
- use_original_steps=False,
367
- quantize_denoised=False,
368
- temperature=1.,
369
- noise_dropout=0.,
370
- score_corrector=None,
371
- corrector_kwargs=None,
372
- unconditional_guidance_scale=1.,
373
- unconditional_conditioning=None,
374
- dynamic_threshold=None,
375
- controller=None,
376
- return_dir=False):
377
- b, *_, device = *x.shape, x.device
378
-
379
- if unconditional_conditioning is None or \
380
- unconditional_guidance_scale == 1.:
381
- model_output = self.model.apply_model(x, t, c)
382
- else:
383
- model_t = self.model.apply_model(x, t, c)
384
- model_uncond = self.model.apply_model(x, t,
385
- unconditional_conditioning)
386
- model_output = model_uncond + unconditional_guidance_scale * (
387
- model_t - model_uncond)
388
-
389
- if self.model.parameterization == 'v':
390
- e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)
391
- else:
392
- e_t = model_output
393
-
394
- if score_corrector is not None:
395
- assert self.model.parameterization == 'eps', 'not implemented'
396
- e_t = score_corrector.modify_score(self.model, e_t, x, t, c,
397
- **corrector_kwargs)
398
-
399
- if use_original_steps:
400
- alphas = self.model.alphas_cumprod
401
- alphas_prev = self.model.alphas_cumprod_prev
402
- sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod
403
- sigmas = self.model.ddim_sigmas_for_original_num_steps
404
- else:
405
- alphas = self.ddim_alphas
406
- alphas_prev = self.ddim_alphas_prev
407
- sqrt_one_minus_alphas = self.ddim_sqrt_one_minus_alphas
408
- sigmas = self.ddim_sigmas
409
-
410
- # select parameters corresponding to the currently considered timestep
411
- a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
412
- a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
413
- sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
414
- sqrt_one_minus_at = torch.full((b, 1, 1, 1),
415
- sqrt_one_minus_alphas[index],
416
- device=device)
417
-
418
- # current prediction for x_0
419
- if self.model.parameterization != 'v':
420
- pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
421
- else:
422
- pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)
423
-
424
- if quantize_denoised:
425
- pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)
426
-
427
- if dynamic_threshold is not None:
428
- raise NotImplementedError()
429
- '''
430
- if mask is not None and xtrg is not None:
431
- pred_x0 = xtrg * mask + (1. - mask) * pred_x0
432
- '''
433
-
434
- if controller is not None:
435
- pred_x0 = controller.update_x0(pred_x0)
436
-
437
- # direction pointing to x_t
438
- dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t
439
- noise = sigma_t * noise_like(x.shape, device,
440
- repeat_noise) * temperature
441
- if noise_dropout > 0.:
442
- noise = torch.nn.functional.dropout(noise, p=noise_dropout)
443
- x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
444
-
445
- if return_dir:
446
- return x_prev, pred_x0, dir_xt
447
- return x_prev, pred_x0
448
-
449
- @torch.no_grad()
450
- def encode(self,
451
- x0,
452
- c,
453
- t_enc,
454
- use_original_steps=False,
455
- return_intermediates=None,
456
- unconditional_guidance_scale=1.0,
457
- unconditional_conditioning=None,
458
- callback=None):
459
- timesteps = np.arange(self.ddpm_num_timesteps
460
- ) if use_original_steps else self.ddim_timesteps
461
- num_reference_steps = timesteps.shape[0]
462
-
463
- assert t_enc <= num_reference_steps
464
- num_steps = t_enc
465
-
466
- if use_original_steps:
467
- alphas_next = self.alphas_cumprod[:num_steps]
468
- alphas = self.alphas_cumprod_prev[:num_steps]
469
- else:
470
- alphas_next = self.ddim_alphas[:num_steps]
471
- alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])
472
-
473
- x_next = x0
474
- intermediates = []
475
- inter_steps = []
476
- for i in tqdm(range(num_steps), desc='Encoding Image'):
477
- t = torch.full((x0.shape[0], ),
478
- timesteps[i],
479
- device=self.model.device,
480
- dtype=torch.long)
481
- if unconditional_guidance_scale == 1.:
482
- noise_pred = self.model.apply_model(x_next, t, c)
483
- else:
484
- assert unconditional_conditioning is not None
485
- e_t_uncond, noise_pred = torch.chunk(
486
- self.model.apply_model(
487
- torch.cat((x_next, x_next)), torch.cat((t, t)),
488
- torch.cat((unconditional_conditioning, c))), 2)
489
- noise_pred = e_t_uncond + unconditional_guidance_scale * (
490
- noise_pred - e_t_uncond)
491
- xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next
492
- weighted_noise_pred = alphas_next[i].sqrt() * (
493
- (1 / alphas_next[i] - 1).sqrt() -
494
- (1 / alphas[i] - 1).sqrt()) * noise_pred
495
- x_next = xt_weighted + weighted_noise_pred
496
- if return_intermediates and i % (num_steps // return_intermediates
497
- ) == 0 and i < num_steps - 1:
498
- intermediates.append(x_next)
499
- inter_steps.append(i)
500
- elif return_intermediates and i >= num_steps - 2:
501
- intermediates.append(x_next)
502
- inter_steps.append(i)
503
- if callback:
504
- callback(i)
505
-
506
- out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}
507
- if return_intermediates:
508
- out.update({'intermediates': intermediates})
509
- return x_next, out
510
-
511
- @torch.no_grad()
512
- def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):
513
- # fast, but does not allow for exact reconstruction
514
- # t serves as an index to gather the correct alphas
515
- if use_original_steps:
516
- sqrt_alphas_cumprod = self.sqrt_alphas_cumprod
517
- sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod
518
- else:
519
- sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)
520
- sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas
521
-
522
- if noise is None:
523
- noise = torch.randn_like(x0)
524
- if t >= len(sqrt_alphas_cumprod):
525
- return noise
526
- return (
527
- extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +
528
- extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) *
529
- noise)
530
-
531
- @torch.no_grad()
532
- def decode(self,
533
- x_latent,
534
- cond,
535
- t_start,
536
- unconditional_guidance_scale=1.0,
537
- unconditional_conditioning=None,
538
- use_original_steps=False,
539
- callback=None):
540
-
541
- timesteps = np.arange(self.ddpm_num_timesteps
542
- ) if use_original_steps else self.ddim_timesteps
543
- timesteps = timesteps[:t_start]
544
-
545
- time_range = np.flip(timesteps)
546
- total_steps = timesteps.shape[0]
547
- print(f'Running DDIM Sampling with {total_steps} timesteps')
548
-
549
- iterator = tqdm(time_range, desc='Decoding image', total=total_steps)
550
- x_dec = x_latent
551
- for i, step in enumerate(iterator):
552
- index = total_steps - i - 1
553
- ts = torch.full((x_latent.shape[0], ),
554
- step,
555
- device=x_latent.device,
556
- dtype=torch.long)
557
- x_dec, _ = self.p_sample_ddim(
558
- x_dec,
559
- cond,
560
- ts,
561
- index=index,
562
- use_original_steps=use_original_steps,
563
- unconditional_guidance_scale=unconditional_guidance_scale,
564
- unconditional_conditioning=unconditional_conditioning)
565
- if callback:
566
- callback(i)
567
- return x_dec
568
-
569
-
570
- def calc_mean_std(feat, eps=1e-5):
571
- # eps is a small value added to the variance to avoid divide-by-zero.
572
- size = feat.size()
573
- assert (len(size) == 4)
574
- N, C = size[:2]
575
- feat_var = feat.view(N, C, -1).var(dim=2) + eps
576
- feat_std = feat_var.sqrt().view(N, C, 1, 1)
577
- feat_mean = feat.view(N, C, -1).mean(dim=2).view(N, C, 1, 1)
578
- return feat_mean, feat_std
579
-
580
-
581
- def adaptive_instance_normalization(content_feat, style_feat):
582
- assert (content_feat.size()[:2] == style_feat.size()[:2])
583
- size = content_feat.size()
584
- style_mean, style_std = calc_mean_std(style_feat)
585
- content_mean, content_std = calc_mean_std(content_feat)
586
-
587
- normalized_feat = (content_feat -
588
- content_mean.expand(size)) / content_std.expand(size)
589
- return normalized_feat * style_std.expand(size) + style_mean.expand(size)
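DDIMVSampler itself cannot be exercised without a full latent-diffusion model plus ControlNet weights, but the AdaIN helpers that close the deleted file are self-contained. Below is a condensed copy of the two functions with random stand-in tensors (nothing from the Rerender pipeline), showing that adaptive_instance_normalization re-normalizes a content feature map to match the per-channel mean and std of a style feature map.

import torch


def calc_mean_std(feat, eps=1e-5):
    # Per-channel mean/std over the spatial dimensions of an NCHW tensor.
    n, c = feat.size()[:2]
    var = feat.view(n, c, -1).var(dim=2) + eps
    return (feat.view(n, c, -1).mean(dim=2).view(n, c, 1, 1),
            var.sqrt().view(n, c, 1, 1))


def adaptive_instance_normalization(content_feat, style_feat):
    size = content_feat.size()
    style_mean, style_std = calc_mean_std(style_feat)
    content_mean, content_std = calc_mean_std(content_feat)
    normalized = (content_feat - content_mean.expand(size)) / content_std.expand(size)
    return normalized * style_std.expand(size) + style_mean.expand(size)


content = torch.randn(1, 4, 64, 64)
style = torch.randn(1, 4, 64, 64) * 3.0 + 5.0
out = adaptive_instance_normalization(content, style)
print(out.mean().item(), out.std().item())  # roughly 5.0 and 3.0, i.e. the style statistics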
 
spaces/AnthonyTruchetPoC/persistent-docker/scripts/interactive-rebuild-docs.sh DELETED
@@ -1,2 +0,0 @@
1
- #!/usr/bin/env sh
2
- poetry run sphinx-autobuild --open-browser doc dist/doc
 
 
 
spaces/Antonpy/stable-diffusion-license/license.html DELETED
The diff for this file is too large to render. See raw diff
 
spaces/Apex-X/Tm/roop/face_analyser.py DELETED
@@ -1,34 +0,0 @@
1
- import threading
2
- from typing import Any
3
- import insightface
4
-
5
- import roop.globals
6
- from roop.typing import Frame
7
-
8
- FACE_ANALYSER = None
9
- THREAD_LOCK = threading.Lock()
10
-
11
-
12
- def get_face_analyser() -> Any:
13
- global FACE_ANALYSER
14
-
15
- with THREAD_LOCK:
16
- if FACE_ANALYSER is None:
17
- FACE_ANALYSER = insightface.app.FaceAnalysis(name='buffalo_l', providers=roop.globals.execution_providers)
18
- FACE_ANALYSER.prepare(ctx_id=0, det_size=(640, 640))
19
- return FACE_ANALYSER
20
-
21
-
22
- def get_one_face(frame: Frame) -> Any:
23
- face = get_face_analyser().get(frame)
24
- try:
25
- return min(face, key=lambda x: x.bbox[0])
26
- except ValueError:
27
- return None
28
-
29
-
30
- def get_many_faces(frame: Frame) -> Any:
31
- try:
32
- return get_face_analyser().get(frame)
33
- except IndexError:
34
- return None
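A rough usage sketch for the deleted roop helpers, written directly against insightface instead of the module's lazy singleton. It assumes the insightface and opencv-python packages are installed, picks the CPU execution provider for simplicity, and uses a placeholder image path.

import cv2
import insightface

analyser = insightface.app.FaceAnalysis(
    name='buffalo_l', providers=['CPUExecutionProvider'])
analyser.prepare(ctx_id=0, det_size=(640, 640))

frame = cv2.imread('example.jpg')                 # BGR frame, as roop passes it
faces = analyser.get(frame)                       # what get_many_faces returned
leftmost = min(faces, key=lambda f: f.bbox[0]) if faces else None  # get_one_face logic
print(len(faces), None if leftmost is None else leftmost.bbox)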
 
spaces/Artrajz/vits-simple-api/utils/classify_language.py DELETED
@@ -1,60 +0,0 @@
1
- from config import LANGUAGE_IDENTIFICATION_LIBRARY
2
-
3
- module = LANGUAGE_IDENTIFICATION_LIBRARY.lower()
4
-
5
- langid_languages = ["af", "am", "an", "ar", "as", "az", "be", "bg", "bn", "br", "bs", "ca", "cs", "cy", "da", "de", "dz", "el",
6
- "en", "eo", "es", "et", "eu", "fa", "fi", "fo", "fr", "ga", "gl", "gu", "he", "hi", "hr", "ht", "hu", "hy",
7
- "id", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "ku", "ky", "la", "lb", "lo", "lt", "lv", "mg",
8
- "mk", "ml", "mn", "mr", "ms", "mt", "nb", "ne", "nl", "nn", "no", "oc", "or", "pa", "pl", "ps", "pt", "qu",
9
- "ro", "ru", "rw", "se", "si", "sk", "sl", "sq", "sr", "sv", "sw", "ta", "te", "th", "tl", "tr", "ug", "uk",
10
- "ur", "vi", "vo", "wa", "xh", "zh", "zu"]
11
-
12
-
13
- def classify_language(text: str, target_languages: list = None) -> str:
14
- if module == "fastlid" or module == "fasttext":
15
- from fastlid import fastlid, supported_langs
16
- classifier = fastlid
17
- if target_languages != None:
18
- target_languages = [lang for lang in target_languages if lang in supported_langs]
19
- fastlid.set_languages = target_languages
20
- elif module == "langid":
21
- import langid
22
- classifier = langid.classify
23
- if target_languages != None:
24
- target_languages = [lang for lang in target_languages if lang in langid_languages]
25
- langid.set_languages(target_languages)
26
- else:
27
- raise ValueError(f"Wrong LANGUAGE_IDENTIFICATION_LIBRARY in config.py")
28
-
29
- lang = classifier(text)[0]
30
-
31
- return lang
32
-
33
-
34
- def classify_zh_ja(text: str) -> str:
35
- for idx, char in enumerate(text):
36
- unicode_val = ord(char)
37
-
38
- # 检测日语字符
39
- if 0x3040 <= unicode_val <= 0x309F or 0x30A0 <= unicode_val <= 0x30FF:
40
- return "ja"
41
-
42
- # 检测汉字字符
43
- if 0x4E00 <= unicode_val <= 0x9FFF:
44
- # 检查周围的字符
45
- next_char = text[idx + 1] if idx + 1 < len(text) else None
46
-
47
- if next_char and (0x3040 <= ord(next_char) <= 0x309F or 0x30A0 <= ord(next_char) <= 0x30FF):
48
- return "ja"
49
-
50
- return "zh"
51
-
52
-
53
- if __name__ == "__main__":
54
- text = "这是一个测试文本"
55
- print(classify_language(text))
56
- print(classify_zh_ja(text)) # "zh"
57
-
58
- text = "これはテストテキストです"
59
- print(classify_language(text))
60
- print(classify_zh_ja(text)) # "ja"
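The deleted module dispatches between fastlid/fasttext and langid based on the project's config. A minimal sketch of the same idea using langid directly (no config module) is shown below, together with the zh/ja heuristic rewritten with English comments; it assumes the langid package is installed.

import langid

langid.set_languages(['zh', 'ja', 'en'])   # restrict candidates, as the deleted code does


def classify_zh_ja(text: str) -> str:
    # Same heuristic as the deleted helper: any kana character means Japanese,
    # a CJK ideograph directly followed by kana also means Japanese, else Chinese.
    for idx, char in enumerate(text):
        code = ord(char)
        if 0x3040 <= code <= 0x309F or 0x30A0 <= code <= 0x30FF:
            return 'ja'
        if 0x4E00 <= code <= 0x9FFF:
            nxt = text[idx + 1] if idx + 1 < len(text) else None
            if nxt and (0x3040 <= ord(nxt) <= 0x309F or 0x30A0 <= ord(nxt) <= 0x30FF):
                return 'ja'
    return 'zh'


print(langid.classify('This is a test sentence')[0])   # expected: en
print(classify_zh_ja('これはテストテキストです'))        # ja
print(classify_zh_ja('这是一个测试文本'))                # zh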
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/idna/package_data.py DELETED
@@ -1,2 +0,0 @@
1
- __version__ = '3.4'
2
-
 
 
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/command/clean.py DELETED
@@ -1,76 +0,0 @@
1
- """distutils.command.clean
2
-
3
- Implements the Distutils 'clean' command."""
4
-
5
- # contributed by Bastian Kleineidam <[email protected]>, added 2000-03-18
6
-
7
- import os
8
- from distutils.core import Command
9
- from distutils.dir_util import remove_tree
10
- from distutils import log
11
-
12
-
13
- class clean(Command):
14
-
15
- description = "clean up temporary files from 'build' command"
16
- user_options = [
17
- ('build-base=', 'b', "base build directory (default: 'build.build-base')"),
18
- (
19
- 'build-lib=',
20
- None,
21
- "build directory for all modules (default: 'build.build-lib')",
22
- ),
23
- ('build-temp=', 't', "temporary build directory (default: 'build.build-temp')"),
24
- (
25
- 'build-scripts=',
26
- None,
27
- "build directory for scripts (default: 'build.build-scripts')",
28
- ),
29
- ('bdist-base=', None, "temporary directory for built distributions"),
30
- ('all', 'a', "remove all build output, not just temporary by-products"),
31
- ]
32
-
33
- boolean_options = ['all']
34
-
35
- def initialize_options(self):
36
- self.build_base = None
37
- self.build_lib = None
38
- self.build_temp = None
39
- self.build_scripts = None
40
- self.bdist_base = None
41
- self.all = None
42
-
43
- def finalize_options(self):
44
- self.set_undefined_options(
45
- 'build',
46
- ('build_base', 'build_base'),
47
- ('build_lib', 'build_lib'),
48
- ('build_scripts', 'build_scripts'),
49
- ('build_temp', 'build_temp'),
50
- )
51
- self.set_undefined_options('bdist', ('bdist_base', 'bdist_base'))
52
-
53
- def run(self):
54
- # remove the build/temp.<plat> directory (unless it's already
55
- # gone)
56
- if os.path.exists(self.build_temp):
57
- remove_tree(self.build_temp, dry_run=self.dry_run)
58
- else:
59
- log.debug("'%s' does not exist -- can't clean it", self.build_temp)
60
-
61
- if self.all:
62
- # remove build directories
63
- for directory in (self.build_lib, self.bdist_base, self.build_scripts):
64
- if os.path.exists(directory):
65
- remove_tree(directory, dry_run=self.dry_run)
66
- else:
67
- log.warn("'%s' does not exist -- can't clean it", directory)
68
-
69
- # just for the heck of it, try to remove the base build directory:
70
- # we might have emptied it right now, but if not we don't care
71
- if not self.dry_run:
72
- try:
73
- os.rmdir(self.build_base)
74
- log.info("removing '%s'", self.build_base)
75
- except OSError:
76
- pass
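This vendored file is the classic distutils 'clean' command, normally reached via `python setup.py clean --all`. The snippet below is only a hedged sketch of driving it programmatically through a Distribution object; the project name is made up, and it assumes a Python where distutils is still importable (it left the stdlib in 3.12) and a throwaway working directory.

from distutils.dist import Distribution

dist = Distribution({'name': 'demo', 'version': '0.0.0'})
clean_cmd = dist.get_command_obj('clean')
clean_cmd.all = True           # equivalent to passing --all on the command line
dist.run_command('clean')      # finalizes options against 'build'/'bdist', then removes build output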
 
spaces/BartPoint/VoiceChange_Beta/infer_pack/modules/F0Predictor/__init__.py DELETED
File without changes
spaces/Benson/text-generation/Examples/Auto Clicker For Clicker Heroes Download.md DELETED
@@ -1,78 +0,0 @@
1
- <br />
2
- <h1>Auto Clicker para Clicker Heroes Descargar</h1>
3
- <p>Si eres un fan de los juegos de clickers ociosos, es posible que hayas escuchado o jugado <a href="( 3 )">Clicker Heroes</a>, un juego popular donde matas monstruos, mejoras héroes, encuentras tesoros y matas jefes. ¿Pero sabías que puedes mejorar tu experiencia de juego usando un <strong>auto clicker</strong> para héroes clickers? En este artículo, explicaremos qué es un clicker automático, cómo usarlo para héroes clickers y cuáles son los beneficios de usarlo. </p>
4
- <h2>auto clicker for clicker heroes download</h2><br /><p><b><b>DOWNLOAD</b> &#9734;&#9734;&#9734; <a href="https://bltlly.com/2v6KI0">https://bltlly.com/2v6KI0</a></b></p><br /><br />
5
- <h2>¿Qué es Auto Clicker? </h2>
6
- <p>Un auto clicker es un programa que le permite configurar y automatizar el <strong>click de un mouse</strong> en la pantalla de su computadora. Un clicker automático no solo sigue el cursor, pero a menudo tiene soporte para doble y triple clic, teclas de acceso rápido que funcionan incluso en segundo plano, ajustes automáticos ahorra, y más. </p>
7
- <h3>¿Cómo usar el Auto Clicker? </h3>
8
- <p>Para usar un auto clicker, debes seguir estos pasos:</p>
9
- <ol>
10
- <li>Visite <a href="( 6 )">AutoClickers.org</a> para encontrar las diferentes opciones de dispositivos disponibles y descargar el que se adapte a sus necesidades. </li>
11
- <li>Ejecute el instalador y siga las instrucciones para completar la instalación. </li>
12
- <li>Abra el auto clicker haciendo clic en el icono o en el acceso directo del escritorio. </li>
13
- <li> Elija el atajo de teclado que desea utilizar para iniciar o dejar de hacer clic y haga clic en "Aplicar". </li>
14
- <li>Seleccione el área en la pantalla donde desea que haga clic el clicker automático. Puede hacer esto arrastrando el cursor del ratón o usando las coordenadas. </li>
15
- <li>Ajuste la velocidad de clic y la duración moviendo los controles deslizantes o introduciendo los valores. También puede elegir el tipo de clic (izquierda, derecha, centro) y el número de clics. </li>
16
- <li>Pulse el atajo de teclado para iniciar el auto clic. Puede ver el número de clics y el tiempo transcurrido en la ventana del auto clicker. </li>
17
-
18
- </ol>
19
- <h3>Beneficios de Auto Clicker</h3>
20
- <p>Usar un auto clicker puede tener muchas ventajas, como:</p>
21
- <ul>
22
- <li><strong>Ahorra tiempo y esfuerzo</strong>: No tienes que hacer clic manualmente en la pantalla repetidamente, lo que puede ser agotador y aburrido. Puedes dejar que el auto clicker haga el trabajo por ti mientras te enfocas en otras tareas o te relajas. </li>
23
- <li><strong>Reducir errores</strong>: No tienes que preocuparte por perder un clic o hacer clic en el lugar equivocado. El clicker automático hará clic de forma precisa y consistente de acuerdo con su configuración. </li>
24
- <li><strong>Mejore la experiencia de juego</strong>: Puede disfrutar jugando juegos que requieren mucho clic sin frustrarse o perder interés. También puede mejorar su rendimiento de juego y puntuación mediante el uso de un auto clicker. </li>
25
- <li><strong>Personalizar las opciones de clic</strong>: Puede ajustar la velocidad de clic, duración, área, tipo y número de acuerdo a sus preferencias y necesidades. También puede crear diferentes perfiles para diferentes juegos o tareas y cambiar entre ellos fácilmente. </li>
26
- </ul>
27
- <h2>¿Qué es Clicker Heroes? </h2>
28
- <p><a href="">Clicker Heroes</a> es uno de los juegos de clickers inactivos más populares en la web. Fue lanzado en 2014 por <a href="">Playsaurus</a>, un estudio de juegos independiente con sede en California. El juego ha sido jugado por millones de personas en todo el mundo y ha recibido críticas positivas de críticos y jugadores por igual. </p>
29
- <p></p>
30
- <h3>Cómo jugar Clicker Heroes? </h3>
31
- <p>El modo de juego de Clicker Heroes es simple pero adictivo. Aquí están las instrucciones básicas:</p>
32
- <ol>
33
- <li>Haga clic en monstruos para atacarlos y recoger el oro de ellos. </li>
34
- <li>Usa el oro para subir de nivel a tus héroes, que te ayudarán a luchar contra los monstruos automáticamente. </li>
35
- <li>Compra mejoras y habilidades para tus héroes para hacerlos más fuertes y desbloquear nuevas habilidades. </li>
36
- <li>Progresa a través de zonas y mundos, cada uno con diferentes monstruos y fondos. </li>
37
-
38
- </ol>
39
- <h3>Consejos y trucos para Clicker Heroes</h3>
40
- <p>Para aprovechar al máximo Clicker Heroes, debes seguir estos consejos y trucos:</p>
41
- <ul>
42
- <li>Usa antiguos y extraños, que son personajes especiales que pueden aumentar tu progreso al darte varios bonos y efectos. Puedes comprar antiguos con almas de héroe, que obtienes de ascendente, y extraños con almas antiguas, que obtienes de trascender. </li>
43
- <li>Ascender y trascender regularmente, que son formas de restablecer su juego con beneficios adicionales. Ascender les dará almas de héroes basadas en su zona más alta alcanzada, mientras que trascender les dará almas antiguas basadas en sus almas de héroes totales sacrificadas. Ambas acciones aumentarán tu poder general y acelerarán tu progreso. </li>
44
- <li>Únete a clanes y redadas, que son características multijugador que te permiten cooperar con otros jugadores y obtener más recompensas. Puedes unirte a un clan introduciendo su nombre o creando el tuyo propio, y participar en incursiones luchando contra inmortales con los miembros de tu clan. Puedes obtener almas de héroe, rubíes y monedas de clan de las redadas. </li>
45
- <li>Usa mercenarios y misiones, que son características adicionales que pueden ayudarte a obtener recursos adicionales. Puedes contratar mercenarios con rubíes, que son la moneda premium del juego, y enviarlos en misiones para obtener oro, almas de héroes, rubíes, reliquias o habilidades. Puedes tener hasta cinco mercenarios a la vez. </li>
46
- </ul>
47
- <h2>¿Por qué usar Auto Clicker para Clicker Heroes? </h2>
48
- <p>Como puedes ver, Clicker Heroes es un juego que involucra muchos clics. Si bien esto puede ser divertido al principio, también puede volverse tedioso y aburrido después de un tiempo. Es por eso que usar un clicker automático para héroes clickers puede ser una gran idea. Aquí hay algunas razones por las que:</p>
49
- <h3>Los mejores clickers automáticos para Clicker Heroes</h3>
50
-
51
- <ul>
52
- <li><strong>OP Auto Clicker</strong>: Este es un clicker automático gratuito y fácil de usar que te permite elegir el intervalo de clic, el tipo y la ubicación. También puede establecer teclas de acceso rápido, aleatorizar clics y grabar y reproducir clics. Puede descargarlo desde <a href="( 1 )">here</a>. </li>
53
- <li><strong>GS Auto Clicker</strong>: Este es otro clicker automático gratuito y simple que te permite configurar la tasa de clics, el número y la ubicación. También puede usar teclas de acceso rápido, guardar y cargar la configuración y usar la opción de registro para hacer clic en varios lugares. Puede descargarlo desde <a href="( 2 )">aquí</a>. </li>
54
- <li><strong>Speed Auto Clicker</strong>: Este es un rápido y potente clicker automático que puede alcanzar hasta 50000 clicks por segundo. Puede ajustar la velocidad, el tipo y la ubicación de los clics, así como usar teclas de acceso rápido, aleatorizar los clics y establecer un límite de clics. Puede descargarlo desde <a href="( 3 )">aquí</a>. </li>
55
- <li><strong>Murgee Auto Clicker</strong>: Este es un clicker automático de pago pero versátil que ofrece muchas características y opciones. Puede personalizar el intervalo de clic, el tipo, la ubicación y la duración, así como usar teclas de acceso rápido, programar clics y crear macros. Puede descargarlo desde <a href="( 4 )">aquí</a>. </li>
56
- </ul>
57
- <h3>¿Cómo configurar los clickers automáticos para los héroes del clicker? </h3>
58
- <p>Para configurar los clickers automáticos para los héroes clicker, debe seguir estas directrices:</p>
59
- <ol>
60
- <li>Arrastre y suelte el icono del clicker automático al área deseada en la pantalla del juego. Puedes colocarlo en el área enemiga, los botones de nivel de héroe, las habilidades o el botón de compra de mejoras disponibles. </li>
61
- <li>Elija el número de clickers automáticos que desea utilizar para cada tarea. Puedes tener hasta 99 clickers automáticos en total, pero solo uno por cada botón de nivel de héroe, botón de habilidad o botón de compra de mejoras disponibles. </li>
62
-
63
- <li>Retire los clickers automáticos haciendo clic en el botón X en la esquina superior derecha de cada icono. También puede arrastrar y soltar de nuevo a la piscina de auto clickers en el lado derecho de la pantalla. </li>
64
- </ol>
65
- <h2>Conclusión</h2>
66
- <p>En conclusión, auto clicker es una herramienta útil para jugar clicker héroes, ya que puede automatizar el proceso de clic y mejorar su rendimiento de juego. Hay muchos clickers automáticos disponibles para descargar, cada uno con sus propias características y ventajas. Para usar clickers automáticos para los héroes clickers, necesitas configurarlos correctamente y asignarlos a diferentes tareas. Al hacerlo, puedes disfrutar jugando a clicker heroes sin cansarte o aburrirte. </p>
67
- <h4>Preguntas frecuentes</h4>
68
- <p>Aquí hay algunas preguntas frecuentes sobre el clicker automático para los héroes de clicker:</p>
69
- <ul>
70
- <li><strong>¿Cuál es el mejor clicker automático para los héroes clicker? </strong> No hay una respuesta definitiva a esta pregunta, ya que diferentes clickers automáticos pueden adaptarse a diferentes preferencias y necesidades. Sin embargo, algunos de los más populares y recomendados son OP Auto Clicker, GS Auto Clicker, Speed Auto Clicker y Murgee Auto Clicker.</li>
71
- <li><strong>¿Qué tan rápido puede hacer clic un auto clicker? </strong> La velocidad de un auto clicker depende de su configuración y características. Algunos clickers automáticos pueden alcanzar hasta 50000 clicks por segundo, mientras que otros solo pueden llegar hasta 100 clicks por segundo. Puede ajustar la velocidad de su auto clicker cambiando su intervalo o tasa. </li>
72
- <li><strong>Está usando un clicker automático de engaño? </strong> Esto depende de su perspectiva y opinión. Algunas personas pueden considerar el uso de un auto clicker como trampa, ya que le da una ventaja injusta sobre otros jugadores que no lo utilizan. Otros pueden verlo como una forma legítima de jugar el juego de manera más eficiente y conveniente. </li>
73
-
74
- <li><strong>¿Cuántos clickers automáticos necesito para héroes clickers? </strong> El número de clickers automáticos que necesitas para los clickers depende de tus objetivos y estrategias. En general, usted debe tener al menos un auto clicker en el área enemiga para atacar más rápido, y un auto clicker en el botón comprar mejoras disponibles para subir de nivel héroes y comprar mejoras automáticamente. También puede tener más clickers automáticos en los botones de nivel de héroe o las habilidades para activarlos más a menudo. </li>
75
- </ul>
76
- <p>Espero que este artículo te haya ayudado a entender más sobre el clicker automático para la descarga de héroes clicker. Si usted tiene alguna pregunta o comentario, por favor no dude en dejar un comentario a continuación. Gracias por leer y feliz clic! </p> 64aa2da5cf<br />
77
- <br />
78
- <br />
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/requests/adapters.py DELETED
@@ -1,584 +0,0 @@
1
- """
2
- requests.adapters
3
- ~~~~~~~~~~~~~~~~~
4
-
5
- This module contains the transport adapters that Requests uses to define
6
- and maintain connections.
7
- """
8
-
9
- import os.path
10
- import socket # noqa: F401
11
-
12
- from pip._vendor.urllib3.exceptions import ClosedPoolError, ConnectTimeoutError
13
- from pip._vendor.urllib3.exceptions import HTTPError as _HTTPError
14
- from pip._vendor.urllib3.exceptions import InvalidHeader as _InvalidHeader
15
- from pip._vendor.urllib3.exceptions import (
16
- LocationValueError,
17
- MaxRetryError,
18
- NewConnectionError,
19
- ProtocolError,
20
- )
21
- from pip._vendor.urllib3.exceptions import ProxyError as _ProxyError
22
- from pip._vendor.urllib3.exceptions import ReadTimeoutError, ResponseError
23
- from pip._vendor.urllib3.exceptions import SSLError as _SSLError
24
- from pip._vendor.urllib3.poolmanager import PoolManager, proxy_from_url
25
- from pip._vendor.urllib3.response import HTTPResponse
26
- from pip._vendor.urllib3.util import Timeout as TimeoutSauce
27
- from pip._vendor.urllib3.util import parse_url
28
- from pip._vendor.urllib3.util.retry import Retry
29
-
30
- from .auth import _basic_auth_str
31
- from .compat import basestring, urlparse
32
- from .cookies import extract_cookies_to_jar
33
- from .exceptions import (
34
- ConnectionError,
35
- ConnectTimeout,
36
- InvalidHeader,
37
- InvalidProxyURL,
38
- InvalidSchema,
39
- InvalidURL,
40
- ProxyError,
41
- ReadTimeout,
42
- RetryError,
43
- SSLError,
44
- )
45
- from .models import Response
46
- from .structures import CaseInsensitiveDict
47
- from .utils import (
48
- DEFAULT_CA_BUNDLE_PATH,
49
- extract_zipped_paths,
50
- get_auth_from_url,
51
- get_encoding_from_headers,
52
- prepend_scheme_if_needed,
53
- select_proxy,
54
- urldefragauth,
55
- )
56
-
57
- try:
58
- from pip._vendor.urllib3.contrib.socks import SOCKSProxyManager
59
- except ImportError:
60
-
61
- def SOCKSProxyManager(*args, **kwargs):
62
- raise InvalidSchema("Missing dependencies for SOCKS support.")
63
-
64
-
65
- DEFAULT_POOLBLOCK = False
66
- DEFAULT_POOLSIZE = 10
67
- DEFAULT_RETRIES = 0
68
- DEFAULT_POOL_TIMEOUT = None
69
-
70
-
71
- class BaseAdapter:
72
- """The Base Transport Adapter"""
73
-
74
- def __init__(self):
75
- super().__init__()
76
-
77
- def send(
78
- self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None
79
- ):
80
- """Sends PreparedRequest object. Returns Response object.
81
-
82
- :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
83
- :param stream: (optional) Whether to stream the request content.
84
- :param timeout: (optional) How long to wait for the server to send
85
- data before giving up, as a float, or a :ref:`(connect timeout,
86
- read timeout) <timeouts>` tuple.
87
- :type timeout: float or tuple
88
- :param verify: (optional) Either a boolean, in which case it controls whether we verify
89
- the server's TLS certificate, or a string, in which case it must be a path
90
- to a CA bundle to use
91
- :param cert: (optional) Any user-provided SSL certificate to be trusted.
92
- :param proxies: (optional) The proxies dictionary to apply to the request.
93
- """
94
- raise NotImplementedError
95
-
96
- def close(self):
97
- """Cleans up adapter specific items."""
98
- raise NotImplementedError
99
-
100
-
101
- class HTTPAdapter(BaseAdapter):
102
- """The built-in HTTP Adapter for urllib3.
103
-
104
- Provides a general-case interface for Requests sessions to contact HTTP and
105
- HTTPS urls by implementing the Transport Adapter interface. This class will
106
- usually be created by the :class:`Session <Session>` class under the
107
- covers.
108
-
109
- :param pool_connections: The number of urllib3 connection pools to cache.
110
- :param pool_maxsize: The maximum number of connections to save in the pool.
111
- :param max_retries: The maximum number of retries each connection
112
- should attempt. Note, this applies only to failed DNS lookups, socket
113
- connections and connection timeouts, never to requests where data has
114
- made it to the server. By default, Requests does not retry failed
115
- connections. If you need granular control over the conditions under
116
- which we retry a request, import urllib3's ``Retry`` class and pass
117
- that instead.
118
- :param pool_block: Whether the connection pool should block for connections.
119
-
120
- Usage::
121
-
122
- >>> import requests
123
- >>> s = requests.Session()
124
- >>> a = requests.adapters.HTTPAdapter(max_retries=3)
125
- >>> s.mount('http://', a)
126
- """
127
-
128
- __attrs__ = [
129
- "max_retries",
130
- "config",
131
- "_pool_connections",
132
- "_pool_maxsize",
133
- "_pool_block",
134
- ]
135
-
136
- def __init__(
137
- self,
138
- pool_connections=DEFAULT_POOLSIZE,
139
- pool_maxsize=DEFAULT_POOLSIZE,
140
- max_retries=DEFAULT_RETRIES,
141
- pool_block=DEFAULT_POOLBLOCK,
142
- ):
143
- if max_retries == DEFAULT_RETRIES:
144
- self.max_retries = Retry(0, read=False)
145
- else:
146
- self.max_retries = Retry.from_int(max_retries)
147
- self.config = {}
148
- self.proxy_manager = {}
149
-
150
- super().__init__()
151
-
152
- self._pool_connections = pool_connections
153
- self._pool_maxsize = pool_maxsize
154
- self._pool_block = pool_block
155
-
156
- self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block)
157
-
158
- def __getstate__(self):
159
- return {attr: getattr(self, attr, None) for attr in self.__attrs__}
160
-
161
- def __setstate__(self, state):
162
- # Can't handle by adding 'proxy_manager' to self.__attrs__ because
163
- # self.poolmanager uses a lambda function, which isn't pickleable.
164
- self.proxy_manager = {}
165
- self.config = {}
166
-
167
- for attr, value in state.items():
168
- setattr(self, attr, value)
169
-
170
- self.init_poolmanager(
171
- self._pool_connections, self._pool_maxsize, block=self._pool_block
172
- )
173
-
174
- def init_poolmanager(
175
- self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs
176
- ):
177
- """Initializes a urllib3 PoolManager.
178
-
179
- This method should not be called from user code, and is only
180
- exposed for use when subclassing the
181
- :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
182
-
183
- :param connections: The number of urllib3 connection pools to cache.
184
- :param maxsize: The maximum number of connections to save in the pool.
185
- :param block: Block when no free connections are available.
186
- :param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager.
187
- """
188
- # save these values for pickling
189
- self._pool_connections = connections
190
- self._pool_maxsize = maxsize
191
- self._pool_block = block
192
-
193
- self.poolmanager = PoolManager(
194
- num_pools=connections,
195
- maxsize=maxsize,
196
- block=block,
197
- strict=True,
198
- **pool_kwargs,
199
- )
200
-
201
- def proxy_manager_for(self, proxy, **proxy_kwargs):
202
- """Return urllib3 ProxyManager for the given proxy.
203
-
204
- This method should not be called from user code, and is only
205
- exposed for use when subclassing the
206
- :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
207
-
208
- :param proxy: The proxy to return a urllib3 ProxyManager for.
209
- :param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager.
210
- :returns: ProxyManager
211
- :rtype: urllib3.ProxyManager
212
- """
213
- if proxy in self.proxy_manager:
214
- manager = self.proxy_manager[proxy]
215
- elif proxy.lower().startswith("socks"):
216
- username, password = get_auth_from_url(proxy)
217
- manager = self.proxy_manager[proxy] = SOCKSProxyManager(
218
- proxy,
219
- username=username,
220
- password=password,
221
- num_pools=self._pool_connections,
222
- maxsize=self._pool_maxsize,
223
- block=self._pool_block,
224
- **proxy_kwargs,
225
- )
226
- else:
227
- proxy_headers = self.proxy_headers(proxy)
228
- manager = self.proxy_manager[proxy] = proxy_from_url(
229
- proxy,
230
- proxy_headers=proxy_headers,
231
- num_pools=self._pool_connections,
232
- maxsize=self._pool_maxsize,
233
- block=self._pool_block,
234
- **proxy_kwargs,
235
- )
236
-
237
- return manager
238
-
239
- def cert_verify(self, conn, url, verify, cert):
240
- """Verify a SSL certificate. This method should not be called from user
241
- code, and is only exposed for use when subclassing the
242
- :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
243
-
244
- :param conn: The urllib3 connection object associated with the cert.
245
- :param url: The requested URL.
246
- :param verify: Either a boolean, in which case it controls whether we verify
247
- the server's TLS certificate, or a string, in which case it must be a path
248
- to a CA bundle to use
249
- :param cert: The SSL certificate to verify.
250
- """
251
- if url.lower().startswith("https") and verify:
252
-
253
- cert_loc = None
254
-
255
- # Allow self-specified cert location.
256
- if verify is not True:
257
- cert_loc = verify
258
-
259
- if not cert_loc:
260
- cert_loc = extract_zipped_paths(DEFAULT_CA_BUNDLE_PATH)
261
-
262
- if not cert_loc or not os.path.exists(cert_loc):
263
- raise OSError(
264
- f"Could not find a suitable TLS CA certificate bundle, "
265
- f"invalid path: {cert_loc}"
266
- )
267
-
268
- conn.cert_reqs = "CERT_REQUIRED"
269
-
270
- if not os.path.isdir(cert_loc):
271
- conn.ca_certs = cert_loc
272
- else:
273
- conn.ca_cert_dir = cert_loc
274
- else:
275
- conn.cert_reqs = "CERT_NONE"
276
- conn.ca_certs = None
277
- conn.ca_cert_dir = None
278
-
279
- if cert:
280
- if not isinstance(cert, basestring):
281
- conn.cert_file = cert[0]
282
- conn.key_file = cert[1]
283
- else:
284
- conn.cert_file = cert
285
- conn.key_file = None
286
- if conn.cert_file and not os.path.exists(conn.cert_file):
287
- raise OSError(
288
- f"Could not find the TLS certificate file, "
289
- f"invalid path: {conn.cert_file}"
290
- )
291
- if conn.key_file and not os.path.exists(conn.key_file):
292
- raise OSError(
293
- f"Could not find the TLS key file, invalid path: {conn.key_file}"
294
- )
295
-
296
- def build_response(self, req, resp):
297
- """Builds a :class:`Response <requests.Response>` object from a urllib3
298
- response. This should not be called from user code, and is only exposed
299
- for use when subclassing the
300
- :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`
301
-
302
- :param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response.
303
- :param resp: The urllib3 response object.
304
- :rtype: requests.Response
305
- """
306
- response = Response()
307
-
308
- # Fallback to None if there's no status_code, for whatever reason.
309
- response.status_code = getattr(resp, "status", None)
310
-
311
- # Make headers case-insensitive.
312
- response.headers = CaseInsensitiveDict(getattr(resp, "headers", {}))
313
-
314
- # Set encoding.
315
- response.encoding = get_encoding_from_headers(response.headers)
316
- response.raw = resp
317
- response.reason = response.raw.reason
318
-
319
- if isinstance(req.url, bytes):
320
- response.url = req.url.decode("utf-8")
321
- else:
322
- response.url = req.url
323
-
324
- # Add new cookies from the server.
325
- extract_cookies_to_jar(response.cookies, req, resp)
326
-
327
- # Give the Response some context.
328
- response.request = req
329
- response.connection = self
330
-
331
- return response
332
-
333
- def get_connection(self, url, proxies=None):
334
- """Returns a urllib3 connection for the given URL. This should not be
335
- called from user code, and is only exposed for use when subclassing the
336
- :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
337
-
338
- :param url: The URL to connect to.
339
- :param proxies: (optional) A Requests-style dictionary of proxies used on this request.
340
- :rtype: urllib3.ConnectionPool
341
- """
342
- proxy = select_proxy(url, proxies)
343
-
344
- if proxy:
345
- proxy = prepend_scheme_if_needed(proxy, "http")
346
- proxy_url = parse_url(proxy)
347
- if not proxy_url.host:
348
- raise InvalidProxyURL(
349
- "Please check proxy URL. It is malformed "
350
- "and could be missing the host."
351
- )
352
- proxy_manager = self.proxy_manager_for(proxy)
353
- conn = proxy_manager.connection_from_url(url)
354
- else:
355
- # Only scheme should be lower case
356
- parsed = urlparse(url)
357
- url = parsed.geturl()
358
- conn = self.poolmanager.connection_from_url(url)
359
-
360
- return conn
361
-
362
- def close(self):
363
- """Disposes of any internal state.
364
-
365
- Currently, this closes the PoolManager and any active ProxyManager,
366
- which closes any pooled connections.
367
- """
368
- self.poolmanager.clear()
369
- for proxy in self.proxy_manager.values():
370
- proxy.clear()
371
-
372
- def request_url(self, request, proxies):
373
- """Obtain the url to use when making the final request.
374
-
375
- If the message is being sent through a HTTP proxy, the full URL has to
376
- be used. Otherwise, we should only use the path portion of the URL.
377
-
378
- This should not be called from user code, and is only exposed for use
379
- when subclassing the
380
- :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
381
-
382
- :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
383
- :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs.
384
- :rtype: str
385
- """
386
- proxy = select_proxy(request.url, proxies)
387
- scheme = urlparse(request.url).scheme
388
-
389
- is_proxied_http_request = proxy and scheme != "https"
390
- using_socks_proxy = False
391
- if proxy:
392
- proxy_scheme = urlparse(proxy).scheme.lower()
393
- using_socks_proxy = proxy_scheme.startswith("socks")
394
-
395
- url = request.path_url
396
- if is_proxied_http_request and not using_socks_proxy:
397
- url = urldefragauth(request.url)
398
-
399
- return url
400
-
401
- def add_headers(self, request, **kwargs):
402
- """Add any headers needed by the connection. As of v2.0 this does
403
- nothing by default, but is left for overriding by users that subclass
404
- the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
405
-
406
- This should not be called from user code, and is only exposed for use
407
- when subclassing the
408
- :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
409
-
410
- :param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to.
411
- :param kwargs: The keyword arguments from the call to send().
412
- """
413
- pass
414
-
415
- def proxy_headers(self, proxy):
416
- """Returns a dictionary of the headers to add to any request sent
417
- through a proxy. This works with urllib3 magic to ensure that they are
418
- correctly sent to the proxy, rather than in a tunnelled request if
419
- CONNECT is being used.
420
-
421
- This should not be called from user code, and is only exposed for use
422
- when subclassing the
423
- :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
424
-
425
- :param proxy: The url of the proxy being used for this request.
426
- :rtype: dict
427
- """
428
- headers = {}
429
- username, password = get_auth_from_url(proxy)
430
-
431
- if username:
432
- headers["Proxy-Authorization"] = _basic_auth_str(username, password)
433
-
434
- return headers
435
-
436
- def send(
437
- self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None
438
- ):
439
- """Sends PreparedRequest object. Returns Response object.
440
-
441
- :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
442
- :param stream: (optional) Whether to stream the request content.
443
- :param timeout: (optional) How long to wait for the server to send
444
- data before giving up, as a float, or a :ref:`(connect timeout,
445
- read timeout) <timeouts>` tuple.
446
- :type timeout: float or tuple or urllib3 Timeout object
447
- :param verify: (optional) Either a boolean, in which case it controls whether
448
- we verify the server's TLS certificate, or a string, in which case it
449
- must be a path to a CA bundle to use
450
- :param cert: (optional) Any user-provided SSL certificate to be trusted.
451
- :param proxies: (optional) The proxies dictionary to apply to the request.
452
- :rtype: requests.Response
453
- """
454
-
455
- try:
456
- conn = self.get_connection(request.url, proxies)
457
- except LocationValueError as e:
458
- raise InvalidURL(e, request=request)
459
-
460
- self.cert_verify(conn, request.url, verify, cert)
461
- url = self.request_url(request, proxies)
462
- self.add_headers(
463
- request,
464
- stream=stream,
465
- timeout=timeout,
466
- verify=verify,
467
- cert=cert,
468
- proxies=proxies,
469
- )
470
-
471
- chunked = not (request.body is None or "Content-Length" in request.headers)
472
-
473
- if isinstance(timeout, tuple):
474
- try:
475
- connect, read = timeout
476
- timeout = TimeoutSauce(connect=connect, read=read)
477
- except ValueError:
478
- raise ValueError(
479
- f"Invalid timeout {timeout}. Pass a (connect, read) timeout tuple, "
480
- f"or a single float to set both timeouts to the same value."
481
- )
482
- elif isinstance(timeout, TimeoutSauce):
483
- pass
484
- else:
485
- timeout = TimeoutSauce(connect=timeout, read=timeout)
486
-
487
- try:
488
- if not chunked:
489
- resp = conn.urlopen(
490
- method=request.method,
491
- url=url,
492
- body=request.body,
493
- headers=request.headers,
494
- redirect=False,
495
- assert_same_host=False,
496
- preload_content=False,
497
- decode_content=False,
498
- retries=self.max_retries,
499
- timeout=timeout,
500
- )
501
-
502
- # Send the request.
503
- else:
504
- if hasattr(conn, "proxy_pool"):
505
- conn = conn.proxy_pool
506
-
507
- low_conn = conn._get_conn(timeout=DEFAULT_POOL_TIMEOUT)
508
-
509
- try:
510
- skip_host = "Host" in request.headers
511
- low_conn.putrequest(
512
- request.method,
513
- url,
514
- skip_accept_encoding=True,
515
- skip_host=skip_host,
516
- )
517
-
518
- for header, value in request.headers.items():
519
- low_conn.putheader(header, value)
520
-
521
- low_conn.endheaders()
522
-
523
- for i in request.body:
524
- low_conn.send(hex(len(i))[2:].encode("utf-8"))
525
- low_conn.send(b"\r\n")
526
- low_conn.send(i)
527
- low_conn.send(b"\r\n")
528
- low_conn.send(b"0\r\n\r\n")
529
-
530
- # Receive the response from the server
531
- r = low_conn.getresponse()
532
-
533
- resp = HTTPResponse.from_httplib(
534
- r,
535
- pool=conn,
536
- connection=low_conn,
537
- preload_content=False,
538
- decode_content=False,
539
- )
540
- except Exception:
541
- # If we hit any problems here, clean up the connection.
542
- # Then, raise so that we can handle the actual exception.
543
- low_conn.close()
544
- raise
545
-
546
- except (ProtocolError, OSError) as err:
547
- raise ConnectionError(err, request=request)
548
-
549
- except MaxRetryError as e:
550
- if isinstance(e.reason, ConnectTimeoutError):
551
- # TODO: Remove this in 3.0.0: see #2811
552
- if not isinstance(e.reason, NewConnectionError):
553
- raise ConnectTimeout(e, request=request)
554
-
555
- if isinstance(e.reason, ResponseError):
556
- raise RetryError(e, request=request)
557
-
558
- if isinstance(e.reason, _ProxyError):
559
- raise ProxyError(e, request=request)
560
-
561
- if isinstance(e.reason, _SSLError):
562
- # This branch is for urllib3 v1.22 and later.
563
- raise SSLError(e, request=request)
564
-
565
- raise ConnectionError(e, request=request)
566
-
567
- except ClosedPoolError as e:
568
- raise ConnectionError(e, request=request)
569
-
570
- except _ProxyError as e:
571
- raise ProxyError(e)
572
-
573
- except (_SSLError, _HTTPError) as e:
574
- if isinstance(e, _SSLError):
575
- # This branch is for urllib3 versions earlier than v1.22
576
- raise SSLError(e, request=request)
577
- elif isinstance(e, ReadTimeoutError):
578
- raise ReadTimeout(e, request=request)
579
- elif isinstance(e, _InvalidHeader):
580
- raise InvalidHeader(e, request=request)
581
- else:
582
- raise
583
-
584
- return self.build_response(request, resp)
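
For reference, a minimal usage sketch of the adapter machinery deleted above; the retry settings and URL are illustrative assumptions, not values taken from this repository. Mounting an HTTPAdapter on a Session is what routes matching requests through send().

    import requests
    from requests.adapters import HTTPAdapter
    from urllib3.util.retry import Retry

    # Mount an adapter with retry behaviour; Session.get() will invoke
    # HTTPAdapter.send() for every URL that starts with the mounted prefix.
    session = requests.Session()
    adapter = HTTPAdapter(max_retries=Retry(total=3, backoff_factor=0.5))
    session.mount("https://", adapter)

    # (connect timeout, read timeout) tuple, matching the docstring above.
    response = session.get("https://example.org/", timeout=(3.05, 10))
    print(response.status_code)
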
spaces/Blaise-g/summarize-biomedical-papers-long-summary-or-tldr/summarize.py DELETED
@@ -1,131 +0,0 @@
- import logging
-
- import torch
- from tqdm.auto import tqdm
- from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
-
-
- def load_model_and_tokenizer(model_name):
-     """
-     load_model_and_tokenizer - a function that loads a model and tokenizer from huggingface
-     Args:
-         model_name (str): the name of the model to load
-     Returns:
-         AutoModelForSeq2SeqLM: the model
-         AutoTokenizer: the tokenizer
-     """
-
-     model = AutoModelForSeq2SeqLM.from_pretrained(
-         model_name,
-         # low_cpu_mem_usage=True,
-         # use_cache=False,
-     )
-     tokenizer = AutoTokenizer.from_pretrained(model_name)
-     model = model.to("cuda") if torch.cuda.is_available() else model
-
-     logging.info(f"Loaded model {model_name}")
-     return model, tokenizer
-
-
- def summarize(ids, mask, model, tokenizer, **kwargs):
-     """
-     summarize - given a batch of ids and a mask, returns a summary and the token length of the output summary
-     Args:
-         ids (): the batch of ids
-         mask (): the attention mask for the batch
-         model (): the model to use for summarization
-         tokenizer (): the tokenizer to use for summarization
-     Returns:
-         str: the summary of the batch
-     """
-
-     ids = ids[None, :]
-     mask = mask[None, :]
-
-     input_ids = ids.to("cuda") if torch.cuda.is_available() else ids
-     attention_mask = mask.to("cuda") if torch.cuda.is_available() else mask
-
-     # global_attention_mask = torch.zeros_like(attention_mask)
-     # put global attention on <s> token
-     # global_attention_mask[:, 0] = 1
-
-     summary_pred_ids = model.generate(
-         input_ids,
-         attention_mask=attention_mask,
-         # global_attention_mask=global_attention_mask,
-         return_dict_in_generate=True,
-         **kwargs,
-     )
-     summary = tokenizer.batch_decode(
-         summary_pred_ids.sequences,
-         skip_special_tokens=True,
-         remove_invalid_values=True,
-     )
-     len_res = len(summary_pred_ids.sequences.cpu().numpy()[0])
-     return summary, len_res
-
-
- def summarize_via_tokenbatches(
-     input_text: str,
-     model,
-     tokenizer,
-     batch_length=2048,
-     batch_stride=16,
-     **kwargs,
- ):
-     """
-     summarize_via_tokenbatches - a function that takes a string and returns a summary
-     Args:
-         input_text (str): the text to summarize
-         model (): the model to use for summarization
-         tokenizer (): the tokenizer to use for summarization
-         batch_length (int, optional): the length of each batch. Defaults to 2048.
-         batch_stride (int, optional): the stride of each batch. Defaults to 16. The stride is the number of tokens that overlap between batches.
-     Returns:
-         str: the summary
-     """
-     # log all input parameters
-     if batch_length < 512:
-         batch_length = 512
-         print("WARNING: batch_length was set to 512")
-     print(
-         f"input parameters: {kwargs}, batch_length={batch_length}, batch_stride={batch_stride}"
-     )
-     encoded_input = tokenizer(
-         input_text,
-         padding="max_length",
-         truncation=True,
-         max_length=batch_length,
-         stride=batch_stride,
-         return_overflowing_tokens=True,
-         add_special_tokens=False,
-         return_tensors="pt",
-     )
-
-     in_id_arr, att_arr = encoded_input.input_ids, encoded_input.attention_mask
-     gen_summaries = []
-
-     pbar = tqdm(total=len(in_id_arr))
-
-     for _id, _mask in zip(in_id_arr, att_arr):
-
-         result, l = summarize(
-             ids=_id,
-             mask=_mask,
-             model=model,
-             tokenizer=tokenizer,
-             **kwargs,
-         )
-         rate = round(float((len(_id) - l) / len(_id)), 3)
-         _sum = {
-             "input_tokens": _id,
-             "summary": result,
-             "compression_rate": rate,
-         }
-         gen_summaries.append(_sum)
-         print(f"\t{result[0]}\nCompression:\t{rate}")
-         pbar.update()
-
-     pbar.close()
-
-     return gen_summaries
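
A hedged usage sketch of the two helpers above; the checkpoint name and the input text are placeholders, not values taken from this Space.

    from summarize import load_model_and_tokenizer, summarize_via_tokenbatches

    # Placeholder checkpoint; any seq2seq summarization model on the Hub would do.
    model, tokenizer = load_model_and_tokenizer("pszemraj/long-t5-tglobal-base-16384-book-summary")
    long_text = "Background: ... (a long biomedical article) ..."

    summaries = summarize_via_tokenbatches(
        long_text,
        model,
        tokenizer,
        batch_length=2048,
        batch_stride=16,
        max_length=256,  # extra kwargs are forwarded to model.generate()
        num_beams=4,
    )
    print(summaries[0]["summary"][0], summaries[0]["compression_rate"])
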
spaces/CVPR/LIVE/thrust/thrust/detail/config/compiler.h DELETED
@@ -1,186 +0,0 @@
- /*
-  *  Copyright 2008-2013 NVIDIA Corporation
-  *
-  *  Licensed under the Apache License, Version 2.0 (the "License");
-  *  you may not use this file except in compliance with the License.
-  *  You may obtain a copy of the License at
-  *
-  *      http://www.apache.org/licenses/LICENSE-2.0
-  *
-  *  Unless required by applicable law or agreed to in writing, software
-  *  distributed under the License is distributed on an "AS IS" BASIS,
-  *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  *  See the License for the specific language governing permissions and
-  *  limitations under the License.
-  */
-
- /*! \file compiler.h
-  *  \brief Compiler-specific configuration
-  */
-
- #pragma once
-
- // enumerate host compilers we know about
- #define THRUST_HOST_COMPILER_UNKNOWN 0
- #define THRUST_HOST_COMPILER_MSVC 1
- #define THRUST_HOST_COMPILER_GCC 2
- #define THRUST_HOST_COMPILER_CLANG 3
-
- // enumerate device compilers we know about
- #define THRUST_DEVICE_COMPILER_UNKNOWN 0
- #define THRUST_DEVICE_COMPILER_MSVC 1
- #define THRUST_DEVICE_COMPILER_GCC 2
- #define THRUST_DEVICE_COMPILER_NVCC 3
- #define THRUST_DEVICE_COMPILER_CLANG 4
-
- // figure out which host compiler we're using
- // XXX we should move the definition of THRUST_DEPRECATED out of this logic
- #if defined(_MSC_VER)
- #define THRUST_HOST_COMPILER THRUST_HOST_COMPILER_MSVC
- #define THRUST_MSVC_VERSION _MSC_VER
- #define THRUST_MSVC_VERSION_FULL _MSC_FULL_VER
- #elif defined(__clang__)
- #define THRUST_HOST_COMPILER THRUST_HOST_COMPILER_CLANG
- #define THRUST_CLANG_VERSION (__clang_major__ * 10000 + __clang_minor__ * 100 + __clang_patchlevel__)
- #elif defined(__GNUC__)
- #define THRUST_HOST_COMPILER THRUST_HOST_COMPILER_GCC
- #define THRUST_GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
- #if (THRUST_GCC_VERSION >= 50000)
- #define THRUST_MODERN_GCC
- #else
- #define THRUST_LEGACY_GCC
- #endif
- #else
- #define THRUST_HOST_COMPILER THRUST_HOST_COMPILER_UNKNOWN
- #endif // THRUST_HOST_COMPILER
-
- // figure out which device compiler we're using
- #if defined(__CUDACC__)
- #define THRUST_DEVICE_COMPILER THRUST_DEVICE_COMPILER_NVCC
- #elif THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_MSVC
- #define THRUST_DEVICE_COMPILER THRUST_DEVICE_COMPILER_MSVC
- #elif THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_GCC
- #define THRUST_DEVICE_COMPILER THRUST_DEVICE_COMPILER_GCC
- #elif THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_CLANG
- // CUDA-capable clang should behave similar to NVCC.
- #if defined(__CUDA__)
- #define THRUST_DEVICE_COMPILER THRUST_DEVICE_COMPILER_NVCC
- #else
- #define THRUST_DEVICE_COMPILER THRUST_DEVICE_COMPILER_CLANG
- #endif
- #else
- #define THRUST_DEVICE_COMPILER THRUST_DEVICE_COMPILER_UNKNOWN
- #endif
-
- // is the device compiler capable of compiling omp?
- #ifdef _OPENMP
- #define THRUST_DEVICE_COMPILER_IS_OMP_CAPABLE THRUST_TRUE
- #else
- #define THRUST_DEVICE_COMPILER_IS_OMP_CAPABLE THRUST_FALSE
- #endif // _OPENMP
-
-
- #if (THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_MSVC) && !defined(__CUDA_ARCH__)
-   #define THRUST_DISABLE_MSVC_WARNING_BEGIN(x) \
-     __pragma(warning(push)) \
-     __pragma(warning(disable : x)) \
-     /**/
-   #define THRUST_DISABLE_MSVC_WARNING_END(x) \
-     __pragma(warning(pop)) \
-     /**/
- #else
-   #define THRUST_DISABLE_MSVC_WARNING_BEGIN(x)
-   #define THRUST_DISABLE_MSVC_WARNING_END(x)
- #endif
-
- #if (THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_CLANG) && !defined(__CUDA_ARCH__)
-   #define THRUST_IGNORE_CLANG_WARNING_IMPL(x) \
-     THRUST_PP_STRINGIZE(clang diagnostic ignored x) \
-     /**/
-   #define THRUST_IGNORE_CLANG_WARNING(x) \
-     THRUST_IGNORE_CLANG_WARNING_IMPL(THRUST_PP_STRINGIZE(x)) \
-     /**/
-
-   #define THRUST_DISABLE_CLANG_WARNING_BEGIN(x) \
-     _Pragma("clang diagnostic push") \
-     _Pragma(THRUST_IGNORE_CLANG_WARNING(x)) \
-     /**/
-   #define THRUST_DISABLE_CLANG_WARNING_END(x) \
-     _Pragma("clang diagnostic pop") \
-     /**/
- #else
-   #define THRUST_DISABLE_CLANG_WARNING_BEGIN(x)
-   #define THRUST_DISABLE_CLANG_WARNING_END(x)
- #endif
-
- #if (THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_GCC) && !defined(__CUDA_ARCH__)
-   #define THRUST_IGNORE_GCC_WARNING_IMPL(x) \
-     THRUST_PP_STRINGIZE(GCC diagnostic ignored x) \
-     /**/
-   #define THRUST_IGNORE_GCC_WARNING(x) \
-     THRUST_IGNORE_GCC_WARNING_IMPL(THRUST_PP_STRINGIZE(x)) \
-     /**/
-
-   #define THRUST_DISABLE_GCC_WARNING_BEGIN(x) \
-     _Pragma("GCC diagnostic push") \
-     _Pragma(THRUST_IGNORE_GCC_WARNING(x)) \
-     /**/
-   #define THRUST_DISABLE_GCC_WARNING_END(x) \
-     _Pragma("GCC diagnostic pop") \
-     /**/
- #else
-   #define THRUST_DISABLE_GCC_WARNING_BEGIN(x)
-   #define THRUST_DISABLE_GCC_WARNING_END(x)
- #endif
-
- #define THRUST_DISABLE_MSVC_POSSIBLE_LOSS_OF_DATA_WARNING_BEGIN \
-   THRUST_DISABLE_MSVC_WARNING_BEGIN(4244 4267) \
-   /**/
- #define THRUST_DISABLE_MSVC_POSSIBLE_LOSS_OF_DATA_WARNING_END \
-   THRUST_DISABLE_MSVC_WARNING_END(4244 4267) \
-   /**/
- #define THRUST_DISABLE_MSVC_POSSIBLE_LOSS_OF_DATA_WARNING(x) \
-   THRUST_DISABLE_MSVC_POSSIBLE_LOSS_OF_DATA_WARNING_BEGIN \
-   x; \
-   THRUST_DISABLE_MSVC_POSSIBLE_LOSS_OF_DATA_WARNING_END \
-   /**/
-
- #define THRUST_DISABLE_MSVC_FORCING_VALUE_TO_BOOL_WARNING_BEGIN \
-   THRUST_DISABLE_MSVC_WARNING_BEGIN(4800) \
-   /**/
- #define THRUST_DISABLE_MSVC_FORCING_VALUE_TO_BOOL_WARNING_END \
-   THRUST_DISABLE_MSVC_WARNING_END(4800) \
-   /**/
- #define THRUST_DISABLE_MSVC_FORCING_VALUE_TO_BOOL_WARNING(x) \
-   THRUST_DISABLE_MSVC_FORCING_VALUE_TO_BOOL_WARNING_BEGIN \
-   x; \
-   THRUST_DISABLE_MSVC_FORCING_VALUE_TO_BOOL_WARNING_END \
-   /**/
-
- #define THRUST_DISABLE_CLANG_SELF_ASSIGNMENT_WARNING_BEGIN \
-   THRUST_DISABLE_CLANG_WARNING_BEGIN(-Wself-assign) \
-   /**/
- #define THRUST_DISABLE_CLANG_SELF_ASSIGNMENT_WARNING_END \
-   THRUST_DISABLE_CLANG_WARNING_END(-Wself-assign) \
-   /**/
- #define THRUST_DISABLE_CLANG_SELF_ASSIGNMENT_WARNING(x) \
-   THRUST_DISABLE_CLANG_SELF_ASSIGNMENT_WARNING_BEGIN \
-   x; \
-   THRUST_DISABLE_CLANG_SELF_ASSIGNMENT_WARNING_END \
-   /**/
-
- #define THRUST_DISABLE_CLANG_AND_GCC_INITIALIZER_REORDERING_WARNING_BEGIN \
-   THRUST_DISABLE_CLANG_WARNING_BEGIN(-Wreorder) \
-   THRUST_DISABLE_GCC_WARNING_BEGIN(-Wreorder) \
-   /**/
- #define THRUST_DISABLE_CLANG_AND_GCC_INITIALIZER_REORDERING_WARNING_END \
-   THRUST_DISABLE_CLANG_WARNING_END(-Wreorder) \
-   THRUST_DISABLE_GCC_WARNING_END(-Wreorder) \
-   /**/
- #define THRUST_DISABLE_CLANG_AND_GCC_INITIALIZER_REORDERING_WARNING(x) \
-   THRUST_DISABLE_CLANG_AND_GCC_INITIALIZER_REORDERING_WARNING_BEGIN \
-   x; \
-   THRUST_DISABLE_CLANG_AND_GCC_INITIALIZER_REORDERING_WARNING_END \
-   /**/
-
-
spaces/CVPR/LIVE/thrust/thrust/type_traits/is_operator_plus_function_object.h DELETED
@@ -1,77 +0,0 @@
- /*
-  *  Copyright 2008-2018 NVIDIA Corporation
-  *
-  *  Licensed under the Apache License, Version 2.0 (the "License");
-  *  you may not use this file except in compliance with the License.
-  *  You may obtain a copy of the License at
-  *
-  *      http://www.apache.org/licenses/LICENSE-2.0
-  *
-  *  Unless required by applicable law or agreed to in writing, software
-  *  distributed under the License is distributed on an "AS IS" BASIS,
-  *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  *  See the License for the specific language governing permissions and
-  *  limitations under the License.
-  */
-
- /*! \file is_operator_plus_function_object.h
-  *  \brief Type traits for determining if a \c BinaryFunction is equivalent to
-  *  \c operator+.
-  */
-
- #pragma once
-
- #include <thrust/detail/config.h>
- #include <thrust/functional.h>
- #include <thrust/detail/type_traits.h>
- #include <thrust/detail/type_traits/pointer_traits.h>
-
- namespace thrust
- {
-
- namespace detail
- {
-
- template <typename FunctionObject>
- struct is_operator_plus_function_object_impl;
-
- } // namespace detail
-
- /// Unary metafunction returns \c true_type if \c FunctionObject is equivalent
- /// to \c operator+, and \c false_type otherwise.
- template <typename FunctionObject>
- #if THRUST_CPP_DIALECT >= 2011
- using is_operator_plus_function_object =
- #else
- struct is_operator_plus_function_object :
- #endif
-   detail::is_operator_plus_function_object_impl<FunctionObject>
- #if THRUST_CPP_DIALECT < 2011
- {}
- #endif
- ;
-
- #if THRUST_CPP_DIALECT >= 2014
- /// <code>constexpr bool</code> that is \c true if \c FunctionObject is
- /// equivalent to \c operator+, and \c false otherwise.
- template <typename FunctionObject>
- constexpr bool is_operator_plus_function_object_v
-   = is_operator_plus_function_object<FunctionObject>::value;
- #endif
-
- ///////////////////////////////////////////////////////////////////////////////
-
- namespace detail
- {
-
- template <typename FunctionObject>
- struct is_operator_plus_function_object_impl : false_type {};
- template <typename T>
- struct is_operator_plus_function_object_impl<thrust::plus<T> > : true_type {};
- template <typename T>
- struct is_operator_plus_function_object_impl<std::plus<T> > : true_type {};
-
- } // namespace detail
-
- } // end namespace thrust
-
spaces/CVPR/transfiner/configs/new_baselines/mask_rcnn_R_50_FPN_400ep_LSJ.py DELETED
@@ -1,14 +0,0 @@
- from .mask_rcnn_R_50_FPN_100ep_LSJ import (
-     dataloader,
-     lr_multiplier,
-     model,
-     optimizer,
-     train,
- )
-
- train.max_iter *= 4  # 100ep -> 400ep
-
- lr_multiplier.scheduler.milestones = [
-     milestone * 4 for milestone in lr_multiplier.scheduler.milestones
- ]
- lr_multiplier.scheduler.num_updates = train.max_iter
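
A possible way to consume a LazyConfig recipe like the one above via detectron2's config API; the path below is an assumption based on the file's location in the repository.

    from detectron2.config import LazyConfig

    # Load the 400-epoch LSJ recipe and inspect the scaled schedule.
    cfg = LazyConfig.load("configs/new_baselines/mask_rcnn_R_50_FPN_400ep_LSJ.py")
    print(cfg.train.max_iter)                      # 4x the 100-epoch schedule
    print(cfg.lr_multiplier.scheduler.milestones)  # milestones scaled by 4 as well
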
spaces/Cherrycreamco/webui/README.md DELETED
@@ -1,20 +0,0 @@
- ---
- title: Stable Diffusion Web UI
- emoji: 🧿
- colorFrom: blue
- colorTo: blue
- sdk: gradio
- sdk_version: 3.9
- app_file: app.py
- pinned: false
- duplicated_from: camenduru/webui
- ---
-
- ## Stable Diffusion Web UI
- [https://github.com/AUTOMATIC1111/stable-diffusion-webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui)
-
- ## Documentation
- [https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki)
-
- ## Models License
- https://huggingface.co/spaces/CompVis/stable-diffusion-license
spaces/ChristopherMarais/Andrew_AI-BB_classification-beta/mysite/andrew_alpha/tests.py DELETED
@@ -1,3 +0,0 @@
- from django.test import TestCase
-
- # Create your tests here.
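
A minimal sketch of a test that could fill this stub; the URL name used with reverse() is hypothetical and not defined anywhere in the diff shown here.

    from django.test import TestCase
    from django.urls import reverse


    class SmokeTest(TestCase):
        def test_index_responds(self):
            # "andrew_alpha:index" is an assumed URL name, used for illustration only.
            response = self.client.get(reverse("andrew_alpha:index"))
            self.assertIn(response.status_code, (200, 302))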