parquet-converter committed
Commit f66ebde · 1 Parent(s): 8c43e22

Update parquet files (step 42 of 249)

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. spaces/17TheWord/RealESRGAN/README.md +0 -34
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Crack Kernel for Outlook PST Repair The Best Tool for Outlook Data File Recovery.md +0 -134
  3. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Ccleaner Full Crack HOT 2023.md +0 -28
  4. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Adobe AIR 2022 A Faster More Secure and More Compatible Runtime for AIR Applications.md +0 -194
  5. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Checkers for Java and Challenge Your Friends to a Game of Strategy.md +0 -129
  6. spaces/1phancelerku/anime-remove-background/Download Love O2O and Join the Fun of A Chinese Ghost Story Online Game.md +0 -102
  7. spaces/1toTree/lora_test/ppdiffusers/schedulers/scheduling_repaint.py +0 -321
  8. spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/commons/conformer/espnet_transformer_attn.py +0 -186
  9. spaces/AP123/CerealBoxMaker/README.md +0 -13
  10. spaces/ASJMO/freegpt/client/js/sidebar-toggler.js +0 -34
  11. spaces/AashishKumar/Restaurant_voice_chatbot/README.md +0 -13
  12. spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/GPTalk.py +0 -83
  13. spaces/AdamOswald1/finetuned_diffusion/README.md +0 -14
  14. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/confirmdialog/ConfirmDialog.d.ts +0 -127
  15. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/confirmdialog/methods/CreateContent.js +0 -32
  16. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/menu/methods/CreateButtons.js +0 -22
  17. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/pages/methods/GetPage.js +0 -10
  18. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/tabpages/TabPages.d.ts +0 -74
  19. spaces/AiMimicry/sovits-models/modules/losses.py +0 -61
  20. spaces/AkitoP/umamusume_bert_vits2/bert/bert-base-japanese-v3/README.md +0 -53
  21. spaces/AleksBlacky/Arxiv_paper_classifier/app.py +0 -136
  22. spaces/Alex89912/ai-code-v1/app.py +0 -3
  23. spaces/AlgoveraAI/ocean-marketplace/app.py +0 -173
  24. spaces/AllAideas/SegmentacionVideo/README.md +0 -13
  25. spaces/Aloento/9Nine-PITS/text/japanese.py +0 -131
  26. spaces/Alpaca233/ChatPDF-GUI/gpt_reader/pdf_reader.py +0 -121
  27. spaces/Alpaca233/SadTalker/src/utils/face_enhancer.py +0 -123
  28. spaces/Ameaou/academic-chatgpt3.1/crazy_functions/test_project/cpp/libJPG/jpge.cpp +0 -1049
  29. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_inpaint.py +0 -589
  30. spaces/Andy1621/uniformer_image_detection/configs/free_anchor/README.md +0 -27
  31. spaces/Andy1621/uniformer_image_segmentation/configs/resnest/fcn_s101-d8_512x512_160k_ade20k.py +0 -9
  32. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/scatter_points.py +0 -135
  33. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/sbcsgroupprober.py +0 -88
  34. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/bbcode.py +0 -108
  35. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/panel.py +0 -308
  36. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/jaraco/__init__.py +0 -0
  37. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/more_itertools/more.py +0 -0
  38. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/utils/colormap.py +0 -140
  39. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/layers/__init__.py +0 -0
  40. spaces/Banbri/zcvzcv/src/components/ui/textarea.tsx +0 -24
  41. spaces/Bart92/RVC_HF/lib/uvr5_pack/lib_v5/layers.py +0 -118
  42. spaces/Benson/text-generation/Examples/Cesta Batalla Sin Anuncios Mod Apk.md +0 -35
  43. spaces/BernardoOlisan/vqganclip/taming-transformers/scripts/sample_conditional.py +0 -355
  44. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/_windows.py +0 -72
  45. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/tenacity/after.py +0 -51
  46. spaces/CVPR/LIVE/thrust/thrust/mr/new.h +0 -88
  47. spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/scan_by_key.h +0 -23
  48. spaces/CVPR/regionclip-demo/detectron2/data/catalog.py +0 -236
  49. spaces/ChandraMohanNayal/AutoGPT/run.sh +0 -9
  50. spaces/ChevyWithAI/rvc-aicover/infer_pack/models.py +0 -982
spaces/17TheWord/RealESRGAN/README.md DELETED
@@ -1,34 +0,0 @@
- ---
- title: Real ESRGAN
- emoji: 🏃
- colorFrom: blue
- colorTo: blue
- sdk: gradio
- sdk_version: 3.1.7
- app_file: app.py
- pinned: false
- ---
-
- # Configuration
-
- `title`: _string_
- Display title for the Space
-
- `emoji`: _string_
- Space emoji (emoji-only character allowed)
-
- `colorFrom`: _string_
- Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
- `colorTo`: _string_
- Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
- `sdk`: _string_
- Can be either `gradio` or `streamlit`
-
- `app_file`: _string_
- Path to your main application file (which contains either `gradio` or `streamlit` Python code).
- Path is relative to the root of the repository.
-
- `pinned`: _boolean_
- Whether the Space stays on top of your list.
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Crack Kernel for Outlook PST Repair The Best Tool for Outlook Data File Recovery.md DELETED
@@ -1,134 +0,0 @@
- <br />
- <h1>SuperDuper 3.0 Crack for macOS MacOSX: A Complete Guide</h1>
- <p>If you are looking for a way to protect your data from unexpected disasters, such as hard drive failure, system crash, or malware attack, you may have heard of <strong>SuperDuper</strong>, a popular disk copying program that can create a fully bootable backup of your Mac.</p>
- <p>But what if you don't want to pay for the full version of SuperDuper? Is there a way to get it for free? And if so, is it safe and reliable?</p>
- <h2>SuperDuper 3.0 Crack for macOS MacOSX</h2><br /><p><b><b>Download</b> &rArr;&rArr;&rArr; <a href="https://byltly.com/2uKxuF">https://byltly.com/2uKxuF</a></b></p><br /><br />
- <p>In this article, we will answer these questions and more by providing you with a complete guide on how to download, install, and use <strong>SuperDuper 3.0 crack for macOS MacOSX</strong>. We will also discuss the benefits and features of this program, as well as the risks and drawbacks of using a cracked version.</p>
- <p>By the end of this article, you will have a clear idea of whether <strong>SuperDuper 3.0 crack for macOS MacOSX</strong> is worth it or not.</p>
- <h2>Introduction: What is SuperDuper and why you need it</h2>
- <p>SuperDuper is an advanced, yet easy to use disk copying program that can make a straight copy or clone of your Mac's hard drive or partition.</p>
- <p>This means that you can create an exact replica of your system on another drive or image file that can be used to boot your Mac in case something goes wrong with your original drive.</p>
- <p>This way, you can easily restore your system to its previous state without losing any data or settings.</p>
- <p>Some of the main advantages of using SuperDuper over other disk copying programs are:</p>
- <p>How to get SuperDuper 3.0 for free on Mac<br />
- SuperDuper 3.0 full version download with crack<br />
- SuperDuper 3.0 license key generator for macOS<br />
- SuperDuper 3.0 cracked dmg file for Mac OS X<br />
- SuperDuper 3.0 patch for macOS Catalina and Big Sur<br />
- SuperDuper 3.0 activation code for Mac<br />
- SuperDuper 3.0 serial number for macOS<br />
- SuperDuper 3.0 keygen for Mac OS X<br />
- SuperDuper 3.0 torrent download with crack<br />
- SuperDuper 3.0 crack only for Mac<br />
- SuperDuper 3.0 registration code for macOS<br />
- SuperDuper 3.0 product key for Mac OS X<br />
- SuperDuper 3.0 crack mac download free<br />
- SuperDuper 3.0 latest version with crack<br />
- SuperDuper 3.0 crack for macosx free download<br />
- SuperDuper 3.0 mac crack reddit<br />
- SuperDuper 3.0 crack dmg download for mac<br />
- SuperDuper 3.0 crack mac os catalina<br />
- SuperDuper 3.0 crack mac os big sur<br />
- SuperDuper 3.0 crack mac os mojave<br />
- SuperDuper 3.0 crack mac os high sierra<br />
- SuperDuper 3.0 crack mac os sierra<br />
- SuperDuper 3.0 crack mac os el capitan<br />
- SuperDuper 3.0 crack mac os yosemite<br />
- SuperDuper 3.0 crack mac os mavericks<br />
- SuperDuper 3.0 crack mac os mountain lion<br />
- SuperDuper 3.0 crack mac os lion<br />
- SuperDuper 3.0 crack mac os snow leopard<br />
- SuperDuper 3.0 crack mac os leopard<br />
- SuperDuper 3.0 crack mac os tiger<br />
- How to install SuperDuper 3.0 with crack on Mac<br />
- How to use SuperDuper 3.0 with crack on Mac<br />
- How to update SuperDuper 3.0 with crack on Mac<br />
- How to uninstall SuperDuper 3.0 with crack on Mac<br />
- How to backup and restore with SuperDuper 3.0 with crack on Mac<br />
- How to clone and sync with SuperDuper 3.0 with crack on Mac<br />
- How to schedule backups with SuperDuper 3.0 with crack on Mac<br />
- How to create bootable backups with SuperDuper 3.0 with crack on Mac<br />
- How to repair disk permissions with SuperDuper 3.0 with crack on Mac<br />
- How to verify disk integrity with SuperDuper 3.0 with crack on Mac<br />
- How to encrypt backups with SuperDuper 3.0 with crack on Mac<br />
- How to compress backups with SuperDuper 3.0 with crack on Mac<br />
- How to exclude files and folders from backups with SuperDuper 3.0 with crack on Mac<br />
- How to restore from backups with SuperDuper 3.0 with crack on Mac<br />
- How to clone from one Mac to another with SuperDuper 3.0 with crack on Mac<br />
- How to migrate data from old Mac to new Mac with SuperDuper 3.0 with crack on Mac<br />
- How to backup multiple drives with SuperDuper 3.0 with crack on Mac<br />
- How to backup network drives with SuperDuper 3.0 with crack on Mac<br />
- How to backup external drives with SuperDuper 3.0 with crack on Mac</p>
- <ul>
- <li>It has a clear, friendly, and understandable interface that guides you through the backup process.</li>
- <li>It has a built-in scheduler that allows you to back up automatically at regular intervals.</li>
- <li>It has a copy script feature that gives you complete control over what files get copied, ignored, or aliased from one drive to another.</li>
- <li>It supports APFS snapshots, which are point-in-time representations of your file system that can be restored quickly and easily.</li>
- </ul>
- <p>The latest version of SuperDuper is <strong>3.7.5</strong>, which was released on January 22nd, 2023. It is compatible with <strong>macOS Big Sur</strong>, <strong>macOS Monterey</strong>, and <strong>Apple Silicon</strong>.</p>
- <h2>How to download and install SuperDuper 3.0 crack for macOS MacOSX</h2>
- <p>If you want to use SuperDuper legally, you have to purchase a license from its official website for $27.95.</p>
- <p>However, if you want to use it for free, you can try to download and install <strong>SuperDuper 3.0 crack for macOS MacOSX</strong>, which is an unofficial version that bypasses the license verification process.</p>
- <p>To do this, you have to follow these steps:</p>
- <ol>
- <li>Go to <a href="https://haxmac.cc/superduper/" target="_blank">this link</a>, which is one of the sources where you can find <strong>SuperDuper 3.0 crack for macOS MacOSX</strong>.</li>
- <li>Click on the <em>"Download Link"</em> button at the bottom of the page.</li>
- <li>Select one of the available download options (such as UsersDrive or NitroFlare) and follow the instructions on how to download the file.</li>
- <li>Once the file is downloaded, extract it using an app like The Unarchiver or Keka.</li>
- <li>You will find two files inside the extracted folder: <em>"Super DUPER!.app"</em> and <em>"CORE Keygen.app"</em>.</li>
- <li>Drag <em>"Super DUPER!.app"</em> into your Applications folder.</li>
- <li>Run <em>"CORE Keygen.app"</em> and generate a serial number by clicking on the <em>"Generate"</em> button.</li>
- <li>Copy the serial number and paste it into <em>"Super DUPER!.app"</em> when prompted.</li>
- <li>Congratulations! You have successfully installed <strong>Super Duper! 3.0 crack for macOS MacOSX</strong>.</li>
- </ol>
- <p><img src="https://haxmac.cc/wp-content/uploads/2021/01/Screenshot-2021-01-23-at-11-AM.png" alt="Screenshot showing how to download Super DUPER!"></p>
- <h2>How to use Super DUPER! 3.0 crack for macOS MacOSX to create a bootable backup</h2>
- <p>Now that you have installed <strong>Super DUPER! 3.0 crack for macOS MacOSX</strong>, you can use it to create a bootable backup of your Mac.</p>
- <h2>Benefits and features of Super DUPER! 3.0 crack for macOS MacOSX</h2>
- <p>By using <strong>Super DUPER! 3.0 crack for macOS MacOSX</strong>, you can enjoy the benefits and features of Super DUPER!, which are:</p>
- <h3>Easy to use interface</h3>
- <p>Super DUPER! has a clear, friendly, and understandable interface that makes creating a backup painless. You just have to select the source drive (the one you want to copy), the destination drive (the one where you want to store the copy), and the backup option (such as "Backup - all files" or "Backup - user files"). Then, you just have to click on the "Copy Now" button and wait for the process to finish.</p>
- <p><img src="https://www.shirt-pocket.com/SuperDuper/images/SDMain.png" alt="Screenshot showing the main interface of Super DUPER!"></p>
- <h3>Built-in scheduler</h3>
- <p>Super DUPER! has a built-in scheduler that allows you to back up automatically at regular intervals. You can choose from different options, such as "When source changes", "Daily", "Weekly", or "Monthly". You can also set the time and day of the week when you want the backup to occur. This way, you don't have to worry about forgetting to back up your data.</p>
- <p><img src="https://www.shirt-pocket.com/SuperDuper/images/SDSchedule.png" alt="Screenshot showing the scheduler of Super DUPER!"></p>
- <h3>Copy script feature</h3>
- <p>Super DUPER! has a copy script feature that gives you complete control over what files get copied, ignored, or aliased from one drive to another. You can use the predefined scripts that come with Super DUPER!, such as "Backup - all files", "Backup - user files", or "Sandbox - shared users and applications". Or, you can create your own custom scripts by using the advanced options, such as "Include", "Exclude", or "Script". This way, you can tailor your backup to your specific needs.</p>
- <p><img src="https://www.shirt-pocket.com/SuperDuper/images/SDScript.png" alt="Screenshot showing the copy script feature of Super DUPER!"></p>
- <h3>Snapshot support</h3>
- <p>Super DUPER! supports APFS snapshots, which are point-in-time representations of your file system that can be restored quickly and easily. Snapshots are created automatically by Super DUPER! when you back up your data. You can also create them manually by using the "Snapshot..." option in the File menu. Snapshots are stored on your destination drive and can be accessed by holding down the Option key while booting your Mac. This way, you can restore your system to a previous state without losing any data.</p>
- <p><img src="https://www.shirt-pocket.com/SuperDuper/images/SDSnapshot.png" alt="Screenshot showing the snapshot feature of Super DUPER!"></p>
- <h2>Risks and drawbacks of using Super DUPER! 3.0 crack for macOS MacOSX</h2>
- <p>While using <strong>Super DUPER! 3.0 crack for macOS MacOSX</strong> may seem tempting, it also comes with some risks and drawbacks that you should be aware of. These are:</p>
- <h3>Legal issues</h3>
- <p>Using a cracked version of Super DUPER! violates the terms and conditions of the software license agreement that you agree to when you purchase Super DUPER!. This means that you are breaking the law and may face legal consequences, such as fines or lawsuits. Moreover, you are depriving the developers of Super DUPER! of their rightful income and discouraging them from creating more quality software.</p>
- <h3>Security issues</h3>
- <p>Downloading and installing a cracked version of Super DUPER! may expose your system to malware, viruses, or other malicious programs that may compromise your data or privacy. These programs may be hidden in the crack file or in the download source. They may also be activated when you run Super DUPER! or when you connect to the internet. These programs may steal your personal information, damage your files, or hijack your system.</p>
- <h3>Performance issues</h3>
- <p>Using a cracked version of Super DUPER! may cause errors, bugs, or crashes that may affect the quality or reliability of your backup or restore process. These problems may be caused by compatibility issues with your system or with other software, by corrupted or missing files in the crack file, or by interference from malware or viruses. These problems may prevent you from creating a successful backup or restoring your system properly.</p>
- <h2>Conclusion: Is Super DUPER! 3.0 crack for macOS MacOSX worth it?</h2>
- <p>In conclusion, <strong>Super DUPER! 3.0 crack for macOS MacOSX</strong> is not worth it. While it may seem like a good way to save money and enjoy the benefits and features of Super DUPER!, it also comes with significant risks and drawbacks that may outweigh its advantages.</p>
- <p>Using a cracked version of Super DUPER! is illegal, unsafe, and unreliable. It may expose you to legal troubles, security threats, and performance issues that may jeopardize your data and system.</p>
- <p>If you want to use Super DUPER! legally and safely, you should purchase a license from its official website for $27.95. This way, you can support the developers of Super DUPER!, get regular updates and support, and ensure that your backup and restore process is smooth and secure.</p>
- <p>If you don't want to pay for Super DUPER!, you can also try some alternatives or recommendations for using Super DUPER!, such as:</p>
- <ul>
- <li>Using the free trial version of Super DUPER!, which allows you to create a bootable backup once without scheduling or scripting.</li>
- <li>Using Time Machine, which is a built-in backup feature in macOS that can create incremental backups of your data on an external drive or a network device.</li>
- <li>Using Carbon Copy Cloner, which is another disk copying program that can create bootable backups of your Mac with similar features as Super DUPER!, but with a different interface and pricing model.</li>
- </ul>
- <h4>Frequently Asked Questions</h4>
- <ol>
- <li><strong>What is Super DUPER!?</strong></li>
- <li><em>Super DUPER! is an advanced, yet easy to use disk copying program that can create a fully bootable backup of your Mac.</em></li>
- <li><strong>How much does Super DUPER! cost?</strong></li>
- <li><em>Super DUPER! costs $27.95 for a single license that can be used on multiple Macs.</em></li>
- <li><strong>What is Super DUPER! 3.0 crack for macOS MacOSX?</strong></li>
- <li><em>Super DUPER! 3.0 crack for macOS MacOSX is an unofficial version of Super DUPER! that bypasses the license verification process and allows you to use it for free.</em></li>
- <li><strong>Is Super DUPER! 3.0 crack for macOS MacOSX safe?</strong></li>
- <li><em>No, Super DUPER! 3.0 crack for macOS MacOSX is not safe. It may expose your system to malware, viruses, or other malicious programs that may compromise your data or privacy.</em></li>
- <li><strong>Is Super DUPER! 3.0 crack for macOS MacOSX reliable?</strong></li>
- <li><em>No, Super DUPER! 3.0 crack for macOS MacOSX is not reliable. It may cause errors, bugs, or crashes that may affect the quality or reliability of your backup or restore process.</em></li>
- </ol>
- </p> 0a6ba089eb<br />
- <br />
- <br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Ccleaner Full Crack HOT 2023.md DELETED
@@ -1,28 +0,0 @@
-
- <h1>Download CCleaner Full Crack 2023: How to Install and Use It Safely</h1>
- <p>CCleaner is one of the most popular and trusted PC optimization tools that can help you clean junk files, fix registry errors, speed up your computer, and protect your privacy. However, the free version of CCleaner has limited features and requires you to update it manually. If you want to unlock all the features and enjoy automatic updates, you need to buy the pro version of CCleaner, which costs $24.95 per year.</p>
- <h2>download ccleaner full crack 2023</h2><br /><p><b><b>Download</b> &#8230; <a href="https://byltly.com/2uKxSv">https://byltly.com/2uKxSv</a></b></p><br /><br />
- <p>But what if you don't want to pay for CCleaner pro? Is there a way to download CCleaner full crack 2023 and use it for free? The answer is yes, but it comes with some risks and drawbacks. In this article, we will show you how to download CCleaner full crack 2023, how to install and use it safely, and what are the alternatives to CCleaner crack.</p>
- <h2>How to Download CCleaner Full Crack 2023</h2>
- <p>There are many websites that claim to offer CCleaner full crack 2023 for free download. However, not all of them are reliable or safe. Some of them may contain malware, viruses, or spyware that can harm your computer or steal your personal information. Therefore, you need to be careful when choosing a website to download CCleaner full crack 2023.</p>
- <p>One of the websites that we found to be relatively safe and working is <a href="https://tinhte.vn/thread/download-ccleaner-pro-2023-full-crack-huong-dan-cai-dat.3625564/">https://tinhte.vn/thread/download-ccleaner-pro-2023-full-crack-huong-dan-cai-dat.3625564/</a>. This website provides a link to download CCleaner Professional 2023 v6.11.10435 Full Repack, which is a cracked version of CCleaner pro that does not require a license key or activation. Here are the steps to download CCleaner full crack 2023 from this website:</p>
- <p></p>
- <ol>
- <li>Go to <a href="https://tinhte.vn/thread/download-ccleaner-pro-2023-full-crack-huong-dan-cai-dat.3625564/">https://tinhte.vn/thread/download-ccleaner-pro-2023-full-crack-huong-dan-cai-dat.3625564/</a> and scroll down to the bottom of the page.</li>
- <li>Click on the Google Drive link that says "DOWNLOAD" and enter the password "phanmemnet.com" when prompted.</li>
- <li>Download the file "CCleaner full crack 2023.rar" and save it on your computer.</li>
- <li>Extract the file using WinRAR or any other software that can open RAR files.</li>
- <li>You will see a folder named "CCleaner full crack 2023" that contains two files: "INSTALL PROFESSIONAL" and "READ ME".</li>
- </ol>
- <h2>How to Install and Use CCleaner Full Crack 2023</h2>
- <p>After downloading CCleaner full crack 2023, you need to install and use it properly to avoid any problems or errors. Here are the steps to install and use CCleaner full crack 2023:</p>
- <ol>
- <li>Run the file "INSTALL PROFESSIONAL" and wait for a black screen to appear.</li>
- <li>Wait for a few seconds until the installation is complete and close the black screen.</li>
- <li>Launch CCleaner from your desktop or start menu and enjoy all the features of CCleaner pro without any license key or activation.</li>
- <li>You can use CCleaner full crack 2023 to scan and clean your PC, optimize your registry, manage your startup programs, uninstall unwanted software, find duplicate files, wipe free space, and more.</li>
- </ol>
- <h2>What Are the Risks and Drawbacks of Using CCleaner Full Crack 2023</h2>
- <p>While using CCleaner full crack 2023 may seem tempting and convenient, it also comes with some risks and drawbacks that you should be aware of before deciding to use it</p> ddb901b051<br />
- <br />
- <br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Adobe AIR 2022 A Faster More Secure and More Compatible Runtime for AIR Applications.md DELETED
@@ -1,194 +0,0 @@
- <br />
- <h1>Adobe AIR Download 2022: How to Install and Use the Latest Version of Adobe AIR</h1>
- <p>Adobe AIR is a cross-platform runtime that allows you to run rich web applications and games on your desktop, mobile, or tablet device. It provides a consistent and flexible environment for developers to create and deliver engaging experiences across multiple devices and platforms. In this article, you will learn what Adobe AIR is, why you need it, how to download and install it on your device, how to update it to the latest version, and how to use it to run your favorite AIR applications.</p>
- <h2>What is Adobe AIR and why do you need it?</h2>
- <p>Adobe AIR stands for Adobe Integrated Runtime, and it is a technology that enables developers to use web technologies such as HTML, CSS, JavaScript, ActionScript, and Flash to create desktop and mobile applications that can run outside the browser. Some of the benefits of using Adobe AIR are:</p>
- <h2>adobe air download 2022</h2><br /><p><b><b>Download File</b> &#10004; <a href="https://urlin.us/2uSYZh">https://urlin.us/2uSYZh</a></b></p><br /><br />
- <ul>
- <li>It allows you to access native features of your device, such as camera, microphone, accelerometer, GPS, file system, notifications, etc.</li>
- <li>It supports offline mode, so you can use your applications even when you are not connected to the internet.</li>
- <li>It offers high performance and quality graphics, thanks to the built-in support for hardware acceleration and Stage 3D.</li>
- <li>It supports multiple screen sizes and resolutions, so you can enjoy your applications on any device.</li>
- <li>It supports DRM (digital rights management) and encryption, so you can protect your content and intellectual property.</li>
- <li>It supports extensions, so you can add additional functionality and features to your applications.</li>
- </ul>
- <h3>Adobe AIR features and benefits</h3>
- <p>Some of the features that make Adobe AIR a powerful and versatile runtime are:</p>
- <ul>
- <li>It supports multiple languages and frameworks, such as HTML5, CSS3, JavaScript, jQuery, AngularJS, ReactJS, VueJS, Bootstrap, Flex, ActionScript 3.0, Flash Professional, Flash Builder, Animate CC, etc.</li>
- <li>It supports multiple platforms and operating systems, such as Windows, Mac OS X, Linux, Android, iOS, BlackBerry Tablet OS, etc.</li>
- <li>It supports multiple application types, such as games, e-learning, e-commerce, social media, productivity tools, media players, etc.</li>
- <li>It supports multiple deployment options, such as web installation badges (which prompt users to install both the runtime and the application), custom installers (which bundle both the runtime and the application in one package), captive runtime (which embeds the runtime within the application), etc.</li>
- <li>It supports multiple development tools and environments, such as Visual Studio Code (with the official extension), Eclipse (with the official plugin), IntelliJ IDEA (with the official plugin), Flash Builder (with the official plugin), Animate CC (with the official plugin), etc.</li>
- </ul>
- <h3>Adobe AIR system requirements</h3>
- <p>The system requirements for installing and running Adobe AIR are detailed here: <a href="(^5^)">Adobe AIR: System requirements</a>. In general, you need:</p>
- <ul>
- <li>A compatible operating system (Windows 7 or later; Mac OS X 10.10 or later; Linux Ubuntu 14.04 or later; Android 4.0 or later; iOS 10 or later)</li>
- <li>A compatible processor (Intel Core Duo or faster; ARMv…)</li>
- </ul>
- <h2>How to download and install Adobe AIR on your device</h2>
- <p>Depending on your device and operating system, there are different ways to download and install Adobe AIR on your device. Here are some of the most common methods:</p>
- <h3>Downloading Adobe AIR from the official website</h3>
- <p>The easiest way to download Adobe AIR is to visit the official website: <a href="">Download Adobe AIR</a>. There, you can choose your platform and language, and click Download now. The website will automatically detect your system and provide you with the appropriate installer file.</p>
- <h3>Installing Adobe AIR on Windows</h3>
- <p>To install Adobe AIR on Windows, follow these steps:</p>
- <ol>
- <li>Download the Adobe AIR installer file from the official website or another trusted source.</li>
- <li>Double-click the downloaded file to launch the installer.</li>
- <li>Follow the onscreen instructions to complete the installation.</li>
- <li>If prompted, restart your computer.</li>
- </ol>
- <p>Note: If the installation fails, see <a href="">Troubleshoot AIR installation | Windows</a>.</p>
- <h3>Installing Adobe AIR on Mac</h3>
- <p>To install Adobe AIR on Mac, follow these steps:</p>
- <p>adobe air download 2022 mac<br />
- adobe air download 2022 windows<br />
- adobe air download 2022 android<br />
- adobe air download 2022 ios<br />
- adobe air download 2022 linux<br />
- adobe air download 2022 free<br />
- adobe air download 2022 offline installer<br />
- adobe air download 2022 latest version<br />
- adobe air download 2022 update<br />
- adobe air download 2022 for pc<br />
- adobe air download 2022 for macbook pro<br />
- adobe air download 2022 for windows 10<br />
- adobe air download 2022 for android apk<br />
- adobe air download 2022 for iphone<br />
- adobe air download 2022 for linux mint<br />
- adobe air download 2022 full version<br />
- adobe air download 2022 standalone installer<br />
- adobe air download 2022 new version<br />
- adobe air download 2022 patch<br />
- adobe air download 2022 for laptop<br />
- adobe air download 2022 for macbook air<br />
- adobe air download 2022 for windows 7<br />
- adobe air download 2022 for android tablet<br />
- adobe air download 2022 for ipad<br />
- adobe air download 2022 for linux ubuntu<br />
- adobe air download 2022 crack<br />
- adobe air download 2022 silent install<br />
- adobe air download 2022 old version<br />
- adobe air download 2022 fix<br />
- adobe air download 2022 for desktop<br />
- adobe air download 2022 for mac os x<br />
- adobe air download 2022 for windows xp<br />
- adobe air download 2022 for android tv<br />
- adobe air download 2022 for ipod touch<br />
- adobe air download 2022 for linux fedora<br />
- adobe air download 2022 keygen<br />
- adobe air download 2022 command line install<br />
- adobe air download 2022 previous version<br />
- adobe air download 2022 error<br />
- adobe air download 2022 for chromebook<br />
- adobe air download 2022 for mac os catalina<br />
- adobe air download 2022 for windows vista<br />
- adobe air download 2022 for android emulator<br />
- adobe air download 2022 for apple tv<br />
- adobe air download 2022 for linux centos<br />
- adobe air download 2022 serial number<br />
- adobe air download 2022 msi install<br />
- adobe air download 2022 beta version<br />
- adobe air download 2022 troubleshooting</p>
- <ol>
- <li>Download the Adobe AIR installer file from the official website or another trusted source.</li>
- <li>Double-click the downloaded file to launch the installer.</li>
- <li>Follow the onscreen instructions to complete the installation.</li>
- <li>If prompted, enter your administrator password.</li>
- </ol>
- <p>Note: If the installation fails, see <a href="">Troubleshoot AIR installation | Mac OS</a>.</p>
- <h3>Installing Adobe AIR on Linux</h3>
- <p>To install Adobe AIR on Linux, follow these steps:</p>
- <ol>
- <li>Download the Adobe AIR installer file from the official website or another trusted source.</li>
- <li>Open a terminal window and navigate to the location of the downloaded file.</li>
- <li>Change the permissions on the file to allow execution: <code>chmod +x AdobeAIRInstaller.bin</code>.</li>
- <li>Run the installer: <code>./AdobeAIRInstaller.bin</code>.</li>
- <li>Follow the onscreen instructions to complete the installation.</li>
- </ol>
- <p>Note: If you encounter any issues, see <a href="">Install or update Adobe AIR | Linux</a>.</p>
- <h3>Installing Adobe AIR on Android</h3>
- <p>To install Adobe AIR on Android, follow these steps:</p>
- <ol>
- <li>Go to Google Play Store and search for <a href="">Adobe AIR</a>.</li>
- <li>Tap Install or Update, depending on your device.</li>
- <li>Follow the onscreen instructions to complete the installation.</li>
- </ol>
- <p>Note: If you encounter any issues, see <a href="">Install or update Adobe AIR | Android devices</a>.</p>
- <h3>Installing Adobe AIR on iOS</h3>
- <p>To install Adobe AIR on iOS, follow these steps:</p>
- <ol>
- <li>Go to App Store and search for Adobe AIR.</li>
- <li>Tap Get or Update, depending on your device.</li>
- <li>Follow the onscreen instructions to complete the installation.</li>
- </ol>
- <p>Note: If you encounter any issues, see <a href="">Install or update Adobe AIR | iOS devices</a>.</p>
- <h2>How to update Adobe AIR to the latest version</h2>
- <p>Keeping Adobe AIR up to date is important for ensuring the security and performance of your applications. There are two ways to update Adobe AIR: manually or automatically.</p>
- <h3>Checking for updates manually</h3>
- <p>To check for updates manually, follow these steps:</p>
- <ul>
- <li>On Windows, go to Start > All Programs > Adobe AIR > Check for Updates.</li>
- <li>On Mac, go to Applications > Utilities > Adobe AIR Application Installer > Check for Updates.</li>
- <li>On Linux, go to Applications > System Tools > Adobe AIR Application Installer > Check for Updates.</li>
- <li>On Android, go to Settings > Apps > Adobe AIR > Check for Updates.</li>
- <li>On iOS, go to Settings > General > Software Update.</li>
- </ul>
- <p>If there is a new version available, follow the prompts to download and install it.</p>
- <h3>Enabling automatic updates</h3>
- <p>To enable automatic updates, follow these steps:</p>
- <ul>
- <li>On Windows, go to Start > All Programs > Adobe AIR > Settings Manager. Click the Updates tab and select Allow Adobe to install updates (recommended).</li>
- <li>On Mac, go to Applications > Utilities > Adobe AIR Settings Manager. Click the Updates tab and select Allow Adobe to install updates (recommended).</li>
- <li>On Linux, go to Applications > System Tools > Adobe AIR Settings Manager. Click the Updates tab and select Allow Adobe to install updates (recommended).</li>
- <li>On Android, go to Settings > Apps > Adobe AIR. Tap the menu icon and select Auto-update.</li>
- <li>On iOS, go to Settings > iTunes & App Store. Turn on Updates under Automatic Downloads.</li>
- </ul>
- <p>This way, Adobe AIR will check for updates periodically and install them automatically when available.</p>
- <h2>How to use Adobe AIR applications</h2>
- <p>Adobe AIR applications are web applications that can run on your device without a browser. They have the file extension .air or .apk (for Android) or .ipa (for iOS). To use Adobe AIR applications, you need to find and install them first, and then run and manage them on your device.</p>
- <h3>Finding and installing AIR applications</h3>
- <p>To find and install AIR applications, you can use one of the following methods:</p>
- <ul>
- <li>Browse the official <a href="">Adobe AIR Marketplace</a>, where you can find hundreds of free and paid applications in various categories.</li>
- <li>Browse other online sources that offer AIR applications, such as <a href="">Google Play Store</a>, <a href="">App Store</a>, <a href="">Amazon Appstore</a>, etc. Make sure you download from trusted and reputable sources only.</li>
- <li>Download an AIR application file from a website or a link provided by the developer. Make sure you scan the file for viruses and malware before opening it.</li>
- <li>Create your own AIR application using one of the development tools and environments mentioned earlier.</li>
- </ul>
- <p>To install an AIR application, you need to have Adobe AIR installed on your device first. Then, depending on your device and operating system, you can use one of the following methods:</p>
- <ul>
- <li>If you download an AIR application from a website or a link, double-click the file to launch the installer. Follow the onscreen instructions to complete the installation.</li>
- <li>If you download an AIR application from an online source that offers web installation badges (such as the Adobe AIR Marketplace), click the badge to launch the installer. Follow the onscreen instructions to complete the installation.</li>
- <li>If you download an AIR application from an online source that offers custom installers (such as Google Play Store or App Store), open the installer file and follow the onscreen instructions to complete the installation.</li>
- <li>If you create your own AIR application using a development tool or environment, export it as an installer file and then open it on your device. Follow the onscreen instructions to complete the installation.</li>
- <li>If you download an AIR application from an online source that offers captive runtime (such as Amazon Appstore), open the application file and follow the onscreen instructions to complete the installation.</li>
- </ul>
- <h3>Running and managing AIR applications</h3>
- <p>To run and manage AIR applications, you can use one of the following methods:</p>
- <ul>
- <li>If you install an AIR application on your desktop, you can find it in your Start menu (Windows), Applications folder (Mac), or Applications menu (Linux). Double-click the application icon to launch it.</li>
- <li>If you install an AIR application on your mobile device, you can find it in your app drawer or home screen. Tap the application icon to launch it.</li>
- <li>If you want to uninstall, update, or change the settings of an AIR application, you can use the Adobe AIR Settings Manager. On Windows, go to Start > All Programs > Adobe AIR > Settings Manager. On Mac, go to Applications > Utilities > Adobe AIR Settings Manager. On Linux, go to Applications > System Tools > Adobe AIR Settings Manager. On Android, go to Settings > Apps > Adobe AIR. On iOS, go to Settings > General > Usage > Manage Storage > Adobe AIR.</li>
- </ul>
- <h2>Conclusion</h2>
- <p>Adobe AIR is a powerful and versatile runtime that allows you to run rich web applications and games on your device without a browser. It offers many features and benefits for both developers and users, such as cross-platform compatibility, native device access, offline mode, high performance, DRM support, extensions support, etc. To use Adobe AIR applications, you need to download and install Adobe AIR on your device first, and then find and install your favorite AIR applications from various sources. You also need to keep Adobe AIR updated to the latest version for security and performance reasons. You can use the Adobe AIR Settings Manager to manage your AIR applications and change their settings.</p>
- <h3>Summary of the main points</h3>
- <ul>
- <li>Adobe AIR is a cross-platform runtime that allows you to run web applications and games on your device without a browser.</li>
- <li>Adobe AIR offers many features and benefits for both developers and users, such as cross-platform compatibility, native device access, offline mode, high performance, DRM support, extensions support, etc.</li>
- <li>To use Adobe AIR applications, you need to download and install Adobe AIR on your device first, and then find and install your favorite AIR applications from various sources.</li>
- <li>You also need to keep Adobe AIR updated to the latest version for security and performance reasons.</li>
- <li>You can use the Adobe AIR Settings Manager to manage your AIR applications and change their settings.</li>
- </ul>
- <h3>Call to action</h3>
- <p>If you are interested in using Adobe AIR applications or creating your own ones, you can visit the official website: <a href="">Adobe - Adobe AIR</a>. There, you can find more information about Adobe AIR, download the latest version of the runtime, browse the marketplace for existing applications, access the documentation and tutorials for developers, join the community forums for support and feedback, etc. You can also follow Adobe AIR on social media platforms such as <a href="">Facebook</a>, <a href="">Twitter</a>, <a href="">YouTube</a>, etc. for news and updates.</p>
- <h2>FAQs</h2>
- <p>Here are some of the frequently asked questions about Adobe AIR:</p>
- <ol>
- <li><b>Is Adobe AIR free?</b><br>Yes, Adobe AIR is free for both developers and users. You can download and use it without any charge or license fee.</li>
- <li><b>Is Adobe AIR safe?</b><br>Yes, Adobe AIR is safe as long as you download it from the official website or another trusted source. You should also scan any application file before installing it on your device. You can also check the digital signature of any application by right-clicking or control-clicking on it and selecting Properties (Windows) or Get Info (Mac).</li>
- <li><b>Is Adobe AIR still supported?</b><br>Yes, Adobe AIR is still supported by Adobe. The latest version of Adobe AIR is 33.1.1.533 (as of June 2023), which was released on May 18th 2023. You can check for updates regularly or enable automatic updates to keep your runtime up to date.</li>
- <li><b>What are some of the best Adobe AIR applications?</b><br>There are many great Adobe AIR applications available in various categories such as games, e-learning, e-commerce, social media, productivity tools, media players, etc. Some of the most popular ones are: Angry Birds (game), Pandora (music), TweetDeck (social media), Photoshop Express (photo editing), Evernote (note taking), Skype (video calling), etc.</li>
- <li><b>How can I create my own Adobe AIR application?</b><br>To create your own Adobe AIR application, you need to use one of the development tools and environments that support Adobe AIR, such as Visual Studio Code, Eclipse, IntelliJ IDEA, Flash Builder, Animate CC, etc. You also need to have some knowledge of web technologies such as HTML, CSS, JavaScript, ActionScript, Flash, etc. You can follow the official documentation and tutorials for developers: <a href="">Adobe - Adobe AIR Developer Center</a>. There, you can find guides, samples, videos, articles, forums, etc. to help you get started and improve your skills.</li>
- </ol>
- <p>I hope you enjoyed this article and learned something new about Adobe AIR. If you have any questions or feedback, please leave a comment below. Thank you for reading!</p> 197e85843d<br />
- <br />
- <br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Checkers for Java and Challenge Your Friends to a Game of Strategy.md DELETED
@@ -1,129 +0,0 @@
- <br />
- <h1>How to Download and Run Checkers for Java</h1>
- <p>Checkers is a classic board game that involves moving pieces diagonally across a grid of squares, capturing the opponent's pieces by jumping over them, and reaching the other side of the board to become a king. Checkers is also known as draughts in some countries, and it has many variations and rules. Checkers is a fun and easy game that can be played by anyone, anywhere.</p>
- <h2>download checkers for java</h2><br /><p><b><b>Download Zip</b> &#10026;&#10026;&#10026; <a href="https://urlin.us/2uSURI">https://urlin.us/2uSURI</a></b></p><br /><br />
- <p>Java is a popular programming language and software platform that runs on billions of devices, including computers, mobile phones, gaming consoles, medical devices, and many others. Java is used to develop applications that can run on different operating systems and platforms, without requiring any modifications or recompilation. Java is also known for its portability, performance, security, and reliability.</p>
- <p>If you want to play checkers on your computer, you might want to download and run checkers for Java. Checkers for Java is a free and open-source application that allows you to play checkers against the computer or another human player, either online or offline. Checkers for Java has many features and options, such as different board sizes, difficulty levels, game modes, themes, sounds, and statistics.</p>
- <p>In this article, we will show you how to download and run checkers for Java on your Windows system. We will also provide you with some tips and tricks for playing checkers for Java. Let's get started!</p>
- <h2>Checkers Rules and Gameplay</h2>
- <p>Before we download and run checkers for Java, let's review the basic rules and gameplay of checkers. Here are some key points:</p>
- <ul>
- <li>Checkers is played on an 8x8 board with 64 squares of alternating colors (dark and light).</li>
- <li>Each player has 12 pieces (also called men or checkers) of one color (black or white).</li>
- <li>The pieces are placed on the dark squares in the first three rows closest to each player.</li>
- <li>The player with the black pieces moves first, then the players alternate turns.</li>
- <li>A piece can only move one diagonal space forward (toward the opponent's side) to an empty square.</li>
- <li>If a piece is next to an opponent's piece and there is an empty square behind it, the piece can jump over the opponent's piece and capture it. The captured piece is removed from the board.</li>
- <li>A piece can make multiple jumps in one turn if possible.</li>
- <li>If a piece reaches the last row on the opponent's side (also called the king row), it becomes a king. A king can move in both directions (forward and backward) and jump over any piece in its way.</li>
- <li>The game ends when one player has no more pieces left or cannot make any valid moves. The player with more pieces left or who made the last move wins the game.</li>
- </ul>
- <h2>Java Programming Language</h2>
- <p>Now that we know how to play checkers, let's learn more about Java. Java is a programming language that was created by James Gosling at Sun Microsystems in 1995. It is a high-level, object-oriented, and general-purpose language that can run on different platforms and devices. Java is widely used for developing applications such as web servers, mobile apps, games, and software tools.</p>
- <p>download checkers game for java<br />
- download checkers source code for java<br />
- download checkers applet for java<br />
- download checkers framework for java<br />
- download checkers project for java<br />
- download checkers gui for java<br />
- download checkers tutorial for java<br />
- download checkers program for java<br />
- download checkers library for java<br />
- download checkers software for java<br />
- download checkers application for java<br />
- download checkers network for java<br />
- download checkers ai for java<br />
- download checkers swing for java<br />
- download checkers javafx for java<br />
- download checkers socket for java<br />
- download checkers online for java<br />
- download checkers multiplayer for java<br />
- download checkers board for java<br />
- download checkers data for java<br />
- download checkers canvas for java<br />
- download checkers zip for java<br />
- download checkers github for java<br />
- download checkers pdf for java<br />
- download checkers html for java<br />
- download checkers jar for java<br />
- download checkers class for java<br />
- download checkers interface for java<br />
- download checkers package for java<br />
- download checkers module for java<br />
- download checkers plugin for java<br />
- download checkers component for java<br />
- download checkers tool for java<br />
- download checkers api for java<br />
- download checkers sdk for java<br />
- download checkers ide for java<br />
- download checkers eclipse for java<br />
- download checkers netbeans for java<br />
- download checkers intellij for java<br />
- download checkers android studio for java<br />
- download checkers gradle for java<br />
- download checkers maven for java<br />
- download checkers ant for java<br />
- download checkers junit for java<br />
- download checkers testng for java<br />
- download checkers selenium for java<br />
- download checkers spring boot for java <br />
- download checkers hibernate for java <br />
- download checkers tomcat server for java</p>
- <p>Some of the features and benefits of Java are:</p>
- <ul>
- <li>Java is open source. This means that anyone can access and modify the source code of Java and use it for free. This also encourages collaboration and innovation among developers and users.</li>
- <li>Java is community driven. There are millions of Java developers and users around the world who contribute to the improvement and evolution of Java. There are also many online resources, forums, tutorials, and courses that help beginners and experts learn and use Java.</li>
- <li>Java is fast and high-performance. Java uses a virtual machine (JVM) that converts the source code into bytecode, which can be executed by any platform that has a JVM installed. This makes Java portable and efficient. Java also supports multithreading, which allows multiple tasks to run concurrently and utilize the CPU resources.</li>
- <li>Java is easy to learn. Java has a simple and clear syntax that is based on C and C++. It also has many built-in libraries and frameworks that provide ready-made solutions for common problems. Java follows the principle of "write once, run anywhere", which means that the same code can work on different platforms without any changes.</li>
- <li>Java is statically typed. This means that the data types of variables are checked at compile time, which helps to avoid errors and bugs at runtime. Java also supports type inference, which allows the compiler to infer the data types of variables without explicit declaration.</li>
- <li>Java has expert leadership. Java is maintained and developed by Oracle Corporation, which is a leading software company that provides support and updates for Java. Oracle also collaborates with other organizations and communities to ensure the quality and security of Java.</li>
- </ul>
- <h2>How to Install Java on Windows</h2>
- <p>If you want to download and run checkers for Java on your Windows system, you need to install Java first. Here are the steps to install Java on Windows:</p>
- <ol>
- <li>Download the JDK installer. Go to the [Oracle Java Downloads page](^1^) and click Accept License Agreement. Under the Download menu, click the x64 Installer download link that corresponds to your version of Windows. Save the file jdk-20.interim.update.patch_windows-x64_bin.exe to your computer.</li>
- <li>Run the downloaded file. Double-click the downloaded file to start the installation. Click Yes in the User Account Control prompt. The installation wizard will appear on your screen.</li>
- <li>Configure the installation wizard. Click Next to proceed to the next step. Choose the destination folder for the Java installation files or stick to the default path. Click Next to proceed. Wait for the wizard to finish the installation process until the Successfully Installed message appears. Click Close to exit the wizard.</li>
- <li>Set environmental variables in Java. Open the Start menu and search for environment variables. Select the Edit the system environment variables result. In the System Properties window, under the Advanced tab, click Environment Variables... Under the System variables category, select the Path variable and click Edit... Click the New button and enter the path to the Java bin directory: `C:\Program Files\Java\jdk-20\bin`. Click OK to save the changes.</li>
- </ol>
- <h2>How to Download and Run Checkers for Java</h2>
- <p>After installing Java on your Windows system, you can download and run checkers for Java. Here are the steps to do so:</p>
- <ol>
- <li>Download checkers for Java source code. Go to [GitHub](^2^) and find the repository named DevonMcGrath/Java-Checkers. Click on the green Code button, then click on Download ZIP button, then save it on your computer.</li>
- <li>Extract checkers for Java source code files from ZIP file into a folder named CheckersForJava on your computer.</li>
- <li>Compile checkers for Java source code files into class files using javac command in Command Prompt. Open Command Prompt by typing cmd in Start menu search bar and press Enter. Navigate to the CheckersForJava folder by typing cd followed by the path to the folder, for example: `cd C:\Users\YourName\Downloads\CheckersForJava`. Press Enter. Type javac followed by the name of the main source code file, which is Checkers.java, for example: `javac Checkers.java`. Press Enter. This will compile all the source code files into class files and store them in the same folder.</li>
- <li>Run checkers for Java class files using java command in Command Prompt. In the same Command Prompt window, type java followed by the name of the main class file, which is Checkers, for example: `java Checkers`. Press Enter. This will launch the checkers for Java application in a new window.</li>
- <li>Enjoy playing checkers for Java. You can choose to play against the computer or another human player, either online or offline. You can also adjust the game settings, such as the board size, the difficulty level, the game mode, the theme, the sound, and the statistics. You can also pause, resume, restart, or quit the game at any time.</li>
- </ol>
- <h2>Tips and Tricks for Playing Checkers for Java</h2>
- <p>Now that you know how to download and run checkers for Java, here are some tips and tricks for playing checkers for Java:</p>
- <ul>
- <li>Practice makes perfect. The more you play checkers, the more you will improve your skills and strategies. You can practice against the computer or another human player, either online or offline. You can also choose different difficulty levels and game modes to challenge yourself.</li>
- <li>Think ahead. Checkers is a game of planning and foresight. You should always try to anticipate your opponent's moves and counter them with your own. You should also try to control the center of the board and create opportunities for multiple jumps.</li>
- <li>Protect your pieces. You should avoid leaving your pieces vulnerable to capture by your opponent. You should also try to protect your king pieces, as they are more powerful and versatile than regular pieces.</li>
- <li>Use your king pieces wisely. King pieces can move in both directions and jump over any piece in their way. You should use your king pieces to attack your opponent's pieces, especially their king pieces. You should also use your king pieces to block your opponent's moves and prevent them from reaching the king row.</li>
- <li>Customize your game settings. Checkers for Java allows you to customize your game settings according to your preferences. You can change the board size, the difficulty level, the game mode, the theme, the sound, and the statistics. You can also save and load your game progress at any time.</li>
- </ul>
- <h2>Conclusion</h2>
- <p>In this article, we have shown you how to download and run checkers for Java on your Windows system. We have also provided you with some tips and tricks for playing checkers for Java. Checkers for Java is a free and open-source application that allows you to play checkers against the computer or another human player, either online or offline. Checkers for Java has many features and options, such as different board sizes, difficulty levels, game modes, themes, sounds, and statistics.</p>
- <p>If you are looking for a fun and easy game that can be played by anyone, anywhere, you should try checkers for Java. Checkers is a classic board game that involves moving pieces diagonally across a grid of squares, capturing the opponent's pieces by jumping over them, and reaching the other side of the board to become a king. Checkers is also known as draughts in some countries, and it has many variations and rules.</p>
- <p>We hope you enjoyed this article and learned something new. Thank you for reading!</p>
- <h3>FAQs</h3>
- <p>Here are some frequently asked questions related to checkers for Java:</p>
- <ol>
- <li><b>What are some other games that I can play with Java?</b></li>
- <p>There are many other games that you can play with Java, such as chess, sudoku, minesweeper, snake, tetris, pacman, pong, tic-tac-toe, hangman, and many others. You can find many free and open-source Java games online or create your own using Java programming language.</p>
- <li><b>How can I update my Java version?</b></li>
118
- <p>You can update your Java version by visiting the Oracle Java Downloads page and downloading and installing the latest version of Java for your system. You can also check for updates automatically by opening the Java Control Panel and clicking on the Update tab. You can also uninstall older versions of Java from your system to avoid security risks and performance issues.</p>
119
- <li><b>How can I play checkers for Java online with another human player?</b></li>
120
- <p>You can play checkers for Java online with another human player by choosing the Online mode in the game settings. You will need to enter your name and a server address to connect to. You can either join an existing game or create a new game and wait for another player to join. You can also chat with your opponent during the game using the Chat button.</p>
121
- <li><b>How can I change the theme of checkers for Java?</b></li>
122
- <p>You can change the theme of checkers for Java by choosing the Theme option in the game settings. You can choose from different themes, such as Classic, Wood, Metal, Marble, and Neon. You can also change the color of the board and the pieces according to your preference.</p>
123
- <li><b>How can I view my statistics in checkers for Java?</b></li>
124
- <p>You can view your statistics in checkers for Java by choosing the Statistics option in the game settings. You can see your total number of games played, won, lost, and drawn, as well as your win percentage and rating. You can also see your best and worst moves, your longest and shortest games, and your average moves per game.</p>
125
- <li><b>How can I report a bug or suggest a feature in checkers for Java?</b></li>
126
- <p>You can report a bug or suggest a feature in checkers for Java by visiting GitHub and finding the repository named DevonMcGrath/Java-Checkers. Click on the Issues tab, then click on the New issue button. Fill out the title and description of your issue or suggestion, then click on the Submit new issue button. The developer will review your feedback and respond accordingly.</p>
127
- </ol>
 
spaces/1phancelerku/anime-remove-background/Download Love O2O and Join the Fun of A Chinese Ghost Story Online Game.md DELETED
@@ -1,102 +0,0 @@
2
- <h1>Download Love 020 Dramacool: A Guide to Watch the Hit Chinese Drama Online</h1>
3
- <p>If you are a fan of Chinese dramas, you might have heard of Love 020, a romantic comedy series that has taken the internet by storm. But how can you watch this amazing show online? And how can you download it for offline viewing? In this article, we will tell you everything you need to know about downloading Love 020 on Dramacool, one of the best websites to watch Asian dramas for free.</p>
4
- <h2>What is Love 020?</h2>
5
- <p>Love 020 is a 2016 Chinese drama based on the web novel "A Slight Smile Is Very Charming" by Gu Man. It revolves around the love story of a first-year and a final year student who fell in love with each other while playing an online video game. It follows the couple as they overcome different challenges and numerous obstacles in their online and offline worlds.</p>
7
- <h3>The plot of Love 020</h3>
8
- <p>Bei Wei Wei (Zheng Shuang) is a beautiful and smart computer science major who loves playing online games. She is the top player in her guild and has a loyal online husband, Zhenshui Wuxiang (Zhang He). However, he dumps her for another girl, leaving her heartbroken. Soon after, she receives a message from the number one player in the game, Yixiao Naihe, who proposes to be her online husband. She accepts, thinking that it is just a game.</p>
9
- <p>Little does she know that Yixiao Naihe is actually Xiao Nai (Yang Yang), her senior in college and the most popular student on campus. He is a gaming expert, a basketball star, an academic genius, and a successful entrepreneur. He falls in love with Wei Wei at first sight when he sees her playing the game in an internet cafe. He decides to pursue her both online and offline, using his skills and charm.</p>
10
- <p>Will their online romance blossom into a real-life relationship? Will they be able to balance their studies, careers, and love lives? Will they face any troubles from their rivals, friends, or families? Watch Love 020 to find out!</p>
11
- <h3>The cast of Love 020</h3>
12
- <p>The cast of Love 020 consists of some of the most talented and popular actors and actresses in China. Here are some of them:</p>
13
- <ul>
14
- <li>Yang Yang as Xiao Nai / Yixiao Naihe: He is the male lead of the drama. He is handsome, smart, athletic, and rich. He is the president of a gaming company and the leader of a famous online guild. He falls in love with Wei Wei and pursues her relentlessly.</li>
15
- <li>Zheng Shuang as Bei Wei Wei / Lu Wei Wei Wei: She is the female lead of the drama. She is beautiful, intelligent, and kind. She is a computer science major and an online gaming expert. She becomes Xiao Nai's online wife and real-life girlfriend.</li>
16
- <li>Bai Yu as Cao Guang / Zhen Shao Xiang: He is Xiao Nai's rival in love and business. He is also a computer science major and a gaming company CEO. He likes Wei Wei and tries to win her over.</li>
17
- <li>Mao Xiao Tong as Er Xi / Yao Yao: She is Wei Wei's best friend and roommate. She is a literature major and an online game fan. She is bubbly, cheerful, and loyal.</li>
18
- <li>Zhang Bin Bin as KO / Yu Ban Shan: He is Xiao Nai's best friend and business partner. He is a computer science major and a gaming genius. He is cool, calm, and witty.</li>
19
- <li>Niu Jun Feng as Hao Mei / Qiu Yong Hou: He is Xiao Nai's friend and colleague. He is a computer science major and a gaming programmer. He is cute, naive, and funny.</li>
20
- <li>Zheng Ye Cheng as Zhen Shui Wu Xiang / Yu Gong: He is Wei Wei's ex-online husband and Cao Guang's friend. He is a computer science major and a gaming developer. He is arrogant, selfish, and jealous.</li>
21
- </ul>
22
- <h3>The popularity of Love 020</h3>
23
- <p>Love 020 is one of the most popular and successful Chinese dramas of all time. It has received rave reviews from critics and audiences alike, for its sweet romance, hilarious comedy, thrilling action, and stunning visuals. It has also won several awards, such as the Best Foreign TV Series at the Seoul International Drama Awards in 2017.</p>
24
- <p>Love 020 has also gained a huge fan base both in China and abroad, especially among the young generation who can relate to the online gaming culture and the campus life. It has been viewed over 24 billion times on various online platforms, making it one of the most watched Chinese dramas ever. It has also been adapted into a movie, a spin-off series, and a Thai remake.</p>
50
- <h2>Why watch Love 020 on Dramacool?</h2>
51
- <p>If you are interested in watching Love 020 online, you might be wondering where to find it. There are many websites that offer Asian dramas for streaming or downloading, but not all of them are reliable or safe. Some of them might have low-quality videos, annoying ads, broken links, or even viruses. That's why we recommend you to watch Love 020 on Dramacool, one of the best websites to watch Asian dramas for free.</p>
52
- <h3>The benefits of Dramacool</h3>
53
- <p>Dramacool is a website that provides a large collection of Asian dramas, movies, shows, and anime from various countries, such as China, Korea, Japan, Taiwan, Thailand, and more. You can watch them online or download them for offline viewing. Here are some of the benefits of using Dramacool:</p>
54
- <ul>
55
- <li>It is free: You don't have to pay anything to watch or download your favorite dramas on Dramacool. You can enjoy unlimited access to thousands of titles without any subscription or registration.</li>
56
- <li>It is fast: You don't have to wait for long buffering or loading times to watch your favorite dramas on Dramacool. You can stream or download them in high speed and high quality.</li>
57
- <li>It is updated: You don't have to worry about missing out on the latest episodes or releases of your favorite dramas on Dramacool. You can find them as soon as they are available on the website.</li>
58
- <li>It is easy: You don't have to struggle with complicated navigation or search functions to find your favorite dramas on Dramacool. You can browse them by genre, country, year, popularity, or alphabetically.</li>
59
- </ul> <h3>The features of Dramacool</h3>
60
- <p>Dramacool is not only a website that provides a lot of Asian dramas, but also a website that offers a lot of features to enhance your viewing experience. Here are some of the features of Dramacool:</p>
61
- <ul>
62
- <li>It has multiple servers: You can choose from different servers to watch or download your favorite dramas on Dramacool. You can switch to another server if one is not working or slow.</li>
63
- <li>It has multiple languages: You can watch your favorite dramas on Dramacool with subtitles in various languages, such as English, Spanish, French, Arabic, and more. You can also change the font size, color, and style of the subtitles.</li>
64
- <li>It has multiple devices: You can watch your favorite dramas on Dramacool on any device, such as a computer, a laptop, a tablet, or a smartphone. You can also cast them to your TV or Chromecast.</li>
65
- <li>It has multiple genres: You can find your favorite dramas on Dramacool in different genres, such as romance, comedy, action, thriller, horror, fantasy, historical, and more. You can also filter them by ratings, reviews, or recommendations.</li>
66
- </ul>
67
- <h3>The drawbacks of Dramacool</h3>
68
- <p>Dramacool is a great website to watch Asian dramas for free, but it is not perfect. It also has some drawbacks that you should be aware of before using it. Here are some of the drawbacks of Dramacool:</p>
69
- <ul>
70
- <li>It is illegal: You should know that watching or downloading dramas on Dramacool is illegal, as it violates the copyright laws and the intellectual property rights of the original creators and distributors. You might face legal consequences or penalties if you are caught using it.</li>
71
- <li>It is risky: You should also know that watching or downloading dramas on Dramacool is risky, as it might expose your device or data to malware, viruses, spyware, or hackers. You might lose your personal information or damage your device if you are not careful.</li>
72
- <li>It is unreliable: You should also know that watching or downloading dramas on Dramacool is unreliable, as it might have broken links, missing episodes, wrong subtitles, low-quality videos, or annoying ads. You might not enjoy your viewing experience if you encounter these problems.</li>
73
- </ul>
74
- <h2>How to download Love 020 on Dramacool?</h2>
75
- <p>If you still want to watch Love 020 on Dramacool despite its drawbacks, you should follow these steps to download it safely and easily:</p>
76
- <h3>Step 1: Visit the official website of Dramacool</h3>
77
- <p>The first step is to visit the official website of Dramacool at https://www.dramacool9.co/. You can use any browser or device to access it. However, you should make sure that you have a good internet connection and a reliable antivirus software installed on your device.</p>
78
- <h3>Step 2: Search for Love 020 in the search bar</h3>
79
- <p>The second step is to search for Love 020 in the search bar at the top right corner of the website. You can type in "Love 020" or "Just One Smile Is Very Alluring" (the alternative title of the drama) and hit enter. You will see a list of results related to the drama.</p>
80
- <h3>Step 3: Choose the episode you want to download</h3>
81
- <p>The third step is to choose the episode you want to download from the list of results. You can click on the title or the image of the episode to open it. You will see a video player with some options below it.</p> <h3>Step 4: Click on the download button and select the quality and format</h3>
82
- <p>The fourth step is to click on the download button below the video player. You will see a pop-up window with some options to choose from. You can select the quality and format of the video you want to download, such as HD, SD, MP4, or MKV. You can also see the size and duration of the video.</p>
83
- <h3>Step 5: Enjoy watching Love 020 offline</h3>
84
- <p>The fifth and final step is to enjoy watching Love 020 offline. You can click on the download link or scan the QR code to start downloading the video to your device. You can also copy and paste the link to your download manager or browser. Once the download is complete, you can watch Love 020 anytime and anywhere you want.</p>
85
- <h2>Conclusion</h2>
86
- <p>Love 020 is a wonderful Chinese drama that you should not miss. It has a captivating plot, a charming cast, and a beautiful soundtrack. It will make you laugh, cry, and swoon over the adorable couple. If you want to watch Love 020 online, you can use Dramacool, a free website that offers a lot of Asian dramas. However, you should also be aware of the drawbacks of using Dramacool, such as its illegality, riskiness, and unreliability. If you want to download Love 020 on Dramacool, you can follow the steps we have provided in this article. We hope you enjoy watching Love 020 on Dramacool!</p>
87
- <h2>FAQs</h2>
88
- <p>Here are some frequently asked questions about downloading Love 020 on Dramacool:</p>
89
- <ul>
90
- <li>Q: Is it safe to download Love 020 on Dramacool?</li>
91
- <li>A: It depends on how careful you are when using Dramacool. You should always use a reliable antivirus software and a VPN service to protect your device and data from malware, viruses, spyware, or hackers. You should also avoid clicking on any suspicious links or ads that might redirect you to harmful websites.</li>
92
- <li>Q: Is it legal to download Love 020 on Dramacool?</li>
93
- <li>A: No, it is not legal to download Love 020 on Dramacool. You are violating the copyright laws and the intellectual property rights of the original creators and distributors of the drama. You might face legal consequences or penalties if you are caught using Dramacool.</li>
94
- <li>Q: How many episodes are there in Love 020?</li>
95
- <li>A: There are 30 episodes in Love 020, each lasting about 45 minutes. You can watch them all on Dramacool for free.</li>
96
- <li>Q: Where can I find the subtitles for Love 020?</li>
97
- <li>A: You can find the subtitles for Love 020 on Dramacool in various languages, such as English, Spanish, French, Arabic, and more. You can also change the font size, color, and style of the subtitles according to your preference.</li>
98
- <li>Q: What are some other websites to watch or download Love 020?</li>
99
- <li>A: Some other websites to watch or download Love 020 are Kissasian, Viki, Netflix, WeTV, iQiyi, and more. However, some of them might require a subscription or registration fee to access their content.</li>
100
- </ul>
 
spaces/1toTree/lora_test/ppdiffusers/schedulers/scheduling_repaint.py DELETED
@@ -1,321 +0,0 @@
1
- # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
2
- # Copyright 2022 ETH Zurich Computer Vision Lab and The HuggingFace Team. All rights reserved.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- import math
17
- from dataclasses import dataclass
18
- from typing import List, Optional, Tuple, Union
19
-
20
- import numpy as np
21
- import paddle
22
- import paddle.nn.functional as F
23
-
24
- from ..configuration_utils import ConfigMixin, register_to_config
25
- from ..utils import BaseOutput
26
- from .scheduling_utils import SchedulerMixin
27
-
28
-
29
- @dataclass
30
- class RePaintSchedulerOutput(BaseOutput):
31
- """
32
- Output class for the scheduler's step function output.
33
-
34
- Args:
35
- prev_sample (`paddle.Tensor` of shape `(batch_size, num_channels, height, width)` for images):
36
- Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
37
- denoising loop.
38
- pred_original_sample (`paddle.Tensor` of shape `(batch_size, num_channels, height, width)` for images):
39
- The predicted denoised sample (x_{0}) based on the model output from
40
- the current timestep. `pred_original_sample` can be used to preview progress or for guidance.
41
- """
42
-
43
- prev_sample: paddle.Tensor
44
- pred_original_sample: paddle.Tensor
45
-
46
-
47
- def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999):
48
- """
49
- Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
50
- (1-beta) over time from t = [0,1].
51
-
52
- Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
53
- to that part of the diffusion process.
54
-
55
-
56
- Args:
57
- num_diffusion_timesteps (`int`): the number of betas to produce.
58
- max_beta (`float`): the maximum beta to use; use values lower than 1 to
59
- prevent singularities.
60
-
61
- Returns:
62
- betas (`np.ndarray`): the betas used by the scheduler to step the model outputs
63
- """
64
-
65
- def alpha_bar(time_step):
66
- return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2
67
-
68
- betas = []
69
- for i in range(num_diffusion_timesteps):
70
- t1 = i / num_diffusion_timesteps
71
- t2 = (i + 1) / num_diffusion_timesteps
72
- betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
73
- return paddle.to_tensor(betas, dtype="float32")
74
-
75
-
76
- class RePaintScheduler(SchedulerMixin, ConfigMixin):
77
- """
78
- RePaint is a scheduler for DDPM inpainting inside a given mask.
79
-
80
- [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
81
- function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
82
- [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and
83
- [`~SchedulerMixin.from_pretrained`] functions.
84
-
85
- For more details, see the original paper: https://arxiv.org/pdf/2201.09865.pdf
86
-
87
- Args:
88
- num_train_timesteps (`int`): number of diffusion steps used to train the model.
89
- beta_start (`float`): the starting `beta` value of inference.
90
- beta_end (`float`): the final `beta` value.
91
- beta_schedule (`str`):
92
- the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
93
- `linear`, `scaled_linear`, `squaredcos_cap_v2`, or `sigmoid`.
94
- eta (`float`):
95
- The weight of noise for added noise in a diffusion step. Its value is between 0.0 and 1.0; 0.0 is DDIM and
96
- 1.0 is the DDPM scheduler, respectively.
97
- trained_betas (`np.ndarray`, optional):
98
- option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc.
99
- variance_type (`str`):
100
- options to clip the variance used when adding noise to the denoised sample. Choose from `fixed_small`,
101
- `fixed_small_log`, `fixed_large`, `fixed_large_log`, `learned` or `learned_range`.
102
- clip_sample (`bool`, default `True`):
103
- option to clip predicted sample between -1 and 1 for numerical stability.
104
-
105
- """
106
-
107
- order = 1
108
-
109
- @register_to_config
110
- def __init__(
111
- self,
112
- num_train_timesteps: int = 1000,
113
- beta_start: float = 0.0001,
114
- beta_end: float = 0.02,
115
- beta_schedule: str = "linear",
116
- eta: float = 0.0,
117
- trained_betas: Optional[np.ndarray] = None,
118
- clip_sample: bool = True,
119
- ):
120
- if trained_betas is not None:
121
- self.betas = paddle.to_tensor(trained_betas)
122
- elif beta_schedule == "linear":
123
- self.betas = paddle.linspace(beta_start, beta_end, num_train_timesteps, dtype="float32")
124
- elif beta_schedule == "scaled_linear":
125
- # this schedule is very specific to the latent diffusion model.
126
- self.betas = paddle.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype="float32") ** 2
127
- elif beta_schedule == "squaredcos_cap_v2":
128
- # Glide cosine schedule
129
- self.betas = betas_for_alpha_bar(num_train_timesteps)
130
- elif beta_schedule == "sigmoid":
131
- # GeoDiff sigmoid schedule
132
- betas = paddle.linspace(-6, 6, num_train_timesteps)
133
- self.betas = F.sigmoid(betas) * (beta_end - beta_start) + beta_start
134
- else:
135
- raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
136
-
137
- self.alphas = 1.0 - self.betas
138
- self.alphas_cumprod = paddle.cumprod(self.alphas, 0)
139
- self.one = paddle.to_tensor(1.0)
140
-
141
- self.final_alpha_cumprod = paddle.to_tensor(1.0)
142
-
143
- # standard deviation of the initial noise distribution
144
- self.init_noise_sigma = 1.0
145
-
146
- # setable values
147
- self.num_inference_steps = None
148
- self.timesteps = paddle.to_tensor(np.arange(0, num_train_timesteps)[::-1].copy())
149
-
150
- self.eta = eta
151
-
152
- def scale_model_input(self, sample: paddle.Tensor, timestep: Optional[int] = None) -> paddle.Tensor:
153
- """
154
- Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
155
- current timestep.
156
-
157
- Args:
158
- sample (`paddle.Tensor`): input sample
159
- timestep (`int`, optional): current timestep
160
-
161
- Returns:
162
- `paddle.Tensor`: scaled input sample
163
- """
164
- return sample
165
-
166
- def set_timesteps(
167
- self,
168
- num_inference_steps: int,
169
- jump_length: int = 10,
170
- jump_n_sample: int = 10,
171
- ):
172
- num_inference_steps = min(self.config.num_train_timesteps, num_inference_steps)
173
- self.num_inference_steps = num_inference_steps
174
-
175
- timesteps = []
176
-
177
- jumps = {}
178
- for j in range(0, num_inference_steps - jump_length, jump_length):
179
- jumps[j] = jump_n_sample - 1
180
-
181
- t = num_inference_steps
182
- while t >= 1:
183
- t = t - 1
184
- timesteps.append(t)
185
-
186
- if jumps.get(t, 0) > 0:
187
- jumps[t] = jumps[t] - 1
188
- for _ in range(jump_length):
189
- t = t + 1
190
- timesteps.append(t)
191
-
192
- timesteps = np.array(timesteps) * (self.config.num_train_timesteps // self.num_inference_steps)
193
- self.timesteps = paddle.to_tensor(timesteps)
194
-
195
- def _get_variance(self, t):
196
- prev_timestep = t - self.config.num_train_timesteps // self.num_inference_steps
197
-
198
- alpha_prod_t = self.alphas_cumprod[t]
199
- alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
200
- beta_prod_t = 1 - alpha_prod_t
201
- beta_prod_t_prev = 1 - alpha_prod_t_prev
202
-
203
- # For t > 0, compute predicted variance βt (see formula (6) and (7) from
204
- # https://arxiv.org/pdf/2006.11239.pdf) and sample from it to get
205
- # previous sample x_{t-1} ~ N(pred_prev_sample, variance) == add
206
- # variance to pred_sample
207
- # Is equivalent to formula (16) in https://arxiv.org/pdf/2010.02502.pdf
208
- # without eta.
209
- # variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * self.betas[t]
210
- variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev)
211
-
212
- return variance
213
-
214
- def step(
215
- self,
216
- model_output: paddle.Tensor,
217
- timestep: int,
218
- sample: paddle.Tensor,
219
- original_image: paddle.Tensor,
220
- mask: paddle.Tensor,
221
- generator: Optional[Union[paddle.Generator, List[paddle.Generator]]] = None,
222
- return_dict: bool = True,
223
- ) -> Union[RePaintSchedulerOutput, Tuple]:
224
- """
225
- Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
226
- process from the learned model outputs (most often the predicted noise).
227
-
228
- Args:
229
- model_output (`paddle.Tensor`): direct output from learned
230
- diffusion model.
231
- timestep (`int`): current discrete timestep in the diffusion chain.
232
- sample (`paddle.Tensor`):
233
- current instance of sample being created by diffusion process.
234
- original_image (`paddle.Tensor`):
235
- the original image to inpaint on.
236
- mask (`paddle.Tensor`):
237
- the mask where 0.0 values define which part of the original image to inpaint (change).
238
- generator (`paddle.Generator`, *optional*): random number generator.
239
- return_dict (`bool`): option for returning tuple rather than
240
- DDPMSchedulerOutput class
241
-
242
- Returns:
243
- [`~schedulers.scheduling_utils.RePaintSchedulerOutput`] or `tuple`:
244
- [`~schedulers.scheduling_utils.RePaintSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When
245
- returning a tuple, the first element is the sample tensor.
246
-
247
- """
248
- t = timestep
249
- prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps
250
-
251
- # 1. compute alphas, betas
252
- alpha_prod_t = self.alphas_cumprod[t]
253
- alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
254
- beta_prod_t = 1 - alpha_prod_t
255
-
256
- # 2. compute predicted original sample from predicted noise also called
257
- # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
258
- pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
259
-
260
- # 3. Clip "predicted x_0"
261
- if self.config.clip_sample:
262
- pred_original_sample = paddle.clip(pred_original_sample, -1, 1)
263
-
264
- # We choose to follow RePaint Algorithm 1 to get x_{t-1}, however we
265
- # substitute formula (7) in the algorithm coming from DDPM paper
266
- # (formula (4) Algorithm 2 - Sampling) with formula (12) from DDIM paper.
267
- # DDIM schedule gives the same results as DDPM with eta = 1.0
268
- # Noise is being reused in 7. and 8., but no impact on quality has
269
- # been observed.
270
-
271
- # 5. Add noise
272
- noise = paddle.randn(model_output.shape, dtype=model_output.dtype, generator=generator)
273
- std_dev_t = self.eta * self._get_variance(timestep) ** 0.5
274
-
275
- variance = 0
276
- if t > 0 and self.eta > 0:
277
- variance = std_dev_t * noise
278
-
279
- # 6. compute "direction pointing to x_t" of formula (12)
280
- # from https://arxiv.org/pdf/2010.02502.pdf
281
- pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
282
-
283
- # 7. compute x_{t-1} of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
284
- prev_unknown_part = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction + variance
285
-
286
- # 8. Algorithm 1 Line 5 https://arxiv.org/pdf/2201.09865.pdf
287
- prev_known_part = (alpha_prod_t_prev**0.5) * original_image + ((1 - alpha_prod_t_prev) ** 0.5) * noise
288
-
289
- # 9. Algorithm 1 Line 8 https://arxiv.org/pdf/2201.09865.pdf
290
- pred_prev_sample = mask * prev_known_part + (1.0 - mask) * prev_unknown_part
291
-
292
- if not return_dict:
293
- return (
294
- pred_prev_sample,
295
- pred_original_sample,
296
- )
297
-
298
- return RePaintSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
299
-
300
- def undo_step(self, sample, timestep, generator=None):
301
- n = self.config.num_train_timesteps // self.num_inference_steps
302
-
303
- for i in range(n):
304
- beta = self.betas[timestep + i]
305
- noise = paddle.randn(sample.shape, dtype=sample.dtype, generator=generator)
306
-
307
- # 10. Algorithm 1 Line 10 https://arxiv.org/pdf/2201.09865.pdf
308
- sample = (1 - beta) ** 0.5 * sample + beta**0.5 * noise
309
-
310
- return sample
311
-
312
- def add_noise(
313
- self,
314
- original_samples: paddle.Tensor,
315
- noise: paddle.Tensor,
316
- timesteps: paddle.Tensor,
317
- ) -> paddle.Tensor:
318
- raise NotImplementedError("Use `DDPMScheduler.add_noise()` to train for sampling with RePaint.")
319
-
320
- def __len__(self):
321
- return self.config.num_train_timesteps
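
For reference, the sampling loop this scheduler is meant to drive alternates denoising steps with re-noising jumps. Below is a minimal sketch of that loop; the `unet` noise-prediction model and the prepared `original_image` / `mask` tensors are assumed placeholders, not code from this repository:

```python
import paddle

# assumed available: scheduler = RePaintScheduler(...), a trained `unet`,
# plus `original_image` and a binary `mask` (1 = keep, 0 = inpaint)
scheduler.set_timesteps(num_inference_steps=250, jump_length=10, jump_n_sample=10)

sample = paddle.randn(original_image.shape)  # start from pure noise
t_last = scheduler.timesteps[0] + 1
for t in scheduler.timesteps:
    if t < t_last:
        # ordinary denoising step (RePaint Algorithm 1, lines 4-8)
        model_output = unet(sample, t)
        sample = scheduler.step(model_output, t, sample,
                                original_image, mask).prev_sample
    else:
        # the timestep went up again: re-noise the sample (Algorithm 1, line 10)
        sample = scheduler.undo_step(sample, t_last)
    t_last = t
```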
 
spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/commons/conformer/espnet_transformer_attn.py DELETED
@@ -1,186 +0,0 @@
1
- #!/usr/bin/env python3
2
- # -*- coding: utf-8 -*-
3
-
4
- # Copyright 2019 Shigeki Karita
5
- # Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
6
-
7
- """Multi-Head Attention layer definition."""
8
-
9
- import math
10
-
11
- import numpy
12
- import torch
13
- from torch import nn
14
-
15
-
16
- class MultiHeadedAttention(nn.Module):
17
- """Multi-Head Attention layer.
18
- Args:
19
- n_head (int): The number of heads.
20
- n_feat (int): The number of features.
21
- dropout_rate (float): Dropout rate.
22
- """
23
-
24
- def __init__(self, n_head, n_feat, dropout_rate):
25
- """Construct an MultiHeadedAttention object."""
26
- super(MultiHeadedAttention, self).__init__()
27
- assert n_feat % n_head == 0
28
- # We assume d_v always equals d_k
29
- self.d_k = n_feat // n_head
30
- self.h = n_head
31
- self.linear_q = nn.Linear(n_feat, n_feat)
32
- self.linear_k = nn.Linear(n_feat, n_feat)
33
- self.linear_v = nn.Linear(n_feat, n_feat)
34
- self.linear_out = nn.Linear(n_feat, n_feat)
35
- self.attn = None
36
- self.dropout = nn.Dropout(p=dropout_rate)
37
-
38
- def forward_qkv(self, query, key, value):
39
- """Transform query, key and value.
40
- Args:
41
- query (torch.Tensor): Query tensor (#batch, time1, size).
42
- key (torch.Tensor): Key tensor (#batch, time2, size).
43
- value (torch.Tensor): Value tensor (#batch, time2, size).
44
- Returns:
45
- torch.Tensor: Transformed query tensor (#batch, n_head, time1, d_k).
46
- torch.Tensor: Transformed key tensor (#batch, n_head, time2, d_k).
47
- torch.Tensor: Transformed value tensor (#batch, n_head, time2, d_k).
48
- """
49
- n_batch = query.size(0)
50
- q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)
51
- k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)
52
- v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)
53
- q = q.transpose(1, 2) # (batch, head, time1, d_k)
54
- k = k.transpose(1, 2) # (batch, head, time2, d_k)
55
- v = v.transpose(1, 2) # (batch, head, time2, d_k)
56
-
57
- return q, k, v
58
-
59
- def forward_attention(self, value, scores, mask):
60
- """Compute attention context vector.
61
- Args:
62
- value (torch.Tensor): Transformed value (#batch, n_head, time2, d_k).
63
- scores (torch.Tensor): Attention score (#batch, n_head, time1, time2).
64
- mask (torch.Tensor): Mask (#batch, 1, time2) or (#batch, time1, time2).
65
- Returns:
66
- torch.Tensor: Transformed value (#batch, time1, d_model)
67
- weighted by the attention score (#batch, time1, time2).
68
- """
69
- n_batch = value.size(0)
70
- if mask is not None:
71
- mask = mask.unsqueeze(1).eq(0) # (batch, 1, *, time2)
72
- min_value = float(
73
- numpy.finfo(torch.tensor(0, dtype=scores.dtype).numpy().dtype).min
74
- )
75
- scores = scores.masked_fill(mask, min_value)
76
- self.attn = torch.softmax(scores, dim=-1).masked_fill(
77
- mask, 0.0
78
- ) # (batch, head, time1, time2)
79
- else:
80
- self.attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2)
81
-
82
- p_attn = self.dropout(self.attn)
83
- x = torch.matmul(p_attn, value) # (batch, head, time1, d_k)
84
- x = (
85
- x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)
86
- ) # (batch, time1, d_model)
87
-
88
- return self.linear_out(x) # (batch, time1, d_model)
89
-
90
- def forward(self, query, key, value, mask):
91
- """Compute scaled dot product attention.
92
- Args:
93
- query (torch.Tensor): Query tensor (#batch, time1, size).
94
- key (torch.Tensor): Key tensor (#batch, time2, size).
95
- value (torch.Tensor): Value tensor (#batch, time2, size).
96
- mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
97
- (#batch, time1, time2).
98
- Returns:
99
- torch.Tensor: Output tensor (#batch, time1, d_model).
100
- """
101
- q, k, v = self.forward_qkv(query, key, value)
102
- scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)
103
- return self.forward_attention(v, scores, mask)
104
-
105
-
106
- class RelPositionMultiHeadedAttention(MultiHeadedAttention):
107
- """Multi-Head Attention layer with relative position encoding.
108
- Paper: https://arxiv.org/abs/1901.02860
109
- Args:
110
- n_head (int): The number of heads.
111
- n_feat (int): The number of features.
112
- dropout_rate (float): Dropout rate.
113
- """
114
-
115
- def __init__(self, n_head, n_feat, dropout_rate):
116
- """Construct an RelPositionMultiHeadedAttention object."""
117
- super().__init__(n_head, n_feat, dropout_rate)
118
- # linear transformation for positional encoding
119
- self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)
120
- # these two learnable biases are used in matrix c and matrix d
121
- # as described in https://arxiv.org/abs/1901.02860 Section 3.3
122
- self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))
123
- self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))
124
- torch.nn.init.xavier_uniform_(self.pos_bias_u)
125
- torch.nn.init.xavier_uniform_(self.pos_bias_v)
126
-
127
- def rel_shift(self, x, zero_triu=False):
128
- """Compute relative positinal encoding.
129
- Args:
130
- x (torch.Tensor): Input tensor (batch, time, size).
131
- zero_triu (bool): If true, return the lower triangular part of the matrix.
132
- Returns:
133
- torch.Tensor: Output tensor.
134
- """
135
- zero_pad = torch.zeros((*x.size()[:3], 1), device=x.device, dtype=x.dtype)
136
- x_padded = torch.cat([zero_pad, x], dim=-1)
137
-
138
- x_padded = x_padded.view(*x.size()[:2], x.size(3) + 1, x.size(2))
139
- x = x_padded[:, :, 1:].view_as(x)
140
-
141
- if zero_triu:
142
- ones = torch.ones((x.size(2), x.size(3)))
143
- x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :]
144
-
145
- return x
146
-
147
- def forward(self, query, key, value, pos_emb, mask):
148
- """Compute 'Scaled Dot Product Attention' with rel. positional encoding.
149
- Args:
150
- query (torch.Tensor): Query tensor (#batch, time1, size).
151
- key (torch.Tensor): Key tensor (#batch, time2, size).
152
- value (torch.Tensor): Value tensor (#batch, time2, size).
153
- pos_emb (torch.Tensor): Positional embedding tensor (#batch, time2, size).
154
- mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
155
- (#batch, time1, time2).
156
- Returns:
157
- torch.Tensor: Output tensor (#batch, time1, d_model).
158
- """
159
- q, k, v = self.forward_qkv(query, key, value)
160
- q = q.transpose(1, 2) # (batch, time1, head, d_k)
161
-
162
- n_batch_pos = pos_emb.size(0)
163
- p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)
164
- p = p.transpose(1, 2) # (batch, head, time1, d_k)
165
-
166
- # (batch, head, time1, d_k)
167
- q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)
168
- # (batch, head, time1, d_k)
169
- q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)
170
-
171
- # compute attention score
172
- # first compute matrix a and matrix c
173
- # as described in https://arxiv.org/abs/1901.02860 Section 3.3
174
- # (batch, head, time1, time2)
175
- matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))
176
-
177
- # compute matrix b and matrix d
178
- # (batch, head, time1, time2)
179
- matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))
180
- matrix_bd = self.rel_shift(matrix_bd)
181
-
182
- scores = (matrix_ac + matrix_bd) / math.sqrt(
183
- self.d_k
184
- ) # (batch, head, time1, time2)
185
-
186
- return self.forward_attention(v, scores, mask)
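
As a quick shape sanity check, a small sketch (not part of this file) that runs the plain multi-head attention layer on random tensors; the relative-position variant is called the same way but additionally takes a `pos_emb` tensor:

```python
import torch

attn = MultiHeadedAttention(n_head=4, n_feat=256, dropout_rate=0.1)
x = torch.randn(2, 50, 256)                    # (batch, time, feature)
mask = torch.ones(2, 1, 50, dtype=torch.bool)  # attend to every position
out = attn(x, x, x, mask)                      # self-attention
print(out.shape)                               # torch.Size([2, 50, 256])
```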
 
spaces/AP123/CerealBoxMaker/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: CerealBoxMaker
3
- emoji: 🥛
4
- colorFrom: pink
5
- colorTo: purple
6
- sdk: gradio
7
- sdk_version: 3.47.1
8
- app_file: app.py
9
- pinned: false
10
- license: bigscience-openrail-m
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/ASJMO/freegpt/client/js/sidebar-toggler.js DELETED
@@ -1,34 +0,0 @@
1
- const sidebar = document.querySelector(".sidebar");
2
- const menuButton = document.querySelector(".menu-button");
3
-
4
- function toggleSidebar(event) {
5
- if (sidebar.classList.contains("shown")) {
6
- hideSidebar(event.target);
7
- } else {
8
- showSidebar(event.target);
9
- }
10
- window.scrollTo(0, 0);
11
- }
12
-
13
- function showSidebar(target) {
14
- sidebar.classList.add("shown");
15
- target.classList.add("rotated");
16
- document.body.style.overflow = "hidden";
17
- }
18
-
19
- function hideSidebar(target) {
20
- sidebar.classList.remove("shown");
21
- target.classList.remove("rotated");
22
- document.body.style.overflow = "auto";
23
- }
24
-
25
- menuButton.addEventListener("click", toggleSidebar);
26
-
27
- document.body.addEventListener('click', function(event) {
28
- if (event.target.matches('.conversation-title')) {
29
- const menuButtonStyle = window.getComputedStyle(menuButton);
30
- if (menuButtonStyle.display !== 'none') {
31
- hideSidebar(menuButton);
32
- }
33
- }
34
- });
 
spaces/AashishKumar/Restaurant_voice_chatbot/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Restaurant Voice Chatbot
3
- emoji: 💩
4
- colorFrom: gray
5
- colorTo: green
6
- sdk: gradio
7
- sdk_version: 3.20.1
8
- app_file: app.py
9
- pinned: false
10
- license: mit
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/GPTalk.py DELETED
@@ -1,83 +0,0 @@
1
- from __future__ import annotations
2
-
3
- import secrets, time, json
4
- from aiohttp import ClientSession
5
- from typing import AsyncGenerator
6
-
7
- from .base_provider import AsyncGeneratorProvider
8
- from .helper import format_prompt
9
-
10
-
11
- class GPTalk(AsyncGeneratorProvider):
12
- url = "https://gptalk.net"
13
- supports_gpt_35_turbo = True
14
- working = True
15
- _auth = None
16
-
17
- @classmethod
18
- async def create_async_generator(
19
- cls,
20
- model: str,
21
- messages: list[dict[str, str]],
22
- **kwargs
23
- ) -> AsyncGenerator:
24
- if not model:
25
- model = "gpt-3.5-turbo"
26
- timestamp = int(time.time())
27
- headers = {
28
- 'authority': 'gptalk.net',
29
- 'accept': '*/*',
30
- 'accept-language': 'de-DE,de;q=0.9,en-DE;q=0.8,en;q=0.7,en-US;q=0.6,nl;q=0.5,zh-CN;q=0.4,zh-TW;q=0.3,zh;q=0.2',
31
- 'content-type': 'application/json',
32
- 'origin': 'https://gptalk.net',
33
- 'sec-ch-ua': '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
34
- 'sec-ch-ua-mobile': '?0',
35
- 'sec-ch-ua-platform': '"Linux"',
36
- 'sec-fetch-dest': 'empty',
37
- 'sec-fetch-mode': 'cors',
38
- 'sec-fetch-site': 'same-origin',
39
- 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36',
40
- 'x-auth-appid': '2229',
41
- 'x-auth-openid': '',
42
- 'x-auth-platform': '',
43
- 'x-auth-timestamp': f"{timestamp}",
44
- }
45
- async with ClientSession(headers=headers) as session:
46
- if not cls._auth or cls._auth["expires_at"] < timestamp:
47
- data = {
48
- "fingerprint": secrets.token_hex(16).zfill(32),
49
- "platform": "fingerprint"
50
- }
51
- async with session.post(cls.url + "/api/chatgpt/user/login", json=data) as response:
52
- response.raise_for_status()
53
- cls._auth = (await response.json())["data"]
54
- data = {
55
- "content": format_prompt(messages),
56
- "accept": "stream",
57
- "from": 1,
58
- "model": model,
59
- "is_mobile": 0,
60
- "user_agent": headers["user-agent"],
61
- "is_open_ctx": 0,
62
- "prompt": "",
63
- "roid": 111,
64
- "temperature": 0,
65
- "ctx_msg_count": 3,
66
- "created_at": timestamp
67
- }
68
- headers = {
69
- 'authorization': f'Bearer {cls._auth["token"]}',
70
- }
71
- async with session.post(cls.url + "/api/chatgpt/chatapi/text", json=data, headers=headers) as response:
72
- response.raise_for_status()
73
- token = (await response.json())["data"]["token"]
74
- last_message = ""
75
- async with session.get(cls.url + "/api/chatgpt/chatapi/stream", params={"token": token}) as response:
76
- response.raise_for_status()
77
- async for line in response.content:
78
- if line.startswith(b"data: "):
79
- if line.startswith(b"data: [DONE]"):
80
- break
81
- message = json.loads(line[6:-1])["content"]
82
- yield message[len(last_message):]
83
- last_message = message
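
A hedged usage sketch for this provider (the import path follows the repository layout above; a reachable gptalk.net endpoint is assumed):

```python
import asyncio
from g4f.Provider.Providers.GPTalk import GPTalk

async def main():
    messages = [{"role": "user", "content": "Say hello in one sentence."}]
    # create_async_generator yields the reply incrementally as it streams in
    async for chunk in GPTalk.create_async_generator("gpt-3.5-turbo", messages):
        print(chunk, end="", flush=True)

asyncio.run(main())
```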
 
spaces/AdamOswald1/finetuned_diffusion/README.md DELETED
@@ -1,14 +0,0 @@
1
- ---
2
- title: Finetuned Diffusion
3
- emoji: 🪄🖼️
4
- colorFrom: red
5
- colorTo: pink
6
- sdk: gradio
7
- sdk_version: 3.21.0
8
- app_file: app.py
9
- pinned: true
10
- license: mit
11
- duplicated_from: anzorq/finetuned_diffusion
12
- ---
13
-
14
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/confirmdialog/ConfirmDialog.d.ts DELETED
@@ -1,127 +0,0 @@
1
- import Dialog from '../dialog/Dialog';
2
- import { GeneralCreateGameObjectCallbackType } from '../utils/build/GeneralCreateGameObjectCallbackType';
3
- import CreateBackground from '../utils/build/CreateBackground';
4
- import SimpleLabel from '../simplelabel/SimpleLabel';
5
- import CreateTextArea from '../utils/build/CreateTextArea';
6
- import Label from '../label/Label';
7
-
8
- export default ConfirmDialog;
9
-
10
- declare namespace ConfirmDialog {
11
- type AlignTypes = number | 'left' | 'center' | 'right';
12
-
13
- interface IConfigClick {
14
- mode: 0 | 1 | 'pointerup' | 'pointerdown' | 'release' | 'press',
15
- clickInterval?: number
16
- }
17
-
18
- interface IConfig {
19
- x?: number,
20
- y?: number,
21
- width?: number,
22
- height?: number,
23
-
24
- space?: {
25
- left?: number, right?: number, top?: number, bottom?: number,
26
-
27
- title?: number,
28
- titleLeft?: number,
29
- titleRight?: number,
30
-
31
- content?: number,
32
- contentLeft?: number,
33
- contentRight?: number,
34
-
35
- actionsLeft?: number,
36
- actionsRight?: number,
37
- action?: number,
38
-
39
- choices?: number,
40
- choicesLeft?: number,
41
- choicesRight?: number,
42
- choice?: number,
43
- choiceLine?: number,
44
- choiceColumn?: number, choiceRow?: number,
45
- choicesBackgroundLeft?: number,
46
- choicesBackgroundRight?: number,
47
- choicesBackgroundTop?: number,
48
- choicesBackgroundBottom?: number,
49
- };
50
-
51
- background?: CreateBackground.IConfig,
52
-
53
- title?: SimpleLabel.IConfig,
54
-
55
- content?: SimpleLabel.IConfig | CreateTextArea.IConfig,
56
-
57
- buttonMode?: 0 | 1 | 2;
58
- button?: SimpleLabel.IConfig,
59
- buttonA?: SimpleLabel.IConfig,
60
- buttonB?: SimpleLabel.IConfig,
61
-
62
- choicesType?: string,
63
- choice?: SimpleLabel.IConfig,
64
- choicesWidth?: number,
65
- choicesHeight?: number,
66
-
67
- proportion?: {
68
- title?: number,
69
- content?: number,
70
- actions?: number,
71
- choices?: number,
72
- },
73
-
74
- expand?: {
75
- title?: boolean,
76
- content?: boolean,
77
- actions?: boolean,
78
- choices?: boolean,
79
- },
80
-
81
- align?: {
82
- title?: AlignTypes,
83
- content?: AlignTypes,
84
- actions?: AlignTypes,
85
- choices?: AlignTypes,
86
- },
87
-
88
- click?: IConfigClick
89
- }
90
-
91
- interface IResetChoiceDisplayContentConfig extends Label.IResetDisplayContentConfig {
92
- value?: any;
93
- }
94
-
95
- interface IResetDisplayContentConfig {
96
- title?: string | Label.IResetDisplayContentConfig,
97
-
98
- content?: string | Label.IResetDisplayContentConfig,
99
-
100
- buttonA?: string | Label.IResetDisplayContentConfig,
101
- buttonB?: string | Label.IResetDisplayContentConfig,
102
-
103
- choices?: (string | IResetChoiceDisplayContentConfig)[]
104
- }
105
-
106
- interface ICreatorsConfig {
107
- background?: GeneralCreateGameObjectCallbackType,
108
- title?: SimpleLabel.ICreatorsConfig,
109
- content?: SimpleLabel.ICreatorsConfig | CreateTextArea.ICreatorsConfig,
110
- button?: SimpleLabel.ICreatorsConfig,
111
- buttonA?: SimpleLabel.ICreatorsConfig,
112
- buttonB?: SimpleLabel.ICreatorsConfig,
113
- choice?: SimpleLabel.ICreatorsConfig,
114
- }
115
- }
116
-
117
- declare class ConfirmDialog extends Dialog {
118
- constructor(
119
- scene: Phaser.Scene,
120
- config?: ConfirmDialog.IConfig,
121
- creators?: ConfirmDialog.ICreatorsConfig
122
- );
123
-
124
- resetDisplayContent(
125
- config?: ConfirmDialog.IResetDisplayContentConfig
126
- ): this;
127
- }
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/confirmdialog/methods/CreateContent.js DELETED
@@ -1,32 +0,0 @@
1
- import CreateLabel from '../../utils/build/CreateLabel.js';
2
- import CreateTextArea from '../../utils/build/CreateTextArea.js'
3
-
4
- const GetValue = Phaser.Utils.Objects.GetValue;
5
-
6
- var CreateContent = function (scene, config, creators) {
7
- var type = GetValue(config, '$type');
8
- if (type === undefined) {
9
- if (config &&
10
- (config.hasOwnProperty('slider') || config.hasOwnProperty('scroller'))
11
- ) {
12
- type = 'textarea';
13
- }
14
- }
15
-
16
-
17
- var gameObject;
18
- switch (type) {
19
- case 'textarea':
20
- gameObject = new CreateTextArea(scene, config, creators);
21
- break;
22
-
23
- default:
24
- gameObject = new CreateLabel(scene, config, creators);
25
- break;
26
- }
27
-
28
- scene.add.existing(gameObject);
29
- return gameObject;
30
- }
31
-
32
- export default CreateContent;
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/menu/methods/CreateButtons.js DELETED
@@ -1,22 +0,0 @@
1
- var CreateButtons = function (scene, items, callback, scope) {
2
- var item;
3
- var buttons = [],
4
- button;
5
- if (items && callback) {
6
- for (var i = 0, cnt = items.length; i < cnt; i++) {
7
- item = items[i];
8
- item.scene = scene;
9
- if (scope) {
10
- button = callback.call(scope, item, i, items);
11
- } else {
12
- button = callback(item, i, items);
13
- }
14
- item.scene = undefined;
15
- buttons.push(button);
16
- }
17
- }
18
-
19
- return buttons;
20
- }
21
-
22
- export default CreateButtons;
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/pages/methods/GetPage.js DELETED
@@ -1,10 +0,0 @@
1
- var GetPage = function (key) {
2
- if (key === undefined) {
3
- return null;
4
- } else if (!this.sizerChildren.hasOwnProperty(key)) {
5
- return null;
6
- } else {
7
- return this.sizerChildren[key];
8
- }
9
- }
10
- export default GetPage;
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/tabpages/TabPages.d.ts DELETED
@@ -1,74 +0,0 @@
1
- // import * as Phaser from 'phaser';
2
- import Sizer from '../sizer/Sizer';
3
- import Buttons from '../buttons/Buttons';
4
- import FixWidthButtons from '../fixwidthbuttons/FixWidthButtons';
5
- import Pages from '../pages/Pages';
6
-
7
-
8
- export default TabPages;
9
-
10
- declare namespace TabPages {
11
- interface IConfig extends Sizer.IConfig {
12
- background?: Phaser.GameObjects.GameObject,
13
-
14
- tabPosition?: 'top' | 'bottom' | 'left' | 'right',
15
- wrapTabs?: boolean,
16
- tabs?: Buttons.IConfig | FixWidthButtons.IConfig,
17
- pages?: Pages.IConfig,
18
-
19
- expand?: {
20
- tabs?: boolean
21
- },
22
-
23
- align?: {
24
- tabs?: 'top' | 'bottom' | 'left' | 'right' | 'center'
25
- }
26
-
27
-
28
- }
29
-
30
- interface IAddPageConfig {
31
- key?: string,
32
- tab: Phaser.GameObjects.GameObject,
33
- page: Phaser.GameObjects.GameObject
34
- }
35
-
36
- }
37
-
38
- declare class TabPages extends Sizer {
39
- constructor(
40
- scene: Phaser.Scene,
41
- config?: TabPages.IConfig
42
- );
43
-
44
- getPageKey(index: number): string;
45
- getPageIndex(key: string): number;
46
-
47
- addPage(
48
- key: string,
49
- tabGameObject: Phaser.GameObjects.GameObject,
50
- pageGameObject: Phaser.GameObjects.GameObject
51
- ): this;
52
-
53
- addPage(config: TabPages.IAddPageConfig): this;
54
-
55
- removePage(
56
- key: string,
57
- destroyChild?: boolean
58
- ): this;
59
-
60
- swapPage(
61
- key: string,
62
- fadeInDuration?: number
63
- ): this;
64
- swapFirstPage(fadeInDuration?: number): this;
65
- swapLastPage(fadeInDuration?: number): this;
66
-
67
- currentKey: string;
68
- readonly previousKey: string;
69
- keys: string[];
70
-
71
- getPage(key: string): Phaser.GameObjects.GameObject;
72
- readonly currentPage: Phaser.GameObjects.GameObject;
73
- readonly previousPage: Phaser.GameObjects.GameObject;
74
- }
 
spaces/AiMimicry/sovits-models/modules/losses.py DELETED
@@ -1,61 +0,0 @@
1
- import torch
2
- from torch.nn import functional as F
3
-
4
- import modules.commons as commons
5
-
6
-
7
- def feature_loss(fmap_r, fmap_g):
8
- loss = 0
9
- for dr, dg in zip(fmap_r, fmap_g):
10
- for rl, gl in zip(dr, dg):
11
- rl = rl.float().detach()
12
- gl = gl.float()
13
- loss += torch.mean(torch.abs(rl - gl))
14
-
15
- return loss * 2
16
-
17
-
18
- def discriminator_loss(disc_real_outputs, disc_generated_outputs):
19
- loss = 0
20
- r_losses = []
21
- g_losses = []
22
- for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
23
- dr = dr.float()
24
- dg = dg.float()
25
- r_loss = torch.mean((1-dr)**2)
26
- g_loss = torch.mean(dg**2)
27
- loss += (r_loss + g_loss)
28
- r_losses.append(r_loss.item())
29
- g_losses.append(g_loss.item())
30
-
31
- return loss, r_losses, g_losses
32
-
33
-
34
- def generator_loss(disc_outputs):
35
- loss = 0
36
- gen_losses = []
37
- for dg in disc_outputs:
38
- dg = dg.float()
39
- l = torch.mean((1-dg)**2)
40
- gen_losses.append(l)
41
- loss += l
42
-
43
- return loss, gen_losses
44
-
45
-
46
- def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):
47
- """
48
- z_p, logs_q: [b, h, t_t]
49
- m_p, logs_p: [b, h, t_t]
50
- """
51
- z_p = z_p.float()
52
- logs_q = logs_q.float()
53
- m_p = m_p.float()
54
- logs_p = logs_p.float()
55
- z_mask = z_mask.float()
56
- #print(logs_p)
57
- kl = logs_p - logs_q - 0.5
58
- kl += 0.5 * ((z_p - m_p)**2) * torch.exp(-2. * logs_p)
59
- kl = torch.sum(kl * z_mask)
60
- l = kl / torch.sum(z_mask)
61
- return l
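
For reference, `kl_loss` above is the masked KL divergence between two diagonal Gaussians — the posterior q (represented here by the single sample `z_p` and log-scale `logs_q`) and the prior p (`m_p`, `logs_p`) — averaged over unmasked positions, as in VITS-style models:

$$ \mathrm{KL} \;=\; \frac{\sum_{b,h,t} m_{b,h,t}\left[\log\sigma_p - \log\sigma_q - \tfrac{1}{2} + \tfrac{1}{2}\,(z_p-\mu_p)^2\, e^{-2\log\sigma_p}\right]}{\sum_{b,h,t} m_{b,h,t}} $$

where $m$ is `z_mask` and $e^{-2\log\sigma_p} = 1/\sigma_p^2$; the $(z_p-\mu_p)^2$ term is a one-sample Monte Carlo stand-in for the analytic $\sigma_q^2 + (\mu_q-\mu_p)^2$.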
 
spaces/AkitoP/umamusume_bert_vits2/bert/bert-base-japanese-v3/README.md DELETED
@@ -1,53 +0,0 @@
1
- ---
2
- license: apache-2.0
3
- datasets:
4
- - cc100
5
- - wikipedia
6
- language:
7
- - ja
8
- widget:
9
- - text: 東北大学で[MASK]の研究をしています。
10
- ---
11
-
12
- # BERT base Japanese (unidic-lite with whole word masking, CC-100 and jawiki-20230102)
13
-
14
- This is a [BERT](https://github.com/google-research/bert) model pretrained on texts in the Japanese language.
15
-
16
- This version of the model processes input texts with word-level tokenization based on the Unidic 2.1.2 dictionary (available in [unidic-lite](https://pypi.org/project/unidic-lite/) package), followed by the WordPiece subword tokenization.
17
- Additionally, the model is trained with the whole word masking enabled for the masked language modeling (MLM) objective.
18
-
19
- The codes for the pretraining are available at [cl-tohoku/bert-japanese](https://github.com/cl-tohoku/bert-japanese/).
20
-
21
- ## Model architecture
22
-
23
- The model architecture is the same as the original BERT base model; 12 layers, 768 dimensions of hidden states, and 12 attention heads.
24
-
25
- ## Training Data
26
-
27
- The model is trained on the Japanese portion of [CC-100 dataset](https://data.statmt.org/cc-100/) and the Japanese version of Wikipedia.
28
- For Wikipedia, we generated a text corpus from the [Wikipedia Cirrussearch dump file](https://dumps.wikimedia.org/other/cirrussearch/) as of January 2, 2023.
29
- The corpus files generated from CC-100 and Wikipedia are 74.3GB and 4.9GB in size and consist of approximately 392M and 34M sentences, respectively.
30
-
31
- For the purpose of splitting texts into sentences, we used [fugashi](https://github.com/polm/fugashi) with [mecab-ipadic-NEologd](https://github.com/neologd/mecab-ipadic-neologd) dictionary (v0.0.7).
32
-
33
- ## Tokenization
34
-
35
- The texts are first tokenized by MeCab with the Unidic 2.1.2 dictionary and then split into subwords by the WordPiece algorithm.
36
- The vocabulary size is 32768.
37
-
38
- We used [fugashi](https://github.com/polm/fugashi) and [unidic-lite](https://github.com/polm/unidic-lite) packages for the tokenization.
39
-
40
- ## Training
41
-
42
- We trained the model first on the CC-100 corpus for 1M steps and then on the Wikipedia corpus for another 1M steps.
43
- For training of the MLM (masked language modeling) objective, we introduced whole word masking in which all of the subword tokens corresponding to a single word (tokenized by MeCab) are masked at once.
44
-
45
- For training of each model, we used a v3-8 instance of Cloud TPUs provided by [TPU Research Cloud](https://sites.research.google/trc/about/).
46
-
47
- ## Licenses
48
-
49
- The pretrained models are distributed under the Apache License 2.0.
50
-
51
- ## Acknowledgments
52
-
53
- This model is trained with Cloud TPUs provided by [TPU Research Cloud](https://sites.research.google/trc/about/) program.
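
A minimal sketch of querying a checkpoint like this with the `transformers` fill-mask pipeline, using the widget sentence from the front matter above (it assumes the model is available as `cl-tohoku/bert-base-japanese-v3` and that `fugashi` and `unidic-lite` are installed for the MeCab tokenization step):

```python
# Usage sketch; the model id and top-3 printout are illustrative assumptions.
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="cl-tohoku/bert-base-japanese-v3")
for candidate in fill_mask("東北大学で[MASK]の研究をしています。")[:3]:
    print(candidate["token_str"], round(candidate["score"], 3))
```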
 
spaces/AleksBlacky/Arxiv_paper_classifier/app.py DELETED
@@ -1,136 +0,0 @@
1
- import streamlit as st
2
- import transformers
3
- import pickle
4
- import seaborn as sns
5
- from pandas import DataFrame
6
- from transformers import AutoTokenizer, AutoModelForSequenceClassification
7
-
8
- st.markdown("# Hello, friend!")
9
- st.markdown(" This magic application going to help you with understanding of science paper topic! Cool? Yeah! ")
10
-
11
- try:
12
- model_name_global = "allenai/scibert_scivocab_uncased"
13
- tokenizer_ = AutoTokenizer.from_pretrained(model_name_global)
14
- with open('./models/scibert/decode_dict.pkl', 'rb') as f:
15
- decode_dict = pickle.load(f)
16
- except ValueError:
17
- st.error("Load tokenizer or decode answer dict goes wrong! Pls contact author [email protected]")
18
-
19
- with st.form(key="my_form"):
20
- st.markdown("### 🎈 Do you want a little magic? ")
21
- st.markdown(" Write your article title and abstract to textboxes bellow and I'll gues topic of your paper! ")
22
- ce, c2, c3 = st.columns([0.07, 7, 0.07])
23
-
24
- with c2:
25
- doc_title = st.text_area(
26
- "Paste your abstract title below (1 to 50 words)",
27
- height=210,
28
- )
29
-
30
- doc_abstract = st.text_area(
31
- "Paste your abstract text below (1 to 500 words)",
32
- height=410,
33
- )
34
-
35
- MAX_WORDS_TITLE, MAX_WORDS_ABSTRACT = 50, 500
36
- import re
37
-
38
- len_title = len(re.findall(r"\w+", doc_title))
39
- len_abstract = len(re.findall(r"\w+", doc_abstract))
40
-
41
- if len_title > MAX_WORDS_TITLE:
42
- st.warning(
43
- "⚠️ Your title contains "
44
- + str(len_title)
45
- + " words."
46
- + " Only the first 50 words will be reviewed. Stay tuned as increased allowance is coming! 😊"
47
- )
48
-
49
- doc_title = " ".join(doc_title.split()[:MAX_WORDS_TITLE])  # truncate by words, as the warning promises
50
-
51
- if len_abstract > MAX_WORDS_ABSTRACT:
52
- st.warning(
53
- "⚠️ Your abstract contains "
54
- + str(len_abstract)
55
- + " words."
56
- + " Only the first 500 words will be reviewed. Stay tuned as increased allowance is coming! 😊"
57
- )
58
-
59
- doc_abstract = " ".join(doc_abstract.split()[:MAX_WORDS_ABSTRACT])  # truncate by words, as the warning promises
60
-
61
- submit_button = st.form_submit_button(label="✨ Let's play, try it!")
62
-
63
- if not submit_button:
64
- st.stop()
65
-
66
- if len_title < 1:
67
- st.error("Article without any words in title? Pls give me correct title!")
68
- st.stop()
69
-
70
- if len_abstract < 1:
71
- st.error("Article without any words in abstract? Pls give me correct abstract!")
72
- st.stop()
73
-
74
-
75
- # allow_output_mutation=True
76
- @st.cache(suppress_st_warning=True)
77
- def load_model():
78
- st.write("Loading big model")
79
- return AutoModelForSequenceClassification.from_pretrained("models/scibert/")
80
-
81
-
82
- def make_predict(tokens, decode_dict):
83
-
84
- model_ = load_model()
85
- outs = model_(tokens.input_ids)
86
-
87
- probs = outs["logits"].softmax(dim=-1).tolist()[0]
88
- topic_probs = {}
89
- for i, p in enumerate(probs):
90
- if p > 0.1:
91
- topic_probs[decode_dict[i]] = p
92
- return topic_probs
93
-
94
-
95
- model_local = "models/scibert/"
96
-
97
- title = doc_title
98
- abstract = doc_abstract
99
- try:
100
- tokens = tokenizer_(title + abstract, return_tensors="pt")
101
- except ValueError:
102
- st.error("Word parsing into tokens went wrong! Is input valid? If yes, pls contact author [email protected]")
103
-
104
- predicts = make_predict(tokens, decode_dict)
105
-
106
- st.markdown("## 🎈 Yor article probably about: ")
107
- st.header("")
108
-
109
- df = (
110
- DataFrame(predicts.items(), columns=["Topic", "Prob"])
111
- .sort_values(by="Prob", ascending=False)
112
- .reset_index(drop=True)
113
- )
114
-
115
- df.index += 1
116
-
117
- # Add styling
118
- cmGreen = sns.light_palette("green", as_cmap=True)
119
- cmRed = sns.light_palette("red", as_cmap=True)
120
- df = df.style.background_gradient(
121
- cmap=cmGreen,
122
- subset=[
123
- "Prob",
124
- ],
125
- )
126
-
127
- c1, c2, c3 = st.columns([1, 3, 1])
128
-
129
- format_dictionary = {
130
- "Prob": "{:.1%}",
131
- }
132
-
133
- df = df.format(format_dictionary)
134
-
135
- with c2:
136
- st.table(df)
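
A self-contained sketch of the filtering step inside `make_predict` above: the logits are softmax-normalized and only topics whose probability exceeds 0.1 are kept. The tensor values and label names below are made up for illustration:

```python
import torch

logits = torch.tensor([[2.0, 0.5, 1.8, -1.0]])                    # hypothetical model output
decode_dict = {0: "cs.LG", 1: "math.ST", 2: "cs.CL", 3: "q-bio"}  # hypothetical labels

probs = logits.softmax(dim=-1).tolist()[0]
topic_probs = {decode_dict[i]: p for i, p in enumerate(probs) if p > 0.1}
print(topic_probs)  # only classes with probability > 0.1 survive
```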
 
spaces/Alex89912/ai-code-v1/app.py DELETED
@@ -1,3 +0,0 @@
1
- import gradio as gr
2
-
3
- gr.Interface.load("models/codellama/CodeLlama-7b-hf").launch()
 
 
 
 
spaces/AlgoveraAI/ocean-marketplace/app.py DELETED
@@ -1,173 +0,0 @@
1
- import gradio as gr
2
- from ocean_lib.config import Config
3
- from ocean_lib.ocean.ocean import Ocean
4
- from ocean_lib.web3_internal.wallet import Wallet
5
- from ocean_lib.web3_internal.currency import pretty_ether_and_wei, to_wei
6
- from ocean_lib.web3_internal.constants import ZERO_ADDRESS
7
- from ocean_lib.common.agreements.service_types import ServiceTypes
8
- from PIL import Image
9
- import numpy as np
10
- import matplotlib.pyplot as plt
11
-
12
-
13
- config = Config('config.ini')
14
- ocean = Ocean(config)
15
-
16
- def search(term="", did_in="", address="", buy_top_result=False):
17
-
18
- if address:
19
- wallet = Wallet(ocean.web3, private_key=address, transaction_timeout=20, block_confirmations=0)
20
-
21
- results = None
22
- dids = None
23
- data=None
24
- if term and not did_in:
25
- assets = ocean.assets.search(term)
26
-
27
- results = []
28
- datas = []
29
- balances = []
30
- dids = []
31
- for i in range(len(assets)):
32
- name = assets[i].values['_source']['service'][0]['attributes']['main']['name']
33
- type_ = assets[i].values['_source']['service'][0]['attributes']['main']['type'].upper()
34
- symbol = assets[i].values['_source']['dataTokenInfo']['symbol']
35
- data_token_address = assets[i].values['_source']['dataTokenInfo']['address']
36
- try:
37
- description = assets[i].values['_source']['service'][0]['attributes']['additionalInformation']['description']
38
- except:
39
- description = "No description"
40
- author = assets[i].values['_source']['service'][0]['attributes']['main']['author']
41
- did = assets[i].values['_source']['id']
42
- dids.append(did)
43
- chain = assets[i].values['_source']['service'][1]['serviceEndpoint']
44
-
45
- if chain != 'https://provider.rinkeby.oceanprotocol.com':
46
- continue
47
-
48
- if address:
49
- data_token = ocean.get_data_token(data_token_address)
50
- token_address = data_token.address
51
- balances.append(pretty_ether_and_wei(data_token.balanceOf(wallet.address)))
52
- else:
53
- balances.append(0)
54
-
55
- img = Image.open('algovera-tile.png')
56
-
57
- fig = plt.figure(figsize=(5,5))
58
- plt.axis("off")
59
- plt.imshow(img)
60
- plt.text(20, 100, name[:22], size=20)
61
- plt.text(20, 60, symbol)
62
- plt.text(400, 40, type_)
63
- plt.text(20, 140, author, size=12)
64
- plt.text(20, 200, description[:50])
65
- fig.tight_layout()
66
- fig.canvas.draw()
67
- data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
68
- datas.append(data.reshape(fig.canvas.get_width_height()[::-1] + (3,)))
69
- plt.close()
70
-
71
- results.append([dids[-1], datas[-1], balances[-1]])
72
-
73
-
74
- if did_in:
75
- results = []
76
- balances = []
77
- datas = []
78
- dids = []
79
-
80
- asset = ocean.assets.resolve(did_in)
81
- name = asset.as_dictionary()['service'][0]['attributes']['main']['name']
82
- type_ = asset.as_dictionary()['service'][0]['attributes']['main']['type'].upper()
83
- symbol = asset.as_dictionary()['dataTokenInfo']['symbol']
84
- try:
85
- description = asset.as_dictionary()['service'][0]['attributes']['additionalInformation']['description']
86
- except:
87
- description = "No description"
88
- author = asset.as_dictionary()['service'][0]['attributes']['main']['author']
89
- dids.append(did_in)
90
- chain = asset.as_dictionary()['service'][1]['serviceEndpoint']
91
-
92
- if chain != 'https://provider.rinkeby.oceanprotocol.com':
93
- pass
94
-
95
- if address:
96
- data_token = ocean.get_data_token(asset.data_token_address)
97
- token_address = data_token.address
98
- balances.append(pretty_ether_and_wei(data_token.balanceOf(wallet.address)))
99
- else:
100
- balances.append(0)
101
-
102
-
103
-
104
- img = Image.open('algovera-tile.png')
105
-
106
- fig = plt.figure(figsize=(5,5))
107
- plt.axis("off")
108
- plt.imshow(img)
109
- plt.text(20, 100, name[:22], size=20)
110
- plt.text(20, 60, symbol)
111
- plt.text(400, 40, type_)
112
- plt.text(20, 140, author, size=12)
113
- plt.text(20, 200, description[:50])
114
- fig.tight_layout()
115
- fig.canvas.draw()
116
- data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
117
- datas.append(data.reshape(fig.canvas.get_width_height()[::-1] + (3,)))
118
- plt.close()
119
-
120
- results.append([dids[-1], datas[-1], balances[-1]])
121
-
122
- if buy_top_result and address:
123
- asset = ocean.assets.resolve(dids[0])
124
- data_token = ocean.get_data_token(asset.data_token_address)
125
-
126
- service_type = asset.as_dictionary()['service'][1]['type']
127
- compute_service = asset.get_service(service_type)
128
-
129
- owner_address = asset.as_dictionary()['publicKey'][0]['owner']
130
-
131
- logs = ocean.exchange.search_exchange_by_data_token(asset.data_token_address)
132
- exchange_id = logs[0].args.exchangeId
133
-
134
- tx_result = ocean.exchange.buy_at_fixed_rate(to_wei(1), wallet, to_wei(5), exchange_id, asset.data_token_address, owner_address)
135
- assert tx_result, "failed buying tokens"
136
-
137
- balance = pretty_ether_and_wei(data_token.balanceOf(wallet.address))
138
-
139
- results[0][2] = balance
140
-
141
- return results
142
-
143
- description = (
144
- "This app can be used to search datasets and algorithms on the Ocean Marketplace. Enter a search term in the text box and the first result will be displayed as an image tile with description. "
145
- )
146
-
147
- article = (
148
- "<p style='text-align: center'>"
149
- "<a href='https://market.oceanprotocol.com/' target='_blank'>1. Ocean Marketplace</a> | "
150
- "<a href='https://docs.algovera.ai/blog/2022/01/04/Using%20the%20Ocean%20Marketplace%20with%20HuggingFace%20Apps,%20Algorithms%20and%20Datasets' target='_blank'>2. Blog about Ocean Protocol on HuggingFace</a> "
151
- "</p>"
152
- )
153
-
154
-
155
- interface = gr.Interface(
156
- search,
157
- [
158
- gr.inputs.Textbox(label="Search Datasets and Algorithms by name"),
159
- gr.inputs.Textbox(label="Search Datasets and Algorithms by DID"),
160
- gr.inputs.Textbox(label="Show Token Balance for Each (by Inputting Private Key)"),
161
- "checkbox"
162
-
163
- ],
164
- [
165
- gr.outputs.Carousel(["text", "image", "text"], label="Search Results"),
166
- ],
167
- title="Ocean Marketplace",
168
- description=description,
169
- article=article,
170
- theme="huggingface",
171
- )
172
-
173
- interface.launch()
 
spaces/AllAideas/SegmentacionVideo/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: SegmentacionVideo
3
- emoji: 🔥
4
- colorFrom: purple
5
- colorTo: gray
6
- sdk: gradio
7
- sdk_version: 3.8.2
8
- app_file: app.py
9
- pinned: false
10
- license: mit
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Aloento/9Nine-PITS/text/japanese.py DELETED
@@ -1,131 +0,0 @@
1
- import re
2
-
3
- import pyopenjtalk
4
- from unidecode import unidecode
5
-
6
- # Regular expression matching Japanese without punctuation marks:
7
- _japanese_characters = re.compile(
8
- r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
9
-
10
- # Regular expression matching non-Japanese characters or punctuation marks:
11
- _japanese_marks = re.compile(
12
- r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
13
-
14
- # List of (symbol, Japanese) pairs for marks:
15
- _symbols_to_japanese = [(re.compile('%s' % x[0]), x[1]) for x in [
16
- ('%', 'パーセント')
17
- ]]
18
-
19
- # List of (romaji, ipa2) pairs for marks:
20
- _romaji_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [
21
- ('u', 'ɯ'),
22
- ('ʧ', 'tʃ'),
23
- ('j', 'dʑ'),
24
- ('y', 'j'),
25
- ('ni', 'n^i'),
26
- ('nj', 'n^'),
27
- ('hi', 'çi'),
28
- ('hj', 'ç'),
29
- ('f', 'ɸ'),
30
- ('I', 'i*'),
31
- ('U', 'ɯ*'),
32
- ('r', 'ɾ')
33
- ]]
34
-
35
- # List of (consonant, sokuon) pairs:
36
- _real_sokuon = [(re.compile('%s' % x[0]), x[1]) for x in [
37
- (r'Q([↑↓]*[kg])', r'k#\1'),
38
- (r'Q([↑↓]*[tdjʧ])', r't#\1'),
39
- (r'Q([↑↓]*[sʃ])', r's\1'),
40
- (r'Q([↑↓]*[pb])', r'p#\1')
41
- ]]
42
-
43
- # List of (consonant, hatsuon) pairs:
44
- _real_hatsuon = [(re.compile('%s' % x[0]), x[1]) for x in [
45
- (r'N([↑↓]*[pbm])', r'm\1'),
46
- (r'N([↑↓]*[ʧʥj])', r'n^\1'),
47
- (r'N([↑↓]*[tdn])', r'n\1'),
48
- (r'N([↑↓]*[kg])', r'ŋ\1')
49
- ]]
50
-
51
-
52
- def symbols_to_japanese(text):
53
- for regex, replacement in _symbols_to_japanese:
54
- text = re.sub(regex, replacement, text)
55
- return text
56
-
57
-
58
- def japanese_to_romaji_with_accent(text):
59
- """
60
- Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html
61
- """
62
-
63
- text = symbols_to_japanese(text)
64
- sentences = re.split(_japanese_marks, text)
65
- marks = re.findall(_japanese_marks, text)
66
- text = ''
67
-
68
- for i, sentence in enumerate(sentences):
69
-
70
- if re.match(_japanese_characters, sentence):
71
-
72
- if text != '':
73
- text += ' '
74
-
75
- labels = pyopenjtalk.extract_fullcontext(sentence)
76
-
77
- for n, label in enumerate(labels):
78
- phoneme = re.search(r'\-([^\+]*)\+', label).group(1)
79
-
80
- if phoneme not in ['sil', 'pau']:
81
- text += phoneme.replace('ch', 'ʧ').replace('sh', 'ʃ').replace('cl', 'Q')
82
- else:
83
- continue
84
-
85
- # n_moras = int(re.search(r'/F:(\d+)_', label).group(1))
86
- a1 = int(re.search(r"/A:(\-?[0-9]+)\+", label).group(1))
87
- a2 = int(re.search(r"\+(\d+)\+", label).group(1))
88
- a3 = int(re.search(r"\+(\d+)/", label).group(1))
89
-
90
- if re.search(r'\-([^\+]*)\+', labels[n + 1]).group(1) in ['sil', 'pau']:
91
- a2_next = -1
92
- else:
93
- a2_next = int(re.search(r"\+(\d+)\+", labels[n + 1]).group(1))
94
-
95
- # Accent phrase boundary
96
- if a3 == 1 and a2_next == 1:
97
- text += ' '
98
- # Falling
99
- elif a1 == 0 and a2_next == a2 + 1:
100
- text += '↓'
101
- # Rising
102
- elif a2 == 1 and a2_next == 2:
103
- text += '↑'
104
-
105
- if i < len(marks):
106
- text += unidecode(marks[i]).replace(' ', '')
107
-
108
- return text
109
-
110
-
111
- def get_real_sokuon(text):
112
- for regex, replacement in _real_sokuon:
113
- text = re.sub(regex, replacement, text)
114
- return text
115
-
116
-
117
- def get_real_hatsuon(text):
118
- for regex, replacement in _real_hatsuon:
119
- text = re.sub(regex, replacement, text)
120
- return text
121
-
122
-
123
- def japanese_to_ipa(text):
124
- text = japanese_to_romaji_with_accent(text).replace('...', '…')
125
- text = get_real_sokuon(text)
126
- text = get_real_hatsuon(text)
127
-
128
- for regex, replacement in _romaji_to_ipa:
129
- text = re.sub(regex, replacement, text)
130
-
131
- return text
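
A minimal usage sketch of this module (it assumes `pyopenjtalk` and `unidecode` are installed; the exact phoneme strings depend on the bundled Open JTalk dictionary, so the comments are only indicative):

```python
# Usage sketch; printed values vary with the pyopenjtalk dictionary version.
from text.japanese import japanese_to_romaji_with_accent, japanese_to_ipa

text = "こんにちは、世界。"
print(japanese_to_romaji_with_accent(text))  # romaji with ↑/↓ pitch-accent marks
print(japanese_to_ipa(text))                 # the same string mapped to IPA symbols
```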
 
spaces/Alpaca233/ChatPDF-GUI/gpt_reader/pdf_reader.py DELETED
@@ -1,121 +0,0 @@
1
- from PyPDF2 import PdfReader
2
- import openai
3
- from .prompt import BASE_POINTS, READING_PROMT_V2
4
- from .paper import Paper
5
- from .model_interface import OpenAIModel
6
-
7
-
8
- # Setting the API key to use the OpenAI API
9
- class PaperReader:
10
-
11
- """
12
- A class for summarizing research papers using the OpenAI API.
13
-
14
- Attributes:
15
- openai_key (str): The API key to use the OpenAI API.
16
- token_length (int): The length of text to send to the API at a time.
17
- model (str): The GPT model to use for summarization.
18
- points_to_focus (str): The key points to focus on while summarizing.
19
- verbose (bool): A flag to enable/disable verbose logging.
20
-
21
- """
22
-
23
- def __init__(self, openai_key, token_length=4000, model="gpt-3.5-turbo",
24
- points_to_focus=BASE_POINTS, verbose=False):
25
-
26
- # Setting the API key to use the OpenAI API
27
- openai.api_key = openai_key
28
-
29
- # Initializing prompts for the conversation
30
- self.init_prompt = READING_PROMT_V2.format(points_to_focus)
31
-
32
- self.summary_prompt = 'You are a researcher helper bot. Now you need to read the summaries of a research paper.'
33
- self.messages = [] # Initializing the conversation messages
34
- self.summary_msg = [] # Initializing the summary messages
35
- self.token_len = token_length # Setting the token length to use
36
- self.keep_round = 2 # Rounds of previous dialogues to keep in conversation
37
- self.model = model # GPT model name (note: overwritten below by the OpenAIModel wrapper)
38
- self.verbose = verbose # Flag to enable/disable verbose logging
39
- self.model = OpenAIModel(api_key=openai_key, model=model)
40
-
41
- def drop_conversation(self, msg):
42
- # This method is used to drop previous messages from the conversation and keep only recent ones
43
- if len(msg) >= (self.keep_round + 1) * 2 + 1:
44
- new_msg = [msg[0]]
45
- for i in range(3, len(msg)):
46
- new_msg.append(msg[i])
47
- return new_msg
48
- else:
49
- return msg
50
-
51
- def send_msg(self, msg):
52
- return self.model.send_msg(msg)
53
-
54
- def _chat(self, message):
55
- # This method is used to send a message and get a response from the OpenAI API
56
-
57
- # Adding the user message to the conversation messages
58
- self.messages.append({"role": "user", "content": message})
59
- # Sending the messages to the API and getting the response
60
- response = self.send_msg(self.messages)
61
- # Adding the system response to the conversation messages
62
- self.messages.append({"role": "system", "content": response})
63
- # Dropping previous conversation messages to keep the conversation history short
64
- self.messages = self.drop_conversation(self.messages)
65
- # Returning the system response
66
- return response
67
-
68
- def summarize(self, paper: Paper):
69
- # This method is used to summarize a given research paper
70
-
71
- # Adding the initial prompt to the conversation messages
72
- self.messages = [
73
- {"role": "system", "content": self.init_prompt},
74
- ]
75
- # Adding the summary prompt to the summary messages
76
- self.summary_msg = [{"role": "system", "content": self.summary_prompt}]
77
-
78
- # Reading and summarizing each part of the research paper
79
- for (page_idx, part_idx, text) in paper.iter_pages():
80
- print('page: {}, part: {}'.format(page_idx, part_idx))
81
- # Sending the text to the API and getting the response
82
- summary = self._chat('now I send you page {}, part {}:{}'.format(page_idx, part_idx, text))
83
- # Logging the summary if verbose logging is enabled
84
- if self.verbose:
85
- print(summary)
86
- # Adding the summary of the part to the summary messages
87
- self.summary_msg.append({"role": "user", "content": '{}'.format(summary)})
88
-
89
- # Adding a prompt for the user to summarize the whole paper to the summary messages
90
- self.summary_msg.append({"role": "user", "content": 'Now please make a summary of the whole paper'})
91
- # Sending the summary messages to the API and getting the response
92
- result = self.send_msg(self.summary_msg)
93
- # Returning the summary of the whole paper
94
- return result
95
-
96
- def read_pdf_and_summarize(self, pdf_path):
97
- # This method is used to read a research paper from a PDF file and summarize it
98
-
99
- # Creating a PdfReader object to read the PDF file
100
- pdf_reader = PdfReader(pdf_path)
101
- paper = Paper(pdf_reader)
102
- # Summarizing the full text of the research paper and returning the summary
103
- print('reading pdf finished')
104
- summary = self.summarize(paper)
105
- return summary
106
-
107
- def get_summary_of_each_part(self):
108
- # This method is used to get the summary of each part of the research paper
109
- return self.summary_msg
110
-
111
- def question(self, question):
112
- # This method is used to ask a question after summarizing a paper
113
-
114
- # Adding the question to the summary messages
115
- self.summary_msg.append({"role": "user", "content": question})
116
- # Sending the summary messages to the API and getting the response
117
- response = self.send_msg(self.summary_msg)
118
- # Adding the system response to the summary messages
119
- self.summary_msg.append({"role": "system", "content": response})
120
- # Returning the system response
121
- return response
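
A minimal usage sketch of `PaperReader` (the API key and file path below are hypothetical; the prompts and the `OpenAIModel` wrapper come from the sibling modules imported at the top of the file):

```python
# Usage sketch; replace the key and path with real values.
from gpt_reader.pdf_reader import PaperReader

reader = PaperReader(openai_key="sk-...", verbose=True)
summary = reader.read_pdf_and_summarize("paper.pdf")  # per-part summaries, then a whole-paper one
print(summary)
print(reader.question("What dataset does the paper use?"))  # follow-up Q&A over the summaries
```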
 
spaces/Alpaca233/SadTalker/src/utils/face_enhancer.py DELETED
@@ -1,123 +0,0 @@
1
- import os
2
- import torch
3
-
4
- from gfpgan import GFPGANer
5
-
6
- from tqdm import tqdm
7
-
8
- from src.utils.videoio import load_video_to_cv2
9
-
10
- import cv2
11
-
12
-
13
- class GeneratorWithLen(object):
14
- """ From https://stackoverflow.com/a/7460929 """
15
-
16
- def __init__(self, gen, length):
17
- self.gen = gen
18
- self.length = length
19
-
20
- def __len__(self):
21
- return self.length
22
-
23
- def __iter__(self):
24
- return self.gen
25
-
26
- def enhancer_list(images, method='gfpgan', bg_upsampler='realesrgan'):
27
- gen = enhancer_generator_no_len(images, method=method, bg_upsampler=bg_upsampler)
28
- return list(gen)
29
-
30
- def enhancer_generator_with_len(images, method='gfpgan', bg_upsampler='realesrgan'):
31
- """ Provide a generator with a __len__ method so that it can passed to functions that
32
- call len()"""
33
-
34
- if os.path.isfile(images): # handle video to images
35
- # TODO: Create a generator version of load_video_to_cv2
36
- images = load_video_to_cv2(images)
37
-
38
- gen = enhancer_generator_no_len(images, method=method, bg_upsampler=bg_upsampler)
39
- gen_with_len = GeneratorWithLen(gen, len(images))
40
- return gen_with_len
41
-
42
- def enhancer_generator_no_len(images, method='gfpgan', bg_upsampler='realesrgan'):
43
- """ Provide a generator function so that all of the enhanced images don't need
44
- to be stored in memory at the same time. This can save tons of RAM compared to
45
- the enhancer function. """
46
-
47
- print('face enhancer....')
48
- if not isinstance(images, list) and os.path.isfile(images): # handle video to images
49
- images = load_video_to_cv2(images)
50
-
51
- # ------------------------ set up GFPGAN restorer ------------------------
52
- if method == 'gfpgan':
53
- arch = 'clean'
54
- channel_multiplier = 2
55
- model_name = 'GFPGANv1.4'
56
- url = 'https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth'
57
- elif method == 'RestoreFormer':
58
- arch = 'RestoreFormer'
59
- channel_multiplier = 2
60
- model_name = 'RestoreFormer'
61
- url = 'https://github.com/TencentARC/GFPGAN/releases/download/v1.3.4/RestoreFormer.pth'
62
- elif method == 'codeformer': # TODO:
63
- arch = 'CodeFormer'
64
- channel_multiplier = 2
65
- model_name = 'CodeFormer'
66
- url = 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth'
67
- else:
68
- raise ValueError(f'Wrong model version {method}.')
69
-
70
-
71
- # ------------------------ set up background upsampler ------------------------
72
- if bg_upsampler == 'realesrgan':
73
- if not torch.cuda.is_available(): # CPU
74
- import warnings
75
- warnings.warn('The unoptimized RealESRGAN is slow on CPU. We do not use it. '
76
- 'If you really want to use it, please modify the corresponding codes.')
77
- bg_upsampler = None
78
- else:
79
- from basicsr.archs.rrdbnet_arch import RRDBNet
80
- from realesrgan import RealESRGANer
81
- model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2)
82
- bg_upsampler = RealESRGANer(
83
- scale=2,
84
- model_path='https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth',
85
- model=model,
86
- tile=400,
87
- tile_pad=10,
88
- pre_pad=0,
89
- half=True) # need to set False in CPU mode
90
- else:
91
- bg_upsampler = None
92
-
93
- # determine model paths
94
- model_path = os.path.join('gfpgan/weights', model_name + '.pth')
95
-
96
- if not os.path.isfile(model_path):
97
- model_path = os.path.join('checkpoints', model_name + '.pth')
98
-
99
- if not os.path.isfile(model_path):
100
- # download pre-trained models from url
101
- model_path = url
102
-
103
- restorer = GFPGANer(
104
- model_path=model_path,
105
- upscale=2,
106
- arch=arch,
107
- channel_multiplier=channel_multiplier,
108
- bg_upsampler=bg_upsampler)
109
-
110
- # ------------------------ restore ------------------------
111
- for idx in tqdm(range(len(images)), 'Face Enhancer:'):
112
-
113
- img = cv2.cvtColor(images[idx], cv2.COLOR_RGB2BGR)
114
-
115
- # restore faces and background if necessary
116
- cropped_faces, restored_faces, r_img = restorer.enhance(
117
- img,
118
- has_aligned=False,
119
- only_center_face=False,
120
- paste_back=True)
121
-
122
- r_img = cv2.cvtColor(r_img, cv2.COLOR_BGR2RGB)
123
- yield r_img
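
`GeneratorWithLen` above is a general pattern: it lets a lazily produced stream be handed to consumers that call `len()` (progress bars, preallocation) without materializing all items first. A self-contained sketch of the same idea:

```python
# Minimal illustration of the GeneratorWithLen pattern used above.
def squares(n):
    for i in range(n):
        yield i * i

class GeneratorWithLen:
    def __init__(self, gen, length):
        self.gen = gen
        self.length = length

    def __len__(self):
        return self.length

    def __iter__(self):
        return self.gen

g = GeneratorWithLen(squares(5), 5)
print(len(g))   # 5, without consuming the generator
print(list(g))  # [0, 1, 4, 9, 16]
```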
 
spaces/Ameaou/academic-chatgpt3.1/crazy_functions/test_project/cpp/libJPG/jpge.cpp DELETED
@@ -1,1049 +0,0 @@
1
- // jpge.cpp - C++ class for JPEG compression.
2
- // Public domain, Rich Geldreich <[email protected]>
3
- // v1.01, Dec. 18, 2010 - Initial release
4
- // v1.02, Apr. 6, 2011 - Removed 2x2 ordered dither in H2V1 chroma subsampling method load_block_16_8_8(). (The rounding factor was 2, when it should have been 1. Either way, it wasn't helping.)
5
- // v1.03, Apr. 16, 2011 - Added support for optimized Huffman code tables, optimized dynamic memory allocation down to only 1 alloc.
6
- // Also from Alex Evans: Added RGBA support, linear memory allocator (no longer needed in v1.03).
7
- // v1.04, May. 19, 2012: Forgot to set m_pFile ptr to NULL in cfile_stream::close(). Thanks to Owen Kaluza for reporting this bug.
8
- // Code tweaks to fix VS2008 static code analysis warnings (all looked harmless).
9
- // Code review revealed method load_block_16_8_8() (used for the non-default H2V1 sampling mode to downsample chroma) somehow didn't get the rounding factor fix from v1.02.
10
-
11
- #include "jpge.h"
12
-
13
- #include <stdlib.h>
14
- #include <string.h>
15
- #if PLATFORM_WINDOWS
16
- #include <malloc.h>
17
- #endif
18
-
19
- #define JPGE_MAX(a,b) (((a)>(b))?(a):(b))
20
- #define JPGE_MIN(a,b) (((a)<(b))?(a):(b))
21
-
22
- namespace jpge {
23
-
24
- static inline void *jpge_malloc(size_t nSize) { return FMemory::Malloc(nSize); }
25
- static inline void jpge_free(void *p) { FMemory::Free(p); }
26
-
27
- // Various JPEG enums and tables.
28
- enum { M_SOF0 = 0xC0, M_DHT = 0xC4, M_SOI = 0xD8, M_EOI = 0xD9, M_SOS = 0xDA, M_DQT = 0xDB, M_APP0 = 0xE0 };
29
- enum { DC_LUM_CODES = 12, AC_LUM_CODES = 256, DC_CHROMA_CODES = 12, AC_CHROMA_CODES = 256, MAX_HUFF_SYMBOLS = 257, MAX_HUFF_CODESIZE = 32 };
30
-
31
- static uint8 s_zag[64] = { 0,1,8,16,9,2,3,10,17,24,32,25,18,11,4,5,12,19,26,33,40,48,41,34,27,20,13,6,7,14,21,28,35,42,49,56,57,50,43,36,29,22,15,23,30,37,44,51,58,59,52,45,38,31,39,46,53,60,61,54,47,55,62,63 };
32
- static int16 s_std_lum_quant[64] = { 16,11,12,14,12,10,16,14,13,14,18,17,16,19,24,40,26,24,22,22,24,49,35,37,29,40,58,51,61,60,57,51,56,55,64,72,92,78,64,68,87,69,55,56,80,109,81,87,95,98,103,104,103,62,77,113,121,112,100,120,92,101,103,99 };
33
- static int16 s_std_croma_quant[64] = { 17,18,18,24,21,24,47,26,26,47,99,66,56,66,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99 };
34
- static uint8 s_dc_lum_bits[17] = { 0,0,1,5,1,1,1,1,1,1,0,0,0,0,0,0,0 };
35
- static uint8 s_dc_lum_val[DC_LUM_CODES] = { 0,1,2,3,4,5,6,7,8,9,10,11 };
36
- static uint8 s_ac_lum_bits[17] = { 0,0,2,1,3,3,2,4,3,5,5,4,4,0,0,1,0x7d };
37
- static uint8 s_ac_lum_val[AC_LUM_CODES] =
38
- {
39
- 0x01,0x02,0x03,0x00,0x04,0x11,0x05,0x12,0x21,0x31,0x41,0x06,0x13,0x51,0x61,0x07,0x22,0x71,0x14,0x32,0x81,0x91,0xa1,0x08,0x23,0x42,0xb1,0xc1,0x15,0x52,0xd1,0xf0,
40
- 0x24,0x33,0x62,0x72,0x82,0x09,0x0a,0x16,0x17,0x18,0x19,0x1a,0x25,0x26,0x27,0x28,0x29,0x2a,0x34,0x35,0x36,0x37,0x38,0x39,0x3a,0x43,0x44,0x45,0x46,0x47,0x48,0x49,
41
- 0x4a,0x53,0x54,0x55,0x56,0x57,0x58,0x59,0x5a,0x63,0x64,0x65,0x66,0x67,0x68,0x69,0x6a,0x73,0x74,0x75,0x76,0x77,0x78,0x79,0x7a,0x83,0x84,0x85,0x86,0x87,0x88,0x89,
42
- 0x8a,0x92,0x93,0x94,0x95,0x96,0x97,0x98,0x99,0x9a,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7,0xa8,0xa9,0xaa,0xb2,0xb3,0xb4,0xb5,0xb6,0xb7,0xb8,0xb9,0xba,0xc2,0xc3,0xc4,0xc5,
43
- 0xc6,0xc7,0xc8,0xc9,0xca,0xd2,0xd3,0xd4,0xd5,0xd6,0xd7,0xd8,0xd9,0xda,0xe1,0xe2,0xe3,0xe4,0xe5,0xe6,0xe7,0xe8,0xe9,0xea,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7,0xf8,
44
- 0xf9,0xfa
45
- };
46
- static uint8 s_dc_chroma_bits[17] = { 0,0,3,1,1,1,1,1,1,1,1,1,0,0,0,0,0 };
47
- static uint8 s_dc_chroma_val[DC_CHROMA_CODES] = { 0,1,2,3,4,5,6,7,8,9,10,11 };
48
- static uint8 s_ac_chroma_bits[17] = { 0,0,2,1,2,4,4,3,4,7,5,4,4,0,1,2,0x77 };
49
- static uint8 s_ac_chroma_val[AC_CHROMA_CODES] =
50
- {
51
- 0x00,0x01,0x02,0x03,0x11,0x04,0x05,0x21,0x31,0x06,0x12,0x41,0x51,0x07,0x61,0x71,0x13,0x22,0x32,0x81,0x08,0x14,0x42,0x91,0xa1,0xb1,0xc1,0x09,0x23,0x33,0x52,0xf0,
52
- 0x15,0x62,0x72,0xd1,0x0a,0x16,0x24,0x34,0xe1,0x25,0xf1,0x17,0x18,0x19,0x1a,0x26,0x27,0x28,0x29,0x2a,0x35,0x36,0x37,0x38,0x39,0x3a,0x43,0x44,0x45,0x46,0x47,0x48,
53
- 0x49,0x4a,0x53,0x54,0x55,0x56,0x57,0x58,0x59,0x5a,0x63,0x64,0x65,0x66,0x67,0x68,0x69,0x6a,0x73,0x74,0x75,0x76,0x77,0x78,0x79,0x7a,0x82,0x83,0x84,0x85,0x86,0x87,
54
- 0x88,0x89,0x8a,0x92,0x93,0x94,0x95,0x96,0x97,0x98,0x99,0x9a,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7,0xa8,0xa9,0xaa,0xb2,0xb3,0xb4,0xb5,0xb6,0xb7,0xb8,0xb9,0xba,0xc2,0xc3,
55
- 0xc4,0xc5,0xc6,0xc7,0xc8,0xc9,0xca,0xd2,0xd3,0xd4,0xd5,0xd6,0xd7,0xd8,0xd9,0xda,0xe2,0xe3,0xe4,0xe5,0xe6,0xe7,0xe8,0xe9,0xea,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7,0xf8,
56
- 0xf9,0xfa
57
- };
58
-
59
- // Low-level helper functions.
60
- template <class T> inline void clear_obj(T &obj) { memset(&obj, 0, sizeof(obj)); }
61
-
62
- const int YR = 19595, YG = 38470, YB = 7471, CB_R = -11059, CB_G = -21709, CB_B = 32768, CR_R = 32768, CR_G = -27439, CR_B = -5329;
63
- static inline uint8 clamp(int i) { if (static_cast<uint>(i) > 255U) { if (i < 0) i = 0; else if (i > 255) i = 255; } return static_cast<uint8>(i); }
64
-
65
- static void RGB_to_YCC(uint8* pDst, const uint8 *pSrc, int num_pixels)
66
- {
67
- for ( ; num_pixels; pDst += 3, pSrc += 3, num_pixels--)
68
- {
69
- const int r = pSrc[0], g = pSrc[1], b = pSrc[2];
70
- pDst[0] = static_cast<uint8>((r * YR + g * YG + b * YB + 32768) >> 16);
71
- pDst[1] = clamp(128 + ((r * CB_R + g * CB_G + b * CB_B + 32768) >> 16));
72
- pDst[2] = clamp(128 + ((r * CR_R + g * CR_G + b * CR_B + 32768) >> 16));
73
- }
74
- }
75
-
76
- static void RGB_to_Y(uint8* pDst, const uint8 *pSrc, int num_pixels)
77
- {
78
- for ( ; num_pixels; pDst++, pSrc += 3, num_pixels--)
79
- pDst[0] = static_cast<uint8>((pSrc[0] * YR + pSrc[1] * YG + pSrc[2] * YB + 32768) >> 16);
80
- }
81
-
82
- static void RGBA_to_YCC(uint8* pDst, const uint8 *pSrc, int num_pixels)
83
- {
84
- for ( ; num_pixels; pDst += 3, pSrc += 4, num_pixels--)
85
- {
86
- const int r = pSrc[0], g = pSrc[1], b = pSrc[2];
87
- pDst[0] = static_cast<uint8>((r * YR + g * YG + b * YB + 32768) >> 16);
88
- pDst[1] = clamp(128 + ((r * CB_R + g * CB_G + b * CB_B + 32768) >> 16));
89
- pDst[2] = clamp(128 + ((r * CR_R + g * CR_G + b * CR_B + 32768) >> 16));
90
- }
91
- }
92
-
93
- static void RGBA_to_Y(uint8* pDst, const uint8 *pSrc, int num_pixels)
94
- {
95
- for ( ; num_pixels; pDst++, pSrc += 4, num_pixels--)
96
- pDst[0] = static_cast<uint8>((pSrc[0] * YR + pSrc[1] * YG + pSrc[2] * YB + 32768) >> 16);
97
- }
98
-
99
- static void Y_to_YCC(uint8* pDst, const uint8* pSrc, int num_pixels)
100
- {
101
- for( ; num_pixels; pDst += 3, pSrc++, num_pixels--) { pDst[0] = pSrc[0]; pDst[1] = 128; pDst[2] = 128; }
102
- }
103
-
104
- // Forward DCT - DCT derived from jfdctint.
105
- #define CONST_BITS 13
106
- #define ROW_BITS 2
107
- #define DCT_DESCALE(x, n) (((x) + (((int32)1) << ((n) - 1))) >> (n))
108
- #define DCT_MUL(var, c) (static_cast<int16>(var) * static_cast<int32>(c))
109
- #define DCT1D(s0, s1, s2, s3, s4, s5, s6, s7) \
110
- int32 t0 = s0 + s7, t7 = s0 - s7, t1 = s1 + s6, t6 = s1 - s6, t2 = s2 + s5, t5 = s2 - s5, t3 = s3 + s4, t4 = s3 - s4; \
111
- int32 t10 = t0 + t3, t13 = t0 - t3, t11 = t1 + t2, t12 = t1 - t2; \
112
- int32 u1 = DCT_MUL(t12 + t13, 4433); \
113
- s2 = u1 + DCT_MUL(t13, 6270); \
114
- s6 = u1 + DCT_MUL(t12, -15137); \
115
- u1 = t4 + t7; \
116
- int32 u2 = t5 + t6, u3 = t4 + t6, u4 = t5 + t7; \
117
- int32 z5 = DCT_MUL(u3 + u4, 9633); \
118
- t4 = DCT_MUL(t4, 2446); t5 = DCT_MUL(t5, 16819); \
119
- t6 = DCT_MUL(t6, 25172); t7 = DCT_MUL(t7, 12299); \
120
- u1 = DCT_MUL(u1, -7373); u2 = DCT_MUL(u2, -20995); \
121
- u3 = DCT_MUL(u3, -16069); u4 = DCT_MUL(u4, -3196); \
122
- u3 += z5; u4 += z5; \
123
- s0 = t10 + t11; s1 = t7 + u1 + u4; s3 = t6 + u2 + u3; s4 = t10 - t11; s5 = t5 + u2 + u4; s7 = t4 + u1 + u3;
124
-
125
- static void DCT2D(int32 *p)
126
- {
127
- int32 c, *q = p;
128
- for (c = 7; c >= 0; c--, q += 8)
129
- {
130
- int32 s0 = q[0], s1 = q[1], s2 = q[2], s3 = q[3], s4 = q[4], s5 = q[5], s6 = q[6], s7 = q[7];
131
- DCT1D(s0, s1, s2, s3, s4, s5, s6, s7);
132
- q[0] = s0 << ROW_BITS; q[1] = DCT_DESCALE(s1, CONST_BITS-ROW_BITS); q[2] = DCT_DESCALE(s2, CONST_BITS-ROW_BITS); q[3] = DCT_DESCALE(s3, CONST_BITS-ROW_BITS);
133
- q[4] = s4 << ROW_BITS; q[5] = DCT_DESCALE(s5, CONST_BITS-ROW_BITS); q[6] = DCT_DESCALE(s6, CONST_BITS-ROW_BITS); q[7] = DCT_DESCALE(s7, CONST_BITS-ROW_BITS);
134
- }
135
- for (q = p, c = 7; c >= 0; c--, q++)
136
- {
137
- int32 s0 = q[0*8], s1 = q[1*8], s2 = q[2*8], s3 = q[3*8], s4 = q[4*8], s5 = q[5*8], s6 = q[6*8], s7 = q[7*8];
138
- DCT1D(s0, s1, s2, s3, s4, s5, s6, s7);
139
- q[0*8] = DCT_DESCALE(s0, ROW_BITS+3); q[1*8] = DCT_DESCALE(s1, CONST_BITS+ROW_BITS+3); q[2*8] = DCT_DESCALE(s2, CONST_BITS+ROW_BITS+3); q[3*8] = DCT_DESCALE(s3, CONST_BITS+ROW_BITS+3);
140
- q[4*8] = DCT_DESCALE(s4, ROW_BITS+3); q[5*8] = DCT_DESCALE(s5, CONST_BITS+ROW_BITS+3); q[6*8] = DCT_DESCALE(s6, CONST_BITS+ROW_BITS+3); q[7*8] = DCT_DESCALE(s7, CONST_BITS+ROW_BITS+3);
141
- }
142
- }
143
-
144
- struct sym_freq { uint m_key, m_sym_index; };
145
-
146
- // Radix sorts sym_freq[] array by 32-bit key m_key. Returns ptr to sorted values.
147
- static inline sym_freq* radix_sort_syms(uint num_syms, sym_freq* pSyms0, sym_freq* pSyms1)
148
- {
149
- const uint cMaxPasses = 4;
150
- uint32 hist[256 * cMaxPasses]; clear_obj(hist);
151
- for (uint i = 0; i < num_syms; i++) { uint freq = pSyms0[i].m_key; hist[freq & 0xFF]++; hist[256 + ((freq >> 8) & 0xFF)]++; hist[256*2 + ((freq >> 16) & 0xFF)]++; hist[256*3 + ((freq >> 24) & 0xFF)]++; }
152
- sym_freq* pCur_syms = pSyms0, *pNew_syms = pSyms1;
153
- uint total_passes = cMaxPasses; while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256])) total_passes--;
154
- for (uint pass_shift = 0, pass = 0; pass < total_passes; pass++, pass_shift += 8)
155
- {
156
- const uint32* pHist = &hist[pass << 8];
157
- uint offsets[256], cur_ofs = 0;
158
- for (uint i = 0; i < 256; i++) { offsets[i] = cur_ofs; cur_ofs += pHist[i]; }
159
- for (uint i = 0; i < num_syms; i++)
160
- pNew_syms[offsets[(pCur_syms[i].m_key >> pass_shift) & 0xFF]++] = pCur_syms[i];
161
- sym_freq* t = pCur_syms; pCur_syms = pNew_syms; pNew_syms = t;
162
- }
163
- return pCur_syms;
164
- }
165
-
166
- // calculate_minimum_redundancy() originally written by: Alistair Moffat, [email protected], Jyrki Katajainen, [email protected], November 1996.
167
- static void calculate_minimum_redundancy(sym_freq *A, int n)
168
- {
169
- int root, leaf, next, avbl, used, dpth;
170
- if (n==0) return; else if (n==1) { A[0].m_key = 1; return; }
171
- A[0].m_key += A[1].m_key; root = 0; leaf = 2;
172
- for (next=1; next < n-1; next++)
173
- {
174
- if (leaf>=n || A[root].m_key<A[leaf].m_key) { A[next].m_key = A[root].m_key; A[root++].m_key = next; } else A[next].m_key = A[leaf++].m_key;
175
- if (leaf>=n || (root<next && A[root].m_key<A[leaf].m_key)) { A[next].m_key += A[root].m_key; A[root++].m_key = next; } else A[next].m_key += A[leaf++].m_key;
176
- }
177
- A[n-2].m_key = 0;
178
- for (next=n-3; next>=0; next--) A[next].m_key = A[A[next].m_key].m_key+1;
179
- avbl = 1; used = dpth = 0; root = n-2; next = n-1;
180
- while (avbl>0)
181
- {
182
- while (root>=0 && (int)A[root].m_key==dpth) { used++; root--; }
183
- while (avbl>used) { A[next--].m_key = dpth; avbl--; }
184
- avbl = 2*used; dpth++; used = 0;
185
- }
186
- }
187
-
188
- // Limits canonical Huffman code table's max code size to max_code_size.
189
- static void huffman_enforce_max_code_size(int *pNum_codes, int code_list_len, int max_code_size)
190
- {
191
- if (code_list_len <= 1) return;
192
-
193
- for (int i = max_code_size + 1; i <= MAX_HUFF_CODESIZE; i++) pNum_codes[max_code_size] += pNum_codes[i];
194
-
195
- uint32 total = 0;
196
- for (int i = max_code_size; i > 0; i--)
197
- total += (((uint32)pNum_codes[i]) << (max_code_size - i));
198
-
199
- while (total != (1UL << max_code_size))
200
- {
201
- pNum_codes[max_code_size]--;
202
- for (int i = max_code_size - 1; i > 0; i--)
203
- {
204
- if (pNum_codes[i]) { pNum_codes[i]--; pNum_codes[i + 1] += 2; break; }
205
- }
206
- total--;
207
- }
208
- }
209
-
210
- // Generates an optimized Huffman table.
211
- void jpeg_encoder::optimize_huffman_table(int table_num, int table_len)
212
- {
213
- sym_freq syms0[MAX_HUFF_SYMBOLS], syms1[MAX_HUFF_SYMBOLS];
214
- syms0[0].m_key = 1; syms0[0].m_sym_index = 0; // dummy symbol, assures that no valid code contains all 1's
215
- int num_used_syms = 1;
216
- const uint32 *pSym_count = &m_huff_count[table_num][0];
217
- for (int i = 0; i < table_len; i++)
218
- if (pSym_count[i]) { syms0[num_used_syms].m_key = pSym_count[i]; syms0[num_used_syms++].m_sym_index = i + 1; }
219
- sym_freq* pSyms = radix_sort_syms(num_used_syms, syms0, syms1);
220
- calculate_minimum_redundancy(pSyms, num_used_syms);
221
-
222
- // Count the # of symbols of each code size.
223
- int num_codes[1 + MAX_HUFF_CODESIZE]; clear_obj(num_codes);
224
- for (int i = 0; i < num_used_syms; i++)
225
- num_codes[pSyms[i].m_key]++;
226
-
227
- const uint JPGE_CODE_SIZE_LIMIT = 16; // the maximum possible size of a JPEG Huffman code (valid range is [9,16] - 9 vs. 8 because of the dummy symbol)
228
- huffman_enforce_max_code_size(num_codes, num_used_syms, JPGE_CODE_SIZE_LIMIT);
229
-
230
- // Compute m_huff_bits array, which contains the # of symbols per code size.
231
- clear_obj(m_huff_bits[table_num]);
232
- for (int i = 1; i <= (int)JPGE_CODE_SIZE_LIMIT; i++)
233
- m_huff_bits[table_num][i] = static_cast<uint8>(num_codes[i]);
234
-
235
- // Remove the dummy symbol added above, which must be in largest bucket.
236
- for (int i = JPGE_CODE_SIZE_LIMIT; i >= 1; i--)
237
- {
238
- if (m_huff_bits[table_num][i]) { m_huff_bits[table_num][i]--; break; }
239
- }
240
-
241
- // Compute the m_huff_val array, which contains the symbol indices sorted by code size (smallest to largest).
242
- for (int i = num_used_syms - 1; i >= 1; i--)
243
- m_huff_val[table_num][num_used_syms - 1 - i] = static_cast<uint8>(pSyms[i].m_sym_index - 1);
244
- }
245
-
246
- // JPEG marker generation.
247
- void jpeg_encoder::emit_byte(uint8 i)
248
- {
249
- m_all_stream_writes_succeeded = m_all_stream_writes_succeeded && m_pStream->put_obj(i);
250
- }
251
-
252
- void jpeg_encoder::emit_word(uint i)
253
- {
254
- emit_byte(uint8(i >> 8)); emit_byte(uint8(i & 0xFF));
255
- }
256
-
257
- void jpeg_encoder::emit_marker(int marker)
258
- {
259
- emit_byte(uint8(0xFF)); emit_byte(uint8(marker));
260
- }
261
-
262
- // Emit JFIF marker
263
- void jpeg_encoder::emit_jfif_app0()
264
- {
265
- emit_marker(M_APP0);
266
- emit_word(2 + 4 + 1 + 2 + 1 + 2 + 2 + 1 + 1);
267
- emit_byte(0x4A); emit_byte(0x46); emit_byte(0x49); emit_byte(0x46); /* Identifier: ASCII "JFIF" */
268
- emit_byte(0);
269
- emit_byte(1); /* Major version */
270
- emit_byte(1); /* Minor version */
271
- emit_byte(0); /* Density unit */
272
- emit_word(1);
273
- emit_word(1);
274
- emit_byte(0); /* No thumbnail image */
275
- emit_byte(0);
276
- }
277
-
278
- // Emit quantization tables
279
- void jpeg_encoder::emit_dqt()
280
- {
281
- for (int i = 0; i < ((m_num_components == 3) ? 2 : 1); i++)
282
- {
283
- emit_marker(M_DQT);
284
- emit_word(64 + 1 + 2);
285
- emit_byte(static_cast<uint8>(i));
286
- for (int j = 0; j < 64; j++)
287
- emit_byte(static_cast<uint8>(m_quantization_tables[i][j]));
288
- }
289
- }
290
-
291
- // Emit start of frame marker
292
- void jpeg_encoder::emit_sof()
293
- {
294
- emit_marker(M_SOF0); /* baseline */
295
- emit_word(3 * m_num_components + 2 + 5 + 1);
296
- emit_byte(8); /* precision */
297
- emit_word(m_image_y);
298
- emit_word(m_image_x);
299
- emit_byte(m_num_components);
300
- for (int i = 0; i < m_num_components; i++)
301
- {
302
- emit_byte(static_cast<uint8>(i + 1)); /* component ID */
303
- emit_byte((m_comp_h_samp[i] << 4) + m_comp_v_samp[i]); /* h and v sampling */
304
- emit_byte(i > 0); /* quant. table num */
305
- }
306
- }
307
-
308
- // Emit Huffman table.
309
- void jpeg_encoder::emit_dht(uint8 *bits, uint8 *val, int index, bool ac_flag)
310
- {
311
- emit_marker(M_DHT);
312
-
313
- int length = 0;
314
- for (int i = 1; i <= 16; i++)
315
- length += bits[i];
316
-
317
- emit_word(length + 2 + 1 + 16);
318
- emit_byte(static_cast<uint8>(index + (ac_flag << 4)));
319
-
320
- for (int i = 1; i <= 16; i++)
321
- emit_byte(bits[i]);
322
-
323
- for (int i = 0; i < length; i++)
324
- emit_byte(val[i]);
325
- }
326
-
327
- // Emit all Huffman tables.
328
- void jpeg_encoder::emit_dhts()
329
- {
330
- emit_dht(m_huff_bits[0+0], m_huff_val[0+0], 0, false);
331
- emit_dht(m_huff_bits[2+0], m_huff_val[2+0], 0, true);
332
- if (m_num_components == 3)
333
- {
334
- emit_dht(m_huff_bits[0+1], m_huff_val[0+1], 1, false);
335
- emit_dht(m_huff_bits[2+1], m_huff_val[2+1], 1, true);
336
- }
337
- }
338
-
339
- // emit start of scan
340
- void jpeg_encoder::emit_sos()
341
- {
342
- emit_marker(M_SOS);
343
- emit_word(2 * m_num_components + 2 + 1 + 3);
344
- emit_byte(m_num_components);
345
- for (int i = 0; i < m_num_components; i++)
346
- {
347
- emit_byte(static_cast<uint8>(i + 1));
348
- if (i == 0)
349
- emit_byte((0 << 4) + 0);
350
- else
351
- emit_byte((1 << 4) + 1);
352
- }
353
- emit_byte(0); /* spectral selection */
354
- emit_byte(63);
355
- emit_byte(0);
356
- }
357
-
358
- // Emit all markers at beginning of image file.
359
- void jpeg_encoder::emit_markers()
360
- {
361
- emit_marker(M_SOI);
362
- emit_jfif_app0();
363
- emit_dqt();
364
- emit_sof();
365
- emit_dhts();
366
- emit_sos();
367
- }
368
-
369
- // Compute the actual canonical Huffman codes/code sizes given the JPEG huff bits and val arrays.
370
- void jpeg_encoder::compute_huffman_table(uint *codes, uint8 *code_sizes, uint8 *bits, uint8 *val)
371
- {
372
- int i, l, last_p, si;
373
- uint8 huff_size[257];
374
- uint huff_code[257];
375
- uint code;
376
-
377
- int p = 0;
378
- for (l = 1; l <= 16; l++)
379
- for (i = 1; i <= bits[l]; i++)
380
- huff_size[p++] = (char)l;
381
-
382
- huff_size[p] = 0; last_p = p; // write sentinel
383
-
384
- code = 0; si = huff_size[0]; p = 0;
385
-
386
- while (huff_size[p])
387
- {
388
- while (huff_size[p] == si)
389
- huff_code[p++] = code++;
390
- code <<= 1;
391
- si++;
392
- }
393
-
394
- memset(codes, 0, sizeof(codes[0])*256);
395
- memset(code_sizes, 0, sizeof(code_sizes[0])*256);
396
- for (p = 0; p < last_p; p++)
397
- {
398
- codes[val[p]] = huff_code[p];
399
- code_sizes[val[p]] = huff_size[p];
400
- }
401
- }
402
-
403
- // Quantization table generation.
404
- void jpeg_encoder::compute_quant_table(int32 *pDst, int16 *pSrc)
405
- {
406
- int32 q;
407
- if (m_params.m_quality < 50)
408
- q = 5000 / m_params.m_quality;
409
- else
410
- q = 200 - m_params.m_quality * 2;
411
- for (int i = 0; i < 64; i++)
412
- {
413
- int32 j = *pSrc++; j = (j * q + 50L) / 100L;
414
- *pDst++ = JPGE_MIN(JPGE_MAX(j, 1), 255);
415
- }
416
- }
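
`compute_quant_table` above applies the usual IJG-style quality scaling to the base luma/chroma tables: for a quality setting $Q \in [1, 100]$ the scale factor is

$$ s = \begin{cases} 5000 / Q, & Q < 50 \\ 200 - 2Q, & Q \ge 50 \end{cases} $$

and each base coefficient $b$ becomes $\min(\max(\lfloor (b \cdot s + 50)/100 \rfloor,\, 1),\, 255)$.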
417
-
418
- // Higher-level methods.
419
- void jpeg_encoder::first_pass_init()
420
- {
421
- m_bit_buffer = 0; m_bits_in = 0;
422
- memset(m_last_dc_val, 0, 3 * sizeof(m_last_dc_val[0]));
423
- m_mcu_y_ofs = 0;
424
- m_pass_num = 1;
425
- }
426
-
427
- bool jpeg_encoder::second_pass_init()
428
- {
429
- compute_huffman_table(&m_huff_codes[0+0][0], &m_huff_code_sizes[0+0][0], m_huff_bits[0+0], m_huff_val[0+0]);
430
- compute_huffman_table(&m_huff_codes[2+0][0], &m_huff_code_sizes[2+0][0], m_huff_bits[2+0], m_huff_val[2+0]);
431
- if (m_num_components > 1)
432
- {
433
- compute_huffman_table(&m_huff_codes[0+1][0], &m_huff_code_sizes[0+1][0], m_huff_bits[0+1], m_huff_val[0+1]);
434
- compute_huffman_table(&m_huff_codes[2+1][0], &m_huff_code_sizes[2+1][0], m_huff_bits[2+1], m_huff_val[2+1]);
435
- }
436
- first_pass_init();
437
- emit_markers();
438
- m_pass_num = 2;
439
- return true;
440
- }
441
-
442
- bool jpeg_encoder::jpg_open(int p_x_res, int p_y_res, int src_channels)
443
- {
444
- m_num_components = 3;
445
- switch (m_params.m_subsampling)
446
- {
447
- case Y_ONLY:
448
- {
449
- m_num_components = 1;
450
- m_comp_h_samp[0] = 1; m_comp_v_samp[0] = 1;
451
- m_mcu_x = 8; m_mcu_y = 8;
452
- break;
453
- }
454
- case H1V1:
455
- {
456
- m_comp_h_samp[0] = 1; m_comp_v_samp[0] = 1;
457
- m_comp_h_samp[1] = 1; m_comp_v_samp[1] = 1;
458
- m_comp_h_samp[2] = 1; m_comp_v_samp[2] = 1;
459
- m_mcu_x = 8; m_mcu_y = 8;
460
- break;
461
- }
462
- case H2V1:
463
- {
464
- m_comp_h_samp[0] = 2; m_comp_v_samp[0] = 1;
465
- m_comp_h_samp[1] = 1; m_comp_v_samp[1] = 1;
466
- m_comp_h_samp[2] = 1; m_comp_v_samp[2] = 1;
467
- m_mcu_x = 16; m_mcu_y = 8;
468
- break;
469
- }
470
- case H2V2:
471
- {
472
- m_comp_h_samp[0] = 2; m_comp_v_samp[0] = 2;
473
- m_comp_h_samp[1] = 1; m_comp_v_samp[1] = 1;
474
- m_comp_h_samp[2] = 1; m_comp_v_samp[2] = 1;
475
- m_mcu_x = 16; m_mcu_y = 16;
476
- }
477
- }
478
-
479
- m_image_x = p_x_res; m_image_y = p_y_res;
480
- m_image_bpp = src_channels;
481
- m_image_bpl = m_image_x * src_channels;
482
- m_image_x_mcu = (m_image_x + m_mcu_x - 1) & (~(m_mcu_x - 1));
483
- m_image_y_mcu = (m_image_y + m_mcu_y - 1) & (~(m_mcu_y - 1));
484
- m_image_bpl_xlt = m_image_x * m_num_components;
485
- m_image_bpl_mcu = m_image_x_mcu * m_num_components;
486
- m_mcus_per_row = m_image_x_mcu / m_mcu_x;
487
-
488
- if ((m_mcu_lines[0] = static_cast<uint8*>(jpge_malloc(m_image_bpl_mcu * m_mcu_y))) == NULL) return false;
489
- for (int i = 1; i < m_mcu_y; i++)
490
- m_mcu_lines[i] = m_mcu_lines[i-1] + m_image_bpl_mcu;
491
-
492
- compute_quant_table(m_quantization_tables[0], s_std_lum_quant);
493
- compute_quant_table(m_quantization_tables[1], m_params.m_no_chroma_discrim_flag ? s_std_lum_quant : s_std_croma_quant);
494
-
495
- m_out_buf_left = JPGE_OUT_BUF_SIZE;
496
- m_pOut_buf = m_out_buf;
497
-
498
- if (m_params.m_two_pass_flag)
499
- {
500
- clear_obj(m_huff_count);
501
- first_pass_init();
502
- }
503
- else
504
- {
505
- memcpy(m_huff_bits[0+0], s_dc_lum_bits, 17); memcpy(m_huff_val [0+0], s_dc_lum_val, DC_LUM_CODES);
506
- memcpy(m_huff_bits[2+0], s_ac_lum_bits, 17); memcpy(m_huff_val [2+0], s_ac_lum_val, AC_LUM_CODES);
507
- memcpy(m_huff_bits[0+1], s_dc_chroma_bits, 17); memcpy(m_huff_val [0+1], s_dc_chroma_val, DC_CHROMA_CODES);
508
- memcpy(m_huff_bits[2+1], s_ac_chroma_bits, 17); memcpy(m_huff_val [2+1], s_ac_chroma_val, AC_CHROMA_CODES);
509
- if (!second_pass_init()) return false; // in effect, skip over the first pass
510
- }
511
- return m_all_stream_writes_succeeded;
512
- }
513
-
514
- void jpeg_encoder::load_block_8_8_grey(int x)
515
- {
516
- uint8 *pSrc;
517
- sample_array_t *pDst = m_sample_array;
518
- x <<= 3;
519
- for (int i = 0; i < 8; i++, pDst += 8)
520
- {
521
- pSrc = m_mcu_lines[i] + x;
522
- 		pDst[0] = pSrc[0] - 128; pDst[1] = pSrc[1] - 128; pDst[2] = pSrc[2] - 128; pDst[3] = pSrc[3] - 128;
- 		pDst[4] = pSrc[4] - 128; pDst[5] = pSrc[5] - 128; pDst[6] = pSrc[6] - 128; pDst[7] = pSrc[7] - 128;
- 	}
- }
-
- void jpeg_encoder::load_block_8_8(int x, int y, int c)
- {
- 	uint8 *pSrc;
- 	sample_array_t *pDst = m_sample_array;
- 	x = (x * (8 * 3)) + c;
- 	y <<= 3;
- 	for (int i = 0; i < 8; i++, pDst += 8)
- 	{
- 		pSrc = m_mcu_lines[y + i] + x;
- 		pDst[0] = pSrc[0 * 3] - 128; pDst[1] = pSrc[1 * 3] - 128; pDst[2] = pSrc[2 * 3] - 128; pDst[3] = pSrc[3 * 3] - 128;
- 		pDst[4] = pSrc[4 * 3] - 128; pDst[5] = pSrc[5 * 3] - 128; pDst[6] = pSrc[6 * 3] - 128; pDst[7] = pSrc[7 * 3] - 128;
- 	}
- }
-
- void jpeg_encoder::load_block_16_8(int x, int c)
- {
- 	uint8 *pSrc1, *pSrc2;
- 	sample_array_t *pDst = m_sample_array;
- 	x = (x * (16 * 3)) + c;
- 	int a = 0, b = 2;
- 	for (int i = 0; i < 16; i += 2, pDst += 8)
- 	{
- 		pSrc1 = m_mcu_lines[i + 0] + x;
- 		pSrc2 = m_mcu_lines[i + 1] + x;
- 		pDst[0] = ((pSrc1[ 0 * 3] + pSrc1[ 1 * 3] + pSrc2[ 0 * 3] + pSrc2[ 1 * 3] + a) >> 2) - 128; pDst[1] = ((pSrc1[ 2 * 3] + pSrc1[ 3 * 3] + pSrc2[ 2 * 3] + pSrc2[ 3 * 3] + b) >> 2) - 128;
- 		pDst[2] = ((pSrc1[ 4 * 3] + pSrc1[ 5 * 3] + pSrc2[ 4 * 3] + pSrc2[ 5 * 3] + a) >> 2) - 128; pDst[3] = ((pSrc1[ 6 * 3] + pSrc1[ 7 * 3] + pSrc2[ 6 * 3] + pSrc2[ 7 * 3] + b) >> 2) - 128;
- 		pDst[4] = ((pSrc1[ 8 * 3] + pSrc1[ 9 * 3] + pSrc2[ 8 * 3] + pSrc2[ 9 * 3] + a) >> 2) - 128; pDst[5] = ((pSrc1[10 * 3] + pSrc1[11 * 3] + pSrc2[10 * 3] + pSrc2[11 * 3] + b) >> 2) - 128;
- 		pDst[6] = ((pSrc1[12 * 3] + pSrc1[13 * 3] + pSrc2[12 * 3] + pSrc2[13 * 3] + a) >> 2) - 128; pDst[7] = ((pSrc1[14 * 3] + pSrc1[15 * 3] + pSrc2[14 * 3] + pSrc2[15 * 3] + b) >> 2) - 128;
- 		int temp = a; a = b; b = temp;
- 	}
- }
-
- void jpeg_encoder::load_block_16_8_8(int x, int c)
- {
- 	uint8 *pSrc1;
- 	sample_array_t *pDst = m_sample_array;
- 	x = (x * (16 * 3)) + c;
- 	for (int i = 0; i < 8; i++, pDst += 8)
- 	{
- 		pSrc1 = m_mcu_lines[i + 0] + x;
- 		pDst[0] = ((pSrc1[ 0 * 3] + pSrc1[ 1 * 3]) >> 1) - 128; pDst[1] = ((pSrc1[ 2 * 3] + pSrc1[ 3 * 3]) >> 1) - 128;
- 		pDst[2] = ((pSrc1[ 4 * 3] + pSrc1[ 5 * 3]) >> 1) - 128; pDst[3] = ((pSrc1[ 6 * 3] + pSrc1[ 7 * 3]) >> 1) - 128;
- 		pDst[4] = ((pSrc1[ 8 * 3] + pSrc1[ 9 * 3]) >> 1) - 128; pDst[5] = ((pSrc1[10 * 3] + pSrc1[11 * 3]) >> 1) - 128;
- 		pDst[6] = ((pSrc1[12 * 3] + pSrc1[13 * 3]) >> 1) - 128; pDst[7] = ((pSrc1[14 * 3] + pSrc1[15 * 3]) >> 1) - 128;
- 	}
- }
-
- void jpeg_encoder::load_quantized_coefficients(int component_num)
- {
- 	int32 *q = m_quantization_tables[component_num > 0];
- 	int16 *pDst = m_coefficient_array;
- 	for (int i = 0; i < 64; i++)
- 	{
- 		sample_array_t j = m_sample_array[s_zag[i]];
- 		if (j < 0)
- 		{
- 			if ((j = -j + (*q >> 1)) < *q)
- 				*pDst++ = 0;
- 			else
- 				*pDst++ = static_cast<int16>(-(j / *q));
- 		}
- 		else
- 		{
- 			if ((j = j + (*q >> 1)) < *q)
- 				*pDst++ = 0;
- 			else
- 				*pDst++ = static_cast<int16>((j / *q));
- 		}
- 		q++;
- 	}
- }
-
- void jpeg_encoder::flush_output_buffer()
- {
- 	if (m_out_buf_left != JPGE_OUT_BUF_SIZE)
- 		m_all_stream_writes_succeeded = m_all_stream_writes_succeeded && m_pStream->put_buf(m_out_buf, JPGE_OUT_BUF_SIZE - m_out_buf_left);
- 	m_pOut_buf = m_out_buf;
- 	m_out_buf_left = JPGE_OUT_BUF_SIZE;
- }
-
- void jpeg_encoder::put_bits(uint bits, uint len)
- {
- 	m_bit_buffer |= ((uint32)bits << (24 - (m_bits_in += len)));
- 	while (m_bits_in >= 8)
- 	{
- 		uint8 c;
- #define JPGE_PUT_BYTE(c) { *m_pOut_buf++ = (c); if (--m_out_buf_left == 0) flush_output_buffer(); }
- 		JPGE_PUT_BYTE(c = (uint8)((m_bit_buffer >> 16) & 0xFF));
- 		if (c == 0xFF) JPGE_PUT_BYTE(0);
- 		m_bit_buffer <<= 8;
- 		m_bits_in -= 8;
- 	}
- }
-
- void jpeg_encoder::code_coefficients_pass_one(int component_num)
- {
- 	if (component_num >= 3) return; // just to shut up static analysis
- 	int i, run_len, nbits, temp1;
- 	int16 *src = m_coefficient_array;
- 	uint32 *dc_count = component_num ? m_huff_count[0 + 1] : m_huff_count[0 + 0], *ac_count = component_num ? m_huff_count[2 + 1] : m_huff_count[2 + 0];
-
- 	temp1 = src[0] - m_last_dc_val[component_num];
- 	m_last_dc_val[component_num] = src[0];
- 	if (temp1 < 0) temp1 = -temp1;
-
- 	nbits = 0;
- 	while (temp1)
- 	{
- 		nbits++; temp1 >>= 1;
- 	}
-
- 	dc_count[nbits]++;
- 	for (run_len = 0, i = 1; i < 64; i++)
- 	{
- 		if ((temp1 = m_coefficient_array[i]) == 0)
- 			run_len++;
- 		else
- 		{
- 			while (run_len >= 16)
- 			{
- 				ac_count[0xF0]++;
- 				run_len -= 16;
- 			}
- 			if (temp1 < 0) temp1 = -temp1;
- 			nbits = 1;
- 			while (temp1 >>= 1) nbits++;
- 			ac_count[(run_len << 4) + nbits]++;
- 			run_len = 0;
- 		}
- 	}
- 	if (run_len) ac_count[0]++;
- }
-
- void jpeg_encoder::code_coefficients_pass_two(int component_num)
- {
- 	int i, j, run_len, nbits, temp1, temp2;
- 	int16 *pSrc = m_coefficient_array;
- 	uint *codes[2];
- 	uint8 *code_sizes[2];
-
- 	if (component_num == 0)
- 	{
- 		codes[0] = m_huff_codes[0 + 0]; codes[1] = m_huff_codes[2 + 0];
- 		code_sizes[0] = m_huff_code_sizes[0 + 0]; code_sizes[1] = m_huff_code_sizes[2 + 0];
- 	}
- 	else
- 	{
- 		codes[0] = m_huff_codes[0 + 1]; codes[1] = m_huff_codes[2 + 1];
- 		code_sizes[0] = m_huff_code_sizes[0 + 1]; code_sizes[1] = m_huff_code_sizes[2 + 1];
- 	}
-
- 	temp1 = temp2 = pSrc[0] - m_last_dc_val[component_num];
- 	m_last_dc_val[component_num] = pSrc[0];
-
- 	if (temp1 < 0)
- 	{
- 		temp1 = -temp1; temp2--;
- 	}
-
- 	nbits = 0;
- 	while (temp1)
- 	{
- 		nbits++; temp1 >>= 1;
- 	}
-
- 	put_bits(codes[0][nbits], code_sizes[0][nbits]);
- 	if (nbits) put_bits(temp2 & ((1 << nbits) - 1), nbits);
-
- 	for (run_len = 0, i = 1; i < 64; i++)
- 	{
- 		if ((temp1 = m_coefficient_array[i]) == 0)
- 			run_len++;
- 		else
- 		{
- 			while (run_len >= 16)
- 			{
- 				put_bits(codes[1][0xF0], code_sizes[1][0xF0]);
- 				run_len -= 16;
- 			}
- 			if ((temp2 = temp1) < 0)
- 			{
- 				temp1 = -temp1;
- 				temp2--;
- 			}
- 			nbits = 1;
- 			while (temp1 >>= 1)
- 				nbits++;
- 			j = (run_len << 4) + nbits;
- 			put_bits(codes[1][j], code_sizes[1][j]);
- 			put_bits(temp2 & ((1 << nbits) - 1), nbits);
- 			run_len = 0;
- 		}
- 	}
- 	if (run_len)
- 		put_bits(codes[1][0], code_sizes[1][0]);
- }
-
- void jpeg_encoder::code_block(int component_num)
- {
- 	DCT2D(m_sample_array);
- 	load_quantized_coefficients(component_num);
- 	if (m_pass_num == 1)
- 		code_coefficients_pass_one(component_num);
- 	else
- 		code_coefficients_pass_two(component_num);
- }
-
- void jpeg_encoder::process_mcu_row()
- {
- 	if (m_num_components == 1)
- 	{
- 		for (int i = 0; i < m_mcus_per_row; i++)
- 		{
- 			load_block_8_8_grey(i); code_block(0);
- 		}
- 	}
- 	else if ((m_comp_h_samp[0] == 1) && (m_comp_v_samp[0] == 1))
- 	{
- 		for (int i = 0; i < m_mcus_per_row; i++)
- 		{
- 			load_block_8_8(i, 0, 0); code_block(0); load_block_8_8(i, 0, 1); code_block(1); load_block_8_8(i, 0, 2); code_block(2);
- 		}
- 	}
- 	else if ((m_comp_h_samp[0] == 2) && (m_comp_v_samp[0] == 1))
- 	{
- 		for (int i = 0; i < m_mcus_per_row; i++)
- 		{
- 			load_block_8_8(i * 2 + 0, 0, 0); code_block(0); load_block_8_8(i * 2 + 1, 0, 0); code_block(0);
- 			load_block_16_8_8(i, 1); code_block(1); load_block_16_8_8(i, 2); code_block(2);
- 		}
- 	}
- 	else if ((m_comp_h_samp[0] == 2) && (m_comp_v_samp[0] == 2))
- 	{
- 		for (int i = 0; i < m_mcus_per_row; i++)
- 		{
- 			load_block_8_8(i * 2 + 0, 0, 0); code_block(0); load_block_8_8(i * 2 + 1, 0, 0); code_block(0);
- 			load_block_8_8(i * 2 + 0, 1, 0); code_block(0); load_block_8_8(i * 2 + 1, 1, 0); code_block(0);
- 			load_block_16_8(i, 1); code_block(1); load_block_16_8(i, 2); code_block(2);
- 		}
- 	}
- }
-
- bool jpeg_encoder::terminate_pass_one()
- {
- 	optimize_huffman_table(0+0, DC_LUM_CODES); optimize_huffman_table(2+0, AC_LUM_CODES);
- 	if (m_num_components > 1)
- 	{
- 		optimize_huffman_table(0+1, DC_CHROMA_CODES); optimize_huffman_table(2+1, AC_CHROMA_CODES);
- 	}
- 	return second_pass_init();
- }
-
- bool jpeg_encoder::terminate_pass_two()
- {
- 	put_bits(0x7F, 7);
- 	flush_output_buffer();
- 	emit_marker(M_EOI);
- 	m_pass_num++; // purposely bump up m_pass_num, for debugging
- 	return true;
- }
-
- bool jpeg_encoder::process_end_of_image()
- {
- 	if (m_mcu_y_ofs)
- 	{
- 		if (m_mcu_y_ofs < 16) // check here just to shut up static analysis
- 		{
- 			for (int i = m_mcu_y_ofs; i < m_mcu_y; i++)
- 				memcpy(m_mcu_lines[i], m_mcu_lines[m_mcu_y_ofs - 1], m_image_bpl_mcu);
- 		}
-
- 		process_mcu_row();
- 	}
-
- 	if (m_pass_num == 1)
- 		return terminate_pass_one();
- 	else
- 		return terminate_pass_two();
- }
-
- void jpeg_encoder::load_mcu(const void *pSrc)
- {
- 	const uint8* Psrc = reinterpret_cast<const uint8*>(pSrc);
-
- 	uint8* pDst = m_mcu_lines[m_mcu_y_ofs]; // OK to write up to m_image_bpl_xlt bytes to pDst
-
- 	if (m_num_components == 1)
- 	{
- 		if (m_image_bpp == 4)
- 			RGBA_to_Y(pDst, Psrc, m_image_x);
- 		else if (m_image_bpp == 3)
- 			RGB_to_Y(pDst, Psrc, m_image_x);
- 		else
- 			memcpy(pDst, Psrc, m_image_x);
- 	}
- 	else
- 	{
- 		if (m_image_bpp == 4)
- 			RGBA_to_YCC(pDst, Psrc, m_image_x);
- 		else if (m_image_bpp == 3)
- 			RGB_to_YCC(pDst, Psrc, m_image_x);
- 		else
- 			Y_to_YCC(pDst, Psrc, m_image_x);
- 	}
-
- 	// Possibly duplicate pixels at end of scanline if not a multiple of 8 or 16
- 	if (m_num_components == 1)
- 		memset(m_mcu_lines[m_mcu_y_ofs] + m_image_bpl_xlt, pDst[m_image_bpl_xlt - 1], m_image_x_mcu - m_image_x);
- 	else
- 	{
- 		const uint8 y = pDst[m_image_bpl_xlt - 3 + 0], cb = pDst[m_image_bpl_xlt - 3 + 1], cr = pDst[m_image_bpl_xlt - 3 + 2];
- 		uint8 *q = m_mcu_lines[m_mcu_y_ofs] + m_image_bpl_xlt;
- 		for (int i = m_image_x; i < m_image_x_mcu; i++)
- 		{
- 			*q++ = y; *q++ = cb; *q++ = cr;
- 		}
- 	}
-
- 	if (++m_mcu_y_ofs == m_mcu_y)
- 	{
- 		process_mcu_row();
- 		m_mcu_y_ofs = 0;
- 	}
- }
-
- void jpeg_encoder::clear()
- {
- 	m_mcu_lines[0] = NULL;
- 	m_pass_num = 0;
- 	m_all_stream_writes_succeeded = true;
- }
-
- jpeg_encoder::jpeg_encoder()
- {
- 	clear();
- }
-
- jpeg_encoder::~jpeg_encoder()
- {
- 	deinit();
- }
-
- bool jpeg_encoder::init(output_stream *pStream, int64_t width, int64_t height, int64_t src_channels, const params &comp_params)
- {
- 	deinit();
- 	if (((!pStream) || (width < 1) || (height < 1)) || ((src_channels != 1) && (src_channels != 3) && (src_channels != 4)) || (!comp_params.check_valid())) return false;
- 	m_pStream = pStream;
- 	m_params = comp_params;
- 	return jpg_open(width, height, src_channels);
- }
-
- void jpeg_encoder::deinit()
- {
- 	jpge_free(m_mcu_lines[0]);
- 	clear();
- }
-
- bool jpeg_encoder::process_scanline(const void* pScanline)
- {
- 	if ((m_pass_num < 1) || (m_pass_num > 2)) return false;
- 	if (m_all_stream_writes_succeeded)
- 	{
- 		if (!pScanline)
- 		{
- 			if (!process_end_of_image()) return false;
- 		}
- 		else
- 		{
- 			load_mcu(pScanline);
- 		}
- 	}
- 	return m_all_stream_writes_succeeded;
- }
-
- // Higher level wrappers/examples (optional).
- #include <stdio.h>
-
- class cfile_stream : public output_stream
- {
- 	cfile_stream(const cfile_stream &);
- 	cfile_stream &operator= (const cfile_stream &);
-
- 	FILE* m_pFile;
- 	bool m_bStatus;
-
- public:
- 	cfile_stream() : m_pFile(NULL), m_bStatus(false) { }
-
- 	virtual ~cfile_stream()
- 	{
- 		close();
- 	}
-
- 	bool open(const char *pFilename)
- 	{
- 		close();
- #if defined(_MSC_VER)
- 		if (fopen_s(&m_pFile, pFilename, "wb") != 0)
- 		{
- 			return false;
- 		}
- #else
- 		m_pFile = fopen(pFilename, "wb");
- #endif
- 		m_bStatus = (m_pFile != NULL);
- 		return m_bStatus;
- 	}
-
- 	bool close()
- 	{
- 		if (m_pFile)
- 		{
- 			if (fclose(m_pFile) == EOF)
- 			{
- 				m_bStatus = false;
- 			}
- 			m_pFile = NULL;
- 		}
- 		return m_bStatus;
- 	}
-
- 	virtual bool put_buf(const void* pBuf, int64_t len)
- 	{
- 		m_bStatus = m_bStatus && (fwrite(pBuf, len, 1, m_pFile) == 1);
- 		return m_bStatus;
- 	}
-
- 	uint get_size() const
- 	{
- 		return m_pFile ? ftell(m_pFile) : 0;
- 	}
- };
-
- // Writes JPEG image to file.
- bool compress_image_to_jpeg_file(const char *pFilename, int64_t width, int64_t height, int64_t num_channels, const uint8 *pImage_data, const params &comp_params)
- {
- 	cfile_stream dst_stream;
- 	if (!dst_stream.open(pFilename))
- 		return false;
-
- 	jpge::jpeg_encoder dst_image;
- 	if (!dst_image.init(&dst_stream, width, height, num_channels, comp_params))
- 		return false;
-
- 	for (uint pass_index = 0; pass_index < dst_image.get_total_passes(); pass_index++)
- 	{
- 		for (int64_t i = 0; i < height; i++)
- 		{
- 			// i, width, and num_channels are all 64bit
- 			const uint8* pBuf = pImage_data + i * width * num_channels;
- 			if (!dst_image.process_scanline(pBuf))
- 				return false;
- 		}
- 		if (!dst_image.process_scanline(NULL))
- 			return false;
- 	}
-
- 	dst_image.deinit();
-
- 	return dst_stream.close();
- }
-
- class memory_stream : public output_stream
- {
- 	memory_stream(const memory_stream &);
- 	memory_stream &operator= (const memory_stream &);
-
- 	uint8 *m_pBuf;
- 	uint64_t m_buf_size, m_buf_ofs;
-
- public:
- 	memory_stream(void *pBuf, uint64_t buf_size) : m_pBuf(static_cast<uint8*>(pBuf)), m_buf_size(buf_size), m_buf_ofs(0) { }
-
- 	virtual ~memory_stream() { }
-
- 	virtual bool put_buf(const void* pBuf, int64_t len)
- 	{
- 		uint64_t buf_remaining = m_buf_size - m_buf_ofs;
- 		if ((uint64_t)len > buf_remaining)
- 			return false;
- 		memcpy(m_pBuf + m_buf_ofs, pBuf, len);
- 		m_buf_ofs += len;
- 		return true;
- 	}
-
- 	uint64_t get_size() const
- 	{
- 		return m_buf_ofs;
- 	}
- };
-
- bool compress_image_to_jpeg_file_in_memory(void *pDstBuf, int64_t &buf_size, int64_t width, int64_t height, int64_t num_channels, const uint8 *pImage_data, const params &comp_params)
- {
- 	if ((!pDstBuf) || (!buf_size))
- 		return false;
-
- 	memory_stream dst_stream(pDstBuf, buf_size);
-
- 	buf_size = 0;
-
- 	jpge::jpeg_encoder dst_image;
- 	if (!dst_image.init(&dst_stream, width, height, num_channels, comp_params))
- 		return false;
-
- 	for (uint pass_index = 0; pass_index < dst_image.get_total_passes(); pass_index++)
- 	{
- 		for (int64_t i = 0; i < height; i++)
- 		{
- 			const uint8* pScanline = pImage_data + i * width * num_channels;
- 			if (!dst_image.process_scanline(pScanline))
- 				return false;
- 		}
- 		if (!dst_image.process_scanline(NULL))
- 			return false;
- 	}
-
- 	dst_image.deinit();
-
- 	buf_size = dst_stream.get_size();
- 	return true;
- }
-
- } // namespace jpge
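The `put_bits` routine above packs Huffman codes MSB-first into a 24-bit buffer and applies JPEG's byte-stuffing rule: any 0xFF byte emitted into the entropy-coded stream must be followed by 0x00 so a decoder does not mistake it for a marker. Below is a minimal Python sketch of that rule, separate from the jpge API; the `put_bits` helper and `state` dict are hypothetical names for illustration.

```python
# Minimal sketch of JPEG byte stuffing, mirroring jpeg_encoder::put_bits above.
# Hypothetical helper, not part of jpge; uses an unbounded int as the bit buffer.

def put_bits(state, bits, length):
    """Append `length` bits (MSB first); stuff 0x00 after any emitted 0xFF."""
    state["buffer"] = (state["buffer"] << length) | (bits & ((1 << length) - 1))
    state["count"] += length
    while state["count"] >= 8:
        byte = (state["buffer"] >> (state["count"] - 8)) & 0xFF
        state["out"].append(byte)
        if byte == 0xFF:
            state["out"].append(0x00)  # marker escape required by the JPEG spec
        state["count"] -= 8

state = {"buffer": 0, "count": 0, "out": bytearray()}
put_bits(state, 0xFF, 8)    # forces a stuffed zero byte
put_bits(state, 0b1010, 4)  # 4 bits remain pending in the buffer
assert bytes(state["out"]) == b"\xff\x00"
```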
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_inpaint.py DELETED
@@ -1,589 +0,0 @@
- # Copyright 2023 The HuggingFace Team. All rights reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- import warnings
- from functools import partial
- from typing import Dict, List, Optional, Union
-
- import jax
- import jax.numpy as jnp
- import numpy as np
- from flax.core.frozen_dict import FrozenDict
- from flax.jax_utils import unreplicate
- from flax.training.common_utils import shard
- from packaging import version
- from PIL import Image
- from transformers import CLIPImageProcessor, CLIPTokenizer, FlaxCLIPTextModel
-
- from ...models import FlaxAutoencoderKL, FlaxUNet2DConditionModel
- from ...schedulers import (
-     FlaxDDIMScheduler,
-     FlaxDPMSolverMultistepScheduler,
-     FlaxLMSDiscreteScheduler,
-     FlaxPNDMScheduler,
- )
- from ...utils import PIL_INTERPOLATION, deprecate, logging, replace_example_docstring
- from ..pipeline_flax_utils import FlaxDiffusionPipeline
- from . import FlaxStableDiffusionPipelineOutput
- from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
-
-
- logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
-
- # Set to True to use python for loop instead of jax.fori_loop for easier debugging
- DEBUG = False
-
- EXAMPLE_DOC_STRING = """
-     Examples:
-         ```py
-         >>> import jax
-         >>> import numpy as np
-         >>> from flax.jax_utils import replicate
-         >>> from flax.training.common_utils import shard
-         >>> import PIL
-         >>> import requests
-         >>> from io import BytesIO
-         >>> from diffusers import FlaxStableDiffusionInpaintPipeline
-
-
-         >>> def download_image(url):
-         ...     response = requests.get(url)
-         ...     return PIL.Image.open(BytesIO(response.content)).convert("RGB")
-
-
-         >>> img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
-         >>> mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
-
-         >>> init_image = download_image(img_url).resize((512, 512))
-         >>> mask_image = download_image(mask_url).resize((512, 512))
-
-         >>> pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(
-         ...     "xvjiarui/stable-diffusion-2-inpainting"
-         ... )
-
-         >>> prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
-         >>> prng_seed = jax.random.PRNGKey(0)
-         >>> num_inference_steps = 50
-
-         >>> num_samples = jax.device_count()
-         >>> prompt = num_samples * [prompt]
-         >>> init_image = num_samples * [init_image]
-         >>> mask_image = num_samples * [mask_image]
-         >>> prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(
-         ...     prompt, init_image, mask_image
-         ... )
-         # shard inputs and rng
-
-         >>> params = replicate(params)
-         >>> prng_seed = jax.random.split(prng_seed, jax.device_count())
-         >>> prompt_ids = shard(prompt_ids)
-         >>> processed_masked_images = shard(processed_masked_images)
-         >>> processed_masks = shard(processed_masks)
-
-         >>> images = pipeline(
-         ...     prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True
-         ... ).images
-         >>> images = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
-         ```
- """
-
-
- class FlaxStableDiffusionInpaintPipeline(FlaxDiffusionPipeline):
-     r"""
-     Flax-based pipeline for text-guided image inpainting using Stable Diffusion.
-
-     <Tip warning={true}>
-
-     🧪 This is an experimental feature!
-
-     </Tip>
-
-     This model inherits from [`FlaxDiffusionPipeline`]. Check the superclass documentation for the generic methods
-     implemented for all pipelines (downloading, saving, running on a particular device, etc.).
-
-     Args:
-         vae ([`FlaxAutoencoderKL`]):
-             Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
-         text_encoder ([`~transformers.FlaxCLIPTextModel`]):
-             Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
-         tokenizer ([`~transformers.CLIPTokenizer`]):
-             A `CLIPTokenizer` to tokenize text.
-         unet ([`FlaxUNet2DConditionModel`]):
-             A `FlaxUNet2DConditionModel` to denoise the encoded image latents.
-         scheduler ([`SchedulerMixin`]):
-             A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
-             [`FlaxDDIMScheduler`], [`FlaxLMSDiscreteScheduler`], [`FlaxPNDMScheduler`], or
-             [`FlaxDPMSolverMultistepScheduler`].
-         safety_checker ([`FlaxStableDiffusionSafetyChecker`]):
-             Classification module that estimates whether generated images could be considered offensive or harmful.
-             Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
-             about a model's potential harms.
-         feature_extractor ([`~transformers.CLIPImageProcessor`]):
-             A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
-     """
-
-     def __init__(
-         self,
-         vae: FlaxAutoencoderKL,
-         text_encoder: FlaxCLIPTextModel,
-         tokenizer: CLIPTokenizer,
-         unet: FlaxUNet2DConditionModel,
-         scheduler: Union[
-             FlaxDDIMScheduler, FlaxPNDMScheduler, FlaxLMSDiscreteScheduler, FlaxDPMSolverMultistepScheduler
-         ],
-         safety_checker: FlaxStableDiffusionSafetyChecker,
-         feature_extractor: CLIPImageProcessor,
-         dtype: jnp.dtype = jnp.float32,
-     ):
-         super().__init__()
-         self.dtype = dtype
-
-         if safety_checker is None:
-             logger.warning(
-                 f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
-                 " that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered"
-                 " results in services or applications open to the public. Both the diffusers team and Hugging Face"
-                 " strongly recommend keeping the safety filter enabled in all public facing circumstances, disabling"
-                 " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
-                 " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
-             )
-
-         is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
-             version.parse(unet.config._diffusers_version).base_version
-         ) < version.parse("0.9.0.dev0")
-         is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
-         if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
-             deprecation_message = (
-                 "The configuration file of the unet has set the default `sample_size` to smaller than"
-                 " 64, which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
-                 " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
-                 " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
-                 " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
-                 " configuration file. Please make sure to update the config accordingly, as leaving `sample_size=32`"
-                 " in the config might lead to incorrect results in future versions. If you have downloaded this"
-                 " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
-                 " the `unet/config.json` file"
-             )
-             deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
-             new_config = dict(unet.config)
-             new_config["sample_size"] = 64
-             unet._internal_dict = FrozenDict(new_config)
-
-         self.register_modules(
-             vae=vae,
-             text_encoder=text_encoder,
-             tokenizer=tokenizer,
-             unet=unet,
-             scheduler=scheduler,
-             safety_checker=safety_checker,
-             feature_extractor=feature_extractor,
-         )
-         self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
-
-     def prepare_inputs(
-         self,
-         prompt: Union[str, List[str]],
-         image: Union[Image.Image, List[Image.Image]],
-         mask: Union[Image.Image, List[Image.Image]],
-     ):
-         if not isinstance(prompt, (str, list)):
-             raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
-
-         if not isinstance(image, (Image.Image, list)):
-             raise ValueError(f"image has to be of type `PIL.Image.Image` or list but is {type(image)}")
-
-         if isinstance(image, Image.Image):
-             image = [image]
-
-         if not isinstance(mask, (Image.Image, list)):
-             raise ValueError(f"mask has to be of type `PIL.Image.Image` or list but is {type(mask)}")
-
-         if isinstance(mask, Image.Image):
-             mask = [mask]
-
-         processed_images = jnp.concatenate([preprocess_image(img, jnp.float32) for img in image])
-         processed_masks = jnp.concatenate([preprocess_mask(m, jnp.float32) for m in mask])
-         # processed_masks[processed_masks < 0.5] = 0
-         processed_masks = processed_masks.at[processed_masks < 0.5].set(0)
-         # processed_masks[processed_masks >= 0.5] = 1
-         processed_masks = processed_masks.at[processed_masks >= 0.5].set(1)
-
-         processed_masked_images = processed_images * (processed_masks < 0.5)
-
-         text_input = self.tokenizer(
-             prompt,
-             padding="max_length",
-             max_length=self.tokenizer.model_max_length,
-             truncation=True,
-             return_tensors="np",
-         )
-         return text_input.input_ids, processed_masked_images, processed_masks
-
-     def _get_has_nsfw_concepts(self, features, params):
-         has_nsfw_concepts = self.safety_checker(features, params)
-         return has_nsfw_concepts
-
-     def _run_safety_checker(self, images, safety_model_params, jit=False):
-         # safety_model_params should already be replicated when jit is True
-         pil_images = [Image.fromarray(image) for image in images]
-         features = self.feature_extractor(pil_images, return_tensors="np").pixel_values
-
-         if jit:
-             features = shard(features)
-             has_nsfw_concepts = _p_get_has_nsfw_concepts(self, features, safety_model_params)
-             has_nsfw_concepts = unshard(has_nsfw_concepts)
-             safety_model_params = unreplicate(safety_model_params)
-         else:
-             has_nsfw_concepts = self._get_has_nsfw_concepts(features, safety_model_params)
-
-         images_was_copied = False
-         for idx, has_nsfw_concept in enumerate(has_nsfw_concepts):
-             if has_nsfw_concept:
-                 if not images_was_copied:
-                     images_was_copied = True
-                     images = images.copy()
-
-                 images[idx] = np.zeros(images[idx].shape, dtype=np.uint8)  # black image
-
-         if any(has_nsfw_concepts):
-             warnings.warn(
-                 "Potential NSFW content was detected in one or more images. A black image will be returned"
-                 " instead. Try again with a different prompt and/or seed."
-             )
-
-         return images, has_nsfw_concepts
-
-     def _generate(
-         self,
-         prompt_ids: jnp.array,
-         mask: jnp.array,
-         masked_image: jnp.array,
-         params: Union[Dict, FrozenDict],
-         prng_seed: jax.random.KeyArray,
-         num_inference_steps: int,
-         height: int,
-         width: int,
-         guidance_scale: float,
-         latents: Optional[jnp.array] = None,
-         neg_prompt_ids: Optional[jnp.array] = None,
-     ):
-         if height % 8 != 0 or width % 8 != 0:
-             raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
-
-         # get prompt text embeddings
-         prompt_embeds = self.text_encoder(prompt_ids, params=params["text_encoder"])[0]
-
-         # TODO: currently it is assumed `do_classifier_free_guidance = guidance_scale > 1.0`
-         # implement this conditional `do_classifier_free_guidance = guidance_scale > 1.0`
-         batch_size = prompt_ids.shape[0]
-
-         max_length = prompt_ids.shape[-1]
-
-         if neg_prompt_ids is None:
-             uncond_input = self.tokenizer(
-                 [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="np"
-             ).input_ids
-         else:
-             uncond_input = neg_prompt_ids
-         negative_prompt_embeds = self.text_encoder(uncond_input, params=params["text_encoder"])[0]
-         context = jnp.concatenate([negative_prompt_embeds, prompt_embeds])
-
-         latents_shape = (
-             batch_size,
-             self.vae.config.latent_channels,
-             height // self.vae_scale_factor,
-             width // self.vae_scale_factor,
-         )
-         if latents is None:
-             latents = jax.random.normal(prng_seed, shape=latents_shape, dtype=self.dtype)
-         else:
-             if latents.shape != latents_shape:
-                 raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
-
-         prng_seed, mask_prng_seed = jax.random.split(prng_seed)
-
-         masked_image_latent_dist = self.vae.apply(
-             {"params": params["vae"]}, masked_image, method=self.vae.encode
-         ).latent_dist
-         masked_image_latents = masked_image_latent_dist.sample(key=mask_prng_seed).transpose((0, 3, 1, 2))
-         masked_image_latents = self.vae.config.scaling_factor * masked_image_latents
-         del mask_prng_seed
-
-         mask = jax.image.resize(mask, (*mask.shape[:-2], *masked_image_latents.shape[-2:]), method="nearest")
-
-         # 8. Check that sizes of mask, masked image and latents match
-         num_channels_latents = self.vae.config.latent_channels
-         num_channels_mask = mask.shape[1]
-         num_channels_masked_image = masked_image_latents.shape[1]
-         if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:
-             raise ValueError(
-                 f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
-                 f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
-                 f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}"
-                 f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of"
-                 " `pipeline.unet` or your `mask_image` or `image` input."
-             )
-
-         def loop_body(step, args):
-             latents, mask, masked_image_latents, scheduler_state = args
-             # For classifier free guidance, we need to do two forward passes.
-             # Here we concatenate the unconditional and text embeddings into a single batch
-             # to avoid doing two forward passes
-             latents_input = jnp.concatenate([latents] * 2)
-             mask_input = jnp.concatenate([mask] * 2)
-             masked_image_latents_input = jnp.concatenate([masked_image_latents] * 2)
-
-             t = jnp.array(scheduler_state.timesteps, dtype=jnp.int32)[step]
-             timestep = jnp.broadcast_to(t, latents_input.shape[0])
-
-             latents_input = self.scheduler.scale_model_input(scheduler_state, latents_input, t)
-             # concat latents, mask, masked_image_latents in the channel dimension
-             latents_input = jnp.concatenate([latents_input, mask_input, masked_image_latents_input], axis=1)
-
-             # predict the noise residual
-             noise_pred = self.unet.apply(
-                 {"params": params["unet"]},
-                 jnp.array(latents_input),
-                 jnp.array(timestep, dtype=jnp.int32),
-                 encoder_hidden_states=context,
-             ).sample
-             # perform guidance
-             noise_pred_uncond, noise_prediction_text = jnp.split(noise_pred, 2, axis=0)
-             noise_pred = noise_pred_uncond + guidance_scale * (noise_prediction_text - noise_pred_uncond)
-
-             # compute the previous noisy sample x_t -> x_t-1
-             latents, scheduler_state = self.scheduler.step(scheduler_state, noise_pred, t, latents).to_tuple()
-             return latents, mask, masked_image_latents, scheduler_state
-
-         scheduler_state = self.scheduler.set_timesteps(
-             params["scheduler"], num_inference_steps=num_inference_steps, shape=latents.shape
-         )
-
-         # scale the initial noise by the standard deviation required by the scheduler
-         latents = latents * params["scheduler"].init_noise_sigma
-
-         if DEBUG:
-             # run with python for loop
-             for i in range(num_inference_steps):
-                 latents, mask, masked_image_latents, scheduler_state = loop_body(
-                     i, (latents, mask, masked_image_latents, scheduler_state)
-                 )
-         else:
-             latents, _, _, _ = jax.lax.fori_loop(
-                 0, num_inference_steps, loop_body, (latents, mask, masked_image_latents, scheduler_state)
-             )
-
-         # scale and decode the image latents with vae
-         latents = 1 / self.vae.config.scaling_factor * latents
-         image = self.vae.apply({"params": params["vae"]}, latents, method=self.vae.decode).sample
-
-         image = (image / 2 + 0.5).clip(0, 1).transpose(0, 2, 3, 1)
-         return image
-
-     @replace_example_docstring(EXAMPLE_DOC_STRING)
-     def __call__(
-         self,
-         prompt_ids: jnp.array,
-         mask: jnp.array,
-         masked_image: jnp.array,
-         params: Union[Dict, FrozenDict],
-         prng_seed: jax.random.KeyArray,
-         num_inference_steps: int = 50,
-         height: Optional[int] = None,
-         width: Optional[int] = None,
-         guidance_scale: Union[float, jnp.array] = 7.5,
-         latents: jnp.array = None,
-         neg_prompt_ids: jnp.array = None,
-         return_dict: bool = True,
-         jit: bool = False,
-     ):
-         r"""
-         Function invoked when calling the pipeline for generation.
-
-         Args:
-             prompt (`str` or `List[str]`):
-                 The prompt or prompts to guide image generation.
-             height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
-                 The height in pixels of the generated image.
-             width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
-                 The width in pixels of the generated image.
-             num_inference_steps (`int`, *optional*, defaults to 50):
-                 The number of denoising steps. More denoising steps usually lead to a higher quality image at the
-                 expense of slower inference. This parameter is modulated by `strength`.
-             guidance_scale (`float`, *optional*, defaults to 7.5):
-                 A higher guidance scale value encourages the model to generate images closely linked to the text
-                 `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
-             latents (`jnp.array`, *optional*):
-                 Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
-                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                 array is generated by sampling using the supplied random `generator`.
-             jit (`bool`, defaults to `False`):
-                 Whether to run `pmap` versions of the generation and safety scoring functions.
-
-                 <Tip warning={true}>
-
-                 This argument exists because `__call__` is not yet end-to-end pmap-able. It will be removed in a
-                 future release.
-
-                 </Tip>
-
-             return_dict (`bool`, *optional*, defaults to `True`):
-                 Whether or not to return a [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] instead of
-                 a plain tuple.
-
-         Examples:
-
-         Returns:
-             [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] or `tuple`:
-                 If `return_dict` is `True`, [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] is
-                 returned, otherwise a `tuple` is returned where the first element is a list with the generated images
-                 and the second element is a list of `bool`s indicating whether the corresponding generated image
-                 contains "not-safe-for-work" (nsfw) content.
-         """
-         # 0. Default height and width to unet
-         height = height or self.unet.config.sample_size * self.vae_scale_factor
-         width = width or self.unet.config.sample_size * self.vae_scale_factor
-
-         masked_image = jax.image.resize(masked_image, (*masked_image.shape[:-2], height, width), method="bicubic")
-         mask = jax.image.resize(mask, (*mask.shape[:-2], height, width), method="nearest")
-
-         if isinstance(guidance_scale, float):
-             # Convert to a tensor so each device gets a copy. Follow the prompt_ids for
-             # shape information, as they may be sharded (when `jit` is `True`), or not.
-             guidance_scale = jnp.array([guidance_scale] * prompt_ids.shape[0])
-             if len(prompt_ids.shape) > 2:
-                 # Assume sharded
-                 guidance_scale = guidance_scale[:, None]
-
-         if jit:
-             images = _p_generate(
-                 self,
-                 prompt_ids,
-                 mask,
-                 masked_image,
-                 params,
-                 prng_seed,
-                 num_inference_steps,
-                 height,
-                 width,
-                 guidance_scale,
-                 latents,
-                 neg_prompt_ids,
-             )
-         else:
-             images = self._generate(
-                 prompt_ids,
-                 mask,
-                 masked_image,
-                 params,
-                 prng_seed,
-                 num_inference_steps,
-                 height,
-                 width,
-                 guidance_scale,
-                 latents,
-                 neg_prompt_ids,
-             )
-
-         if self.safety_checker is not None:
-             safety_params = params["safety_checker"]
-             images_uint8_casted = (images * 255).round().astype("uint8")
-             num_devices, batch_size = images.shape[:2]
-
-             images_uint8_casted = np.asarray(images_uint8_casted).reshape(num_devices * batch_size, height, width, 3)
-             images_uint8_casted, has_nsfw_concept = self._run_safety_checker(images_uint8_casted, safety_params, jit)
-             images = np.asarray(images)
-
-             # block images
-             if any(has_nsfw_concept):
-                 for i, is_nsfw in enumerate(has_nsfw_concept):
-                     if is_nsfw:
-                         images[i] = np.asarray(images_uint8_casted[i])
-
-             images = images.reshape(num_devices, batch_size, height, width, 3)
-         else:
-             images = np.asarray(images)
-             has_nsfw_concept = False
-
-         if not return_dict:
-             return (images, has_nsfw_concept)
-
-         return FlaxStableDiffusionPipelineOutput(images=images, nsfw_content_detected=has_nsfw_concept)
-
-
- # Static argnums are pipe, num_inference_steps, height, width. A change would trigger recompilation.
- # Non-static args are (sharded) input tensors mapped over their first dimension (hence, `0`).
- @partial(
-     jax.pmap,
-     in_axes=(None, 0, 0, 0, 0, 0, None, None, None, 0, 0, 0),
-     static_broadcasted_argnums=(0, 6, 7, 8),
- )
- def _p_generate(
-     pipe,
-     prompt_ids,
-     mask,
-     masked_image,
-     params,
-     prng_seed,
-     num_inference_steps,
-     height,
-     width,
-     guidance_scale,
-     latents,
-     neg_prompt_ids,
- ):
-     return pipe._generate(
-         prompt_ids,
-         mask,
-         masked_image,
-         params,
-         prng_seed,
-         num_inference_steps,
-         height,
-         width,
-         guidance_scale,
-         latents,
-         neg_prompt_ids,
-     )
-
-
- @partial(jax.pmap, static_broadcasted_argnums=(0,))
- def _p_get_has_nsfw_concepts(pipe, features, params):
-     return pipe._get_has_nsfw_concepts(features, params)
-
-
- def unshard(x: jnp.ndarray):
-     # einops.rearrange(x, 'd b ... -> (d b) ...')
-     num_devices, batch_size = x.shape[:2]
-     rest = x.shape[2:]
-     return x.reshape(num_devices * batch_size, *rest)
-
-
- def preprocess_image(image, dtype):
-     w, h = image.size
-     w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
-     image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
-     image = jnp.array(image).astype(dtype) / 255.0
-     image = image[None].transpose(0, 3, 1, 2)
-     return 2.0 * image - 1.0
-
-
- def preprocess_mask(mask, dtype):
-     w, h = mask.size
-     w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
-     mask = mask.resize((w, h))
-     mask = jnp.array(mask.convert("L")).astype(dtype) / 255.0
-     mask = jnp.expand_dims(mask, axis=(0, 1))
-
-     return mask
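`prepare_inputs` above binarizes the resized mask with two `.at[...]` updates. A functionally equivalent formulation uses a single `jnp.where`; this is an illustrative sketch, and `binarize_mask` is a hypothetical helper rather than part of the pipeline:

```python
import jax.numpy as jnp

def binarize_mask(mask: jnp.ndarray) -> jnp.ndarray:
    # Values >= 0.5 become 1.0, everything else 0.0, as in prepare_inputs.
    return jnp.where(mask >= 0.5, 1.0, 0.0)

mask = jnp.array([[0.1, 0.6], [0.49, 0.5]])
print(binarize_mask(mask))  # [[0. 1.] [0. 1.]]
```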
spaces/Andy1621/uniformer_image_detection/configs/free_anchor/README.md DELETED
@@ -1,27 +0,0 @@
- # FreeAnchor: Learning to Match Anchors for Visual Object Detection
-
- ## Introduction
-
- [ALGORITHM]
-
- ```latex
- @inproceedings{zhang2019freeanchor,
-   title   =  {{FreeAnchor}: Learning to Match Anchors for Visual Object Detection},
-   author  =  {Zhang, Xiaosong and Wan, Fang and Liu, Chang and Ji, Rongrong and Ye, Qixiang},
-   booktitle =  {Neural Information Processing Systems},
-   year    =  {2019}
- }
- ```
-
- ## Results and Models
-
- | Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download |
- |:--------:|:-------:|:-------:|:--------:|:--------------:|:------:|:------:|:--------:|
- | R-50 | pytorch | 1x | 4.9 | 18.4 | 38.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco/retinanet_free_anchor_r50_fpn_1x_coco_20200130-0f67375f.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco/retinanet_free_anchor_r50_fpn_1x_coco_20200130_095625.log.json) |
- | R-101 | pytorch | 1x | 6.8 | 14.9 | 40.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/free_anchor/retinanet_free_anchor_r101_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/free_anchor/retinanet_free_anchor_r101_fpn_1x_coco/retinanet_free_anchor_r101_fpn_1x_coco_20200130-358324e6.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/free_anchor/retinanet_free_anchor_r101_fpn_1x_coco/retinanet_free_anchor_r101_fpn_1x_coco_20200130_100723.log.json) |
- | X-101-32x4d | pytorch | 1x | 8.1 | 11.1 | 41.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/free_anchor/retinanet_free_anchor_x101_32x4d_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/free_anchor/retinanet_free_anchor_x101_32x4d_fpn_1x_coco/retinanet_free_anchor_x101_32x4d_fpn_1x_coco_20200130-d4846968.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/free_anchor/retinanet_free_anchor_x101_32x4d_fpn_1x_coco/retinanet_free_anchor_x101_32x4d_fpn_1x_coco_20200130_095627.log.json) |
-
- **Notes:**
-
- - We use 8 GPUs with 2 images/GPU.
- - For more settings and models, please refer to the [official repo](https://github.com/zhangxiaosong18/FreeAnchor).
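For context, a typical way to exercise one of the checkpoints in the table above is single-image inference through the MMDetection 2.x API. This is a hedged sketch: it assumes an installed MMDetection 2.x environment, and the image path is a placeholder.

```python
from mmdet.apis import init_detector, inference_detector

config = 'configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py'
checkpoint = 'retinanet_free_anchor_r50_fpn_1x_coco_20200130-0f67375f.pth'

model = init_detector(config, checkpoint, device='cuda:0')
result = inference_detector(model, 'demo.jpg')  # per-class arrays of boxes + scores
```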
spaces/Andy1621/uniformer_image_segmentation/configs/resnest/fcn_s101-d8_512x512_160k_ade20k.py DELETED
@@ -1,9 +0,0 @@
- _base_ = '../fcn/fcn_r101-d8_512x512_160k_ade20k.py'
- model = dict(
-     pretrained='open-mmlab://resnest101',
-     backbone=dict(
-         type='ResNeSt',
-         stem_channels=128,
-         radix=2,
-         reduction_factor=4,
-         avg_down_stride=True))
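This config works by inheritance: everything comes from the FCN R-101 base file, and only the backbone keys above are overridden (merged into the base backbone dict, since `_delete_` is not set). A sketch of inspecting the merged result with mmcv, assuming mmcv is installed and the path is resolved from the repo root:

```python
from mmcv import Config

cfg = Config.fromfile('configs/resnest/fcn_s101-d8_512x512_160k_ade20k.py')
print(cfg.model.backbone.type)           # 'ResNeSt'
print(cfg.model.backbone.stem_channels)  # 128
```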
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/scatter_points.py DELETED
@@ -1,135 +0,0 @@
- # Copyright (c) OpenMMLab. All rights reserved.
- import torch
- from torch import nn
- from torch.autograd import Function
-
- from ..utils import ext_loader
-
- ext_module = ext_loader.load_ext(
-     '_ext',
-     ['dynamic_point_to_voxel_forward', 'dynamic_point_to_voxel_backward'])
-
-
- class _DynamicScatter(Function):
-
-     @staticmethod
-     def forward(ctx, feats, coors, reduce_type='max'):
-         """Convert KITTI points (N, >=3) to voxels.
-
-         Args:
-             feats (torch.Tensor): [N, C]. Points features to be reduced
-                 into voxels.
-             coors (torch.Tensor): [N, ndim]. Corresponding voxel coordinates
-                 (specifically multi-dim voxel index) of each points.
-             reduce_type (str, optional): Reduce op. support 'max', 'sum' and
-                 'mean'. Default: 'max'.
-
-         Returns:
-             voxel_feats (torch.Tensor): [M, C]. Reduced features, input
-                 features that shares the same voxel coordinates are reduced to
-                 one row.
-             voxel_coors (torch.Tensor): [M, ndim]. Voxel coordinates.
-         """
-         results = ext_module.dynamic_point_to_voxel_forward(
-             feats, coors, reduce_type)
-         (voxel_feats, voxel_coors, point2voxel_map,
-          voxel_points_count) = results
-         ctx.reduce_type = reduce_type
-         ctx.save_for_backward(feats, voxel_feats, point2voxel_map,
-                               voxel_points_count)
-         ctx.mark_non_differentiable(voxel_coors)
-         return voxel_feats, voxel_coors
-
-     @staticmethod
-     def backward(ctx, grad_voxel_feats, grad_voxel_coors=None):
-         (feats, voxel_feats, point2voxel_map,
-          voxel_points_count) = ctx.saved_tensors
-         grad_feats = torch.zeros_like(feats)
-         # TODO: whether to use index put or use cuda_backward
-         # To use index put, need point to voxel index
-         ext_module.dynamic_point_to_voxel_backward(
-             grad_feats, grad_voxel_feats.contiguous(), feats, voxel_feats,
-             point2voxel_map, voxel_points_count, ctx.reduce_type)
-         return grad_feats, None, None
-
-
- dynamic_scatter = _DynamicScatter.apply
-
-
- class DynamicScatter(nn.Module):
-     """Scatters points into voxels, used in the voxel encoder with dynamic
-     voxelization.
-
-     Note:
-         The CPU and GPU implementation get the same output, but have numerical
-         difference after summation and division (e.g., 5e-7).
-
-     Args:
-         voxel_size (list): list [x, y, z] size of three dimension.
-         point_cloud_range (list): The coordinate range of points, [x_min,
-             y_min, z_min, x_max, y_max, z_max].
-         average_points (bool): whether to use avg pooling to scatter points
-             into voxels.
-     """
-
-     def __init__(self, voxel_size, point_cloud_range, average_points: bool):
-         super().__init__()
-
-         self.voxel_size = voxel_size
-         self.point_cloud_range = point_cloud_range
-         self.average_points = average_points
-
-     def forward_single(self, points, coors):
-         """Scatters points into voxels.
-
-         Args:
-             points (torch.Tensor): Points to be reduced into voxels.
-             coors (torch.Tensor): Corresponding voxel coordinates (specifically
-                 multi-dim voxel index) of each points.
-
-         Returns:
-             voxel_feats (torch.Tensor): Reduced features, input features that
-                 shares the same voxel coordinates are reduced to one row.
-             voxel_coors (torch.Tensor): Voxel coordinates.
-         """
-         reduce = 'mean' if self.average_points else 'max'
-         return dynamic_scatter(points.contiguous(), coors.contiguous(), reduce)
-
-     def forward(self, points, coors):
-         """Scatters points/features into voxels.
-
-         Args:
-             points (torch.Tensor): Points to be reduced into voxels.
-             coors (torch.Tensor): Corresponding voxel coordinates (specifically
-                 multi-dim voxel index) of each points.
-
-         Returns:
-             voxel_feats (torch.Tensor): Reduced features, input features that
-                 shares the same voxel coordinates are reduced to one row.
-             voxel_coors (torch.Tensor): Voxel coordinates.
-         """
-         if coors.size(-1) == 3:
-             return self.forward_single(points, coors)
-         else:
-             batch_size = coors[-1, 0] + 1
-             voxels, voxel_coors = [], []
-             for i in range(batch_size):
-                 inds = torch.where(coors[:, 0] == i)
-                 voxel, voxel_coor = self.forward_single(
-                     points[inds], coors[inds][:, 1:])
-                 coor_pad = nn.functional.pad(
-                     voxel_coor, (1, 0), mode='constant', value=i)
-                 voxel_coors.append(coor_pad)
-                 voxels.append(voxel)
-             features = torch.cat(voxels, dim=0)
-             feature_coors = torch.cat(voxel_coors, dim=0)
-
-             return features, feature_coors
-
-     def __repr__(self):
-         s = self.__class__.__name__ + '('
-         s += 'voxel_size=' + str(self.voxel_size)
-         s += ', point_cloud_range=' + str(self.point_cloud_range)
-         s += ', average_points=' + str(self.average_points)
-         s += ')'
-         return s
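A hedged usage sketch for `DynamicScatter`: it assumes a CUDA build of mmcv with the voxelization extension compiled, and the coordinate dtype and layout follow common downstream usage rather than anything guaranteed by this file. Points that share a voxel coordinate are reduced to one row.

```python
import torch

# voxel_size and point_cloud_range values here are illustrative only
scatter = DynamicScatter([0.1, 0.1, 0.1], [0, -40, -3, 70.4, 40, 1],
                         average_points=True)

feats = torch.rand(5, 4, device='cuda')           # N=5 points, C=4 features
coors = torch.tensor([[0, 0, 0], [0, 0, 0],       # duplicates share a voxel
                      [1, 2, 3], [1, 2, 3],
                      [4, 5, 6]], dtype=torch.int32, device='cuda')
voxel_feats, voxel_coors = scatter(feats, coors)  # M=3 voxels, mean-reduced
```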
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/sbcsgroupprober.py DELETED
@@ -1,88 +0,0 @@
- ######################## BEGIN LICENSE BLOCK ########################
- # The Original Code is Mozilla Universal charset detector code.
- #
- # The Initial Developer of the Original Code is
- # Netscape Communications Corporation.
- # Portions created by the Initial Developer are Copyright (C) 2001
- # the Initial Developer. All Rights Reserved.
- #
- # Contributor(s):
- #   Mark Pilgrim - port to Python
- #   Shy Shalom - original C code
- #
- # This library is free software; you can redistribute it and/or
- # modify it under the terms of the GNU Lesser General Public
- # License as published by the Free Software Foundation; either
- # version 2.1 of the License, or (at your option) any later version.
- #
- # This library is distributed in the hope that it will be useful,
- # but WITHOUT ANY WARRANTY; without even the implied warranty of
- # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- # Lesser General Public License for more details.
- #
- # You should have received a copy of the GNU Lesser General Public
- # License along with this library; if not, write to the Free Software
- # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- # 02110-1301  USA
- ######################### END LICENSE BLOCK #########################
-
- from .charsetgroupprober import CharSetGroupProber
- from .hebrewprober import HebrewProber
- from .langbulgarianmodel import ISO_8859_5_BULGARIAN_MODEL, WINDOWS_1251_BULGARIAN_MODEL
- from .langgreekmodel import ISO_8859_7_GREEK_MODEL, WINDOWS_1253_GREEK_MODEL
- from .langhebrewmodel import WINDOWS_1255_HEBREW_MODEL
-
- # from .langhungarianmodel import (ISO_8859_2_HUNGARIAN_MODEL,
- #                                  WINDOWS_1250_HUNGARIAN_MODEL)
- from .langrussianmodel import (
-     IBM855_RUSSIAN_MODEL,
-     IBM866_RUSSIAN_MODEL,
-     ISO_8859_5_RUSSIAN_MODEL,
-     KOI8_R_RUSSIAN_MODEL,
-     MACCYRILLIC_RUSSIAN_MODEL,
-     WINDOWS_1251_RUSSIAN_MODEL,
- )
- from .langthaimodel import TIS_620_THAI_MODEL
- from .langturkishmodel import ISO_8859_9_TURKISH_MODEL
- from .sbcharsetprober import SingleByteCharSetProber
-
-
- class SBCSGroupProber(CharSetGroupProber):
-     def __init__(self) -> None:
-         super().__init__()
-         hebrew_prober = HebrewProber()
-         logical_hebrew_prober = SingleByteCharSetProber(
-             WINDOWS_1255_HEBREW_MODEL, is_reversed=False, name_prober=hebrew_prober
-         )
-         # TODO: See if using ISO-8859-8 Hebrew model works better here, since
-         #       it's actually the visual one
-         visual_hebrew_prober = SingleByteCharSetProber(
-             WINDOWS_1255_HEBREW_MODEL, is_reversed=True, name_prober=hebrew_prober
-         )
-         hebrew_prober.set_model_probers(logical_hebrew_prober, visual_hebrew_prober)
-         # TODO: ORDER MATTERS HERE. I changed the order vs what was in master
-         #       and several tests failed that did not before. Some thought
-         #       should be put into the ordering, and we should consider making
-         #       order not matter here, because that is very counter-intuitive.
-         self.probers = [
-             SingleByteCharSetProber(WINDOWS_1251_RUSSIAN_MODEL),
-             SingleByteCharSetProber(KOI8_R_RUSSIAN_MODEL),
-             SingleByteCharSetProber(ISO_8859_5_RUSSIAN_MODEL),
-             SingleByteCharSetProber(MACCYRILLIC_RUSSIAN_MODEL),
-             SingleByteCharSetProber(IBM866_RUSSIAN_MODEL),
-             SingleByteCharSetProber(IBM855_RUSSIAN_MODEL),
-             SingleByteCharSetProber(ISO_8859_7_GREEK_MODEL),
-             SingleByteCharSetProber(WINDOWS_1253_GREEK_MODEL),
-             SingleByteCharSetProber(ISO_8859_5_BULGARIAN_MODEL),
-             SingleByteCharSetProber(WINDOWS_1251_BULGARIAN_MODEL),
-             # TODO: Restore Hungarian encodings (iso-8859-2 and windows-1250)
-             #       after we retrain model.
-             # SingleByteCharSetProber(ISO_8859_2_HUNGARIAN_MODEL),
-             # SingleByteCharSetProber(WINDOWS_1250_HUNGARIAN_MODEL),
-             SingleByteCharSetProber(TIS_620_THAI_MODEL),
-             SingleByteCharSetProber(ISO_8859_9_TURKISH_MODEL),
-             hebrew_prober,
-             logical_hebrew_prober,
-             visual_hebrew_prober,
-         ]
-         self.reset()
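The group prober above is what the top-level chardet API consults for single-byte encodings. A quick sketch of the public entry point, assuming the standalone `chardet` package rather than this vendored copy; the exact confidence value will vary:

```python
import chardet

raw = 'Привет, как дела? Это проверка кодировки.'.encode('windows-1251')
print(chardet.detect(raw))
# e.g. {'encoding': 'windows-1251', 'confidence': 0.9..., 'language': 'Russian'}
```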
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/bbcode.py DELETED
@@ -1,108 +0,0 @@
- """
-     pygments.formatters.bbcode
-     ~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-     BBcode formatter.
-
-     :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
-     :license: BSD, see LICENSE for details.
- """
-
-
- from pip._vendor.pygments.formatter import Formatter
- from pip._vendor.pygments.util import get_bool_opt
-
- __all__ = ['BBCodeFormatter']
-
-
- class BBCodeFormatter(Formatter):
-     """
-     Format tokens with BBcodes. These formatting codes are used by many
-     bulletin boards, so you can highlight your source code with Pygments before
-     posting it there.
-
-     This formatter has no support for background colors and borders, as there
-     are no common BBcode tags for that.
-
-     Some board systems (e.g. phpBB) don't support colors in their [code] tag,
-     so you can't use the highlighting together with that tag.
-     Text in a [code] tag usually is shown with a monospace font (which this
-     formatter can do with the ``monofont`` option) and no spaces (which you
-     need for indentation) are removed.
-
-     Additional options accepted:
-
-     `style`
-         The style to use, can be a string or a Style subclass (default:
-         ``'default'``).
-
-     `codetag`
-         If set to true, put the output into ``[code]`` tags (default:
-         ``false``)
-
-     `monofont`
-         If set to true, add a tag to show the code with a monospace font
-         (default: ``false``).
-     """
-     name = 'BBCode'
-     aliases = ['bbcode', 'bb']
-     filenames = []
-
-     def __init__(self, **options):
-         Formatter.__init__(self, **options)
-         self._code = get_bool_opt(options, 'codetag', False)
-         self._mono = get_bool_opt(options, 'monofont', False)
-
-         self.styles = {}
-         self._make_styles()
-
-     def _make_styles(self):
-         for ttype, ndef in self.style:
-             start = end = ''
-             if ndef['color']:
-                 start += '[color=#%s]' % ndef['color']
-                 end = '[/color]' + end
-             if ndef['bold']:
-                 start += '[b]'
-                 end = '[/b]' + end
-             if ndef['italic']:
-                 start += '[i]'
-                 end = '[/i]' + end
-             if ndef['underline']:
-                 start += '[u]'
-                 end = '[/u]' + end
-             # there are no common BBcodes for background-color and border
-
-             self.styles[ttype] = start, end
-
-     def format_unencoded(self, tokensource, outfile):
-         if self._code:
-             outfile.write('[code]')
-         if self._mono:
-             outfile.write('[font=monospace]')
-
-         lastval = ''
-         lasttype = None
-
-         for ttype, value in tokensource:
-             while ttype not in self.styles:
-                 ttype = ttype.parent
-             if ttype == lasttype:
-                 lastval += value
-             else:
-                 if lastval:
-                     start, end = self.styles[lasttype]
-                     outfile.write(''.join((start, lastval, end)))
-                 lastval = value
-                 lasttype = ttype
-
-         if lastval:
-             start, end = self.styles[lasttype]
-             outfile.write(''.join((start, lastval, end)))
-
-         if self._mono:
-             outfile.write('[/font]')
-         if self._code:
-             outfile.write('[/code]')
-         if self._code or self._mono:
-             outfile.write('\n')
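A short usage sketch for the formatter, assuming a standalone Pygments install (the vendored copy here is import-path-shifted); the exact colors in the output depend on the chosen style:

```python
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import BBCodeFormatter

bb = highlight('print("hello")', PythonLexer(), BBCodeFormatter(codetag=True))
print(bb)  # e.g. [code][color=#008000]print[/color]("hello")[/code]
```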
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/panel.py DELETED
@@ -1,308 +0,0 @@
- from typing import TYPE_CHECKING, Optional
-
- from .align import AlignMethod
- from .box import ROUNDED, Box
- from .cells import cell_len
- from .jupyter import JupyterMixin
- from .measure import Measurement, measure_renderables
- from .padding import Padding, PaddingDimensions
- from .segment import Segment
- from .style import Style, StyleType
- from .text import Text, TextType
-
- if TYPE_CHECKING:
-     from .console import Console, ConsoleOptions, RenderableType, RenderResult
-
-
- class Panel(JupyterMixin):
-     """A console renderable that draws a border around its contents.
-
-     Example:
-         >>> console.print(Panel("Hello, World!"))
-
-     Args:
-         renderable (RenderableType): A console renderable object.
-         box (Box, optional): A Box instance that defines the look of the border (see :ref:`appendix_box`).
-             Defaults to box.ROUNDED.
-         safe_box (bool, optional): Disable box characters that don't display on windows legacy terminal with *raster* fonts. Defaults to True.
-         expand (bool, optional): If True the panel will stretch to fill the console
-             width, otherwise it will be sized to fit the contents. Defaults to True.
-         style (str, optional): The style of the panel (border and contents). Defaults to "none".
-         border_style (str, optional): The style of the border. Defaults to "none".
-         width (Optional[int], optional): Optional width of panel. Defaults to None to auto-detect.
-         height (Optional[int], optional): Optional height of panel. Defaults to None to auto-detect.
-         padding (Optional[PaddingDimensions]): Optional padding around renderable. Defaults to (0, 1).
-         highlight (bool, optional): Enable automatic highlighting of panel title (if str). Defaults to False.
-     """
-
-     def __init__(
-         self,
-         renderable: "RenderableType",
-         box: Box = ROUNDED,
-         *,
-         title: Optional[TextType] = None,
-         title_align: AlignMethod = "center",
-         subtitle: Optional[TextType] = None,
-         subtitle_align: AlignMethod = "center",
-         safe_box: Optional[bool] = None,
-         expand: bool = True,
-         style: StyleType = "none",
-         border_style: StyleType = "none",
-         width: Optional[int] = None,
-         height: Optional[int] = None,
-         padding: PaddingDimensions = (0, 1),
-         highlight: bool = False,
-     ) -> None:
-         self.renderable = renderable
-         self.box = box
-         self.title = title
-         self.title_align: AlignMethod = title_align
-         self.subtitle = subtitle
-         self.subtitle_align = subtitle_align
-         self.safe_box = safe_box
-         self.expand = expand
-         self.style = style
-         self.border_style = border_style
-         self.width = width
-         self.height = height
-         self.padding = padding
-         self.highlight = highlight
-
-     @classmethod
-     def fit(
-         cls,
-         renderable: "RenderableType",
-         box: Box = ROUNDED,
-         *,
-         title: Optional[TextType] = None,
-         title_align: AlignMethod = "center",
-         subtitle: Optional[TextType] = None,
-         subtitle_align: AlignMethod = "center",
-         safe_box: Optional[bool] = None,
-         style: StyleType = "none",
-         border_style: StyleType = "none",
-         width: Optional[int] = None,
-         padding: PaddingDimensions = (0, 1),
-     ) -> "Panel":
-         """An alternative constructor that sets expand=False."""
-         return cls(
-             renderable,
-             box,
-             title=title,
-             title_align=title_align,
-             subtitle=subtitle,
-             subtitle_align=subtitle_align,
-             safe_box=safe_box,
-             style=style,
-             border_style=border_style,
-             width=width,
-             padding=padding,
-             expand=False,
-         )
-
-     @property
-     def _title(self) -> Optional[Text]:
-         if self.title:
-             title_text = (
-                 Text.from_markup(self.title)
-                 if isinstance(self.title, str)
-                 else self.title.copy()
-             )
-             title_text.end = ""
-             title_text.plain = title_text.plain.replace("\n", " ")
-             title_text.no_wrap = True
-             title_text.expand_tabs()
-             title_text.pad(1)
-             return title_text
-         return None
-
-     @property
-     def _subtitle(self) -> Optional[Text]:
-         if self.subtitle:
-             subtitle_text = (
-                 Text.from_markup(self.subtitle)
-                 if isinstance(self.subtitle, str)
-                 else self.subtitle.copy()
-             )
-             subtitle_text.end = ""
-             subtitle_text.plain = subtitle_text.plain.replace("\n", " ")
-             subtitle_text.no_wrap = True
-             subtitle_text.expand_tabs()
-             subtitle_text.pad(1)
-             return subtitle_text
-         return None
-
-     def __rich_console__(
-         self, console: "Console", options: "ConsoleOptions"
-     ) -> "RenderResult":
-         _padding = Padding.unpack(self.padding)
-         renderable = (
-             Padding(self.renderable, _padding) if any(_padding) else self.renderable
-         )
-         style = console.get_style(self.style)
-         border_style = style + console.get_style(self.border_style)
-         width = (
-             options.max_width
-             if self.width is None
-             else min(options.max_width, self.width)
-         )
-
-         safe_box: bool = console.safe_box if self.safe_box is None else self.safe_box
-         box = self.box.substitute(options, safe=safe_box)
-
-         def align_text(
-             text: Text, width: int, align: str, character: str, style: Style
-         ) -> Text:
-             """Gets new aligned text.
-
-             Args:
-                 text (Text): Title or subtitle text.
-                 width (int): Desired width.
-                 align (str): Alignment.
-                 character (str): Character for alignment.
-                 style (Style): Border style.
-
-             Returns:
-                 Text: New text instance.
-             """
-             text = text.copy()
-             text.truncate(width)
-             excess_space = width - cell_len(text.plain)
-             if excess_space:
-                 if align == "left":
-                     return Text.assemble(
-                         text,
-                         (character * excess_space, style),
-                         no_wrap=True,
-                         end="",
-                     )
-                 elif align == "center":
-                     left = excess_space // 2
-                     return Text.assemble(
-                         (character * left, style),
-                         text,
-                         (character * (excess_space - left), style),
-                         no_wrap=True,
-                         end="",
-                     )
-                 else:
-                     return Text.assemble(
-                         (character * excess_space, style),
-                         text,
-                         no_wrap=True,
-                         end="",
-                     )
-             return text
-
-         title_text = self._title
-         if title_text is not None:
-             title_text.stylize_before(border_style)
-
-         child_width = (
-             width - 2
-             if self.expand
-             else console.measure(
-                 renderable, options=options.update_width(width - 2)
-             ).maximum
-         )
-         child_height = self.height or options.height or None
-         if child_height:
-             child_height -= 2
-         if title_text is not None:
-             child_width = min(
-                 options.max_width - 2, max(child_width, title_text.cell_len + 2)
-             )
-
-         width = child_width + 2
-         child_options = options.update(
-             width=child_width, height=child_height, highlight=self.highlight
-         )
-         lines = console.render_lines(renderable, child_options, style=style)
-
-         line_start = Segment(box.mid_left, border_style)
-         line_end = Segment(f"{box.mid_right}", border_style)
-         new_line = Segment.line()
-         if title_text is None or width <= 4:
-             yield Segment(box.get_top([width - 2]), border_style)
-         else:
-             title_text = align_text(
-                 title_text,
-                 width - 4,
-                 self.title_align,
-                 box.top,
-                 border_style,
-             )
-             yield Segment(box.top_left + box.top, border_style)
-             yield from console.render(title_text, child_options.update_width(width - 4))
-             yield Segment(box.top + box.top_right, border_style)
-
-         yield new_line
-         for line in lines:
-             yield line_start
-             yield from line
-             yield line_end
-             yield new_line
-
-         subtitle_text = self._subtitle
-         if subtitle_text is not None:
-             subtitle_text.stylize_before(border_style)
-
-         if subtitle_text is None or width <= 4:
-             yield Segment(box.get_bottom([width - 2]), border_style)
-         else:
-             subtitle_text = align_text(
-                 subtitle_text,
-                 width - 4,
-                 self.subtitle_align,
-                 box.bottom,
-                 border_style,
-             )
-             yield Segment(box.bottom_left + box.bottom, border_style)
-             yield from console.render(
-                 subtitle_text, child_options.update_width(width - 4)
-             )
-             yield Segment(box.bottom + box.bottom_right, border_style)
-
-         yield new_line
-
-     def __rich_measure__(
-         self, console: "Console", options: "ConsoleOptions"
-     ) -> "Measurement":
-         _title = self._title
-         _, right, _, left = Padding.unpack(self.padding)
-         padding = left + right
-         renderables = [self.renderable, _title] if _title else [self.renderable]
-
-         if self.width is None:
-             width = (
-                 measure_renderables(
-                     console,
-                     options.update_width(options.max_width - padding - 2),
-                     renderables,
-                 ).maximum
-                 + padding
-                 + 2
-             )
-         else:
-             width = self.width
-         return Measurement(width, width)
-
-
- if __name__ == "__main__":  # pragma: no cover
-     from .console import Console
-
-     c = Console()
-
-     from .box import DOUBLE, ROUNDED
-     from .padding import Padding
-
-     p = Panel(
-         "Hello, World!",
-         title="rich.Panel",
-         style="white on blue",
-         box=DOUBLE,
-         padding=1,
-     )
-
-     c.print()
-     c.print(p)
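A minimal usage sketch for the Panel class deleted above (shown against the standalone `rich` package; illustrative only):

    from rich.console import Console
    from rich.panel import Panel

    console = Console()
    # Panel.fit() sizes the border to the content instead of the full console width.
    console.print(Panel.fit("Hello, World!", title="greeting", subtitle="rich.Panel"))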
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/jaraco/__init__.py DELETED
File without changes
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/more_itertools/more.py DELETED
The diff for this file is too large to render. See raw diff
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/utils/colormap.py DELETED
@@ -1,140 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates.
-
- """
- An awesome colormap for really neat visualizations.
- Copied from Detectron, and removed gray colors.
- """
-
- import numpy as np
-
- __all__ = ["colormap", "random_color"]
-
- # fmt: off
- # RGB:
- _COLORS = np.array(
-     [
-         0.000, 0.447, 0.741,
-         0.850, 0.325, 0.098,
-         0.929, 0.694, 0.125,
-         0.494, 0.184, 0.556,
-         0.466, 0.674, 0.188,
-         0.301, 0.745, 0.933,
-         0.635, 0.078, 0.184,
-         0.300, 0.300, 0.300,
-         0.600, 0.600, 0.600,
-         1.000, 0.000, 0.000,
-         1.000, 0.500, 0.000,
-         0.749, 0.749, 0.000,
-         0.000, 1.000, 0.000,
-         0.000, 0.000, 1.000,
-         0.667, 0.000, 1.000,
-         0.333, 0.333, 0.000,
-         0.333, 0.667, 0.000,
-         0.333, 1.000, 0.000,
-         0.667, 0.333, 0.000,
-         0.667, 0.667, 0.000,
-         0.667, 1.000, 0.000,
-         1.000, 0.333, 0.000,
-         1.000, 0.667, 0.000,
-         1.000, 1.000, 0.000,
-         0.000, 0.333, 0.500,
-         0.000, 0.667, 0.500,
-         0.000, 1.000, 0.500,
-         0.333, 0.000, 0.500,
-         0.333, 0.333, 0.500,
-         0.333, 0.667, 0.500,
-         0.333, 1.000, 0.500,
-         0.667, 0.000, 0.500,
-         0.667, 0.333, 0.500,
-         0.667, 0.667, 0.500,
-         0.667, 1.000, 0.500,
-         1.000, 0.000, 0.500,
-         1.000, 0.333, 0.500,
-         1.000, 0.667, 0.500,
-         1.000, 1.000, 0.500,
-         0.000, 0.333, 1.000,
-         0.000, 0.667, 1.000,
-         0.000, 1.000, 1.000,
-         0.333, 0.000, 1.000,
-         0.333, 0.333, 1.000,
-         0.333, 0.667, 1.000,
-         0.333, 1.000, 1.000,
-         0.667, 0.000, 1.000,
-         0.667, 0.333, 1.000,
-         0.667, 0.667, 1.000,
-         0.667, 1.000, 1.000,
-         1.000, 0.000, 1.000,
-         1.000, 0.333, 1.000,
-         1.000, 0.667, 1.000,
-         0.333, 0.000, 0.000,
-         0.500, 0.000, 0.000,
-         0.667, 0.000, 0.000,
-         0.833, 0.000, 0.000,
-         1.000, 0.000, 0.000,
-         0.000, 0.167, 0.000,
-         0.000, 0.333, 0.000,
-         0.000, 0.500, 0.000,
-         0.000, 0.667, 0.000,
-         0.000, 0.833, 0.000,
-         0.000, 1.000, 0.000,
-         0.000, 0.000, 0.167,
-         0.000, 0.000, 0.333,
-         0.000, 0.000, 0.500,
-         0.000, 0.000, 0.667,
-         0.000, 0.000, 0.833,
-         0.000, 0.000, 1.000,
-         0.000, 0.000, 0.000,
-         0.143, 0.143, 0.143,
-         0.857, 0.857, 0.857,
-         1.000, 1.000, 1.000
-     ]
- ).astype(np.float32).reshape(-1, 3)
- # fmt: on
-
-
- def colormap(rgb=False, maximum=255):
-     """
-     Args:
-         rgb (bool): whether to return RGB colors or BGR colors.
-         maximum (int): either 255 or 1
-
-     Returns:
-         ndarray: a float32 array of Nx3 colors, in range [0, 255] or [0, 1]
-     """
-     assert maximum in [255, 1], maximum
-     c = _COLORS * maximum
-     if not rgb:
-         c = c[:, ::-1]
-     return c
-
-
- def random_color(rgb=False, maximum=255):
-     """
-     Args:
-         rgb (bool): whether to return RGB colors or BGR colors.
-         maximum (int): either 255 or 1
-
-     Returns:
-         ndarray: a vector of 3 numbers
-     """
-     idx = np.random.randint(0, len(_COLORS))
-     ret = _COLORS[idx] * maximum
-     if not rgb:
-         ret = ret[::-1]
-     return ret
-
-
- if __name__ == "__main__":
-     import cv2
-
-     size = 100
-     H, W = 10, 10
-     canvas = np.random.rand(H * size, W * size, 3).astype("float32")
-     for h in range(H):
-         for w in range(W):
-             idx = h * W + w
-             if idx >= len(_COLORS):
-                 break
-             canvas[h * size : (h + 1) * size, w * size : (w + 1) * size] = _COLORS[idx]
-     cv2.imshow("a", canvas)
-     cv2.waitKey(0)
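A short sketch of how the two helpers above are typically called (illustrative; assumes the module is importable as `detectron2.utils.colormap`, matching the path of this vendored copy):

    from detectron2.utils.colormap import colormap, random_color

    palette = colormap(rgb=True, maximum=255)      # float32 array of shape (N, 3), values in [0, 255]
    box_color = random_color(rgb=True, maximum=1)  # a single color, values in [0, 1]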
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/layers/__init__.py DELETED
File without changes
spaces/Banbri/zcvzcv/src/components/ui/textarea.tsx DELETED
@@ -1,24 +0,0 @@
- import * as React from "react"
-
- import { cn } from "@/lib/utils"
-
- export interface TextareaProps
-   extends React.TextareaHTMLAttributes<HTMLTextAreaElement> {}
-
- const Textarea = React.forwardRef<HTMLTextAreaElement, TextareaProps>(
-   ({ className, ...props }, ref) => {
-     return (
-       <textarea
-         className={cn(
-           "flex min-h-[80px] w-full rounded-md border border-stone-200 bg-transparent px-3 py-2 text-sm ring-offset-white placeholder:text-stone-500 focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-stone-400 focus-visible:ring-offset-2 disabled:cursor-not-allowed disabled:opacity-50 dark:border-stone-800 dark:ring-offset-stone-950 dark:placeholder:text-stone-400 dark:focus-visible:ring-stone-800",
-           className
-         )}
-         ref={ref}
-         {...props}
-       />
-     )
-   }
- )
- Textarea.displayName = "Textarea"
-
- export { Textarea }
spaces/Bart92/RVC_HF/lib/uvr5_pack/lib_v5/layers.py DELETED
@@ -1,118 +0,0 @@
- import torch
- from torch import nn
- import torch.nn.functional as F
-
- from . import spec_utils
-
-
- class Conv2DBNActiv(nn.Module):
-     def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
-         super(Conv2DBNActiv, self).__init__()
-         self.conv = nn.Sequential(
-             nn.Conv2d(
-                 nin,
-                 nout,
-                 kernel_size=ksize,
-                 stride=stride,
-                 padding=pad,
-                 dilation=dilation,
-                 bias=False,
-             ),
-             nn.BatchNorm2d(nout),
-             activ(),
-         )
-
-     def __call__(self, x):
-         return self.conv(x)
-
-
- class SeperableConv2DBNActiv(nn.Module):
-     def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
-         super(SeperableConv2DBNActiv, self).__init__()
-         self.conv = nn.Sequential(
-             nn.Conv2d(
-                 nin,
-                 nin,
-                 kernel_size=ksize,
-                 stride=stride,
-                 padding=pad,
-                 dilation=dilation,
-                 groups=nin,
-                 bias=False,
-             ),
-             nn.Conv2d(nin, nout, kernel_size=1, bias=False),
-             nn.BatchNorm2d(nout),
-             activ(),
-         )
-
-     def __call__(self, x):
-         return self.conv(x)
-
-
- class Encoder(nn.Module):
-     def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU):
-         super(Encoder, self).__init__()
-         self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
-         self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ)
-
-     def __call__(self, x):
-         skip = self.conv1(x)
-         h = self.conv2(skip)
-
-         return h, skip
-
-
- class Decoder(nn.Module):
-     def __init__(
-         self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False
-     ):
-         super(Decoder, self).__init__()
-         self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
-         self.dropout = nn.Dropout2d(0.1) if dropout else None
-
-     def __call__(self, x, skip=None):
-         x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True)
-         if skip is not None:
-             skip = spec_utils.crop_center(skip, x)
-             x = torch.cat([x, skip], dim=1)
-         h = self.conv(x)
-
-         if self.dropout is not None:
-             h = self.dropout(h)
-
-         return h
-
-
- class ASPPModule(nn.Module):
-     def __init__(self, nin, nout, dilations=(4, 8, 16), activ=nn.ReLU):
-         super(ASPPModule, self).__init__()
-         self.conv1 = nn.Sequential(
-             nn.AdaptiveAvgPool2d((1, None)),
-             Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ),
-         )
-         self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ)
-         self.conv3 = SeperableConv2DBNActiv(
-             nin, nin, 3, 1, dilations[0], dilations[0], activ=activ
-         )
-         self.conv4 = SeperableConv2DBNActiv(
-             nin, nin, 3, 1, dilations[1], dilations[1], activ=activ
-         )
-         self.conv5 = SeperableConv2DBNActiv(
-             nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
-         )
-         self.bottleneck = nn.Sequential(
-             Conv2DBNActiv(nin * 5, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1)
-         )
-
-     def forward(self, x):
-         _, _, h, w = x.size()
-         feat1 = F.interpolate(
-             self.conv1(x), size=(h, w), mode="bilinear", align_corners=True
-         )
-         feat2 = self.conv2(x)
-         feat3 = self.conv3(x)
-         feat4 = self.conv4(x)
-         feat5 = self.conv5(x)
-         out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1)
-         bottle = self.bottleneck(out)
-         return bottle
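A minimal sketch exercising the encoder path of the blocks above (the import path and tensor shapes are assumptions for illustration; `spec_utils` is only needed by `Decoder`, so this sticks to `Encoder`):

    import torch
    from torch import nn
    # Hypothetical import path; adjust to wherever this module lives in the repo.
    from lib.uvr5_pack.lib_v5.layers import Encoder

    enc = Encoder(nin=3, nout=16, ksize=3, stride=2, pad=1, activ=nn.LeakyReLU)
    x = torch.randn(1, 3, 64, 64)   # dummy 4-D input (batch, channels, freq, time)
    h, skip = enc(x)                # h is downsampled by the stride; skip keeps full resolution
    print(h.shape, skip.shape)      # torch.Size([1, 16, 32, 32]) torch.Size([1, 16, 64, 64])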
spaces/Benson/text-generation/Examples/Cesta Batalla Sin Anuncios Mod Apk.md DELETED
@@ -1,35 +0,0 @@
-
- <h1>Basket Battle No Ads Mod APK: A Fun and Addictive Basketball Game</h1>
- <p>Do you love basketball games? Do you want to experience the thrill of playing one-on-one against your opponents? Do you want to enjoy smooth, fast gameplay without ads or limitations? If you answered yes to any of these questions, you should try Basket Battle No Ads Mod APK, a modified version of the popular basketball game by DoubleTap Software. In this article, we will tell you everything you need to know about this game, its features, and how to download and install it on your Android device.</p>
- <h2>What is Basket Battle?</h2>
- <p>Basket Battle is a fun and addictive basketball game that lets you play one-on-one against different opponents in various locations. You can choose from different characters, each with their own skills and abilities, and customize them with different outfits and accessories. You can also unlock and upgrade different courts, from the street to the beach, and enjoy the realistic graphics and animations. The game has simple, intuitive controls that let you move, jump, shoot, dunk, block, and steal with ease. You can also pull off amazing combos and tricks to score more points and impress your opponent.</p>
- <h2>basket battle no ads mod apk</h2><br /><p><b><b>DOWNLOAD</b> &#10038;&#10038;&#10038; <a href="https://bltlly.com/2v6Jqo">https://bltlly.com/2v6Jqo</a></b></p><br /><br />
- <h3>Features of Basket Battle</h3>
- <h4>Simple and intuitive controls</h4>
- <p>The game has easy-to-learn controls that make it suitable for anyone who loves basketball games. You can use the virtual joystick to move your character and tap the buttons to shoot, dunk, block, or steal. You can also swipe the screen to perform special moves and combos. The game has a tutorial mode that teaches you the basics and helps you improve your skills.</p>
- <h4>Various game modes and challenges</h4>
-
- <h4>Customize your players and courts</h4>
- <p>The game lets you customize your players and courts with various items that you can buy with the money you earn by playing. You can choose from different characters, each with their own strengths and weaknesses, and change their appearance with different outfits, hairstyles, shoes, accessories, and more. You can also unlock and upgrade different courts, each with its own theme and atmosphere, such as the street, the gym, the beach, the park, and more.</p>
- <h4>Play online or offline with friends</h4>
- <p>The game supports both online and offline modes, so you can play anytime, anywhere. You can play online against other players from around the world, or offline with your friends on the same device. You can also chat with other players in the game lobby, send them emojis, or challenge them to a rematch.</p>
- <h2>Why download Basket Battle No Ads Mod APK?</h2>
- <h3>Benefits of the modded version</h3>
- <h4>Unlimited money to buy whatever you want</h4>
- <p>The modded version of Basket Battle gives you unlimited money that you can use to buy anything you want in the game. You can buy all the characters, outfits, accessories, courts, and upgrades you want without worrying about the cost. You can also use the money to skip the ads that appear in the game.</p>
- <h4>No annoying ads to interrupt your gameplay</h4>
- <p>The modded version of Basket Battle removes all the ads that normally appear in the game. You can enjoy smooth, uninterrupted gameplay without having to watch any ads or wait for them to load. You can also save data and battery by playing the game without ads.</p>
- <h4>Free and safe to install and use</h4>
-
- <h3>How to download and install Basket Battle No Ads Mod APK?</h3>
- <h4>Step 1: Download the APK file from a trusted source</h4>
- <p>The first step is to download the APK file of Basket Battle No Ads Mod APK from a trusted source. You can find many websites offering the modded version of the game, but you should be careful and choose a reliable one. You can use the link below to download the APK file from our website, which is 100% safe and verified.</p>
- <h4>Step 2: Enable unknown sources on your device</h4>
- <p>The second step is to enable unknown sources on your device. This is necessary because the modded version of Basket Battle is not available on the Google Play Store, and you need to allow your device to install apps from other sources. To do this, go to your device settings, then security, then unknown sources, and turn it on.</p>
- <p></p>
- <h4>Step 3: Install the APK file and enjoy the game</h4>
- <p>The third and final step is to install the APK file and enjoy the game. To do this, locate the APK file you downloaded in step 1 and tap on it. Follow the on-screen instructions to complete the installation process. Once done, you can launch the game from the app drawer or home screen and start playing Basket Battle No Ads Mod APK.</p>
- <h2>Conclusion</h2>
- <p>Basket Battle No Ads Mod APK is a fun and addictive basketball game that lets you play one-on-one against different opponents in various locations. You can customize your players and courts with various items and enjoy smooth, fast gameplay without ads or limitations. The modded version of Basket Battle gives you unlimited money to buy whatever you want in the game and removes all the ads that normally appear. You can download and install Basket Battle No Ads Mod APK on your Android device for free and safely by following the steps above. If you love basketball games, you should definitely try Basket Battle No Ads Mod APK.</p><br />
- <br />
- <br />
spaces/BernardoOlisan/vqganclip/taming-transformers/scripts/sample_conditional.py DELETED
@@ -1,355 +0,0 @@
- import argparse, os, sys, glob, math, time
- import torch
- import numpy as np
- from omegaconf import OmegaConf
- import streamlit as st
- from streamlit import caching
- from PIL import Image
- from main import instantiate_from_config, DataModuleFromConfig
- from torch.utils.data import DataLoader
- from torch.utils.data.dataloader import default_collate
-
-
- rescale = lambda x: (x + 1.) / 2.
-
-
- def bchw_to_st(x):
-     return rescale(x.detach().cpu().numpy().transpose(0,2,3,1))
-
- def save_img(xstart, fname):
-     I = (xstart.clip(0,1)[0]*255).astype(np.uint8)
-     Image.fromarray(I).save(fname)
-
-
-
- def get_interactive_image(resize=False):
-     image = st.file_uploader("Input", type=["jpg", "JPEG", "png"])
-     if image is not None:
-         image = Image.open(image)
-         if not image.mode == "RGB":
-             image = image.convert("RGB")
-         image = np.array(image).astype(np.uint8)
-         print("upload image shape: {}".format(image.shape))
-         img = Image.fromarray(image)
-         if resize:
-             img = img.resize((256, 256))
-         image = np.array(img)
-         return image
-
-
- def single_image_to_torch(x, permute=True):
-     assert x is not None, "Please provide an image through the upload function"
-     x = np.array(x)
-     x = torch.FloatTensor(x/255.*2. - 1.)[None,...]
-     if permute:
-         x = x.permute(0, 3, 1, 2)
-     return x
-
-
- def pad_to_M(x, M):
-     hp = math.ceil(x.shape[2]/M)*M-x.shape[2]
-     wp = math.ceil(x.shape[3]/M)*M-x.shape[3]
-     x = torch.nn.functional.pad(x, (0,wp,0,hp,0,0,0,0))
-     return x
-
- @torch.no_grad()
- def run_conditional(model, dsets):
-     if len(dsets.datasets) > 1:
-         split = st.sidebar.radio("Split", sorted(dsets.datasets.keys()))
-         dset = dsets.datasets[split]
-     else:
-         dset = next(iter(dsets.datasets.values()))
-     batch_size = 1
-     start_index = st.sidebar.number_input("Example Index (Size: {})".format(len(dset)), value=0,
-                                           min_value=0,
-                                           max_value=len(dset)-batch_size)
-     indices = list(range(start_index, start_index+batch_size))
-
-     example = default_collate([dset[i] for i in indices])
-
-     x = model.get_input("image", example).to(model.device)
-
-     cond_key = model.cond_stage_key
-     c = model.get_input(cond_key, example).to(model.device)
-
-     scale_factor = st.sidebar.slider("Scale Factor", min_value=0.5, max_value=4.0, step=0.25, value=1.00)
-     if scale_factor != 1.0:
-         x = torch.nn.functional.interpolate(x, scale_factor=scale_factor, mode="bicubic")
-         c = torch.nn.functional.interpolate(c, scale_factor=scale_factor, mode="bicubic")
-
-     quant_z, z_indices = model.encode_to_z(x)
-     quant_c, c_indices = model.encode_to_c(c)
-
-     cshape = quant_z.shape
-
-     xrec = model.first_stage_model.decode(quant_z)
-     st.write("image: {}".format(x.shape))
-     st.image(bchw_to_st(x), clamp=True, output_format="PNG")
-     st.write("image reconstruction: {}".format(xrec.shape))
-     st.image(bchw_to_st(xrec), clamp=True, output_format="PNG")
-
-     if cond_key == "segmentation":
-         # get image from segmentation mask
-         num_classes = c.shape[1]
-         c = torch.argmax(c, dim=1, keepdim=True)
-         c = torch.nn.functional.one_hot(c, num_classes=num_classes)
-         c = c.squeeze(1).permute(0, 3, 1, 2).float()
-         c = model.cond_stage_model.to_rgb(c)
-
-     st.write(f"{cond_key}: {tuple(c.shape)}")
-     st.image(bchw_to_st(c), clamp=True, output_format="PNG")
-
-     idx = z_indices
-
-     half_sample = st.sidebar.checkbox("Image Completion", value=False)
-     if half_sample:
-         start = idx.shape[1]//2
-     else:
-         start = 0
-
-     idx[:,start:] = 0
-     idx = idx.reshape(cshape[0],cshape[2],cshape[3])
-     start_i = start//cshape[3]
-     start_j = start %cshape[3]
-
-     if not half_sample and quant_z.shape == quant_c.shape:
-         st.info("Setting idx to c_indices")
-         idx = c_indices.clone().reshape(cshape[0],cshape[2],cshape[3])
-
-     cidx = c_indices
-     cidx = cidx.reshape(quant_c.shape[0],quant_c.shape[2],quant_c.shape[3])
-
-     xstart = model.decode_to_img(idx[:,:cshape[2],:cshape[3]], cshape)
-     st.image(bchw_to_st(xstart), clamp=True, output_format="PNG")
-
-     temperature = st.number_input("Temperature", value=1.0)
-     top_k = st.number_input("Top k", value=100)
-     sample = st.checkbox("Sample", value=True)
-     update_every = st.number_input("Update every", value=75)
-
-     st.text(f"Sampling shape ({cshape[2]},{cshape[3]})")
-
-     animate = st.checkbox("animate")
-     if animate:
-         import imageio
-         outvid = "sampling.mp4"
-         writer = imageio.get_writer(outvid, fps=25)
-     elapsed_t = st.empty()
-     info = st.empty()
-     st.text("Sampled")
-     if st.button("Sample"):
-         output = st.empty()
-         start_t = time.time()
-         for i in range(start_i,cshape[2]-0):
-             if i <= 8:
-                 local_i = i
-             elif cshape[2]-i < 8:
-                 local_i = 16-(cshape[2]-i)
-             else:
-                 local_i = 8
-             for j in range(start_j,cshape[3]-0):
-                 if j <= 8:
-                     local_j = j
-                 elif cshape[3]-j < 8:
-                     local_j = 16-(cshape[3]-j)
-                 else:
-                     local_j = 8
-
-                 i_start = i-local_i
-                 i_end = i_start+16
-                 j_start = j-local_j
-                 j_end = j_start+16
-                 elapsed_t.text(f"Time: {time.time() - start_t} seconds")
-                 info.text(f"Step: ({i},{j}) | Local: ({local_i},{local_j}) | Crop: ({i_start}:{i_end},{j_start}:{j_end})")
-                 patch = idx[:,i_start:i_end,j_start:j_end]
-                 patch = patch.reshape(patch.shape[0],-1)
-                 cpatch = cidx[:, i_start:i_end, j_start:j_end]
-                 cpatch = cpatch.reshape(cpatch.shape[0], -1)
-                 patch = torch.cat((cpatch, patch), dim=1)
-                 logits,_ = model.transformer(patch[:,:-1])
-                 logits = logits[:, -256:, :]
-                 logits = logits.reshape(cshape[0],16,16,-1)
-                 logits = logits[:,local_i,local_j,:]
-
-                 logits = logits/temperature
-
-                 if top_k is not None:
-                     logits = model.top_k_logits(logits, top_k)
-                 # apply softmax to convert to probabilities
-                 probs = torch.nn.functional.softmax(logits, dim=-1)
-                 # sample from the distribution or take the most likely
-                 if sample:
-                     ix = torch.multinomial(probs, num_samples=1)
-                 else:
-                     _, ix = torch.topk(probs, k=1, dim=-1)
-                 idx[:,i,j] = ix
-
-                 if (i*cshape[3]+j)%update_every==0:
-                     xstart = model.decode_to_img(idx[:, :cshape[2], :cshape[3]], cshape,)
-
-                     xstart = bchw_to_st(xstart)
-                     output.image(xstart, clamp=True, output_format="PNG")
-
-                     if animate:
-                         writer.append_data((xstart[0]*255).clip(0, 255).astype(np.uint8))
-
-         xstart = model.decode_to_img(idx[:,:cshape[2],:cshape[3]], cshape)
-         xstart = bchw_to_st(xstart)
-         output.image(xstart, clamp=True, output_format="PNG")
-         #save_img(xstart, "full_res_sample.png")
-         if animate:
-             writer.close()
-             st.video(outvid)
-
-
- def get_parser():
-     parser = argparse.ArgumentParser()
-     parser.add_argument(
-         "-r",
-         "--resume",
-         type=str,
-         nargs="?",
-         help="load from logdir or checkpoint in logdir",
-     )
-     parser.add_argument(
-         "-b",
-         "--base",
-         nargs="*",
-         metavar="base_config.yaml",
-         help="paths to base configs. Loaded from left-to-right. "
-         "Parameters can be overwritten or added with command-line options of the form `--key value`.",
-         default=list(),
-     )
-     parser.add_argument(
-         "-c",
-         "--config",
-         nargs="?",
-         metavar="single_config.yaml",
-         help="path to single config. If specified, base configs will be ignored "
-         "(except for the last one if left unspecified).",
-         const=True,
-         default="",
-     )
-     parser.add_argument(
-         "--ignore_base_data",
-         action="store_true",
-         help="Ignore data specification from base configs. Useful if you want "
-         "to specify a custom datasets on the command line.",
-     )
-     return parser
-
-
- def load_model_from_config(config, sd, gpu=True, eval_mode=True):
-     if "ckpt_path" in config.params:
-         st.warning("Deleting the restore-ckpt path from the config...")
-         config.params.ckpt_path = None
-     if "downsample_cond_size" in config.params:
-         st.warning("Deleting downsample-cond-size from the config and setting factor=0.5 instead...")
-         config.params.downsample_cond_size = -1
-         config.params["downsample_cond_factor"] = 0.5
-     try:
-         if "ckpt_path" in config.params.first_stage_config.params:
-             config.params.first_stage_config.params.ckpt_path = None
-             st.warning("Deleting the first-stage restore-ckpt path from the config...")
-         if "ckpt_path" in config.params.cond_stage_config.params:
-             config.params.cond_stage_config.params.ckpt_path = None
-             st.warning("Deleting the cond-stage restore-ckpt path from the config...")
-     except Exception:
-         pass
-
-     model = instantiate_from_config(config)
-     if sd is not None:
-         missing, unexpected = model.load_state_dict(sd, strict=False)
-         st.info(f"Missing Keys in State Dict: {missing}")
-         st.info(f"Unexpected Keys in State Dict: {unexpected}")
-     if gpu:
-         model.cuda()
-     if eval_mode:
-         model.eval()
-     return {"model": model}
-
-
- def get_data(config):
-     # get data
-     data = instantiate_from_config(config.data)
-     data.prepare_data()
-     data.setup()
-     return data
-
-
- @st.cache(allow_output_mutation=True, suppress_st_warning=True)
- def load_model_and_dset(config, ckpt, gpu, eval_mode):
-     # get data
-     dsets = get_data(config)   # calls data.config ...
-
-     # now load the specified checkpoint
-     if ckpt:
-         pl_sd = torch.load(ckpt, map_location="cpu")
-         global_step = pl_sd["global_step"]
-     else:
-         pl_sd = {"state_dict": None}
-         global_step = None
-     model = load_model_from_config(config.model,
-                                    pl_sd["state_dict"],
-                                    gpu=gpu,
-                                    eval_mode=eval_mode)["model"]
-     return dsets, model, global_step
-
-
- if __name__ == "__main__":
-     sys.path.append(os.getcwd())
-
-     parser = get_parser()
-
-     opt, unknown = parser.parse_known_args()
-
-     ckpt = None
-     if opt.resume:
-         if not os.path.exists(opt.resume):
-             raise ValueError("Cannot find {}".format(opt.resume))
-         if os.path.isfile(opt.resume):
-             paths = opt.resume.split("/")
-             try:
-                 idx = len(paths)-paths[::-1].index("logs")+1
-             except ValueError:
-                 idx = -2  # take a guess: path/to/logdir/checkpoints/model.ckpt
-             logdir = "/".join(paths[:idx])
-             ckpt = opt.resume
-         else:
-             assert os.path.isdir(opt.resume), opt.resume
-             logdir = opt.resume.rstrip("/")
-             ckpt = os.path.join(logdir, "checkpoints", "last.ckpt")
-         print(f"logdir:{logdir}")
-         base_configs = sorted(glob.glob(os.path.join(logdir, "configs/*-project.yaml")))
-         opt.base = base_configs+opt.base
-
-     if opt.config:
-         if type(opt.config) == str:
-             opt.base = [opt.config]
-         else:
-             opt.base = [opt.base[-1]]
-
-     configs = [OmegaConf.load(cfg) for cfg in opt.base]
-     cli = OmegaConf.from_dotlist(unknown)
-     if opt.ignore_base_data:
-         for config in configs:
-             if hasattr(config, "data"): del config["data"]
-     config = OmegaConf.merge(*configs, cli)
-
-     st.sidebar.text(ckpt)
-     gs = st.sidebar.empty()
-     gs.text(f"Global step: ?")
-     st.sidebar.text("Options")
-     #gpu = st.sidebar.checkbox("GPU", value=True)
-     gpu = True
-     #eval_mode = st.sidebar.checkbox("Eval Mode", value=True)
-     eval_mode = True
-     #show_config = st.sidebar.checkbox("Show Config", value=False)
-     show_config = False
-     if show_config:
-         st.info("Checkpoint: {}".format(ckpt))
-         st.json(OmegaConf.to_container(config))
-
-     dsets, model, global_step = load_model_and_dset(config, ckpt, gpu, eval_mode)
-     gs.text(f"Global step: {global_step}")
-     run_conditional(model, dsets)
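For context, taming-transformers scripts like this one are driven by Streamlit rather than run directly; the `--` separates Streamlit's own flags from the script's argparse flags, and the log directory below is illustrative:

    streamlit run scripts/sample_conditional.py -- -r logs/<your_run>/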
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/_windows.py DELETED
@@ -1,72 +0,0 @@
- import sys
- from dataclasses import dataclass
-
-
- @dataclass
- class WindowsConsoleFeatures:
-     """Windows features available."""
-
-     vt: bool = False
-     """The console supports VT codes."""
-     truecolor: bool = False
-     """The console supports truecolor."""
-
-
- try:
-     import ctypes
-     from ctypes import LibraryLoader
-
-     if sys.platform == "win32":
-         windll = LibraryLoader(ctypes.WinDLL)
-     else:
-         windll = None
-         raise ImportError("Not windows")
-
-     from pip._vendor.rich._win32_console import (
-         ENABLE_VIRTUAL_TERMINAL_PROCESSING,
-         GetConsoleMode,
-         GetStdHandle,
-         LegacyWindowsError,
-     )
-
- except (AttributeError, ImportError, ValueError):
-
-     # Fallback if we can't load the Windows DLL
-     def get_windows_console_features() -> WindowsConsoleFeatures:
-         features = WindowsConsoleFeatures()
-         return features
-
- else:
-
-     def get_windows_console_features() -> WindowsConsoleFeatures:
-         """Get windows console features.
-
-         Returns:
-             WindowsConsoleFeatures: An instance of WindowsConsoleFeatures.
-         """
-         handle = GetStdHandle()
-         try:
-             console_mode = GetConsoleMode(handle)
-             success = True
-         except LegacyWindowsError:
-             console_mode = 0
-             success = False
-         vt = bool(success and console_mode & ENABLE_VIRTUAL_TERMINAL_PROCESSING)
-         truecolor = False
-         if vt:
-             win_version = sys.getwindowsversion()
-             truecolor = win_version.major > 10 or (
-                 win_version.major == 10 and win_version.build >= 15063
-             )
-         features = WindowsConsoleFeatures(vt=vt, truecolor=truecolor)
-         return features
-
-
- if __name__ == "__main__":
-     import platform
-
-     features = get_windows_console_features()
-     from pip._vendor.rich import print
-
-     print(f'platform="{platform.system()}"')
-     print(repr(features))
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/tenacity/after.py DELETED
@@ -1,51 +0,0 @@
- # Copyright 2016 Julien Danjou
- # Copyright 2016 Joshua Harlow
- # Copyright 2013-2014 Ray Holder
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- import typing
-
- from pip._vendor.tenacity import _utils
-
- if typing.TYPE_CHECKING:
-     import logging
-
-     from pip._vendor.tenacity import RetryCallState
-
-
- def after_nothing(retry_state: "RetryCallState") -> None:
-     """After call strategy that does nothing."""
-
-
- def after_log(
-     logger: "logging.Logger",
-     log_level: int,
-     sec_format: str = "%0.3f",
- ) -> typing.Callable[["RetryCallState"], None]:
-     """After call strategy that logs to some logger the finished attempt."""
-
-     def log_it(retry_state: "RetryCallState") -> None:
-         if retry_state.fn is None:
-             # NOTE(sileht): can't really happen, but we must please mypy
-             fn_name = "<unknown>"
-         else:
-             fn_name = _utils.get_callback_name(retry_state.fn)
-         logger.log(
-             log_level,
-             f"Finished call to '{fn_name}' "
-             f"after {sec_format % retry_state.seconds_since_start}(s), "
-             f"this was the {_utils.to_ordinal(retry_state.attempt_number)} time calling it.",
-         )
-
-     return log_it
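A minimal sketch of how `after_log` plugs into a retry decorator (shown with the standalone `tenacity` package rather than pip's vendored copy; the flaky function is made up for illustration):

    import logging
    from tenacity import retry, stop_after_attempt, after_log

    logging.basicConfig(level=logging.WARNING)
    logger = logging.getLogger(__name__)

    attempts = {"n": 0}

    @retry(stop=stop_after_attempt(3), after=after_log(logger, logging.WARNING))
    def flaky_call():
        # Fails twice, then succeeds; after_log reports each finished attempt.
        attempts["n"] += 1
        if attempts["n"] < 3:
            raise RuntimeError("transient failure")
        return "ok"

    print(flaky_call())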
spaces/CVPR/LIVE/thrust/thrust/mr/new.h DELETED
@@ -1,88 +0,0 @@
- /*
-  *  Copyright 2018 NVIDIA Corporation
-  *
-  *  Licensed under the Apache License, Version 2.0 (the "License");
-  *  you may not use this file except in compliance with the License.
-  *  You may obtain a copy of the License at
-  *
-  *      http://www.apache.org/licenses/LICENSE-2.0
-  *
-  *  Unless required by applicable law or agreed to in writing, software
-  *  distributed under the License is distributed on an "AS IS" BASIS,
-  *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  *  See the License for the specific language governing permissions and
-  *  limitations under the License.
-  */
-
- /*! \file new.h
-  *  \brief Global operator new-based memory resource.
-  */
-
- #pragma once
-
- #include <thrust/mr/memory_resource.h>
-
- namespace thrust
- {
- namespace mr
- {
-
- /** \addtogroup memory_resources Memory Resources
-  *  \ingroup memory_management_classes
-  *  \{
-  */
-
- /*! A memory resource that uses global operators new and delete to allocate and deallocate memory. Uses alignment-enabled
-  *      overloads when available, otherwise uses regular overloads and implements alignment requirements by itself.
-  */
- class new_delete_resource THRUST_FINAL : public memory_resource<>
- {
- public:
-     void * do_allocate(std::size_t bytes, std::size_t alignment = THRUST_MR_DEFAULT_ALIGNMENT) THRUST_OVERRIDE
-     {
- #if defined(__cpp_aligned_new)
-         return ::operator new(bytes, std::align_val_t(alignment));
- #else
-         // allocate memory for bytes, plus potential alignment correction,
-         // plus store of the correction offset
-         void * p = ::operator new(bytes + alignment + sizeof(std::size_t));
-         std::size_t ptr_int = reinterpret_cast<std::size_t>(p);
-         // calculate the offset, i.e. how many bytes of correction was necessary
-         // to get an aligned pointer
-         std::size_t offset = (ptr_int % alignment) ? (alignment - ptr_int % alignment) : 0;
-         // calculate the return pointer
-         char * ptr = static_cast<char *>(p) + offset;
-         // store the offset right after the actually returned value
-         std::size_t * offset_store = reinterpret_cast<std::size_t *>(ptr + bytes);
-         *offset_store = offset;
-         return static_cast<void *>(ptr);
- #endif
-     }
-
-     void do_deallocate(void * p, std::size_t bytes, std::size_t alignment = THRUST_MR_DEFAULT_ALIGNMENT) THRUST_OVERRIDE
-     {
- #if defined(__cpp_aligned_new)
- #   if defined(__cpp_sized_deallocation)
-         ::operator delete(p, bytes, std::align_val_t(alignment));
- #   else
-         (void)bytes;
-         ::operator delete(p, std::align_val_t(alignment));
- #   endif
- #else
-         (void)alignment;
-         char * ptr = static_cast<char *>(p);
-         // calculate where the offset is stored
-         std::size_t * offset = reinterpret_cast<std::size_t *>(ptr + bytes);
-         // calculate the original pointer
-         p = static_cast<void *>(ptr - *offset);
-         ::operator delete(p);
- #endif
-     }
- };
-
- /*! \}
-  */
-
- } // end mr
- } // end thrust
-
spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/scan_by_key.h DELETED
@@ -1,23 +0,0 @@
- /*
-  *  Copyright 2008-2013 NVIDIA Corporation
-  *
-  *  Licensed under the Apache License, Version 2.0 (the "License");
-  *  you may not use this file except in compliance with the License.
-  *  You may obtain a copy of the License at
-  *
-  *      http://www.apache.org/licenses/LICENSE-2.0
-  *
-  *  Unless required by applicable law or agreed to in writing, software
-  *  distributed under the License is distributed on an "AS IS" BASIS,
-  *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  *  See the License for the specific language governing permissions and
-  *  limitations under the License.
-  */
-
- #pragma once
-
- #include <thrust/detail/config.h>
-
- // this system inherits this algorithm
- #include <thrust/system/cpp/detail/scan_by_key.h>
-
spaces/CVPR/regionclip-demo/detectron2/data/catalog.py DELETED
@@ -1,236 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates.
- import copy
- import logging
- import types
- from collections import UserDict
- from typing import List
-
- from detectron2.utils.logger import log_first_n
-
- __all__ = ["DatasetCatalog", "MetadataCatalog", "Metadata"]
-
-
- class _DatasetCatalog(UserDict):
-     """
-     A global dictionary that stores information about the datasets and how to obtain them.
-
-     It contains a mapping from strings
-     (which are names that identify a dataset, e.g. "coco_2014_train")
-     to a function which parses the dataset and returns the samples in the
-     format of `list[dict]`.
-
-     The returned dicts should be in Detectron2 Dataset format (See DATASETS.md for details)
-     if used with the data loader functionalities in `data/build.py,data/detection_transform.py`.
-
-     The purpose of having this catalog is to make it easy to choose
-     different datasets, by just using the strings in the config.
-     """
-
-     def register(self, name, func):
-         """
-         Args:
-             name (str): the name that identifies a dataset, e.g. "coco_2014_train".
-             func (callable): a callable which takes no arguments and returns a list of dicts.
-                 It must return the same results if called multiple times.
-         """
-         assert callable(func), "You must register a function with `DatasetCatalog.register`!"
-         assert name not in self, "Dataset '{}' is already registered!".format(name)
-         self[name] = func
-
-     def get(self, name):
-         """
-         Call the registered function and return its results.
-
-         Args:
-             name (str): the name that identifies a dataset, e.g. "coco_2014_train".
-
-         Returns:
-             list[dict]: dataset annotations.
-         """
-         try:
-             f = self[name]
-         except KeyError as e:
-             raise KeyError(
-                 "Dataset '{}' is not registered! Available datasets are: {}".format(
-                     name, ", ".join(list(self.keys()))
-                 )
-             ) from e
-         return f()
-
-     def list(self) -> List[str]:
-         """
-         List all registered datasets.
-
-         Returns:
-             list[str]
-         """
-         return list(self.keys())
-
-     def remove(self, name):
-         """
-         Alias of ``pop``.
-         """
-         self.pop(name)
-
-     def __str__(self):
-         return "DatasetCatalog(registered datasets: {})".format(", ".join(self.keys()))
-
-     __repr__ = __str__
-
-
- DatasetCatalog = _DatasetCatalog()
- DatasetCatalog.__doc__ = (
-     _DatasetCatalog.__doc__
-     + """
-     .. automethod:: detectron2.data.catalog.DatasetCatalog.register
-     .. automethod:: detectron2.data.catalog.DatasetCatalog.get
- """
- )
-
-
- class Metadata(types.SimpleNamespace):
-     """
-     A class that supports simple attribute setter/getter.
-     It is intended for storing metadata of a dataset and make it accessible globally.
-
-     Examples:
-     ::
-         # somewhere when you load the data:
-         MetadataCatalog.get("mydataset").thing_classes = ["person", "dog"]
-
-         # somewhere when you print statistics or visualize:
-         classes = MetadataCatalog.get("mydataset").thing_classes
-     """
-
-     # the name of the dataset
-     # set default to N/A so that `self.name` in the errors will not trigger getattr again
-     name: str = "N/A"
-
-     _RENAMED = {
-         "class_names": "thing_classes",
-         "dataset_id_to_contiguous_id": "thing_dataset_id_to_contiguous_id",
-         "stuff_class_names": "stuff_classes",
-     }
-
-     def __getattr__(self, key):
-         if key in self._RENAMED:
-             log_first_n(
-                 logging.WARNING,
-                 "Metadata '{}' was renamed to '{}'!".format(key, self._RENAMED[key]),
-                 n=10,
-             )
-             return getattr(self, self._RENAMED[key])
-
-         # "name" exists in every metadata
-         if len(self.__dict__) > 1:
-             raise AttributeError(
-                 "Attribute '{}' does not exist in the metadata of dataset '{}'. Available "
-                 "keys are {}.".format(key, self.name, str(self.__dict__.keys()))
-             )
-         else:
-             raise AttributeError(
-                 f"Attribute '{key}' does not exist in the metadata of dataset '{self.name}': "
-                 "metadata is empty."
-             )
-
-     def __setattr__(self, key, val):
-         if key in self._RENAMED:
-             log_first_n(
-                 logging.WARNING,
-                 "Metadata '{}' was renamed to '{}'!".format(key, self._RENAMED[key]),
-                 n=10,
-             )
-             setattr(self, self._RENAMED[key], val)
-
-         # Ensure that metadata of the same name stays consistent
-         try:
-             oldval = getattr(self, key)
-             assert oldval == val, (
-                 "Attribute '{}' in the metadata of '{}' cannot be set "
-                 "to a different value!\n{} != {}".format(key, self.name, oldval, val)
-             )
-         except AttributeError:
-             super().__setattr__(key, val)
-
-     def as_dict(self):
-         """
-         Returns all the metadata as a dict.
-         Note that modifications to the returned dict will not reflect on the Metadata object.
-         """
-         return copy.copy(self.__dict__)
-
-     def set(self, **kwargs):
-         """
-         Set multiple metadata with kwargs.
-         """
-         for k, v in kwargs.items():
-             setattr(self, k, v)
-         return self
-
-     def get(self, key, default=None):
-         """
-         Access an attribute and return its value if exists.
-         Otherwise return default.
-         """
-         try:
-             return getattr(self, key)
-         except AttributeError:
-             return default
-
-
- class _MetadataCatalog(UserDict):
-     """
-     MetadataCatalog is a global dictionary that provides access to
-     :class:`Metadata` of a given dataset.
-
-     The metadata associated with a certain name is a singleton: once created, the
-     metadata will stay alive and will be returned by future calls to ``get(name)``.
-
-     It's like global variables, so don't abuse it.
-     It's meant for storing knowledge that's constant and shared across the execution
-     of the program, e.g.: the class names in COCO.
-     """
-
-     def get(self, name):
-         """
-         Args:
-             name (str): name of a dataset (e.g. coco_2014_train).
-
-         Returns:
-             Metadata: The :class:`Metadata` instance associated with this name,
-             or create an empty one if none is available.
-         """
-         assert len(name)
-         r = super().get(name, None)
-         if r is None:
-             r = self[name] = Metadata(name=name)
-         return r
-
-     def list(self):
-         """
-         List all registered metadata.
-
-         Returns:
-             list[str]: keys (names of datasets) of all registered metadata
-         """
-         return list(self.keys())
-
-     def remove(self, name):
-         """
-         Alias of ``pop``.
-         """
-         self.pop(name)
-
-     def __str__(self):
-         return "MetadataCatalog(registered metadata: {})".format(", ".join(self.keys()))
-
-     __repr__ = __str__
-
-
- MetadataCatalog = _MetadataCatalog()
- MetadataCatalog.__doc__ = (
-     _MetadataCatalog.__doc__
-     + """
-     .. automethod:: detectron2.data.catalog.MetadataCatalog.get
- """
- )
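A minimal registration sketch following the docstrings above (the dataset name and loader function are made up for illustration):

    from detectron2.data import DatasetCatalog, MetadataCatalog

    def load_my_dataset():
        # Must return the same list[dict] in Detectron2 Dataset format on every call.
        return [{"file_name": "images/0001.jpg", "image_id": 0, "annotations": []}]

    DatasetCatalog.register("my_dataset_train", load_my_dataset)
    MetadataCatalog.get("my_dataset_train").thing_classes = ["person", "dog"]

    records = DatasetCatalog.get("my_dataset_train")  # calls load_my_dataset()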
spaces/ChandraMohanNayal/AutoGPT/run.sh DELETED
@@ -1,9 +0,0 @@
- #!/bin/bash
- python scripts/check_requirements.py requirements.txt
- if [ $? -eq 1 ]
- then
-     echo Installing missing packages...
-     pip install -r requirements.txt
- fi
- python -m autogpt "$@"
- read -p "Press any key to continue..."
spaces/ChevyWithAI/rvc-aicover/infer_pack/models.py DELETED
@@ -1,982 +0,0 @@
1
- import math, pdb, os
2
- from time import time as ttime
3
- import torch
4
- from torch import nn
5
- from torch.nn import functional as F
6
- from infer_pack import modules
7
- from infer_pack import attentions
8
- from infer_pack import commons
9
- from infer_pack.commons import init_weights, get_padding
10
- from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
11
- from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
12
- from infer_pack.commons import init_weights
13
- import numpy as np
14
- from infer_pack import commons
15
-
16
-
17
- class TextEncoder256(nn.Module):
18
- def __init__(
19
- self,
20
- out_channels,
21
- hidden_channels,
22
- filter_channels,
23
- n_heads,
24
- n_layers,
25
- kernel_size,
26
- p_dropout,
27
- f0=True,
28
- ):
29
- super().__init__()
30
- self.out_channels = out_channels
31
- self.hidden_channels = hidden_channels
32
- self.filter_channels = filter_channels
33
- self.n_heads = n_heads
34
- self.n_layers = n_layers
35
- self.kernel_size = kernel_size
36
- self.p_dropout = p_dropout
37
- self.emb_phone = nn.Linear(256, hidden_channels)
38
- self.lrelu = nn.LeakyReLU(0.1, inplace=True)
39
- if f0 == True:
40
- self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
41
- self.encoder = attentions.Encoder(
42
- hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
43
- )
44
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
45
-
46
- def forward(self, phone, pitch, lengths):
47
- if pitch == None:
48
- x = self.emb_phone(phone)
49
- else:
50
- x = self.emb_phone(phone) + self.emb_pitch(pitch)
51
- x = x * math.sqrt(self.hidden_channels) # [b, t, h]
52
- x = self.lrelu(x)
53
- x = torch.transpose(x, 1, -1) # [b, h, t]
54
- x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
55
- x.dtype
56
- )
57
- x = self.encoder(x * x_mask, x_mask)
58
- stats = self.proj(x) * x_mask
59
-
60
- m, logs = torch.split(stats, self.out_channels, dim=1)
61
- return m, logs, x_mask
62
-
63
-
64
- class TextEncoder256Sim(nn.Module):
65
- def __init__(
66
- self,
67
- out_channels,
68
- hidden_channels,
69
- filter_channels,
70
- n_heads,
71
- n_layers,
72
- kernel_size,
73
- p_dropout,
74
- f0=True,
75
- ):
76
- super().__init__()
77
- self.out_channels = out_channels
78
- self.hidden_channels = hidden_channels
79
- self.filter_channels = filter_channels
80
- self.n_heads = n_heads
81
- self.n_layers = n_layers
82
- self.kernel_size = kernel_size
83
- self.p_dropout = p_dropout
84
- self.emb_phone = nn.Linear(256, hidden_channels)
85
- self.lrelu = nn.LeakyReLU(0.1, inplace=True)
86
- if f0:
87
- self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
88
- self.encoder = attentions.Encoder(
89
- hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
90
- )
91
- self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
92
-
93
- def forward(self, phone, pitch, lengths):
94
- if pitch is None:
95
- x = self.emb_phone(phone)
96
- else:
97
- x = self.emb_phone(phone) + self.emb_pitch(pitch)
98
- x = x * math.sqrt(self.hidden_channels) # [b, t, h]
99
- x = self.lrelu(x)
100
- x = torch.transpose(x, 1, -1) # [b, h, t]
101
- x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
102
- x.dtype
103
- )
104
- x = self.encoder(x * x_mask, x_mask)
105
- x = self.proj(x) * x_mask
106
- return x, x_mask
107
-
108
-
109
- class ResidualCouplingBlock(nn.Module):
110
- def __init__(
111
- self,
112
- channels,
113
- hidden_channels,
114
- kernel_size,
115
- dilation_rate,
116
- n_layers,
117
- n_flows=4,
118
- gin_channels=0,
119
- ):
120
- super().__init__()
121
- self.channels = channels
122
- self.hidden_channels = hidden_channels
123
- self.kernel_size = kernel_size
124
- self.dilation_rate = dilation_rate
125
- self.n_layers = n_layers
126
- self.n_flows = n_flows
127
- self.gin_channels = gin_channels
128
-
129
- self.flows = nn.ModuleList()
130
- for i in range(n_flows):
131
- self.flows.append(
132
- modules.ResidualCouplingLayer(
133
- channels,
134
- hidden_channels,
135
- kernel_size,
136
- dilation_rate,
137
- n_layers,
138
- gin_channels=gin_channels,
139
- mean_only=True,
140
- )
141
- )
142
- self.flows.append(modules.Flip())
143
-
144
- def forward(self, x, x_mask, g=None, reverse=False):
145
- if not reverse:
146
- for flow in self.flows:
147
- x, _ = flow(x, x_mask, g=g, reverse=reverse)
148
- else:
149
- for flow in reversed(self.flows):
150
- x = flow(x, x_mask, g=g, reverse=reverse)
151
- return x
152
-
153
- def remove_weight_norm(self):
154
- for i in range(self.n_flows):
155
- self.flows[i * 2].remove_weight_norm()
156
-
157
-
158
- class PosteriorEncoder(nn.Module):
159
- def __init__(
160
- self,
161
- in_channels,
162
- out_channels,
163
- hidden_channels,
164
- kernel_size,
165
- dilation_rate,
166
- n_layers,
167
- gin_channels=0,
168
- ):
169
- super().__init__()
170
- self.in_channels = in_channels
171
- self.out_channels = out_channels
172
- self.hidden_channels = hidden_channels
173
- self.kernel_size = kernel_size
174
- self.dilation_rate = dilation_rate
175
- self.n_layers = n_layers
176
- self.gin_channels = gin_channels
177
-
178
- self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
179
- self.enc = modules.WN(
180
- hidden_channels,
181
- kernel_size,
182
- dilation_rate,
183
- n_layers,
184
- gin_channels=gin_channels,
185
- )
186
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
187
-
188
- def forward(self, x, x_lengths, g=None):
189
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
190
- x.dtype
191
- )
192
- x = self.pre(x) * x_mask
193
- x = self.enc(x, x_mask, g=g)
194
- stats = self.proj(x) * x_mask
195
- m, logs = torch.split(stats, self.out_channels, dim=1)
196
- z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
197
- return z, m, logs, x_mask
198
-
199
- def remove_weight_norm(self):
200
- self.enc.remove_weight_norm()
201
-
202
-
203
- class Generator(torch.nn.Module):
204
- def __init__(
205
- self,
206
- initial_channel,
207
- resblock,
208
- resblock_kernel_sizes,
209
- resblock_dilation_sizes,
210
- upsample_rates,
211
- upsample_initial_channel,
212
- upsample_kernel_sizes,
213
- gin_channels=0,
214
- ):
215
- super(Generator, self).__init__()
216
- self.num_kernels = len(resblock_kernel_sizes)
217
- self.num_upsamples = len(upsample_rates)
218
- self.conv_pre = Conv1d(
219
- initial_channel, upsample_initial_channel, 7, 1, padding=3
220
- )
221
- resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
222
-
223
- self.ups = nn.ModuleList()
224
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
225
- self.ups.append(
226
- weight_norm(
227
- ConvTranspose1d(
228
- upsample_initial_channel // (2**i),
229
- upsample_initial_channel // (2 ** (i + 1)),
230
- k,
231
- u,
232
- padding=(k - u) // 2,
233
- )
234
- )
235
- )
236
-
237
- self.resblocks = nn.ModuleList()
238
- for i in range(len(self.ups)):
239
- ch = upsample_initial_channel // (2 ** (i + 1))
240
- for j, (k, d) in enumerate(
241
- zip(resblock_kernel_sizes, resblock_dilation_sizes)
242
- ):
243
- self.resblocks.append(resblock(ch, k, d))
244
-
245
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
246
- self.ups.apply(init_weights)
247
-
248
- if gin_channels != 0:
249
- self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
250
-
251
- def forward(self, x, g=None):
252
- x = self.conv_pre(x)
253
- if g is not None:
254
- x = x + self.cond(g)
255
-
256
- for i in range(self.num_upsamples):
257
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
258
- x = self.ups[i](x)
259
- xs = None
260
- for j in range(self.num_kernels):
261
- if xs is None:
262
- xs = self.resblocks[i * self.num_kernels + j](x)
263
- else:
264
- xs += self.resblocks[i * self.num_kernels + j](x)
265
- x = xs / self.num_kernels
266
- x = F.leaky_relu(x)
267
- x = self.conv_post(x)
268
- x = torch.tanh(x)
269
-
270
- return x
271
-
272
- def remove_weight_norm(self):
273
- for l in self.ups:
274
- remove_weight_norm(l)
275
- for l in self.resblocks:
276
- l.remove_weight_norm()
277
-
278
-
279
- class SineGen(torch.nn.Module):
280
- """Definition of sine generator
281
- SineGen(samp_rate, harmonic_num = 0,
282
- sine_amp = 0.1, noise_std = 0.003,
283
- voiced_threshold = 0,
284
- flag_for_pulse=False)
285
- samp_rate: sampling rate in Hz
286
- harmonic_num: number of harmonic overtones (default 0)
287
- sine_amp: amplitude of sine waveform (default 0.1)
288
- noise_std: std of Gaussian noise (default 0.003)
289
- voiced_threshold: F0 threshold for U/V classification (default 0)
290
- flag_for_pulse: this SineGen is used inside PulseGen (default False)
291
- Note: when flag_for_pulse is True, the first time step of a voiced
292
- segment is always sin(np.pi) or cos(0)
293
- """
294
-
295
- def __init__(
296
- self,
297
- samp_rate,
298
- harmonic_num=0,
299
- sine_amp=0.1,
300
- noise_std=0.003,
301
- voiced_threshold=0,
302
- flag_for_pulse=False,
303
- ):
304
- super(SineGen, self).__init__()
305
- self.sine_amp = sine_amp
306
- self.noise_std = noise_std
307
- self.harmonic_num = harmonic_num
308
- self.dim = self.harmonic_num + 1
309
- self.sampling_rate = samp_rate
310
- self.voiced_threshold = voiced_threshold
311
-
312
- def _f02uv(self, f0):
313
- # generate uv signal
314
- uv = torch.ones_like(f0)
315
- uv = uv * (f0 > self.voiced_threshold)
316
- return uv
317
-
318
- def forward(self, f0, upp):
319
- """sine_tensor, uv = forward(f0)
320
- input F0: tensor(batchsize=1, length, dim=1)
321
- f0 for unvoiced steps should be 0
322
- output sine_tensor: tensor(batchsize=1, length, dim)
323
- output uv: tensor(batchsize=1, length, 1)
324
- """
325
- with torch.no_grad():
326
- f0 = f0[:, None].transpose(1, 2)
327
- f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
328
- # fundamental component
329
- f0_buf[:, :, 0] = f0[:, :, 0]
330
- for idx in np.arange(self.harmonic_num):
331
- f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (
332
- idx + 2
333
- ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
334
- rad_values = (f0_buf / self.sampling_rate) % 1  # the % 1 means the n_har products cannot be optimized away in post-processing
335
- rand_ini = torch.rand(
336
- f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device
337
- )
338
- rand_ini[:, 0] = 0
339
- rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
340
- tmp_over_one = torch.cumsum(rad_values, 1)  # % 1 here would mean the later cumsum could no longer be optimized
341
- tmp_over_one *= upp
342
- tmp_over_one = F.interpolate(
343
- tmp_over_one.transpose(2, 1),
344
- scale_factor=upp,
345
- mode="linear",
346
- align_corners=True,
347
- ).transpose(2, 1)
348
- rad_values = F.interpolate(
349
- rad_values.transpose(2, 1), scale_factor=upp, mode="nearest"
350
- ).transpose(
351
- 2, 1
352
- )
353
- tmp_over_one %= 1
354
- tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
355
- cumsum_shift = torch.zeros_like(rad_values)
356
- cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
357
- sine_waves = torch.sin(
358
- torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi
359
- )
360
- sine_waves = sine_waves * self.sine_amp
361
- uv = self._f02uv(f0)
362
- uv = F.interpolate(
363
- uv.transpose(2, 1), scale_factor=upp, mode="nearest"
364
- ).transpose(2, 1)
365
- noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
366
- noise = noise_amp * torch.randn_like(sine_waves)
367
- sine_waves = sine_waves * uv + noise
368
- return sine_waves, uv, noise
369
-
370
-
371
- class SourceModuleHnNSF(torch.nn.Module):
372
- """SourceModule for hn-nsf
373
- SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
374
- add_noise_std=0.003, voiced_threshod=0)
375
- sampling_rate: sampling_rate in Hz
376
- harmonic_num: number of harmonic above F0 (default: 0)
377
- sine_amp: amplitude of sine source signal (default: 0.1)
378
- add_noise_std: std of additive Gaussian noise (default: 0.003)
379
- note that the amplitude of noise in unvoiced segments is decided
380
- by sine_amp
381
- voiced_threshold: threshold to set U/V given F0 (default: 0)
382
- Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
383
- F0_sampled (batchsize, length, 1)
384
- Sine_source (batchsize, length, 1)
385
- noise_source (batchsize, length, 1)
386
- uv (batchsize, length, 1)
387
- """
388
-
389
- def __init__(
390
- self,
391
- sampling_rate,
392
- harmonic_num=0,
393
- sine_amp=0.1,
394
- add_noise_std=0.003,
395
- voiced_threshold=0,
396
- is_half=True,
397
- ):
398
- super(SourceModuleHnNSF, self).__init__()
399
-
400
- self.sine_amp = sine_amp
401
- self.noise_std = add_noise_std
402
- self.is_half = is_half
403
- # to produce sine waveforms
404
- self.l_sin_gen = SineGen(
405
- sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshold
406
- )
407
-
408
- # to merge source harmonics into a single excitation
409
- self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
410
- self.l_tanh = torch.nn.Tanh()
411
-
412
- def forward(self, x, upp=None):
413
- sine_wavs, uv, _ = self.l_sin_gen(x, upp)
414
- if self.is_half:
415
- sine_wavs = sine_wavs.half()
416
- sine_merge = self.l_tanh(self.l_linear(sine_wavs))
417
- return sine_merge, None, None # noise, uv
418
-
419
-
420
- class GeneratorNSF(torch.nn.Module):
421
- def __init__(
422
- self,
423
- initial_channel,
424
- resblock,
425
- resblock_kernel_sizes,
426
- resblock_dilation_sizes,
427
- upsample_rates,
428
- upsample_initial_channel,
429
- upsample_kernel_sizes,
430
- gin_channels,
431
- sr,
432
- is_half=False,
433
- ):
434
- super(GeneratorNSF, self).__init__()
435
- self.num_kernels = len(resblock_kernel_sizes)
436
- self.num_upsamples = len(upsample_rates)
437
-
438
- self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates))
439
- self.m_source = SourceModuleHnNSF(
440
- sampling_rate=sr, harmonic_num=0, is_half=is_half
441
- )
442
- self.noise_convs = nn.ModuleList()
443
- self.conv_pre = Conv1d(
444
- initial_channel, upsample_initial_channel, 7, 1, padding=3
445
- )
446
- resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
447
-
448
- self.ups = nn.ModuleList()
449
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
450
- c_cur = upsample_initial_channel // (2 ** (i + 1))
451
- self.ups.append(
452
- weight_norm(
453
- ConvTranspose1d(
454
- upsample_initial_channel // (2**i),
455
- upsample_initial_channel // (2 ** (i + 1)),
456
- k,
457
- u,
458
- padding=(k - u) // 2,
459
- )
460
- )
461
- )
462
- if i + 1 < len(upsample_rates):
463
- stride_f0 = np.prod(upsample_rates[i + 1 :])
464
- self.noise_convs.append(
465
- Conv1d(
466
- 1,
467
- c_cur,
468
- kernel_size=stride_f0 * 2,
469
- stride=stride_f0,
470
- padding=stride_f0 // 2,
471
- )
472
- )
473
- else:
474
- self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))
475
-
476
- self.resblocks = nn.ModuleList()
477
- for i in range(len(self.ups)):
478
- ch = upsample_initial_channel // (2 ** (i + 1))
479
- for j, (k, d) in enumerate(
480
- zip(resblock_kernel_sizes, resblock_dilation_sizes)
481
- ):
482
- self.resblocks.append(resblock(ch, k, d))
483
-
484
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
485
- self.ups.apply(init_weights)
486
-
487
- if gin_channels != 0:
488
- self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
489
-
490
- self.upp = np.prod(upsample_rates)
491
-
492
- def forward(self, x, f0, g=None):
493
- har_source, noi_source, uv = self.m_source(f0, self.upp)
494
- har_source = har_source.transpose(1, 2)
495
- x = self.conv_pre(x)
496
- if g is not None:
497
- x = x + self.cond(g)
498
-
499
- for i in range(self.num_upsamples):
500
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
501
- x = self.ups[i](x)
502
- x_source = self.noise_convs[i](har_source)
503
- x = x + x_source
504
- xs = None
505
- for j in range(self.num_kernels):
506
- if xs is None:
507
- xs = self.resblocks[i * self.num_kernels + j](x)
508
- else:
509
- xs += self.resblocks[i * self.num_kernels + j](x)
510
- x = xs / self.num_kernels
511
- x = F.leaky_relu(x)
512
- x = self.conv_post(x)
513
- x = torch.tanh(x)
514
- return x
515
-
516
- def remove_weight_norm(self):
517
- for l in self.ups:
518
- remove_weight_norm(l)
519
- for l in self.resblocks:
520
- l.remove_weight_norm()
521
-
522
-
523
- sr2sr = {
524
- "32k": 32000,
525
- "40k": 40000,
526
- "48k": 48000,
527
- }
528
-
529
-
530
- class SynthesizerTrnMs256NSFsid(nn.Module):
531
- def __init__(
532
- self,
533
- spec_channels,
534
- segment_size,
535
- inter_channels,
536
- hidden_channels,
537
- filter_channels,
538
- n_heads,
539
- n_layers,
540
- kernel_size,
541
- p_dropout,
542
- resblock,
543
- resblock_kernel_sizes,
544
- resblock_dilation_sizes,
545
- upsample_rates,
546
- upsample_initial_channel,
547
- upsample_kernel_sizes,
548
- spk_embed_dim,
549
- gin_channels,
550
- sr,
551
- **kwargs
552
- ):
553
- super().__init__()
554
- if isinstance(sr, str):
555
- sr = sr2sr[sr]
556
- self.spec_channels = spec_channels
557
- self.inter_channels = inter_channels
558
- self.hidden_channels = hidden_channels
559
- self.filter_channels = filter_channels
560
- self.n_heads = n_heads
561
- self.n_layers = n_layers
562
- self.kernel_size = kernel_size
563
- self.p_dropout = p_dropout
564
- self.resblock = resblock
565
- self.resblock_kernel_sizes = resblock_kernel_sizes
566
- self.resblock_dilation_sizes = resblock_dilation_sizes
567
- self.upsample_rates = upsample_rates
568
- self.upsample_initial_channel = upsample_initial_channel
569
- self.upsample_kernel_sizes = upsample_kernel_sizes
570
- self.segment_size = segment_size
571
- self.gin_channels = gin_channels
572
- # self.hop_length = hop_length#
573
- self.spk_embed_dim = spk_embed_dim
574
- self.enc_p = TextEncoder256(
575
- inter_channels,
576
- hidden_channels,
577
- filter_channels,
578
- n_heads,
579
- n_layers,
580
- kernel_size,
581
- p_dropout,
582
- )
583
- self.dec = GeneratorNSF(
584
- inter_channels,
585
- resblock,
586
- resblock_kernel_sizes,
587
- resblock_dilation_sizes,
588
- upsample_rates,
589
- upsample_initial_channel,
590
- upsample_kernel_sizes,
591
- gin_channels=gin_channels,
592
- sr=sr,
593
- is_half=kwargs["is_half"],
594
- )
595
- self.enc_q = PosteriorEncoder(
596
- spec_channels,
597
- inter_channels,
598
- hidden_channels,
599
- 5,
600
- 1,
601
- 16,
602
- gin_channels=gin_channels,
603
- )
604
- self.flow = ResidualCouplingBlock(
605
- inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
606
- )
607
- self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
608
- print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
609
-
610
- def remove_weight_norm(self):
611
- self.dec.remove_weight_norm()
612
- self.flow.remove_weight_norm()
613
- self.enc_q.remove_weight_norm()
614
-
615
- def forward(
616
- self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds
617
- ): # here ds is the speaker id, shape [bs, 1]
618
- # print(1,pitch.shape)#[bs,t]
619
- g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]; the trailing 1 is the time axis, broadcast over t
620
- m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
621
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
622
- z_p = self.flow(z, y_mask, g=g)
623
- z_slice, ids_slice = commons.rand_slice_segments(
624
- z, y_lengths, self.segment_size
625
- )
626
- # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)
627
- pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
628
- # print(-2,pitchf.shape,z_slice.shape)
629
- o = self.dec(z_slice, pitchf, g=g)
630
- return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
631
-
632
- def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None):
633
- g = self.emb_g(sid).unsqueeze(-1)
634
- m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
635
- z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
636
- z = self.flow(z_p, x_mask, g=g, reverse=True)
637
- o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)
638
- return o, x_mask, (z, z_p, m_p, logs_p)
639
-
640
-
641
- class SynthesizerTrnMs256NSFsid_nono(nn.Module):
642
- def __init__(
643
- self,
644
- spec_channels,
645
- segment_size,
646
- inter_channels,
647
- hidden_channels,
648
- filter_channels,
649
- n_heads,
650
- n_layers,
651
- kernel_size,
652
- p_dropout,
653
- resblock,
654
- resblock_kernel_sizes,
655
- resblock_dilation_sizes,
656
- upsample_rates,
657
- upsample_initial_channel,
658
- upsample_kernel_sizes,
659
- spk_embed_dim,
660
- gin_channels,
661
- sr=None,
662
- **kwargs
663
- ):
664
- super().__init__()
665
- self.spec_channels = spec_channels
666
- self.inter_channels = inter_channels
667
- self.hidden_channels = hidden_channels
668
- self.filter_channels = filter_channels
669
- self.n_heads = n_heads
670
- self.n_layers = n_layers
671
- self.kernel_size = kernel_size
672
- self.p_dropout = p_dropout
673
- self.resblock = resblock
674
- self.resblock_kernel_sizes = resblock_kernel_sizes
675
- self.resblock_dilation_sizes = resblock_dilation_sizes
676
- self.upsample_rates = upsample_rates
677
- self.upsample_initial_channel = upsample_initial_channel
678
- self.upsample_kernel_sizes = upsample_kernel_sizes
679
- self.segment_size = segment_size
680
- self.gin_channels = gin_channels
681
- # self.hop_length = hop_length#
682
- self.spk_embed_dim = spk_embed_dim
683
- self.enc_p = TextEncoder256(
684
- inter_channels,
685
- hidden_channels,
686
- filter_channels,
687
- n_heads,
688
- n_layers,
689
- kernel_size,
690
- p_dropout,
691
- f0=False,
692
- )
693
- self.dec = Generator(
694
- inter_channels,
695
- resblock,
696
- resblock_kernel_sizes,
697
- resblock_dilation_sizes,
698
- upsample_rates,
699
- upsample_initial_channel,
700
- upsample_kernel_sizes,
701
- gin_channels=gin_channels,
702
- )
703
- self.enc_q = PosteriorEncoder(
704
- spec_channels,
705
- inter_channels,
706
- hidden_channels,
707
- 5,
708
- 1,
709
- 16,
710
- gin_channels=gin_channels,
711
- )
712
- self.flow = ResidualCouplingBlock(
713
- inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
714
- )
715
- self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
716
- print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
717
-
718
- def remove_weight_norm(self):
719
- self.dec.remove_weight_norm()
720
- self.flow.remove_weight_norm()
721
- self.enc_q.remove_weight_norm()
722
-
723
- def forward(self, phone, phone_lengths, y, y_lengths, ds): # here ds is the speaker id, shape [bs, 1]
724
- g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]; the trailing 1 is the time axis, broadcast over t
725
- m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
726
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
727
- z_p = self.flow(z, y_mask, g=g)
728
- z_slice, ids_slice = commons.rand_slice_segments(
729
- z, y_lengths, self.segment_size
730
- )
731
- o = self.dec(z_slice, g=g)
732
- return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
733
-
734
- def infer(self, phone, phone_lengths, sid, max_len=None):
735
- g = self.emb_g(sid).unsqueeze(-1)
736
- m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
737
- z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
738
- z = self.flow(z_p, x_mask, g=g, reverse=True)
739
- o = self.dec((z * x_mask)[:, :, :max_len], g=g)
740
- return o, x_mask, (z, z_p, m_p, logs_p)
741
-
742
-
743
- class SynthesizerTrnMs256NSFsid_sim(nn.Module):
744
- """
745
- Synthesizer for Training
746
- """
747
-
748
- def __init__(
749
- self,
750
- spec_channels,
751
- segment_size,
752
- inter_channels,
753
- hidden_channels,
754
- filter_channels,
755
- n_heads,
756
- n_layers,
757
- kernel_size,
758
- p_dropout,
759
- resblock,
760
- resblock_kernel_sizes,
761
- resblock_dilation_sizes,
762
- upsample_rates,
763
- upsample_initial_channel,
764
- upsample_kernel_sizes,
765
- spk_embed_dim,
766
- # hop_length,
767
- gin_channels=0,
768
- use_sdp=True,
769
- **kwargs
770
- ):
771
- super().__init__()
772
- self.spec_channels = spec_channels
773
- self.inter_channels = inter_channels
774
- self.hidden_channels = hidden_channels
775
- self.filter_channels = filter_channels
776
- self.n_heads = n_heads
777
- self.n_layers = n_layers
778
- self.kernel_size = kernel_size
779
- self.p_dropout = p_dropout
780
- self.resblock = resblock
781
- self.resblock_kernel_sizes = resblock_kernel_sizes
782
- self.resblock_dilation_sizes = resblock_dilation_sizes
783
- self.upsample_rates = upsample_rates
784
- self.upsample_initial_channel = upsample_initial_channel
785
- self.upsample_kernel_sizes = upsample_kernel_sizes
786
- self.segment_size = segment_size
787
- self.gin_channels = gin_channels
788
- # self.hop_length = hop_length#
789
- self.spk_embed_dim = spk_embed_dim
790
- self.enc_p = TextEncoder256Sim(
791
- inter_channels,
792
- hidden_channels,
793
- filter_channels,
794
- n_heads,
795
- n_layers,
796
- kernel_size,
797
- p_dropout,
798
- )
799
- self.dec = GeneratorNSF(
800
- inter_channels,
801
- resblock,
802
- resblock_kernel_sizes,
803
- resblock_dilation_sizes,
804
- upsample_rates,
805
- upsample_initial_channel,
806
- upsample_kernel_sizes,
807
- gin_channels=gin_channels,
808
- is_half=kwargs["is_half"],
809
- )
810
-
811
- self.flow = ResidualCouplingBlock(
812
- inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
813
- )
814
- self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
815
- print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
816
-
817
- def remove_weight_norm(self):
818
- self.dec.remove_weight_norm()
819
- self.flow.remove_weight_norm()
820
- # note: unlike the other synthesizers, this class defines no enc_q to strip
821
-
822
- def forward(
823
- self, phone, phone_lengths, pitch, pitchf, y_lengths, ds
824
- ): # y (the spectrogram) is no longer needed here
825
- g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]; the trailing 1 is the time axis, broadcast over t
826
- x, x_mask = self.enc_p(phone, pitch, phone_lengths)
827
- x = self.flow(x, x_mask, g=g, reverse=True)
828
- z_slice, ids_slice = commons.rand_slice_segments(
829
- x, y_lengths, self.segment_size
830
- )
831
-
832
- pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
833
- o = self.dec(z_slice, pitchf, g=g)
834
- return o, ids_slice
835
-
836
- def infer(
837
- self, phone, phone_lengths, pitch, pitchf, ds, max_len=None
838
- ): # y (the spectrogram) is no longer needed here
839
- g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]; the trailing 1 is the time axis, broadcast over t
840
- x, x_mask = self.enc_p(phone, pitch, phone_lengths)
841
- x = self.flow(x, x_mask, g=g, reverse=True)
842
- o = self.dec((x * x_mask)[:, :, :max_len], pitchf, g=g)
843
- return o, o
844
-
845
-
846
- class MultiPeriodDiscriminator(torch.nn.Module):
847
- def __init__(self, use_spectral_norm=False):
848
- super(MultiPeriodDiscriminator, self).__init__()
849
- periods = [2, 3, 5, 7, 11, 17]
850
- # periods = [3, 5, 7, 11, 17, 23, 37]
851
-
852
- discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
853
- discs = discs + [
854
- DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
855
- ]
856
- self.discriminators = nn.ModuleList(discs)
857
-
858
- def forward(self, y, y_hat):
859
- y_d_rs = []
860
- y_d_gs = []
861
- fmap_rs = []
862
- fmap_gs = []
863
- for i, d in enumerate(self.discriminators):
864
- y_d_r, fmap_r = d(y)
865
- y_d_g, fmap_g = d(y_hat)
866
- # for j in range(len(fmap_r)):
867
- # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
868
- y_d_rs.append(y_d_r)
869
- y_d_gs.append(y_d_g)
870
- fmap_rs.append(fmap_r)
871
- fmap_gs.append(fmap_g)
872
-
873
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
874
-
875
-
876
- class DiscriminatorS(torch.nn.Module):
877
- def __init__(self, use_spectral_norm=False):
878
- super(DiscriminatorS, self).__init__()
879
- norm_f = spectral_norm if use_spectral_norm else weight_norm
880
- self.convs = nn.ModuleList(
881
- [
882
- norm_f(Conv1d(1, 16, 15, 1, padding=7)),
883
- norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
884
- norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
885
- norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
886
- norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
887
- norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
888
- ]
889
- )
890
- self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
891
-
892
- def forward(self, x):
893
- fmap = []
894
-
895
- for l in self.convs:
896
- x = l(x)
897
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
898
- fmap.append(x)
899
- x = self.conv_post(x)
900
- fmap.append(x)
901
- x = torch.flatten(x, 1, -1)
902
-
903
- return x, fmap
904
-
905
-
906
- class DiscriminatorP(torch.nn.Module):
907
- def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
908
- super(DiscriminatorP, self).__init__()
909
- self.period = period
910
- self.use_spectral_norm = use_spectral_norm
911
- norm_f = spectral_norm if use_spectral_norm else weight_norm
912
- self.convs = nn.ModuleList(
913
- [
914
- norm_f(
915
- Conv2d(
916
- 1,
917
- 32,
918
- (kernel_size, 1),
919
- (stride, 1),
920
- padding=(get_padding(kernel_size, 1), 0),
921
- )
922
- ),
923
- norm_f(
924
- Conv2d(
925
- 32,
926
- 128,
927
- (kernel_size, 1),
928
- (stride, 1),
929
- padding=(get_padding(kernel_size, 1), 0),
930
- )
931
- ),
932
- norm_f(
933
- Conv2d(
934
- 128,
935
- 512,
936
- (kernel_size, 1),
937
- (stride, 1),
938
- padding=(get_padding(kernel_size, 1), 0),
939
- )
940
- ),
941
- norm_f(
942
- Conv2d(
943
- 512,
944
- 1024,
945
- (kernel_size, 1),
946
- (stride, 1),
947
- padding=(get_padding(kernel_size, 1), 0),
948
- )
949
- ),
950
- norm_f(
951
- Conv2d(
952
- 1024,
953
- 1024,
954
- (kernel_size, 1),
955
- 1,
956
- padding=(get_padding(kernel_size, 1), 0),
957
- )
958
- ),
959
- ]
960
- )
961
- self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
962
-
963
- def forward(self, x):
964
- fmap = []
965
-
966
- # 1d to 2d
967
- b, c, t = x.shape
968
- if t % self.period != 0: # pad first
969
- n_pad = self.period - (t % self.period)
970
- x = F.pad(x, (0, n_pad), "reflect")
971
- t = t + n_pad
972
- x = x.view(b, c, t // self.period, self.period)
973
-
974
- for l in self.convs:
975
- x = l(x)
976
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
977
- fmap.append(x)
978
- x = self.conv_post(x)
979
- fmap.append(x)
980
- x = torch.flatten(x, 1, -1)
981
-
982
- return x, fmap
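As a sanity check on the shapes promised by the SineGen docstring above, a minimal smoke-test sketch; the values are illustrative and it assumes the deleted module were still importable as infer_pack.models:

    import torch

    from infer_pack.models import SineGen

    sine_gen = SineGen(samp_rate=40000, harmonic_num=0)
    f0 = torch.full((1, 100), 220.0)   # 100 frames of a flat 220 Hz contour
    upp = 400                          # per-frame upsampling factor
    sine_waves, uv, noise = sine_gen(f0, upp)
    print(sine_waves.shape, uv.shape)  # torch.Size([1, 40000, 1]) for both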